content_type (stringclasses, 8 values) | main_lang (stringclasses, 7 values) | message (stringlengths, 1-50) | sha (stringlengths, 40) | patch (stringlengths, 52-962k) | file_count (int64, 1-300)
---|---|---|---|---|---|
PHP | PHP | add table prefix | 4ffe40fb169c6bcce9193ff56958eca41e64294f | <ide><path>src/Illuminate/Database/Console/DumpCommand.php
<ide> public function handle(ConnectionResolverInterface $connections, Dispatcher $dis
<ide> protected function schemaState(Connection $connection)
<ide> {
<ide> return $connection->getSchemaState()
<del> ->withMigrationTable(Config::get('database.migrations', 'migrations'))
<add> ->withMigrationTable($connection->getTablePrefix().Config::get('database.migrations', 'migrations'))
<ide> ->handleOutputUsing(function ($type, $buffer) {
<ide> $this->output->write($buffer);
<ide> }); | 1 |
Python | Python | fix numpy ticket 53 | dbb89ee8932e1f2b9f4a8940d90d6fd96c8c3f8e | <ide><path>numpy/distutils/misc_util.py
<ide> def add_data_dir(self,data_path):
<ide> return self.add_data_dir((data_path, data_path))
<ide> paths = self.paths(data_path, include_non_existing=False)
<ide> if is_glob_pattern(data_path):
<add> pp = self.path_in_package.split(os.sep)
<ide> if is_glob_pattern(d):
<del> pattern_list = d.split(os.sep)
<add> pattern_list = pp + d.split(os.sep)
<ide> pattern_list.reverse()
<ide> for path in paths:
<ide> path_list = path.split(os.sep)
<ide> def add_data_dir(self,data_path):
<ide> raise ValueError,'cannot fill pattern %r with %r' \
<ide> % (d, path)
<ide> target_list.append(path_list[i])
<del> i += 1
<ide> else:
<add> assert s==path_list[i],`s,path_list[i],data_path,d`
<ide> target_list.append(s)
<add> i += 1
<add> if path_list[i:]:
<add> self.warn('mismatch of pattern_list=%s and path_list=%s'\
<add> % (pattern_list,path_list))
<ide> target_list.reverse()
<add> target_list = target_list[len(pp):]
<ide> self.add_data_dir((os.sep.join(target_list),path))
<ide> else:
<ide> for path in paths: | 1 |
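
The patch prepends the package path components to the glob pattern before the reversed, component-wise match against each concrete path. A minimal Python model of that pattern-filling idea (hypothetical names, not from the commit):

```python
import fnmatch
import os

def fill_pattern(pattern, path):
    """Resolve each glob component of `pattern` against the concrete `path`.

    Mirrors the reversed, component-wise matching in add_data_dir: literal
    components must match exactly, glob components adopt the path component.
    """
    pattern_list = pattern.split(os.sep)
    path_list = path.split(os.sep)
    target = []
    for pat, comp in zip(reversed(pattern_list), reversed(path_list)):
        if any(ch in pat for ch in "*?["):   # glob component: take the concrete part
            if not fnmatch.fnmatch(comp, pat):
                raise ValueError(f"cannot fill pattern {pattern!r} with {path!r}")
            target.append(comp)
        else:                                # literal component: must match exactly
            assert pat == comp, (pat, comp)
            target.append(comp)
    return os.sep.join(reversed(target))

print(fill_pattern("data/*/files", "data/v1/files"))  # -> data/v1/files
```
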
PHP | PHP | use support\carbon. | 60860f3fdb277154606a1f3584104ed97d24d16a | <ide><path>src/Illuminate/Foundation/helpers.php
<ide> <?php
<ide>
<del>use Carbon\Carbon;
<add>use Illuminate\Support\Carbon;
<ide> use Illuminate\Support\HtmlString;
<ide> use Illuminate\Container\Container;
<ide> use Illuminate\Contracts\Bus\Dispatcher;
<ide> function mix($path, $manifestDirectory = '')
<ide> /**
<ide> * Create a new Carbon instance for the current time.
<ide> *
<del> * @return \Carbon\Carbon
<add> * @return \Illuminate\Support\Carbon
<ide> */
<ide> function now()
<ide> {
<ide> function storage_path($path = '')
<ide> /**
<ide> * Create a new Carbon instance for the current date.
<ide> *
<del> * @return \Carbon\Carbon
<add> * @return \Illuminate\Support\Carbon
<ide> */
<ide> function today()
<ide> { | 1 |
Javascript | Javascript | detect detach/attach in same observation | bccb1276f023448e2539a49c879c7a97bd83f763 | <ide><path>src/platform/platform.dom.js
<ide> function fromNativeEvent(event, chart) {
<ide> };
<ide> }
<ide>
<add>function nodeListContains(nodeList, canvas) {
<add> for (const node of nodeList) {
<add> if (node === canvas || node.contains(canvas)) {
<add> return true;
<add> }
<add> }
<add>}
<add>
<ide> function createAttachObserver(chart, type, listener) {
<ide> const canvas = chart.canvas;
<ide> const observer = new MutationObserver(entries => {
<add> let trigger = false;
<ide> for (const entry of entries) {
<del> for (const node of entry.addedNodes) {
<del> if (node === canvas || node.contains(canvas)) {
<del> return listener();
<del> }
<del> }
<add> trigger = trigger || nodeListContains(entry.addedNodes, canvas);
<add> trigger = trigger && !nodeListContains(entry.removedNodes, canvas);
<add> }
<add> if (trigger) {
<add> listener();
<ide> }
<ide> });
<ide> observer.observe(document, {childList: true, subtree: true});
<ide> function createAttachObserver(chart, type, listener) {
<ide> function createDetachObserver(chart, type, listener) {
<ide> const canvas = chart.canvas;
<ide> const observer = new MutationObserver(entries => {
<add> let trigger = false;
<ide> for (const entry of entries) {
<del> for (const node of entry.removedNodes) {
<del> if (node === canvas || node.contains(canvas)) {
<del> return listener();
<del> }
<del> }
<add> trigger = trigger || nodeListContains(entry.removedNodes, canvas);
<add> trigger = trigger && !nodeListContains(entry.addedNodes, canvas);
<add> }
<add> if (trigger) {
<add> listener();
<ide> }
<ide> });
<ide> observer.observe(document, {childList: true, subtree: true});
<ide><path>test/specs/core.controller.tests.js
<ide> describe('Chart', function() {
<ide> }, 0);
<ide> });
<ide>
<add> // https://github.com/chartjs/Chart.js/issues/9875
<add> it('should detect detach/attach in series', function(done) {
<add> var chart = acquireChart({
<add> options: {
<add> responsive: true,
<add> maintainAspectRatio: false
<add> }
<add> }, {
<add> canvas: {
<add> style: ''
<add> },
<add> wrapper: {
<add> style: 'width: 320px; height: 350px'
<add> }
<add> });
<add>
<add> var wrapper = chart.canvas.parentNode;
<add> var parent = wrapper.parentNode;
<add>
<add> waitForResize(chart, function() {
<add> expect(chart).toBeChartOfSize({
<add> dw: 320, dh: 350,
<add> rw: 320, rh: 350,
<add> });
<add>
<add> done();
<add> });
<add>
<add> parent.removeChild(wrapper);
<add> parent.appendChild(wrapper);
<add> });
<add>
<add> it('should detect detach/attach/detach in series', function(done) {
<add> var chart = acquireChart({
<add> options: {
<add> responsive: true,
<add> maintainAspectRatio: false
<add> }
<add> }, {
<add> canvas: {
<add> style: ''
<add> },
<add> wrapper: {
<add> style: 'width: 320px; height: 350px'
<add> }
<add> });
<add>
<add> var wrapper = chart.canvas.parentNode;
<add> var parent = wrapper.parentNode;
<add>
<add> waitForResize(chart, function() {
<add> fail();
<add> });
<add>
<add> parent.removeChild(wrapper);
<add> parent.appendChild(wrapper);
<add> parent.removeChild(wrapper);
<add>
<add> setTimeout(function() {
<add> expect(chart.attached).toBeFalse();
<add> done();
<add> }, 100);
<add> });
<add>
<add> it('should detect attach/detach in series', function(done) {
<add> var chart = acquireChart({
<add> options: {
<add> responsive: true,
<add> maintainAspectRatio: false
<add> }
<add> }, {
<add> canvas: {
<add> style: ''
<add> },
<add> wrapper: {
<add> style: 'width: 320px; height: 350px'
<add> }
<add> });
<add>
<add> var wrapper = chart.canvas.parentNode;
<add> var parent = wrapper.parentNode;
<add>
<add> parent.removeChild(wrapper);
<add>
<add> setTimeout(function() {
<add> expect(chart.attached).toBeFalse();
<add>
<add> waitForResize(chart, function() {
<add> fail();
<add> });
<add>
<add> parent.appendChild(wrapper);
<add> parent.removeChild(wrapper);
<add>
<add> setTimeout(function() {
<add> expect(chart.attached).toBeFalse();
<add>
<add> done();
<add> }, 100);
<add> }, 100);
<add> });
<add>
<ide> // https://github.com/chartjs/Chart.js/issues/4737
<ide> it('should resize the canvas when re-creating the chart', function(done) {
<ide> var chart = acquireChart({ | 2 |
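
The subtlety in this patch is that one MutationObserver callback can deliver an attach and a detach in the same batch of entries, so the trigger must be folded across all entries instead of returning on the first match. A small Python model of that accumulation logic (hypothetical, not part of Chart.js):

```python
def should_fire_attach(entries, canvas):
    """Return True only if, across the whole batch, the canvas ends up attached.

    `entries` is a list of (added_nodes, removed_nodes) pairs, in order.
    A later removal cancels an earlier addition within the same batch.
    """
    trigger = False
    for added, removed in entries:
        trigger = trigger or canvas in added     # an addition arms the trigger
        trigger = trigger and canvas not in removed  # a removal disarms it again
    return trigger

canvas = "canvas"
print(should_fire_attach([({"canvas"}, set())], canvas))                        # True: plain attach
print(should_fire_attach([({"canvas"}, set()), (set(), {"canvas"})], canvas))   # False: attach then detach
```
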
Ruby | Ruby | remove remaining (unused) ruby files | c09fa056a31552a34ae29a76e341e422d7c15837 | <ide><path>scripts/list_file_sizes.rb
<del>files = Dir["packages/ember-*/lib/**/*.js"] - Dir["packages/ember-runtime/**/*.js"]
<del>files = Dir["packages/ember-{metal,views,handlebars}/lib/**/*.js"]
<del>
<del>def uglify(string)
<del> IO.popen("uglifyjs", "r+") do |io|
<del> io.puts string
<del> io.close_write
<del> return io.read
<del> end
<del>end
<del>
<del>def gzip(string)
<del> IO.popen("gzip -f", "r+") do |io|
<del> io.puts string
<del> io.close_write
<del> return io.read
<del> end
<del>end
<del>
<del>
<del>all_files = ""
<del>sizes = []
<del>
<del>files.each do |file|
<del> this_file = File.read(file)
<del> all_files += this_file
<del> size = this_file.size
<del> uglified = uglify(this_file)
<del> gzipped = gzip(uglified)
<del> sizes << [size, uglified.size, gzipped.size, file]
<del>end
<del>
<del># HEADER
<del>puts " RAW MIN MIN+GZ"
<del>
<del>sizes.sort{|a,b| b[2] <=> a[2] }.each do |size|
<del> puts "%8d %8d %8d - %s" % size
<del>end
<del>
<del>uglified = uglify(all_files)
<del>gzipped = gzip(uglified)
<del>
<del>puts "%8d %8d %8d" % [all_files.size, uglified.size, gzipped.size] | 1 |
Ruby | Ruby | produce better curl error messages. (#441) | 23306ab434afc185e22a0f357a27e8da687620af | <ide><path>Library/Homebrew/dev-cmd/test-bot.rb
<ide> def homebrew
<ide> @category = __method__
<ide> return if @skip_homebrew
<ide>
<del> ruby_two = RUBY_VERSION.split(".").first.to_i >= 2
<del>
<ide> if @tap.nil?
<ide> tests_args = []
<del> if ruby_two
<add> if RUBY_TWO
<ide> tests_args << "--official-cmd-taps"
<ide> tests_args << "--coverage" if ENV["TRAVIS"]
<ide> end
<ide> def sanitize_ARGV_and_ENV
<ide>
<ide> ENV["HOMEBREW_DEVELOPER"] = "1"
<ide> ENV["HOMEBREW_SANDBOX"] = "1"
<del> ENV["HOMEBREW_RUBY_MACHO"] = "1" if RUBY_VERSION.split(".").first.to_i >= 2
<add> ENV["HOMEBREW_RUBY_MACHO"] = "1" if RUBY_TWO
<ide> ENV["HOMEBREW_NO_EMOJI"] = "1"
<ide> ENV["HOMEBREW_FAIL_LOG_LINES"] = "150"
<ide> ENV["HOMEBREW_EXPERIMENTAL_FILTER_FLAGS_ON_DEPS"] = "1"
<ide><path>Library/Homebrew/global.rb
<ide> )
<ide> end
<ide> RUBY_BIN = RUBY_PATH.dirname
<add>RUBY_TWO = RUBY_VERSION.split(".").first.to_i >= 2
<ide>
<ide> HOMEBREW_USER_AGENT_CURL = ENV["HOMEBREW_USER_AGENT_CURL"]
<ide> HOMEBREW_USER_AGENT_RUBY = "#{ENV["HOMEBREW_USER_AGENT"]} ruby/#{RUBY_VERSION}-p#{RUBY_PATCHLEVEL}"
<ide><path>Library/Homebrew/utils/curl.rb
<ide> require "pathname"
<add>require "open3"
<ide>
<ide> def curl_args(extra_args=[])
<ide> curl = Pathname.new ENV["HOMEBREW_CURL"]
<ide> def curl(*args)
<ide> end
<ide>
<ide> def curl_output(*args)
<del> curl_args = curl_args(args) - ["--fail"]
<del> Utils.popen_read_text(*curl_args)
<add> curl_args = curl_args(args)
<add> curl_args -= ["--fail"]
<add> if RUBY_TWO
<add> curl_args -= ["--silent"]
<add> Open3.popen3(*curl_args) do |_, stdout, stderr, wait_thread|
<add> [stdout.read, stderr.read, wait_thread.value]
<add> end
<add> else
<add> output = Utils.popen_read_text(*curl_args)
<add> [output, nil, $?]
<add> end
<ide> end
<ide><path>Library/Homebrew/utils/github.rb
<ide> def open(url, data=nil)
<ide>
<ide> args += ["--dump-header", "#{headers_tmpfile.path}"]
<ide>
<del> output, _, http_code = curl_output(url.to_s, *args).rpartition("\n")
<add> output, errors, status = curl_output(url.to_s, *args)
<add> output, _, http_code = output.rpartition("\n")
<ide> output, _, http_code = output.rpartition("\n") if http_code == "000"
<ide> headers = headers_tmpfile.read
<ide> ensure
<ide> def open(url, data=nil)
<ide> end
<ide>
<ide> begin
<del> if !http_code.start_with?("2") && !$?.success?
<del> raise_api_error(output, http_code, headers)
<add> if !http_code.start_with?("2") && !status.success?
<add> raise_api_error(output, errors, http_code, headers)
<ide> end
<ide> json = Utils::JSON.load output
<ide> if block_given?
<ide> def open(url, data=nil)
<ide> end
<ide> end
<ide>
<del> def raise_api_error(output, http_code, headers)
<add> def raise_api_error(output, errors, http_code, headers)
<ide> meta = {}
<ide> headers.lines.each do |l|
<ide> key, _, value = l.delete(":").partition(" ")
<ide> def raise_api_error(output, http_code, headers)
<ide> raise HTTPNotFoundError, output
<ide> else
<ide> error = Utils::JSON.load(output)["message"] rescue nil
<del> error ||= output
<add> error ||= "curl failed! #{errors}"
<ide> raise Error, error
<ide> end
<ide> end | 4 |
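
The Ruby change swaps a merged-output read for `Open3.popen3` so stderr can be surfaced in the "curl failed!" message. For comparison, a hedged Python sketch of the same stdout/stderr/status split (hypothetical helper, assumes curl is on PATH):

```python
import subprocess

def curl_output(*args):
    """Run curl without --fail, returning (stdout, stderr, exit status).

    Capturing stderr separately is what lets the caller build a useful
    error message instead of swallowing curl's diagnostics.
    """
    proc = subprocess.run(["curl", *args], capture_output=True, text=True)
    return proc.stdout, proc.stderr, proc.returncode

out, errors, status = curl_output("--version")
if status != 0:
    print(f"curl failed! {errors}")
```
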
Python | Python | remove unused import | ef03e9ea53238d9d9242fb54de8c145809d0cb0b | <ide><path>spacy/lang/da/tokenizer_exceptions.py
<ide>
<ide> from __future__ import unicode_literals
<ide>
<del>from ...symbols import ORTH, LEMMA, NORM, TAG, ADP, PUNCT
<add>from ...symbols import ORTH, LEMMA, NORM, TAG, PUNCT
<ide>
<ide>
<ide> _exc = {} | 1 |
Python | Python | change download_video parameter to resourcename | a6e60ce25d9f3d621a7b4089834ca5e50cd123db | <ide><path>airflow/providers/google/marketing_platform/hooks/display_video.py
<ide> def download_media(self, resource_name: str):
<ide> :param resource_name: of the media that is being downloaded.
<ide> :type resource_name: str
<ide> """
<del> request = self.get_conn_to_display_video().media().download_media(resource_name=resource_name)
<add> request = self.get_conn_to_display_video().media().download_media(resourceName=resource_name)
<ide> return request
<ide><path>airflow/providers/google/marketing_platform/operators/display_video.py
<ide> def execute(self, context: 'Context') -> Dict[str, Any]:
<ide> self.log.info("Creating operation for SDF download task...")
<ide> operation = hook.create_sdf_download_operation(body_request=self.body_request)
<ide>
<add> name = operation["name"]
<add> self.xcom_push(context, key="name", value=name)
<add> self.log.info("Created SDF operation with name: %s", name)
<add>
<ide> return operation
<ide>
<ide>
<ide> def execute(self, context: 'Context') -> str:
<ide> )
<ide>
<ide> self.log.info("Retrieving operation...")
<del> operation = hook.get_sdf_download_operation(operation_name=self.operation_name)
<add> operation_state = hook.get_sdf_download_operation(operation_name=self.operation_name)
<ide>
<ide> self.log.info("Creating file for upload...")
<del> media = hook.download_media(resource_name=operation)
<add> media = hook.download_media(resource_name=operation_state)
<ide>
<ide> self.log.info("Sending file to the Google Cloud Storage...")
<ide> with tempfile.NamedTemporaryFile() as temp_file:
<ide><path>tests/providers/google/marketing_platform/hooks/test_display_video.py
<ide> def test_download_media_called_once_with_params(self, get_conn_to_display_video)
<ide>
<ide> self.hook.download_media(resource_name=resource_name)
<ide> get_conn_to_display_video.return_value.media.return_value.download_media.assert_called_once_with(
<del> resource_name=resource_name
<add> resourceName=resource_name
<ide> )
<ide><path>tests/providers/google/marketing_platform/operators/test_display_video.py
<ide> class TestGoogleDisplayVideo360SDFtoGCSOperator(TestCase):
<ide> def test_execute(self, mock_temp, gcs_mock_hook, mock_hook):
<ide> operation_name = "operation_name"
<ide> operation = {"key": "value"}
<add> operation = {"response": {"resourceName": "test_name"}}
<ide> gzip = False
<ide>
<ide> # mock_hook.return_value.create_sdf_download_operation.return_value = response_name
<ide> def test_execute(self, mock_temp, gcs_mock_hook, mock_hook):
<ide>
<ide>
<ide> class TestGoogleDisplayVideo360CreateSDFDownloadTaskOperator(TestCase):
<add> @mock.patch(
<add> "airflow.providers.google.marketing_platform.operators."
<add> "display_video.GoogleDisplayVideo360CreateSDFDownloadTaskOperator.xcom_push"
<add> )
<ide> @mock.patch(
<ide> "airflow.providers.google.marketing_platform.operators.display_video.GoogleDisplayVideo360Hook"
<ide> )
<del> def test_execute(self, mock_hook):
<add> def test_execute(self, mock_hook, xcom_mock):
<ide> body_request = {
<ide> "version": "1",
<ide> "id": "id",
<ide> "filter": {"id": []},
<ide> }
<add> test_name = 'test_task'
<add> mock_hook.return_value.create_sdf_download_operation.return_value = {"name": test_name}
<ide>
<ide> op = GoogleDisplayVideo360CreateSDFDownloadTaskOperator(
<ide> body_request=body_request,
<ide> def test_execute(self, mock_hook):
<ide> mock_hook.return_value.create_sdf_download_operation.assert_called_once_with(
<ide> body_request=body_request
<ide> )
<add> xcom_mock.assert_called_once_with(None, key="name", value=test_name) | 4 |
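
The root cause is that discovery-based Google API clients validate keyword arguments against the API's parameter spec, which is camelCase, so `resource_name=` is rejected where `resourceName=` is accepted. A hedged sketch of exercising the fixed call shape against a mock (the resource name is a made-up example):

```python
from unittest import mock

# Discovery clients check keyword names against the API's "parameters" spec,
# so the snake_case spelling raises TypeError while camelCase is accepted.
service = mock.MagicMock()
request = service.media().download_media(resourceName="sdfdownloadtasks/media/123")
service.media.return_value.download_media.assert_called_once_with(
    resourceName="sdfdownloadtasks/media/123"
)
```
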
Java | Java | replace use of the <tt> html tag in javadoc | 0f479293b17765f7077a61a6b7b130754b6176de | <ide><path>spring-context/src/main/java/org/springframework/cache/Cache.java
<ide> * Interface that defines common cache operations.
<ide> *
<ide> * <b>Note:</b> Due to the generic use of caching, it is recommended that
<del> * implementations allow storage of <tt>null</tt> values (for example to
<add> * implementations allow storage of {@code null} values (for example to
<ide> * cache methods that return {@code null}).
<ide> *
<ide> * @author Costin Leau
<ide><path>spring-core/src/main/java/org/springframework/core/annotation/AnnotationTypeMappings.java
<ide> int size() {
<ide> * @param index the index to return
<ide> * @return the {@link AnnotationTypeMapping}
<ide> * @throws IndexOutOfBoundsException if the index is out of range
<del> * (<tt>index < 0 || index >= size()</tt>)
<add> * ({@code index < 0 || index >= size()})
<ide> */
<ide> AnnotationTypeMapping get(int index) {
<ide> return this.mappings.get(index);
<ide><path>spring-core/src/main/java/org/springframework/core/annotation/AttributeMethods.java
<ide> Method get(String name) {
<ide> * @param index the index of the attribute to return
<ide> * @return the attribute method
<ide> * @throws IndexOutOfBoundsException if the index is out of range
<del> * (<tt>index < 0 || index >= size()</tt>)
<add> * ({@code index < 0 || index >= size()})
<ide> */
<ide> Method get(int index) {
<ide> return this.attributeMethods[index];
<ide><path>spring-core/src/main/java/org/springframework/core/convert/TypeDescriptor.java
<ide> public Annotation[] getAnnotations() {
<ide> * <p>As of Spring Framework 4.2, this method supports arbitrary levels
<ide> * of meta-annotations.
<ide> * @param annotationType the annotation type
<del> * @return <tt>true</tt> if the annotation is present
<add> * @return {@code true} if the annotation is present
<ide> */
<ide> public boolean hasAnnotation(Class<? extends Annotation> annotationType) {
<ide> if (this.annotatedElement.isEmpty()) {
<ide> public <T extends Annotation> T getAnnotation(Class<T> annotationType) {
<ide> * On the other hand, {@code valueOf(Number.class).isAssignableTo(valueOf(Integer.class))}
<ide> * returns {@code false} because, while all Integers are Numbers, not all Numbers are Integers.
<ide> * <p>For arrays, collections, and maps, element and key/value types are checked if declared.
<del> * For example, a List<String> field value is assignable to a Collection<CharSequence>
<del> * field, but List<Number> is not assignable to List<Integer>.
<add> * For example, a {@code List<String>} field value is assignable to a {@code Collection<CharSequence>}
<add> * field, but {@code List<Number>} is not assignable to {@code List<Integer>}.
<ide> * @return {@code true} if this type is assignable to the type represented by the provided
<ide> * type descriptor
<ide> * @see #getObjectType()
<ide> public static TypeDescriptor collection(Class<?> collectionType, @Nullable TypeD
<ide> /**
<ide> * Create a new type descriptor from a {@link java.util.Map} type.
<ide> * <p>Useful for converting to typed Maps.
<del> * <p>For example, a Map<String, String> could be converted to a Map<Id, EmailAddress>
<add> * <p>For example, a {@code Map<String, String>} could be converted to a {@code Map<Id, EmailAddress>}
<ide> * by converting to a targetType built with this method:
<ide> * The method call to construct such a TypeDescriptor would look something like:
<ide> * <pre class="code">
<ide><path>spring-core/src/main/java/org/springframework/core/convert/converter/ConvertingComparator.java
<ide> public static <K, V> ConvertingComparator<Map.Entry<K, V>, V> mapEntryValues(Com
<ide>
<ide>
<ide> /**
<del> * Adapts a {@link ConversionService} and <tt>targetType</tt> to a {@link Converter}.
<add> * Adapts a {@link ConversionService} and {@code targetType} to a {@link Converter}.
<ide> */
<ide> private static class ConversionServiceConverter<S, T> implements Converter<S, T> {
<ide>
<ide><path>spring-core/src/main/java/org/springframework/core/io/buffer/DataBuffer.java
<ide> * the following invariant holds for the read and write positions, and the capacity:
<ide> *
<ide> * <blockquote>
<del> * <tt>0</tt> <tt><=</tt>
<del> * <i>readPosition</i> <tt><=</tt>
<del> * <i>writePosition</i> <tt><=</tt>
<add> * {@code 0} {@code <=}
<add> * <i>readPosition</i> {@code <=}
<add> * <i>writePosition</i> {@code <=}
<ide> * <i>capacity</i>
<ide> * </blockquote>
<ide> *
<ide><path>spring-core/src/main/java/org/springframework/util/AntPathMatcher.java
<ide> public Comparator<String> getPatternComparator(String path) {
<ide> /**
<ide> * Tests whether a string matches against a pattern via a {@link Pattern}.
<ide> * <p>The pattern may contain special characters: '*' means zero or more characters; '?' means one and
<del> * only one character; '{' and '}' indicate a URI template pattern. For example <tt>/users/{user}</tt>.
<add> * only one character; '{' and '}' indicate a URI template pattern. For example {@code /users/{user}}.
<ide> */
<ide> protected static class AntPathStringMatcher {
<ide>
<ide><path>spring-core/src/main/java/org/springframework/util/FastByteArrayOutputStream.java
<ide> public void close() {
<ide>
<ide> /**
<ide> * Convert the buffer's contents into a string decoding bytes using the
<del> * platform's default character set. The length of the new <tt>String</tt>
<add> * platform's default character set. The length of the new {@code String}
<ide> * is a function of the character set, and hence may not be equal to the
<ide> * size of the buffer.
<ide> * <p>This method always replaces malformed-input and unmappable-character
<ide><path>spring-web/src/main/java/org/springframework/web/util/WebUtils.java
<ide> public static String findParameterValue(ServletRequest request, String name) {
<ide> * following algorithm:
<ide> * <ol>
<ide> * <li>Try to get the parameter value using just the given <i>logical</i> name.
<del> * This handles parameters of the form <tt>logicalName = value</tt>. For normal
<add> * This handles parameters of the form {@code logicalName = value}. For normal
<ide> * parameters, e.g. submitted using a hidden HTML form field, this will return
<ide> * the requested value.</li>
<ide> * <li>Try to obtain the parameter value from the parameter name, where the
<del> * parameter name in the request is of the form <tt>logicalName_value = xyz</tt>
<add> * parameter name in the request is of the form {@code logicalName_value = xyz}
<ide> * with "_" being the configured delimiter. This deals with parameter values
<ide> * submitted using an HTML form submit button.</li>
<ide> * <li>If the value obtained in the previous step has a ".x" or ".y" suffix,
<ide> * remove that. This handles cases where the value was submitted using an
<ide> * HTML form image button. In this case the parameter in the request would
<del> * actually be of the form <tt>logicalName_value.x = 123</tt>. </li>
<add> * actually be of the form {@code logicalName_value.x = 123}.</li>
<ide> * </ol>
<ide> * @param parameters the available parameter map
<ide> * @param name the <i>logical</i> name of the request parameter
<ide><path>spring-web/src/main/java/org/springframework/web/util/pattern/InternalPathPatternParser.java
<ide> else if ((this.pos > (this.variableCaptureStart + 1 + (this.isCaptureTheRestVari
<ide> * Just hit a ':' and want to jump over the regex specification for this
<ide> * variable. pos will be pointing at the ':', we want to skip until the }.
<ide> * <p>
<del> * Nested {...} pairs don't have to be escaped: <tt>/abc/{var:x{1,2}}/def</tt>
<del> * <p>An escaped } will not be treated as the end of the regex: <tt>/abc/{var:x\\{y:}/def</tt>
<add> * Nested {...} pairs don't have to be escaped: <code>/abc/{var:x{1,2}}/def</code>
<add> * <p>An escaped } will not be treated as the end of the regex: <code>/abc/{var:x\\{y:}/def</code>
<ide> * <p>A separator that should not indicate the end of the regex can be escaped:
<ide> */
<ide> private void skipCaptureRegex() {
<ide><path>spring-web/src/main/java/org/springframework/web/util/pattern/RegexPathElement.java
<ide>
<ide> /**
<ide> * A regex path element. Used to represent any complicated element of the path.
<del> * For example in '<tt>/foo/*_*/*_{foobar}</tt>' both <tt>*_*</tt> and <tt>*_{foobar}</tt>
<add> * For example in '<code>/foo/*_*/*_{foobar}</code>' both {@code *_*} and {@code *_{foobar}}
<ide> * are {@link RegexPathElement} path elements. Derived from the general
<ide> * {@link org.springframework.util.AntPathMatcher} approach.
<ide> *
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/view/json/MappingJackson2JsonView.java
<ide> public void setJsonPrefix(String jsonPrefix) {
<ide> }
<ide>
<ide> /**
<del> * Indicates whether the JSON output by this view should be prefixed with <tt>")]}', "</tt>.
<add> * Indicates whether the JSON output by this view should be prefixed with <code>")]}', "</code>.
<ide> * Default is {@code false}.
<ide> * <p>Prefixing the JSON string in this manner is used to help prevent JSON Hijacking.
<ide> * The prefix renders the string syntactically invalid as a script so that it cannot be hijacked. | 12 |
Text | Text | hide solution from tests in arabic challenge | 863c9ad1d08b341bc99c874a375ddd4769226500 | <ide><path>curriculum/challenges/arabic/01-responsive-web-design/css-grid/use-grid-area-without-creating-an-areas-template.arabic.md
<ide> undefined
<ide>
<ide> ```yml
<ide> tests:
<del> - text: ''
<del> testString: 'assert(code.match(/.item5\s*?{[\s\S]*grid-area\s*?:\s*?3\s*?\/\s*?1\s*?\/\s*?4\s*?\/\s*?4\s*?;[\s\S]*}/gi), "<code>item5</code> class should have a <code>grid-area</code> property that has the value of <code>3/1/4/4</code>.");'
<add> - text: 'يجب أن تحتوي فئة <code>item5</code> على خاصية <code>grid-area</code> بحيث تكون بين الخطين الأفقيين الثالث والرابع وبين الخطين الرأسيين الأول والرابع.'
<add> testString: 'assert(code.match(/.item5\s*?{[\s\S]*grid-area\s*?:\s*?3\s*?\/\s*?1\s*?\/\s*?4\s*?\/\s*?4\s*?;[\s\S]*}/gi), "يجب أن تحتوي فئة <code>item5</code> على خاصية <code>grid-area</code> بحيث تكون بين الخطين الأفقيين الثالث والرابع وبين الخطين الرأسيين الأول والرابع.");'
<ide>
<ide> ```
<ide> | 1 |
Ruby | Ruby | remove env variable that is no longer used | 6278ffdba0a15ca8d1f2c0dc1865f83743deb17b | <ide><path>Library/Homebrew/cmd/sh.rb
<ide> def sh
<ide> end
<ide> ENV['PS1'] = 'brew \[\033[1;32m\]\w\[\033[0m\]$ '
<ide> ENV['VERBOSE'] = '1'
<del> ENV['HOMEBREW_LOG'] = '1'
<ide> puts <<-EOS.undent_________________________________________________________72
<ide> Your shell has been configured to use Homebrew's build environment:
<ide> this should help you build stuff. Notably though, the system versions of | 1 |
Javascript | Javascript | show task_id in the graph view tooltip | f7229e53605fe7a21100c82cd4d456657d9d3187 | <ide><path>airflow/www/static/js/graph.js
<ide> function update_nodes_states(task_instances) {
<ide> const task = tasks[task_id];
<ide> let tt = "";
<ide> if(ti.task_id != undefined) {
<del> tt += "Task_id: " + escapeHtml(task.task_id) + "<br>";
<add> tt += "Task_id: " + escapeHtml(ti.task_id) + "<br>";
<ide> }
<ide> tt += "Run: " + converAndFormatUTC(task.execution_date) + "<br>";
<ide> if(ti.run_id != undefined) { | 1 |
Text | Text | add changelog entry for | 450743cf56125d153b64c3509e75b0e4f86851d1 | <ide><path>actionview/CHANGELOG.md
<add>* Update `select_tag` to work correctly with `:include_blank` option passing a string.
<add>
<add> Fixes #16483.
<add>
<add> *Frank Groeneveld*
<add>
<ide> * Changed the meaning of `render "foo/bar"`.
<ide>
<ide> Previously, calling `render "foo/bar"` in a controller action is equivalent | 1 |
Javascript | Javascript | remove dup logic case | c0068f2ea45fdd88b77c191ab40d9319ddbf3068 | <ide><path>lib/performance/EmittedAssetSizeLimitPlugin.js
<ide> EmittedAssetSizeLimitPlugin.prototype.apply = function(compiler) {
<ide> sizeLimit
<ide> )
<ide> );
<add> if(!hasAsyncChunks) {
<add> warnings.push(new NoAsyncChunksWarning());
<add> }
<add> }
<ide>
<add> if(entrypointsOverLimit.length > 0) {
<ide> warnings.push(
<ide> new EntrypointsOverSizeLimitWarning(
<ide> entrypointsOverLimit,
<ide> EmittedAssetSizeLimitPlugin.prototype.apply = function(compiler) {
<ide> if(!hasAsyncChunks) {
<ide> warnings.push(new NoAsyncChunksWarning());
<ide> }
<del> } else {
<del> if(entrypointsOverLimit.length > 0) {
<del> warnings.push(
<del> new EntrypointsOverSizeLimitWarning(
<del> entrypointsOverLimit,
<del> compilation,
<del> entrypointSizeLimit
<del> )
<del> );
<del>
<del> if(!hasAsyncChunks) {
<del> warnings.push(new NoAsyncChunksWarning());
<del> }
<del> }
<add>
<ide> }
<ide>
<ide> if(warnings.length > 0) { | 1 |
Text | Text | add mixins property to context example | f6d429300366a29d76d0eae08cb8cd833024adfd | <ide><path>docs/_posts/2016-07-13-mixins-considered-harmful.md
<ide> var RouterMixin = {
<ide> };
<ide>
<ide> var Link = React.createClass({
<add> mixins: [RouterMixin],
<add>
<ide> handleClick: function(e) {
<ide> e.stopPropagation();
<ide> | 1 |
Python | Python | make get_last_lr in trainer backward compatible | 9de4afa8974b5afbaf61c41c4186eef6546932d4 | <ide><path>src/transformers/trainer.py
<ide>
<ide> import numpy as np
<ide> import torch
<add>from packaging import version
<ide> from torch import nn
<ide> from torch.utils.data.dataloader import DataLoader
<ide> from torch.utils.data.dataset import Dataset
<ide> def train(self, model_path: Optional[str] = None):
<ide> ):
<ide> logs: Dict[str, float] = {}
<ide> logs["loss"] = (tr_loss - logging_loss) / self.args.logging_steps
<del> logs["learning_rate"] = scheduler.get_last_lr()[0]
<add> # maintaining backward compatibility.
<add> # could use "scheduler.get_last_lr()[0]" instead for pytorch >= 1.4.0
<add> logs["learning_rate"] = (
<add> scheduler.get_last_lr()[0]
<add> if version.parse(torch.__version__) >= version.parse("1.4")
<add> else scheduler.get_lr()[0]
<add> )
<add>
<ide> logging_loss = tr_loss
<ide>
<ide> self._log(logs) | 1 |
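
The fix gates on the installed PyTorch version because `get_last_lr()` only appeared in torch 1.4, while older releases expose `get_lr()`. Extracted into a standalone helper, the pattern looks like this (illustrative name, not part of the Trainer):

```python
import torch
from packaging import version

def last_learning_rate(scheduler):
    """Return the current LR, compatible with PyTorch < 1.4.

    `get_last_lr()` was introduced in torch 1.4; older releases only
    provide `get_lr()`.
    """
    if version.parse(torch.__version__) >= version.parse("1.4"):
        return scheduler.get_last_lr()[0]
    return scheduler.get_lr()[0]

model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1)
print(last_learning_rate(sched))  # 0.1
```
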
Mixed | Javascript | establish y in cursorto as optional | f25bbf12556eb5478ea876db825c230d1b1c650c | <ide><path>doc/api/readline.md
<ide> function completer(linePartial, callback) {
<ide> }
<ide> ```
<ide>
<del>## readline.cursorTo(stream, x, y[, callback])
<add>## readline.cursorTo(stream, x[, y][, callback])
<ide> <!-- YAML
<ide> added: v0.7.7
<ide> changes:
<ide><path>doc/api/tty.md
<ide> added: v0.7.7
<ide> A `number` specifying the number of columns the TTY currently has. This property
<ide> is updated whenever the `'resize'` event is emitted.
<ide>
<del>### writeStream.cursorTo(x, y[, callback])
<add>### writeStream.cursorTo(x[, y][, callback])
<ide> <!-- YAML
<ide> added: v0.7.7
<ide> changes:
<ide><path>lib/readline.js
<ide> function cursorTo(stream, x, y, callback) {
<ide> if (callback !== undefined && typeof callback !== 'function')
<ide> throw new ERR_INVALID_CALLBACK(callback);
<ide>
<add> if (typeof y === 'function') {
<add> callback = y;
<add> y = undefined;
<add> }
<add>
<ide> if (stream == null || (typeof x !== 'number' && typeof y !== 'number')) {
<ide> if (typeof callback === 'function')
<ide> process.nextTick(callback);
<ide><path>test/parallel/test-readline-csi.js
<ide> writable.data = '';
<ide> assert.strictEqual(readline.cursorTo(writable, 1, 'a'), true);
<ide> assert.strictEqual(writable.data, '\x1b[2G');
<ide>
<add>writable.data = '';
<add>assert.strictEqual(readline.cursorTo(writable, 1), true);
<add>assert.strictEqual(writable.data, '\x1b[2G');
<add>
<ide> writable.data = '';
<ide> assert.strictEqual(readline.cursorTo(writable, 1, 2), true);
<ide> assert.strictEqual(writable.data, '\x1b[3;2H');
<ide> writable.data = '';
<ide> assert.strictEqual(readline.cursorTo(writable, 1, 2, common.mustCall()), true);
<ide> assert.strictEqual(writable.data, '\x1b[3;2H');
<ide>
<add>writable.data = '';
<add>assert.strictEqual(readline.cursorTo(writable, 1, common.mustCall()), true);
<add>assert.strictEqual(writable.data, '\x1b[2G');
<add>
<ide> // Verify that cursorTo() throws on invalid callback.
<ide> assert.throws(() => {
<ide> readline.cursorTo(writable, 1, 1, null); | 4 |
Python | Python | fix indexerror for illegal axis in np.mean | 36dd1b36474d9a6e46c86f708b9a62efa0a7309a | <ide><path>numpy/core/_methods.py
<ide> def _count_reduce_items(arr, axis):
<ide> axis = (axis,)
<ide> items = 1
<ide> for ax in axis:
<del> items *= arr.shape[ax]
<add> items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
<ide> return items
<ide>
<ide> # Numpy 1.17.0, 2019-02-24
<ide><path>numpy/core/tests/test_multiarray.py
<ide> def test_mean_float16(self):
<ide> # of float32.
<ide> assert_(_mean(np.ones(100000, dtype='float16')) == 1)
<ide>
<add> def test_mean_axis_error(self):
<add> # Ensure that AxisError is raised instead of IndexError when axis is
<add> # out of bounds, see gh-15817.
<add> with assert_raises(np.core._exceptions.AxisError):
<add> np.arange(10).mean(axis=2)
<add>
<ide> def test_var_values(self):
<ide> for mat in [self.rmat, self.cmat, self.omat]:
<ide> for axis in [0, 1, None]:
<ide> def test_var_complex_byteorder(self):
<ide> cmat_swapped = cmat.astype(cmat.dtype.newbyteorder())
<ide> assert_almost_equal(cmat.var(), cmat_swapped.var())
<ide>
<add> def test_var_axis_error(self):
<add> # Ensure that AxisError is raised instead of IndexError when axis is
<add> # out of bounds, see gh-15817.
<add> with assert_raises(np.core._exceptions.AxisError):
<add> np.arange(10).var(axis=2)
<add>
<ide> def test_std_values(self):
<ide> for mat in [self.rmat, self.cmat, self.omat]:
<ide> for axis in [0, 1, None]: | 2 |
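
The one-line fix routes the axis through `normalize_axis_index`, which raises `AxisError` (a subclass of both `IndexError` and `ValueError`) instead of a bare `IndexError`. A runnable demonstration:

```python
import numpy as np

try:
    np.arange(10).mean(axis=2)    # a 1-D array has no axis 2
except np.AxisError as exc:       # NumPy 1.x spelling; 2.x moves it to np.exceptions.AxisError
    print(exc)                    # axis 2 is out of bounds for array of dimension 1
```
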
Javascript | Javascript | add credits, and a "decode" function | 4a615ddd9c29b57dd5b526b46dcbcf4585acd95f | <ide><path>lib/uri.js
<ide> * A URI parser, compliant with assorted RFCs, providing parsing and resolution utilities.
<ide> **/
<ide>
<add>/*
<add>Blatantly stolen with permission from Narwhal, which did the same from Chiron,
<add>and bits taken from parseUri 1.2.1 (c) 2007 Steven Levithan <stevenlevithan.com> MIT License,
<add>and probably also plagiarizing http://code.google.com/p/js-uri/ in some ways.
<add>Most lines have been changed, please don't blame any of the above persons for
<add>any errors in this file.
<add>
<add>*/
<add>
<ide> exports.parse = uri_parse;
<ide> exports.format = uri_format;
<ide> exports.resolve = uri_resolve;
<ide> exports.resolveObject = uri_resolveObject;
<add>exports.decode = uri_decode;
<ide>
<add>/****
<add>Decode a URI, replacing + with space
<add>*/
<add>function uri_decode (s) {
<add> return decodeURIComponent(s.replace(/\+/g, ' '));
<add>}
<ide>
<ide> /**** expressionKeys
<ide> members of a parsed URI object that you get | 1 |
Text | Text | add types for some `process` properties | d59e2492f5142d554a8716ec03f68a0df041ead1 | <ide><path>doc/api/process.md
<ide> $ bash -c 'exec -a customArgv0 ./node'
<ide> added: v7.1.0
<ide> -->
<ide>
<add>* {Object}
<add>
<ide> If the Node.js process was spawned with an IPC channel (see the
<ide> [Child Process][] documentation), the `process.channel`
<ide> property is a reference to the IPC channel. If no IPC channel exists, this
<ide> console.log(process.env.test);
<ide> added: v0.7.7
<ide> -->
<ide>
<del>* {Object}
<add>* {Array}
<ide>
<ide> The `process.execArgv` property returns the set of Node.js-specific command-line
<ide> options passed when the Node.js process was launched. These options do not
<ide> debugger, see [Signal Events][].
<ide> added: v0.1.17
<ide> -->
<ide>
<add>* {Object}
<add>
<ide> The `process.mainModule` property provides an alternative way of retrieving
<ide> [`require.main`][]. The difference is that if the main module changes at
<ide> runtime, [`require.main`][] may still refer to the original main module in
<ide> changes:
<ide> description: The `lts` property is now supported.
<ide> -->
<ide>
<add>* {Object}
<add>
<ide> The `process.release` property returns an Object containing metadata related to
<ide> the current release, including URLs for the source tarball and headers-only
<ide> tarball. | 1 |
PHP | PHP | fix cs error | 2d32a1e9c82b5426db379e11a34ab30b9992255b | <ide><path>tests/TestCase/Utility/XmlTest.php
<ide> public function testBuild()
<ide> public function testBuildHuge()
<ide> {
<ide> $xml = '<tag>value</tag>';
<del> $obj = Xml::build($xml, array('parseHuge' => true));
<add> $obj = Xml::build($xml, ['parseHuge' => true]);
<ide> $this->assertEquals('tag', $obj->getName());
<ide> $this->assertEquals('value', (string)$obj);
<ide> } | 1 |
PHP | PHP | apply fixes from styleci | 463144cf3ae51baa7d85d98099a4eabea0260e99 | <ide><path>src/Illuminate/Database/Concerns/ManagesTransactions.php
<ide> public function transaction(Closure $callback, $attempts = 1)
<ide> // back the transaction, resulting in an error if we attempt to manually roll back.
<ide> try {
<ide> $this->commit();
<del> }
<del> catch (Exception $e) {
<add> } catch (Exception $e) {
<ide> $this->handleCommitTransactionException(
<ide> $e, $currentAttempt, $attempts
<ide> );
<ide><path>tests/Database/DatabaseConnectionTest.php
<ide> public function testTransactionRetriesOnSerializationFailure()
<ide> $pdo->expects($this->exactly(3))->method('beginTransaction');
<ide> $pdo->expects($this->never())->method('rollBack');
<ide> $pdo->expects($this->exactly(3))->method('commit');
<del> $mock->transaction(function () {}, 3);
<add> $mock->transaction(function () {
<add> }, 3);
<ide> }
<ide>
<ide> public function testTransactionMethodRetriesOnDeadlock()
<ide><path>tests/Database/stubs/PDOExceptionStub.php
<ide> <?php
<ide>
<del>class PDOExceptionStub extends PDOException {
<del>
<add>class PDOExceptionStub extends PDOException
<add>{
<ide> /**
<ide> * Overrides Exception::__construct, which casts $code to integer, so that we can create
<ide> * an exception with a string $code consistent with the real PDOException behavior.
<del> *
<add> *
<ide> * @param string|null $message
<ide> * @param string|null $code
<ide> * @return void | 3 |
Go | Go | add imageinsert tests | bf8e0277bbd1c2df2310bc20ecc4003d1ed7a657 | <ide><path>server_test.go
<ide> package docker
<ide> import (
<ide> "github.com/dotcloud/docker/utils"
<ide> "strings"
<add> "io/ioutil"
<ide> "testing"
<ide> "time"
<ide> )
<ide> func TestImagesFilter(t *testing.T) {
<ide> t.Fatal("incorrect number of matches returned")
<ide> }
<ide> }
<add>
<add>func TestImageInsert(t *testing.T) {
<add> runtime := mkRuntime(t)
<add> defer nuke(runtime)
<add> srv := &Server{runtime: runtime}
<add> sf := utils.NewStreamFormatter(true)
<add>
<add> // bad image name fails
<add> if err := srv.ImageInsert("foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err == nil {
<add> t.Fatal("expected an error and got none")
<add> }
<add>
<add> // bad url fails
<add> if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
<add> t.Fatal("expected an error and got none")
<add> }
<add>
<add> // success returns nil
<add> if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
<add> t.Fatalf("expected no error, but got %v", err)
<add> }
<add>} | 1 |
Go | Go | remove secretrequestoption type | e7c39f4d5d761f68e6ac432934d8c3910e452855 | <ide><path>api/types/client.go
<ide> import (
<ide> "bufio"
<ide> "io"
<ide> "net"
<del> "os"
<ide>
<ide> "github.com/docker/docker/api/types/container"
<ide> "github.com/docker/docker/api/types/filters"
<ide> type PluginInstallOptions struct {
<ide> Args []string
<ide> }
<ide>
<del>// SecretRequestOption is a type for requesting secrets
<del>type SecretRequestOption struct {
<del> Source string
<del> Target string
<del> UID string
<del> GID string
<del> Mode os.FileMode
<del>}
<del>
<ide> // SwarmUnlockKeyResponse contains the response for Engine API:
<ide> // GET /swarm/unlockkey
<ide> type SwarmUnlockKeyResponse struct {
<ide><path>cli/command/service/parse.go
<ide> import (
<ide> "golang.org/x/net/context"
<ide> )
<ide>
<del>// ParseSecrets retrieves the secrets from the requested names and converts
<del>// them to secret references to use with the spec
<del>func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*types.SecretRequestOption) ([]*swarmtypes.SecretReference, error) {
<add>// ParseSecrets retrieves the secrets with the requested names and fills
<add>// secret IDs into the secret references.
<add>func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) {
<ide> secretRefs := make(map[string]*swarmtypes.SecretReference)
<ide> ctx := context.Background()
<ide>
<ide> for _, secret := range requestedSecrets {
<del> if _, exists := secretRefs[secret.Target]; exists {
<del> return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source)
<add> if _, exists := secretRefs[secret.File.Name]; exists {
<add> return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.SecretName)
<ide> }
<del> secretRef := &swarmtypes.SecretReference{
<del> File: &swarmtypes.SecretReferenceFileTarget{
<del> Name: secret.Target,
<del> UID: secret.UID,
<del> GID: secret.GID,
<del> Mode: secret.Mode,
<del> },
<del> SecretName: secret.Source,
<del> }
<del>
<del> secretRefs[secret.Target] = secretRef
<add> secretRef := new(swarmtypes.SecretReference)
<add> *secretRef = *secret
<add> secretRefs[secret.File.Name] = secretRef
<ide> }
<ide>
<ide> args := filters.NewArgs()
<ide><path>cli/compose/convert/service.go
<ide> import (
<ide> "strings"
<ide> "time"
<ide>
<del> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/container"
<ide> "github.com/docker/docker/api/types/swarm"
<ide> servicecli "github.com/docker/docker/cli/command/service"
<ide> func convertServiceSecrets(
<ide> secrets []composetypes.ServiceSecretConfig,
<ide> secretSpecs map[string]composetypes.SecretConfig,
<ide> ) ([]*swarm.SecretReference, error) {
<del> opts := []*types.SecretRequestOption{}
<add> refs := []*swarm.SecretReference{}
<ide> for _, secret := range secrets {
<ide> target := secret.Target
<ide> if target == "" {
<ide> func convertServiceSecrets(
<ide> mode = uint32Ptr(0444)
<ide> }
<ide>
<del> opts = append(opts, &types.SecretRequestOption{
<del> Source: source,
<del> Target: target,
<del> UID: uid,
<del> GID: gid,
<del> Mode: os.FileMode(*mode),
<add> refs = append(refs, &swarm.SecretReference{
<add> File: &swarm.SecretReferenceFileTarget{
<add> Name: target,
<add> UID: uid,
<add> GID: gid,
<add> Mode: os.FileMode(*mode),
<add> },
<add> SecretName: source,
<ide> })
<ide> }
<ide>
<del> return servicecli.ParseSecrets(client, opts)
<add> return servicecli.ParseSecrets(client, refs)
<ide> }
<ide>
<ide> func uint32Ptr(value uint32) *uint32 {
<ide><path>opts/secret.go
<ide> import (
<ide> "strconv"
<ide> "strings"
<ide>
<del> "github.com/docker/docker/api/types"
<add> swarmtypes "github.com/docker/docker/api/types/swarm"
<ide> )
<ide>
<ide> // SecretOpt is a Value type for parsing secrets
<ide> type SecretOpt struct {
<del> values []*types.SecretRequestOption
<add> values []*swarmtypes.SecretReference
<ide> }
<ide>
<ide> // Set a new secret value
<ide> func (o *SecretOpt) Set(value string) error {
<ide> return err
<ide> }
<ide>
<del> options := &types.SecretRequestOption{
<del> Source: "",
<del> Target: "",
<del> UID: "0",
<del> GID: "0",
<del> Mode: 0444,
<add> options := &swarmtypes.SecretReference{
<add> File: &swarmtypes.SecretReferenceFileTarget{
<add> UID: "0",
<add> GID: "0",
<add> Mode: 0444,
<add> },
<ide> }
<ide>
<ide> // support a simple syntax of --secret foo
<ide> if len(fields) == 1 {
<del> options.Source = fields[0]
<del> options.Target = fields[0]
<add> options.File.Name = fields[0]
<add> options.SecretName = fields[0]
<ide> o.values = append(o.values, options)
<ide> return nil
<ide> }
<ide> func (o *SecretOpt) Set(value string) error {
<ide> value := parts[1]
<ide> switch key {
<ide> case "source", "src":
<del> options.Source = value
<add> options.SecretName = value
<ide> case "target":
<ide> tDir, _ := filepath.Split(value)
<ide> if tDir != "" {
<ide> return fmt.Errorf("target must not be a path")
<ide> }
<del> options.Target = value
<add> options.File.Name = value
<ide> case "uid":
<del> options.UID = value
<add> options.File.UID = value
<ide> case "gid":
<del> options.GID = value
<add> options.File.GID = value
<ide> case "mode":
<ide> m, err := strconv.ParseUint(value, 0, 32)
<ide> if err != nil {
<ide> return fmt.Errorf("invalid mode specified: %v", err)
<ide> }
<ide>
<del> options.Mode = os.FileMode(m)
<add> options.File.Mode = os.FileMode(m)
<ide> default:
<del> if len(fields) == 1 && value == "" {
<del>
<del> } else {
<del> return fmt.Errorf("invalid field in secret request: %s", key)
<del> }
<add> return fmt.Errorf("invalid field in secret request: %s", key)
<ide> }
<ide> }
<ide>
<del> if options.Source == "" {
<add> if options.SecretName == "" {
<ide> return fmt.Errorf("source is required")
<ide> }
<ide>
<ide> func (o *SecretOpt) Type() string {
<ide> func (o *SecretOpt) String() string {
<ide> secrets := []string{}
<ide> for _, secret := range o.values {
<del> repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target)
<add> repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name)
<ide> secrets = append(secrets, repr)
<ide> }
<ide> return strings.Join(secrets, ", ")
<ide> }
<ide>
<ide> // Value returns the secret requests
<del>func (o *SecretOpt) Value() []*types.SecretRequestOption {
<add>func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
<ide> return o.values
<ide> }
<ide><path>opts/secret_test.go
<ide> func TestSecretOptionsSimple(t *testing.T) {
<ide> reqs := opt.Value()
<ide> assert.Equal(t, len(reqs), 1)
<ide> req := reqs[0]
<del> assert.Equal(t, req.Source, "app-secret")
<del> assert.Equal(t, req.Target, "app-secret")
<del> assert.Equal(t, req.UID, "0")
<del> assert.Equal(t, req.GID, "0")
<add> assert.Equal(t, req.SecretName, "app-secret")
<add> assert.Equal(t, req.File.Name, "app-secret")
<add> assert.Equal(t, req.File.UID, "0")
<add> assert.Equal(t, req.File.GID, "0")
<ide> }
<ide>
<ide> func TestSecretOptionsSourceTarget(t *testing.T) {
<ide> func TestSecretOptionsSourceTarget(t *testing.T) {
<ide> reqs := opt.Value()
<ide> assert.Equal(t, len(reqs), 1)
<ide> req := reqs[0]
<del> assert.Equal(t, req.Source, "foo")
<del> assert.Equal(t, req.Target, "testing")
<add> assert.Equal(t, req.SecretName, "foo")
<add> assert.Equal(t, req.File.Name, "testing")
<ide> }
<ide>
<ide> func TestSecretOptionsShorthand(t *testing.T) {
<ide> func TestSecretOptionsShorthand(t *testing.T) {
<ide> reqs := opt.Value()
<ide> assert.Equal(t, len(reqs), 1)
<ide> req := reqs[0]
<del> assert.Equal(t, req.Source, "foo")
<add> assert.Equal(t, req.SecretName, "foo")
<ide> }
<ide>
<ide> func TestSecretOptionsCustomUidGid(t *testing.T) {
<ide> func TestSecretOptionsCustomUidGid(t *testing.T) {
<ide> reqs := opt.Value()
<ide> assert.Equal(t, len(reqs), 1)
<ide> req := reqs[0]
<del> assert.Equal(t, req.Source, "foo")
<del> assert.Equal(t, req.Target, "testing")
<del> assert.Equal(t, req.UID, "1000")
<del> assert.Equal(t, req.GID, "1001")
<add> assert.Equal(t, req.SecretName, "foo")
<add> assert.Equal(t, req.File.Name, "testing")
<add> assert.Equal(t, req.File.UID, "1000")
<add> assert.Equal(t, req.File.GID, "1001")
<ide> }
<ide>
<ide> func TestSecretOptionsCustomMode(t *testing.T) {
<ide> func TestSecretOptionsCustomMode(t *testing.T) {
<ide> reqs := opt.Value()
<ide> assert.Equal(t, len(reqs), 1)
<ide> req := reqs[0]
<del> assert.Equal(t, req.Source, "foo")
<del> assert.Equal(t, req.Target, "testing")
<del> assert.Equal(t, req.UID, "1000")
<del> assert.Equal(t, req.GID, "1001")
<del> assert.Equal(t, req.Mode, os.FileMode(0444))
<add> assert.Equal(t, req.SecretName, "foo")
<add> assert.Equal(t, req.File.Name, "testing")
<add> assert.Equal(t, req.File.UID, "1000")
<add> assert.Equal(t, req.File.GID, "1001")
<add> assert.Equal(t, req.File.Mode, os.FileMode(0444))
<ide> } | 5 |
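
The Go flag parser splits a CSV-style value into `key=value` fields, applies uid/gid/mode defaults, and supports a bare `--secret foo` shorthand. A compact Python model of the same parsing rules (hypothetical helper, for illustration only):

```python
import os

def parse_secret(value):
    """Parse 'source=app,target=app.txt,uid=1000,mode=0444' into a reference."""
    ref = {"source": "", "target": "", "uid": "0", "gid": "0", "mode": 0o444}
    fields = value.split(",")
    if len(fields) == 1 and "=" not in fields[0]:   # shorthand: --secret foo
        ref["source"] = ref["target"] = fields[0]
        return ref
    for field in fields:
        key, _, val = field.partition("=")
        if key in ("source", "src"):
            ref["source"] = val
        elif key == "target":
            if os.path.dirname(val):
                raise ValueError("target must not be a path")
            ref["target"] = val
        elif key in ("uid", "gid"):
            ref[key] = val
        elif key == "mode":
            # treat 0-prefixed values as octal, like Go's base-0 ParseUint
            ref["mode"] = int(val, 8) if val.startswith("0") else int(val)
        else:
            raise ValueError(f"invalid field in secret request: {key}")
    if not ref["source"]:
        raise ValueError("source is required")
    return ref

print(parse_secret("source=foo,target=testing,mode=0444"))
```
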
Ruby | Ruby | fix todo time.xmlschema used instead of time.parse | 4fe6d01a3b3bd9fc8b3c443f4e8f8b486203bb08 | <ide><path>activesupport/lib/active_support/xml_mini.rb
<ide> def content_type
<ide> "yaml" => Proc.new { |yaml| yaml.to_yaml }
<ide> } unless defined?(FORMATTING)
<ide>
<del> # TODO: use Time.xmlschema instead of Time.parse;
<del> # use regexp instead of Date.parse
<add> # TODO use regexp instead of Date.parse
<ide> unless defined?(PARSING)
<ide> PARSING = {
<ide> "symbol" => Proc.new { |symbol| symbol.to_sym },
<ide> "date" => Proc.new { |date| ::Date.parse(date) },
<del> "datetime" => Proc.new { |time| ::Time.parse(time).utc rescue ::DateTime.parse(time).utc },
<add> "datetime" => Proc.new { |time| ::Time.xmlschema(time).utc rescue ::DateTime.parse(time).utc },
<ide> "integer" => Proc.new { |integer| integer.to_i },
<ide> "float" => Proc.new { |float| float.to_f },
<ide> "decimal" => Proc.new { |number| BigDecimal(number) }, | 1 |
Text | Text | clarify guide on testing internal errors | fc485a9cbe552902e2c1880210f83170d9d951aa | <ide><path>doc/contributing/writing-tests.md
<ide> assert.throws(
<ide> );
<ide> ```
<ide>
<add>In the case of internal errors, prefer checking only the `code` property:
<add>
<add>```js
<add>assert.throws(
<add> () => {
<add> throw new ERR_FS_FILE_TOO_LARGE(`${sizeKiB} Kb`);
<add> },
<add> { code: 'ERR_FS_FILE_TOO_LARGE' }
<add> // Do not include message: /^File size ([0-9]+ Kb) is greater than 2 GiB$/
<add>);
<add>```
<add>
<ide> ### Console output
<ide>
<ide> Output written by tests to stdout or stderr, such as with `console.log()` or | 1 |
Javascript | Javascript | move hmr to external transformer | 181896e4d95982aaf22d4c67fe0ef3158d2ecf37 | <ide><path>packager/react-packager/src/JSTransformer/__tests__/worker-test.js
<ide> describe('Resolver', function() {
<ide> });
<ide>
<ide> describe('when no external transform is provided', () => {
<del> it('should invoke internal transform if available', () => {
<add> xit('should invoke internal transform if available', () => {
<ide> transform({
<ide> sourceCode: 'code',
<ide> filename: 'test',
<ide> describe('Resolver', function() {
<ide> });
<ide>
<ide> describe('when external transform is provided', () => {
<del> it('should invoke both transformers if internal is available', () => {
<add> xit('should invoke both transformers if internal is available', () => {
<ide> transform({
<ide> sourceCode: code,
<ide> filename: 'test',
<ide> describe('Resolver', function() {
<ide> expect(babel.transform.mock.calls.length).toBe(1);
<ide> });
<ide>
<del> it('should pipe errors through transform pipeline', () => {
<add> xit('should pipe errors through transform pipeline', () => {
<ide> const error = new Error('transform error');
<ide> babel.transform.mockImpl((source, options) => {
<ide> throw error;
<ide><path>packager/react-packager/src/transforms/index.js
<ide> 'use strict';
<ide>
<ide> exports.getAll = function(options) {
<del> var plugins = [];
<del> if (options.hot) {
<del> plugins = plugins.concat([
<del> [
<del> 'react-transform',
<del> {
<del> transforms: [{
<del> transform: 'react-transform-hmr/lib/index.js',
<del> imports: ['React'],
<del> locals: ['module'],
<del> }]
<del> },
<del> ],
<del> 'transform-es2015-block-scoping',
<del> 'transform-es2015-constants',
<del> ['transform-es2015-modules-commonjs', {strict: false, allowTopLevelThis: true}],
<del> ]);
<del> }
<del>
<del> return plugins;
<add> return [];
<ide> };
<ide>
<ide><path>packager/transformer.js
<ide> function transform(src, filename, options) {
<ide> };
<ide>
<ide> const config = Object.assign({}, babelRC, extraConfig);
<add> if (options.hot) {
<add> extraPlugins.push([
<add> 'react-transform',
<add> {
<add> transforms: [{
<add> transform: 'react-transform-hmr/lib/index.js',
<add> imports: ['React'],
<add> locals: ['module'],
<add> }]
<add> },
<add> ]);
<add> }
<ide>
<ide> if (options.inlineRequires) {
<ide> extraPlugins.push(inlineRequires); | 3 |
Text | Text | update cloning details | a39c0dc49f632353cf19f688532ca58a5686c1cf | <ide><path>docs/Taps.md
<ide> dunn/emacs
<ide>
<ide> <!-- vale Homebrew.Terms = OFF -->
<ide> <!-- The `terms` lint suggests changing "repo" to "repository". But we need the abbreviation in the tap syntax and URL example. -->
<del>* `brew tap <user/repo>` makes a shallow clone of the repository at
<add>* `brew tap <user/repo>` makes a clone of the repository at
<ide> https://github.com/user/homebrew-repo. After that, `brew` will be able to work on
<ide> those formulae as if they were in Homebrew's canonical repository. You can
<ide> install and uninstall them with `brew [un]install`, and the formulae are
<ide> automatically updated when you run `brew update`. (See below for details
<ide> about how `brew tap` handles the names of repositories.)
<ide> <!-- vale Homebrew.Terms = ON -->
<ide>
<del>* `brew tap <user/repo> <URL>` makes a shallow clone of the repository at URL.
<add>* `brew tap <user/repo> <URL>` makes a clone of the repository at URL.
<ide> Unlike the one-argument version, URL is not assumed to be GitHub, and it
<ide> doesn't have to be HTTP. Any location and any protocol that Git can handle is
<ide> fine.
<ide>
<del>* Add `--full` to either the one- or two-argument invocations above to have Git
<del> make a complete clone rather than a shallow one. Full is the default for
<del> Homebrew developers.
<del>
<ide> * `brew tap --repair` migrates tapped formulae from a symlink-based to
<ide> directory-based structure. (This should only need to be run once.)
<ide> | 1 |
Ruby | Ruby | remove incorrect todo | 633501ab089f3c18fea0406f5324a9ebbec55be2 | <ide><path>Library/Homebrew/dev-cmd/audit.rb
<ide> def audit_download_strategy
<ide> url_strategy = DownloadStrategyDetector.detect(url)
<ide>
<ide> if using == :git || url_strategy == GitDownloadStrategy
<del> # TODO: check could be in RuboCop
<ide> problem "Git should specify :revision when a :tag is specified." if specs[:tag] && !specs[:revision]
<ide> end
<ide> | 1 |
Python | Python | remove old debug code leftover. | eac4aecc3df00f9fd3b0444a5d12d5fd0d6a04d8 | <ide><path>src/transformers/pipelines/automatic_speech_recognition.py
<ide> def postprocess(self, model_outputs):
<ide> logits = outputs["logits"].numpy()
<ide> stride = outputs.get("stride", None)
<ide> if stride is not None:
<del> try:
<del> total_n, left, right = stride
<del> except Exception:
<del> import ipdb
<del>
<del> ipdb.set_trace()
<add> total_n, left, right = stride
<ide> # Total_n might be < logits.shape[1]
<ide> # because of padding, that's why
<ide> # we need to reconstruct this information | 1 |
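
With the leftover ipdb scaffolding removed, the unpacking is a plain destructuring of the `(total_n, left, right)` triple. One plausible way such a stride gets consumed downstream, sketched with illustrative shapes (not the pipeline's actual trimming code):

```python
import numpy as np

logits = np.zeros((1, 120, 32))   # (batch, time, vocab), includes model-side padding
stride = (100, 10, 10)            # (total_n, left, right) in logit frames

total_n, left, right = stride
# total_n may be smaller than logits.shape[1] because of padding, so slice
# relative to total_n rather than the raw time dimension.
trimmed = logits[:, left:total_n - right]
print(trimmed.shape)              # (1, 80, 32)
```
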
Go | Go | update secret create url for consistency | 86d768284303665bf137ac4766a623167a605c76 | <ide><path>api/server/router/swarm/cluster.go
<ide> func (sr *swarmRouter) initRoutes() {
<ide> router.NewGetRoute("/tasks", sr.getTasks),
<ide> router.NewGetRoute("/tasks/{id}", sr.getTask),
<ide> router.NewGetRoute("/secrets", sr.getSecrets),
<del> router.NewPostRoute("/secrets", sr.createSecret),
<add> router.NewPostRoute("/secrets/create", sr.createSecret),
<ide> router.NewDeleteRoute("/secrets/{id}", sr.removeSecret),
<ide> router.NewGetRoute("/secrets/{id}", sr.getSecret),
<ide> router.NewPostRoute("/secrets/{id}/update", sr.updateSecret),
<ide><path>client/secret_create.go
<ide> func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (t
<ide> var headers map[string][]string
<ide>
<ide> var response types.SecretCreateResponse
<del> resp, err := cli.post(ctx, "/secrets", nil, secret, headers)
<add> resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers)
<ide> if err != nil {
<ide> return response, err
<ide> }
<ide><path>client/secret_create_test.go
<ide> func TestSecretCreateError(t *testing.T) {
<ide> }
<ide>
<ide> func TestSecretCreate(t *testing.T) {
<del> expectedURL := "/secrets"
<add> expectedURL := "/secrets/create"
<ide> client := &Client{
<ide> client: newMockClient(func(req *http.Request) (*http.Response, error) {
<ide> if !strings.HasPrefix(req.URL.Path, expectedURL) {
<ide> func TestSecretCreate(t *testing.T) {
<ide> return nil, err
<ide> }
<ide> return &http.Response{
<del> StatusCode: http.StatusOK,
<add> StatusCode: http.StatusCreated,
<ide> Body: ioutil.NopCloser(bytes.NewReader(b)),
<ide> }, nil
<ide> }),
<ide><path>integration-cli/daemon_swarm.go
<ide> func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service {
<ide> }
<ide>
<ide> func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) string {
<del> status, out, err := d.SockRequest("POST", "/secrets", secretSpec)
<add> status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec)
<ide>
<ide> c.Assert(err, checker.IsNil, check.Commentf(string(out)))
<ide> c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) | 4 |
Python | Python | fix examples in docstring | 72622926e59056b72cec8e95d4e45ee0927f20aa | <ide><path>pytorch_transformers/modeling_transfo_xl.py
<ide> class TransfoXLModel(TransfoXLPreTrainedModel):
<ide>
<ide> Examples::
<ide>
<del> >>> tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
<del> >>> model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
<del> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
<del> >>> outputs = model(input_ids)
<del> >>> last_hidden_states, mems = outputs[:2]
<add> tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
<add> model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
<add> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
<add> outputs = model(input_ids)
<add> last_hidden_states, mems = outputs[:2]
<ide>
<ide> """
<ide> def __init__(self, config):
<ide> class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
<ide>
<ide> Examples::
<ide>
<del> >>> tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
<del> >>> model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
<del> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
<del> >>> outputs = model(input_ids)
<del> >>> prediction_scores, mems = outputs[:2]
<add> tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
<add> model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
<add> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
<add> outputs = model(input_ids)
<add> prediction_scores, mems = outputs[:2]
<ide>
<ide> """
<ide> def __init__(self, config): | 1 |
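The corrected docstring snippets above are straightforward to turn into a self-contained script; only the `torch.no_grad()` guard is an addition here, the rest mirrors the docstring:

```python
import torch
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')

input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)
with torch.no_grad():                  # inference only, no gradient tracking
    outputs = model(input_ids)
prediction_scores, mems = outputs[:2]  # LM logits and cached memory states
print(prediction_scores.shape)
```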
Ruby | Ruby | add collectionproxy#find documentation | 952737af35be26ac4760efa3b06a0313ceae2c68 | <ide><path>activerecord/lib/active_record/associations/collection_proxy.rb
<ide> module Associations
<ide> class CollectionProxy < Relation
<ide> delegate :target, :load_target, :loaded?, :to => :@association
<ide>
<add> ##
<add> # :method: find
<add> # Finds an object in the collection responding to the +id+. Uses the same
<add>    # rules as +ActiveRecord::Base.find+. Raises an +ActiveRecord::RecordNotFound+
<add>    # error if the object cannot be found.
<add> #
<add> # class Person < ActiveRecord::Base
<add> # has_many :pets
<add> # end
<add> #
<add> # person.pets
<add> # # => [
<add> # # #<Pet id: 1, name: "Fancy-Fancy", person_id: 1>,
<add> # # #<Pet id: 2, name: "Spook", person_id: 1>,
<add> # # #<Pet id: 3, name: "Choo-Choo", person_id: 1>
<add> # # ]
<add> #
<add> # person.pets.find(1) # => #<Pet id: 1, name: "Fancy-Fancy", person_id: 1>
<add> # person.pets.find(4) # => ActiveRecord::RecordNotFound: Couldn't find Pet with id=4
<add>
<ide> ##
<ide> # :method: first
<ide> # Returns the first record, or the first +n+ records, from the collection. | 1 |
Go | Go | fix termcaps on the linux client | 50bee2f8114ce562d63b08665f7371aa5a568b2c | <ide><path>container.go
<ide> func (container *Container) Start() error {
<ide>
<ide> var err error
<ide> if container.Config.Tty {
<add> container.cmd.Env = append(container.Config.Env,
<add> "TERM="+os.Getenv("TERM"),
<add> )
<ide> err = container.startPty()
<ide> } else {
<ide> err = container.start()
<ide><path>term/termios_linux.go
<ide> package term
<ide>
<ide> import (
<del> "syscall"
<del> "unsafe"
<add> "os"
<add> "syscall"
<add> "unsafe"
<ide> )
<ide>
<add>// #include <termios.h>
<add>// #include <sys/ioctl.h>
<add>/*
<add>void MakeRaw() {
<add> struct termios t;
<add>
<add> // FIXME: Handle errors?
<add> ioctl(0, TCGETS, &t);
<add>
<add> t.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
<add> t.c_oflag &= ~OPOST;
<add> t.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
<add> t.c_cflag &= ~(CSIZE | PARENB);
<add> t.c_cflag |= CS8;
<add>
<add> ioctl(0, TCSETS, &t);
<add>}
<add>*/
<add>import "C"
<add>
<ide> const (
<ide> getTermios = syscall.TCGETS
<ide> setTermios = syscall.TCSETS
<ide> const (
<ide> // mode and returns the previous state of the terminal so that it can be
<ide> // restored.
<ide> func MakeRaw(fd int) (*State, error) {
<del> var oldState State
<del> if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
<del> return nil, err
<del> }
<del>
<del> newState := oldState.termios
<del> newState.Iflag &^= ISTRIP | IXON | IXOFF
<del> newState.Iflag |= ICRNL
<del> newState.Oflag |= ONLCR
<del> newState.Lflag &^= ECHO | ICANON | ISIG
<del> if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(setTermios), uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
<del> return nil, err
<del> }
<del>
<del> return &oldState, nil
<del>}
<ide>\ No newline at end of file
<add>
<add> fd = int(os.Stdin.Fd())
<add>
<add> var oldState State
<add> if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TCGETS, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
<add> return nil, err
<add> }
<add> C.MakeRaw()
<add> return &oldState, nil
<add>
<add>	// FIXME: report this on the golang issue tracker: this is the very same logic as the C function above, but it does not work
<add>
<add> // newState := oldState.termios
<add>
<add> // newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
<add> // newState.Oflag &^= OPOST
<add> // newState.Lflag &^= (ECHO | syscall.ECHONL | ICANON | ISIG | IEXTEN)
<add> // newState.Cflag &^= (CSIZE | syscall.PARENB)
<add> // newState.Cflag |= CS8
<add>
<add> // if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TCSETS, uintptr(unsafe.Pointer(&newState))); err != 0 {
<add> // return nil, err
<add> // }
<add> // return &oldState, nil
<add>} | 2 |
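The Python standard library exposes the same termios machinery, which makes for a compact way to sanity-check what the cgo `MakeRaw` above does. This sketch is an analogue, not a translation of the Go code:

```python
import sys
import termios
import tty

fd = sys.stdin.fileno()
old_state = termios.tcgetattr(fd)   # save terminal state, like returning *State
try:
    tty.setraw(fd)                  # clears ECHO/ICANON/ISIG etc., as the C helper does
    ch = sys.stdin.read(1)          # read one raw keystroke as a demo
finally:
    termios.tcsetattr(fd, termios.TCSADRAIN, old_state)  # always restore
print(repr(ch))
```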
Javascript | Javascript | add article meta | 903e28ee6afcaa1b4fc1dc1ec0d9ac2f9c920b22 | <ide><path>news/routes/Featured/Featured.js
<ide> class Featured extends Component {
<ide> <div>
<ide> <Helmet>
<ide> <style>{styles}</style>
<add> <title>Featured | freeCodeCamp News</title>
<ide> </Helmet>
<ide> <ul className='featured-list'>{this.renderFeatured(featuredList)}</ul>
<ide> </div>
<ide><path>news/routes/Show/Show.js
<ide> class ShowArticle extends Component {
<ide> return <h2>Oh noes!! Something went wrong!</h2>;
<ide> }
<ide>
<add> // RegEx finds the first paragraph and groups the content
<add> const description = renderableContent.match(/<p>(.*?)<\/p>/)[1];
<add> const slug = this.props.location.pathname;
<ide> return (
<ide> <article className='show-article'>
<ide> <Helmet>
<ide> <style>{styles}</style>
<add> <title>{`${title} | freeCodeCamp News`}</title>
<add> <link
<add> href={`https://www.freecodecamp.org/news${slug}`}
<add> rel='canonical'
<add> />
<add> <meta
<add> content={`https://www.freecodecamp.org/news${slug}`}
<add> property='og:url'
<add> />
<add> <meta content={title} property='og:title' />
<add> <meta content={description} property='og:description' />
<add> <meta content={description} name='description' />
<add> <meta content={featureImage.src} property='og:image' />
<ide> </Helmet>
<ide> <Author article={currentArticle} />
<ide> <h2>{title}</h2> | 2 |
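The description extraction above is just a non-greedy regex over the rendered HTML. The same idea in Python, with placeholder title and slug values for illustration:

```python
import re

html = "<p>First paragraph becomes the description.</p><p>Rest of article.</p>"
match = re.search(r"<p>(.*?)</p>", html)   # same pattern as the JS above
description = match.group(1) if match else ""

meta = {
    "og:title": "Placeholder title",        # assumed values, not real data
    "og:description": description,
    "og:url": "https://www.freecodecamp.org/news/placeholder-slug/",
}
print(meta["og:description"])
```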
Text | Text | add a note about using an open schema model | c8f3151aa6a0b68674aa68061b723424278a54c5 | <ide><path>docs/sources/reference/api/docker_remote_api.md
<ide> page_keywords: API, Docker, rcli, REST, documentation
<ide> "serveraddress" : "string", "auth": ""}`. Notice that `auth` is to be left
<ide> empty, `serveraddress` is a domain/ip without protocol, and that double
<ide> quotes (instead of single ones) are required.
<add> - The Remote API uses an open schema model. In this model, unknown
<add> properties in incoming messages will be ignored.
<add> Client applications need to take this into account to ensure
<add> they will not break when talking to newer Docker daemons.
<ide>
<ide> The current version of the API is v1.15
<ide> | 1 |
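In practice the open schema model means a client keeps only the fields it knows about and ignores the rest. A rough sketch, where the field names are placeholders rather than the actual API schema:

```python
import json

KNOWN_FIELDS = {"Id", "Names", "Image"}      # what this client understands

raw = '{"Id": "abc123", "Image": "busybox", "BrandNewField": 42}'
payload = json.loads(raw)

# A newer daemon may send "BrandNewField"; dropping it instead of erroring
# is what keeps older clients working against newer daemons.
container = {k: v for k, v in payload.items() if k in KNOWN_FIELDS}
print(container)   # {'Id': 'abc123', 'Image': 'busybox'}
```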
Go | Go | fix compatibility with go1.16 | 303ea8e8206381fe34ef7716c7504e5d52b5df4e | <ide><path>pkg/plugins/discovery.go
<ide> package plugins // import "github.com/docker/docker/pkg/plugins"
<ide> import (
<ide> "encoding/json"
<ide> "fmt"
<del> "io/fs"
<ide> "net/url"
<ide> "os"
<ide> "path/filepath"
<ide> func Scan() ([]string, error) {
<ide> continue
<ide> }
<ide>
<del> entry = fs.FileInfoToDirEntry(fi)
<add> entry = fileInfoToDirEntry(fi)
<ide> }
<ide>
<ide> if entry.Type()&os.ModeSocket != 0 {
<ide><path>pkg/plugins/utils.go
<add>//go:build go1.17
<add>// +build go1.17
<add>
<add>package plugins
<add>
<add>import "io/fs"
<add>
<add>var fileInfoToDirEntry = fs.FileInfoToDirEntry
<ide><path>pkg/plugins/utils_go1.16.go
<add>//go:build !go1.17
<add>// +build !go1.17
<add>
<add>// This code is taken from https://github.com/golang/go/blob/go1.17/src/io/fs/readdir.go#L49-L77
<add>// and provides the io/fs.FileInfoToDirEntry() utility for go1.16. Go 1.16 and up
<add>// provide a new implementation of ioutil.ReadDir() (in os.ReadDir()) that returns
<add>// an os.DirEntry instead of fs.FileInfo. go1.17 added the io/fs.FileInfoToDirEntry()
<add>// utility to allow existing uses of ReadDir() to get the old type. This utility
<add>// is not available in go1.16, so we copied it to assist the migration to os.ReadDir().
<add>
<add>// Copyright 2020 The Go Authors. All rights reserved.
<add>// Use of this source code is governed by a BSD-style
<add>// license that can be found in the LICENSE file.
<add>
<add>package plugins
<add>
<add>import "os"
<add>
<add>// dirInfo is a DirEntry based on a FileInfo.
<add>type dirInfo struct {
<add> fileInfo os.FileInfo
<add>}
<add>
<add>func (di dirInfo) IsDir() bool {
<add> return di.fileInfo.IsDir()
<add>}
<add>
<add>func (di dirInfo) Type() os.FileMode {
<add> return di.fileInfo.Mode().Type()
<add>}
<add>
<add>func (di dirInfo) Info() (os.FileInfo, error) {
<add> return di.fileInfo, nil
<add>}
<add>
<add>func (di dirInfo) Name() string {
<add> return di.fileInfo.Name()
<add>}
<add>
<add>// fileInfoToDirEntry returns a DirEntry that returns information from info.
<add>// If info is nil, fileInfoToDirEntry returns nil.
<add>func fileInfoToDirEntry(info os.FileInfo) os.DirEntry {
<add> if info == nil {
<add> return nil
<add> }
<add> return dirInfo{fileInfo: info}
<add>} | 3 |
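The `dirInfo` polyfill above is a plain adapter. The same shape in Python terms, wrapping a stat result so it quacks like `os.DirEntry`, may make the intent clearer (illustrative only, not part of the patch):

```python
import os
import stat

class StatDirEntry:
    """Minimal os.DirEntry-like view over a (name, stat_result) pair."""

    def __init__(self, name, st):
        self._name, self._st = name, st

    @property
    def name(self):
        return self._name

    def is_dir(self, follow_symlinks=True):
        return stat.S_ISDIR(self._st.st_mode)

    def stat(self, follow_symlinks=True):
        return self._st

entry = StatDirEntry("plugins", os.stat("."))
print(entry.name, entry.is_dir())
```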
Text | Text | add section about how the articles are built | 245b5e2642b94c37ab7b1d64b4181ab9a455ab65 | <ide><path>docs/language-lead-handbook.md
<ide> The "Editor" level allows the user to access all Drafts and publish them. Select
<ide>
<ide> The "Administrator" level is reserved for freeCodeCamp staff and Language Leads.
<ide>
<add>### How are the articles built
<add>
<add>We use a [JAMStack](https://www.google.com/search?q=what+is+jamstack)-based approach to build and deploy the articles. This strategy makes for a speedy static site that is cached and served from a CDN.
<add>
<add>[Ghost](https://ghost.org) acts as our content management platform, and [11ty](https://11ty.dev) builds the articles into static assets – plain HTML, JavaScript, and CSS. Only these static assets are deployed to our servers.
<add>
<add>This process is automated and runs periodically. If you publish something now, it will be available on the news site in a few hours.
<add>
<add>You can find the up-to-date build schedules and status here: https://github.com/freeCodeCamp/news#build
<add>
<ide> ## How to mention the original author of a translated article
<ide>
<ide> The original author and the original article are linked automatically by adding this code to the Code Injection -> head section in the Draft Settings on Ghost. | 1
Python | Python | fix collation of best models | 2c703d99c264b8782441aef4bd1408acb1f6b6c9 | <ide><path>spacy/cli/train.py
<ide> def train(lang, output_dir, train_data, dev_data, n_iter=30, n_sents=0,
<ide> components.append('parser')
<ide> if not no_tagger:
<ide> components.append('tagger')
<del> if not no_entity:
<add> if not no_entities:
<ide> components.append('ner')
<ide> _collate_best_model(meta, output_path, components)
<ide> | 1 |
PHP | PHP | fix another failing test on postgres | 7145bd65746dd1a6f20b703f11278380e00bcd4b | <ide><path>lib/Cake/Test/Case/Controller/Component/PaginatorComponentTest.php
<ide> public function _findTotalsOperation($state, $query, $results = array()) {
<ide> $query['fields'] = array('author_id', 'Author.user');
<ide> $this->virtualFields['total_posts'] = "COUNT({$this->alias}.id)";
<ide> $query['fields'][] = 'total_posts';
<del> $query['group'] = array('author_id');
<add> $query['group'] = array('author_id', 'Author.user');
<ide> $query['order'] = array('author_id' => 'ASC');
<ide> return $query;
<ide> } | 1 |
Python | Python | add resnet56 short tests. | 2519f29bde45c395e5290063b5183b30323fc6d5 | <ide><path>official/resnet/keras/keras_benchmark.py
<add># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add># ==============================================================================
<add>"""Executes Keras benchmarks and accuracy tests."""
<add>
<add>from __future__ import absolute_import
<add>from __future__ import division
<add>from __future__ import print_function
<add>
<add>import os
<add>
<add>from absl import flags
<add>from absl.testing import flagsaver
<add>import tensorflow as tf # pylint: disable=g-bad-import-order
<add>
<add>FLAGS = flags.FLAGS
<add>
<add>
<add>class KerasBenchmark(object):
<add> """Base benchmark class with methods to simplify testing."""
<add> local_flags = None
<add>
<add> def __init__(self, output_dir=None, default_flags=None, flag_methods=None):
<add> self.oss_report_object = None
<add> self.output_dir = output_dir
<add> self.default_flags = default_flags or {}
<add> self.flag_methods = flag_methods or {}
<add>
<add> def _get_model_dir(self, folder_name):
<add> return os.path.join(self.output_dir, folder_name)
<add>
<add> def _setup(self):
<add> """Sets up and resets flags before each test."""
<add> tf.logging.set_verbosity(tf.logging.DEBUG)
<add> if KerasBenchmark.local_flags is None:
<add> for flag_method in self.flag_methods:
<add> flag_method()
<add> # Loads flags to get defaults to then override. List cannot be empty.
<add> flags.FLAGS(['foo'])
<add> # Overrides flag values with defaults for the class of tests.
<add> for k, v in self.default_flags.items():
<add> setattr(FLAGS, k, v)
<add> saved_flag_values = flagsaver.save_flag_values()
<add> KerasBenchmark.local_flags = saved_flag_values
<add> else:
<add> flagsaver.restore_flag_values(KerasBenchmark.local_flags)
<add>
<add> def fill_report_object(self, stats, top_1_max=None, top_1_min=None,
<add> log_steps=None, total_batch_size=None, warmup=1):
<add> """Fills report object to report results.
<add>
<add> Args:
<add> stats: dict returned from keras models with known entries.
<add> top_1_max: highest passing level for top_1 accuracy.
<add> top_1_min: lowest passing level for top_1 accuracy.
<add> log_steps: How often the log was created for stats['step_timestamp_log'].
<add> total_batch_size: Global batch-size.
<add> warmup: number of entries in stats['step_timestamp_log'] to ignore.
<add> """
<add> if self.oss_report_object:
<add>
<add> if 'accuracy_top_1' in stats:
<add> self.oss_report_object.add_top_1(stats['accuracy_top_1'],
<add> expected_min=top_1_min,
<add> expected_max=top_1_max)
<add> self.oss_report_object.add_other_quality(
<add> stats['training_accuracy_top_1'],
<add> 'top_1_train_accuracy')
<add> if (warmup and
<add> 'step_timestamp_log' in stats and
<add> len(stats['step_timestamp_log']) > warmup):
<add> # first entry in the time_log is start of step 1. The rest of the
<add> # entries are the end of each step recorded
<add> time_log = stats['step_timestamp_log']
<add> elapsed = time_log[-1].timestamp - time_log[warmup].timestamp
<add> num_examples = (total_batch_size * log_steps * (len(time_log)-warmup-1))
<add> examples_per_sec = num_examples / elapsed
<add> self.oss_report_object.add_examples_per_second(examples_per_sec)
<add>
<add> if 'avg_exp_per_second' in stats:
<add> self.oss_report_object.add_result(stats['avg_exp_per_second'],
<add> 'avg_exp_per_second',
<add> 'exp_per_second')
<add> else:
<add> raise ValueError('oss_report_object has not been set.')
<ide><path>official/resnet/keras/keras_cifar_benchmark.py
<ide> from __future__ import division
<ide> from __future__ import print_function
<ide>
<del>import os
<del>
<ide> from absl import flags
<del>from absl.testing import flagsaver
<del>import tensorflow as tf # pylint: disable=g-bad-import-order
<ide>
<ide> from official.resnet import cifar10_main as cifar_main
<add>from official.resnet.keras import keras_benchmark
<ide> from official.resnet.keras import keras_cifar_main
<ide> from official.resnet.keras import keras_common
<ide>
<del>
<ide> DATA_DIR = '/data/cifar10_data/cifar-10-batches-bin'
<ide> MIN_TOP_1_ACCURACY = 0.925
<ide> MAX_TOP_1_ACCURACY = 0.938
<ide>
<add>FLAGS = flags.FLAGS
<ide>
<del>class KerasCifar10BenchmarkTests(object):
<del> """Benchmarks and accuracy tests for KerasCifar10."""
<ide>
<del> local_flags = None
<add>class Resnet56KerasAccuracy(keras_benchmark.KerasBenchmark):
<add> """Accuracy tests for ResNet56 Keras CIFAR-10."""
<ide>
<ide> def __init__(self, output_dir=None):
<del> self.oss_report_object = None
<del> self.output_dir = output_dir
<add> flag_methods = [keras_common.define_keras_flags,
<add> cifar_main.define_cifar_flags]
<add>
<add> super(Resnet56KerasAccuracy, self).__init__(output_dir=output_dir,
<add> flag_methods=flag_methods)
<ide>
<del> def keras_resnet56_1_gpu(self):
<add> def benchmark_graph_1_gpu(self):
<ide> """Test keras based model with Keras fit and distribution strategies."""
<ide> self._setup()
<del> flags.FLAGS.num_gpus = 1
<del> flags.FLAGS.data_dir = DATA_DIR
<del> flags.FLAGS.batch_size = 128
<del> flags.FLAGS.train_epochs = 182
<del> flags.FLAGS.model_dir = self._get_model_dir('keras_resnet56_1_gpu')
<del> flags.FLAGS.dtype = 'fp32'
<del> stats = keras_cifar_main.run(flags.FLAGS)
<del> self._fill_report_object(stats)
<del>
<del> def keras_resnet56_eager_1_gpu(self):
<add> FLAGS.num_gpus = 1
<add> FLAGS.data_dir = DATA_DIR
<add> FLAGS.batch_size = 128
<add> FLAGS.train_epochs = 182
<add> FLAGS.model_dir = self._get_model_dir('keras_resnet56_1_gpu')
<add> FLAGS.dtype = 'fp32'
<add> stats = keras_cifar_main.run(FLAGS)
<add> self.fill_report_object(stats, FLAGS.batch_size)
<add>
<add> def benchmark_1_gpu(self):
<ide> """Test keras based model with eager and distribution strategies."""
<ide> self._setup()
<del> flags.FLAGS.num_gpus = 1
<del> flags.FLAGS.data_dir = DATA_DIR
<del> flags.FLAGS.batch_size = 128
<del> flags.FLAGS.train_epochs = 182
<del> flags.FLAGS.model_dir = self._get_model_dir('keras_resnet56_eager_1_gpu')
<del> flags.FLAGS.dtype = 'fp32'
<del> flags.FLAGS.enable_eager = True
<add> FLAGS.num_gpus = 1
<add> FLAGS.data_dir = DATA_DIR
<add> FLAGS.batch_size = 128
<add> FLAGS.train_epochs = 182
<add> FLAGS.model_dir = self._get_model_dir('keras_resnet56_eager_1_gpu')
<add> FLAGS.dtype = 'fp32'
<add> FLAGS.enable_eager = True
<ide> stats = keras_cifar_main.run(flags.FLAGS)
<del> self._fill_report_object(stats)
<add> self.fill_report_object(stats, FLAGS.batch_size)
<ide>
<del> def keras_resnet56_eager_2_gpu(self):
<add> def benchmark_2_gpu(self):
<ide> """Test keras based model with eager and distribution strategies."""
<ide> self._setup()
<del> flags.FLAGS.num_gpus = 2
<del> flags.FLAGS.data_dir = DATA_DIR
<del> flags.FLAGS.batch_size = 128
<del> flags.FLAGS.train_epochs = 182
<del> flags.FLAGS.model_dir = self._get_model_dir('keras_resnet56_eager_2_gpu')
<del> flags.FLAGS.dtype = 'fp32'
<del> flags.FLAGS.enable_eager = True
<del> stats = keras_cifar_main.run(flags.FLAGS)
<del> self._fill_report_object(stats)
<del>
<del> def keras_resnet56_2_gpu(self):
<add> FLAGS.num_gpus = 2
<add> FLAGS.data_dir = DATA_DIR
<add> FLAGS.batch_size = 128
<add> FLAGS.train_epochs = 182
<add> FLAGS.model_dir = self._get_model_dir('keras_resnet56_eager_2_gpu')
<add> FLAGS.dtype = 'fp32'
<add> FLAGS.enable_eager = True
<add> stats = keras_cifar_main.run(FLAGS)
<add> self.fill_report_object(stats, FLAGS.batch_size)
<add>
<add> def benchmark_graph_2_gpu(self):
<ide> """Test keras based model with Keras fit and distribution strategies."""
<ide> self._setup()
<del> flags.FLAGS.num_gpus = 2
<del> flags.FLAGS.data_dir = DATA_DIR
<del> flags.FLAGS.data_dir = self._get_model_dir('keras_resnet56_2_gpu')
<del> flags.FLAGS.batch_size = 128
<del> flags.FLAGS.train_epochs = 182
<del> flags.FLAGS.model_dir = ''
<del> flags.FLAGS.dtype = 'fp32'
<del> stats = keras_cifar_main.run(flags.FLAGS)
<del> self._fill_report_object(stats)
<del>
<del> def keras_resnet56_no_dist_strat_1_gpu(self):
<add> FLAGS.num_gpus = 2
<add> FLAGS.data_dir = DATA_DIR
<add> FLAGS.batch_size = 128
<add> FLAGS.train_epochs = 182
<add> FLAGS.model_dir = self._get_model_dir('keras_resnet56_2_gpu')
<add> FLAGS.dtype = 'fp32'
<add> stats = keras_cifar_main.run(FLAGS)
<add> self.fill_report_object(stats, FLAGS.batch_size)
<add>
<add> def benchmark_graph_1_gpu_no_dist_strat(self):
<ide> """Test keras based model with Keras fit but not distribution strategies."""
<ide> self._setup()
<del> flags.FLAGS.turn_off_distribution_strategy = True
<del> flags.FLAGS.num_gpus = 1
<del> flags.FLAGS.data_dir = DATA_DIR
<del> flags.FLAGS.batch_size = 128
<del> flags.FLAGS.train_epochs = 182
<del> flags.FLAGS.model_dir = self._get_model_dir(
<add> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.num_gpus = 1
<add> FLAGS.data_dir = DATA_DIR
<add> FLAGS.batch_size = 128
<add> FLAGS.train_epochs = 182
<add> FLAGS.model_dir = self._get_model_dir(
<ide> 'keras_resnet56_no_dist_strat_1_gpu')
<del> flags.FLAGS.dtype = 'fp32'
<del> stats = keras_cifar_main.run(flags.FLAGS)
<del> self._fill_report_object(stats)
<del>
<del> def _fill_report_object(self, stats):
<del> if self.oss_report_object:
<del> self.oss_report_object.add_top_1(stats['accuracy_top_1'],
<del> expected_min=MIN_TOP_1_ACCURACY,
<del> expected_max=MAX_TOP_1_ACCURACY)
<del> self.oss_report_object.add_other_quality(stats['training_accuracy_top_1'],
<del> 'top_1_train_accuracy')
<del> else:
<del> raise ValueError('oss_report_object has not been set.')
<del>
<del> def _get_model_dir(self, folder_name):
<del> return os.path.join(self.output_dir, folder_name)
<del>
<del> def _setup(self):
<del> """Setups up and resets flags before each test."""
<del> tf.logging.set_verbosity(tf.logging.DEBUG)
<del> if KerasCifar10BenchmarkTests.local_flags is None:
<del> keras_common.define_keras_flags()
<del> cifar_main.define_cifar_flags()
<del> # Loads flags to get defaults to then override. List cannot be empty.
<del> flags.FLAGS(['foo'])
<del> saved_flag_values = flagsaver.save_flag_values()
<del> KerasCifar10BenchmarkTests.local_flags = saved_flag_values
<del> return
<del> flagsaver.restore_flag_values(KerasCifar10BenchmarkTests.local_flags)
<add> FLAGS.dtype = 'fp32'
<add> stats = keras_cifar_main.run(FLAGS)
<add> self.fill_report_object(stats, FLAGS.batch_size)
<add>
<add> def fill_report_object(self, stats, total_batch_size):
<add> super(Resnet56KerasAccuracy, self).fill_report_object(
<add> stats,
<add> top_1_min=MIN_TOP_1_ACCURACY,
<add> top_1_max=MAX_TOP_1_ACCURACY,
<add> total_batch_size=total_batch_size,
<add> log_steps=100)
<add>
<add>
<add>class Resnet56KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
<add> """Short performance tests for ResNet56 via Keras and CIFAR-10."""
<add>
<add> def __init__(self, output_dir=None, default_flags=None):
<add> flag_methods = [keras_common.define_keras_flags,
<add> cifar_main.define_cifar_flags]
<add>
<add> super(Resnet56KerasBenchmarkBase, self).__init__(
<add> output_dir=output_dir,
<add> flag_methods=flag_methods,
<add> default_flags=default_flags)
<add>
<add> def _run_benchmark(self):
<add> stats = keras_cifar_main.run(FLAGS)
<add> self.fill_report_object(stats)
<add>
<add> def benchmark_1_gpu_no_dist_strat(self):
<add> self._setup()
<add> FLAGS.num_gpus = 1
<add> FLAGS.enable_eager = True
<add> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.batch_size = 128
<add>
<add> self._run_benchmark()
<add>
<add> def benchmark_graph_1_gpu_no_dist_strat(self):
<add> self._setup()
<add> FLAGS.num_gpus = 1
<add> FLAGS.enable_eager = False
<add> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.batch_size = 128
<add>
<add> self._run_benchmark()
<add>
<add> def benchmark_1_gpu(self):
<add> self._setup()
<add> FLAGS.num_gpus = 1
<add> FLAGS.enable_eager = True
<add> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.batch_size = 128
<add>
<add> self._run_benchmark()
<add>
<add> def benchmark_graph_1_gpu(self):
<add> self._setup()
<add> FLAGS.num_gpus = 1
<add> FLAGS.enable_eager = False
<add> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.batch_size = 128
<add>
<add> self._run_benchmark()
<add>
<add> def benchmark_2_gpu(self):
<add> self._setup()
<add> FLAGS.num_gpus = 2
<add> FLAGS.enable_eager = True
<add> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.batch_size = 128 * 2 # 2 GPUs
<add>
<add> self._run_benchmark()
<add>
<add> def benchmark_graph_2_gpu(self):
<add> self._setup()
<add> FLAGS.num_gpus = 2
<add> FLAGS.enable_eager = False
<add> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.batch_size = 128 * 2 # 2 GPUs
<add>
<add> self._run_benchmark()
<add>
<add> def fill_report_object(self, stats):
<add> super(Resnet56KerasBenchmarkBase, self).fill_report_object(
<add> stats,
<add> total_batch_size=FLAGS.batch_size,
<add> log_steps=FLAGS.log_steps)
<add>
<add>
<add>class Resnet56KerasBenchmarkSynth(Resnet56KerasBenchmarkBase):
<add> """Synthetic benchmarks for ResNet56 and Keras."""
<add>
<add> def __init__(self, output_dir=None):
<add> def_flags = {}
<add> def_flags['skip_eval'] = True
<add> def_flags['use_synthetic_data'] = True
<add> def_flags['train_steps'] = 110
<add> def_flags['log_steps'] = 10
<add>
<add> super(Resnet56KerasBenchmarkSynth, self).__init__(output_dir=output_dir,
<add> default_flags=def_flags)
<add>
<add>
<add>class Resnet56KerasBenchmarkReal(Resnet56KerasBenchmarkBase):
<add> """Real data benchmarks for ResNet56 and Keras."""
<add>
<add> def __init__(self, output_dir=None):
<add> def_flags = {}
<add> def_flags['skip_eval'] = True
<add> def_flags['data_dir'] = DATA_DIR
<add> def_flags['train_steps'] = 110
<add> def_flags['log_steps'] = 10
<add>
<add> super(Resnet56KerasBenchmarkReal, self).__init__(output_dir=output_dir,
<add> default_flags=def_flags) | 2 |
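A hedged sketch of how these classes might be driven outside the usual benchmark harness; `DummyReport` stands in for whatever reporting hook the harness normally injects into `oss_report_object` (its interface is assumed, not documented here):

```python
from official.resnet.keras.keras_cifar_benchmark import (
    Resnet56KerasBenchmarkSynth,
)

class DummyReport:
    """Stand-in for the harness reporting hook; interface is assumed."""
    def __getattr__(self, name):
        return lambda *args, **kwargs: print(name, args, kwargs)

bench = Resnet56KerasBenchmarkSynth(output_dir='/tmp/resnet56_bench')
bench.oss_report_object = DummyReport()  # fill_report_object raises if unset
bench.benchmark_1_gpu()                  # 110 synthetic-data steps, logged every 10
```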
Go | Go | update memory limit for container | 9b755412ab03816f0b7d6013618c65ae68781bcb | <ide><path>daemon/create.go
<ide> func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
<ide> return job.Errorf("Usage: %s", job.Name)
<ide> }
<ide> config := runconfig.ContainerConfigFromJob(job)
<del> if config.Memory != 0 && config.Memory < 524288 {
<del> return job.Errorf("Minimum memory limit allowed is 512k")
<add> if config.Memory != 0 && config.Memory < 4194304 {
<add> return job.Errorf("Minimum memory limit allowed is 4MB")
<ide> }
<ide> if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
<ide> job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
<ide><path>integration-cli/docker_cli_run_test.go
<ide> func TestRunEchoStdout(t *testing.T) {
<ide>
<ide> // "test" should be printed
<ide> func TestRunEchoStdoutWithMemoryLimit(t *testing.T) {
<del> runCmd := exec.Command(dockerBinary, "run", "-m", "2786432", "busybox", "echo", "test")
<add> runCmd := exec.Command(dockerBinary, "run", "-m", "4m", "busybox", "echo", "test")
<ide> out, _, _, err := runCommandWithStdoutStderr(runCmd)
<ide> if err != nil {
<ide> t.Fatalf("failed to run container: %v, output: %q", err, out)
<ide> func TestRunEchoStdoutWitCPULimit(t *testing.T) {
<ide>
<ide> // "test" should be printed
<ide> func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) {
<del> runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "2786432", "busybox", "echo", "test")
<add> runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "4m", "busybox", "echo", "test")
<ide> out, _, _, err := runCommandWithStdoutStderr(runCmd)
<ide> if err != nil {
<ide> t.Fatalf("failed to run container: %v, output: %q", err, out) | 2 |
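The daemon-side check translates to a few lines. This sketch mirrors the 4194304-byte floor above, with a deliberately simplified size parser (docker's real unit handling is richer):

```python
MIN_MEMORY = 4 * 1024 * 1024   # 4194304 bytes, the new 4MB floor

UNITS = {"b": 1, "k": 1024, "m": 1024 ** 2, "g": 1024 ** 3}

def parse_size(value: str) -> int:
    value = value.strip().lower()
    if value and value[-1] in UNITS:
        return int(value[:-1]) * UNITS[value[-1]]
    return int(value)

def validate_memory(limit: int) -> None:
    if limit != 0 and limit < MIN_MEMORY:
        raise ValueError("Minimum memory limit allowed is 4MB")

validate_memory(parse_size("4m"))     # passes, matching the updated test
validate_memory(parse_size("512k"))   # raises: below the raised floor
```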
Python | Python | replace tabs with spaces | 25526d1a26f4b7ac245ecedd16c82cfd7d67f741 | <ide><path>numpy/core/memmap.py
<ide> def sync(self):
<ide> self._mmap.flush()
<ide>
<ide> def close(self):
<del> self._mmap.close()
<add> self._mmap.close()
<ide>
<ide> def __del__(self):
<ide> if self._mmap is not None:
<ide><path>numpy/lib/tests/test_function_base.py
<ide> def check_large(self):
<ide>
<ide> class test_digitize(NumpyTestCase):
<ide> def check_forward(self):
<del> x = arange(-6,5)
<del> bins = arange(-5,5)
<del> assert_array_equal(digitize(x,bins),arange(11))
<add> x = arange(-6,5)
<add> bins = arange(-5,5)
<add> assert_array_equal(digitize(x,bins),arange(11))
<ide>
<ide> def check_reverse(self):
<del> x = arange(5,-6,-1)
<del> bins = arange(5,-5,-1)
<del> assert_array_equal(digitize(x,bins),arange(11))
<add> x = arange(5,-6,-1)
<add> bins = arange(5,-5,-1)
<add> assert_array_equal(digitize(x,bins),arange(11))
<ide>
<ide> def check_random(self):
<del> x = rand(10)
<del> bin = linspace(x.min(), x.max(), 10)
<del> assert all(digitize(x,bin) != 0)
<add> x = rand(10)
<add> bin = linspace(x.min(), x.max(), 10)
<add> assert all(digitize(x,bin) != 0)
<ide>
<ide> class test_unwrap(NumpyTestCase):
<ide> def check_simple(self): | 2 |
PHP | PHP | remove unneeded import | 334f1580e21439808567f6ecb200adf3a3670fb0 | <ide><path>src/Illuminate/Foundation/Bus/DispatchesJobs.php
<ide>
<ide> namespace Illuminate\Foundation\Bus;
<ide>
<del>use ArrayAccess;
<ide> use Illuminate\Contracts\Bus\Dispatcher;
<ide>
<ide> trait DispatchesJobs | 1 |
Go | Go | add build prefix to copy tests | 184fe67bbc3759307adb6cb3e5338b5325bb88bb | <ide><path>integration-cli/docker_cli_build_test.go
<ide> func TestAddEtcToRoot(t *testing.T) {
<ide> logDone("build - add etc directory to root")
<ide> }
<ide>
<del>func TestCopySingleFileToRoot(t *testing.T) {
<add>func TestBuildCopySingleFileToRoot(t *testing.T) {
<ide> testDirName := "SingleFileToRoot"
<ide> sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName)
<ide> buildDirectory, err := ioutil.TempDir("", "test-build-add")
<ide> func TestCopySingleFileToRoot(t *testing.T) {
<ide> }
<ide>
<ide> // Issue #3960: "ADD src ." hangs - adapted for COPY
<del>func TestCopySingleFileToWorkdir(t *testing.T) {
<add>func TestBuildCopySingleFileToWorkdir(t *testing.T) {
<ide> testDirName := "SingleFileToWorkdir"
<ide> sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName)
<ide> buildDirectory, err := ioutil.TempDir("", "test-build-add")
<ide> func TestCopySingleFileToWorkdir(t *testing.T) {
<ide> logDone("build - copy single file to workdir")
<ide> }
<ide>
<del>func TestCopySingleFileToExistDir(t *testing.T) {
<add>func TestBuildCopySingleFileToExistDir(t *testing.T) {
<ide> buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
<ide> out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir")
<ide> errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
<ide> func TestCopySingleFileToExistDir(t *testing.T) {
<ide> logDone("build - copy single file to existing dir")
<ide> }
<ide>
<del>func TestCopySingleFileToNonExistDir(t *testing.T) {
<add>func TestBuildCopySingleFileToNonExistDir(t *testing.T) {
<ide> buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
<ide> out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir")
<ide> errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
<ide> func TestCopySingleFileToNonExistDir(t *testing.T) {
<ide> logDone("build - copy single file to non-existing dir")
<ide> }
<ide>
<del>func TestCopyDirContentToRoot(t *testing.T) {
<add>func TestBuildCopyDirContentToRoot(t *testing.T) {
<ide> buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
<ide> out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot")
<ide> errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
<ide> func TestCopyDirContentToRoot(t *testing.T) {
<ide> logDone("build - copy directory contents to root")
<ide> }
<ide>
<del>func TestCopyDirContentToExistDir(t *testing.T) {
<add>func TestBuildCopyDirContentToExistDir(t *testing.T) {
<ide> buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
<ide> out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir")
<ide> errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
<ide> func TestCopyDirContentToExistDir(t *testing.T) {
<ide> logDone("build - copy directory contents to existing dir")
<ide> }
<ide>
<del>func TestCopyWholeDirToRoot(t *testing.T) {
<add>func TestBuildCopyWholeDirToRoot(t *testing.T) {
<ide> testDirName := "WholeDirToRoot"
<ide> sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName)
<ide> buildDirectory, err := ioutil.TempDir("", "test-build-add")
<ide> func TestCopyWholeDirToRoot(t *testing.T) {
<ide> logDone("build - copy whole directory to root")
<ide> }
<ide>
<del>func TestCopyEtcToRoot(t *testing.T) {
<add>func TestBuildCopyEtcToRoot(t *testing.T) {
<ide> buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
<ide> out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot")
<ide> errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
<ide> func TestCopyEtcToRoot(t *testing.T) {
<ide> logDone("build - copy etc directory to root")
<ide> }
<ide>
<del>func TestCopyDisallowRemote(t *testing.T) {
<add>func TestBuildCopyDisallowRemote(t *testing.T) {
<ide> buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
<ide> buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DisallowRemote")
<ide> buildCmd.Dir = buildDirectory | 1 |
Python | Python | add example mnist_net2net.py | 8d9cb782fb73e5d08dd75f29a1158e68df0e17fc | <ide><path>examples/mnist_net2net.py
<add>'''This is an implementation of Net2Net experiment with MNIST in
<add>'Net2Net: Accelerating Learning via Knowledge Transfer'
<add>by Tianqi Chen, Ian Goodfellow, and Jonathon Shlens
<add>
<add>arXiv:1511.05641v4 [cs.LG] 23 Apr 2016
<add>http://arxiv.org/abs/1511.05641
<add>
<add>Notes
<add>- What:
<add> + Net2Net is a group of methods to transfer knowledge from a teacher neural
<add>  net to a student net, so that the student net can be trained faster than
<add> from scratch.
<add> + The paper discussed two specific methods of Net2Net, i.e. Net2WiderNet
<add> and Net2DeeperNet.
<add> + Net2WiderNet replaces a model with an equivalent wider model that has
<add> more units in each hidden layer.
<add> + Net2DeeperNet replaces a model with an equivalent deeper model.
<add> + Both are based on the idea of 'function-preserving transformations of
<add> neural nets'.
<add>- Why:
<add> + Enable fast exploration of multiple neural nets in experimentation and
<add>  design process, by creating a series of wider and deeper models with
<add> transferable knowledge.
<add> + Enable 'lifelong learning system' by gradually adjusting model complexity
<add>  to data availability, and reusing transferable knowledge.
<add>
<add>Experiments
<add>- Teacher model: a basic CNN model trained on MNIST for 3 epochs.
<add>- Net2WiderNet experiment:
<add> + Student model has a wider Conv2D layer and a wider FC layer.
<add> + Comparison of 'random-padding' vs 'net2wider' weight initialization.
<add> + With both methods, student model should immediately perform as well as
<add> teacher model, but 'net2wider' is slightly better.
<add>- Net2DeeperNet experiment:
<add> + Student model has an extra Conv2D layer and an extra FC layer.
<add> + Comparison of 'random-init' vs 'net2deeper' weight initialization.
<add> + Starting performance of 'net2deeper' is better than 'random-init'.
<add>- Hyper-parameters:
<add> + SGD with momentum=0.9 is used for training teacher and student models.
<add> + Learning rate adjustment: it's suggested to reduce learning rate
<add> to 1/10 for student model.
<add> + Addition of noise in 'net2wider' is used to break weight symmetry
<add> and thus enable full capacity of student models. It is optional
<add> when a Dropout layer is used.
<add>
<add>Results
<add>- Tested with 'Theano' backend and 'th' image_dim_ordering.
<add>- Running on GPU GeForce GTX 980M
<add>- Performance Comparisons - validation loss values during first 3 epochs:
<add>(1) teacher_model: 0.075 0.041 0.041
<add>(2) wider_random_pad: 0.036 0.034 0.032
<add>(3) wider_net2wider: 0.032 0.030 0.030
<add>(4) deeper_random_init: 0.061 0.043 0.041
<add>(5) deeper_net2deeper: 0.032 0.031 0.029
<add>'''
<add>
<add>from __future__ import print_function
<add>import numpy as np
<add>np.random.seed(1337)
<add>
<add>from keras.models import Sequential
<add>from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
<add>from keras.optimizers import SGD
<add>from keras.utils import np_utils
<add>from keras.datasets import mnist
<add>
<add>input_shape = (1, 28, 28) # image shape
<add>nb_class = 10 # number of class
<add>
<add>
<add># load and pre-process data
<add>def preprocess_input(x):
<add> return x.reshape((-1, ) + input_shape) / 255.
<add>
<add>
<add>def preprocess_output(y):
<add> return np_utils.to_categorical(y)
<add>
<add>(train_x, train_y), (validation_x, validation_y) = mnist.load_data()
<add>train_x, validation_x = map(preprocess_input, [train_x, validation_x])
<add>train_y, validation_y = map(preprocess_output, [train_y, validation_y])
<add>print('Loading MNIST data...')
<add>print('train_x shape:', train_x.shape, 'train_y shape:', train_y.shape)
<add>print('validation_x shape:', validation_x.shape,
<add> 'validation_y shape', validation_y.shape)
<add>
<add>
<add># knowledge transfer algorithms
<add>def wider2net_conv2d(teacher_w1, teacher_b1, teacher_w2, new_width, init):
<add> '''Get initial weights for a wider conv2d layer with a bigger nb_filter,
<add> by 'random-padding' or 'net2wider'.
<add>
<add> # Arguments
<add> teacher_w1: `weight` of conv2d layer to become wider,
<add> of shape (nb_filter1, nb_channel1, kh1, kw1)
<add> teacher_b1: `bias` of conv2d layer to become wider,
<add> of shape (nb_filter1, )
<add> teacher_w2: `weight` of next connected conv2d layer,
<add> of shape (nb_filter2, nb_channel2, kh2, kw2)
<add> new_width: new `nb_filter` for the wider conv2d layer
<add> init: initialization algorithm for new weights,
<add> either 'random-pad' or 'net2wider'
<add> '''
<add> assert teacher_w1.shape[0] == teacher_w2.shape[1], (
<add> 'successive layers from teacher model should have compatible shapes')
<add> assert teacher_w1.shape[0] == teacher_b1.shape[0], (
<add> 'weight and bias from same layer should have compatible shapes')
<add> assert new_width > teacher_w1.shape[0], (
<add> 'new width (nb_filter) should be bigger than the existing one')
<add>
<add> n = new_width - teacher_w1.shape[0]
<add> if init == 'random-pad':
<add> new_w1 = np.random.normal(0, 0.1, size=(n, ) + teacher_w1.shape[1:])
<add> new_b1 = np.ones(n) * 0.1
<add> new_w2 = np.random.normal(0, 0.1, size=(
<add> teacher_w2.shape[0], n) + teacher_w2.shape[2:])
<add> elif init == 'net2wider':
<add> index = np.random.randint(teacher_w1.shape[0], size=n)
<add> factors = np.bincount(index)[index] + 1.
<add> new_w1 = teacher_w1[index, :, :, :]
<add> new_b1 = teacher_b1[index]
<add> new_w2 = teacher_w2[:, index, :, :] / factors.reshape((1, -1, 1, 1))
<add> else:
<add> raise ValueError('Unsupported weight initializer: %s' % init)
<add>
<add> student_w1 = np.concatenate((teacher_w1, new_w1), axis=0)
<add> if init == 'random-pad':
<add> student_w2 = np.concatenate((teacher_w2, new_w2), axis=1)
<add> elif init == 'net2wider':
<add> # add small noise to break symmetry, so that student model will have
<add> # full capacity later
<add> noise = np.random.normal(0, 5e-2 * new_w2.std(), size=new_w2.shape)
<add> student_w2 = np.concatenate((teacher_w2, new_w2 + noise), axis=1)
<add> student_w2[:, index, :, :] = new_w2
<add> student_b1 = np.concatenate((teacher_b1, new_b1), axis=0)
<add>
<add> return student_w1, student_b1, student_w2
<add>
<add>
<add>def wider2net_fc(teacher_w1, teacher_b1, teacher_w2, new_width, init):
<add> '''Get initial weights for a wider fully connected (dense) layer
<add> with a bigger nout, by 'random-padding' or 'net2wider'.
<add>
<add> # Arguments
<add> teacher_w1: `weight` of fc layer to become wider,
<add> of shape (nin1, nout1)
<add> teacher_b1: `bias` of fc layer to become wider,
<add> of shape (nout1, )
<add> teacher_w2: `weight` of next connected fc layer,
<add> of shape (nin2, nout2)
<add> new_width: new `nout` for the wider fc layer
<add> init: initialization algorithm for new weights,
<add> either 'random-pad' or 'net2wider'
<add> '''
<add> assert teacher_w1.shape[1] == teacher_w2.shape[0], (
<add> 'successive layers from teacher model should have compatible shapes')
<add> assert teacher_w1.shape[1] == teacher_b1.shape[0], (
<add> 'weight and bias from same layer should have compatible shapes')
<add> assert new_width > teacher_w1.shape[1], (
<add> 'new width (nout) should be bigger than the existing one')
<add>
<add> n = new_width - teacher_w1.shape[1]
<add> if init == 'random-pad':
<add> new_w1 = np.random.normal(0, 0.1, size=(teacher_w1.shape[0], n))
<add> new_b1 = np.ones(n) * 0.1
<add> new_w2 = np.random.normal(0, 0.1, size=(n, teacher_w2.shape[1]))
<add> elif init == 'net2wider':
<add> index = np.random.randint(teacher_w1.shape[1], size=n)
<add> factors = np.bincount(index)[index] + 1.
<add> new_w1 = teacher_w1[:, index]
<add> new_b1 = teacher_b1[index]
<add> new_w2 = teacher_w2[index, :] / factors[:, np.newaxis]
<add> else:
<add> raise ValueError('Unsupported weight initializer: %s' % init)
<add>
<add> student_w1 = np.concatenate((teacher_w1, new_w1), axis=1)
<add> if init == 'random-pad':
<add> student_w2 = np.concatenate((teacher_w2, new_w2), axis=0)
<add> elif init == 'net2wider':
<add> # add small noise to break symmetry, so that student model will have
<add> # full capacity later
<add> noise = np.random.normal(0, 5e-2 * new_w2.std(), size=new_w2.shape)
<add> student_w2 = np.concatenate((teacher_w2, new_w2 + noise), axis=0)
<add> student_w2[index, :] = new_w2
<add> student_b1 = np.concatenate((teacher_b1, new_b1), axis=0)
<add>
<add> return student_w1, student_b1, student_w2
<add>
<add>
<add>def deeper2net_conv2d(teacher_w):
<add> '''Get initial weights for a deeper conv2d layer by net2deeper'.
<add>
<add> # Arguments
<add> teacher_w: `weight` of previous conv2d layer,
<add> of shape (nb_filter, nb_channel, kh, kw)
<add> '''
<add> nb_filter, nb_channel, kh, kw = teacher_w.shape
<add> student_w = np.zeros((nb_filter, nb_filter, kh, kw))
<add>    for i in range(nb_filter):
<add>        student_w[i, i, (kh - 1) // 2, (kw - 1) // 2] = 1.
<add> student_b = np.zeros(nb_filter)
<add> return student_w, student_b
<add>
<add>
<add>def copy_weights(teacher_model, student_model, layer_names):
<add> '''Copy weights from teacher_model to student_model,
<add> for layers with names listed in layer_names
<add> '''
<add> for name in layer_names:
<add> weights = teacher_model.get_layer(name=name).get_weights()
<add> student_model.get_layer(name=name).set_weights(weights)
<add>
<add>
<add># methods to construct teacher_model and student_models
<add>def make_teacher_model(train_data, validation_data, nb_epoch=3):
<add> '''Train a simple CNN as teacher model.
<add> '''
<add> model = Sequential()
<add> model.add(Conv2D(64, 3, 3, input_shape=input_shape,
<add> border_mode='same', name='conv1'))
<add> model.add(MaxPooling2D(name='pool1'))
<add> model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
<add> model.add(MaxPooling2D(name='pool2'))
<add> model.add(Flatten(name='flatten'))
<add> model.add(Dense(64, activation='relu', name='fc1'))
<add> model.add(Dense(nb_class, activation='softmax', name='fc2'))
<add> model.compile(loss='categorical_crossentropy',
<add> optimizer=SGD(lr=0.01, momentum=0.9),
<add> metrics=['accuracy'])
<add>
<add> train_x, train_y = train_data
<add> history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
<add> validation_data=validation_data)
<add> return model, history
<add>
<add>
<add>def make_wider_student_model(teacher_model, train_data,
<add> validation_data, init, nb_epoch=3):
<add> '''Train a wider student model based on teacher_model,
<add> with either 'random-pad' (baseline) or 'net2wider'
<add> '''
<add> new_conv1_width = 128
<add> new_fc1_width = 128
<add>
<add> model = Sequential()
<add> # a wider conv1 compared to teacher_model
<add> model.add(Conv2D(new_conv1_width, 3, 3, input_shape=input_shape,
<add> border_mode='same', name='conv1'))
<add> model.add(MaxPooling2D(name='pool1'))
<add> model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
<add> model.add(MaxPooling2D(name='pool2'))
<add> model.add(Flatten(name='flatten'))
<add> # a wider fc1 compared to teacher model
<add> model.add(Dense(new_fc1_width, activation='relu', name='fc1'))
<add> model.add(Dense(nb_class, activation='softmax', name='fc2'))
<add>
<add> # The weights for other layers need to be copied from teacher_model
<add> # to student_model, except for widened layers
<add> # and their immediate downstreams, which will be initialized separately.
<add> # For this example there are no other layers that need to be copied.
<add>
<add> w_conv1, b_conv1 = teacher_model.get_layer('conv1').get_weights()
<add> w_conv2, b_conv2 = teacher_model.get_layer('conv2').get_weights()
<add> new_w_conv1, new_b_conv1, new_w_conv2 = wider2net_conv2d(
<add> w_conv1, b_conv1, w_conv2, new_conv1_width, init)
<add> model.get_layer('conv1').set_weights([new_w_conv1, new_b_conv1])
<add> model.get_layer('conv2').set_weights([new_w_conv2, b_conv2])
<add>
<add> w_fc1, b_fc1 = teacher_model.get_layer('fc1').get_weights()
<add> w_fc2, b_fc2 = teacher_model.get_layer('fc2').get_weights()
<add> new_w_fc1, new_b_fc1, new_w_fc2 = wider2net_fc(
<add> w_fc1, b_fc1, w_fc2, new_fc1_width, init)
<add> model.get_layer('fc1').set_weights([new_w_fc1, new_b_fc1])
<add> model.get_layer('fc2').set_weights([new_w_fc2, b_fc2])
<add>
<add> model.compile(loss='categorical_crossentropy',
<add> optimizer=SGD(lr=0.001, momentum=0.9),
<add> metrics=['accuracy'])
<add>
<add> train_x, train_y = train_data
<add> history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
<add> validation_data=validation_data)
<add> return model, history
<add>
<add>
<add>def make_deeper_student_model(teacher_model, train_data,
<add> validation_data, init, nb_epoch=3):
<add> '''Train a deeper student model based on teacher_model,
<add> with either 'random-init' (baseline) or 'net2deeper'
<add> '''
<add> model = Sequential()
<add> model.add(Conv2D(64, 3, 3, input_shape=input_shape,
<add> border_mode='same', name='conv1'))
<add> model.add(MaxPooling2D(name='pool1'))
<add> model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
<add> # add another conv2d layer to make original conv2 deeper
<add> if init == 'net2deeper':
<add> prev_w, _ = model.get_layer('conv2').get_weights()
<add> new_weights = deeper2net_conv2d(prev_w)
<add> model.add(Conv2D(64, 3, 3, border_mode='same',
<add> name='conv2-deeper', weights=new_weights))
<add> elif init == 'random-init':
<add> model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2-deeper'))
<add> else:
<add> raise ValueError('Unsupported weight initializer: %s' % init)
<add> model.add(MaxPooling2D(name='pool2'))
<add> model.add(Flatten(name='flatten'))
<add> model.add(Dense(64, activation='relu', name='fc1'))
<add> # add another fc layer to make original fc1 deeper
<add> if init == 'net2deeper':
<add> # net2deeper for fc layer with relu, is just an identity initializer
<add> model.add(Dense(64, init='identity',
<add> activation='relu', name='fc1-deeper'))
<add> elif init == 'random-init':
<add> model.add(Dense(64, activation='relu', name='fc1-deeper'))
<add> else:
<add> raise ValueError('Unsupported weight initializer: %s' % init)
<add> model.add(Dense(nb_class, activation='softmax', name='fc2'))
<add>
<add> # copy weights for other layers
<add> copy_weights(teacher_model, model, layer_names=[
<add> 'conv1', 'conv2', 'fc1', 'fc2'])
<add>
<add> model.compile(loss='categorical_crossentropy',
<add> optimizer=SGD(lr=0.001, momentum=0.9),
<add> metrics=['accuracy'])
<add>
<add> train_x, train_y = train_data
<add> history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
<add> validation_data=validation_data)
<add> return model, history
<add>
<add>
<add># experiments setup
<add>def net2wider_experiment():
<add> '''Benchmark performances of
<add> (1) a teacher model,
<add> (2) a wider student model with `random_pad` initializer
<add> (3) a wider student model with `Net2WiderNet` initializer
<add> '''
<add> train_data = (train_x, train_y)
<add> validation_data = (validation_x, validation_y)
<add> print('\nExperiment of Net2WiderNet ...')
<add> print('\nbuilding teacher model ...')
<add> teacher_model, _ = make_teacher_model(train_data,
<add> validation_data,
<add> nb_epoch=3)
<add>
<add> print('\nbuilding wider student model by random padding ...')
<add> make_wider_student_model(teacher_model, train_data,
<add> validation_data, 'random-pad',
<add> nb_epoch=3)
<add> print('\nbuilding wider student model by net2wider ...')
<add> make_wider_student_model(teacher_model, train_data,
<add> validation_data, 'net2wider',
<add> nb_epoch=3)
<add>
<add>
<add>def net2deeper_experiment():
<add> '''Benchmark performances of
<add> (1) a teacher model,
<add> (2) a deeper student model with `random_init` initializer
<add> (3) a deeper student model with `Net2DeeperNet` initializer
<add> '''
<add> train_data = (train_x, train_y)
<add> validation_data = (validation_x, validation_y)
<add> print('\nExperiment of Net2DeeperNet ...')
<add> print('\nbuilding teacher model ...')
<add> teacher_model, _ = make_teacher_model(train_data,
<add> validation_data,
<add> nb_epoch=3)
<add>
<add> print('\nbuilding deeper student model by random init ...')
<add> make_deeper_student_model(teacher_model, train_data,
<add> validation_data, 'random-init',
<add> nb_epoch=3)
<add> print('\nbuilding deeper student model by net2deeper ...')
<add> make_deeper_student_model(teacher_model, train_data,
<add> validation_data, 'net2deeper',
<add> nb_epoch=3)
<add>
<add># run the experiments
<add>net2wider_experiment()
<add>net2deeper_experiment() | 1 |
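The function-preserving claim behind `wider2net_fc` is easy to verify numerically. This standalone check re-implements the widening without the symmetry-breaking noise, so teacher and student outputs should match to float precision (a sketch of the idea, not the example's exact code path):

```python
import numpy as np

rng = np.random.default_rng(0)
nin, nout1, nout2, n_new = 5, 4, 3, 2
w1 = rng.normal(size=(nin, nout1))
b1 = rng.normal(size=nout1)
w2 = rng.normal(size=(nout1, nout2))

idx = rng.integers(nout1, size=n_new)             # units to replicate
factors = np.bincount(idx, minlength=nout1)[idx] + 1.0

sw1 = np.concatenate([w1, w1[:, idx]], axis=1)    # copy incoming weights
sb1 = np.concatenate([b1, b1[idx]])
sw2 = w2.copy()
sw2[idx, :] = w2[idx, :] / factors[:, None]       # rescale original rows
sw2 = np.concatenate([sw2, w2[idx, :] / factors[:, None]], axis=0)

x = rng.normal(size=(8, nin))
relu = lambda v: np.maximum(v, 0.0)
teacher = relu(x @ w1 + b1) @ w2
student = relu(x @ sw1 + sb1) @ sw2
assert np.allclose(teacher, student)              # outputs are preserved
print("function preserved")
```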
Python | Python | remove old example | c30258c3a2635e21f6e6f3c8ed7cb314a431794e | <ide><path>examples/training/train_ner_standalone.py
<del>#!/usr/bin/env python
<del>'''Example of training a named entity recognition system from scratch using spaCy
<del>
<del>This example is written to be self-contained and reasonably transparent.
<del>To achieve that, it duplicates some of spaCy's internal functionality.
<del>
<del>Specifically, in this example, we don't use spaCy's built-in Language class to
<del>wire together the Vocab, Tokenizer and EntityRecognizer. Instead, we write
<del>our own simple Pipeline class, so that it's easier to see how the pieces
<del>interact.
<del>
<del>Input data:
<del>https://www.lt.informatik.tu-darmstadt.de/fileadmin/user_upload/Group_LangTech/data/GermEval2014_complete_data.zip
<del>
<del>Developed for: spaCy 1.7.1
<del>Last tested for: spaCy 2.0.0a13
<del>'''
<del>from __future__ import unicode_literals, print_function
<del>import plac
<del>from pathlib import Path
<del>import random
<del>import json
<del>import tqdm
<del>
<del>from thinc.neural.optimizers import Adam
<del>from thinc.neural.ops import NumpyOps
<del>
<del>from spacy.vocab import Vocab
<del>from spacy.pipeline import TokenVectorEncoder, NeuralEntityRecognizer
<del>from spacy.tokenizer import Tokenizer
<del>from spacy.tokens import Doc
<del>from spacy.attrs import *
<del>from spacy.gold import GoldParse
<del>from spacy.gold import iob_to_biluo
<del>from spacy.gold import minibatch
<del>from spacy.scorer import Scorer
<del>import spacy.util
<del>
<del>
<del>try:
<del> unicode
<del>except NameError:
<del> unicode = str
<del>
<del>
<del>spacy.util.set_env_log(True)
<del>
<del>
<del>def init_vocab():
<del> return Vocab(
<del> lex_attr_getters={
<del> LOWER: lambda string: string.lower(),
<del> NORM: lambda string: string.lower(),
<del> PREFIX: lambda string: string[0],
<del> SUFFIX: lambda string: string[-3:],
<del> })
<del>
<del>
<del>class Pipeline(object):
<del> def __init__(self, vocab=None, tokenizer=None, entity=None):
<del> if vocab is None:
<del> vocab = init_vocab()
<del> if tokenizer is None:
<del> tokenizer = Tokenizer(vocab, {}, None, None, None)
<del> if entity is None:
<del> entity = NeuralEntityRecognizer(vocab)
<del> self.vocab = vocab
<del> self.tokenizer = tokenizer
<del> self.entity = entity
<del> self.pipeline = [self.entity]
<del>
<del> def begin_training(self):
<del> for model in self.pipeline:
<del> model.begin_training([])
<del> optimizer = Adam(NumpyOps(), 0.001)
<del> return optimizer
<del>
<del> def __call__(self, input_):
<del> doc = self.make_doc(input_)
<del> for process in self.pipeline:
<del> process(doc)
<del> return doc
<del>
<del> def make_doc(self, input_):
<del> if isinstance(input_, bytes):
<del> input_ = input_.decode('utf8')
<del> if isinstance(input_, unicode):
<del> return self.tokenizer(input_)
<del> else:
<del> return Doc(self.vocab, words=input_)
<del>
<del> def make_gold(self, input_, annotations):
<del> doc = self.make_doc(input_)
<del> gold = GoldParse(doc, entities=annotations)
<del> return gold
<del>
<del> def update(self, inputs, annots, sgd, losses=None, drop=0.):
<del> if losses is None:
<del> losses = {}
<del> docs = [self.make_doc(input_) for input_ in inputs]
<del> golds = [self.make_gold(input_, annot) for input_, annot in
<del> zip(inputs, annots)]
<del>
<del> self.entity.update(docs, golds, drop=drop,
<del> sgd=sgd, losses=losses)
<del> return losses
<del>
<del> def evaluate(self, examples):
<del> scorer = Scorer()
<del> for input_, annot in examples:
<del> gold = self.make_gold(input_, annot)
<del> doc = self(input_)
<del> scorer.score(doc, gold)
<del> return scorer.scores
<del>
<del> def to_disk(self, path):
<del> path = Path(path)
<del> if not path.exists():
<del> path.mkdir()
<del> elif not path.is_dir():
<del> raise IOError("Can't save pipeline to %s\nNot a directory" % path)
<del> self.vocab.to_disk(path / 'vocab')
<del> self.entity.to_disk(path / 'ner')
<del>
<del> def from_disk(self, path):
<del> path = Path(path)
<del> if not path.exists():
<del> raise IOError("Cannot load pipeline from %s\nDoes not exist" % path)
<del> if not path.is_dir():
<del> raise IOError("Cannot load pipeline from %s\nNot a directory" % path)
<del> self.vocab = self.vocab.from_disk(path / 'vocab')
<del> self.entity = self.entity.from_disk(path / 'ner')
<del>
<del>
<del>def train(nlp, train_examples, dev_examples, nr_epoch=5):
<del> sgd = nlp.begin_training()
<del> print("Iter", "Loss", "P", "R", "F")
<del> for i in range(nr_epoch):
<del> random.shuffle(train_examples)
<del> losses = {}
<del> for batch in minibatch(tqdm.tqdm(train_examples, leave=False), size=8):
<del> inputs, annots = zip(*batch)
<del> nlp.update(list(inputs), list(annots), sgd, losses=losses)
<del> scores = nlp.evaluate(dev_examples)
<del> report_scores(i+1, losses['ner'], scores)
<del>
<del>
<del>def report_scores(i, loss, scores):
<del> precision = '%.2f' % scores['ents_p']
<del> recall = '%.2f' % scores['ents_r']
<del> f_measure = '%.2f' % scores['ents_f']
<del> print('Epoch %d: %d %s %s %s' % (
<del> i, int(loss), precision, recall, f_measure))
<del>
<del>
<del>def read_examples(path):
<del> path = Path(path)
<del> with path.open() as file_:
<del> sents = file_.read().strip().split('\n\n')
<del> for sent in sents:
<del> sent = sent.strip()
<del> if not sent:
<del> continue
<del> tokens = sent.split('\n')
<del> while tokens and tokens[0].startswith('#'):
<del> tokens.pop(0)
<del> words = []
<del> iob = []
<del> for token in tokens:
<del> if token.strip():
<del> pieces = token.split('\t')
<del> words.append(pieces[1])
<del> iob.append(pieces[2])
<del> yield words, iob_to_biluo(iob)
<del>
<del>
<del>def get_labels(examples):
<del> labels = set()
<del> for words, tags in examples:
<del> for tag in tags:
<del> if '-' in tag:
<del> labels.add(tag.split('-')[1])
<del> return sorted(labels)
<del>
<del>
<del>@plac.annotations(
<del> model_dir=("Path to save the model", "positional", None, Path),
<del> train_loc=("Path to your training data", "positional", None, Path),
<del> dev_loc=("Path to your development data", "positional", None, Path),
<del>)
<del>def main(model_dir, train_loc, dev_loc, nr_epoch=30):
<del> print(model_dir, train_loc, dev_loc)
<del> train_examples = list(read_examples(train_loc))
<del> dev_examples = read_examples(dev_loc)
<del> nlp = Pipeline()
<del> for label in get_labels(train_examples):
<del> nlp.entity.add_label(label)
<del> print("Add label", label)
<del>
<del> train(nlp, train_examples, list(dev_examples), nr_epoch)
<del>
<del> nlp.to_disk(model_dir)
<del>
<del>
<del>if __name__ == '__main__':
<del> plac.call(main) | 1 |
Ruby | Ruby | require info controller from info routes | 14b7452c578b8a1594daa8ae87fa26530601669c | <ide><path>railties/lib/rails/info_controller.rb
<add>require 'rails/info'
<add>
<ide> class Rails::InfoController < ActionController::Base
<ide> def properties
<ide> if consider_all_requests_local? || local_request?
<ide><path>railties/lib/rails/info_routes.rb
<add>require 'rails/info_controller'
<add>
<ide> Rails.application.routes.draw do |map|
<ide> match '/rails/info/properties' => "rails/info#properties"
<ide> end | 2 |
Ruby | Ruby | fix connected_to_many argument and docs | 7811bc5e17528ade61b7dfcdf445bb3ecf6e11d6 | <ide><path>activerecord/lib/active_record/connection_handling.rb
<ide> def connected_to(database: nil, role: nil, shard: nil, prevent_writes: false, &b
<ide> #
<ide> # Usage:
<ide> #
<del> # ActiveRecord::Base.connected_to(AnimalsRecord, MealsRecord], role: :reading) do
<add> # ActiveRecord::Base.connected_to_many(AnimalsRecord, MealsRecord, role: :reading) do
<ide> # Dog.first # Read from animals replica
<ide> # Dinner.first # Read from meals replica
<ide> # Person.first # Read from primary writer
<ide> # end
<del> def connected_to_many(classes, role:, shard: nil, prevent_writes: false)
<add> def connected_to_many(*classes, role:, shard: nil, prevent_writes: false)
<add> classes = classes.flatten
<add>
<ide> if legacy_connection_handling
<ide> raise NotImplementedError, "connected_to_many is not available with legacy connection handling"
<ide> end
<ide><path>activerecord/test/cases/base_test.rb
<ide> def test_protected_environments_are_stored_as_an_array_of_string
<ide> assert_not ActiveRecord::Base.current_preventing_writes
<ide> end
<ide> end
<add>
<add> test "#connected_to_many with a single argument for classes" do
<add> ActiveRecord::Base.connected_to_many(AbstractCompany, role: :reading) do
<add> assert AbstractCompany.current_preventing_writes
<add> assert_not ActiveRecord::Base.current_preventing_writes
<add> end
<add> end
<add>
<add> test "#connected_to_many with a multiple classes without brackets works" do
<add> ActiveRecord::Base.connected_to_many(AbstractCompany, FirstAbstractClass, role: :reading) do
<add> assert AbstractCompany.current_preventing_writes
<add> assert FirstAbstractClass.current_preventing_writes
<add> assert_not ActiveRecord::Base.current_preventing_writes
<add> end
<add> end
<ide> end | 2 |
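For readers outside Ruby, the splat-plus-flatten fix above has a direct analogue in other languages. Below is a minimal Python sketch (names hypothetical, not Rails code) showing how one signature can accept both call styles the new tests exercise:

```python
def connected_to_many(*classes, role):
    """Accept both connected_to_many(A, B, role=...) and
    connected_to_many([A, B], role=...), mirroring *classes + flatten."""
    flat = []
    for entry in classes:
        if isinstance(entry, (list, tuple)):
            flat.extend(entry)   # a bracketed group of classes
        else:
            flat.append(entry)   # a single class passed positionally
    return flat, role

assert connected_to_many("A", "B", role="reading") == (["A", "B"], "reading")
assert connected_to_many(["A", "B"], role="reading") == (["A", "B"], "reading")
```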
Java | Java | remove unused import | 82d32f08758e14a39830a7e6bb0d0a6be1b9648c | <ide><path>spring-web/src/test/java/org/springframework/http/codec/multipart/MultipartHttpMessageWriterTests.java
<ide> /*
<del> * Copyright 2002-2018 the original author or authors.
<add> * Copyright 2002-2019 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> import java.util.List;
<ide> import java.util.Map;
<ide>
<del>import org.junit.Before;
<ide> import org.junit.Test;
<del>import org.reactivestreams.Publisher;
<ide> import reactor.core.publisher.Flux;
<ide> import reactor.core.publisher.Mono;
<ide> import reactor.core.publisher.UnicastProcessor;
<ide> public class MultipartHttpMessageWriterTests extends AbstractLeakCheckingTestCas
<ide> private final MultipartHttpMessageWriter writer =
<ide> new MultipartHttpMessageWriter(ClientCodecConfigurer.create().getWriters());
<ide>
<del> private MockServerHttpResponse response;
<del>
<del>
<del> @Before
<del> public void setUp() {
<del> this.response = new MockServerHttpResponse(this.bufferFactory);
<del> }
<add> private final MockServerHttpResponse response = new MockServerHttpResponse(this.bufferFactory);
<ide>
<ide>
<ide> @Test
<ide> public void canWrite() {
<ide>
<ide> @Test
<ide> public void writeMultipart() throws Exception {
<del>
<ide> Resource logo = new ClassPathResource("/org/springframework/http/converter/logo.jpg");
<ide> Resource utf8 = new ClassPathResource("/org/springframework/http/converter/logo.jpg") {
<ide> @Override
<ide> private String decodeToString(Part part) {
<ide> Collections.emptyMap()).block(Duration.ZERO);
<ide> }
<ide>
<del> @Test // SPR-16402
<add> @Test // SPR-16402
<ide> public void singleSubscriberWithResource() throws IOException {
<ide> UnicastProcessor<Resource> processor = UnicastProcessor.create();
<ide> Resource logo = new ClassPathResource("/org/springframework/http/converter/logo.jpg");
<ide> public void singleSubscriberWithStrings() {
<ide> this.response.getBodyAsString().block(Duration.ofSeconds(5));
<ide> }
<ide>
<del> @Test // SPR-16376
<add> @Test // SPR-16376
<ide> public void customContentDisposition() throws IOException {
<ide> Resource logo = new ClassPathResource("/org/springframework/http/converter/logo.jpg");
<ide> Flux<DataBuffer> buffers = DataBufferUtils.read(logo, new DefaultDataBufferFactory(), 1024); | 1 |
Ruby | Ruby | use the native `class#descendants` if available | d8936b755debd45a0fc0db14655de716f249bdf8 | <ide><path>activesupport/lib/active_support/core_ext/class/subclasses.rb
<ide> def descendants
<ide> ObjectSpace.each_object(singleton_class).reject do |k|
<ide> k.singleton_class? || k == self
<ide> end
<del> end
<add> end unless method_defined?(:descendants) # RUBY_VERSION >= "3.1"
<ide>
<ide> # Returns an array with the direct children of +self+.
<ide> # | 1 |
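The guard above, defining the slow fallback only when the runtime lacks a native implementation, is a reusable pattern. A rough Python equivalent for illustration only (`_all_descendants` is a hypothetical helper built on `__subclasses__`):

```python
def _all_descendants(cls):
    """Fallback: walk __subclasses__ recursively to collect every descendant."""
    found = []
    for sub in cls.__subclasses__():
        found.append(sub)
        found.extend(_all_descendants(sub))
    return found

# Install the fallback only when no native implementation exists,
# just like `unless method_defined?(:descendants)` in the patch.
all_descendants = getattr(type, "descendants", None) or _all_descendants
```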
Ruby | Ruby | adapt output to adjust for single-formula queries | 2822a7b0c3276fcab8335374bc305d048c10feba | <ide><path>Library/Homebrew/dev-cmd/bump.rb
<ide> def bump
<ide> Repology.parse_api_response
<ide> end
<ide>
<add> if requested_formula && outdated_repology_packages.nil?
<add> ohai "The requested formula, #{requested_formula}, is up-to-date."
<add> puts "Current version: #{get_formula_details(requested_formula).version}"
<add> return
<add> end
<add>
<ide> outdated_packages = validate_and_format_packages(outdated_repology_packages)
<ide> display(outdated_packages)
<ide> end
<ide>
<ide> def validate_and_format_packages(outdated_repology_packages)
<del> ohai "Verifying outdated repology packages as Homebrew formulae"
<add> ohai "Verifying outdated repology #{"package".pluralize(outdated_repology_packages.size)} " \
<add> "as Homebrew #{"formula".pluralize(outdated_repology_packages.size)}"
<ide>
<ide> packages = {}
<ide> outdated_repology_packages.each do |_name, repositories|
<ide> def parse_livecheck_response(response)
<ide> end
<ide>
<ide> def display(outdated_packages)
<del> ohai "Outdated formulae"
<add> ohai "Outdated #{"formula".pluralize(outdated_packages.size)}"
<ide> puts
<ide> outdated_packages.each do |formula, package_details|
<ide> ohai formula
<ide><path>Library/Homebrew/utils/repology.rb
<ide> def single_package_query(name)
<ide> url = "https://repology.org/api/v1/project/#{name}"
<ide>
<ide> output, _errors, _status = curl_output(url.to_s)
<del> { name: JSON.parse(output) }
<add> data = JSON.parse(output)
<add>
<add> outdated_homebrew = data.select do |repo|
<add> repo["repo"] == "homebrew" && repo["status"] == "outdated"
<add> end
<add>
<add> outdated_homebrew.empty? ? nil : { name: data }
<ide> end
<ide>
<ide> def parse_api_response | 2 |
PHP | PHP | move cachedispatcher tests into their own file | a33ee0c499b28a8d64f8c8900132f867439d1037 | <ide><path>tests/TestCase/Routing/DispatcherTest.php
<ide> */
<ide> namespace Cake\Test\TestCase\Routing;
<ide>
<del>use Cake\Cache\Cache;
<ide> use Cake\Controller\Controller;
<ide> use Cake\Controller\Error\MissingActionException;
<ide> use Cake\Controller\Error\MissingControllerException;
<ide> public function setUp() {
<ide> Configure::write('App.webroot', 'webroot');
<ide> Configure::write('App.namespace', 'TestApp');
<ide>
<del> Cache::disable();
<del>
<ide> App::objects('Plugin', null, false);
<ide> }
<ide>
<ide> public function testMissingControllerAbstract() {
<ide> ]
<ide> ]);
<ide> $response = $this->getMock('Cake\Network\Response');
<del>
<ide> $Dispatcher->dispatch($request, $response, array('return' => 1));
<ide> }
<ide>
<ide> public function testChangingParamsFromBeforeFilter() {
<ide> $expected = array('changed');
<ide> $this->assertSame($expected, $url->params['pass']);
<ide> }
<del>
<del>/**
<del> * Data provider for cached actions.
<del> *
<del> * - Test simple views
<del> * - Test views with nocache tags
<del> * - Test requests with named + passed params.
<del> * - Test requests with query string params
<del> * - Test themed views.
<del> *
<del> * @return array
<del> */
<del> public static function cacheActionProvider() {
<del> return array(
<del> array('/'),
<del> array('test_cached_pages/index'),
<del> array('TestCachedPages/index'),
<del> array('test_cached_pages/test_nocache_tags'),
<del> array('TestCachedPages/test_nocache_tags'),
<del> array('test_cached_pages/view/param/param'),
<del> array('test_cached_pages/view?q=cakephp'),
<del> array('test_cached_pages/themed'),
<del> );
<del> }
<del>
<del>/**
<del> * testFullPageCachingDispatch method
<del> *
<del> * @dataProvider cacheActionProvider
<del> * @return void
<del> */
<del> public function testFullPageCachingDispatch($url) {
<del> $this->markTestIncomplete();
<del> Cache::enable();
<del> Configure::write('Cache.disable', false);
<del> Configure::write('Cache.check', true);
<del> Configure::write('debug', true);
<del>
<del> Router::reload();
<del> Router::connect('/', array('controller' => 'test_cached_pages', 'action' => 'index'));
<del> Router::connect('/:controller/:action/*');
<del>
<del> $dispatcher = new TestDispatcher();
<del> $request = new Request($url);
<del> $response = $this->getMock('Cake\Network\Response', array('send'));
<del>
<del> $dispatcher->dispatch($request, $response);
<del> $out = $response->body();
<del>
<del> Configure::write('Dispatcher.filters', array('CacheDispatcher'));
<del> $request = new Request($url);
<del> $response = $this->getMock('Cake\Network\Response', array('send'));
<del> $dispatcher = new TestDispatcher();
<del> $dispatcher->dispatch($request, $response);
<del> $cached = $response->body();
<del>
<del> $cached = preg_replace('/<!--+[^<>]+-->/', '', $cached);
<del>
<del> $this->assertTextEquals($out, $cached);
<del>
<del> $filename = $this->_cachePath($request->here());
<del> unlink($filename);
<del> }
<del>
<del>/**
<del> * testHttpMethodOverrides method
<del> *
<del> * @return void
<del> */
<del> public function testHttpMethodOverrides() {
<del> $this->markTestIncomplete();
<del> Router::reload();
<del> Router::mapResources('Posts');
<del>
<del> $dispatcher = new Dispatcher();
<del>
<del> $request = new Request([
<del> 'url' => '/posts',
<del> 'environment' => ['REQUEST_METHOD' => 'POST']
<del> ]);
<del> $event = new Event(__CLASS__, $dispatcher, array('request' => $request));
<del> $dispatcher->parseParams($event);
<del> $expected = array(
<del> 'pass' => [],
<del> 'plugin' => null,
<del> 'controller' => 'posts',
<del> 'action' => 'add',
<del> '[method]' => 'POST'
<del> );
<del> foreach ($expected as $key => $value) {
<del> $this->assertEquals($value, $request[$key], 'Value mismatch for ' . $key . ' %s');
<del> }
<del>
<del> $request = new Request([
<del> 'url' => '/posts/5',
<del> 'environment' => [
<del> 'REQUEST_METHOD' => 'GET',
<del> 'HTTP_X_HTTP_METHOD_OVERRIDE' => 'PUT'
<del> ]
<del> ]);
<del> $event = new Event(__CLASS__, $dispatcher, array('request' => $request));
<del> $dispatcher->parseParams($event);
<del> $expected = array(
<del> 'pass' => array('5'),
<del> 'id' => '5',
<del> 'plugin' => null,
<del> 'controller' => 'posts',
<del> 'action' => 'edit',
<del> '[method]' => 'PUT'
<del> );
<del> foreach ($expected as $key => $value) {
<del> $this->assertEquals($value, $request[$key], 'Value mismatch for ' . $key . ' %s');
<del> }
<del>
<del> $request = new Request([
<del> 'url' => '/posts/5',
<del> 'environment' => [
<del> 'REQUEST_METHOD' => 'GET'
<del> ]
<del> ]);
<del> $event = new Event(__CLASS__, $dispatcher, array('request' => $request));
<del> $dispatcher->parseParams($event);
<del> $expected = array(
<del> 'pass' => array('5'),
<del> 'id' => '5',
<del> 'plugin' => null,
<del> 'controller' => 'posts',
<del> 'action' => 'view',
<del> '[method]' => 'GET'
<del> );
<del> foreach ($expected as $key => $value) {
<del> $this->assertEquals($value, $request[$key], 'Value mismatch for ' . $key . ' %s');
<del> }
<del>
<del> $request = new Request([
<del> 'url' => '/posts/5',
<del> 'post' => array('_method' => 'PUT')
<del> ]);
<del> $event = new Event(__CLASS__, $dispatcher, array('request' => $request));
<del> $dispatcher->parseParams($event);
<del> $expected = array(
<del> 'pass' => array('5'),
<del> 'id' => '5',
<del> 'plugin' => null,
<del> 'controller' => 'posts',
<del> 'action' => 'edit',
<del> '[method]' => 'PUT'
<del> );
<del> foreach ($expected as $key => $value) {
<del> $this->assertEquals($value, $request[$key], 'Value mismatch for ' . $key . ' %s');
<del> }
<del>
<del> $request = new Request(array(
<del> 'url' => '/posts',
<del> 'post' => array(
<del> '_method' => 'POST',
<del> 'Post' => array('title' => 'New Post'),
<del> 'extra' => 'data'
<del> ),
<del> ));
<del> $event = new Event(__CLASS__, $dispatcher, array('request' => $request));
<del> $dispatcher->parseParams($event);
<del> $expected = array(
<del> 'pass' => [],
<del> 'plugin' => null,
<del> 'controller' => 'posts',
<del> 'action' => 'add',
<del> '[method]' => 'POST',
<del> 'data' => array('extra' => 'data', 'Post' => array('title' => 'New Post')),
<del> );
<del> foreach ($expected as $key => $value) {
<del> $this->assertEquals($value, $request[$key], 'Value mismatch for ' . $key . ' %s');
<del> }
<del> }
<del>
<del>/**
<del> * cachePath method
<del> *
<del> * @param string $here
<del> * @return string
<del> */
<del> protected function _cachePath($here) {
<del> $path = $here;
<del> if ($here === '/') {
<del> $path = 'home';
<del> }
<del> $path = strtolower(Inflector::slug($path));
<del>
<del> $filename = CACHE . 'views/' . $path . '.php';
<del>
<del> if (!file_exists($filename)) {
<del> $filename = CACHE . 'views/' . $path . '_index.php';
<del> }
<del> return $filename;
<del> }
<ide> }
<ide><path>tests/TestCase/Routing/Filter/CacheDispatcherTest.php
<add><?php
<add>/**
<add> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<add> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> *
<add> * Licensed under The MIT License
<add> * For full copyright and license information, please see the LICENSE.txt
<add> * Redistributions of files must retain the above copyright notice.
<add> *
<add> * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> * @link http://book.cakephp.org/2.0/en/development/testing.html CakePHP(tm) Tests
<add> * @since 3.0.0
<add> * @license http://www.opensource.org/licenses/mit-license.php MIT License
<add> */
<add>namespace Cake\Test\TestCase\Routing;
<add>
<add>use Cake\Cache\Cache;
<add>use Cake\Core\Configure;
<add>use Cake\Network\Request;
<add>use Cake\Network\Response;
<add>use Cake\Routing\Dispatcher;
<add>use Cake\Routing\Filter\CacheDispatcher;
<add>use Cake\Routing\Filter\RoutingFilter;
<add>use Cake\Routing\Router;
<add>use Cake\TestSuite\TestCase;
<add>use Cake\Utility\Inflector;
<add>
<add>/**
<add> * CacheDispatcherTest class
<add> *
<add> */
<add>class CacheDispatcherTest extends TestCase {
<add>
<add>/**
<add> * setUp method
<add> *
<add> * @return void
<add> */
<add> public function setUp() {
<add> parent::setUp();
<add> $_GET = [];
<add>
<add> Configure::write('App.base', false);
<add> Configure::write('App.baseUrl', false);
<add> Configure::write('App.dir', 'app');
<add> Configure::write('App.webroot', 'webroot');
<add> Configure::write('App.namespace', 'TestApp');
<add> }
<add>
<add>/**
<add> * Data provider for cached actions.
<add> *
<add> * - Test simple views
<add> * - Test views with nocache tags
<add> * - Test requests with named + passed params.
<add> * - Test requests with query string params
<add> * - Test themed views.
<add> *
<add> * @return array
<add> */
<add> public static function cacheActionProvider() {
<add> return array(
<add> array('/'),
<add> array('test_cached_pages/index'),
<add> array('TestCachedPages/index'),
<add> array('test_cached_pages/test_nocache_tags'),
<add> array('TestCachedPages/test_nocache_tags'),
<add> array('test_cached_pages/view/param/param'),
<add> array('test_cached_pages/view?q=cakephp'),
<add> array('test_cached_pages/themed'),
<add> );
<add> }
<add>
<add>/**
<add> * testFullPageCachingDispatch method
<add> *
<add> * @dataProvider cacheActionProvider
<add> * @return void
<add> */
<add> public function testFullPageCachingDispatch($url) {
<add> Cache::enable();
<add> Configure::write('Cache.disable', false);
<add> Configure::write('Cache.check', true);
<add> Configure::write('debug', true);
<add>
<add> Router::reload();
<add> Router::connect('/', array('controller' => 'test_cached_pages', 'action' => 'index'));
<add> Router::connect('/:controller/:action/*');
<add>
<add> $dispatcher = new Dispatcher();
<add> $dispatcher->add(new RoutingFilter());
<add> $request = new Request($url);
<add> $response = $this->getMock('Cake\Network\Response', array('send'));
<add>
<add> $dispatcher->dispatch($request, $response);
<add> $out = $response->body();
<add>
<add> $request = new Request($url);
<add> $response = $this->getMock('Cake\Network\Response', array('send'));
<add> $dispatcher = new Dispatcher();
<add> $dispatcher->add(new RoutingFilter());
<add> $dispatcher->add(new CacheDispatcher());
<add> $dispatcher->dispatch($request, $response);
<add> $cached = $response->body();
<add>
<add> $cached = preg_replace('/<!--+[^<>]+-->/', '', $cached);
<add>
<add> $this->assertTextEquals($out, $cached);
<add>
<add> $filename = $this->_cachePath($request->here());
<add> unlink($filename);
<add> }
<add>
<add>/**
<add> * cachePath method
<add> *
<add> * @param string $here
<add> * @return string
<add> */
<add> protected function _cachePath($here) {
<add> $path = $here;
<add> if ($here === '/') {
<add> $path = 'home';
<add> }
<add> $path = strtolower(Inflector::slug($path));
<add>
<add> $filename = CACHE . 'views/' . $path . '.php';
<add>
<add> if (!file_exists($filename)) {
<add> $filename = CACHE . 'views/' . $path . '_index.php';
<add> }
<add> return $filename;
<add> }
<add>
<add>}
<ide><path>tests/TestCase/Routing/RouterTest.php
<ide> <?php
<ide> /**
<del> * RouterTest file
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> * | 3 |
Text | Text | add process.debugport to doc/api/process.md | 65ca369c070cefcaf6300a234de6448f3015e8e8 | <ide><path>doc/api/process.md
<ide> process.
<ide> ```js
<ide> console.log(`Current directory: ${process.cwd()}`);
<ide> ```
<add>## process.debugPort
<add><!-- YAML
<add>added: v0.7.2
<add>-->
<add>* {number}
<ide>
<add>The port used by Node.js's debugger when enabled.
<add>
<add>```js
<add>process.debugPort = 5858;
<add>```
<ide> ## process.disconnect()
<ide> <!-- YAML
<ide> added: v0.7.2 | 1 |
Mixed | Go | provide a new parameter dm.use_deferred_removal | 15c158b20725fd62e2ee0a72ffaf1617852cd0d9
<ide> Here is the list of supported options:
<ide> > Otherwise, set this flag for migrating existing Docker daemons to a
<ide> > daemon with a supported environment.
<ide>
<add> * `dm.use_deferred_removal`
<add>
<add> Enables use of deferred device removal if libdm and kernel driver
<add> support the mechanism.
<add>
<add>    Deferred device removal means that if a device is busy when it is
<add>    being removed/deactivated, a deferred removal is scheduled on the
<add>    device, and the device is removed automatically when its last user
<add>    exits.
<add>
<add>    For example, when a container exits, its associated thin device is
<add>    removed. If that device has leaked into some other mount namespace
<add>    and can't be removed right away, the container exit will still
<add>    succeed and this option will simply schedule the device for deferred
<add>    removal, instead of waiting in a loop trying to remove a busy device.
<add>
<add> Example use:
<add>
<add>    ``docker -d --storage-opt dm.use_deferred_removal=true``
<add>
<ide><path>daemon/graphdriver/devmapper/deviceset.go
<ide> var (
<ide> // We retry device removal so many a times that even error messages
<ide> // will fill up console during normal operation. So only log Fatal
<ide> // messages by default.
<del> DMLogLevel int = devicemapper.LogLevelFatal
<add> DMLogLevel int = devicemapper.LogLevelFatal
<add> DriverDeferredRemovalSupport bool = false
<add> EnableDeferredRemoval bool = false
<ide> )
<ide>
<ide> const deviceSetMetaFile string = "deviceset-metadata"
<ide> type DeviceSet struct {
<ide> thinPoolDevice string
<ide> Transaction `json:"-"`
<ide> overrideUdevSyncCheck bool
<add> deferredRemove bool // use deferred removal
<ide> }
<ide>
<ide> type DiskUsage struct {
<ide> func (devices *DeviceSet) closeTransaction() error {
<ide> return nil
<ide> }
<ide>
<add>func determineDriverCapabilities(version string) error {
<add> /*
<add>	 * Driver version 4.27.0 and greater support the deferred
<add>	 * removal feature.
<add> */
<add>
<add> logrus.Debugf("devicemapper: driver version is %s", version)
<add>
<add> versionSplit := strings.Split(version, ".")
<add> major, err := strconv.Atoi(versionSplit[0])
<add> if err != nil {
<add> return graphdriver.ErrNotSupported
<add> }
<add>
<add> if major > 4 {
<add> DriverDeferredRemovalSupport = true
<add> return nil
<add> }
<add>
<add> if major < 4 {
<add> return nil
<add> }
<add>
<add> minor, err := strconv.Atoi(versionSplit[1])
<add> if err != nil {
<add> return graphdriver.ErrNotSupported
<add> }
<add>
<add> /*
<add> * If major is 4 and minor is 27, then there is no need to
<add> * check for patch level as it can not be less than 0.
<add> */
<add> if minor >= 27 {
<add> DriverDeferredRemovalSupport = true
<add> return nil
<add> }
<add>
<add> return nil
<add>}
<add>
<ide> func (devices *DeviceSet) initDevmapper(doInit bool) error {
<ide> // give ourselves to libdm as a log handler
<ide> devicemapper.LogInit(devices)
<ide>
<del> _, err := devicemapper.GetDriverVersion()
<add> version, err := devicemapper.GetDriverVersion()
<ide> if err != nil {
<ide> // Can't even get driver version, assume not supported
<ide> return graphdriver.ErrNotSupported
<ide> }
<ide>
<add> if err := determineDriverCapabilities(version); err != nil {
<add> return graphdriver.ErrNotSupported
<add> }
<add>
<add> // If user asked for deferred removal and both library and driver
<add> // supports deferred removal use it.
<add>	if EnableDeferredRemoval && DriverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport {
<add> logrus.Debugf("devmapper: Deferred removal support enabled.")
<add> devices.deferredRemove = true
<add> }
<add>
<ide> // https://github.com/docker/docker/issues/4036
<ide> if supported := devicemapper.UdevSetSyncSupport(true); !supported {
<ide> logrus.Errorf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. For more information, see https://docs.docker.com/reference/commandline/cli/#daemon-storage-driver-option")
<ide> func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
<ide> if err != nil {
<ide> return nil, err
<ide> }
<add>
<add> case "dm.use_deferred_removal":
<add> EnableDeferredRemoval, err = strconv.ParseBool(val)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<ide> default:
<ide> return nil, fmt.Errorf("Unknown option %s\n", key)
<ide> } | 2 |
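`determineDriverCapabilities` boils down to a semantic-version gate: deferred removal needs driver 4.27 or newer. Here is a compact restatement of that comparison in Python, purely illustrative (the Go code above is authoritative):

```python
def supports_deferred_removal(version):
    """True when a devicemapper driver version string like "4.27.0"
    is new enough for deferred removal (major > 4, or 4.x with x >= 27)."""
    major, minor = (int(p) for p in version.split(".")[:2])
    if major > 4:
        return True
    return major == 4 and minor >= 27

assert supports_deferred_removal("4.27.0")
assert supports_deferred_removal("5.0.1")
assert not supports_deferred_removal("4.26.8")
```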
Java | Java | add maybe.flatmapsingleelement returning maybe | a94a307ab11339b09cb2e69a00a874dea532635f | <ide><path>src/main/java/io/reactivex/Maybe.java
<ide> public final <R> Single<R> flatMapSingle(final Function<? super T, ? extends Sin
<ide> return RxJavaPlugins.onAssembly(new MaybeFlatMapSingle<T, R>(this, mapper));
<ide> }
<ide>
<add> /**
<add> * Returns a {@link Maybe} based on applying a specified function to the item emitted by the
<add> * source {@link Maybe}, where that function returns a {@link Single}.
<add> * If this Maybe just completes (emits no value), the resulting {@code Maybe} completes as well.
<add> * <p>
<add> * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapSingle.png" alt="">
<add> * <dl>
<add> * <dt><b>Scheduler:</b></dt>
<add> * <dd>{@code flatMapSingleElement} does not operate by default on a particular {@link Scheduler}.</dd>
<add> * </dl>
<add> *
<add> * @param <R> the result value type
<add> * @param mapper
<add> * a function that, when applied to the item emitted by the source Maybe, returns a
<add> * Single
<add> * @return the new Maybe instance
<add> * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
<add> * @since 2.0.2 - experimental
<add> */
<add> @SchedulerSupport(SchedulerSupport.NONE)
<add> @Experimental
<add> public final <R> Maybe<R> flatMapSingleElement(final Function<? super T, ? extends SingleSource<? extends R>> mapper) {
<add> ObjectHelper.requireNonNull(mapper, "mapper is null");
<add> return RxJavaPlugins.onAssembly(new MaybeFlatMapSingleElement<T, R>(this, mapper));
<add> }
<add>
<ide> /**
<ide> * Returns a {@link Completable} that completes based on applying a specified function to the item emitted by the
<ide> * source {@link Maybe}, where that function returns a {@link Completable}.
<ide><path>src/main/java/io/reactivex/internal/operators/maybe/MaybeFlatMapSingleElement.java
<add>/**
<add> * Copyright 2016 Netflix, Inc.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
<add> * compliance with the License. You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software distributed under the License is
<add> * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
<add> * the License for the specific language governing permissions and limitations under the License.
<add> */
<add>
<add>package io.reactivex.internal.operators.maybe;
<add>
<add>import java.util.concurrent.atomic.AtomicReference;
<add>
<add>import io.reactivex.*;
<add>import io.reactivex.annotations.Experimental;
<add>import io.reactivex.disposables.Disposable;
<add>import io.reactivex.exceptions.Exceptions;
<add>import io.reactivex.functions.Function;
<add>import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.internal.functions.ObjectHelper;
<add>
<add>/**
<add> * Maps the success value of the source MaybeSource into a Single.
<add> * @param <T> the input value type
<add> * @param <R> the result value type
<add> *
<add> * @since 2.0.2 - experimental
<add> */
<add>@Experimental
<add>public final class MaybeFlatMapSingleElement<T, R> extends Maybe<R> {
<add>
<add> final MaybeSource<T> source;
<add>
<add> final Function<? super T, ? extends SingleSource<? extends R>> mapper;
<add>
<add> public MaybeFlatMapSingleElement(MaybeSource<T> source, Function<? super T, ? extends SingleSource<? extends R>> mapper) {
<add> this.source = source;
<add> this.mapper = mapper;
<add> }
<add>
<add> @Override
<add> protected void subscribeActual(MaybeObserver<? super R> actual) {
<add> source.subscribe(new FlatMapMaybeObserver<T, R>(actual, mapper));
<add> }
<add>
<add> static final class FlatMapMaybeObserver<T, R>
<add> extends AtomicReference<Disposable>
<add> implements MaybeObserver<T>, Disposable {
<add>
<add> private static final long serialVersionUID = 4827726964688405508L;
<add>
<add> final MaybeObserver<? super R> actual;
<add>
<add> final Function<? super T, ? extends SingleSource<? extends R>> mapper;
<add>
<add> FlatMapMaybeObserver(MaybeObserver<? super R> actual, Function<? super T, ? extends SingleSource<? extends R>> mapper) {
<add> this.actual = actual;
<add> this.mapper = mapper;
<add> }
<add>
<add> @Override
<add> public void dispose() {
<add> DisposableHelper.dispose(this);
<add> }
<add>
<add> @Override
<add> public boolean isDisposed() {
<add> return DisposableHelper.isDisposed(get());
<add> }
<add>
<add> @Override
<add> public void onSubscribe(Disposable d) {
<add> if (DisposableHelper.setOnce(this, d)) {
<add> actual.onSubscribe(this);
<add> }
<add> }
<add>
<add> @Override
<add> public void onSuccess(T value) {
<add> SingleSource<? extends R> ss;
<add>
<add> try {
<add> ss = ObjectHelper.requireNonNull(mapper.apply(value), "The mapper returned a null SingleSource");
<add> } catch (Throwable ex) {
<add> Exceptions.throwIfFatal(ex);
<add> onError(ex);
<add> return;
<add> }
<add>
<add> ss.subscribe(new FlatMapSingleObserver<R>(this, actual));
<add> }
<add>
<add> @Override
<add> public void onError(Throwable e) {
<add> actual.onError(e);
<add> }
<add>
<add> @Override
<add> public void onComplete() {
<add> actual.onComplete();
<add> }
<add> }
<add>
<add> static final class FlatMapSingleObserver<R> implements SingleObserver<R> {
<add>
<add> final AtomicReference<Disposable> parent;
<add>
<add> final MaybeObserver<? super R> actual;
<add>
<add> FlatMapSingleObserver(AtomicReference<Disposable> parent, MaybeObserver<? super R> actual) {
<add> this.parent = parent;
<add> this.actual = actual;
<add> }
<add>
<add> @Override
<add> public void onSubscribe(final Disposable d) {
<add> DisposableHelper.replace(parent, d);
<add> }
<add>
<add> @Override
<add> public void onSuccess(final R value) {
<add> actual.onSuccess(value);
<add> }
<add>
<add> @Override
<add> public void onError(final Throwable e) {
<add> actual.onError(e);
<add> }
<add> }
<add>}
<ide><path>src/test/java/io/reactivex/internal/operators/maybe/MaybeFlatMapSingleElementTest.java
<add>/**
<add> * Copyright 2016 Netflix, Inc.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
<add> * compliance with the License. You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software distributed under the License is
<add> * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
<add> * the License for the specific language governing permissions and limitations under the License.
<add> */
<add>
<add>package io.reactivex.internal.operators.maybe;
<add>
<add>import org.junit.Test;
<add>
<add>import io.reactivex.*;
<add>import io.reactivex.exceptions.TestException;
<add>import io.reactivex.functions.Function;
<add>
<add>public class MaybeFlatMapSingleElementTest {
<add> @Test(expected = NullPointerException.class)
<add> public void flatMapSingleElementNull() {
<add> Maybe.just(1)
<add> .flatMapSingleElement(null);
<add> }
<add>
<add> @Test
<add> public void flatMapSingleElementValue() {
<add> Maybe.just(1).flatMapSingleElement(new Function<Integer, SingleSource<Integer>>() {
<add> @Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
<add> if (integer == 1) {
<add> return Single.just(2);
<add> }
<add>
<add> return Single.just(1);
<add> }
<add> })
<add> .test()
<add> .assertResult(2);
<add> }
<add>
<add> @Test
<add> public void flatMapSingleElementValueDifferentType() {
<add> Maybe.just(1).flatMapSingleElement(new Function<Integer, SingleSource<String>>() {
<add> @Override public SingleSource<String> apply(final Integer integer) throws Exception {
<add> if (integer == 1) {
<add> return Single.just("2");
<add> }
<add>
<add> return Single.just("1");
<add> }
<add> })
<add> .test()
<add> .assertResult("2");
<add> }
<add>
<add> @Test
<add> public void flatMapSingleElementValueNull() {
<add> Maybe.just(1).flatMapSingleElement(new Function<Integer, SingleSource<Integer>>() {
<add> @Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
<add> return null;
<add> }
<add> })
<add> .test()
<add> .assertNoValues()
<add> .assertError(NullPointerException.class)
<add> .assertErrorMessage("The mapper returned a null SingleSource");
<add> }
<add>
<add> @Test
<add> public void flatMapSingleElementValueErrorThrown() {
<add> Maybe.just(1).flatMapSingleElement(new Function<Integer, SingleSource<Integer>>() {
<add> @Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
<add> throw new RuntimeException("something went terribly wrong!");
<add> }
<add> })
<add> .test()
<add> .assertNoValues()
<add> .assertError(RuntimeException.class)
<add> .assertErrorMessage("something went terribly wrong!");
<add> }
<add>
<add> @Test
<add> public void flatMapSingleElementError() {
<add> RuntimeException exception = new RuntimeException("test");
<add>
<add> Maybe.error(exception).flatMapSingleElement(new Function<Object, SingleSource<Object>>() {
<add> @Override public SingleSource<Object> apply(final Object integer) throws Exception {
<add> return Single.just(new Object());
<add> }
<add> })
<add> .test()
<add> .assertError(exception);
<add> }
<add>
<add> @Test
<add> public void flatMapSingleElementEmpty() {
<add> Maybe.<Integer>empty().flatMapSingleElement(new Function<Integer, SingleSource<Integer>>() {
<add> @Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
<add> return Single.just(2);
<add> }
<add> })
<add> .test()
<add> .assertNoValues()
<add> .assertResult();
<add> }
<add>
<add> @Test
<add> public void dispose() {
<add> TestHelper.checkDisposed(Maybe.just(1).flatMapSingleElement(new Function<Integer, SingleSource<Integer>>() {
<add> @Override
<add> public SingleSource<Integer> apply(final Integer integer) throws Exception {
<add> return Single.just(2);
<add> }
<add> }));
<add> }
<add>
<add> @Test
<add> public void doubleOnSubscribe() {
<add> TestHelper.checkDoubleOnSubscribeMaybe(new Function<Maybe<Integer>, Maybe<Integer>>() {
<add> @Override
<add> public Maybe<Integer> apply(Maybe<Integer> m) throws Exception {
<add> return m.flatMapSingleElement(new Function<Integer, SingleSource<Integer>>() {
<add> @Override
<add> public SingleSource<Integer> apply(final Integer integer) throws Exception {
<add> return Single.just(2);
<add> }
<add> });
<add> }
<add> });
<add> }
<add>
<add> @Test
<add> public void singleErrors() {
<add> Maybe.just(1)
<add> .flatMapSingleElement(new Function<Integer, SingleSource<Integer>>() {
<add> @Override
<add> public SingleSource<Integer> apply(final Integer integer) throws Exception {
<add> return Single.error(new TestException());
<add> }
<add> })
<add> .test()
<add> .assertFailure(TestException.class);
<add> }
<add>} | 3 |
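Outside Rx types, the new operator's contract is easy to state. A toy synchronous model in Python, where `None` stands in for an empty Maybe and the mapper's return value stands in for the Single's success value (an illustrative sketch, not the library API):

```python
def flat_map_single_element(maybe_value, mapper):
    """Success -> map it through the Single; empty -> stay empty.
    Exceptions raised by mapper propagate, like onError."""
    if maybe_value is None:      # upstream completed without a value
        return None
    return mapper(maybe_value)   # the Single produces exactly one value

assert flat_map_single_element(1, lambda v: v + 1) == 2
assert flat_map_single_element(None, lambda v: v + 1) is None
```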
Python | Python | fix issue with conv3d conversion interface | 8b8ffe0ea4b9739e8b4ad397cc5e232beb24cad6 | <ide><path>keras/legacy/interfaces.py
<ide> def deconv2d_args_preprocessor(args, kwargs):
<ide>
<ide>
<ide> def conv3d_args_preprocessor(args, kwargs):
<add> converted = []
<ide> if len(args) > 5:
<ide> raise TypeError('Layer can receive at most 4 positional arguments.')
<ide> if len(args) == 5:
<ide> if isinstance(args[2], int) and isinstance(args[3], int) and isinstance(args[4], int):
<ide> kernel_size = (args[2], args[3], args[4])
<ide> args = [args[0], args[1], kernel_size]
<add> converted.append(('kernel_size', 'kernel_dim*'))
<ide> elif len(args) == 4 and isinstance(args[3], int):
<ide> if isinstance(args[2], int) and isinstance(args[3], int):
<ide> new_keywords = ['padding', 'strides', 'data_format']
<ide> def conv3d_args_preprocessor(args, kwargs):
<ide> if 'kernel_dim3' in kwargs:
<ide> kernel_size = (args[2], args[3], kwargs.pop('kernel_dim3'))
<ide> args = [args[0], args[1], kernel_size]
<add> converted.append(('kernel_size', 'kernel_dim*'))
<ide> elif len(args) == 3:
<ide> if 'kernel_dim2' in kwargs and 'kernel_dim3' in kwargs:
<ide> kernel_size = (args[2],
<ide> kwargs.pop('kernel_dim2'),
<ide> kwargs.pop('kernel_dim3'))
<ide> args = [args[0], args[1], kernel_size]
<add> converted.append(('kernel_size', 'kernel_dim*'))
<ide> elif len(args) == 2:
<ide> if 'kernel_dim1' in kwargs and 'kernel_dim2' in kwargs and 'kernel_dim3' in kwargs:
<ide> kernel_size = (kwargs.pop('kernel_dim1'),
<ide> kwargs.pop('kernel_dim2'),
<ide> kwargs.pop('kernel_dim3'))
<ide> args = [args[0], args[1], kernel_size]
<del> return args, kwargs, [('kernel_size', 'kernel_dim*')]
<add> converted.append(('kernel_size', 'kernel_dim*'))
<add> return args, kwargs, converted
<ide>
<ide> legacy_conv3d_support = generate_legacy_interface(
<ide> allowed_positional_args=['filters', 'kernel_size'], | 1 |
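The fix is easier to see with concrete inputs. A standalone sketch of just the two-positional-args branch, where `args[0]` stands in for the layer instance (only this branch is reproduced, and only for illustration):

```python
def convert_conv3d_args(args, kwargs):
    """Fold legacy kernel_dim1/2/3 kwargs into a kernel_size tuple and
    report the conversion only when one actually happened."""
    args, kwargs, converted = list(args), dict(kwargs), []
    dims = ("kernel_dim1", "kernel_dim2", "kernel_dim3")
    if len(args) == 2 and all(d in kwargs for d in dims):
        kernel_size = tuple(kwargs.pop(d) for d in dims)
        args = [args[0], args[1], kernel_size]
        converted.append(("kernel_size", "kernel_dim*"))
    return args, kwargs, converted

layer = object()  # placeholder for the Conv3D instance
print(convert_conv3d_args([layer, 64],
                          {"kernel_dim1": 3, "kernel_dim2": 3, "kernel_dim3": 3}))
# -> ([layer, 64, (3, 3, 3)], {}, [('kernel_size', 'kernel_dim*')])
```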
Ruby | Ruby | make hermes from source the default | bb01b75637edc1159a3bdb3af86936e1c92f39c1 | <ide><path>scripts/react_native_pods.rb
<ide> def use_react_native! (options={})
<ide>
<ide> if hermes_enabled
<ide> pod 'React-hermes', :path => "#{prefix}/ReactCommon/hermes"
<del> if ENV['BUILD_HERMES_SOURCE'] == '1'
<del> Pod::UI.puts "[Hermes] Building Hermes from source"
<del> hermes_source_path = downloadAndConfigureHermesSource(prefix)
<del> pod 'hermes-engine', :path => "#{hermes_source_path}/hermes-engine.podspec"
<del> else
<del> Pod::UI.warn "[Hermes] Installing Hermes from CocoaPods. The `hermes-engine` pod has been deprecated and will not see future updates."
<del> pod 'hermes-engine', '~> 0.11.0'
<del> end
<add> Pod::UI.puts "[Hermes] Building Hermes from source"
<add> hermes_source_path = downloadAndConfigureHermesSource(prefix)
<add> pod 'hermes-engine', :path => "#{hermes_source_path}/hermes-engine.podspec"
<ide> pod 'libevent', '~> 2.1.12'
<ide> end
<ide> | 1 |
Text | Text | correct factual error for page not found | 3fe7b5f4a1edcbb9239a1587d2a565bc45c41f2e | <ide><path>guides/source/upgrading_ruby_on_rails.md
<ide> being used, you can update your form to use the `PUT` method instead:
<ide> <%= form_for [ :update_name, @user ], method: :put do |f| %>
<ide> ```
<ide>
<del>For more on PATCH and why this change was made, see [this post](http://weblog.rubyonrails.org/2012/2/26/edge-rails-patch-is-the-new-primary-http-method-for-updates/)
<add>For more on PATCH and why this change was made, see [this post](http://weblog.rubyonrails.org/2012/2/25/edge-rails-patch-is-the-new-primary-http-method-for-updates/)
<ide> on the Rails blog.
<ide>
<ide> #### A note about media types | 1 |
Python | Python | add some documentation to linalg.py | 8290d01cba2320c7b1f9367f73356596d279c2da | <ide><path>numpy/linalg/linalg.py
<ide> def _assertSquareness(*arrays):
<ide> # Linear equations
<ide>
<ide> def solve(a, b):
<add> """Return the solution x of the equation a*x = b
<add> """
<ide> one_eq = len(b.shape) == 1
<ide> if one_eq:
<ide> b = b[:, newaxis]
<ide> def eig(a):
<ide>
<ide>
<ide> def eigh(a, UPLO='L'):
<add> """Compute eigenvalues and eigenvectors of a Hermitian or real symmetric matrix.
<add> """
<ide> a, wrap = _makearray(a)
<ide> _assertRank2(a)
<ide> _assertSquareness(a)
<ide> def eigh(a, UPLO='L'):
<ide> # Singular value decomposition
<ide>
<ide> def svd(a, full_matrices=1, compute_uv=1):
<add> """Singular Value Decomposition.
<add>
<add> u,s,vh = svd(a)
<add>
<add> If a is an M x N array, then the svd produces a factoring of the array
<add> into two unitary (orthogonal) 2-d arrays u (MxM) and vh (NxN) and a
<add> min(M,N)-length array of singular values such that
<add>
<add> a == dot(u,dot(S,vh))
<add>
<add> where S is an MxN array of zeros whose main diagonal is s.
<add>
<add> if compute_uv == 0, then return only the singular values
<add> if full_matrices == 0, then only the first min(M,N) columns of u
<add> and the first min(M,N) rows of vh are returned
<add> """
<ide> a, wrap = _makearray(a)
<ide> _assertRank2(a)
<ide> m, n = a.shape
<ide> def svd(a, full_matrices=1, compute_uv=1):
<ide> # Generalized inverse
<ide>
<ide> def pinv(a, rcond = 1.e-10):
<add> """Return the (Moore-Penrose) pseudo-inverse of a 2-d array
<add>
<add> This method computes the generalized inverse using the
<add> singular-value decomposition and all singular values larger than
<add> rcond of the largest.
<add> """
<ide> a, wrap = _makearray(a)
<ide> if a.dtype.char in typecodes['Complex']:
<ide> a = conjugate(a)
<ide> def pinv(a, rcond = 1.e-10):
<ide> # Determinant
<ide>
<ide> def det(a):
<add> "The determinant of the 2-d array a"
<ide> a = asarray(a)
<ide> _assertRank2(a)
<ide> _assertSquareness(a) | 1 |
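The relationships these docstrings describe are quick to verify. A short usage sketch with the modern numpy API (`@` here corresponds to the dot() calls in the docstrings):

```python
import numpy as np

a = np.random.rand(4, 3)

# svd: a == dot(u, dot(S, vh)), with s on the main diagonal of a 4x3 S
u, s, vh = np.linalg.svd(a)
S = np.zeros((4, 3))
S[:3, :3] = np.diag(s)
assert np.allclose(a, u @ S @ vh)

# pinv: for a full-column-rank a, the pseudo-inverse is a left inverse
assert np.allclose(np.linalg.pinv(a) @ a, np.eye(3))

# solve: the x with m @ x == b for a square, non-singular m
m = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
assert np.allclose(m @ np.linalg.solve(m, b), b)
```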
Javascript | Javascript | add siblings timeout components test case | c601f7a64640290af85c9f0e33c78480656b46bc | <ide><path>packages/react-reconciler/src/__tests__/ReactSuspense-test.internal.js
<ide> describe('ReactSuspense', () => {
<ide> expect(ReactNoop.getChildren()).toEqual([span('A'), span('B')]);
<ide> });
<ide>
<add> it('suspends siblings and later recovers each independently', async () => {
<add> // Render two sibling Timeout components
<add> ReactNoop.render(
<add> <Fragment>
<add> <Fallback timeout={1000} placeholder={<Text text="Loading A..." />}>
<add> <AsyncText text="A" ms={5000} />
<add> </Fallback>
<add> <Fallback timeout={3000} placeholder={<Text text="Loading B..." />}>
<add> <AsyncText text="B" ms={6000} />
<add> </Fallback>
<add> </Fragment>,
<add> );
<add> expect(ReactNoop.flush()).toEqual(['Suspend! [A]', 'Suspend! [B]']);
<add> expect(ReactNoop.getChildren()).toEqual([]);
<add>
<add> // Advance time by enough to timeout both components and commit their placeholders
<add> ReactNoop.expire(4000);
<add> await advanceTimers(4000);
<add>
<add> expect(ReactNoop.flush()).toEqual([
<add> 'Suspend! [A]',
<add> 'Loading A...',
<add> 'Suspend! [B]',
<add> 'Loading B...',
<add> ]);
<add> expect(ReactNoop.getChildren()).toEqual([
<add> span('Loading A...'),
<add> span('Loading B...'),
<add> ]);
<add>
<add> // Advance time by enough that the first Timeout's promise resolves
<add> // and switches back to the normal view. The second Timeout should still show the placeholder
<add> ReactNoop.expire(1000);
<add> await advanceTimers(1000);
<add>
<add> expect(ReactNoop.flush()).toEqual(['Promise resolved [A]', 'A']);
<add> expect(ReactNoop.getChildren()).toEqual([span('A'), span('Loading B...')]);
<add>
<add> // Advance time by enough that the second Timeout's promise resolves
<add> // and switches back to the normal view
<add> ReactNoop.expire(1000);
<add> await advanceTimers(1000);
<add>
<add> expect(ReactNoop.flush()).toEqual(['Promise resolved [B]', 'B']);
<add> expect(ReactNoop.getChildren()).toEqual([span('A'), span('B')]);
<add> });
<add>
<ide> it('continues rendering siblings after suspending', async () => {
<ide> ReactNoop.render(
<ide> <Fallback> | 1 |
Javascript | Javascript | handle array holes in setservers() | b176d30a691f5b04dd9fef99c1ca21e548384b36 | <ide><path>lib/dns.js
<ide> exports.setServers = function(servers) {
<ide> // cache the original servers because in the event of an error setting the
<ide> // servers cares won't have any servers available for resolution
<ide> const orig = cares.getServers();
<add> const newSet = [];
<ide>
<del> const newSet = servers.map((serv) => {
<add> servers.forEach((serv) => {
<ide> var ipVersion = isIP(serv);
<ide> if (ipVersion !== 0)
<del> return [ipVersion, serv];
<add> return newSet.push([ipVersion, serv]);
<ide>
<ide> const match = serv.match(/\[(.*)\](:\d+)?/);
<ide> // we have an IPv6 in brackets
<ide> if (match) {
<ide> ipVersion = isIP(match[1]);
<ide> if (ipVersion !== 0)
<del> return [ipVersion, match[1]];
<add> return newSet.push([ipVersion, match[1]]);
<ide> }
<ide>
<ide> const s = serv.split(/:\d+$/)[0];
<ide> ipVersion = isIP(s);
<ide>
<ide> if (ipVersion !== 0)
<del> return [ipVersion, s];
<add> return newSet.push([ipVersion, s]);
<ide>
<ide> throw new Error(`IP address is not properly formatted: ${serv}`);
<ide> });
<ide><path>test/parallel/test-dns.js
<ide> const dns = require('dns');
<ide> var existing = dns.getServers();
<ide> assert(existing.length);
<ide>
<add>// Verify that setServers() handles arrays with holes and other oddities
<add>assert.doesNotThrow(() => {
<add> const servers = [];
<add>
<add> servers[0] = '127.0.0.1';
<add> servers[2] = '0.0.0.0';
<add> dns.setServers(servers);
<add>});
<add>
<add>assert.doesNotThrow(() => {
<add> const servers = ['127.0.0.1', '192.168.1.1'];
<add>
<add> servers[3] = '127.1.0.1';
<add> servers[4] = '127.1.0.1';
<add> servers[5] = '127.1.1.1';
<add>
<add> Object.defineProperty(servers, 2, {
<add> enumerable: true,
<add> get: () => {
<add> servers.length = 3;
<add> return '0.0.0.0';
<add> }
<add> });
<add>
<add> dns.setServers(servers);
<add>});
<add>
<ide> function noop() {}
<ide>
<ide> var goog = [ | 2 |
Python | Python | remove rogue comment | 193e2df8ba95efd6e3326cb0907576a0c74f1d74 | <ide><path>examples/run_classifier.py
<ide> def main():
<ide>
<ide> if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
<ide> eval_examples = processor.get_dev_examples(args.data_dir)
<del> # should tokenize this too.
<ide> eval_features = convert_examples_to_features(
<ide> eval_examples, label_list, args.max_seq_length, tokenizer)
<ide> logger.info("***** Running evaluation *****") | 1 |
PHP | PHP | add singletonif method to the container | a70352e0a1b94da97510bf1370268b80074cee77 | <ide><path>src/Illuminate/Container/Container.php
<ide> public function singleton($abstract, $concrete = null)
<ide> $this->bind($abstract, $concrete, true);
<ide> }
<ide>
<add> /**
<add> * Register a shared binding if it hasn't already been registered.
<add> *
<add> * @param string $abstract
<add> * @param \Closure|string|null $concrete
<add> * @return void
<add> */
<add> public function singletonIf($abstract, $concrete = null)
<add> {
<add> if (! $this->bound($abstract)) {
<add> $this->singleton($abstract, $concrete);
<add> }
<add> }
<add>
<ide> /**
<ide> * "Extend" an abstract type in the container.
<ide> *
<ide><path>src/Illuminate/Contracts/Container/Container.php
<ide> public function bindIf($abstract, $concrete = null, $shared = false);
<ide> */
<ide> public function singleton($abstract, $concrete = null);
<ide>
<add> /**
<add> * Register a shared binding if it hasn't already been registered.
<add> *
<add> * @param string $abstract
<add> * @param \Closure|string|null $concrete
<add> * @return void
<add> */
<add> public function singletonIf($abstract, $concrete = null);
<add>
<ide> /**
<ide> * "Extend" an abstract type in the container.
<ide> *
<ide><path>tests/Container/ContainerTest.php
<ide> public function testBindIfDoesRegisterIfServiceNotRegisteredYet()
<ide> $this->assertEquals('Dayle', $container->make('name'));
<ide> }
<ide>
<add> public function testSingletonIfDoesntRegisterIfBindingAlreadyRegistered()
<add> {
<add> $container = new Container;
<add> $class = new stdClass;
<add> $container->singleton('class', function () use ($class) {
<add> return $class;
<add> });
<add> $otherClass = new stdClass;
<add> $container->singletonIf('class', function () use ($otherClass) {
<add> return $otherClass;
<add> });
<add>
<add> $this->assertSame($class, $container->make('class'));
<add> }
<add>
<add> public function testSingletonIfDoesRegisterIfBindingNotRegisteredYet()
<add> {
<add> $container = new Container;
<add> $class = new stdClass;
<add> $container->singleton('class', function () use ($class) {
<add> return $class;
<add> });
<add> $otherClass = new stdClass;
<add> $container->singletonIf('otherClass', function () use ($otherClass) {
<add> return $otherClass;
<add> });
<add>
<add> $this->assertSame($otherClass, $container->make('otherClass'));
<add> }
<add>
<ide> public function testSharedClosureResolution()
<ide> {
<ide> $container = new Container; | 3 |
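`singletonIf` is bind-if-absent for shared services. A tiny container sketch in Python capturing the same semantics the new tests assert (a hypothetical class, not Laravel's API):

```python
class Container:
    """Minimal DI container illustrating bind-if-absent for singletons."""
    def __init__(self):
        self._bindings = {}

    def singleton(self, abstract, factory):
        # Always (re)register; the instance is memoized on first resolve.
        self._bindings[abstract] = {"factory": factory, "instance": None}

    def singleton_if(self, abstract, factory):
        # Register only when nothing is bound yet - the new behavior.
        if abstract not in self._bindings:
            self.singleton(abstract, factory)

    def make(self, abstract):
        binding = self._bindings[abstract]
        if binding["instance"] is None:
            binding["instance"] = binding["factory"]()
        return binding["instance"]

c = Container()
c.singleton("cfg", lambda: {"env": "prod"})
c.singleton_if("cfg", lambda: {"env": "test"})  # ignored: already bound
assert c.make("cfg") == {"env": "prod"}
```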
Python | Python | add type hints to aws provider | b9d677cdd660e0be8278a64658e73359276a9682 | <ide><path>airflow/providers/amazon/aws/hooks/elasticache_replication_group.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> from time import sleep
<ide>
<ide> class ElastiCacheReplicationGroupHook(AwsBaseHook):
<ide> TERMINAL_STATES = frozenset({"available", "create-failed", "deleting"})
<ide>
<ide> def __init__(
<del> self, max_retries=10, exponential_back_off_factor=1, initial_poke_interval=60, *args, **kwargs
<add> self,
<add> max_retries: int = 10,
<add> exponential_back_off_factor: float = 1,
<add> initial_poke_interval: float = 60,
<add> *args,
<add> **kwargs,
<ide> ):
<ide> self.max_retries = max_retries
<ide> self.exponential_back_off_factor = exponential_back_off_factor
<ide> self.initial_poke_interval = initial_poke_interval
<ide>
<del> super().__init__(client_type='elasticache', *args, **kwargs)
<add> kwargs["client_type"] = "elasticache"
<add> super().__init__(*args, **kwargs)
<ide>
<del> def create_replication_group(self, config):
<add> def create_replication_group(self, config: dict) -> dict:
<ide> """
<ide> Call ElastiCache API for creating a replication group
<ide>
<ide> def create_replication_group(self, config):
<ide> """
<ide> return self.conn.create_replication_group(**config)
<ide>
<del> def delete_replication_group(self, replication_group_id):
<add> def delete_replication_group(self, replication_group_id: str) -> dict:
<ide> """
<ide> Call ElastiCache API for deleting a replication group
<ide>
<ide> def delete_replication_group(self, replication_group_id):
<ide> """
<ide> return self.conn.delete_replication_group(ReplicationGroupId=replication_group_id)
<ide>
<del> def describe_replication_group(self, replication_group_id):
<add> def describe_replication_group(self, replication_group_id: str) -> dict:
<ide> """
<ide> Call ElastiCache API for describing a replication group
<ide>
<ide> def describe_replication_group(self, replication_group_id):
<ide> """
<ide> return self.conn.describe_replication_groups(ReplicationGroupId=replication_group_id)
<ide>
<del> def get_replication_group_status(self, replication_group_id):
<add> def get_replication_group_status(self, replication_group_id: str) -> str:
<ide> """
<ide> Get current status of replication group
<ide>
<ide> def get_replication_group_status(self, replication_group_id):
<ide> """
<ide> return self.describe_replication_group(replication_group_id)['ReplicationGroups'][0]['Status']
<ide>
<del> def is_replication_group_available(self, replication_group_id):
<add> def is_replication_group_available(self, replication_group_id: str) -> bool:
<ide> """
<ide> Helper for checking if replication group is available or not
<ide>
<ide> def is_replication_group_available(self, replication_group_id):
<ide>
<ide> def wait_for_availability(
<ide> self,
<del> replication_group_id,
<del> initial_sleep_time=None,
<del> exponential_back_off_factor=None,
<del> max_retries=None,
<add> replication_group_id: str,
<add> initial_sleep_time: Optional[float] = None,
<add> exponential_back_off_factor: Optional[float] = None,
<add> max_retries: Optional[int] = None,
<ide> ):
<ide> """
<ide> Check if replication group is available or not by performing a describe over it
<ide> def wait_for_availability(
<ide>
<ide> def wait_for_deletion(
<ide> self,
<del> replication_group_id,
<del> initial_sleep_time=None,
<del> exponential_back_off_factor=None,
<del> max_retries=None,
<add> replication_group_id: str,
<add> initial_sleep_time: Optional[float] = None,
<add> exponential_back_off_factor: Optional[float] = None,
<add> max_retries: Optional[int] = None,
<ide> ):
<ide> """
<ide> Helper for deleting a replication group ensuring it is either deleted or can't be deleted
<ide> def wait_for_deletion(
<ide>
<ide> def ensure_delete_replication_group(
<ide> self,
<del> replication_group_id,
<del> initial_sleep_time=None,
<del> exponential_back_off_factor=None,
<del> max_retries=None,
<add> replication_group_id: str,
<add> initial_sleep_time: Optional[float] = None,
<add> exponential_back_off_factor: Optional[float] = None,
<add> max_retries: Optional[int] = None,
<ide> ):
<ide> """
<ide> Delete a replication group ensuring it is either deleted or can't be deleted
<ide><path>airflow/providers/amazon/aws/hooks/glue.py
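The `wait_for_*` signatures above all share three tuning knobs. A sketch of the polling loop those parameters imply (the hook's actual loop may differ in detail; `check` is any boolean predicate such as `is_replication_group_available`):

```python
import time

def wait_with_backoff(check, initial_sleep_time=60.0,
                      exponential_back_off_factor=1.0, max_retries=10):
    """Poll check() until it returns True, sleeping between attempts and
    stretching the sleep by the back-off factor after each failure."""
    sleep_time = initial_sleep_time
    for _ in range(max_retries):
        if check():
            return True
        time.sleep(sleep_time)
        sleep_time *= exponential_back_off_factor
    return False

# e.g. wait_with_backoff(lambda: hook.is_replication_group_available("my-group"))
```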
<ide> def get_iam_execution_role(self) -> Dict:
<ide> self.log.error("Failed to create aws glue job, error: %s", general_error)
<ide> raise
<ide>
<del> def initialize_job(self, script_arguments: Optional[List] = None) -> Dict[str, str]:
<add> def initialize_job(self, script_arguments: Optional[dict] = None) -> Dict[str, str]:
<ide> """
<ide> Initializes connection with AWS Glue
<ide> to run job
<ide> :return:
<ide> """
<ide> glue_client = self.get_conn()
<del> script_arguments = script_arguments or []
<add> script_arguments = script_arguments or {}
<ide>
<ide> try:
<ide> job_name = self.get_or_create_glue_job()
<ide><path>airflow/providers/amazon/aws/hooks/sagemaker.py
<ide> import time
<ide> import warnings
<ide> from functools import partial
<del>from typing import Dict, List, Optional, Set
<add>from typing import Dict, List, Optional, Set, Any, Callable, Generator
<ide>
<ide> from botocore.exceptions import ClientError
<ide>
<ide> class LogState:
<ide> Position = collections.namedtuple('Position', ['timestamp', 'skip'])
<ide>
<ide>
<del>def argmin(arr, f) -> Optional[int]:
<add>def argmin(arr, f: Callable) -> Optional[int]:
<ide> """Return the index, i, in arr that minimizes f(arr[i])"""
<ide> min_value = None
<ide> min_idx = None
<ide> def secondary_training_status_changed(current_job_description: dict, prev_job_de
<ide> return message != last_message
<ide>
<ide>
<del>def secondary_training_status_message(job_description, prev_description):
<add>def secondary_training_status_message(
<add> job_description: Dict[str, List[dict]], prev_description: Optional[dict]
<add>) -> str:
<ide> """
<ide> Returns a string contains start time and the secondary training job status message.
<ide>
<ide> def secondary_training_status_message(job_description, prev_description):
<ide>
<ide> :return: Job status string to be printed.
<ide> """
<del> if (
<del> job_description is None
<del> or job_description.get('SecondaryStatusTransitions') is None
<del> or len(job_description.get('SecondaryStatusTransitions')) == 0
<del> ):
<add> current_transitions = job_description.get('SecondaryStatusTransitions')
<add> if current_transitions is None or len(current_transitions) == 0:
<ide> return ''
<ide>
<del> prev_description_secondary_transitions = (
<del> prev_description.get('SecondaryStatusTransitions') if prev_description is not None else None
<del> )
<del> prev_transitions_num = (
<del> len(prev_description['SecondaryStatusTransitions'])
<del> if prev_description_secondary_transitions is not None
<del> else 0
<del> )
<del> current_transitions = job_description['SecondaryStatusTransitions']
<add> prev_transitions_num = 0
<add> if prev_description is not None:
<add> if prev_description.get('SecondaryStatusTransitions') is not None:
<add> prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])
<ide>
<ide> transitions_to_print = (
<ide> current_transitions[-1:]
<ide> def log_stream(self, log_group, stream_name, start_time=0, skip=0):
<ide>
<ide> return self.logs_hook.get_log_events(log_group, stream_name, start_time, skip)
<ide>
<del> def multi_stream_iter(self, log_group, streams, positions=None):
<add> def multi_stream_iter(self, log_group: str, streams: list, positions=None) -> Generator:
<ide> """
<ide> Iterate over the available events coming from a set of log streams in a single log group
<ide> interleaving the events from each stream so they're yielded in timestamp order.
<ide> def multi_stream_iter(self, log_group, streams, positions=None):
<ide> self.logs_hook.get_log_events(log_group, s, positions[s].timestamp, positions[s].skip)
<ide> for s in streams
<ide> ]
<del> events = []
<add> events: List[Optional[Any]] = []
<ide> for event_stream in event_iters:
<ide> if not event_stream:
<ide> events.append(None)
<ide> def multi_stream_iter(self, log_group, streams, positions=None):
<ide> events.append(None)
<ide>
<ide> while any(events):
<del> i = argmin(events, lambda x: x['timestamp'] if x else 9999999999)
<del> yield (i, events[i])
<add> i = argmin(events, lambda x: x['timestamp'] if x else 9999999999) or 0
<add> yield i, events[i]
<ide> try:
<ide> events[i] = next(event_iters[i])
<ide> except StopIteration:
<ide> def describe_training_job(self, name: str):
<ide>
<ide> def describe_training_job_with_log(
<ide> self,
<del> job_name,
<add> job_name: str,
<ide> positions,
<del> stream_names,
<del> instance_count,
<del> state,
<del> last_description,
<del> last_describe_job_call,
<add> stream_names: list,
<add> instance_count: int,
<add> state: int,
<add> last_description: dict,
<add> last_describe_job_call: float,
<ide> ):
<ide> """Return the training job info associated with job_name and print CloudWatch logs"""
<ide> log_group = '/aws/sagemaker/TrainingJobs'
<ide> def describe_training_job_with_log(
<ide> state = LogState.JOB_COMPLETE
<ide> return state, last_description, last_describe_job_call
<ide>
<del> def describe_tuning_job(self, name: str):
<add> def describe_tuning_job(self, name: str) -> dict:
<ide> """
<ide> Return the tuning job info associated with the name
<ide>
<ide> def describe_tuning_job(self, name: str):
<ide> """
<ide> return self.get_conn().describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
<ide>
<del> def describe_model(self, name: str):
<add> def describe_model(self, name: str) -> dict:
<ide> """
<ide> Return the SageMaker model info associated with the name
<ide>
<ide> def describe_model(self, name: str):
<ide> """
<ide> return self.get_conn().describe_model(ModelName=name)
<ide>
<del> def describe_transform_job(self, name: str):
<add> def describe_transform_job(self, name: str) -> dict:
<ide> """
<ide> Return the transform job info associated with the name
<ide>
<ide> def describe_transform_job(self, name: str):
<ide> """
<ide> return self.get_conn().describe_transform_job(TransformJobName=name)
<ide>
<del> def describe_processing_job(self, name: str):
<add> def describe_processing_job(self, name: str) -> dict:
<ide> """
<ide> Return the processing job info associated with the name
<ide>
<ide> def describe_processing_job(self, name: str):
<ide> """
<ide> return self.get_conn().describe_processing_job(ProcessingJobName=name)
<ide>
<del> def describe_endpoint_config(self, name: str):
<add> def describe_endpoint_config(self, name: str) -> dict:
<ide> """
<ide> Return the endpoint config info associated with the name
<ide>
<ide> def describe_endpoint_config(self, name: str):
<ide> """
<ide> return self.get_conn().describe_endpoint_config(EndpointConfigName=name)
<ide>
<del> def describe_endpoint(self, name: str):
<add> def describe_endpoint(self, name: str) -> dict:
<ide> """
<ide> :param name: the name of the endpoint
<ide> :type name: str
<ide> def check_status(
<ide> self,
<ide> job_name: str,
<ide> key: str,
<del> describe_function,
<add> describe_function: Callable,
<ide> check_interval: int,
<ide> max_ingestion_time: Optional[int] = None,
<ide> non_terminal_states: Optional[Set] = None,
<ide> def list_processing_jobs(self, **kwargs) -> List[Dict]: # noqa: D402
<ide> )
<ide> return results
<ide>
<del> def _list_request(self, partial_func, result_key: str, max_results: Optional[int] = None) -> List[Dict]:
<add> def _list_request(
<add> self, partial_func: Callable, result_key: str, max_results: Optional[int] = None
<add> ) -> List[Dict]:
<ide> """
<ide> All AWS boto3 list_* requests return results in batches (if the key "NextToken" is contained in the
<ide> result, there are more results to fetch). The default AWS batch size is 10, and configurable up to
<ide><path>airflow/providers/amazon/aws/log/cloudwatch_task_handler.py
<ide> class CloudwatchTaskHandler(FileTaskHandler, LoggingMixin):
<ide> :type filename_template: str
<ide> """
<ide>
<del> def __init__(self, base_log_folder, log_group_arn, filename_template):
<add> def __init__(self, base_log_folder: str, log_group_arn: str, filename_template: str):
<ide> super().__init__(base_log_folder, filename_template)
<ide> split_arn = log_group_arn.split(':')
<ide>
<ide> def _read(self, task_instance, try_number, metadata=None):
<ide> {'end_of_log': True},
<ide> )
<ide>
<del> def get_cloudwatch_logs(self, stream_name):
<add> def get_cloudwatch_logs(self, stream_name: str) -> str:
<ide> """
<ide> Return all logs from the given log stream.
<ide>
<ide><path>airflow/providers/amazon/aws/log/s3_task_handler.py
<ide> class S3TaskHandler(FileTaskHandler, LoggingMixin):
<ide> uploads to and reads from S3 remote storage.
<ide> """
<ide>
<del> def __init__(self, base_log_folder, s3_log_folder, filename_template):
<add> def __init__(self, base_log_folder: str, s3_log_folder: str, filename_template: str):
<ide> super().__init__(base_log_folder, filename_template)
<ide> self.remote_base = s3_log_folder
<ide> self.log_relative_path = ''
<ide> def _read(self, ti, try_number, metadata=None):
<ide> else:
<ide> return super()._read(ti, try_number)
<ide>
<del> def s3_log_exists(self, remote_log_location):
<add> def s3_log_exists(self, remote_log_location: str) -> bool:
<ide> """
<ide> Check if remote_log_location exists in remote storage
<ide>
<ide> :param remote_log_location: log's location in remote storage
<add> :type remote_log_location: str
<ide> :return: True if location exists else False
<ide> """
<ide> try:
<ide> def s3_log_exists(self, remote_log_location):
<ide> pass
<ide> return False
<ide>
<del> def s3_read(self, remote_log_location, return_error=False):
<add> def s3_read(self, remote_log_location: str, return_error: bool = False) -> str:
<ide> """
<ide> Returns the log found at the remote_log_location. Returns '' if no
<ide> logs are found or there is an error.
<ide> def s3_read(self, remote_log_location, return_error=False):
<ide> :param return_error: if True, returns a string error message if an
<ide> error occurs. Otherwise returns '' when an error occurs.
<ide> :type return_error: bool
<add> :return: the log found at the remote_log_location
<ide> """
<ide> try:
<ide> return self.hook.read_key(remote_log_location)
<ide> def s3_read(self, remote_log_location, return_error=False):
<ide> # return error if needed
<ide> if return_error:
<ide> return msg
<add> return ''
<ide>
<del> def s3_write(self, log, remote_log_location, append=True):
<add> def s3_write(self, log: str, remote_log_location: str, append: bool = True):
<ide> """
<ide> Writes the log to the remote_log_location. Fails silently if no hook
<ide> was created.
<ide><path>airflow/providers/amazon/aws/operators/batch.py
<ide> - http://boto3.readthedocs.io/en/latest/reference/services/batch.html
<ide> - https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
<ide> """
<del>from typing import Dict, Optional
<add>from typing import Dict, Optional, Any
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.models import BaseOperator
<ide> class AwsBatchOperator(BaseOperator):
<ide> :type job_queue: str
<ide>
<ide> :param overrides: the `containerOverrides` parameter for boto3 (templated)
<del> :type overrides: Dict
<add> :type overrides: Optional[dict]
<ide>
<ide> :param array_properties: the `arrayProperties` parameter for boto3
<del> :type array_properties: Dict
<add> :type array_properties: Optional[dict]
<ide>
<ide> :param parameters: the `parameters` for boto3 (templated)
<del> :type parameters: Dict
<add> :type parameters: Optional[dict]
<ide>
<ide> :param job_id: the job ID, usually unknown (None) until the
<ide> submit_job operation gets the jobId defined by AWS Batch
<ide> class AwsBatchOperator(BaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> job_name,
<del> job_definition,
<del> job_queue,
<del> overrides,
<del> array_properties=None,
<del> parameters=None,
<del> job_id=None,
<del> waiters=None,
<del> max_retries=None,
<del> status_retries=None,
<del> aws_conn_id=None,
<del> region_name=None,
<add> job_name: str,
<add> job_definition: str,
<add> job_queue: str,
<add> overrides: dict,
<add> array_properties: Optional[dict] = None,
<add> parameters: Optional[dict] = None,
<add> job_id: Optional[str] = None,
<add> waiters: Optional[Any] = None,
<add> max_retries: Optional[int] = None,
<add> status_retries: Optional[int] = None,
<add> aws_conn_id: Optional[str] = None,
<add> region_name: Optional[str] = None,
<ide> **kwargs,
<ide> ): # pylint: disable=too-many-arguments
<ide>
<ide> def __init__(
<ide> self.job_name = job_name
<ide> self.job_definition = job_definition
<ide> self.job_queue = job_queue
<del> self.overrides = overrides
<add> self.overrides = overrides or {}
<ide> self.array_properties = array_properties or {}
<del> self.parameters = parameters
<add> self.parameters = parameters or {}
<ide> self.waiters = waiters
<ide> self.hook = AwsBatchClientHook(
<ide> max_retries=max_retries,
<ide> def monitor_job(self, context: Dict): # pylint: disable=unused-argument
<ide>
<ide> :raises: AirflowException
<ide> """
<add> if not self.job_id:
<add> raise AirflowException('AWS Batch job - job_id was not found')
<add>
<ide> try:
<ide> if self.waiters:
<ide> self.waiters.wait_for_job(self.job_id)
<ide><path>airflow/providers/amazon/aws/operators/cloud_formation.py
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<ide> """This module contains CloudFormation create/delete stack operators."""
<del>from typing import List
<add>from typing import List, Optional
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
<ide> class CloudFormationCreateStackOperator(BaseOperator):
<ide> ui_color = '#6b9659'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, stack_name, params, aws_conn_id='aws_default', **kwargs):
<add> def __init__(self, *, stack_name: str, params: dict, aws_conn_id: str = 'aws_default', **kwargs):
<ide> super().__init__(**kwargs)
<ide> self.stack_name = stack_name
<ide> self.params = params
<ide> class CloudFormationDeleteStackOperator(BaseOperator):
<ide> ui_fgcolor = '#FFF'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, stack_name, params=None, aws_conn_id='aws_default', **kwargs):
<add> def __init__(
<add> self, *, stack_name: str, params: Optional[dict] = None, aws_conn_id: str = 'aws_default', **kwargs
<add> ):
<ide> super().__init__(**kwargs)
<ide> self.params = params or {}
<ide> self.stack_name = stack_name
<del> self.params = params
<ide> self.aws_conn_id = aws_conn_id
<ide>
<ide> def execute(self, context):
<ide><path>airflow/providers/amazon/aws/operators/datasync.py
<ide>
<ide> import logging
<ide> import random
<add>from typing import Optional, List
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.models import BaseOperator
<ide> class AWSDataSyncOperator(BaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> aws_conn_id="aws_default",
<del> wait_interval_seconds=5,
<del> task_arn=None,
<del> source_location_uri=None,
<del> destination_location_uri=None,
<del> allow_random_task_choice=False,
<del> allow_random_location_choice=False,
<del> create_task_kwargs=None,
<del> create_source_location_kwargs=None,
<del> create_destination_location_kwargs=None,
<del> update_task_kwargs=None,
<del> task_execution_kwargs=None,
<del> delete_task_after_execution=False,
<add> aws_conn_id: str = "aws_default",
<add> wait_interval_seconds: int = 5,
<add> task_arn: Optional[str] = None,
<add> source_location_uri: Optional[str] = None,
<add> destination_location_uri: Optional[str] = None,
<add> allow_random_task_choice: bool = False,
<add> allow_random_location_choice: bool = False,
<add> create_task_kwargs: Optional[dict] = None,
<add> create_source_location_kwargs: Optional[dict] = None,
<add> create_destination_location_kwargs: Optional[dict] = None,
<add> update_task_kwargs: Optional[dict] = None,
<add> task_execution_kwargs: Optional[dict] = None,
<add> delete_task_after_execution: bool = False,
<ide> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide> def __init__(
<ide> )
<ide>
<ide> # Others
<del> self.hook = None
<add> self.hook: Optional[AWSDataSyncHook] = None
<ide> # Candidates - these are found in AWS as possible things
<ide> # for us to use
<del> self.candidate_source_location_arns = None
<del> self.candidate_destination_location_arns = None
<del> self.candidate_task_arns = None
<add> self.candidate_source_location_arns: Optional[List[str]] = None
<add> self.candidate_destination_location_arns: Optional[List[str]] = None
<add> self.candidate_task_arns: Optional[List[str]] = None
<ide> # Actuals
<del> self.source_location_arn = None
<del> self.destination_location_arn = None
<del> self.task_execution_arn = None
<add> self.source_location_arn: Optional[str] = None
<add> self.destination_location_arn: Optional[str] = None
<add> self.task_execution_arn: Optional[str] = None
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> AWSDataSyncHook:
<ide> """Create and return AWSDataSyncHook.
<ide>
<ide> :return AWSDataSyncHook: An AWSDataSyncHook instance.
<ide> """
<del> if not self.hook:
<del> self.hook = AWSDataSyncHook(
<del> aws_conn_id=self.aws_conn_id,
<del> wait_interval_seconds=self.wait_interval_seconds,
<del> )
<add> if self.hook:
<add> return self.hook
<add>
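<add> # created lazily on first call and cached for subsequent calls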
<add> self.hook = AWSDataSyncHook(
<add> aws_conn_id=self.aws_conn_id,
<add> wait_interval_seconds=self.wait_interval_seconds,
<add> )
<ide> return self.hook
<ide>
<ide> def execute(self, context):
<ide> def execute(self, context):
<ide>
<ide> return {"TaskArn": self.task_arn, "TaskExecutionArn": self.task_execution_arn}
<ide>
<del> def _get_tasks_and_locations(self):
<add> def _get_tasks_and_locations(self) -> None:
<ide> """Find existing DataSync Task based on source and dest Locations."""
<ide> hook = self.get_hook()
<ide>
<ide> def _get_tasks_and_locations(self):
<ide> )
<ide> self.log.info("Found candidate DataSync TaskArns %s", self.candidate_task_arns)
<ide>
<del> def choose_task(self, task_arn_list):
<add> def choose_task(self, task_arn_list: list) -> Optional[str]:
<ide> """Select 1 DataSync TaskArn from a list"""
<ide> if not task_arn_list:
<ide> return None
<ide> def choose_task(self, task_arn_list):
<ide> return random.choice(task_arn_list)
<ide> raise AirflowException("Unable to choose a Task from {}".format(task_arn_list))
<ide>
<del> def choose_location(self, location_arn_list):
<add> def choose_location(self, location_arn_list: List[str]) -> Optional[str]:
<ide> """Select 1 DataSync LocationArn from a list"""
<ide> if not location_arn_list:
<ide> return None
<ide> def choose_location(self, location_arn_list):
<ide> return random.choice(location_arn_list)
<ide> raise AirflowException("Unable to choose a Location from {}".format(location_arn_list))
<ide>
<del> def _create_datasync_task(self):
<add> def _create_datasync_task(self) -> None:
<ide> """Create a AWS DataSyncTask."""
<add> if not self.candidate_source_location_arns or not self.candidate_destination_location_arns:
<add> return
<add>
<ide> hook = self.get_hook()
<ide>
<ide> self.source_location_arn = self.choose_location(self.candidate_source_location_arns)
<del> if not self.source_location_arn and self.create_source_location_kwargs:
<add> if not self.source_location_arn and self.source_location_uri and self.create_source_location_kwargs:
<ide> self.log.info('Attempting to create source Location')
<ide> self.source_location_arn = hook.create_location(
<ide> self.source_location_uri, **self.create_source_location_kwargs
<ide> def _create_datasync_task(self):
<ide> )
<ide>
<ide> self.destination_location_arn = self.choose_location(self.candidate_destination_location_arns)
<del> if not self.destination_location_arn and self.create_destination_location_kwargs:
<add> if (
<add> not self.destination_location_arn
<add> and self.destination_location_uri
<add> and self.create_destination_location_kwargs
<add> ):
<ide> self.log.info('Attempting to create destination Location')
<ide> self.destination_location_arn = hook.create_location(
<ide> self.destination_location_uri, **self.create_destination_location_kwargs
<ide> def _create_datasync_task(self):
<ide> if not self.task_arn:
<ide> raise AirflowException("Task could not be created")
<ide> self.log.info("Created a Task with TaskArn %s", self.task_arn)
<del> return self.task_arn
<ide>
<del> def _update_datasync_task(self):
<add> def _update_datasync_task(self) -> None:
<ide> """Update a AWS DataSyncTask."""
<add> if not self.task_arn:
<add> return
<add>
<ide> hook = self.get_hook()
<ide> self.log.info("Updating TaskArn %s", self.task_arn)
<ide> hook.update_task(self.task_arn, **self.update_task_kwargs)
<ide> self.log.info("Updated TaskArn %s", self.task_arn)
<del> return self.task_arn
<ide>
<del> def _execute_datasync_task(self):
<add> def _execute_datasync_task(self) -> None:
<ide> """Create and monitor an AWSDataSync TaskExecution for a Task."""
<add> if not self.task_arn:
<add> raise AirflowException("Missing TaskArn")
<add>
<ide> hook = self.get_hook()
<ide>
<ide> # Create a task execution:
<ide> def _execute_datasync_task(self):
<ide>
<ide> if not result:
<ide> raise AirflowException("Failed TaskExecutionArn %s" % self.task_execution_arn)
<del> return self.task_execution_arn
<ide>
<del> def on_kill(self):
<add> def on_kill(self) -> None:
<ide> """Cancel the submitted DataSync task."""
<ide> hook = self.get_hook()
<ide> if self.task_execution_arn:
<ide> self.log.info("Cancelling TaskExecutionArn %s", self.task_execution_arn)
<ide> hook.cancel_task_execution(task_execution_arn=self.task_execution_arn)
<ide> self.log.info("Cancelled TaskExecutionArn %s", self.task_execution_arn)
<ide>
<del> def _delete_datasync_task(self):
<add> def _delete_datasync_task(self) -> None:
<ide> """Deletes an AWS DataSync Task."""
<add> if not self.task_arn:
<add> return
<add>
<ide> hook = self.get_hook()
<ide> # Delete task:
<ide> self.log.info("Deleting Task with TaskArn %s", self.task_arn)
<ide> hook.delete_task(self.task_arn)
<ide> self.log.info("Task Deleted")
<del> return self.task_arn
<ide>
<del> def _get_location_arns(self, location_uri):
<add> def _get_location_arns(self, location_uri: str) -> List[str]:
<ide> location_arns = self.get_hook().get_location_arns(location_uri)
<ide> self.log.info("Found LocationArns %s for LocationUri %s", location_arns, location_uri)
<ide> return location_arns
<ide><path>airflow/providers/amazon/aws/operators/ecs.py
<ide> import re
<ide> import sys
<ide> from datetime import datetime
<del>from typing import Dict, Optional
<add>from typing import Optional
<add>
<add>from botocore.waiter import Waiter
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.models import BaseOperator
<ide> class ECSProtocol(Protocol):
<ide> - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
<ide> """
<ide>
<del> def run_task(self, **kwargs):
<add> def run_task(self, **kwargs) -> dict:
<ide> """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task""" # noqa: E501 # pylint: disable=line-too-long
<ide> ...
<ide>
<del> def get_waiter(self, x: str):
<add> def get_waiter(self, x: str) -> Waiter:
<ide> """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.get_waiter""" # noqa: E501 # pylint: disable=line-too-long
<ide> ...
<ide>
<del> def describe_tasks(self, cluster: str, tasks) -> Dict:
<add> def describe_tasks(self, cluster: str, tasks) -> dict:
<ide> """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_tasks""" # noqa: E501 # pylint: disable=line-too-long
<ide> ...
<ide>
<del> def stop_task(self, cluster, task, reason: str) -> Dict:
<add> def stop_task(self, cluster, task, reason: str) -> dict:
<ide> """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.stop_task""" # noqa: E501 # pylint: disable=line-too-long
<ide> ...
<ide>
<ide> class ECSOperator(BaseOperator): # pylint: disable=too-many-instance-attributes
<ide> """
<ide>
<ide> ui_color = '#f0ede4'
<del> client = None # type: Optional[ECSProtocol]
<del> arn = None # type: Optional[str]
<ide> template_fields = ('overrides',)
<ide>
<ide> @apply_defaults
<ide> def __init__(
<ide> self,
<ide> *,
<del> task_definition,
<del> cluster,
<del> overrides, # pylint: disable=too-many-arguments
<del> aws_conn_id=None,
<del> region_name=None,
<del> launch_type='EC2',
<del> group=None,
<del> placement_constraints=None,
<del> placement_strategy=None,
<del> platform_version='LATEST',
<del> network_configuration=None,
<del> tags=None,
<del> awslogs_group=None,
<del> awslogs_region=None,
<del> awslogs_stream_prefix=None,
<del> propagate_tags=None,
<add> task_definition: str,
<add> cluster: str,
<add> overrides: dict, # pylint: disable=too-many-arguments
<add> aws_conn_id: Optional[str] = None,
<add> region_name: Optional[str] = None,
<add> launch_type: str = 'EC2',
<add> group: Optional[str] = None,
<add> placement_constraints: Optional[list] = None,
<add> placement_strategy: Optional[list] = None,
<add> platform_version: str = 'LATEST',
<add> network_configuration: Optional[dict] = None,
<add> tags: Optional[dict] = None,
<add> awslogs_group: Optional[str] = None,
<add> awslogs_region: Optional[str] = None,
<add> awslogs_stream_prefix: Optional[str] = None,
<add> propagate_tags: Optional[str] = None,
<ide> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide> def __init__(
<ide> if self.awslogs_region is None:
<ide> self.awslogs_region = region_name
<ide>
<del> self.hook = None
<add> self.hook: Optional[AwsBaseHook] = None
<add> self.client: Optional[ECSProtocol] = None
<add> self.arn: Optional[str] = None
<ide>
<ide> def execute(self, context):
<ide> self.log.info(
<ide> def execute(self, context):
<ide> self._check_success_task()
<ide> self.log.info('ECS Task has been successfully executed: %s', response)
<ide>
<del> def _wait_for_task_ended(self):
<add> def _wait_for_task_ended(self) -> None:
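<add> # client and arn are assigned in execute(); skip waiting if the task never started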
<add> if not self.client or not self.arn:
<add> return
<add>
<ide> waiter = self.client.get_waiter('tasks_stopped')
<ide> waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
<ide> waiter.wait(cluster=self.cluster, tasks=[self.arn])
<ide>
<del> def _check_success_task(self):
<add> def _check_success_task(self) -> None:
<add> if not self.client or not self.arn:
<add> return
<add>
<ide> response = self.client.describe_tasks(cluster=self.cluster, tasks=[self.arn])
<ide> self.log.info('ECS Task stopped, check status: %s', response)
<ide>
<ide> def _check_success_task(self):
<ide> )
<ide> )
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> AwsBaseHook:
<ide> """Create and return an AwsHook."""
<del> if not self.hook:
<del> self.hook = AwsBaseHook(
<del> aws_conn_id=self.aws_conn_id, client_type='ecs', region_name=self.region_name
<del> )
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = AwsBaseHook(aws_conn_id=self.aws_conn_id, client_type='ecs', region_name=self.region_name)
<ide> return self.hook
<ide>
<del> def get_logs_hook(self):
<add> def get_logs_hook(self) -> AwsLogsHook:
<ide> """Create and return an AwsLogsHook."""
<ide> return AwsLogsHook(aws_conn_id=self.aws_conn_id, region_name=self.awslogs_region)
<ide>
<del> def on_kill(self):
<add> def on_kill(self) -> None:
<add> if not self.client or not self.arn:
<add> return
<add>
<ide> response = self.client.stop_task(
<ide> cluster=self.cluster, task=self.arn, reason='Task killed by the user'
<ide> )
<ide><path>airflow/providers/amazon/aws/operators/glue.py
<ide> from __future__ import unicode_literals
<ide>
<ide> import os.path
<add>from typing import Optional
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.glue import AwsGlueJobHook
<ide> class AwsGlueJobOperator(BaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> job_name='aws_glue_default_job',
<del> job_desc='AWS Glue Job with Airflow',
<del> script_location=None,
<del> concurrent_run_limit=None,
<del> script_args=None,
<del> retry_limit=None,
<del> num_of_dpus=6,
<del> aws_conn_id='aws_default',
<del> region_name=None,
<del> s3_bucket=None,
<del> iam_role_name=None,
<add> job_name: str = 'aws_glue_default_job',
<add> job_desc: str = 'AWS Glue Job with Airflow',
<add> script_location: Optional[str] = None,
<add> concurrent_run_limit: Optional[int] = None,
<add> script_args: Optional[dict] = None,
<add> retry_limit: Optional[int] = None,
<add> num_of_dpus: int = 6,
<add> aws_conn_id: str = 'aws_default',
<add> region_name: Optional[str] = None,
<add> s3_bucket: Optional[str] = None,
<add> iam_role_name: Optional[str] = None,
<ide> **kwargs,
<ide> ): # pylint: disable=too-many-arguments
<ide> super(AwsGlueJobOperator, self).__init__(**kwargs)
<ide> self.job_name = job_name
<ide> self.job_desc = job_desc
<ide> self.script_location = script_location
<del> self.concurrent_run_limit = concurrent_run_limit
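<add> # treat a missing limit as a single concurrent run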
<add> self.concurrent_run_limit = concurrent_run_limit or 1
<ide> self.script_args = script_args or {}
<ide> self.retry_limit = retry_limit
<ide> self.num_of_dpus = num_of_dpus
<ide> def __init__(
<ide> self.s3_bucket = s3_bucket
<ide> self.iam_role_name = iam_role_name
<ide> self.s3_protocol = "s3://"
<del> self.s3_artifcats_prefix = 'artifacts/glue-scripts/'
<add> self.s3_artifacts_prefix = 'artifacts/glue-scripts/'
<ide>
<ide> def execute(self, context):
<ide> """
<ide> def execute(self, context):
<ide> if self.script_location and not self.script_location.startswith(self.s3_protocol):
<ide> s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
<ide> script_name = os.path.basename(self.script_location)
<del> s3_hook.load_file(self.script_location, self.s3_bucket, self.s3_artifcats_prefix + script_name)
<add> s3_hook.load_file(self.script_location, self.s3_bucket, self.s3_artifacts_prefix + script_name)
<ide> glue_job = AwsGlueJobHook(
<ide> job_name=self.job_name,
<ide> desc=self.job_desc,
<ide><path>airflow/providers/amazon/aws/operators/s3_bucket.py
<ide> class S3CreateBucketOperator(BaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> bucket_name,
<add> bucket_name: str,
<ide> aws_conn_id: Optional[str] = "aws_default",
<ide> region_name: Optional[str] = None,
<ide> **kwargs,
<ide> class S3DeleteBucketOperator(BaseOperator):
<ide>
<ide> def __init__(
<ide> self,
<del> bucket_name,
<add> bucket_name: str,
<ide> force_delete: bool = False,
<ide> aws_conn_id: Optional[str] = "aws_default",
<ide> **kwargs,
<ide><path>airflow/providers/amazon/aws/operators/s3_copy_object.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional, Union
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.s3 import S3Hook
<ide> class S3CopyObjectOperator(BaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> source_bucket_key,
<del> dest_bucket_key,
<del> source_bucket_name=None,
<del> dest_bucket_name=None,
<del> source_version_id=None,
<del> aws_conn_id='aws_default',
<del> verify=None,
<add> source_bucket_key: str,
<add> dest_bucket_key: str,
<add> source_bucket_name: Optional[str] = None,
<add> dest_bucket_name: Optional[str] = None,
<add> source_version_id: Optional[str] = None,
<add> aws_conn_id: str = 'aws_default',
<add> verify: Optional[Union[str, bool]] = None,
<ide> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide><path>airflow/providers/amazon/aws/operators/s3_delete_objects.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional, Union
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.s3 import S3Hook
<ide> class S3DeleteObjectsOperator(BaseOperator):
<ide> template_fields = ('keys', 'bucket', 'prefix')
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, bucket, keys=None, prefix=None, aws_conn_id='aws_default', verify=None, **kwargs):
<add> def __init__(
<add> self,
<add> *,
<add> bucket: str,
<add> keys: Optional[Union[str, list]] = None,
<add> prefix: Optional[str] = None,
<add> aws_conn_id: str = 'aws_default',
<add> verify: Optional[Union[str, bool]] = None,
<add> **kwargs,
<add> ):
<ide>
<ide> if not bool(keys) ^ bool(prefix):
<ide> raise ValueError("Either keys or prefix should be set.")
<ide><path>airflow/providers/amazon/aws/operators/s3_list.py
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<ide>
<del>from typing import Iterable
<add>from typing import Iterable, Optional, Union
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.s3 import S3Hook
<ide> class S3ListOperator(BaseOperator):
<ide> ui_color = '#ffd700'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, bucket, prefix='', delimiter='', aws_conn_id='aws_default', verify=None, **kwargs):
<add> def __init__(
<add> self,
<add> *,
<add> bucket: str,
<add> prefix: str = '',
<add> delimiter: str = '',
<add> aws_conn_id: str = 'aws_default',
<add> verify: Optional[Union[str, bool]] = None,
<add> **kwargs,
<add> ):
<ide> super().__init__(**kwargs)
<ide> self.bucket = bucket
<ide> self.prefix = prefix
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_base.py
<ide> import json
<ide> from typing import Iterable
<ide>
<add>from cached_property import cached_property
<add>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
<ide> from airflow.utils.decorators import apply_defaults
<ide> class SageMakerBaseOperator(BaseOperator):
<ide> integer_fields = [] # type: Iterable[Iterable[str]]
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, config, aws_conn_id='aws_default', **kwargs):
<add> def __init__(self, *, config: dict, aws_conn_id: str = 'aws_default', **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.aws_conn_id = aws_conn_id
<ide> self.config = config
<del> self.hook = None
<ide>
<ide> def parse_integer(self, config, field):
<ide> """Recursive method for parsing string fields holding integer values to integers."""
<ide> def expand_role(self): # noqa: D402
<ide> def preprocess_config(self):
<ide> """Process the config into a usable form."""
<ide> self.log.info('Preprocessing the config and doing required s3_operations')
<del> self.hook = SageMakerHook(aws_conn_id=self.aws_conn_id)
<ide>
<ide> self.hook.configure_s3_resources(self.config)
<ide> self.parse_config_integers()
<ide> def preprocess_config(self):
<ide>
<ide> def execute(self, context):
<ide> raise NotImplementedError('Please implement execute() in sub class!')
<add>
<add> @cached_property
<add> def hook(self):
<add> """Return SageMakerHook"""
<add> return SageMakerHook(aws_conn_id=self.aws_conn_id)
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_endpoint.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> from botocore.exceptions import ClientError
<ide>
<ide> class SageMakerEndpointOperator(SageMakerBaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> config,
<del> wait_for_completion=True,
<del> check_interval=30,
<del> max_ingestion_time=None,
<del> operation='create',
<add> config: dict,
<add> wait_for_completion: bool = True,
<add> check_interval: int = 30,
<add> max_ingestion_time: Optional[int] = None,
<add> operation: str = 'create',
<ide> **kwargs,
<ide> ):
<ide> super().__init__(config=config, **kwargs)
<ide> def __init__(
<ide> raise ValueError('Invalid value! Argument operation has to be one of "create" and "update"')
<ide> self.create_integer_fields()
<ide>
<del> def create_integer_fields(self):
<add> def create_integer_fields(self) -> None:
<ide> """Set fields which should be casted to integers."""
<ide> if 'EndpointConfig' in self.config:
<ide> self.integer_fields = [['EndpointConfig', 'ProductionVariants', 'InitialInstanceCount']]
<ide>
<del> def expand_role(self):
<add> def expand_role(self) -> None:
<ide> if 'Model' not in self.config:
<ide> return
<ide> hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
<ide> config = self.config['Model']
<ide> if 'ExecutionRoleArn' in config:
<ide> config['ExecutionRoleArn'] = hook.expand_role(config['ExecutionRoleArn'])
<ide>
<del> def execute(self, context):
<add> def execute(self, context) -> dict:
<ide> self.preprocess_config()
<ide>
<ide> model_info = self.config.get('Model')
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_endpoint_config.py
<ide> class SageMakerEndpointConfigOperator(SageMakerBaseOperator):
<ide> integer_fields = [['ProductionVariants', 'InitialInstanceCount']]
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, config, **kwargs):
<add> def __init__(self, *, config: dict, **kwargs):
<ide> super().__init__(config=config, **kwargs)
<ide>
<ide> self.config = config
<ide>
<del> def execute(self, context):
<add> def execute(self, context) -> dict:
<ide> self.preprocess_config()
<ide>
<ide> self.log.info('Creating SageMaker Endpoint Config %s.', self.config['EndpointConfigName'])
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_model.py
<ide> def __init__(self, *, config, **kwargs):
<ide>
<ide> self.config = config
<ide>
<del> def expand_role(self):
<add> def expand_role(self) -> None:
<ide> if 'ExecutionRoleArn' in self.config:
<ide> hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
<ide> self.config['ExecutionRoleArn'] = hook.expand_role(self.config['ExecutionRoleArn'])
<ide>
<del> def execute(self, context):
<add> def execute(self, context) -> dict:
<ide> self.preprocess_config()
<ide>
<ide> self.log.info('Creating SageMaker Model %s.', self.config['ModelName'])
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_processing.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
<ide> class SageMakerProcessingOperator(SageMakerBaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> config,
<del> aws_conn_id,
<del> wait_for_completion=True,
<del> print_log=True,
<del> check_interval=30,
<del> max_ingestion_time=None,
<add> config: dict,
<add> aws_conn_id: str,
<add> wait_for_completion: bool = True,
<add> print_log: bool = True,
<add> check_interval: int = 30,
<add> max_ingestion_time: Optional[int] = None,
<ide> action_if_job_exists: str = "increment", # TODO use typing.Literal for this in Python 3.8
<ide> **kwargs,
<ide> ):
<ide> def __init__(
<ide> self.max_ingestion_time = max_ingestion_time
<ide> self._create_integer_fields()
<ide>
<del> def _create_integer_fields(self):
<add> def _create_integer_fields(self) -> None:
<ide> """Set fields which should be casted to integers."""
<ide> self.integer_fields = [
<ide> ['ProcessingResources', 'ClusterConfig', 'InstanceCount'],
<ide> def _create_integer_fields(self):
<ide> if 'StoppingCondition' in self.config:
<ide> self.integer_fields += [['StoppingCondition', 'MaxRuntimeInSeconds']]
<ide>
<del> def expand_role(self):
<add> def expand_role(self) -> None:
<ide> if 'RoleArn' in self.config:
<ide> hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
<ide> self.config['RoleArn'] = hook.expand_role(self.config['RoleArn'])
<ide>
<del> def execute(self, context):
<add> def execute(self, context) -> dict:
<ide> self.preprocess_config()
<ide>
<ide> processing_job_name = self.config["ProcessingJobName"]
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_training.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
<ide> class SageMakerTrainingOperator(SageMakerBaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> config,
<del> wait_for_completion=True,
<del> print_log=True,
<del> check_interval=30,
<del> max_ingestion_time=None,
<add> config: dict,
<add> wait_for_completion: bool = True,
<add> print_log: bool = True,
<add> check_interval: int = 30,
<add> max_ingestion_time: Optional[int] = None,
<ide> action_if_job_exists: str = "increment", # TODO use typing.Literal for this in Python 3.8
<ide> **kwargs,
<ide> ):
<ide> def __init__(
<ide> f"Provided value: '{action_if_job_exists}'."
<ide> )
<ide>
<del> def expand_role(self):
<add> def expand_role(self) -> None:
<ide> if 'RoleArn' in self.config:
<ide> hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
<ide> self.config['RoleArn'] = hook.expand_role(self.config['RoleArn'])
<ide>
<del> def execute(self, context):
<add> def execute(self, context) -> dict:
<ide> self.preprocess_config()
<ide>
<ide> training_job_name = self.config["TrainingJobName"]
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_transform.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional, List
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
<ide> class SageMakerTransformOperator(SageMakerBaseOperator):
<ide>
<ide> @apply_defaults
<ide> def __init__(
<del> self, *, config, wait_for_completion=True, check_interval=30, max_ingestion_time=None, **kwargs
<add> self,
<add> *,
<add> config: dict,
<add> wait_for_completion: bool = True,
<add> check_interval: int = 30,
<add> max_ingestion_time: Optional[int] = None,
<add> **kwargs,
<ide> ):
<ide> super().__init__(config=config, **kwargs)
<ide> self.config = config
<ide> def __init__(
<ide> self.max_ingestion_time = max_ingestion_time
<ide> self.create_integer_fields()
<ide>
<del> def create_integer_fields(self):
<add> def create_integer_fields(self) -> None:
<ide> """Set fields which should be casted to integers."""
<del> self.integer_fields = [
<add> self.integer_fields: List[List[str]] = [
<ide> ['Transform', 'TransformResources', 'InstanceCount'],
<ide> ['Transform', 'MaxConcurrentTransforms'],
<ide> ['Transform', 'MaxPayloadInMB'],
<ide> def create_integer_fields(self):
<ide> for field in self.integer_fields:
<ide> field.pop(0)
<ide>
<del> def expand_role(self):
<add> def expand_role(self) -> None:
<ide> if 'Model' not in self.config:
<ide> return
<ide> config = self.config['Model']
<ide> if 'ExecutionRoleArn' in config:
<ide> hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
<ide> config['ExecutionRoleArn'] = hook.expand_role(config['ExecutionRoleArn'])
<ide>
<del> def execute(self, context):
<add> def execute(self, context) -> dict:
<ide> self.preprocess_config()
<ide>
<ide> model_config = self.config.get('Model')
<ide><path>airflow/providers/amazon/aws/operators/sagemaker_tuning.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
<ide> class SageMakerTuningOperator(SageMakerBaseOperator):
<ide>
<ide> @apply_defaults
<ide> def __init__(
<del> self, *, config, wait_for_completion=True, check_interval=30, max_ingestion_time=None, **kwargs
<add> self,
<add> *,
<add> config: dict,
<add> wait_for_completion: bool = True,
<add> check_interval: int = 30,
<add> max_ingestion_time: Optional[int] = None,
<add> **kwargs,
<ide> ):
<ide> super().__init__(config=config, **kwargs)
<ide> self.config = config
<ide> self.wait_for_completion = wait_for_completion
<ide> self.check_interval = check_interval
<ide> self.max_ingestion_time = max_ingestion_time
<ide>
<del> def expand_role(self):
<add> def expand_role(self) -> None:
<ide> if 'TrainingJobDefinition' in self.config:
<ide> config = self.config['TrainingJobDefinition']
<ide> if 'RoleArn' in config:
<ide> hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
<ide> config['RoleArn'] = hook.expand_role(config['RoleArn'])
<ide>
<del> def execute(self, context):
<add> def execute(self, context) -> dict:
<ide> self.preprocess_config()
<ide>
<ide> self.log.info(
<ide><path>airflow/providers/amazon/aws/operators/sns.py
<ide> # under the License.
<ide>
<ide> """Publish message to SNS queue"""
<add>from typing import Optional
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.sns import AwsSnsHook
<ide> class SnsPublishOperator(BaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> target_arn,
<del> message,
<del> aws_conn_id='aws_default',
<del> subject=None,
<del> message_attributes=None,
<add> target_arn: str,
<add> message: str,
<add> aws_conn_id: str = 'aws_default',
<add> subject: Optional[str] = None,
<add> message_attributes: Optional[dict] = None,
<ide> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide><path>airflow/providers/amazon/aws/operators/sqs.py
<ide> # under the License.
<ide>
<ide> """Publish message to SQS queue"""
<add>from typing import Optional
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.sqs import SQSHook
<ide> class SQSPublishOperator(BaseOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> sqs_queue,
<del> message_content,
<del> message_attributes=None,
<del> delay_seconds=0,
<del> aws_conn_id='aws_default',
<add> sqs_queue: str,
<add> message_content: str,
<add> message_attributes: Optional[dict] = None,
<add> delay_seconds: int = 0,
<add> aws_conn_id: str = 'aws_default',
<ide> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide><path>airflow/providers/amazon/aws/operators/step_function_get_execution_output.py
<ide> # under the License.
<ide>
<ide> import json
<add>from typing import Optional
<ide>
<ide> from airflow.models import BaseOperator
<ide> from airflow.providers.amazon.aws.hooks.step_function import StepFunctionHook
<ide> class StepFunctionGetExecutionOutputOperator(BaseOperator):
<ide> ui_color = '#f9c915'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, execution_arn: str, aws_conn_id='aws_default', region_name=None, **kwargs):
<add> def __init__(
<add> self,
<add> *,
<add> execution_arn: str,
<add> aws_conn_id: str = 'aws_default',
<add> region_name: Optional[str] = None,
<add> **kwargs,
<add> ):
<ide> super().__init__(**kwargs)
<ide> self.execution_arn = execution_arn
<ide> self.aws_conn_id = aws_conn_id
<ide><path>airflow/providers/amazon/aws/operators/step_function_start_execution.py
<ide> def __init__(
<ide> state_machine_arn: str,
<ide> name: Optional[str] = None,
<ide> state_machine_input: Union[dict, str, None] = None,
<del> aws_conn_id='aws_default',
<del> region_name=None,
<add> aws_conn_id: str = 'aws_default',
<add> region_name: Optional[str] = None,
<ide> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide><path>airflow/providers/amazon/aws/sensors/cloud_formation.py
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<ide> """This module contains sensors for AWS CloudFormation."""
<add>from typing import Optional
<add>
<ide> from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
<ide> from airflow.sensors.base_sensor_operator import BaseSensorOperator
<ide> from airflow.utils.decorators import apply_defaults
<ide> class CloudFormationDeleteStackSensor(BaseSensorOperator):
<ide> ui_color = '#C5CAE9'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, stack_name, aws_conn_id='aws_default', region_name=None, **kwargs):
<add> def __init__(
<add> self,
<add> *,
<add> stack_name: str,
<add> aws_conn_id: str = 'aws_default',
<add> region_name: Optional[str] = None,
<add> **kwargs,
<add> ):
<ide> super().__init__(**kwargs)
<ide> self.aws_conn_id = aws_conn_id
<ide> self.region_name = region_name
<ide> self.stack_name = stack_name
<del> self.hook = None
<add> self.hook: Optional[AWSCloudFormationHook] = None
<ide>
<ide> def poke(self, context):
<ide> stack_status = self.get_hook().get_stack_status(self.stack_name)
<ide> def poke(self, context):
<ide> return False
<ide> raise ValueError(f'Stack {self.stack_name} in bad state: {stack_status}')
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> AWSCloudFormationHook:
<ide> """Create and return an AWSCloudFormationHook"""
<del> if not self.hook:
<del> self.hook = AWSCloudFormationHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = AWSCloudFormationHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
<ide> return self.hook
<ide><path>airflow/providers/amazon/aws/sensors/emr_base.py
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<ide>
<del>from typing import Any, Dict, Optional
<add>from typing import Any, Dict, Optional, Iterable
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.emr import EmrHook
<ide> class EmrBaseSensor(BaseSensorOperator):
<ide> ui_color = '#66c3ff'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, aws_conn_id='aws_default', **kwargs):
<add> def __init__(self, *, aws_conn_id: str = 'aws_default', **kwargs):
<ide> super().__init__(**kwargs)
<ide> self.aws_conn_id = aws_conn_id
<del> self.target_states = None # will be set in subclasses
<del> self.failed_states = None # will be set in subclasses
<del> self.hook = None
<add> self.target_states: Optional[Iterable[str]] = None # will be set in subclasses
<add> self.failed_states: Optional[Iterable[str]] = None # will be set in subclasses
<add> self.hook: Optional[EmrHook] = None
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> EmrHook:
<ide> """Get EmrHook"""
<del> if not self.hook:
<del> self.hook = EmrHook(aws_conn_id=self.aws_conn_id)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = EmrHook(aws_conn_id=self.aws_conn_id)
<ide> return self.hook
<ide>
<ide> def poke(self, context):
<ide><path>airflow/providers/amazon/aws/sensors/glue.py
<ide> class AwsGlueJobSensor(BaseSensorOperator):
<ide> template_fields = ('job_name', 'run_id')
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, job_name, run_id, aws_conn_id='aws_default', **kwargs):
<add> def __init__(self, *, job_name: str, run_id: str, aws_conn_id: str = 'aws_default', **kwargs):
<ide> super().__init__(**kwargs)
<ide> self.job_name = job_name
<ide> self.run_id = run_id
<ide><path>airflow/providers/amazon/aws/sensors/glue_catalog_partition.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> from airflow.providers.amazon.aws.hooks.glue_catalog import AwsGlueCatalogHook
<ide> from airflow.sensors.base_sensor_operator import BaseSensorOperator
<ide> class AwsGlueCatalogPartitionSensor(BaseSensorOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> table_name,
<del> expression="ds='{{ ds }}'",
<del> aws_conn_id='aws_default',
<del> region_name=None,
<del> database_name='default',
<del> poke_interval=60 * 3,
<add> table_name: str,
<add> expression: str = "ds='{{ ds }}'",
<add> aws_conn_id: str = 'aws_default',
<add> region_name: Optional[str] = None,
<add> database_name: str = 'default',
<add> poke_interval: int = 60 * 3,
<ide> **kwargs,
<ide> ):
<ide> super().__init__(poke_interval=poke_interval, **kwargs)
<ide> def __init__(
<ide> self.table_name = table_name
<ide> self.expression = expression
<ide> self.database_name = database_name
<del> self.hook = None
<add> self.hook: Optional[AwsGlueCatalogHook] = None
<ide>
<ide> def poke(self, context):
<ide> """Checks for existence of the partition in the AWS Glue Catalog table"""
<ide> def poke(self, context):
<ide>
<ide> return self.get_hook().check_for_partition(self.database_name, self.table_name, self.expression)
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> AwsGlueCatalogHook:
<ide> """Gets the AwsGlueCatalogHook"""
<del> if not self.hook:
<del> self.hook = AwsGlueCatalogHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = AwsGlueCatalogHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
<ide> return self.hook
<ide><path>airflow/providers/amazon/aws/sensors/redshift.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> from airflow.providers.amazon.aws.hooks.redshift import RedshiftHook
<ide> from airflow.sensors.base_sensor_operator import BaseSensorOperator
<ide> class AwsRedshiftClusterSensor(BaseSensorOperator):
<ide> template_fields = ('cluster_identifier', 'target_status')
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, cluster_identifier, target_status='available', aws_conn_id='aws_default', **kwargs):
<add> def __init__(
<add> self,
<add> *,
<add> cluster_identifier: str,
<add> target_status: str = 'available',
<add> aws_conn_id: str = 'aws_default',
<add> **kwargs,
<add> ):
<ide> super().__init__(**kwargs)
<ide> self.cluster_identifier = cluster_identifier
<ide> self.target_status = target_status
<ide> self.aws_conn_id = aws_conn_id
<del> self.hook = None
<add> self.hook: Optional[RedshiftHook] = None
<ide>
<ide> def poke(self, context):
<ide> self.log.info('Poking for status : %s\nfor cluster %s', self.target_status, self.cluster_identifier)
<ide> return self.get_hook().cluster_status(self.cluster_identifier) == self.target_status
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> RedshiftHook:
<ide> """Create and return a RedshiftHook"""
<del> if not self.hook:
<del> self.hook = RedshiftHook(aws_conn_id=self.aws_conn_id)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = RedshiftHook(aws_conn_id=self.aws_conn_id)
<ide> return self.hook
<ide><path>airflow/providers/amazon/aws/sensors/s3_key.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<del>
<del>
<add>from typing import Optional, Union
<ide> from urllib.parse import urlparse
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> class S3KeySensor(BaseSensorOperator):
<ide> def __init__(
<ide> self,
<ide> *,
<del> bucket_key,
<del> bucket_name=None,
<del> wildcard_match=False,
<del> aws_conn_id='aws_default',
<del> verify=None,
<add> bucket_key: str,
<add> bucket_name: Optional[str] = None,
<add> wildcard_match: bool = False,
<add> aws_conn_id: str = 'aws_default',
<add> verify: Optional[Union[str, bool]] = None,
<ide> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide> def __init__(
<ide> self.wildcard_match = wildcard_match
<ide> self.aws_conn_id = aws_conn_id
<ide> self.verify = verify
<del> self.hook = None
<add> self.hook: Optional[S3Hook] = None
<ide>
<ide> def poke(self, context):
<ide> self.log.info('Poking for key : s3://%s/%s', self.bucket_name, self.bucket_key)
<ide> if self.wildcard_match:
<ide> return self.get_hook().check_for_wildcard_key(self.bucket_key, self.bucket_name)
<ide> return self.get_hook().check_for_key(self.bucket_key, self.bucket_name)
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> S3Hook:
<ide> """Create and return an S3Hook"""
<del> if not self.hook:
<del> self.hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
<ide> return self.hook
<ide><path>airflow/providers/amazon/aws/sensors/s3_prefix.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional, Union
<ide>
<ide> from airflow.providers.amazon.aws.hooks.s3 import S3Hook
<ide> from airflow.sensors.base_sensor_operator import BaseSensorOperator
<ide> class S3PrefixSensor(BaseSensorOperator):
<ide>
<ide> @apply_defaults
<ide> def __init__(
<del> self, *, bucket_name, prefix, delimiter='/', aws_conn_id='aws_default', verify=None, **kwargs
<add> self,
<add> *,
<add> bucket_name: str,
<add> prefix: str,
<add> delimiter: str = '/',
<add> aws_conn_id: str = 'aws_default',
<add> verify: Optional[Union[str, bool]] = None,
<add> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide> # Parse
<ide> def __init__(
<ide> self.full_url = "s3://" + bucket_name + '/' + prefix
<ide> self.aws_conn_id = aws_conn_id
<ide> self.verify = verify
<del> self.hook = None
<add> self.hook: Optional[S3Hook] = None
<ide>
<ide> def poke(self, context):
<ide> self.log.info('Poking for prefix : %s in bucket s3://%s', self.prefix, self.bucket_name)
<ide> return self.get_hook().check_for_prefix(
<ide> prefix=self.prefix, delimiter=self.delimiter, bucket_name=self.bucket_name
<ide> )
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> S3Hook:
<ide> """Create and return an S3Hook"""
<del> if not self.hook:
<del> self.hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
<ide> return self.hook
<ide><path>airflow/providers/amazon/aws/sensors/sagemaker_base.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional, Set
<add>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
<ide> from airflow.sensors.base_sensor_operator import BaseSensorOperator
<ide> class SageMakerBaseSensor(BaseSensorOperator):
<ide> ui_color = '#ededed'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, aws_conn_id='aws_default', **kwargs):
<add> def __init__(self, *, aws_conn_id: str = 'aws_default', **kwargs):
<ide> super().__init__(**kwargs)
<ide> self.aws_conn_id = aws_conn_id
<del> self.hook = None
<add> self.hook: Optional[SageMakerHook] = None
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> SageMakerHook:
<ide> """Get SageMakerHook"""
<del> if not self.hook:
<del> self.hook = SageMakerHook(aws_conn_id=self.aws_conn_id)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = SageMakerHook(aws_conn_id=self.aws_conn_id)
<ide> return self.hook
<ide>
<ide> def poke(self, context):
<ide> def poke(self, context):
<ide> raise AirflowException('Sagemaker job failed for the following reason: %s' % failed_reason)
<ide> return True
<ide>
<del> def non_terminal_states(self):
<add> def non_terminal_states(self) -> Set[str]:
<ide> """Placeholder for returning states with should not terminate."""
<ide> raise NotImplementedError('Please implement non_terminal_states() in subclass')
<ide>
<del> def failed_states(self):
<add> def failed_states(self) -> Set[str]:
<ide> """Placeholder for returning states with are considered failed."""
<ide> raise NotImplementedError('Please implement failed_states() in subclass')
<ide>
<del> def get_sagemaker_response(self):
<add> def get_sagemaker_response(self) -> Optional[dict]:
<ide> """Placeholder for checking status of a SageMaker task."""
<ide> raise NotImplementedError('Please implement get_sagemaker_response() in subclass')
<ide>
<del> def get_failed_reason_from_response(self, response): # pylint: disable=unused-argument
<add> def get_failed_reason_from_response(self, response: dict) -> str: # pylint: disable=unused-argument
<ide> """Placeholder for extracting the reason for failure from an AWS response."""
<ide> return 'Unknown'
<ide>
<del> def state_from_response(self, response):
<add> def state_from_response(self, response: dict) -> str:
<ide> """Placeholder for extracting the state from an AWS response."""
<ide> raise NotImplementedError('Please implement state_from_response() in subclass')
<ide><path>airflow/providers/amazon/aws/sensors/sagemaker_training.py
<ide> # KIND, either express or implied. See the License for the
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<add>from typing import Optional
<ide>
<ide> import time
<ide>
<ide> def __init__(self, *, job_name, print_log=True, **kwargs):
<ide> self.print_log = print_log
<ide> self.positions = {}
<ide> self.stream_names = []
<del> self.instance_count = None
<del> self.state = None
<add> self.instance_count: Optional[int] = None
<add> self.state: Optional[int] = None
<ide> self.last_description = None
<ide> self.last_describe_job_call = None
<ide> self.log_resource_inited = False
<ide>
<del> def init_log_resource(self, hook):
<add> def init_log_resource(self, hook: SageMakerHook) -> None:
<ide> """Set tailing LogState for associated training job."""
<ide> description = hook.describe_training_job(self.job_name)
<ide> self.instance_count = description['ResourceConfig']['InstanceCount']
<ide><path>airflow/providers/amazon/aws/sensors/sagemaker_transform.py
<ide> class SageMakerTransformSensor(SageMakerBaseSensor):
<ide> template_ext = ()
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, job_name, **kwargs):
<add> def __init__(self, *, job_name: str, **kwargs):
<ide> super().__init__(**kwargs)
<ide> self.job_name = job_name
<ide>
<ide><path>airflow/providers/amazon/aws/sensors/sagemaker_tuning.py
<ide> class SageMakerTuningSensor(SageMakerBaseSensor):
<ide> template_ext = ()
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, job_name, **kwargs):
<add> def __init__(self, *, job_name: str, **kwargs):
<ide> super().__init__(**kwargs)
<ide> self.job_name = job_name
<ide>
<ide><path>airflow/providers/amazon/aws/sensors/sqs.py
<ide> # specific language governing permissions and limitations
<ide> # under the License.
<ide> """Reads and then deletes the message from SQS queue"""
<add>from typing import Optional
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.sqs import SQSHook
<ide> class SQSSensor(BaseSensorOperator):
<ide>
<ide> @apply_defaults
<ide> def __init__(
<del> self, *, sqs_queue, aws_conn_id='aws_default', max_messages=5, wait_time_seconds=1, **kwargs
<add> self,
<add> *,
<add> sqs_queue,
<add> aws_conn_id: str = 'aws_default',
<add> max_messages: int = 5,
<add> wait_time_seconds: int = 1,
<add> **kwargs,
<ide> ):
<ide> super().__init__(**kwargs)
<ide> self.sqs_queue = sqs_queue
<ide> self.aws_conn_id = aws_conn_id
<ide> self.max_messages = max_messages
<ide> self.wait_time_seconds = wait_time_seconds
<del> self.hook = None
<add> self.hook: Optional[SQSHook] = None
<ide>
<ide> def poke(self, context):
<ide> """
<ide> def poke(self, context):
<ide>
<ide> return False
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> SQSHook:
<ide> """Create and return an SQSHook"""
<del> if not self.hook:
<del> self.hook = SQSHook(aws_conn_id=self.aws_conn_id)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = SQSHook(aws_conn_id=self.aws_conn_id)
<ide> return self.hook
<ide><path>airflow/providers/amazon/aws/sensors/step_function_execution.py
<ide> # under the License.
<ide>
<ide> import json
<add>from typing import Optional
<ide>
<ide> from airflow.exceptions import AirflowException
<ide> from airflow.providers.amazon.aws.hooks.step_function import StepFunctionHook
<ide> class StepFunctionExecutionSensor(BaseSensorOperator):
<ide> ui_color = '#66c3ff'
<ide>
<ide> @apply_defaults
<del> def __init__(self, *, execution_arn: str, aws_conn_id='aws_default', region_name=None, **kwargs):
<add> def __init__(
<add> self,
<add> *,
<add> execution_arn: str,
<add> aws_conn_id: str = 'aws_default',
<add> region_name: Optional[str] = None,
<add> **kwargs,
<add> ):
<ide> super().__init__(**kwargs)
<ide> self.execution_arn = execution_arn
<ide> self.aws_conn_id = aws_conn_id
<ide> self.region_name = region_name
<del> self.hook = None
<add> self.hook: Optional[StepFunctionHook] = None
<ide>
<ide> def poke(self, context):
<ide> execution_status = self.get_hook().describe_execution(self.execution_arn)
<ide> def poke(self, context):
<ide> self.xcom_push(context, 'output', output)
<ide> return True
<ide>
<del> def get_hook(self):
<add> def get_hook(self) -> StepFunctionHook:
<ide> """Create and return a StepFunctionHook"""
<del> if not self.hook:
<del> self.hook = StepFunctionHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
<add> if self.hook:
<add> return self.hook
<add>
<add> self.hook = StepFunctionHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
<ide> return self.hook
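
The sensors in this patch all adopt the same memoized-hook idiom: build the hook on first use, then return the cached instance on every later call. A minimal, self-contained sketch of the pattern (the `MyHook`/`MySensor` names are hypothetical stand-ins, not Airflow classes):

```python
from typing import Optional


class MyHook:
    """Hypothetical stand-in for an Airflow hook."""

    def __init__(self, aws_conn_id: str) -> None:
        self.aws_conn_id = aws_conn_id


class MySensor:
    """Hypothetical sensor showing the cached get_hook() pattern."""

    def __init__(self, aws_conn_id: str = "aws_default") -> None:
        self.aws_conn_id = aws_conn_id
        self.hook: Optional[MyHook] = None

    def get_hook(self) -> MyHook:
        # Early return when the hook was already created.
        if self.hook:
            return self.hook

        self.hook = MyHook(aws_conn_id=self.aws_conn_id)
        return self.hook
```
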
<ide><path>tests/providers/amazon/aws/operators/test_batch.py
<ide> def test_init(self):
<ide> self.assertEqual(self.batch.waiters, None)
<ide> self.assertEqual(self.batch.hook.max_retries, self.MAX_RETRIES)
<ide> self.assertEqual(self.batch.hook.status_retries, self.STATUS_RETRIES)
<del> self.assertEqual(self.batch.parameters, None)
<add> self.assertEqual(self.batch.parameters, {})
<ide> self.assertEqual(self.batch.overrides, {})
<ide> self.assertEqual(self.batch.array_properties, {})
<ide> self.assertEqual(self.batch.hook.region_name, "eu-west-1")
<ide> def test_execute_without_failures(self, check_mock, wait_mock):
<ide> containerOverrides={},
<ide> jobDefinition="hello-world",
<ide> arrayProperties={},
<del> parameters=None,
<add> parameters={},
<ide> )
<ide>
<ide> self.assertEqual(self.batch.job_id, JOB_ID)
<ide> def test_execute_with_failures(self):
<ide> containerOverrides={},
<ide> jobDefinition="hello-world",
<ide> arrayProperties={},
<del> parameters=None,
<add> parameters={},
<ide> )
<ide>
<ide> @mock.patch.object(AwsBatchClientHook, "check_job_success") | 40 |
Javascript | Javascript | cover triggerreport() failure case | d73d86166541dd121235f708f83d2ad9cf24b997 | <ide><path>test/report/test-report-triggerreport.js
<ide> function validate() {
<ide> const report = child.stderr.toString().split('Node.js report completed')[0];
<ide> helper.validateContent(report);
<ide> }
<add>
<add>{
<add> // Test the case where the report file cannot be opened.
<add> const reportDir = path.join(tmpdir.path, 'does', 'not', 'exist');
<add> const args = ['--experimental-report',
<add> `--diagnostic-report-directory=${reportDir}`,
<add> '-e',
<add> 'process.report.triggerReport()'];
<add> const child = spawnSync(process.execPath, args, { cwd: tmpdir.path });
<add>
<add> assert.strictEqual(child.status, 0);
<add> assert.strictEqual(child.signal, null);
<add> assert.strictEqual(child.stdout.toString().trim(), '');
<add> const stderr = child.stderr.toString();
<add> assert(stderr.includes('Failed to open Node.js report file:'));
<add>} | 1 |
Python | Python | reduce layers line-too-long | 8401e08334d4b1f102a6ee9479738bacfee0600c | <ide><path>keras/layers/__init__.py
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<del># pylint: disable=g-bad-import-order,g-direct-tensorflow-import,disable=g-import-not-at-top
<ide> from tensorflow.python import tf2
<ide>
<ide> # Generic layers.
<ide><path>keras/layers/activation/softmax.py
<ide> class Softmax(Layer):
<ide> normalization is applied.
<ide> Call arguments:
<ide> inputs: The inputs, or logits to the softmax layer.
<del> mask: A boolean mask of the same shape as `inputs`. Defaults to `None`. The
<del> mask specifies 1 to keep and 0 to mask.
<add> mask: A boolean mask of the same shape as `inputs`. Defaults to `None`.
<add> The mask specifies 1 to keep and 0 to mask.
<ide>
<ide> Returns:
<ide> softmaxed output with the same shape as `inputs`.
<ide> def __init__(self, axis=-1, **kwargs):
<ide>
<ide> def call(self, inputs, mask=None):
<ide> if mask is not None:
<del> # Since mask is 1.0 for positions we want to keep and 0.0 for
<del> # masked positions, this operation will create a tensor which is 0.0 for
<add> # Since mask is 1.0 for positions we want to keep and 0.0 for masked
<add> # positions, this operation will create a tensor which is 0.0 for
<ide> # positions we want to attend and -1e9 for masked positions.
<ide> adder = (1.0 - tf.cast(mask, inputs.dtype)) * (
<ide> _large_compatible_negative(inputs.dtype)
<ide> )
<ide>
<del> # Since we are adding it to the raw scores before the softmax, this is
<del> # effectively the same as removing these entirely.
<add> # Since we are adding it to the raw scores before the softmax, this
<add> # is effectively the same as removing these entirely.
<ide> inputs += adder
<ide> if isinstance(self.axis, (tuple, list)):
<ide> if len(self.axis) > 1:
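
The adder trick above is easy to reproduce outside of TensorFlow; a NumPy sketch of the same masked softmax, using -1e9 as the large negative bias the comment describes:

```python
import numpy as np


def masked_softmax(logits, mask, axis=-1):
    # mask is 1.0 for positions to keep, 0.0 for positions to drop;
    # the large negative bias drives masked scores toward zero weight.
    adder = (1.0 - mask.astype(logits.dtype)) * -1e9
    logits = logits + adder
    shifted = logits - logits.max(axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=axis, keepdims=True)


print(masked_softmax(np.array([[1.0, 2.0, 3.0]]),
                     np.array([[1.0, 1.0, 0.0]])))
# -> [[0.26894142 0.73105858 0.        ]]
```
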
<ide><path>keras/layers/activation/thresholded_relu.py
<ide> def __init__(self, theta=1.0, **kwargs):
<ide> super().__init__(**kwargs)
<ide> if theta is None:
<ide> raise ValueError(
<del> "Theta of a Thresholded ReLU layer cannot be None, expecting a float."
<del> f" Received: {theta}"
<add> "Theta of a Thresholded ReLU layer cannot be None, expecting a "
<add> f"float. Received: {theta}"
<ide> )
<ide> if theta < 0:
<ide> raise ValueError(
<ide><path>keras/layers/attention/additive_attention.py
<ide> class AdditiveAttention(BaseDenseAttention):
<ide> """Additive attention layer, a.k.a. Bahdanau-style attention.
<ide>
<del> Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
<del> shape `[batch_size, Tv, dim]` and `key` tensor of shape
<add> Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor
<add> of shape `[batch_size, Tv, dim]` and `key` tensor of shape
<ide> `[batch_size, Tv, dim]`. The calculation follows the steps:
<ide>
<ide> 1. Reshape `query` and `key` into shapes `[batch_size, Tq, 1, dim]`
<ide> class AdditiveAttention(BaseDenseAttention):
<ide> `return tf.matmul(distribution, value)`.
<ide>
<ide> Args:
<del> use_scale: If `True`, will create a variable to scale the attention scores.
<del> causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
<del> that position `i` cannot attend to positions `j > i`. This prevents the
<del> flow of information from the future towards the past.
<del> Defaults to `False`.
<add> use_scale: If `True`, will create a variable to scale the attention
<add> scores.
<add> causal: Boolean. Set to `True` for decoder self-attention. Adds a mask
<add> such that position `i` cannot attend to positions `j > i`. This prevents
<add> the flow of information from the future towards the past. Defaults to
<add> `False`.
<ide> dropout: Float between 0 and 1. Fraction of the units to drop for the
<ide> attention scores. Defaults to 0.0.
<ide>
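
A NumPy sketch of the score computation this docstring describes (steps 1 and 2): broadcastable reshapes followed by a `tanh` sum reduced over the feature axis. The learned scale vector is omitted for brevity:

```python
import numpy as np

batch, Tq, Tv, dim = 1, 2, 3, 4
query = np.random.rand(batch, Tq, dim)
key = np.random.rand(batch, Tv, dim)

# Step 1: reshape to [batch, Tq, 1, dim] and [batch, 1, Tv, dim] so the
# sum broadcasts over every (query, key) pair.
q = query[:, :, None, :]
k = key[:, None, :, :]

# Step 2: non-linear sum reduced over the feature axis -> [batch, Tq, Tv].
scores = np.tanh(q + k).sum(axis=-1)
print(scores.shape)  # (1, 2, 3)
```
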
<ide><path>keras/layers/attention/additive_attention_test.py
<ide> def test_calculate_scores_multi_dim(self):
<ide> )
<ide> actual = attention_layer._calculate_scores(query=q, key=k)
<ide>
<del> # pylint:disable=line-too-long
<del> # expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581
<del> # expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449
<del> # expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652
<del> # expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449
<del> # expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652
<del> # expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916
<del> # pylint:enable=line-too-long
<add> # expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + \
<add> # 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581
<add> # expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + \
<add> # 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449
<add> # expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + \
<add> # 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652
<add> # expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + \
<add> # 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449
<add> # expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + \
<add> # 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652
<add> # expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + \
<add> # 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916
<ide> expected = np.array(
<ide> [
<ide> [
<ide> def test_multi_dim(self):
<ide> attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
<ide> actual = attention_layer([q, v], mask=[None, v_mask])
<ide>
<del> # pylint:disable=line-too-long
<ide> # Expected scores of shape [1, 1, 3]
<del> # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
<add> # scores = [[[0.5 * tanh(1.1 + 1.6),
<add> # 0.5 * tanh(1.1 + 0.7),
<add> # 0.5 * tanh(1.1 - 0.8)]]]
<ide> # = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
<ide> # Expected attention distribution = softmax(scores) with zeros in
<ide> # positions where v_mask == False.
<ide> def test_multi_dim(self):
<ide> # Expected tensor of shape [1, 1, 1].
<ide> # expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
<ide> # = 1.15497245968
<del> # pylint:enable=line-too-long
<ide> expected = np.array([[[1.15497245968]]], dtype=np.float32)
<ide> self.assertAllClose(expected, actual)
<ide>
<ide> def test_multi_dim_with_key(self):
<ide> attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
<ide> actual = attention_layer([q, v, k], mask=[None, v_mask])
<ide>
<del> # pylint:disable=line-too-long
<ide> # Expected scores of shape [1, 1, 3]
<del> # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
<add> # scores = [[[0.5 * tanh(1.1 + 1.6),
<add> # 0.5 * tanh(1.1 + 0.7),
<add> # 0.5 * tanh(1.1 - 0.8)]]]
<ide> # = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
<ide> # Expected attention distribution = softmax(scores) with zeros in
<ide> # positions where v_mask == False.
<ide> def test_multi_dim_with_key(self):
<ide> # Expected tensor of shape [1, 1, 1].
<ide> # expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3
<ide> # = 0.64834251342
<del> # pylint:enable=line-too-long
<ide> expected = np.array([[[0.64834251342]]], dtype=np.float32)
<ide> self.assertAllClose(expected, actual)
<ide>
<ide> def test_multi_dim_with_query_mask(self):
<ide> attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
<ide> actual = attention_layer([q, v], mask=[q_mask, v_mask])
<ide>
<del> # pylint:disable=line-too-long
<ide> # Expected scores of shape [1, 2, 3]
<del> # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)],
<del> # [0.5 * tanh(-0.5 + 1.6), 0.5 * tanh(-0.5 + 0.7), 0.5 * tanh(-0.5 - 0.8)]]]
<add> # scores = [[[0.5 * tanh(1.1 + 1.6),
<add> # 0.5 * tanh(1.1 + 0.7),
<add> # 0.5 * tanh(1.1 - 0.8)],
<add> # [0.5 * tanh(-0.5 + 1.6),
<add> # 0.5 * tanh(-0.5 + 0.7),
<add> # 0.5 * tanh(-0.5 - 0.8)]]]
<ide> # = [[[0.49550372683, 0.47340300642, 0.14565630622],
<ide> # [0.40024951088, 0.09868766011, -0.43086157965]]]
<ide> # Expected attention distribution = softmax(scores) with zeros in
<ide> def test_multi_dim_with_query_mask(self):
<ide> # expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
<ide> # = 1.15497245968
<ide> # expected000 = 0
<del> # pylint:enable=line-too-long
<ide> expected = np.array([[[1.15497245968], [0.0]]], dtype=np.float32)
<ide> self.assertAllClose(expected, actual)
<ide>
<ide><path>keras/layers/attention/attention.py
<ide> class Attention(BaseDenseAttention):
<ide> """Dot-product attention layer, a.k.a. Luong-style attention.
<ide>
<del> Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
<del> shape `[batch_size, Tv, dim]` and `key` tensor of shape
<add> Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor
<add> of shape `[batch_size, Tv, dim]` and `key` tensor of shape
<ide> `[batch_size, Tv, dim]`. The calculation follows the steps:
<ide>
<ide> 1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot
<ide> class Attention(BaseDenseAttention):
<ide> Args:
<ide> use_scale: If `True`, will create a scalar variable to scale the attention
<ide> scores.
<del> causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
<del> that position `i` cannot attend to positions `j > i`. This prevents the
<del> flow of information from the future towards the past.
<del> Defaults to `False`.
<add> causal: Boolean. Set to `True` for decoder self-attention. Adds a mask
<add> such that position `i` cannot attend to positions `j > i`. This prevents
<add> the flow of information from the future towards the past. Defaults to
<add> `False`.
<ide> dropout: Float between 0 and 1. Fraction of the units to drop for the
<ide> attention scores. Defaults to 0.0.
<ide> score_mode: Function to use to compute attention scores, one of
<ide> def __init__(self, use_scale=False, score_mode="dot", **kwargs):
<ide> )
<ide>
<ide> def build(self, input_shape):
<del> """Creates variable when `use_scale` is True or `score_mode` is `concat`."""
<add> """Creates variable when `use_scale` is True or `score_mode` is
<add> `concat`."""
<ide> if self.use_scale:
<ide> self.scale = self.add_weight(
<ide> name="scale",
<ide><path>keras/layers/attention/attention_test.py
<ide> def test_calculate_scores_multi_dim_concat(self):
<ide> attention_layer._calculate_scores(query=q, key=k)
<ide> )
<ide>
<del> # pylint:disable=line-too-long
<del> # expected000 = tanh(1.+1.5) + tanh(1.1+1.6) + tanh(1.2+1.7) + tanh(1.3+1.8) = 3.96753427840
<del> # expected001 = tanh(1.+2.5) + tanh(1.1+2.6) + tanh(1.2+2.7) + tanh(1.3+2.8) = 3.99558784825
<del> # expected002 = tanh(1.+3.5) + tanh(1.1+3.6) + tanh(1.2+3.7) + tanh(1.3+3.8) = 3.99940254147
<del> # expected010 = tanh(2.+1.5) + tanh(2.1+1.6) + tanh(2.2+1.7) + tanh(2.3+1.8) = 3.99558784825
<del> # expected011 = tanh(2.+2.5) + tanh(2.1+2.6) + tanh(2.2+2.7) + tanh(2.3+2.8) = 3.99940254147
<del> # expected012 = tanh(2.+3.5) + tanh(2.1+3.6) + tanh(2.2+3.7) + tanh(2.3+3.8) = 3.99991913657
<add> # expected000 = tanh(1.+1.5) + tanh(1.1+1.6) + \
<add> # tanh(1.2+1.7) + tanh(1.3+1.8) = 3.96753427840
<add> # expected001 = tanh(1.+2.5) + tanh(1.1+2.6) + \
<add> # tanh(1.2+2.7) + tanh(1.3+2.8) = 3.99558784825
<add> # expected002 = tanh(1.+3.5) + tanh(1.1+3.6) + \
<add> # tanh(1.2+3.7) + tanh(1.3+3.8) = 3.99940254147
<add> # expected010 = tanh(2.+1.5) + tanh(2.1+1.6) + \
<add> # tanh(2.2+1.7) + tanh(2.3+1.8) = 3.99558784825
<add> # expected011 = tanh(2.+2.5) + tanh(2.1+2.6) + \
<add> # tanh(2.2+2.7) + tanh(2.3+2.8) = 3.99940254147
<add> # expected012 = tanh(2.+3.5) + tanh(2.1+3.6) + \
<add> # tanh(2.2+3.7) + tanh(2.3+3.8) = 3.99991913657
<ide> expected = np.array(
<ide> [
<ide> [
<ide> def test_multi_dim_with_query_mask(self, return_attention_scores):
<ide> )
<ide>
<ide> # Expected scores of shape [1, 2, 3]
<del> # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], [-0.5*1.6, -0.5*0.7, 0.5*0.8]]]
<add> # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8],
<add> # [-0.5*1.6, -0.5*0.7, 0.5*0.8]]]
<ide> # = [[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]]
<ide> # Expected attention distribution = softmax(scores) with zeros in
<ide> # positions where v_mask == False.
<ide> def test_self_attention_causal(self, return_attention_scores):
<ide> )
<ide>
<ide> # Expected scores of shape [1, 3, 3]
<del> # scores = [[0.25, 0.4, -0.15], [0.4, 0.64, -0.24], [-0.15, -0.24, 0.09]]
<add> # scores = [[0.25, 0.4, -0.15],
<add> # [0.4, 0.64, -0.24],
<add> # [-0.15, -0.24, 0.09]]
<ide> # Expected attention distribution = softmax(scores) lower triangular
<ide> # => attention_distribution00 = [1., 0., 0.]
<ide> # attention_distribution01
<ide> def test_self_attention_causal(self, return_attention_scores):
<ide> # expected000 = 0.5
<ide> # expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8
<ide> # = 0.66791409477
<del> # expected020 = 0.31395396638 * 0.5 +0.28693232061 * 0.8 -0.399113713 * 0.3
<add> # expected020 = 0.31395396638 * 0.5 + \
<add> # 0.28693232061 * 0.8 - 0.399113713 * 0.3
<ide> # = 0.26678872577
<ide> expected = np.array(
<ide> [[[0.5], [0.66791409477], [0.26678872577]]], dtype=np.float32
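
The hand-computed expectations in these comments can be checked mechanically; for instance, re-deriving `expected010` from the causal self-attention comment above:

```python
import numpy as np

# At t=1 the causal mask exposes positions 0 and 1 only.
scores = np.array([0.4, 0.64])
attn = np.exp(scores) / np.exp(scores).sum()
v = np.array([0.5, 0.8])
print(attn)      # ~[0.44028635, 0.55971365]
print(attn @ v)  # ~0.66791409, matching expected010
```
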
<ide><path>keras/layers/attention/base_dense_attention.py
<ide> class BaseDenseAttention(base_layer.BaseRandomLayer):
<ide> reuse the `apply_attention_scores()` method.
<ide>
<ide> Args:
<del> causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
<del> that position `i` cannot attend to positions `j > i`. This prevents the
<del> flow of information from the future towards the past.
<add> causal: Boolean. Set to `True` for decoder self-attention. Adds a mask
<add> such that position `i` cannot attend to positions `j > i`. This prevents
<add> the flow of information from the future towards the past.
<ide> dropout: Float between 0 and 1. Fraction of the units to drop for the
<ide> attention scores.
<ide>
<ide> def _apply_scores(self, scores, value, scores_mask=None, training=None):
<ide>
<ide> To use this method in your attention layer, follow the steps:
<ide>
<del> * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
<del> `[batch_size, Tv]` to calculate the attention `scores`.
<add> * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of
<add> shape `[batch_size, Tv]` to calculate the attention `scores`.
<ide> * Pass `scores` and `value` tensors to this method. The method applies
<del> `scores_mask`, calculates `attention_distribution = softmax(scores)`, then
<del> returns `matmul(attention_distribution, value).
<add> `scores_mask`, calculates `attention_distribution = softmax(scores)`,
<add> then returns `matmul(attention_distribution, value)`.
<ide> * Apply `query_mask` and return the result.
<ide>
<ide> Args:
<ide> scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
<ide> value: Value tensor of shape `[batch_size, Tv, dim]`.
<ide> scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
<ide> `[batch_size, Tq, Tv]`. If given, scores at positions where
<del> `scores_mask==False` do not contribute to the result. It must contain
<del> at least one `True` value in each line along the last dimension.
<add> `scores_mask==False` do not contribute to the result. It must
<add> contain at least one `True` value in each line along the last
<add> dimension.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode (adding dropout) or in inference mode (no dropout).
<ide>
<ide> def _apply_scores(self, scores, value, scores_mask=None, training=None):
<ide> """
<ide> if scores_mask is not None:
<ide> padding_mask = tf.logical_not(scores_mask)
<del> # Bias so padding positions do not contribute to attention distribution.
<del> # Note 65504. is the max float16 value.
<add> # Bias so padding positions do not contribute to attention
<add> # distribution. Note 65504. is the max float16 value.
<ide> if scores.dtype is tf.float16:
<ide> scores -= 65504.0 * tf.cast(padding_mask, dtype=scores.dtype)
<ide> else:
<ide> def call(
<ide> v_mask = tf.expand_dims(v_mask, axis=-2)
<ide> if self.causal:
<ide> # Creates a lower triangular mask, so position i cannot attend to
<del> # positions j>i. This prevents the flow of information from the future
<del> # into the past.
<add> # positions j>i. This prevents the flow of information from the
<add> # future into the past.
<ide> scores_shape = tf.shape(scores)
<ide> # causal_mask_shape = [1, Tq, Tv].
<ide> causal_mask_shape = tf.concat(
<ide> def _validate_call_args(self, inputs, mask):
<ide> if len(mask) < 2 or len(mask) > len(inputs):
<ide> raise ValueError(
<ide> f"{class_name} layer mask must be a list of length 2, "
<del> f"namely [query_mask, value_mask]. Received length: {len(mask)}."
<add> "namely [query_mask, value_mask]. "
<add> f"Received length: {len(mask)}."
<ide> )
<ide>
<ide> def get_config(self):
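
A sketch of the lower-triangular causal mask described above, in plain NumPy (position `i` may only attend to positions `j <= i`):

```python
import numpy as np


def causal_mask(Tq, Tv):
    row = np.arange(Tq)[:, None]
    col = np.arange(Tv)[None, :]
    return col <= row  # True where attention is allowed


print(causal_mask(3, 3).astype(int))
# [[1 0 0]
#  [1 1 0]
#  [1 1 1]]
```
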
<ide><path>keras/layers/attention/base_dense_attention_test.py
<ide> def test_multi_dim_with_mask(self):
<ide> scores=scores, value=v, scores_mask=scores_mask
<ide> )
<ide>
<del> # Expected softmax scores = softmax(scores) with zeros in positions where
<del> # v_mask == False.
<add> # Expected softmax scores = softmax(scores) with zeros in positions
<add> # where v_mask == False.
<ide> # => softmax_scores000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863
<ide> # softmax_scores001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137
<ide> # softmax_scores002 = 0
<ide><path>keras/layers/attention/multi_head_attention.py
<ide> def _build_attention_equation(rank, attn_axes):
<ide> num_heads, <query attention dims>, <key attention dims>)`
<ide> (2) Combination:
<ide> `(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
<del> (<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<del> <query attention dims>, num_heads, channels)`
<add> (<batch dims>, <value attention dims>, num_heads, channels) ->
<add> (<batch dims>, <query attention dims>, num_heads, channels)`
<ide>
<ide> Args:
<ide> rank: Rank of query, key, value tensors.
<ide> def _get_output_shape(output_rank, known_last_dims):
<ide> class MultiHeadAttention(Layer):
<ide> """MultiHeadAttention layer.
<ide>
<del> This is an implementation of multi-headed attention as described in the paper
<del> "Attention is all you Need" (Vaswani et al., 2017).
<add> This is an implementation of multi-headed attention as described in the
<add> paper "Attention is all you Need" (Vaswani et al., 2017).
<ide> If `query`, `key`, `value` are the same, then
<ide> this is self-attention. Each timestep in `query` attends to the
<ide> corresponding sequence in `key`, and returns a fixed-width vector.
<ide> class MultiHeadAttention(Layer):
<ide> When using MultiHeadAttention inside a custom Layer, the custom Layer must
<ide> implement `build()` and call MultiHeadAttention's `_build_from_signature()`.
<ide> This enables weights to be restored correctly when the model is loaded.
<del> TODO(b/172609172): link to documentation about calling custom build functions
<del> when used in a custom Layer.
<add> TODO(b/172609172): link to documentation about calling custom build
<add> functions when used in a custom Layer.
<ide>
<ide> Examples:
<ide>
<ide> class MultiHeadAttention(Layer):
<ide>
<ide> Performs 2D self-attention over a 5D input tensor on axes 2 and 3.
<ide>
<del> >>> layer = MultiHeadAttention(num_heads=2, key_dim=2, attention_axes=(2, 3))
<add> >>> layer = MultiHeadAttention(
<add> ... num_heads=2, key_dim=2, attention_axes=(2, 3))
<ide> >>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])
<ide> >>> output_tensor = layer(input_tensor, input_tensor)
<ide> >>> print(output_tensor.shape)
<ide> class MultiHeadAttention(Layer):
<ide> value_dim: Size of each attention head for value.
<ide> dropout: Dropout probability.
<ide> use_bias: Boolean, whether the dense layers use bias vectors/matrices.
<del> output_shape: The expected shape of an output tensor, besides the batch and
<del> sequence dims. If not specified, projects back to the key feature dim.
<add> output_shape: The expected shape of an output tensor, besides the batch
<add> and sequence dims. If not specified, projects back to the key feature
<add> dim.
<ide> attention_axes: axes over which the attention is applied. `None` means
<ide> attention over all axes, but batch, heads, and features.
<ide> kernel_initializer: Initializer for dense layer kernels.
<ide> class MultiHeadAttention(Layer):
<ide> indicates no attention. Broadcasting can happen for the missing batch
<ide> dimensions and the head dimension.
<ide> return_attention_scores: A boolean to indicate whether the output should
<del> be `(attention_output, attention_scores)` if `True`, or `attention_output`
<del> if `False`. Defaults to `False`.
<add> be `(attention_output, attention_scores)` if `True`, or
<add> `attention_output` if `False`. Defaults to `False`.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode (adding dropout) or in inference mode (no dropout).
<ide> Defaults to either using the training mode of the parent layer/model,
<ide> def from_config(cls, config):
<ide> layer = cls(**config)
<ide> if None in [query_shape, key_shape, value_shape]:
<ide> logging.warning(
<del> "One of dimensions of the input shape is missing. It should have been"
<del> " memorized when the layer was serialized. "
<add> "One of dimensions of the input shape is missing. It "
<add> "should have been memorized when the layer was serialized. "
<ide> "%s is created without weights.",
<ide> str(cls),
<ide> )
<ide> def from_config(cls, config):
<ide> def _build_from_signature(self, query, value, key=None):
<ide> """Builds layers and variables.
<ide>
<del> Once the method is called, self._built_from_signature will be set to True.
<add> Once the method is called, self._built_from_signature will be set to
<add> True.
<ide>
<ide> Args:
<ide> query: Query tensor or TensorShape.
<ide> def _build_from_signature(self, query, value, key=None):
<ide> **self._get_common_kwargs_for_sublayer()
<ide> )
<ide>
<del> # Builds the attention computations for multi-head dot product attention.
<del> # These computations could be wrapped into the keras attention layer once
<del> # it supports mult-head einsum computations.
<add> # Builds the attention computations for multi-head dot product
<add> # attention. These computations could be wrapped into the keras
<add> # attention layer once it supports multi-head einsum computations.
<ide> self._build_attention(output_rank)
<ide> self._output_dense = self._make_output_dense(
<ide> free_dims,
<ide> def _get_common_kwargs_for_sublayer(self):
<ide> kernel_constraint=self._kernel_constraint,
<ide> bias_constraint=self._bias_constraint,
<ide> )
<del> # Create new clone of kernel/bias initializer, so that we don't reuse the
<del> # initializer instance, which could lead to same init value since
<add> # Create new clone of kernel/bias initializer, so that we don't reuse
<add> # the initializer instance, which could lead to same init value since
<ide> # initializer is stateless.
<ide> kernel_initializer = self._kernel_initializer.__class__.from_config(
<ide> self._kernel_initializer.get_config()
<ide> def _masked_softmax(self, attention_scores, attention_mask=None):
<ide> # `attention_scores` = [B, N, T, S]
<ide> if attention_mask is not None:
<ide> # The expand dim happens starting from the `num_heads` dimension,
<del> # (<batch_dims>, num_heads, <query_attention_dims, key_attention_dims>)
<add> # (<batch_dims>, num_heads, <query_attention_dims,
<add> # key_attention_dims>)
<ide> mask_expansion_axis = -len(self._attention_axes) * 2 - 1
<ide> for _ in range(
<ide> len(attention_scores.shape) - len(attention_mask.shape)
<ide> def _compute_attention(
<ide> """Applies Dot-product attention with query, key, value tensors.
<ide>
<ide> This function defines the computation inside `call` with projected
<del> multi-head Q, K, V inputs. Users can override this function for customized
<del> attention implementation.
<add> multi-head Q, K, V inputs. Users can override this function for
<add> customized attention implementation.
<ide>
<ide> Args:
<ide> query: Projected query `Tensor` of shape `(B, T, N, key_dim)`.
<ide><path>keras/layers/attention/multi_head_attention_test.py
<ide> class MultiHeadAttentionTest(test_combinations.TestCase):
<ide> ("key_value_different_proj", 32, 60, [40, 60]),
<ide> )
<ide> def test_non_masked_attention(self, value_dim, output_shape, output_dims):
<del> """Test that the attention layer can be created without a mask tensor."""
<add> """Test that the attention layer can be created without a mask
<add> tensor."""
<ide> test_layer = keras.layers.MultiHeadAttention(
<ide> num_heads=12,
<ide> key_dim=64,
<ide> def test_masked_attention(self, use_bias):
<ide> from_data = 10 * np.random.random_sample((batch_size, 4, 8))
<ide> to_data = 10 * np.random.random_sample((batch_size, 2, 8))
<ide>
<del> # Invoke the data with a random set of mask data. This should mask at least
<del> # one element.
<add> # Invoke the data with a random set of mask data. This should mask at
<add> # least one element.
<ide> mask_data = np.random.randint(2, size=(batch_size, 4, 2))
<ide> masked_output_data = model.predict([from_data, to_data, mask_data])
<ide>
<del> # Invoke the same data, but with a null mask (where no elements are masked).
<add> # Invoke the same data, but with a null mask (where no elements are
<add> # masked).
<ide> null_mask_data = np.ones((batch_size, 4, 2))
<ide> unmasked_output_data = model.predict(
<ide> [from_data, to_data, null_mask_data]
<ide> )
<ide>
<del> # Because one data is masked and one is not, the outputs should not be the
<del> # same.
<add> # Because one data is masked and one is not, the outputs should not be
<add> # the same.
<ide> self.assertNotAllClose(masked_output_data, unmasked_output_data)
<ide>
<ide> # Tests the layer with three inputs: Q, K, V.
<ide> def test_masked_attention(self, use_bias):
<ide> unmasked_output_data = model.predict(
<ide> [from_data, to_data, to_data, null_mask_data]
<ide> )
<del> # Because one data is masked and one is not, the outputs should not be the
<del> # same.
<add> # Because one data is masked and one is not, the outputs should not be
<add> # the same.
<ide> self.assertNotAllClose(masked_output_data, unmasked_output_data)
<ide>
<ide> if use_bias:
<ide> def test_initializer(self):
<ide> output = test_layer(query, query)
<ide> self.assertEqual(output.shape.as_list(), [None, 40, 80])
<ide>
<del> # Make sure the sub layers have different kernel init value, and not reusing
<del> # the initializers.
<add> # Make sure the sub layers have different kernel init value, and not
<add> # reusing the initializers.
<ide> self.assertNotAllClose(
<ide> keras.backend.eval(test_layer._query_dense.kernel),
<ide> keras.backend.eval(test_layer._key_dense.kernel),
<ide> def test_masked_attention_with_scores(self):
<ide> from_data = 10 * np.random.random_sample((batch_size, 4, 8))
<ide> to_data = 10 * np.random.random_sample((batch_size, 2, 8))
<ide>
<del> # Invoke the data with a random set of mask data. This should mask at least
<del> # one element.
<add> # Invoke the data with a random set of mask data. This should mask at
<add> # least one element.
<ide> mask_data = np.random.randint(2, size=(batch_size, 4, 2))
<ide> masked_output_data = model.predict([from_data, to_data, mask_data])
<ide>
<del> # Invoke the same data, but with a null mask (where no elements are masked).
<add> # Invoke the same data, but with a null mask (where no elements are
<add> # masked).
<ide> null_mask_data = np.ones((batch_size, 4, 2))
<ide> unmasked_output_data = model.predict(
<ide> [from_data, to_data, null_mask_data]
<ide> )
<ide>
<del> # Because one data is masked and one is not, the outputs should not be the
<del> # same.
<add> # Because one data is masked and one is not, the outputs should not be
<add> # the same.
<ide> self.assertNotAllClose(masked_output_data, unmasked_output_data)
<ide>
<ide> # Create a model containing attention scores.
<ide> def test_high_dim_attention(
<ide> query = 10 * np.random.random_sample(query_shape)
<ide> value = 10 * np.random.random_sample(value_shape)
<ide>
<del> # Invoke the data with a random set of mask data. This should mask at least
<del> # one element.
<add> # Invoke the data with a random set of mask data. This should mask at
<add> # least one element.
<ide> mask_data = np.random.randint(2, size=mask_shape).astype("bool")
<del> # Invoke the same data, but with a null mask (where no elements are masked).
<add> # Invoke the same data, but with a null mask (where no elements are
<add> # masked).
<ide> null_mask_data = np.ones(mask_shape)
<del> # Because one data is masked and one is not, the outputs should not be the
<del> # same.
<add> # Because one data is masked and one is not, the outputs should not be
<add> # the same.
<ide> query_tensor = keras.Input(query_shape[1:], name="query")
<ide> value_tensor = keras.Input(value_shape[1:], name="value")
<ide> mask_tensor = keras.Input(mask_shape[1:], name="mask")
<ide><path>keras/layers/convolutional/base_conv.py
<ide> class Conv(Layer):
<ide> once (except the `trainable` attribute).
<ide>
<ide> Args:
<del> rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
<add> rank: An integer, the rank of the convolution, e.g. "2" for 2D
<add> convolution.
<ide> filters: Integer, the dimensionality of the output space (i.e. the number
<ide> of filters in the convolution). Could be "None", eg in the case of
<ide> depth wise convolution.
<ide> class Conv(Layer):
<ide> any `dilation_rate` value != 1.
<ide> padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
<ide> `"valid"` means no padding. `"same"` results in padding with zeros
<del> evenly to the left/right or up/down of the input such that output has the
<del> same height/width dimension as the input. `"causal"` results in causal
<del> (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input. `"causal"` results in
<add> causal (dilated) convolutions, e.g. `output[t]` does not depend on
<add> `input[t+1:]`.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`.
<ide> The ordering of the dimensions in the inputs.
<ide> `channels_last` corresponds to inputs with shape
<ide> `(batch_size, ..., channels)` while `channels_first` corresponds to
<ide> class Conv(Layer):
<ide> activation: Activation function to use.
<ide> If you don't specify anything, no activation is applied.
<ide> use_bias: Boolean, whether the layer uses a bias.
<del> kernel_initializer: An initializer for the convolution kernel. If None, the
<del> default initializer (glorot_uniform) will be used.
<add> kernel_initializer: An initializer for the convolution kernel. If None,
<add> the default initializer (glorot_uniform) will be used.
<ide> bias_initializer: An initializer for the bias vector. If None, the default
<ide> initializer (zeros) will be used.
<ide> kernel_regularizer: Optional regularizer for the convolution kernel.
<ide> def __init__(
<ide> def _validate_init(self):
<ide> if self.filters is not None and self.filters % self.groups != 0:
<ide> raise ValueError(
<del> "The number of filters must be evenly divisible by the number of "
<del> "groups. Received: groups={}, filters={}".format(
<add> "The number of filters must be evenly divisible by the "
<add> "number of groups. Received: groups={}, filters={}".format(
<ide> self.groups, self.filters
<ide> )
<ide> )
<ide> def build(self, input_shape):
<ide> input_channel = self._get_input_channel(input_shape)
<ide> if input_channel % self.groups != 0:
<ide> raise ValueError(
<del> "The number of input channels must be evenly divisible by the number "
<del> "of groups. Received groups={}, but the input has {} channels "
<del> "(full input shape is {}).".format(
<add> "The number of input channels must be evenly divisible by "
<add> "the number of groups. Received groups={}, but the input "
<add> "has {} channels (full input shape is {}).".format(
<ide> self.groups, input_channel, input_shape
<ide> )
<ide> )
<ide> def build(self, input_shape):
<ide> self.filters,
<ide> )
<ide>
<del> # compute_output_shape contains some validation logic for the input shape,
<del> # and make sure the output shape has all positive dimensions.
<add> # compute_output_shape contains some validation logic for the input
<add> # shape, and makes sure the output shape has all positive dimensions.
<ide> self.compute_output_shape(input_shape)
<ide>
<ide> self.kernel = self.add_weight(
<ide> def convolution_op(self, inputs, kernel):
<ide> name=self.__class__.__name__,
<ide> )
<ide>
<del> # TODO(b/213173659): remove this when grouped convolutions are fully supported
<del> # on the CPU for compiled functions. For now, we need this as a workaround for
<del> # CPU support.
<add> # TODO(b/213173659): remove this when grouped convolutions are fully
<add> # supported on the CPU for compiled functions. For now, we need this as a
<add> # workaround for CPU support.
<ide> @tf.function(jit_compile=True)
<ide> def _jit_compiled_convolution_op(self, inputs, kernel):
<ide> return self.convolution_op(inputs, kernel)
<ide> def _apply_fn(o):
<ide>
<ide> def _spatial_output_shape(self, spatial_input_shape):
<ide> return [
<del> conv_utils.conv_output_length( # pylint: disable=g-complex-comprehension
<add> conv_utils.conv_output_length(
<ide> length,
<ide> self.kernel_size[i],
<ide> padding=self.padding,
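
The per-axis arithmetic behind `conv_utils.conv_output_length` follows the standard output-length rule; a sketch of that rule (not the exact Keras source):

```python
def conv_output_length(length, kernel_size, padding, stride, dilation=1):
    # Effective kernel size once dilation spreads the taps apart.
    dilated = kernel_size + (kernel_size - 1) * (dilation - 1)
    if padding in ("same", "causal"):
        out = length
    elif padding == "valid":
        out = length - dilated + 1
    elif padding == "full":
        out = length + dilated - 1
    return (out + stride - 1) // stride


print(conv_output_length(28, 3, "valid", 1))  # 26
print(conv_output_length(28, 3, "same", 2))   # 14
```
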
<ide><path>keras/layers/convolutional/base_depthwise_conv.py
<ide> class DepthwiseConv(Conv):
<ide> """Depthwise convolution.
<ide>
<del> Depthwise convolution is a type of convolution in which each input channel is
<del> convolved with a different kernel (called a depthwise kernel). You
<del> can understand depthwise convolution as the first step in a depthwise
<del> separable convolution.
<add> Depthwise convolution is a type of convolution in which each input channel
<add> is convolved with a different kernel (called a depthwise kernel). You can
<add> understand depthwise convolution as the first step in a depthwise separable
<add> convolution.
<ide>
<ide> It is implemented via the following steps:
<ide>
<ide> class DepthwiseConv(Conv):
<ide> Unlike a regular convolution, depthwise convolution does not mix
<ide> information across different input channels.
<ide>
<del> The `depth_multiplier` argument determines how many filter are applied to one
<del> input channel. As such, it controls the amount of output channels that are
<del> generated per input channel in the depthwise step.
<add> The `depth_multiplier` argument determines how many filters are applied to
<add> one input channel. As such, it controls the amount of output channels that
<add> are generated per input channel in the depthwise step.
<ide>
<ide> Args:
<ide> kernel_size: A tuple or list of integers specifying the spatial dimensions
<del> of the filters. Can be a single integer to specify the same value for all
<del> spatial dimensions.
<add> of the filters. Can be a single integer to specify the same value for
<add> all spatial dimensions.
<ide> strides: A tuple or list of integers specifying the strides of the
<ide> convolution. Can be a single integer to specify the same value for all
<ide> spatial dimensions. Specifying any `stride` value != 1 is incompatible
<ide> with specifying any `dilation_rate` value != 1.
<del> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no
<del> padding. `"same"` results in padding with zeros evenly to the left/right
<del> or up/down of the input such that output has the same height/width
<del> dimension as the input.
<add> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
<add> no padding. `"same"` results in padding with zeros evenly to the
<add> left/right or up/down of the input such that output has the same
<add> height/width dimension as the input.
<ide> depth_multiplier: The number of depthwise convolution output channels for
<ide> each input channel. The total number of depthwise convolution output
<ide> channels will be equal to `filters_in * depth_multiplier`.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `(batch_size, height, width, channels)` while
<del> `channels_first` corresponds to inputs with shape `(batch_size, channels,
<del> height, width)`. It defaults to the `image_data_format` value found in
<del> your Keras config file at `~/.keras/keras.json`. If you never set it, then
<del> it will be 'channels_last'.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `(batch_size, height,
<add> width, channels)` while `channels_first` corresponds to inputs with
<add> shape `(batch_size, channels, height, width)`. It defaults to the
<add> `image_data_format` value found in your Keras config file at
<add> `~/.keras/keras.json`. If you never set it, then it will be
<add> 'channels_last'.
<ide> dilation_rate: An integer or tuple/list of 2 integers, specifying the
<ide> dilation rate to use for dilated convolution. Currently, specifying any
<ide> `dilation_rate` value != 1 is incompatible with specifying any `strides`
<ide> class DepthwiseConv(Conv):
<ide> `keras.initializers`). If None, the default initializer
<ide> ('glorot_uniform') will be used.
<ide> bias_initializer: Initializer for the bias vector (see
<del> `keras.initializers`). If None, the default initializer ('zeros') will be
<del> used.
<del> depthwise_regularizer: Regularizer function applied to the depthwise kernel
<del> matrix (see `keras.regularizers`).
<add> `keras.initializers`). If None, the default initializer ('zeros') will
<add> be used.
<add> depthwise_regularizer: Regularizer function applied to the depthwise
<add> kernel matrix (see `keras.regularizers`).
<ide> bias_regularizer: Regularizer function applied to the bias vector (see
<ide> `keras.regularizers`).
<ide> activity_regularizer: Regularizer function applied to the output of the
<ide> class DepthwiseConv(Conv):
<ide> new_cols]` if `data_format='channels_first'`
<ide> or 4D tensor with shape: `[batch_size,
<ide> new_rows, new_cols, channels * depth_multiplier]` if
<del> `data_format='channels_last'`. `rows` and `cols` values might have changed
<del> due to padding.
<add> `data_format='channels_last'`. `rows` and `cols` values might have
<add> changed due to padding.
<ide>
<ide> Returns:
<ide> A tensor of rank 4 representing
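
The channel arithmetic described above is easy to confirm: output channels equal input channels times `depth_multiplier`. A runnable check (output spatial dims assume the default `"valid"` padding):

```python
import tensorflow as tf

x = tf.random.normal((4, 28, 28, 3))  # 3 input channels
layer = tf.keras.layers.DepthwiseConv2D(kernel_size=3, depth_multiplier=2)
print(layer(x).shape)  # (4, 26, 26, 6): 3 channels * depth_multiplier 2
```
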
<ide><path>keras/layers/convolutional/base_separable_conv.py
<ide> class SeparableConv(Conv):
<ide> channels, followed by a pointwise convolution that mixes channels.
<ide> If `use_bias` is True and a bias initializer is provided,
<ide> it adds a bias vector to the output.
<del> It then optionally applies an activation function to produce the final output.
<add> It then optionally applies an activation function to produce the final
<add> output.
<ide>
<ide> Args:
<del> rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
<add> rank: An integer, the rank of the convolution, e.g. "2" for 2D
<add> convolution.
<ide> filters: Integer, the dimensionality of the output space (i.e. the number
<ide> of filters in the convolution).
<ide> kernel_size: A tuple or list of integers specifying the spatial
<ide> dimensions of the filters. Can be a single integer to specify the same
<ide> value for all spatial dimensions.
<ide> strides: A tuple or list of integers specifying the strides
<del> of the convolution. Can be a single integer to specify the same value for
<del> all spatial dimensions.
<add> of the convolution. Can be a single integer to specify the same value
<add> for all spatial dimensions.
<ide> Specifying any `stride` value != 1 is incompatible with specifying
<ide> any `dilation_rate` value != 1.
<ide> padding: One of `"valid"` or `"same"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<ide> `channels_last` corresponds to inputs with shape
<ide> `(batch_size, ..., channels)` while `channels_first` corresponds to
<ide> inputs with shape `(batch_size, channels, ...)`.
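
The two-step structure described above (depthwise, then pointwise) can be mimicked with separate layers; the learned weights differ, but the output shapes agree with a single `SeparableConv2D`:

```python
import tensorflow as tf

x = tf.random.normal((4, 28, 28, 3))
sep = tf.keras.layers.SeparableConv2D(filters=8, kernel_size=3)(x)

# Equivalent decomposition: depthwise step, then a 1x1 pointwise mix.
dw = tf.keras.layers.DepthwiseConv2D(kernel_size=3)(x)
pw = tf.keras.layers.Conv2D(filters=8, kernel_size=1)(dw)
print(sep.shape, pw.shape)  # (4, 26, 26, 8) (4, 26, 26, 8)
```
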
<ide><path>keras/layers/convolutional/conv1d.py
<ide> class Conv1D(Conv):
<ide>
<ide> Examples:
<ide>
<del> >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size
<del> >>> # is 4.
<add> >>> # The inputs are 128-length vectors with 10 timesteps, and the
<add> >>> # batch size is 4.
<ide> >>> input_shape = (4, 10, 128)
<ide> >>> x = tf.random.normal(input_shape)
<ide> >>> y = tf.keras.layers.Conv1D(
<ide> class Conv1D(Conv):
<ide> Specifying any stride value != 1 is incompatible with specifying
<ide> any `dilation_rate` value != 1.
<ide> padding: One of `"valid"`, `"same"` or `"causal"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input.
<ide> `"causal"` results in causal (dilated) convolutions, e.g. `output[t]`
<ide> does not depend on `input[t+1:]`. Useful when modeling temporal data
<ide> where the model should not violate the temporal order.
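
A quick check of the `"causal"` padding behaviour described above; for dilation 1 it matches left-padding by `kernel_size - 1` followed by a `"valid"` convolution:

```python
import tensorflow as tf

x = tf.random.normal((4, 10, 128))
y = tf.keras.layers.Conv1D(32, 3, padding="causal")(x)
print(y.shape)  # (4, 10, 32): length preserved, no lookahead

# Same output length via explicit left padding (weights differ).
x_pad = tf.pad(x, [[0, 0], [2, 0], [0, 0]])
y2 = tf.keras.layers.Conv1D(32, 3, padding="valid")(x_pad)
print(y2.shape)  # (4, 10, 32)
```
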
<ide><path>keras/layers/convolutional/conv1d_transpose.py
<ide> class Conv1DTranspose(Conv1D):
<ide> time dimension. Specifying a stride value != 1 is incompatible with
<ide> specifying a `dilation_rate` value != 1. Defaults to 1.
<ide> padding: one of `"valid"` or `"same"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input.
<ide> output_padding: An integer specifying the amount of padding along
<ide> the time dimension of the output tensor.
<ide> The amount of output padding must be lower than the stride.
<ide> If set to `None` (default), the output shape is inferred.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<ide> `channels_last` corresponds to inputs with shape
<ide> `(batch_size, length, channels)` while `channels_first` corresponds to
<ide> inputs with shape `(batch_size, channels, length)`.
<ide><path>keras/layers/convolutional/conv2d.py
<ide> class Conv2D(Conv):
<ide> >>> input_shape = (4, 28, 28, 3)
<ide> >>> x = tf.random.normal(input_shape)
<ide> >>> y = tf.keras.layers.Conv2D(
<del> ... 2, 3, activation='relu', dilation_rate=2, input_shape=input_shape[1:])(x)
<add> ... 2, 3,
<add> ... activation='relu',
<add> ... dilation_rate=2,
<add> ... input_shape=input_shape[1:])(x)
<ide> >>> print(y.shape)
<ide> (4, 24, 24, 2)
<ide>
<ide> class Conv2D(Conv):
<ide>
<ide>
<ide> Args:
<del> filters: Integer, the dimensionality of the output space (i.e. the number of
<del> output filters in the convolution).
<add> filters: Integer, the dimensionality of the output space (i.e. the number
<add> of output filters in the convolution).
<ide> kernel_size: An integer or tuple/list of 2 integers, specifying the height
<del> and width of the 2D convolution window. Can be a single integer to specify
<del> the same value for all spatial dimensions.
<add> and width of the 2D convolution window. Can be a single integer to
<add> specify the same value for all spatial dimensions.
<ide> strides: An integer or tuple/list of 2 integers, specifying the strides of
<ide> the convolution along the height and width. Can be a single integer to
<ide> specify the same value for all spatial dimensions. Specifying any stride
<del> value != 1 is incompatible with specifying any `dilation_rate` value != 1.
<add> value != 1 is incompatible with specifying any `dilation_rate` value !=
<add> 1.
<ide> padding: one of `"valid"` or `"same"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input. When `padding="same"` and
<del> `strides=1`, the output has the same size as the input.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `(batch_size, height, width, channels)` while
<del> `channels_first` corresponds to inputs with shape `(batch_size, channels,
<del> height, width)`. It defaults to the `image_data_format` value found in
<del> your Keras config file at `~/.keras/keras.json`. If you never set it, then
<del> it will be `channels_last`.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input. When `padding="same"`
<add> and `strides=1`, the output has the same size as the input.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `(batch_size, height,
<add> width, channels)` while `channels_first` corresponds to inputs with
<add> shape `(batch_size, channels, height, width)`. It defaults to the
<add> `image_data_format` value found in your Keras config file at
<add> `~/.keras/keras.json`. If you never set it, then it will be
<add> `channels_last`.
<ide> dilation_rate: an integer or tuple/list of 2 integers, specifying the
<ide> dilation rate to use for dilated convolution. Can be a single integer to
<ide> specify the same value for all spatial dimensions. Currently, specifying
<del> any `dilation_rate` value != 1 is incompatible with specifying any stride
<del> value != 1.
<add> any `dilation_rate` value != 1 is incompatible with specifying any
<add> stride value != 1.
<ide> groups: A positive integer specifying the number of groups in which the
<del> input is split along the channel axis. Each group is convolved separately
<del> with `filters / groups` filters. The output is the concatenation of all
<del> the `groups` results along the channel axis. Input channels and `filters`
<del> must both be divisible by `groups`.
<add> input is split along the channel axis. Each group is convolved
<add> separately with `filters / groups` filters. The output is the
<add> concatenation of all the `groups` results along the channel axis. Input
<add> channels and `filters` must both be divisible by `groups`.
<ide> activation: Activation function to use. If you don't specify anything, no
<ide> activation is applied (see `keras.activations`).
<ide> use_bias: Boolean, whether the layer uses a bias vector.
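
The `groups` divisibility rules above show up directly in the kernel shape: each group convolves `input_channels / groups` channels. A small check:

```python
import tensorflow as tf

x = tf.random.normal((4, 28, 28, 8))  # 8 input channels
layer = tf.keras.layers.Conv2D(filters=4, kernel_size=3, groups=2)
y = layer(x)
print(y.shape)             # (4, 26, 26, 4)
print(layer.kernel.shape)  # (3, 3, 4, 4): 8 / 2 input channels per group
```
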
<ide><path>keras/layers/convolutional/conv2d_transpose.py
<ide> class Conv2DTranspose(Conv2D):
<ide> Specifying any stride value != 1 is incompatible with specifying
<ide> any `dilation_rate` value != 1.
<ide> padding: one of `"valid"` or `"same"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input.
<ide> output_padding: An integer or tuple/list of 2 integers,
<ide> specifying the amount of padding along the height and width
<ide> of the output tensor.
<ide> class Conv2DTranspose(Conv2D):
<ide>
<ide> Output shape:
<ide> 4D tensor with shape:
<del> `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
<add> `(batch_size, filters, new_rows, new_cols)` if
<add> data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
<del> `rows` and `cols` values might have changed due to padding.
<add> `(batch_size, new_rows, new_cols, filters)` if
<add> data_format='channels_last'. `rows` and `cols` values might have changed
<add> due to padding.
<ide> If `output_padding` is specified:
<ide> ```
<ide> new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
<ide> def call(self, inputs):
<ide> h_axis, w_axis = 1, 2
<ide>
<ide> # Use the constant height and weight when possible.
<del> # TODO(scottzhu): Extract this into a utility function that can be applied
<del> # to all convolutional layers, which currently lost the static shape
<del> # information due to tf.shape().
<add> # TODO(scottzhu): Extract this into a utility function that can be
<add> # applied to all convolutional layers, which currently lost the static
<add> # shape information due to tf.shape().
<ide> height, width = None, None
<ide> if inputs.shape.rank is not None:
<ide> dims = inputs.shape.as_list()
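
The output-shape rule quoted in the docstring above can be written as a small helper; a sketch of the usual transposed-convolution length arithmetic (not the exact `conv_utils` source):

```python
def deconv_length(length, stride, kernel_size, padding, output_padding=None):
    if output_padding is None:
        if padding == "valid":
            return length * stride + max(kernel_size - stride, 0)
        return length * stride  # "same"
    # With explicit output_padding, apply the docstring formula directly.
    pad = 0 if padding == "valid" else kernel_size // 2
    return (length - 1) * stride + kernel_size - 2 * pad + output_padding


print(deconv_length(7, 2, 3, "valid"))                    # 15
print(deconv_length(7, 2, 3, "valid", output_padding=1))  # 16
```
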
<ide><path>keras/layers/convolutional/conv3d.py
<ide> class Conv3D(Conv):
<ide> >>> print(y.shape)
<ide> (4, 26, 26, 26, 2)
<ide>
<del> >>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of 3D frames,
<del> >>> # with 7 frames per video.
<add> >>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of
<add> >>> # 3D frames, with 7 frames per video.
<ide> >>> input_shape = (4, 7, 28, 28, 28, 1)
<ide> >>> x = tf.random.normal(input_shape)
<ide> >>> y = tf.keras.layers.Conv3D(
<ide> class Conv3D(Conv):
<ide> (4, 7, 26, 26, 26, 2)
<ide>
<ide> Args:
<del> filters: Integer, the dimensionality of the output space (i.e. the number of
<del> output filters in the convolution).
<add> filters: Integer, the dimensionality of the output space (i.e. the number
<add> of output filters in the convolution).
<ide> kernel_size: An integer or tuple/list of 3 integers, specifying the depth,
<del> height and width of the 3D convolution window. Can be a single integer to
<del> specify the same value for all spatial dimensions.
<add> height and width of the 3D convolution window. Can be a single integer
<add> to specify the same value for all spatial dimensions.
<ide> strides: An integer or tuple/list of 3 integers, specifying the strides of
<ide> the convolution along each spatial dimension. Can be a single integer to
<ide> specify the same value for all spatial dimensions. Specifying any stride
<del> value != 1 is incompatible with specifying any `dilation_rate` value != 1.
<add> value != 1 is incompatible with specifying any `dilation_rate`
<add> value != 1.
<ide> padding: one of `"valid"` or `"same"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `batch_shape + (spatial_dim1, spatial_dim2,
<del> spatial_dim3, channels)` while `channels_first` corresponds to inputs with
<del> shape `batch_shape + (channels, spatial_dim1, spatial_dim2,
<del> spatial_dim3)`. It defaults to the `image_data_format` value found in your
<del> Keras config file at `~/.keras/keras.json`. If you never set it, then it
<del> will be "channels_last".
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `batch_shape +
<add> (spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
<add> `channels_first` corresponds to inputs with shape `batch_shape +
<add> (channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to
<add> the `image_data_format` value found in your Keras config file at
<add> `~/.keras/keras.json`. If you never set it, then it will be
<add> "channels_last".
<ide> dilation_rate: an integer or tuple/list of 3 integers, specifying the
<ide> dilation rate to use for dilated convolution. Can be a single integer to
<ide> specify the same value for all spatial dimensions. Currently, specifying
<del> any `dilation_rate` value != 1 is incompatible with specifying any stride
<del> value != 1.
<add> any `dilation_rate` value != 1 is incompatible with specifying any
<add> stride value != 1.
<ide> groups: A positive integer specifying the number of groups in which the
<del> input is split along the channel axis. Each group is convolved separately
<del> with `filters / groups` filters. The output is the concatenation of all
<del> the `groups` results along the channel axis. Input channels and `filters`
<del> must both be divisible by `groups`.
<add> input is split along the channel axis. Each group is convolved
<add> separately with `filters / groups` filters. The output is the
<add> concatenation of all the `groups` results along the channel axis. Input
<add> channels and `filters` must both be divisible by `groups`.
<ide> activation: Activation function to use. If you don't specify anything, no
<ide> activation is applied (see `keras.activations`).
<ide> use_bias: Boolean, whether the layer uses a bias vector.
<ide> class Conv3D(Conv):
<ide> 5+D tensor with shape: `batch_shape + (filters, new_conv_dim1,
<ide> new_conv_dim2, new_conv_dim3)` if data_format='channels_first'
<ide> or 5+D tensor with shape: `batch_shape + (new_conv_dim1, new_conv_dim2,
<del> new_conv_dim3, filters)` if data_format='channels_last'. `new_conv_dim1`,
<del> `new_conv_dim2` and `new_conv_dim3` values might have changed due to
<del> padding.
<add> new_conv_dim3, filters)` if data_format='channels_last'.
<add> `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
<add> changed due to padding.
<ide>
<ide> Returns:
<ide> A tensor of rank 5+ representing
<ide><path>keras/layers/convolutional/conv3d_transpose.py
<ide> class Conv3DTranspose(Conv3D):
<ide> When using this layer as the first layer in a model,
<ide> provide the keyword argument `input_shape`
<ide> (tuple of integers or `None`, does not include the sample axis),
<del> e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
<del> if `data_format="channels_last"`.
<add> e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3
<add> channels if `data_format="channels_last"`.
<ide>
<ide> Args:
<ide> filters: Integer, the dimensionality of the output space
<ide> class Conv3DTranspose(Conv3D):
<ide> Specifying any stride value != 1 is incompatible with specifying
<ide> any `dilation_rate` value != 1.
<ide> padding: one of `"valid"` or `"same"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input.
<ide> output_padding: An integer or tuple/list of 3 integers,
<ide> specifying the amount of padding along the depth, height, and
<ide> width.
<ide> class Conv3DTranspose(Conv3D):
<ide>
<ide> Input shape:
<ide> 5D tensor with shape:
<del> `(batch_size, channels, depth, rows, cols)` if data_format='channels_first'
<add> `(batch_size, channels, depth, rows, cols)` if
<add> data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(batch_size, depth, rows, cols, channels)` if data_format='channels_last'.
<add> `(batch_size, depth, rows, cols, channels)` if
<add> data_format='channels_last'.
<ide>
<ide> Output shape:
<ide> 5D tensor with shape:
<ide><path>keras/layers/convolutional/conv_test.py
<ide> def test_dynamic_shape(self):
<ide> input_shape = (5, None, None, 2)
<ide> inputs = keras.Input(shape=input_shape)
<ide> x = layer(inputs)
<del> # Won't raise error here with None values in input shape (b/144282043).
<add> # Won't raise error here with None values in input shape
<add> # (b/144282043).
<ide> layer(x)
<ide>
<ide>
<ide><path>keras/layers/convolutional/depthwise_conv1d.py
<ide> class DepthwiseConv1D(DepthwiseConv):
<ide> """Depthwise 1D convolution.
<ide>
<del> Depthwise convolution is a type of convolution in which each input channel is
<del> convolved with a different kernel (called a depthwise kernel). You
<del> can understand depthwise convolution as the first step in a depthwise
<del> separable convolution.
<add> Depthwise convolution is a type of convolution in which each input channel
<add> is convolved with a different kernel (called a depthwise kernel). You can
<add> understand depthwise convolution as the first step in a depthwise separable
<add> convolution.
<ide>
<ide> It is implemented via the following steps:
<ide>
<ide> class DepthwiseConv1D(DepthwiseConv):
<ide> Unlike a regular 1D convolution, depthwise convolution does not mix
<ide> information across different input channels.
<ide>
<del> The `depth_multiplier` argument determines how many filter are applied to one
<del> input channel. As such, it controls the amount of output channels that are
<del> generated per input channel in the depthwise step.
<add> The `depth_multiplier` argument determines how many filters are applied
<add> to one input channel. As such, it controls the number of output channels
<add> that are generated per input channel in the depthwise step.
<ide>
<ide> Args:
<ide> kernel_size: An integer, specifying the height and width of the 1D
<del> convolution window. Can be a single integer to specify the same value for
<del> all spatial dimensions.
<add> convolution window. Can be a single integer to specify the same value
<add> for all spatial dimensions.
<ide> strides: An integer, specifying the strides of the convolution along the
<ide> height and width. Can be a single integer to specify the same value for
<ide> all spatial dimensions. Specifying any stride value != 1 is incompatible
<ide> with specifying any `dilation_rate` value != 1.
<del> padding: one of `'valid'` or `'same'` (case-insensitive). `"valid"` means no
<del> padding. `"same"` results in padding with zeros evenly to the left/right
<del> or up/down of the input such that output has the same height/width
<del> dimension as the input.
<add> padding: one of `'valid'` or `'same'` (case-insensitive). `"valid"` means
<add> no padding. `"same"` results in padding with zeros evenly to the
<add> left/right or up/down of the input such that output has the same
<add> height/width dimension as the input.
<ide> depth_multiplier: The number of depthwise convolution output channels for
<ide> each input channel. The total number of depthwise convolution output
<ide> channels will be equal to `filters_in * depth_multiplier`.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `(batch_size, height, width, channels)` while
<del> `channels_first` corresponds to inputs with shape `(batch_size, channels,
<del> height, width)`. It defaults to the `image_data_format` value found in
<del> your Keras config file at `~/.keras/keras.json`. If you never set it, then
<del> it will be 'channels_last'.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `(batch_size, height,
<add> width, channels)` while `channels_first` corresponds to inputs with
<add> shape `(batch_size, channels, height, width)`. It defaults to the
<add> `image_data_format` value found in your Keras config file at
<add> `~/.keras/keras.json`. If you never set it, then it will be
<add> 'channels_last'.
<ide> dilation_rate: A single integer, specifying the dilation rate to use for
<del> dilated convolution. Currently, specifying any `dilation_rate` value != 1
<del> is incompatible with specifying any stride value != 1.
<add> dilated convolution. Currently, specifying any `dilation_rate`
<add> value != 1 is incompatible with specifying any stride value != 1.
<ide> activation: Activation function to use. If you don't specify anything, no
<ide> activation is applied (see `keras.activations`).
<ide> use_bias: Boolean, whether the layer uses a bias vector.
<ide> depthwise_initializer: Initializer for the depthwise kernel matrix (see
<ide> `keras.initializers`). If None, the default initializer
<ide> ('glorot_uniform') will be used.
<ide> bias_initializer: Initializer for the bias vector (see
<del> `keras.initializers`). If None, the default initializer ('zeros') will be
<del> used.
<del> depthwise_regularizer: Regularizer function applied to the depthwise kernel
<del> matrix (see `keras.regularizers`).
<add> `keras.initializers`). If None, the default initializer ('zeros') will
<add> be used.
<add> depthwise_regularizer: Regularizer function applied to the depthwise
<add> kernel matrix (see `keras.regularizers`).
<ide> bias_regularizer: Regularizer function applied to the bias vector (see
<ide> `keras.regularizers`).
<ide> activity_regularizer: Regularizer function applied to the output of the
<ide> class DepthwiseConv1D(DepthwiseConv):
<ide> new_cols]` if `data_format='channels_first'`
<ide> or 4D tensor with shape: `[batch_size,
<ide> new_rows, new_cols, channels * depth_multiplier]` if
<del> `data_format='channels_last'`. `rows` and `cols` values might have changed
<del> due to padding.
<add> `data_format='channels_last'`. `rows` and `cols` values might have
<add> changed due to padding.
<ide>
<ide> Returns:
<ide> A tensor of rank 4 representing
<ide><path>keras/layers/convolutional/depthwise_conv2d.py
<ide> class DepthwiseConv2D(DepthwiseConv):
<ide> """Depthwise 2D convolution.
<ide>
<del> Depthwise convolution is a type of convolution in which each input channel is
<del> convolved with a different kernel (called a depthwise kernel). You
<del> can understand depthwise convolution as the first step in a depthwise
<del> separable convolution.
<add> Depthwise convolution is a type of convolution in which each input channel
<add> is convolved with a different kernel (called a depthwise kernel). You can
<add> understand depthwise convolution as the first step in a depthwise separable
<add> convolution.
<ide>
<ide> It is implemented via the following steps:
<ide>
<ide> class DepthwiseConv2D(DepthwiseConv):
<ide> Unlike a regular 2D convolution, depthwise convolution does not mix
<ide> information across different input channels.
<ide>
<del> The `depth_multiplier` argument determines how many filter are applied to one
<del> input channel. As such, it controls the amount of output channels that are
<del> generated per input channel in the depthwise step.
<add> The `depth_multiplier` argument determines how many filters are applied
<add> to one input channel. As such, it controls the number of output channels
<add> that are generated per input channel in the depthwise step.
<ide>
<ide> Args:
<ide> kernel_size: An integer or tuple/list of 2 integers, specifying the height
<del> and width of the 2D convolution window. Can be a single integer to specify
<del> the same value for all spatial dimensions.
<add> and width of the 2D convolution window. Can be a single integer to
<add> specify the same value for all spatial dimensions.
<ide> strides: An integer or tuple/list of 2 integers, specifying the strides of
<ide> the convolution along the height and width. Can be a single integer to
<ide> specify the same value for all spatial dimensions. Specifying any stride
<del> value != 1 is incompatible with specifying any `dilation_rate` value != 1.
<del> padding: one of `'valid'` or `'same'` (case-insensitive). `"valid"` means no
<del> padding. `"same"` results in padding with zeros evenly to the left/right
<del> or up/down of the input such that output has the same height/width
<del> dimension as the input.
<add> value != 1 is incompatible with specifying any `dilation_rate`
<add> value != 1.
<add> padding: one of `'valid'` or `'same'` (case-insensitive). `"valid"` means
<add> no padding. `"same"` results in padding with zeros evenly to the
<add> left/right or up/down of the input such that output has the same
<add> height/width dimension as the input.
<ide> depth_multiplier: The number of depthwise convolution output channels for
<ide> each input channel. The total number of depthwise convolution output
<ide> channels will be equal to `filters_in * depth_multiplier`.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `(batch_size, height, width, channels)` while
<del> `channels_first` corresponds to inputs with shape `(batch_size, channels,
<del> height, width)`. It defaults to the `image_data_format` value found in
<del> your Keras config file at `~/.keras/keras.json`. If you never set it, then
<del> it will be 'channels_last'.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `(batch_size, height,
<add> width, channels)` while `channels_first` corresponds to inputs with
<add> shape `(batch_size, channels, height, width)`. It defaults to the
<add> `image_data_format` value found in your Keras config file at
<add> `~/.keras/keras.json`. If you never set it, then it will be
<add> 'channels_last'.
<ide> dilation_rate: An integer or tuple/list of 2 integers, specifying the
<ide> dilation rate to use for dilated convolution. Currently, specifying any
<ide> `dilation_rate` value != 1 is incompatible with specifying any `strides`
<ide> class DepthwiseConv2D(DepthwiseConv):
<ide> `keras.initializers`). If None, the default initializer
<ide> ('glorot_uniform') will be used.
<ide> bias_initializer: Initializer for the bias vector (see
<del> `keras.initializers`). If None, the default initializer ('zeros') will be
<del> used.
<del> depthwise_regularizer: Regularizer function applied to the depthwise kernel
<del> matrix (see `keras.regularizers`).
<add> `keras.initializers`). If None, the default initializer ('zeros') will
<add> be used.
<add> depthwise_regularizer: Regularizer function applied to the depthwise
<add> kernel matrix (see `keras.regularizers`).
<ide> bias_regularizer: Regularizer function applied to the bias vector (see
<ide> `keras.regularizers`).
<ide> activity_regularizer: Regularizer function applied to the output of the
<ide> class DepthwiseConv2D(DepthwiseConv):
<ide> new_cols]` if `data_format='channels_first'`
<ide> or 4D tensor with shape: `[batch_size,
<ide> new_rows, new_cols, channels * depth_multiplier]` if
<del> `data_format='channels_last'`. `rows` and `cols` values might have changed
<del> due to padding.
<add> `data_format='channels_last'`. `rows` and `cols` values might have
<add> changed due to padding.
<ide>
<ide> Returns:
<ide> A tensor of rank 4 representing
<ide><path>keras/layers/convolutional/separable_conv1d.py
<ide> class SeparableConv1D(SeparableConv):
<ide> channels, followed by a pointwise convolution that mixes channels.
<ide> If `use_bias` is True and a bias initializer is provided,
<ide> it adds a bias vector to the output.
<del> It then optionally applies an activation function to produce the final output.
<add> It then optionally applies an activation function to produce the final
<add> output.
<ide>
<ide> Args:
<ide> filters: Integer, the dimensionality of the output space (i.e. the number
<ide> class SeparableConv1D(SeparableConv):
<ide> Specifying any `stride` value != 1 is incompatible with specifying
<ide> any `dilation_rate` value != 1.
<ide> padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input. `"causal"` results in causal
<del> (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input. `"causal"` results in
<add> causal (dilated) convolutions, e.g. `output[t]` does not depend on
<add> `input[t+1:]`.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<ide> `channels_last` corresponds to inputs with shape
<ide> `(batch_size, length, channels)` while `channels_first` corresponds to
<ide> inputs with shape `(batch_size, channels, length)`.
<ide> def call(self, inputs):
<ide> spatial_start_dim = 2
<ide>
<ide> # Explicitly broadcast inputs and kernels to 4D.
<del> # TODO(fchollet): refactor when a native separable_conv1d op is available.
<add> # TODO(fchollet): refactor when a native separable_conv1d op is
<add> # available.
<ide> inputs = tf.expand_dims(inputs, spatial_start_dim)
<ide> depthwise_kernel = tf.expand_dims(self.depthwise_kernel, 0)
<ide> pointwise_kernel = tf.expand_dims(self.pointwise_kernel, 0)
<ide><path>keras/layers/convolutional/separable_conv2d.py
<ide> class SeparableConv2D(SeparableConv):
<ide> Specifying any stride value != 1 is incompatible with specifying
<ide> any `dilation_rate` value != 1.
<ide> padding: one of `"valid"` or `"same"` (case-insensitive).
<del> `"valid"` means no padding. `"same"` results in padding with zeros evenly
<del> to the left/right or up/down of the input such that output has the same
<del> height/width dimension as the input.
<add> `"valid"` means no padding. `"same"` results in padding with zeros
<add> evenly to the left/right or up/down of the input such that output has
<add> the same height/width dimension as the input.
<ide> data_format: A string,
<ide> one of `channels_last` (default) or `channels_first`.
<ide> The ordering of the dimensions in the inputs.
<ide> class SeparableConv2D(SeparableConv):
<ide>
<ide> Output shape:
<ide> 4D tensor with shape:
<del> `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
<add> `(batch_size, filters, new_rows, new_cols)` if
<add> data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
<del> `rows` and `cols` values might have changed due to padding.
<add> `(batch_size, new_rows, new_cols, filters)` if
<add> data_format='channels_last'. `rows` and `cols` values might have changed
<add> due to padding.
<ide>
<ide> Returns:
<ide> A tensor of rank 4 representing
<ide><path>keras/layers/core/core_test.py
<ide> def test_dropout_with_savemodel(self):
<ide> model = keras.Model(inputs, outputs)
<ide> train = model(np.ones((20, 5, 10)), training=True)
<ide> predict = model(np.ones((20, 5, 10)))
<del> # Make sure the weights from tf.random.Generator is not present in the model
<del> # which will cause weight loading issue for existing application models if
<del> # it contains dropout layer.
<add> # Make sure the weights from tf.random.Generator are not present in the
<add> # model, which would cause weight loading issues for existing application
<add> # models if they contain a dropout layer.
<ide> self.assertEmpty(layer.get_weights())
<ide> self.assertEmpty(model.get_weights())
<ide>
<ide> def test_lambda_with_variable_in_model(self):
<ide> def lambda_fn(x, v):
<ide> return x * v
<ide>
<del> # While it is generally not advised to mix Variables with Lambda layers, if
<del> # the variables are explicitly set as attributes then they are still
<add> # While it is generally not advised to mix Variables with Lambda layers,
<add> # if the variables are explicitly set as attributes then they are still
<ide> # tracked. This is consistent with the base Layer behavior.
<ide> layer = keras.layers.Lambda(lambda_fn, arguments={"v": v})
<ide> self.assertLen(layer.trainable_weights, 0)
<ide> def patched_warn(msg):
<ide> @test_combinations.run_all_keras_modes
<ide> @test_combinations.run_with_all_model_types
<ide> def test_lambda_skip_state_variable_from_initializer(self):
<del> # Force the initializers to use the tf.random.Generator, which will contain
<del> # the state variable.
<add> # Force the initializers to use the tf.random.Generator, which will
<add> # contain the state variable.
<ide> kernel_initializer = initializers.RandomNormalV2()
<ide> kernel_initializer._random_generator._rng_type = (
<ide> kernel_initializer._random_generator.RNG_STATEFUL
<ide> def test_lambda_skip_state_variable_from_initializer(self):
<ide> def lambda_fn(x):
<ide> return dense(x + 1) # Dense layer is built on first call
<ide>
<del> # While it is generally not advised to mix Variables with Lambda layers, if
<del> # the variables are explicitly set as attributes then they are still
<add> # While it is generally not advised to mix Variables with Lambda layers,
<add> # if the variables are explicitly set as attributes then they are still
<ide> # tracked. This is consistent with the base Layer behavior.
<ide> layer = keras.layers.Lambda(lambda_fn)
<ide> layer.dense = dense
<ide><path>keras/layers/core/dense.py
<ide> class Dense(Layer):
<ide> Note: If the input to the layer has a rank greater than 2, then `Dense`
<ide> computes the dot product between the `inputs` and the `kernel` along the
<ide> last axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`).
<del> For example, if input has dimensions `(batch_size, d0, d1)`,
<del> then we create a `kernel` with shape `(d1, units)`, and the `kernel` operates
<del> along axis 2 of the `input`, on every sub-tensor of shape `(1, 1, d1)`
<del> (there are `batch_size * d0` such sub-tensors).
<del> The output in this case will have shape `(batch_size, d0, units)`.
<add> For example, if input has dimensions `(batch_size, d0, d1)`, then we create
<add> a `kernel` with shape `(d1, units)`, and the `kernel` operates along axis 2
<add> of the `input`, on every sub-tensor of shape `(1, 1, d1)` (there are
<add> `batch_size * d0` such sub-tensors). The output in this case will have
<add> shape `(batch_size, d0, units)`.
<ide>
<ide> Besides, layer attributes cannot be modified after the layer has been called
<ide> once (except the `trainable` attribute).
<ide> def call(self, inputs):
<ide>
<ide> is_ragged = isinstance(inputs, tf.RaggedTensor)
<ide> if is_ragged:
<del> # In case we encounter a RaggedTensor with a fixed last dimension (last
<del> # dimension not ragged), we can flatten the input and restore the ragged
<del> # dimensions at the end.
<add> # In case we encounter a RaggedTensor with a fixed last dimension
<add> # (last dimension not ragged), we can flatten the input and restore
<add> # the ragged dimensions at the end.
<ide> if tf.compat.dimension_value(inputs.shape[-1]) is None:
<ide> raise ValueError(
<ide> "Dense layer only supports RaggedTensors when the "
<ide> def call(self, inputs):
<ide>
<ide> rank = inputs.shape.rank
<ide> if rank == 2 or rank is None:
<del> # We use embedding_lookup_sparse as a more efficient matmul operation for
<del> # large sparse input tensors. The op will result in a sparse gradient, as
<del> # opposed to sparse_ops.sparse_tensor_dense_matmul which results in dense
<add> # We use embedding_lookup_sparse as a more efficient matmul
<add> # operation for large sparse input tensors. The op will result in a
<add> # sparse gradient, as opposed to
<add> # sparse_ops.sparse_tensor_dense_matmul which results in dense
<ide> # gradients. This can lead to significant speedups, see b/171762937.
<ide> if isinstance(inputs, tf.SparseTensor):
<del> # We need to fill empty rows, as the op assumes at least one id per row.
<add> # We need to fill empty rows, as the op assumes at least one id
<add> # per row.
<ide> inputs, _ = tf.sparse.fill_empty_rows(inputs, 0)
<del> # We need to do some munging of our input to use the embedding lookup as
<del> # a matrix multiply. We split our input matrix into separate ids and
<del> # weights tensors. The values of the ids tensor should be the column
<del> # indices of our input matrix and the values of the weights tensor
<del> # can continue to the actual matrix weights.
<del> # The column arrangement of ids and weights
<del> # will be summed over and does not matter. See the documentation for
<del> # sparse_ops.sparse_tensor_dense_matmul a more detailed explanation
<del> # of the inputs to both ops.
<add> # We need to do some munging of our input to use the embedding
<add> # lookup as a matrix multiply. We split our input matrix into
<add> # separate ids and weights tensors. The values of the ids tensor
<add> # should be the column indices of our input matrix and the
<add> # values of the weights tensor can continue to the actual matrix
<add> # weights. The column arrangement of ids and weights will be
<add> # summed over and does not matter. See the documentation for
<add> # sparse_ops.sparse_tensor_dense_matmul for a more detailed
<add> # explanation of the inputs to both ops.
<ide> ids = tf.SparseTensor(
<ide> indices=inputs.indices,
<ide> values=inputs.indices[:, 1],
<ide><path>keras/layers/core/einsum_dense.py
<ide> class EinsumDense(Layer):
<ide> Args:
<ide> equation: An equation describing the einsum to perform. This equation must
<ide> be a valid einsum string of the form `ab,bc->ac`, `...ab,bc->...ac`, or
<del> `ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum axis
<del> expression sequence.
<add> `ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum
<add> axis expression sequence.
<ide> output_shape: The expected shape of the output tensor (excluding the batch
<ide> dimension and any dimensions represented by ellipses). You can specify
<ide> None for any dimension that is unknown or can be inferred from the input
<ide> shape.
<ide> activation: Activation function to use. If you don't specify anything, no
<ide> activation is applied (that is, a "linear" activation: `a(x) = x`).
<ide> bias_axes: A string containing the output dimension(s) to apply a bias to.
<del> Each character in the `bias_axes` string should correspond to a character
<del> in the output portion of the `equation` string.
<add> Each character in the `bias_axes` string should correspond to a
<add> character in the output portion of the `equation` string.
<ide> kernel_initializer: Initializer for the `kernel` weights matrix.
<ide> bias_initializer: Initializer for the bias vector.
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> class EinsumDense(Layer):
<ide> This example shows how to instantiate a layer that applies the same dense
<ide> operation to every element in a sequence. Here, the `output_shape` has two
<ide> values (since there are two non-batch dimensions in the output); the first
<del> dimension in the `output_shape` is `None`, because the sequence dimension `b`
<del> has an unknown shape.
<add> dimension in the `output_shape` is `None`, because the sequence dimension
<add> `b` has an unknown shape.
<ide>
<ide> >>> layer = tf.keras.layers.EinsumDense("abc,cd->abd",
<ide> ... output_shape=(None, 64),
<ide> class EinsumDense(Layer):
<ide> instead of specifying the batch and sequence dimensions.
<ide>
<ide> Because we are using ellipsis notation and have specified only one axis, the
<del> `output_shape` arg is a single value. When instantiated in this way, the layer
<del> can handle any number of sequence dimensions - including the case where no
<del> sequence dimension exists.
<add> `output_shape` arg is a single value. When instantiated in this way, the
<add> layer can handle any number of sequence dimensions - including the case
<add> where no sequence dimension exists.
<ide>
<ide> >>> layer = tf.keras.layers.EinsumDense("...x,xy->...y",
<ide> ... output_shape=64,
<ide> def _analyze_split_string(
<ide>
<ide> if elided > 0 and left_elided:
<ide> for i in range(1, elided):
<del> # We already inserted the 0th input dimension at dim 0, so we need to
<del> # start at location 1 here.
<add> # We already inserted the 0th input dimension at dim 0, so we need
<add> # to start at location 1 here.
<ide> output_shape.insert(1, input_shape[i])
<ide> elif elided > 0 and not left_elided:
<ide> for i in range(len(input_shape) - elided, len(input_shape)):
<ide> output_shape.append(input_shape[i])
<ide>
<ide> if left_elided:
<del> # If we have beginning dimensions elided, we need to use negative indexing
<del> # to determine where in the input dimension our values are.
<add> # If we have beginning dimensions elided, we need to use negative
<add> # indexing to determine where in the input dimension our values are.
<ide> input_dim_map = {
<ide> dim: (i + elided) - len(input_shape)
<ide> for i, dim in enumerate(input_spec)
<ide> def _analyze_split_string(
<ide> for dim in output_spec:
<ide> if dim not in input_spec and dim not in weight_spec:
<ide> raise ValueError(
<del> f"Dimension '{dim}' was specified in the output '{output_spec}' but "
<del> f"has no corresponding dim in the input spec '{input_spec}' or "
<del> f"weight spec '{output_spec}'"
<add> f"Dimension '{dim}' was specified in the output "
<add> f"'{output_spec}' but has no corresponding dim in the input "
<add> f"spec '{input_spec}' or weight spec '{output_spec}'"
<ide> )
<ide>
<ide> weight_shape = []
<ide> def _analyze_split_string(
<ide> else:
<ide> raise ValueError(
<ide> f"Weight dimension '{dim}' did not have a match in either "
<del> f"the input spec '{input_spec}' or the output spec '{output_spec}'. "
<del> "For this layer, the weight must be fully specified."
<add> f"the input spec '{input_spec}' or the output "
<add> f"spec '{output_spec}'. For this layer, the weight must "
<add> "be fully specified."
<ide> )
<ide>
<ide> if bias_axes is not None:
<ide><path>keras/layers/core/einsum_dense_test.py
<ide> def test_layer_creation(
<ide> expected_bias_shape,
<ide> expected_output_shape,
<ide> ):
<del> # Keras elides the 0-dimension of the input shape when constructing inputs.
<add> # Keras elides the 0-dimension of the input shape when constructing
<add> # inputs.
<ide> non_batch_input_shape = list(input_shape)[1:]
<ide>
<ide> input_tensor = keras.Input(shape=non_batch_input_shape)
<ide><path>keras/layers/core/embedding.py
<ide> class Embedding(Layer):
<ide> the `embeddings` matrix (see `keras.regularizers`).
<ide> embeddings_constraint: Constraint function applied to
<ide> the `embeddings` matrix (see `keras.constraints`).
<del> mask_zero: Boolean, whether or not the input value 0 is a special "padding"
<del> value that should be masked out.
<del> This is useful when using recurrent layers
<del> which may take variable length input.
<del> If this is `True`, then all subsequent layers
<del> in the model need to support masking or an exception will be raised.
<del> If mask_zero is set to True, as a consequence, index 0 cannot be
<del> used in the vocabulary (input_dim should equal size of
<del> vocabulary + 1).
<add> mask_zero: Boolean, whether or not the input value 0 is a special
<add> "padding" value that should be masked out. This is useful when using
<add> recurrent layers which may take variable length input. If this is
<add> `True`, then all subsequent layers in the model need to support masking
<add> or an exception will be raised. If mask_zero is set to True, as a
<add> consequence, index 0 cannot be used in the vocabulary (input_dim should
<add> equal size of vocabulary + 1).
<ide> input_length: Length of input sequences, when it is constant.
<ide> This argument is required if you are going to connect
<ide> `Flatten` then `Dense` layers upstream
<ide> def __init__(
<ide> if input_dim <= 0 or output_dim <= 0:
<ide> raise ValueError(
<ide> "Both `input_dim` and `output_dim` should be positive, "
<del> f"Received input_dim = {input_dim} and output_dim = {output_dim}"
<add> f"Received input_dim = {input_dim} "
<add> f"and output_dim = {output_dim}"
<ide> )
<ide> if (
<ide> not base_layer_utils.v2_dtype_behavior_enabled()
<ide> and "dtype" not in kwargs
<ide> ):
<del> # In TF1, the dtype defaults to the input dtype which is typically int32,
<del> # so explicitly set it to floatx
<add> # In TF1, the dtype defaults to the input dtype which is typically
<add> # int32, so explicitly set it to floatx
<ide> kwargs["dtype"] = backend.floatx()
<del> # We set autocast to False, as we do not want to cast floating- point inputs
<del> # to self.dtype. In call(), we cast to int32, and casting to self.dtype
<del> # before casting to int32 might cause the int32 values to be different due
<del> # to a loss of precision.
<add> # We set autocast to False, as we do not want to cast floating- point
<add> # inputs to self.dtype. In call(), we cast to int32, and casting to
<add> # self.dtype before casting to int32 might cause the int32 values to be
<add> # different due to a loss of precision.
<ide> kwargs["autocast"] = False
<ide> super().__init__(**kwargs)
<ide>
<ide> def compute_output_shape(self, input_shape):
<ide> in_lens = [self.input_length]
<ide> if len(in_lens) != len(input_shape) - 1:
<ide> raise ValueError(
<del> f'"input_length" is {self.input_length}, but received input has '
<del> f"shape {input_shape}"
<add> f'"input_length" is {self.input_length}, but received '
<add> f"input has shape {input_shape}"
<ide> )
<ide> else:
<ide> for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
<ide> if s1 is not None and s2 is not None and s1 != s2:
<ide> raise ValueError(
<del> f'"input_length" is {self.input_length}, but received input '
<del> f"has shape {input_shape}"
<add> f'"input_length" is {self.input_length}, but '
<add> f"received input has shape {input_shape}"
<ide> )
<ide> elif s1 is None:
<ide> in_lens[i] = s2
<ide> def call(self, inputs):
<ide> self._dtype_policy.compute_dtype
<ide> != self._dtype_policy.variable_dtype
<ide> ):
<del> # Instead of casting the variable as in most layers, cast the output, as
<del> # this is mathematically equivalent but is faster.
<add> # Instead of casting the variable as in most layers, cast the
<add> # output, as this is mathematically equivalent but is faster.
<ide> out = tf.cast(out, self._dtype_policy.compute_dtype)
<ide> return out
<ide>
<ide><path>keras/layers/core/lambda_layer.py
<ide> class Lambda(Layer):
<ide> as a `Layer` when constructing `Sequential`
<ide> and Functional API models. `Lambda` layers are best suited for simple
<ide> operations or quick experimentation. For more advanced use cases, follow
<del> [this guide](https://www.tensorflow.org/guide/keras/custom_layers_and_models)
<add> [this guide](
<add> https://www.tensorflow.org/guide/keras/custom_layers_and_models)
<ide> for subclassing `tf.keras.layers.Layer`.
<ide>
<ide> WARNING: `tf.keras.layers.Lambda` layers have (de)serialization limitations!
<ide> def call(self, inputs):
<ide> ```
<ide>
<ide> In general, Lambda layers can be convenient for simple stateless
<del> computation, but anything more complex should use a subclass Layer instead.
<add> computation, but anything more complex should use a subclassed Layer
<add> instead.
<ide>
<ide> Args:
<ide> function: The function to be evaluated. Takes input tensor as first
<ide> argument.
<ide> output_shape: Expected output shape from function. This argument can be
<ide> inferred if not explicitly provided. Can be a tuple or function. If a
<ide> tuple, it only specifies the first dimension onward;
<del> sample dimension is assumed either the same as the input: `output_shape =
<del> (input_shape[0], ) + output_shape` or, the input is `None` and
<del> the sample dimension is also `None`: `output_shape = (None, ) +
<del> output_shape` If a function, it specifies the entire shape as a function
<del> of the
<del> input shape: `output_shape = f(input_shape)`
<add> sample dimension is assumed either the same as the input:
<add> `output_shape = (input_shape[0], ) + output_shape` or, the input is
<add> `None` and the sample dimension is also `None`:
<add> `output_shape = (None, ) + output_shape` If a function, it specifies the
<add> entire shape as a function of the input shape:
<add> `output_shape = f(input_shape)`
<ide> mask: Either None (indicating no masking) or a callable with the same
<ide> signature as the `compute_mask` layer method, or a tensor that will be
<ide> returned as output mask regardless of what the input is.
<ide> def __init__(
<ide> def compute_output_shape(self, input_shape):
<ide> if self._output_shape is None:
<ide> # Make use of existing autocomputation but provide Lambda-specific
<del> # error message. This is always safe to run even when the outer context
<del> # is Graph mode because Lambda layers don't have side effects such as
<del> # `add_loss`.
<add> # error message. This is always safe to run even when the outer
<add> # context is Graph mode because Lambda layers don't have side
<add> # effects such as `add_loss`.
<ide> with tf.__internal__.eager_context.eager_mode():
<ide> try:
<ide> return super().compute_output_shape(input_shape)
<ide> except NotImplementedError:
<ide> raise NotImplementedError(
<del> "We could not automatically infer the shape of the Lambda's "
<del> "output. Please specify `output_shape` for this Lambda."
<add> "We could not automatically infer the shape of "
<add> "the Lambda's output. Please specify `output_shape` "
<add> "for this Lambda."
<ide> )
<ide>
<ide> if callable(self._output_shape):
<ide> def _add_batch(shape):
<ide> return tf.nest.map_structure(_add_batch, output_shapes)
<ide>
<ide> def call(self, inputs, mask=None, training=None):
<del> # We must copy for thread safety, but it only needs to be a shallow copy.
<add> # We must copy for thread safety, but it only needs to be a shallow
<add> # copy.
<ide> kwargs = {k: v for k, v in self.arguments.items()}
<ide> if self._fn_expects_mask_arg:
<ide> kwargs["mask"] = mask
<ide> def _variable_creator(next_creator, **kwargs):
<ide>
<ide> def _check_variables(self, created_variables, accessed_variables):
<ide> if not created_variables and not accessed_variables:
<del> # In the common case that a Lambda layer does not touch a Variable, we
<del> # don't want to incur the runtime cost of assembling any state used for
<del> # checking only to immediately discard it.
<add> # In the common case that a Lambda layer does not touch a Variable,
<add> # we don't want to incur the runtime cost of assembling any state
<add> # used for checking only to immediately discard it.
<ide> return
<ide>
<ide> # Filter out the state variable in the tf.random.Generator, which is
<ide> def _check_variables(self, created_variables, accessed_variables):
<ide> self._already_warned = True
<ide>
<ide> def _warn(self, msg):
<del> # This method will be overridden in a unit test to raise an error, because
<del> # self.assertWarns is not universally implemented.
<add> # This method will be overridden in a unit test to raise an error,
<add> # because self.assertWarns is not universally implemented.
<ide> return tf_logging.warning(msg)
<ide>
<ide> def compute_mask(self, inputs, mask=None):
<ide> def _parse_function_from_config(
<ide> supported_types = ["function", "lambda", "raw"]
<ide> raise TypeError(
<ide> f"Unsupported value for `function_type` argument. Received: "
<del> f"function_type={function_type}. Expected one of {supported_types}"
<add> f"function_type={function_type}. "
<add> f"Expected one of {supported_types}"
<ide> )
<ide> return function
<ide><path>keras/layers/core/tf_op_layer.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the TFOpLambda layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import,g-bad-import-order
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> # pylint: enable=g-bad-import-order
<ide> def from_config(cls, config, custom_objects=None):
<ide>
<ide>
<ide> class KerasOpDispatcher(tf.__internal__.dispatch.GlobalOpDispatcher):
<del> """A global dispatcher that allows building a functional model with TF Ops."""
<add> """A global dispatcher that allows building a functional model with TF
<add> Ops."""
<ide>
<ide> def handle(self, op, args, kwargs):
<ide> """Handle the specified operation with the specified arguments."""
<ide> def _variable_creator(next_creator, **creator_kwargs):
<ide>
<ide> def _check_variables(self, created_variables, accessed_variables):
<ide> if not created_variables and not accessed_variables:
<del> # In the common case that a Lambda layer does not touch a Variable, we
<del> # don't want to incur the runtime cost of assembling any state used for
<del> # checking only to immediately discard it.
<add> # In the common case that a Lambda layer does not touch a Variable,
<add> # we don't want to incur the runtime cost of assembling any state
<add> # used for checking only to immediately discard it.
<ide> return
<ide>
<ide> tracked_weights = set(v.ref() for v in self.weights)
<ide> def _check_variables(self, created_variables, accessed_variables):
<ide> )
<ide> raise ValueError(
<ide> "The following Variables were created within a Lambda layer "
<del> f"({self.name}) but are not tracked by said layer: {variable_str}\n"
<add> f"({self.name}) but are not tracked by said layer: "
<add> f"{variable_str}\n"
<ide> "The layer cannot safely ensure proper Variable reuse "
<del> "across multiple calls, and consequently this behavior is disallowed "
<del> "for safety reasons. Lambda layers are not well suited for stateful "
<del> "computation; instead, writing a subclassed Layer is the recommend "
<add> "across multiple calls, and consequently this behavior "
<add> "is disallowed for safety reasons. Lambda layers are "
<add> "not well suited for stateful computation; instead, "
<add> "writing a subclassed Layer is the recommend "
<ide> "way to define layers with Variables."
<ide> )
<ide>
<ide> def _check_variables(self, created_variables, accessed_variables):
<ide> self._warn(
<ide> "The following Variables were used in a Lambda layer's call "
<ide> f"({self.name}), but are not present in its tracked objects: "
<del> f"{variable_str}. This is a strong indication that the Lambda layer "
<del> "should be rewritten as a subclassed Layer."
<add> f"{variable_str}. This is a strong indication that the Lambda "
<add> "layer should be rewritten as a subclassed Layer."
<ide> )
<ide> self._already_warned = True
<ide>
<ide> def _warn(self, msg):
<del> # This method will be overridden in a unit test to raise an error, because
<del> # self.assertWarns is not universally implemented.
<add> # This method will be overridden in a unit test to raise an error,
<add> # because self.assertWarns is not universally implemented.
<ide> return tf_logging.warning(msg)
<ide>
<ide> def get_config(self):
<ide> if not self.symbol:
<ide> raise ValueError(
<del> f"This Keras op layer was generated from {self.function}, a method "
<del> "that is not publicly exposed in the TensorFlow API. This "
<del> "may have happened if the method was explicitly "
<add> f"This Keras op layer was generated from {self.function}, a "
<add> "method that is not publicly exposed in the TensorFlow API. "
<add> "This may have happened if the method was explicitly "
<ide> "decorated to add dispatching support, and it was used "
<ide> "during Functional model construction. "
<ide> "To ensure cross-version compatibility of Keras models "
<ide> def _delegate_property(
<ide> intermediate values in the model.
<ide>
<ide> Args:
<del> keras_tensor_cls: The KerasTensor subclass that should expose the property.
<add> keras_tensor_cls: The KerasTensor subclass that should expose the
<add> property.
<ide> property_name: The name of the property to expose and delegate to the
<ide> represented (Composite)Tensor.
<ide> """
<ide> def _delegate_method(
<ide>
<ide> Calling this function multiple times with the same arguments should be a no-op.
<ide>
<del> This method exposes an instance method on the KerasTensor class that will use
<del> an `InstanceMethod` layer to run the desired method on the represented
<add> This method exposes an instance method on the KerasTensor class that will
<add> use an `InstanceMethod` layer to run the desired method on the represented
<ide> intermediate values in the model.
<ide>
<ide> Args:
<del> keras_tensor_cls: The KerasTensor subclass that should expose the property.
<add> keras_tensor_cls: The KerasTensor subclass that should expose the
<add> property.
<ide> method_name: The name of the method to expose and delegate to the
<ide> represented (Composite)Tensor.
<ide> """
<ide> def delegate(self, *args, **kwargs):
<ide>
<ide>
<ide> class TFClassMethodDispatcher(tf.__internal__.dispatch.OpDispatcher):
<del> """A class method dispatcher that allows building a functional model with TF class methods."""
<add> """A class method dispatcher that allows building a functional model with TF
<add> class methods."""
<ide>
<ide> def __init__(self, cls, method_name):
<ide> self.cls = cls
<ide> def _call_wrapper(*args, **kwargs):
<ide> # because dicts are flattened by nest while slices aren't.
<ide> # So, map_structure would only see the individual elements in the
<ide> # dict.
<del> # This can't use map_structure_up_to either because the 'shallowness' of
<del> # the shallow tree would have to vary depending on if only one dim or
<del> # multiple are being sliced.
<add> # This can't use map_structure_up_to either because the
<add> # 'shallowness' of the shallow tree would have to vary depending on
<add> # whether only one dim or multiple are being sliced.
<ide> new_args = []
<ide> for arg in args:
<ide> arg = _dict_to_slice(arg)
<ide> def _dict_to_slice(x):
<ide>
<ide>
<ide> class TFSlicingOpDispatcher(tf.__internal__.dispatch.OpDispatcher):
<del> """A global dispatcher that allows building a functional model with TF Ops."""
<add> """A global dispatcher that allows building a functional model with TF
<add> Ops."""
<ide>
<ide> def __init__(self, op):
<ide> self.op = op
<ide><path>keras/layers/kernelized.py
<ide> class RandomFourierFeatures(base_layer.Layer):
<ide> r"""Layer that projects its inputs into a random feature space.
<ide>
<del> This layer implements a mapping from input space to a space with `output_dim`
<del> dimensions, which approximates shift-invariant kernels. A kernel function
<del> `K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for some function `k`.
<del> Many popular Radial Basis Functions (RBF), including Gaussian and
<del> Laplacian kernels, are shift-invariant.
<add> This layer implements a mapping from input space to a space with
<add> `output_dim` dimensions, which approximates shift-invariant kernels. A
<add> kernel function `K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for
<add> some function `k`. Many popular Radial Basis Functions (RBF), including
<add> Gaussian and Laplacian kernels, are shift-invariant.
<ide>
<ide> The implementation of this layer is based on the following paper:
<ide> ["Random Features for Large-Scale Kernel Machines"](
<ide> https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
<ide> by Ali Rahimi and Ben Recht.
<ide>
<del> The distribution from which the parameters of the random features map (layer)
<del> are sampled determines which shift-invariant kernel the layer approximates
<del> (see paper for more details). You can use the distribution of your
<del> choice. The layer supports out-of-the-box
<del> approximations of the following two RBF kernels:
<add> The distribution from which the parameters of the random features map
<add> (layer) are sampled determines which shift-invariant kernel the layer
<add> approximates (see paper for more details). You can use the distribution of
<add> your choice. The layer supports out-of-the-box approximations of the
<add> following two RBF kernels:
<ide>
<ide> - Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))`
<ide> - Laplacian: `K(x, y) = exp(-abs(x - y) / scale))`
<ide> class RandomFourierFeatures(base_layer.Layer):
<ide>
<ide> **Usage:** Typically, this layer is used to "kernelize" linear models by
<ide> applying a non-linear transformation (this layer) to the input features and
<del> then training a linear model on top of the transformed features. Depending on
<del> the loss function of the linear model, the composition of this layer and the
<del> linear model results to models that are equivalent (up to approximation) to
<del> kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
<del> kernel linear regression (for squared loss), etc.
<add> then training a linear model on top of the transformed features. Depending
<add> on the loss function of the linear model, the composition of this layer and
<add> the linear model results in models that are equivalent (up to approximation)
<add> to kernel SVMs (for hinge loss), kernel logistic regression (for logistic
<add> loss), kernel linear regression (for squared loss), etc.
<ide>
<ide> Examples:
<ide>
<del> A kernel multinomial logistic regression model with Gaussian kernel for MNIST:
<add> A kernel multinomial logistic regression model with Gaussian kernel for
<add> MNIST:
<ide>
<ide> ```python
<ide> model = keras.Sequential([
<ide> class RandomFourierFeatures(base_layer.Layer):
<ide> ```
<ide>
<ide> Args:
<del> output_dim: Positive integer, the dimension of the layer's output, i.e., the
<del> number of random features used to approximate the kernel.
<add> output_dim: Positive integer, the dimension of the layer's output, i.e.,
<add> the number of random features used to approximate the kernel.
<ide> kernel_initializer: Determines the distribution of the parameters of the
<del> random features map (and therefore the kernel approximated by the layer).
<del> It can be either a string identifier or a Keras `Initializer` instance.
<del> Currently only 'gaussian' and 'laplacian' are supported string
<del> identifiers (case insensitive). Note that the kernel matrix is not
<del> trainable.
<add> random features map (and therefore the kernel approximated by the
<add> layer). It can be either a string identifier or a Keras `Initializer`
<add> instance. Currently only 'gaussian' and 'laplacian' are supported
<add> string identifiers (case insensitive). Note that the kernel matrix is
<add> not trainable.
<ide> scale: For Gaussian and Laplacian kernels, this corresponds to a scaling
<del> factor of the corresponding kernel approximated by the layer (see concrete
<del> definitions above). When provided, it should be a positive float. If None,
<del> a default value is used: if the kernel initializer is set to "gaussian",
<del> `scale` defaults to `sqrt(input_dim / 2)`, otherwise, it defaults to 1.0.
<del> Both the approximation error of the kernel and the classification quality
<del> are sensitive to this parameter. If `trainable` is set to `True`, this
<del> parameter is learned end-to-end during training and the provided value
<del> serves as the initial value.
<add> factor of the corresponding kernel approximated by the layer (see
<add> concrete definitions above). When provided, it should be a positive
<add> float. If None, a default value is used: if the kernel initializer is
<add> set to "gaussian", `scale` defaults to `sqrt(input_dim / 2)`, otherwise,
<add> it defaults to 1.0. Both the approximation error of the kernel and the
<add> classification quality are sensitive to this parameter. If `trainable`
<add> is set to `True`, this parameter is learned end-to-end during training
<add> and the provided value serves as the initial value.
<ide> **Note:** When features from this layer are fed to a linear model,
<ide> by making `scale` trainable, the resulting optimization problem is
<ide> no longer convex (even if the loss function used by the linear model
<ide> def __init__(
<ide> ):
<ide> if output_dim <= 0:
<ide> raise ValueError(
<del> f"`output_dim` should be a positive integer. Received: {output_dim}"
<add> "`output_dim` should be a positive integer. "
<add> f"Received: {output_dim}"
<ide> )
<ide> if isinstance(kernel_initializer, str):
<ide> if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
<ide> def __init__(
<ide>
<ide> def build(self, input_shape):
<ide> input_shape = tf.TensorShape(input_shape)
<del> # TODO(pmol): Allow higher dimension inputs. Currently the input is expected
<del> # to have shape [batch_size, dimension].
<add> # TODO(pmol): Allow higher dimension inputs. Currently the input is
<add> # expected to have shape [batch_size, dimension].
<ide> if input_shape.rank != 2:
<ide> raise ValueError(
<ide> "The rank of the input tensor should be 2. "
<ide><path>keras/layers/kernelized_test.py
<ide> def test_different_params_similar_approximation(self, initializer, scale):
<ide> output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y)
<ide>
<ide> # Compute the inner products of the outputs (on inputs x and y) for both
<del> # layers. For any fixed random features layer rff_layer, and inputs x, y,
<del> # rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor.
<add> # layers. For any fixed random features layer rff_layer, and inputs x,
<add> # y, rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization
<add> # factor.
<ide> approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
<ide> approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
<ide> self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08)
<ide> def test_bad_kernel_approximation(
<ide> output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y)
<ide>
<ide> # The inner products of the outputs (on inputs x and y) approximates the
<del> # real value of the RBF kernel but poorly since the output dimension of the
<del> # layer is small.
<add> # real value of the RBF kernel but poorly since the output dimension of
<add> # the layer is small.
<ide> exact_kernel_value = exact_kernel_fn(x, y)
<ide> approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
<ide> abs_error = tf.abs(exact_kernel_value - approx_kernel_value)
<ide><path>keras/layers/locally_connected/locally_connected1d.py
<ide> class LocallyConnected1D(Layer):
<ide> ```
<ide>
<ide> Args:
<del> filters: Integer, the dimensionality of the output space (i.e. the number
<del> of output filters in the convolution).
<del> kernel_size: An integer or tuple/list of a single integer, specifying the
<del> length of the 1D convolution window.
<add> filters: Integer, the dimensionality of the output space (i.e. the
<add> number of output filters in the convolution).
<add> kernel_size: An integer or tuple/list of a single integer, specifying
<add> the length of the 1D convolution window.
<ide> strides: An integer or tuple/list of a single integer, specifying the
<ide> stride length of the convolution.
<ide> padding: Currently only supports `"valid"` (case-insensitive). `"same"`
<ide> class LocallyConnected1D(Layer):
<ide> `(batch, channels, length)`. It defaults to the `image_data_format`
<ide> value found in your Keras config file at `~/.keras/keras.json`. If you
<ide> never set it, then it will be "channels_last".
<del> activation: Activation function to use. If you don't specify anything, no
<del> activation is applied
<del> (ie. "linear" activation: `a(x) = x`).
<add> activation: Activation function to use. If you don't specify anything,
<add> no activation is applied (i.e. "linear" activation: `a(x) = x`).
<ide> use_bias: Boolean, whether the layer uses a bias vector.
<ide> kernel_initializer: Initializer for the `kernel` weights matrix.
<ide> bias_initializer: Initializer for the bias vector.
<ide> class LocallyConnected1D(Layer):
<ide> `3`: large, sparse models, where "large" stands for large
<ide> input/output activations (i.e. many `filters`, `input_filters`,
<ide> large `input_size`, `output_size`), and "sparse" stands for few
<del> connections between inputs and outputs, i.e. small ratio `filters *
<del> input_filters * kernel_size / (input_size * strides)`, where inputs
<del> to and outputs of the layer are assumed to have shapes `(input_size,
<del> input_filters)`, `(output_size, filters)` respectively. It is
<del> recommended to benchmark each in the setting of interest to pick the
<del> most efficient one (in terms of speed and memory usage). Correct
<del> choice of implementation can lead to dramatic speed improvements
<del> (e.g. 50X), potentially at the expense of RAM. Also, only
<del> `padding="valid"` is supported by `implementation=1`.
<add> connections between inputs and outputs, i.e. small ratio
<add> `filters * input_filters * kernel_size / (input_size * strides)`,
<add> where inputs to and outputs of the layer are assumed to have
<add> shapes `(input_size, input_filters)`, `(output_size, filters)`
<add> respectively. It is recommended to benchmark each in the setting
<add> of interest to pick the most efficient one (in terms of speed and
<add> memory usage). Correct choice of implementation can lead to
<add> dramatic speed improvements (e.g. 50X), potentially at the expense
<add> of RAM. Also, only `padding="valid"` is supported by
<add> `implementation=1`.
<ide> Input shape:
<ide> 3D tensor with shape: `(batch_size, steps, input_dim)`
<ide> Output shape:
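As a quick illustration of the shapes and the `implementation` trade-off discussed above, a hedged sketch (the shapes are assumed for the example):

```
import tensorflow as tf

# implementation=2 stores the weights as one dense matrix masked for
# local connectivity; implementation=1 uses per-position kernels.
layer = tf.keras.layers.LocallyConnected1D(
    filters=64, kernel_size=3, implementation=2
)
y = layer(tf.random.normal([4, 32, 8]))  # (batch, steps, input_dim)
print(y.shape)  # (4, 30, 64) with the default padding="valid"
```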
<ide><path>keras/layers/locally_connected/locally_connected2d.py
<ide> class LocallyConnected2D(Layer):
<ide> ```
<ide>
<ide> Args:
<del> filters: Integer, the dimensionality of the output space (i.e. the number
<del> of output filters in the convolution).
<del> kernel_size: An integer or tuple/list of 2 integers, specifying the width
<del> and height of the 2D convolution window. Can be a single integer to
<del> specify the same value for all spatial dimensions.
<del> strides: An integer or tuple/list of 2 integers, specifying the strides of
<del> the convolution along the width and height. Can be a single integer to
<del> specify the same value for all spatial dimensions.
<add> filters: Integer, the dimensionality of the output space (i.e. the
<add> number of output filters in the convolution).
<add> kernel_size: An integer or tuple/list of 2 integers, specifying the
<add> width and height of the 2D convolution window. Can be a single integer
<add> to specify the same value for all spatial dimensions.
<add> strides: An integer or tuple/list of 2 integers, specifying the strides
<add> of the convolution along the width and height. Can be a single integer
<add> to specify the same value for all spatial dimensions.
<ide> padding: Currently only support `"valid"` (case-insensitive). `"same"`
<ide> will be supported in future. `"valid"` means no padding.
<ide> data_format: A string, one of `channels_last` (default) or
<ide> `channels_first`. The ordering of the dimensions in the inputs.
<del> `channels_last` corresponds to inputs with shape `(batch, height, width,
<del> channels)` while `channels_first` corresponds to inputs with shape
<add> `channels_last` corresponds to inputs with shape `(batch, height,
<add> width, channels)` while `channels_first` corresponds to inputs
<add> with shape
<ide> `(batch, channels, height, width)`. It defaults to the
<ide> `image_data_format` value found in your Keras config file at
<ide> `~/.keras/keras.json`. If you never set it, then it will be
<ide> "channels_last".
<del> activation: Activation function to use. If you don't specify anything, no
<del> activation is applied
<del> (ie. "linear" activation: `a(x) = x`).
<add> activation: Activation function to use. If you don't specify anything,
<add> no activation is applied (i.e. "linear" activation: `a(x) = x`).
<ide> use_bias: Boolean, whether the layer uses a bias vector.
<ide> kernel_initializer: Initializer for the `kernel` weights matrix.
<ide> bias_initializer: Initializer for the bias vector.
<ide> class LocallyConnected2D(Layer):
<ide> ratio `filters * input_filters * np.prod(kernel_size) /
<ide> (np.prod(input_size) * np.prod(strides))`, where inputs to and
<ide> outputs of the layer are assumed to have shapes `input_size +
<del> (input_filters,)`, `output_size + (filters,)` respectively. It is
<del> recommended to benchmark each in the setting of interest to pick the
<del> most efficient one (in terms of speed and memory usage). Correct
<del> choice of implementation can lead to dramatic speed improvements
<del> (e.g. 50X), potentially at the expense of RAM. Also, only
<del> `padding="valid"` is supported by `implementation=1`.
<add> (input_filters,)`, `output_size + (filters,)` respectively. It is
<add> recommended to benchmark each in the setting of interest to pick
<add> the most efficient one (in terms of speed and memory usage).
<add> Correct choice of implementation can lead to dramatic speed
<add> improvements (e.g. 50X), potentially at the expense of RAM. Also,
<add> only `padding="valid"` is supported by `implementation=1`.
<ide> Input shape:
<ide> 4D tensor with shape: `(samples, channels, rows, cols)` if
<ide> data_format='channels_first'
<ide> class LocallyConnected2D(Layer):
<ide> 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
<ide> data_format='channels_first'
<ide> or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
<del> data_format='channels_last'. `rows` and `cols` values might have changed
<del> due to padding.
<add> data_format='channels_last'. `rows` and `cols` values might have
<add> changed due to padding.
<ide> """
<ide>
<ide> def __init__(
<ide><path>keras/layers/locally_connected/locally_connected_utils.py
<ide> def get_locallyconnected_mask(
<ide> ):
<ide> """Return a mask representing connectivity of a locally-connected operation.
<ide>
<del> This method returns a masking numpy array of 0s and 1s (of type `np.float32`)
<del> that, when element-wise multiplied with a fully-connected weight tensor, masks
<del> out the weights between disconnected input-output pairs and thus implements
<del> local connectivity through a sparse fully-connected weight tensor.
<add> This method returns a masking numpy array of 0s and 1s (of type
<add> `np.float32`) that, when element-wise multiplied with a fully-connected
<add> weight tensor, masks out the weights between disconnected input-output pairs
<add> and thus implements local connectivity through a sparse fully-connected
<add> weight tensor.
<ide>
<ide> Assume an unshared convolution with given parameters is applied to an input
<ide> having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)`
<ide> to produce an output with spatial shape `(d_out1, ..., d_outN)` (determined
<ide> by layer parameters such as `strides`).
<ide>
<ide> This method returns a mask which can be broadcast-multiplied (element-wise)
<del> with a 2*(N+1)-D weight matrix (equivalent to a fully-connected layer between
<del> (N+1)-D activations (N spatial + 1 channel dimensions for input and output)
<del> to make it perform an unshared convolution with given `kernel_shape`,
<del> `strides`, `padding` and `data_format`.
<add> with a 2*(N+1)-D weight matrix (equivalent to a fully-connected layer
<add> between (N+1)-D activations (N spatial + 1 channel dimensions for input and
<add> output) to make it perform an unshared convolution with given
<add> `kernel_shape`, `strides`, `padding` and `data_format`.
<ide>
<ide> Args:
<ide> input_shape: tuple of size N: `(d_in1, ..., d_inN)` spatial shape of the
<ide> def local_conv_matmul(inputs, kernel, kernel_mask, output_shape):
<ide> inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ...,
<ide> d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`.
<ide> kernel: the unshared weights for N-D convolution,
<del> an (N+2)-D tensor of shape: `(d_in1, ..., d_inN, channels_in, d_out2,
<del> ..., d_outN, channels_out)` or `(channels_in, d_in1, ..., d_inN,
<del> channels_out, d_out2, ..., d_outN)`, with the ordering of channels
<del> and spatial dimensions matching that of the input. Each entry is the
<del> weight between a particular input and output location, similarly to
<del> a fully-connected weight matrix.
<add> an (N+2)-D tensor of shape: `(d_in1, ..., d_inN, channels_in,
<add> d_out2, ..., d_outN, channels_out)` or `(channels_in, d_in1, ...,
<add> d_inN, channels_out, d_out2, ..., d_outN)`, with the ordering of
<add> channels and spatial dimensions matching that of the input. Each
<add> entry is the weight between a particular input and output location,
<add> similarly to a fully-connected weight matrix.
<ide> kernel_mask: a float 0/1 mask tensor of shape: `(d_in1, ..., d_inN, 1,
<ide> d_out2, ..., d_outN, 1)` or `(1, d_in1, ..., d_inN, 1, d_out2, ...,
<del> d_outN)`, with the ordering of singleton and spatial dimensions matching
<del> that of the input. Mask represents the connectivity pattern of the layer
<del> and is
<del> precomputed elsewhere based on layer parameters: stride, padding, and
<del> the receptive field shape.
<add> d_outN)`, with the ordering of singleton and spatial dimensions
<add> matching that of the input. Mask represents the connectivity pattern
<add> of the layer and is precomputed elsewhere based on layer parameters:
<add> stride, padding, and the receptive field shape.
<ide> output_shape: a tuple of (N+2) elements representing the output shape:
<ide> `(batch_size, channels_out, d_out1, ..., d_outN)` or `(batch_size,
<ide> d_out1, ..., d_outN, channels_out)`, with the ordering of channels and
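A toy NumPy sketch of the masked-matmul idea these utilities implement (a hypothetical 1D case with kernel_size=3, stride 1, and single channels; not the actual Keras helpers):

```
import numpy as np

d_in, d_out = 5, 3                 # output j sees inputs j .. j+2
w = np.random.randn(d_in, d_out)   # dense, fully-connected weights
mask = np.zeros((d_in, d_out))
for j in range(d_out):
    mask[j:j + 3, j] = 1.0         # zero out disconnected pairs
x = np.random.randn(2, d_in)
y = x @ (w * mask)                 # local connectivity via one matmul
```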
<ide><path>keras/layers/merging/base_merge.py
<ide> def call(self, inputs):
<ide> return self._merge_function(reshaped_inputs)
<ide> else:
<ide> # Transpose all inputs so that batch size is the last dimension.
<del> # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
<add> # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... ,
<add> # batch_size)
<ide> transposed = False
<ide> for x in inputs:
<ide> x_ndim = backend.ndim(x)
<ide> def call(self, inputs):
<ide> reshaped_inputs.append(tf.transpose(x, perm=dims))
<ide> transposed = True
<ide> else:
<del> # We don't transpose inputs if they are 1D vectors or scalars.
<add> # We don't transpose inputs if they are 1D vectors or
<add> # scalars.
<ide> reshaped_inputs.append(x)
<ide> y = self._merge_function(reshaped_inputs)
<ide> y_ndim = backend.ndim(y)
<ide> if transposed:
<del> # If inputs have been transposed, we have to transpose the output too.
<add> # If inputs have been transposed, we have to transpose the
<add> # output too.
<ide> if y_ndim is None:
<ide> y_shape = tf.shape(y)
<ide> y_ndim = tf.shape(y_shape)[0]
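The transposition logic above exists so that elementwise merges can broadcast across mismatched shapes; a small usage sketch (shapes assumed for the example):

```
import tensorflow as tf

a = tf.random.normal([2, 3, 4])
b = tf.random.normal([2, 1, 4])          # broadcastable against `a`
out = tf.keras.layers.Multiply()([a, b])
print(out.shape)                         # (2, 3, 4)
```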
<ide><path>keras/layers/merging/concatenate.py
<ide> def build(self, input_shape):
<ide> # Get the only rank for the set.
<ide> (rank,) = ranks
<ide> for axis in range(rank):
<del> # Skip the Nones in the shape since they are dynamic, also the axis for
<del> # concat has been removed above.
<add> # Skip the Nones in the shape since they are dynamic, also the
<add> # axis for concat has been removed above.
<ide> unique_dims = set(
<ide> shape[axis]
<ide> for shape in shape_set
<ide> def compute_output_shape(self, input_shape):
<ide> not isinstance(input_shape[0], (tuple, list))
<ide> ):
<ide> # The tf_utils.shape_type_conversion decorator turns tensorshapes
<del> # into tuples, so we need to verify that `input_shape` is a list/tuple,
<del> # *and* that the individual elements are themselves shape tuples.
<add> # into tuples, so we need to verify that `input_shape` is a
<add> # list/tuple, *and* that the individual elements are themselves
<add> # shape tuples.
<ide> raise ValueError(
<ide> "A `Concatenate` layer should be called on a list of inputs. "
<ide> f"Received: input_shape={input_shape}"
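A short sketch of the shape rule above: dynamic (`None`) dimensions off the concatenation axis are skipped during validation (shapes assumed for the example):

```
import tensorflow as tf

x = tf.keras.Input(shape=(None, 5))
y = tf.keras.Input(shape=(None, 5))
z = tf.keras.layers.Concatenate(axis=1)([x, y])
print(z.shape)  # (None, None, 5); only static off-axis dims must match
```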
<ide><path>keras/layers/merging/dot.py
<ide> def __init__(self, axes, normalize=False, **kwargs):
<ide> Args:
<ide> axes: Integer or tuple of integers,
<ide> axis or axes along which to take the dot product. If a tuple, should
<del> be two integers corresponding to the desired axis from the first input
<del> and the desired axis from the second input, respectively. Note that the
<del> size of the two selected axes must match.
<add> be two integers corresponding to the desired axis from the first
<add> input and the desired axis from the second input, respectively. Note
<add> that the size of the two selected axes must match.
<ide> normalize: Whether to L2-normalize samples along the
<ide> dot product axis before taking the dot product.
<ide> If set to True, then the output of the dot product
<ide> def __init__(self, axes, normalize=False, **kwargs):
<ide> )
<ide> if not isinstance(axes[0], int) or not isinstance(axes[1], int):
<ide> raise ValueError(
<del> "Invalid format for argument `axes`: list elements should be "
<del> f"integers. Received: axes={axes}"
<add> "Invalid format for argument `axes`: list elements should "
<add> f"be integers. Received: axes={axes}"
<ide> )
<ide> self.axes = axes
<ide> self.normalize = normalize
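To make the `axes` semantics concrete, a minimal sketch (assumed shapes; note the sizes along the two selected axes must match):

```
import tensorflow as tf

x = tf.random.normal([2, 3, 4])
y = tf.random.normal([2, 4, 5])
out = tf.keras.layers.Dot(axes=(2, 1))([x, y])  # contract x axis 2, y axis 1
print(out.shape)                                # (2, 3, 5)
```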
<ide><path>keras/layers/merging/multiply.py
<ide> def multiply(inputs, **kwargs):
<ide> Usage in a functional model:
<ide>
<ide> >>> input1 = tf.keras.layers.Input(shape=(16,))
<del> >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)
<add> >>> x1 = tf.keras.layers.Dense(
<add> ... 8, activation='relu')(input1) #shape=(None, 8)
<ide> >>> input2 = tf.keras.layers.Input(shape=(32,))
<del> >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)
<add> >>> x2 = tf.keras.layers.Dense(
<add> ... 8, activation='relu')(input2) #shape=(None, 8)
<ide> >>> out = tf.keras.layers.multiply([x1,x2]) #shape=(None, 8)
<ide> >>> out = tf.keras.layers.Dense(4)(out)
<ide> >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
<ide><path>keras/layers/merging/subtract.py
<ide> class Subtract(_Merge):
<ide> """Layer that subtracts two inputs.
<ide>
<del> It takes as input a list of tensors of size 2,
<del> both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),
<del> also of the same shape.
<add> It takes as input a list of tensors of size 2, both of the same shape, and
<add> returns a single tensor, (inputs[0] - inputs[1]), also of the same shape.
<ide>
<ide> Examples:
<ide>
<ide><path>keras/layers/normalization/batch_normalization.py
<ide> class BatchNormalizationBase(Layer):
<ide> default), the layer normalizes its output using a moving average of the
<ide> mean and standard deviation of the batches it has seen during training. That
<ide> is to say, it returns
<del> `gamma * (batch - self.moving_mean) / sqrt(self.moving_var + epsilon) + beta`.
<add> `gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`.
<ide>
<ide> `self.moving_mean` and `self.moving_var` are non-trainable variables that
<ide> are updated each time the layer is called in training mode, as such:
<ide> class BatchNormalizationBase(Layer):
<ide> `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
<ide> momentum: Momentum for the moving average.
<ide> epsilon: Small float added to variance to avoid dividing by zero.
<del> center: If True, add offset of `beta` to normalized tensor. If False, `beta`
<del> is ignored.
<del> scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
<del> next layer is linear (also e.g. `nn.relu`), this can be disabled since the
<del> scaling will be done by the next layer.
<add> center: If True, add offset of `beta` to normalized tensor. If False,
<add> `beta` is ignored.
<add> scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
<add> the next layer is linear (also e.g. `nn.relu`), this can be disabled
<add> since the scaling will be done by the next layer.
<ide> beta_initializer: Initializer for the beta weight.
<ide> gamma_initializer: Initializer for the gamma weight.
<ide> moving_mean_initializer: Initializer for the moving mean.
<ide> class BatchNormalizationBase(Layer):
<ide> gamma_constraint: Optional constraint for the gamma weight.
<ide> renorm: Whether to use [Batch Renormalization](
<ide> https://arxiv.org/abs/1702.03275). This adds extra variables during
<del> training. The inference is the same for either value of this parameter.
<add> training. The inference is the same for either value of this
<add> parameter.
<ide> renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
<ide> scalar `Tensors` used to clip the renorm correction. The correction `(r,
<ide> d)` is used as `corrected_value = normalized_value * r + d`, with `r`
<ide> class BatchNormalizationBase(Layer):
<ide> renorm_momentum: Momentum used to update the moving means and standard
<ide> deviations with renorm. Unlike `momentum`, this affects training and
<ide> should be neither too small (which would add noise) nor too large (which
<del> would give stale estimates). Note that `momentum` is still applied to get
<del> the means and variances for inference.
<del> fused: if `True`, use a faster, fused implementation, or raise a ValueError
<del> if the fused implementation cannot be used. If `None`, use the faster
<del> implementation if possible. If False, do not used the fused
<del> implementation.
<del> Note that in TensorFlow 1.x, the meaning of `fused=True` is different: if
<del> `False`, the layer uses the system-recommended implementation.
<add> would give stale estimates). Note that `momentum` is still applied to
<add> get the means and variances for inference.
<add> fused: if `True`, use a faster, fused implementation, or raise a
<add> ValueError if the fused implementation cannot be used. If `None`, use
<add> the faster implementation if possible. If False, do not use the fused
<add> implementation. Note that in TensorFlow 1.x, the meaning of
<add> `fused=True` is different: if `False`, the layer uses the
<add> system-recommended implementation.
<ide> trainable: Boolean, if `True` the variables will be marked as trainable.
<ide> virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
<del> which means batch normalization is performed across the whole batch. When
<del> `virtual_batch_size` is not `None`, instead perform "Ghost Batch
<add> which means batch normalization is performed across the whole batch.
<add> When `virtual_batch_size` is not `None`, instead perform "Ghost Batch
<ide> Normalization", which creates virtual sub-batches which are each
<ide> normalized separately (with shared gamma, beta, and moving statistics).
<ide> Must divide the actual batch size during execution.
<del> adjustment: A function taking the `Tensor` containing the (dynamic) shape of
<del> the input tensor and returning a pair (scale, bias) to apply to the
<add> adjustment: A function taking the `Tensor` containing the (dynamic) shape
<add> of the input tensor and returning a pair (scale, bias) to apply to the
<ide> normalized values (before gamma and beta), only during training. For
<ide> example, if `axis=-1`,
<ide> `adjustment = lambda shape: (
<ide> class BatchNormalizationBase(Layer):
<ide> inputs: Input tensor (of any rank).
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode.
<del> - `training=True`: The layer will normalize its inputs using the mean and
<del> variance of the current batch of inputs.
<del> - `training=False`: The layer will normalize its inputs using the mean and
<del> variance of its moving statistics, learned during training.
<add> - `training=True`: The layer will normalize its inputs using the mean
<add> and variance of the current batch of inputs.
<add> - `training=False`: The layer will normalize its inputs using the mean
<add> and variance of its moving statistics, learned during training.
<ide>
<ide> Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of
<ide> integers, does not include the samples axis) when using this layer as the
<ide> def __init__(
<ide> if self._USE_V2_BEHAVIOR:
<ide> if fused:
<ide> self._raise_if_fused_cannot_be_used()
<del> # We leave fused as None if self._fused_can_be_used()==True, since we
<del> # still may set it to False in self.build() if the input rank is not 4.
<add> # We leave fused as None if self._fused_can_be_used()==True, since
<add> # we still may set it to False in self.build() if the input rank is
<add> # not 4.
<ide> elif fused is None and not self._fused_can_be_used():
<ide> fused = False
<ide> elif fused is None:
<ide> def __init__(
<ide> def _raise_if_fused_cannot_be_used(self):
<ide> """Raises a ValueError if fused implementation cannot be used.
<ide>
<del> In addition to the checks done in this function, the input tensors rank must
<del> be 4 or 5. The input rank check can only be done once the input shape is
<del> known.
<add> In addition to the checks done in this function, the input tensors rank
<add> must be 4 or 5. The input rank check can only be done once the input
<add> shape is known.
<ide> """
<ide> # Note the ValueErrors in this function are caught and not reraised in
<ide> # _fused_can_be_used(). No other exception besides ValueError should be
<ide> # raised here.
<ide>
<del> # Currently fused batch norm doesn't support renorm. It also only supports a
<del> # channel dimension on axis 1 or 3 (rank=4) / 1 or 4 (rank5), when no
<del> # virtual batch size or adjustment is used.
<add> # Currently fused batch norm doesn't support renorm. It also only
<add> # supports a channel dimension on axis 1 or 3 (rank=4) / 1 or 4 (rank=5),
<add> # when no virtual batch size or adjustment is used.
<ide> if self.renorm:
<ide> raise ValueError(
<ide> "Passing both `fused=True` and `renorm=True` is "
<ide> "not supported"
<ide> )
<ide> axis = [self.axis] if isinstance(self.axis, int) else self.axis
<ide> # Axis -3 is equivalent to 1, and axis -1 is equivalent to 3, when the
<del> # input rank is 4. Similarly, the valid axis is -4, -1, 1, 4 when the rank
<del> # is 5. The combination of ranks and axes will be checked later.
<add> # input rank is 4. Similarly, the valid axis is -4, -1, 1, 4 when the
<add> # rank is 5. The combination of ranks and axes will be checked later.
<ide> if len(axis) > 1 or axis[0] not in (-4, -3, -1, 1, 3, 4):
<ide> raise ValueError(
<ide> "Passing `fused=True` is only supported when axis is 1 "
<ide> def _support_zero_size_input(self):
<ide> if not tf.distribute.has_strategy():
<ide> return False
<ide> strategy = tf.distribute.get_strategy()
<del> # TODO(b/195085185): remove experimental_enable_get_next_as_optional after
<del> # migrating all users.
<add> # TODO(b/195085185): remove experimental_enable_get_next_as_optional
<add> # after migrating all users.
<ide> return getattr(
<ide> strategy.extended,
<ide> "enable_partial_batch_handling",
<ide> def build(self, input_shape):
<ide> if self.virtual_batch_size is not None:
<ide> if self.virtual_batch_size <= 0:
<ide> raise ValueError(
<del> f"`virtual_batch_size` must be a positive integer that divides the "
<del> f"true batch size of the input tensor. Received: "
<del> f"virtual_batch_size={self.virtual_batch_size}"
<add> f"`virtual_batch_size` must be a positive integer that "
<add> f"divides the true batch size of the input tensor. "
<add> f"Received: virtual_batch_size={self.virtual_batch_size}"
<ide> )
<ide> # If using virtual batches, the first dimension must be the batch
<ide> # dimension and cannot be the batch norm axis
<ide> def build(self, input_shape):
<ide> )
<ide>
<ide> if self.fused in (None, True):
<del> # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
<del> # output back to its original shape accordingly.
<add> # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape
<add> # the output back to its original shape accordingly.
<ide> if self._USE_V2_BEHAVIOR:
<ide> if self.fused is None:
<ide> self.fused = rank in (4, 5)
<ide> def build(self, input_shape):
<ide> assert self.fused is not None
<ide> self.fused = rank in (4, 5) and self._fused_can_be_used()
<ide> # TODO(chrisying): fused batch norm is currently not supported for
<del> # multi-axis batch norm and by extension virtual batches. In some cases,
<del> # it might be possible to use fused batch norm but would require reshaping
<del> # the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
<del> # particularly tricky. A compromise might be to just support the most
<del> # common use case (turning 5D w/ virtual batch to NCHW)
<add> # multi-axis batch norm and by extension virtual batches. In some
<add> # cases, it might be possible to use fused batch norm but would
<add> # require reshaping the Tensor to 4D with the axis in 1 or 3
<add> # (preferred 1) which is particularly tricky. A compromise might be
<add> # to just support the most common use case (turning 5D w/ virtual
<add> # batch to NCHW)
<ide>
<ide> if self.fused:
<ide> if self.axis == [1] and rank == 4:
<ide> def build(self, input_shape):
<ide> elif self.axis == [4] and rank == 5:
<ide> self._data_format = "NDHWC"
<ide> elif rank == 5:
<del> # 5D tensors that can be passed in but should not use fused batch norm
<del> # due to unsupported axis.
<add> # 5D tensors that can be passed in but should not use fused
<add> # batch norm due to unsupported axis.
<ide> self.fused = False
<ide> else:
<ide> if rank == 4:
<ide> raise ValueError(
<del> "Unsupported axis. The use of `fused=True` is only possible with "
<del> "`axis=1` or `axis=3` for 4D input tensors. Received: "
<del> f"axis={tuple(self.axis)}"
<add> "Unsupported axis. The use of `fused=True` is only "
<add> "possible with `axis=1` or `axis=3` for 4D input "
<add> f"tensors. Received: axis={tuple(self.axis)}"
<ide> )
<ide> else:
<ide> raise ValueError(
<del> "Unsupported axis. The use of `fused=True` is only possible with "
<del> "`axis=1` or `axis=4` for 5D input tensors. Received: "
<del> f"axis={tuple(self.axis)}"
<add> "Unsupported axis. The use of `fused=True` is only "
<add> "possible with `axis=1` or `axis=4` for 5D input "
<add> f"tensors. Received: axis={tuple(self.axis)}"
<ide> )
<ide>
<ide> axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
<ide> def build(self, input_shape):
<ide> # Single axis batch norm (most common/default use-case)
<ide> param_shape = (list(axis_to_dim.values())[0],)
<ide> else:
<del> # Parameter shape is the original shape but with 1 in all non-axis dims
<add> # Parameter shape is the original shape but with 1 in all non-axis
<add> # dims
<ide> param_shape = [
<ide> axis_to_dim[i] if i in axis_to_dim else 1 for i in range(rank)
<ide> ]
<ide> def build(self, input_shape):
<ide> )
<ide>
<ide> try:
<del> # Disable variable partitioning when creating the moving mean and variance
<add> # Disable variable partitioning when creating the moving mean and
<add> # variance
<ide> if hasattr(self, "_scope") and self._scope:
<ide> partitioner = self._scope.partitioner
<ide> self._scope.set_partitioner(None)
<ide> def build(self, input_shape):
<ide> )
<ide>
<ide> if self.renorm:
<del> # In batch renormalization we track the inference moving stddev instead
<del> # of the moving variance to more closely align with the paper.
<add> # In batch renormalization we track the inference moving stddev
<add> # instead of the moving variance to more closely align with the
<add> # paper.
<ide> def moving_stddev_initializer(*args, **kwargs):
<ide> return tf.sqrt(
<ide> self.moving_variance_initializer(*args, **kwargs)
<ide> def moving_stddev_initializer(*args, **kwargs):
<ide> experimental_autocast=False,
<ide> )
<ide>
<del> # Create variables to maintain the moving mean and standard deviation.
<del> # These are used in training and thus are different from the moving
<del> # averages above. The renorm variables are colocated with moving_mean
<del> # and moving_stddev.
<del> # NOTE: below, the outer `with device` block causes the current device
<del> # stack to be cleared. The nested ones use a `lambda` to set the desired
<del> # device and ignore any devices that may be set by the custom getter.
<add> # Create variables to maintain the moving mean and standard
<add> # deviation. These are used in training and thus are different
<add> # from the moving averages above. The renorm variables are
<add> # colocated with moving_mean and moving_stddev.
<add> # NOTE: below, the outer `with device` block causes the current
<add> # device stack to be cleared. The nested ones use a `lambda` to
<add> # set the desired device and ignore any devices that may be set
<add> # by the custom getter.
<ide> def _renorm_variable(name, shape, initializer="zeros"):
<ide> """Create a renorm variable."""
<ide> var = self.add_weight(
<ide> def _fused_batch_norm(self, inputs, training):
<ide> beta = self.beta if self.center else self._beta_const
<ide> gamma = self.gamma if self.scale else self._gamma_const
<ide>
<del> # TODO(b/129279393): Support zero batch input in non DistributionStrategy
<del> # code as well.
<add> # TODO(b/129279393): Support zero batch input in non
<add> # DistributionStrategy code as well.
<ide> if self._support_zero_size_input():
<del> # Keras assumes that batch dimension is the first dimension for Batch
<del> # Normalization.
<add> # Keras assumes that batch dimension is the first dimension for
<add> # Batch Normalization.
<ide> input_batch_size = tf.shape(inputs)[0]
<ide> else:
<ide> input_batch_size = None
<ide>
<del> # TODO(rmlarsen): Support using fused avg updates for non-eager execution
<del> # after fixing graph pattern matching and enabling fused_batch_norm to
<del> # take exponential_avg_factor as a tensor input.
<add> # TODO(rmlarsen): Support using fused avg updates for non-eager
<add> # execution after fixing graph pattern matching and enabling
<add> # fused_batch_norm to take exponential_avg_factor as a tensor input.
<ide> use_fused_avg_updates = (
<ide> tf.compat.v1.executing_eagerly_outside_functions()
<ide> and isinstance(self.momentum, (float, int))
<ide> def mean_update():
<ide> )
<ide>
<ide> def variance_update():
<del> """Update self.moving_variance with the most recent data point."""
<add> """Update self.moving_variance with the most recent data
<add> point."""
<ide> if use_fused_avg_updates:
<ide> if input_batch_size is not None:
<ide> new_variance = control_flow_util.smart_cond(
<ide> def _renorm_correction_and_moments(
<ide> )
<ide>
<ide> def _update_renorm_variable(var, value, inputs_size):
<del> """Updates a moving average and weight, returns the unbiased value."""
<add> """Updates a moving average and weight, returns the unbiased
<add> value."""
<ide> value = tf.identity(value)
<ide>
<ide> def _do_update():
<ide> def _moments(self, inputs, reduction_axes, keep_dims):
<ide> mean, variance = self._calculate_mean_and_var(
<ide> inputs, reduction_axes, keep_dims
<ide> )
<del> # TODO(b/129279393): Support zero batch input in non DistributionStrategy
<del> # code as well.
<add> # TODO(b/129279393): Support zero batch input in non
<add> # DistributionStrategy code as well.
<ide> if self._support_zero_size_input():
<ide> input_batch_size = tf.shape(inputs)[0]
<ide> mean = tf.where(
<ide> def _get_training_value(self, training=None):
<ide> if isinstance(training, int):
<ide> training = bool(training)
<ide> if not self.trainable:
<del> # When the layer is not trainable, it overrides the value passed from
<del> # model.
<add> # When the layer is not trainable, it overrides the value passed
<add> # from model.
<ide> training = False
<ide> return training
<ide>
<ide> def call(self, inputs, training=None):
<ide> training = self._get_training_value(training)
<ide>
<ide> if self.virtual_batch_size is not None:
<del> # Virtual batches (aka ghost batches) can be simulated by reshaping the
<del> # Tensor and reusing the existing batch norm implementation
<add> # Virtual batches (aka ghost batches) can be simulated by reshaping
<add> # the Tensor and reusing the existing batch norm implementation
<ide> original_shape = tf.shape(inputs)
<ide> original_shape = tf.concat(
<ide> [tf.constant([-1]), original_shape[1:]], axis=0
<ide> def call(self, inputs, training=None):
<ide> axis=0,
<ide> )
<ide>
<del> # Will cause errors if virtual_batch_size does not divide the batch size
<add> # Will cause errors if virtual_batch_size does not divide the batch
<add> # size
<ide> inputs = tf.reshape(inputs, expanded_shape)
<ide>
<ide> def undo_virtual_batching(outputs):
<ide> def undo_virtual_batching(outputs):
<ide> if self.fused:
<ide> outputs = self._fused_batch_norm(inputs, training=training)
<ide> if self.virtual_batch_size is not None:
<del> # Currently never reaches here since fused_batch_norm does not support
<del> # virtual batching
<add> # Currently never reaches here since fused_batch_norm does not
<add> # support virtual batching
<ide> outputs = undo_virtual_batching(outputs)
<ide> return outputs
<ide>
<ide> inputs_dtype = inputs.dtype.base_dtype
<ide> if inputs_dtype in (tf.float16, tf.bfloat16):
<del> # Do all math in float32 if given 16-bit inputs for numeric stability.
<del> # In particular, it's very easy for variance to overflow in float16 and
<del> # for safety we also choose to cast bfloat16 to float32.
<add> # Do all math in float32 if given 16-bit inputs for numeric
<add> # stability. In particular, it's very easy for variance to overflow
<add> # in float16 and for safety we also choose to cast bfloat16 to
<add> # float32.
<ide> inputs = tf.cast(inputs, tf.float32)
<ide>
<ide> # Compute the axes along which to reduce the mean / variance
<ide> def undo_virtual_batching(outputs):
<ide> if self.virtual_batch_size is not None:
<ide> del reduction_axes[1] # Do not reduce along virtual batch dim
<ide>
<del> # Broadcasting only necessary for single-axis batch norm where the axis is
<del> # not the last dimension
<add> # Broadcasting only necessary for single-axis batch norm where the axis
<add> # is not the last dimension
<ide> broadcast_shape = [1] * ndims
<ide> broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value
<ide>
<ide> def _compose_transforms(scale, offset, then_scale, then_offset):
<ide> offset += then_offset
<ide> return (scale, offset)
<ide>
<del> # Determine a boolean value for `training`: could be True, False, or None.
<add> # Determine a boolean value for `training`: could be True, False, or
<add> # None.
<ide> training_value = control_flow_util.constant_value(training)
<ide> if (
<ide> training_value == False
<ide> def _compose_transforms(scale, offset, then_scale, then_offset):
<ide> adj_scale, adj_bias, scale, offset
<ide> )
<ide>
<del> # Some of the computations here are not necessary when training==False
<del> # but not a constant. However, this makes the code simpler.
<add> # Some of the computations here are not necessary when
<add> # training==False but not a constant. However, this makes the code
<add> # simpler.
<ide> keep_dims = (
<ide> self.virtual_batch_size is not None or len(self.axis) > 1
<ide> )
<ide> def _compose_transforms(scale, offset, then_scale, then_offset):
<ide>
<ide> if self.virtual_batch_size is not None:
<ide> # This isn't strictly correct since in ghost batch norm, you are
<del> # supposed to sequentially update the moving_mean and moving_variance
<del> # with each sub-batch. However, since the moving statistics are only
<del> # used during evaluation, it is more efficient to just update in one
<del> # step and should not make a significant difference in the result.
<add> # supposed to sequentially update the moving_mean and
<add> # moving_variance with each sub-batch. However, since the moving
<add> # statistics are only used during evaluation, it is more
<add> # efficient to just update in one step and should not make a
<add> # significant difference in the result.
<ide> new_mean = tf.reduce_mean(mean, axis=1, keepdims=True)
<ide> new_variance = tf.reduce_mean(variance, axis=1, keepdims=True)
<ide> else:
<ide> new_mean, new_variance = mean, variance
<ide>
<ide> if self._support_zero_size_input():
<del> # Keras assumes that batch dimension is the first dimension for Batch
<del> # Normalization.
<add> # Keras assumes that batch dimension is the first dimension for
<add> # Batch Normalization.
<ide> input_batch_size = tf.shape(inputs)[0]
<ide> else:
<ide> input_batch_size = None
<ide> def _compose_transforms(scale, offset, then_scale, then_offset):
<ide> ) = self._renorm_correction_and_moments(
<ide> new_mean, new_variance, training, input_batch_size
<ide> )
<del> # When training, the normalized values (say, x) will be transformed as
<del> # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
<del> # = x * (r * gamma) + (d * gamma + beta) with renorm.
<add> # When training, the normalized values (say, x) will be
<add> # transformed as x * gamma + beta without renorm, and (x * r +
<add> # d) * gamma + beta = x * (r * gamma) + (d * gamma + beta) with
<add> # renorm.
<ide> r = _broadcast(tf.stop_gradient(r, name="renorm_r"))
<ide> d = _broadcast(tf.stop_gradient(d, name="renorm_d"))
<ide> scale, offset = _compose_transforms(r, d, scale, offset)
<ide> def variance_update():
<ide> """Update the moving variance."""
<ide>
<ide> def true_branch_renorm():
<del> # We apply epsilon as part of the moving_stddev to mirror the training
<del> # code path.
<add> # We apply epsilon as part of the moving_stddev to mirror
<add> # the training code path.
<ide> moving_stddev = _do_update(
<ide> self.moving_stddev, tf.sqrt(new_variance + self.epsilon)
<ide> )
<ide> return self._assign_new_value(
<ide> self.moving_variance,
<del> # Apply relu in case floating point rounding causes it to go
<del> # negative.
<add> # Apply relu in case floating point rounding causes it
<add> # to go negative.
<ide> backend.relu(
<ide> moving_stddev * moving_stddev - self.epsilon
<ide> ),
<ide> def get_config(self):
<ide> "beta_constraint": constraints.serialize(self.beta_constraint),
<ide> "gamma_constraint": constraints.serialize(self.gamma_constraint),
<ide> }
<del> # Only add TensorFlow-specific parameters if they are set, so as to preserve
<del> # model compatibility with external Keras.
<add> # Only add TensorFlow-specific parameters if they are set, so as to
<add> # preserve model compatibility with external Keras.
<ide> if self.renorm:
<ide> config["renorm"] = True
<ide> config["renorm_clipping"] = self.renorm_clipping
<ide> def get_config(self):
<ide> class SyncBatchNormalization(BatchNormalizationBase):
<ide> r"""Normalize and scale inputs or activations synchronously across replicas.
<ide>
<del> Applies batch normalization to activations of the previous layer at each batch
<del> by synchronizing the global batch statistics across all devices that are
<del> training the model. For specific details about batch normalization please
<del> refer to the `tf.keras.layers.BatchNormalization` layer docs.
<add> Applies batch normalization to activations of the previous layer at each
<add> batch by synchronizing the global batch statistics across all devices that
<add> are training the model. For specific details about batch normalization
<add> please refer to the `tf.keras.layers.BatchNormalization` layer docs.
<ide>
<ide> If this layer is used when using tf.distribute strategy to train models
<ide> across devices/workers, there will be an allreduce call to aggregate batch
<ide> statistics across all replicas at every training step. Without tf.distribute
<del> strategy, this layer behaves as a regular `tf.keras.layers.BatchNormalization`
<del> layer.
<add> strategy, this layer behaves as a regular
<add> `tf.keras.layers.BatchNormalization` layer.
<ide>
<ide> Example usage:
<ide>
<ide> def __init__(
<ide> def _calculate_mean_and_var(self, x, axes, keep_dims):
<ide>
<ide> with backend.name_scope("moments"):
<del> # The dynamic range of fp16 is too limited to support the collection of
<del> # sufficient statistics. As a workaround we simply perform the operations
<del> # on 32-bit floats before converting the mean and variance back to fp16
<add> # The dynamic range of fp16 is too limited to support the collection
<add> # of sufficient statistics. As a workaround we simply perform the
<add> # operations on 32-bit floats before converting the mean and
<add> # variance back to fp16
<ide> y = tf.cast(x, tf.float32) if x.dtype == tf.float16 else x
<ide> replica_ctx = tf.distribute.get_replica_context()
<ide> if replica_ctx:
<ide> def _calculate_mean_and_var(self, x, axes, keep_dims):
<ide> tf.square(y), axis=axes, keepdims=True
<ide> )
<ide> batch_size = tf.cast(tf.shape(y)[axes[0]], tf.float32)
<del> # TODO(b/163099951): batch the all-reduces once we sort out the ordering
<del> # issue for NCCL. We don't have a mechanism to launch NCCL in the same
<del> # order in each replica nowadays, so we limit NCCL to batch all-reduces.
<add> # TODO(b/163099951): batch the all-reduces once we sort out the
<add> # ordering issue for NCCL. We don't have a mechanism to launch
<add> # NCCL in the same order in each replica nowadays, so we limit
<add> # NCCL to batch all-reduces.
<ide> y_sum = replica_ctx.all_reduce(
<ide> tf.distribute.ReduceOp.SUM, local_sum
<ide> )
<ide> def _calculate_mean_and_var(self, x, axes, keep_dims):
<ide> # var = E(x^2) - E(x)^2
<ide> variance = y_squared_mean - tf.square(mean)
<ide> else:
<del> # Compute true mean while keeping the dims for proper broadcasting.
<add> # Compute true mean while keeping the dims for proper
<add> # broadcasting.
<ide> mean = tf.reduce_mean(y, axes, keepdims=True, name="mean")
<ide> # sample variance, not unbiased variance
<ide> # Note: stop_gradient does not change the gradient that gets
<del> # backpropagated to the mean from the variance calculation,
<del> # because that gradient is zero
<add> # backpropagated to the mean from the variance calculation,
<add> # because that gradient is zero
<ide> variance = tf.reduce_mean(
<ide> tf.math.squared_difference(y, tf.stop_gradient(mean)),
<ide> axes,
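The cross-replica branch above relies on the identity var = E(x^2) - E(x)^2, so that only sums need to be all-reduced; a plain NumPy check of that identity (no distribution strategy involved):

```
import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0])
mean = y.mean()
var = (y ** 2).mean() - mean ** 2   # E(x^2) - E(x)^2
assert np.isclose(var, y.var())     # matches the direct (biased) variance
```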
<ide> class BatchNormalization(BatchNormalizationBase):
<ide> default), the layer normalizes its output using a moving average of the
<ide> mean and standard deviation of the batches it has seen during training. That
<ide> is to say, it returns
<del> `gamma * (batch - self.moving_mean) / sqrt(self.moving_var + epsilon) + beta`.
<add> `gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`.
<ide>
<ide> `self.moving_mean` and `self.moving_var` are non-trainable variables that
<ide> are updated each time the layer is called in training mode, as such:
<ide> class BatchNormalization(BatchNormalizationBase):
<ide> `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
<ide> momentum: Momentum for the moving average.
<ide> epsilon: Small float added to variance to avoid dividing by zero.
<del> center: If True, add offset of `beta` to normalized tensor. If False, `beta`
<del> is ignored.
<del> scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
<del> next layer is linear (also e.g. `nn.relu`), this can be disabled since the
<del> scaling will be done by the next layer.
<add> center: If True, add offset of `beta` to normalized tensor. If False,
<add> `beta` is ignored.
<add> scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
<add> the next layer is linear (also e.g. `nn.relu`), this can be disabled
<add> since the scaling will be done by the next layer.
<ide> beta_initializer: Initializer for the beta weight.
<ide> gamma_initializer: Initializer for the gamma weight.
<ide> moving_mean_initializer: Initializer for the moving mean.
<ide> class BatchNormalization(BatchNormalizationBase):
<ide> inputs: Input tensor (of any rank).
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode.
<del> - `training=True`: The layer will normalize its inputs using the mean and
<del> variance of the current batch of inputs.
<del> - `training=False`: The layer will normalize its inputs using the mean and
<del> variance of its moving statistics, learned during training.
<add> - `training=True`: The layer will normalize its inputs using the mean
<add> and variance of the current batch of inputs.
<add> - `training=False`: The layer will normalize its inputs using the mean
<add> and variance of its moving statistics, learned during training.
<ide>
<ide> Input shape:
<ide> Arbitrary. Use the keyword argument `input_shape` (tuple of
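A tiny NumPy sketch of the moving-statistics update rule quoted in the docstring above (illustrative values; this mirrors the formula, not Keras internals):

```
import numpy as np

momentum = 0.99
moving_mean, moving_var = 0.0, 1.0
batch = np.array([0.0, 2.0])
moving_mean = moving_mean * momentum + batch.mean() * (1 - momentum)
moving_var = moving_var * momentum + batch.var() * (1 - momentum)
```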
<ide><path>keras/layers/normalization/batch_normalization_test.py
<ide> def test_bessels_correction(self):
<ide> )
<ide> layer(x, training=True)
<ide> self.assertTrue(layer.fused)
<del> # Since fused is used, Bessel's correction is used. The variance of [0, 2]
<del> # is 2 with Bessel's correction. Since the momentum is 0.5, the variance is
<del> # 2 * 0.5 == 1.
<add> # Since fused is used, Bessel's correction is used. The variance of [0,
<add> # 2] is 2 with Bessel's correction. Since the momentum is 0.5, the
<add> # variance is 2 * 0.5 == 1.
<ide> self.assertAllEqual(self.evaluate(layer.moving_variance), [1.0])
<ide>
<ide> x = tf.constant([0.0, 2.0], shape=[2, 1, 1, 1, 1])
<ide> def test_bessels_correction(self):
<ide> )
<ide> layer(x, training=True)
<ide> self.assertTrue(layer.fused)
<del> # Since fused is used, Bessel's correction is used. The variance of [0, 2]
<del> # is 2 with Bessel's correction. Since the momentum is 0.5, the variance is
<del> # 2 * 0.5 == 1.
<add> # Since fused is used, Bessel's correction is used. The variance of [0,
<add> # 2] is 2 with Bessel's correction. Since the momentum is 0.5, the
<add> # variance is 2 * 0.5 == 1.
<ide> self.assertAllEqual(self.evaluate(layer.moving_variance), [1.0])
<ide>
<ide>
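A quick arithmetic check of the Bessel's-correction comments above (assuming, as the expected value implies, a zero-initialized moving variance):

```
import numpy as np

x = np.array([0.0, 2.0])
print(np.var(x, ddof=1))             # 2.0, the Bessel-corrected variance
print(0.0 * 0.5 + 2.0 * (1 - 0.5))   # 1.0, the moving variance after one step
```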
<ide><path>keras/layers/normalization/layer_normalization.py
<ide> class LayerNormalization(Layer):
<ide> Normalization layer with group size set to 1.
<ide>
<ide> Args:
<del> axis: Integer or List/Tuple. The axis or axes to normalize across. Typically
<del> this is the features axis/axes. The left-out axes are typically the batch
<del> axis/axes. This argument defaults to `-1`, the last dimension in the
<del> input.
<add> axis: Integer or List/Tuple. The axis or axes to normalize across.
<add> Typically this is the features axis/axes. The left-out axes are
<add> typically the batch axis/axes. This argument defaults to `-1`, the last
<add> dimension in the input.
<ide> epsilon: Small float added to variance to avoid dividing by zero. Defaults
<ide> to 1e-3.
<del> center: If True, add offset of `beta` to normalized tensor. If False, `beta`
<del> is ignored. Defaults to True.
<del> scale: If True, multiply by `gamma`. If False, `gamma` is not used. Defaults
<del> to True. When the next layer is linear (also e.g. `nn.relu`), this can be
<del> disabled since the scaling will be done by the next layer.
<add> center: If True, add offset of `beta` to normalized tensor. If False,
<add> `beta` is ignored. Defaults to True.
<add> scale: If True, multiply by `gamma`. If False, `gamma` is not used.
<add> Defaults to True. When the next layer is linear (also e.g. `nn.relu`),
<add> this can be disabled since the scaling will be done by the next layer.
<ide> beta_initializer: Initializer for the beta weight. Defaults to zeros.
<ide> gamma_initializer: Initializer for the gamma weight. Defaults to ones.
<del> beta_regularizer: Optional regularizer for the beta weight. None by default.
<add> beta_regularizer: Optional regularizer for the beta weight. None by
<add> default.
<ide> gamma_regularizer: Optional regularizer for the gamma weight. None by
<ide> default.
<ide> beta_constraint: Optional constraint for the beta weight. None by default.
<del> gamma_constraint: Optional constraint for the gamma weight. None by default.
<add> gamma_constraint: Optional constraint for the gamma weight. None by
<add> default.
<ide>
<ide> Input shape:
<ide> Arbitrary. Use the keyword argument `input_shape` (tuple of
<ide> def __init__(
<ide>
<ide> self.supports_masking = True
<ide>
<del> # Indicates whether a faster fused implementation can be used. This will be
<del> # set to True or False in build()"
<add> # Indicates whether a faster fused implementation can be used. This will
<add> # be set to True or False in build().
<ide> self._fused = None
<ide>
<ide> def _fused_can_be_used(self, ndims):
<ide> def _fused_can_be_used(self, ndims):
<ide> if axis[-1] == ndims - 1 and axis[-1] - axis[0] == len(axis) - 1:
<ide> can_use_fused = True
<ide>
<del> # fused_batch_norm will silently raise epsilon to be at least 1.001e-5, so
<del> # we cannot used the fused version if epsilon is below that value. Also, the
<del> # variable dtype must be float32, as fused_batch_norm only supports float32
<del> # variables.
<add> # fused_batch_norm will silently raise epsilon to be at least 1.001e-5,
<add> # so we cannot use the fused version if epsilon is below that value.
<add> # Also, the variable dtype must be float32, as fused_batch_norm only
<add> # supports float32 variables.
<ide> if self.epsilon < 1.001e-5 or self.dtype != "float32":
<ide> can_use_fused = False
<ide>
<ide> def _broadcast(v):
<ide> input_dtype in ("float16", "bfloat16")
<ide> and self.dtype == "float32"
<ide> ):
<del> # If mixed precision is used, cast inputs to float32 so that this is at
<del> # least as numerically stable as the fused version.
<add> # If mixed precision is used, cast inputs to float32 so that
<add> # this is at least as numerically stable as the fused version.
<ide> inputs = tf.cast(inputs, "float32")
<ide>
<ide> # Calculate the moments on the last axis (layer activations).
<ide> mean, variance = tf.nn.moments(inputs, self.axis, keepdims=True)
<ide>
<ide> scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
<ide>
<del> # Compute layer normalization using the batch_normalization function.
<add> # Compute layer normalization using the batch_normalization
<add> # function.
<ide> outputs = tf.nn.batch_normalization(
<ide> inputs,
<ide> mean,
<ide> def _broadcast(v):
<ide>
<ide> inputs = tf.reshape(inputs, squeezed_shape)
<ide>
<del> # self.gamma and self.beta have the wrong shape for fused_batch_norm, so
<del> # we cannot pass them as the scale and offset parameters. Therefore, we
<del> # create two constant tensors in correct shapes for fused_batch_norm and
<del> # later construct a separate calculation on the scale and offset.
<add> # self.gamma and self.beta have the wrong shape for
<add> # fused_batch_norm, so we cannot pass them as the scale and offset
<add> # parameters. Therefore, we create two constant tensors in correct
<add> # shapes for fused_batch_norm and later construct a separate
<add> # calculation on the scale and offset.
<ide> scale = tf.ones([pre_dim], dtype=self.dtype)
<ide> offset = tf.zeros([pre_dim], dtype=self.dtype)
<ide>
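A hedged sketch of the fused-eligibility rule described in the comments above (contiguous trailing axes, epsilon >= 1.001e-5, float32 variables); `fused_eligible` is a hypothetical helper, not a Keras API:

```
def fused_eligible(axis, ndims, epsilon, dtype):
    contiguous_last = (
        axis[-1] == ndims - 1 and axis[-1] - axis[0] == len(axis) - 1
    )
    return contiguous_last and epsilon >= 1.001e-5 and dtype == "float32"

print(fused_eligible([2, 3], ndims=4, epsilon=1e-3, dtype="float32"))  # True
print(fused_eligible([1], ndims=3, epsilon=1e-3, dtype="float32"))     # False
```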
<ide><path>keras/layers/normalization/layer_normalization_test.py
<ide> def testIncorrectAxisType(self):
<ide> def testInvalidAxis(self):
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<del> r"Invalid value for `axis` argument. Expected 0 <= axis < inputs.rank",
<add> r"Invalid value for `axis` argument. "
<add> r"Expected 0 <= axis < inputs.rank",
<ide> ):
<ide> layer_norm = layer_normalization.LayerNormalization(axis=3)
<ide> layer_norm.build(input_shape=(2, 2, 2))
<ide> def _test_forward_pass(
<ide> """Tests the forward pass of layer layer_normalization.
<ide>
<ide> Args:
<del> batch_input_shape: The input shape that will be used to test, including
<del> the batch dimension.
<del> axis: A list of axes to normalize. Will be passed to the `axis` argument
<del> of Layerlayer_normalization.
<add> batch_input_shape: The input shape that will be used to test,
<add> including the batch dimension.
<add> axis: A list of axes to normalize. Will be passed to the `axis`
<add> argument of LayerNormalization.
<ide> fp64_tol: The relative and absolute tolerance for float64.
<ide> fp32_tol: The relative and absolute tolerance for float32.
<ide> fp16_tol: The relative and absolute tolerance for float16.
<ide> def _test_forward_pass(
<ide> assert dtype == "float16"
<ide> tol = fp16_tol
<ide>
<del> # We use absolute tolerances in addition to relative tolerances, because
<del> # some of the values are very close to zero.
<add> # We use absolute tolerances in addition to relative tolerances,
<add> # because some of the values are very close to zero.
<ide> self.assertAllClose(expected, actual, rtol=tol, atol=tol)
<ide>
<ide> @test_combinations.generate(
<ide> test_combinations.combine(mode=["graph", "eager"])
<ide> )
<ide> def test_forward(self):
<del> # For numeric stability, we ensure the axis's dimension(s) have at least 4
<del> # elements.
<add> # For numeric stability, we ensure the axis's dimension(s) have at least
<add> # 4 elements.
<ide> self._test_forward_pass((4, 3), (0,))
<ide> self._test_forward_pass((3, 4), (1,))
<ide> self._test_forward_pass((4, 3, 2), (0,))
<ide> def _test_backward_pass(
<ide> """Tests the backwards pass of layer layer_normalization.
<ide>
<ide> Args:
<del> batch_input_shape: The input shape that will be used to test, including
<del> the batch dimension.
<del> axis: A list of axes to normalize. Will be passed to the `axis` argument
<del> of Layerlayer_normalization.
<add> batch_input_shape: The input shape that will be used to test,
<add> including the batch dimension.
<add> axis: A list of axes to normalize. Will be passed to the `axis`
<add> argument of LayerNormalization.
<ide> fp64_tol: The relative and absolute tolerance for float64.
<ide> fp32_tol: The relative and absolute tolerance for float32.
<ide> fp16_tol: The relative and absolute tolerance for float16.
<ide> def _test_backward_pass(
<ide> x = np.random.normal(size=batch_input_shape)
<ide>
<ide> for epsilon in 1e-12, 1e-3:
<del> # Float64 must come first in this list, as we use the float64 numerical
<del> # gradients to compare to the float32 and float16 symbolic gradients as
<del> # well. Computing float32/float16 numerical gradients is too numerically
<del> # unstable.
<add> # Float64 must come first in this list, as we use the float64
<add> # numerical gradients to compare to the float32 and float16 symbolic
<add> # gradients as well. Computing float32/float16 numerical gradients
<add> # is too numerically unstable.
<ide> for dtype in "float64", "float32", "float16":
<ide> norm = layer_normalization.LayerNormalization(
<ide> axis=axis,
<ide> def _test_backward_pass(
<ide>
<ide> # pylint: disable=cell-var-from-loop
<ide> def forward_fn(x, beta, gamma):
<del> # We must monkey-patch the attributes of `norm` with the function
<del> # arguments, so that the gradient checker will properly compute their
<del> # gradients. The gradient checker computes gradients with respect to
<del> # the input arguments of `f`.
<add> # We must monkey-patch the attributes of `norm` with the
<add> # function arguments, so that the gradient checker will
<add> # properly compute their gradients. The gradient checker
<add> # computes gradients with respect to the input arguments of
<add> # `f`.
<ide> with tf.compat.v1.test.mock.patch.object(
<ide> norm, "beta", beta
<ide> ):
<ide> def forward_fn(x, beta, gamma):
<ide> ) = results
<ide>
<ide> if dtype == "float64":
<del> # We use the float64 numeric gradients as the reference, to compare
<del> # against the symbolic gradients for all dtypes.
<add> # We use the float64 numeric gradients as the reference, to
<add> # compare against the symbolic gradients for all dtypes.
<ide> x_grad_ref = x_grad_n
<ide> beta_grad_ref = beta_grad_n
<ide> gamma_grad_ref = gamma_grad_n
<ide> def forward_fn(x, beta, gamma):
<ide> assert dtype == "float16"
<ide> tol = fp16_tol
<ide>
<del> # We use absolute tolerances in addition to relative tolerances, because
<del> # some of the values are very close to zero.
<add> # We use absolute tolerances in addition to relative tolerances,
<add> # because some of the values are very close to zero.
<ide> self.assertAllClose(x_grad_t, x_grad_ref, rtol=tol, atol=tol)
<ide> self.assertAllClose(
<ide> beta_grad_t, beta_grad_ref, rtol=tol, atol=tol
<ide> def forward_fn(x, beta, gamma):
<ide> gamma_grad_t, gamma_grad_ref, rtol=tol, atol=tol
<ide> )
<ide>
<del> # The gradient_checker_v2 does not work properly with LayerNorm in graph mode.
<add> # The gradient_checker_v2 does not work properly with LayerNorm in graph
<add> # mode.
<ide> @test_utils.run_v2_only
<ide> def test_backward(self):
<del> # For numeric stability, we ensure the axis's dimension(s) have at least 4
<del> # elements.
<add> # For numeric stability, we ensure the axis's dimension(s) have at least
<add> # 4 elements.
<ide> self._test_backward_pass((4, 3), (0,))
<ide> self._test_backward_pass((2, 4, 2), (1,))
<ide> self._test_backward_pass((2, 3, 4), (2,))
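# A minimal sketch of the comparison strategy described above, assuming the
# public tf.keras API: tf.test.compute_gradient returns (theoretical,
# numerical) Jacobians, and the float64 numerical one serves as the reference.
import numpy as np
import tensorflow as tf

norm = tf.keras.layers.LayerNormalization(axis=-1, dtype="float64")
norm.build((4, 3))
x = tf.constant(np.random.normal(size=(4, 3)))

theoretical, numerical = tf.test.compute_gradient(lambda t: norm(t), [x])
np.testing.assert_allclose(theoretical[0], numerical[0], rtol=1e-4, atol=1e-4)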
<ide><path>keras/layers/normalization/unit_normalization.py
<ide> class UnitNormalization(base_layer.Layer):
<ide> 1.0
<ide>
<ide> Args:
<del> axis: Integer or list/tuple. The axis or axes to normalize across. Typically
<del> this is the features axis or axes. The left-out axes are typically the
<del> batch axis or axes. Defaults to `-1`, the last dimension in
<del> the input.
<add> axis: Integer or list/tuple. The axis or axes to normalize across.
<add> Typically this is the features axis or axes. The left-out axes are
<add> typically the batch axis or axes. Defaults to `-1`, the last dimension
<add> in the input.
<ide> """
<ide>
<ide> def __init__(self, axis=-1, **kwargs):
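# A minimal sketch of the behavior documented above: each sample is rescaled
# to unit L2 norm along `axis`. The values are illustrative.
import tensorflow as tf

data = tf.constant([[3.0, 4.0], [6.0, 8.0]])
normalized = tf.keras.layers.UnitNormalization(axis=-1)(data)
# Rows become [[0.6, 0.8], [0.6, 0.8]]; each now has L2 norm 1.
print(tf.linalg.norm(normalized, axis=-1))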
<ide><path>keras/layers/pooling/average_pooling3d.py
<ide> class AveragePooling3D(Pooling3D):
<ide> """Average pooling operation for 3D data (spatial or spatio-temporal).
<ide>
<del> Downsamples the input along its spatial dimensions (depth, height, and width)
<del> by taking the average value over an input window
<add> Downsamples the input along its spatial dimensions (depth, height, and
<add> width) by taking the average value over an input window
<ide> (of size defined by `pool_size`) for each channel of the input.
<ide> The window is shifted by `strides` along each dimension.
<ide>
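# A short sketch of the windowed averaging described above, on a
# (batch, depth, height, width, channels) input.
import tensorflow as tf

x = tf.reshape(tf.range(16, dtype=tf.float32), (1, 2, 2, 2, 2))
pooled = tf.keras.layers.AveragePooling3D(pool_size=2)(x)
print(pooled.shape)  # (1, 1, 1, 1, 2): one 2x2x2 window per channel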
<ide><path>keras/layers/pooling/average_pooling_test.py
<ide> def test_average_pooling_2d(self):
<ide> # This part of the test can only run on GPU but doesn't appear
<ide> # to be properly assigned to a GPU when running in eager mode.
<ide> if not tf.executing_eagerly():
<del> # Only runs on GPU with CUDA, channels_first is not supported on CPU.
<add> # Only runs on GPU with CUDA, channels_first is not supported on
<add> # CPU.
<ide> # TODO(b/62340061): Support channels_first on CPU.
<ide> if tf.test.is_gpu_available(cuda_only=True):
<ide> test_utils.layer_test(
<ide><path>keras/layers/pooling/base_pooling2d.py
<ide> class Pooling2D(Layer):
<ide>
<ide> Args:
<ide> pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
<del> pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
<add> pool_size: An integer or tuple/list of 2 integers:
<add> (pool_height, pool_width)
<ide> specifying the size of the pooling window.
<ide> Can be a single integer to specify the same value for
<ide> all spatial dimensions.
<ide> class Pooling2D(Layer):
<ide> all spatial dimensions.
<ide> padding: A string. The padding method, either 'valid' or 'same'.
<ide> Case-insensitive.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`.
<ide> The ordering of the dimensions in the inputs.
<ide> `channels_last` corresponds to inputs with shape
<ide> `(batch, height, width, channels)` while `channels_first` corresponds to
<ide><path>keras/layers/pooling/base_pooling3d.py
<ide> class Pooling3D(Layer):
<ide> all spatial dimensions.
<ide> padding: A string. The padding method, either 'valid' or 'same'.
<ide> Case-insensitive.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`.
<ide> The ordering of the dimensions in the inputs.
<ide> `channels_last` corresponds to inputs with shape
<ide> `(batch, depth, height, width, channels)`
<ide><path>keras/layers/pooling/max_pooling3d.py
<ide> class MaxPooling3D(Pooling3D):
<ide> """Max pooling operation for 3D data (spatial or spatio-temporal).
<ide>
<del> Downsamples the input along its spatial dimensions (depth, height, and width)
<del> by taking the maximum value over an input window
<del> (of size defined by `pool_size`) for each channel of the input.
<del> The window is shifted by `strides` along each dimension.
<add> Downsamples the input along its spatial dimensions (depth, height, and
<add> width) by taking the maximum value over an input window (of size defined by
<add> `pool_size`) for each channel of the input. The window is shifted by
<add> `strides` along each dimension.
<ide>
<ide> Args:
<ide> pool_size: Tuple of 3 integers,
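# A companion sketch for the max-pooling variant described above: identical
# window mechanics, but each window reduces to its maximum.
import tensorflow as tf

x = tf.random.uniform((1, 4, 4, 4, 1))
pooled = tf.keras.layers.MaxPooling3D(pool_size=2, strides=2, padding="valid")(x)
print(pooled.shape)  # (1, 2, 2, 2, 1)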
<ide><path>keras/layers/preprocessing/benchmarks/category_hash_dense_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of categorical hash columns with dense inputs."""
<add>"""Benchmark for KPL implementation of categorical hash columns with dense
<add>inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/category_hash_varlen_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of categorical hash columns with varying-length inputs."""
<add>"""Benchmark for KPL implementation of categorical hash columns with
<add>varying-length inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/category_vocab_file_dense_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of vocabulary columns from files with dense inputs."""
<add>"""Benchmark for KPL implementation of vocabulary columns from files with dense
<add>inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/category_vocab_file_varlen_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of vocabulary columns from files with varying-length inputs."""
<add>"""Benchmark for KPL implementation of vocabulary columns from files with
<add>varying-length inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/category_vocab_list_dense_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of vocabulary columns from lists with dense inputs."""
<add>"""Benchmark for KPL implementation of vocabulary columns from lists with dense
<add>inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_dense_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of vocabulary columns + indicator from lists with dense inputs."""
<add>"""Benchmark for KPL implementation of vocabulary columns + indicator from lists
<add>with dense inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_varlen_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of vocabulary columns + indicator from lists with varying-length inputs."""
<add>"""Benchmark for KPL implementation of vocabulary columns + indicator from lists
<add>with varying-length inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/category_vocab_list_varlen_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of vocabulary columns from lists with varying-length inputs."""
<add>"""Benchmark for KPL implementation of vocabulary columns from lists with
<add>varying-length inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/embedding_varlen_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of embedding column with varying-length inputs."""
<add>"""Benchmark for KPL implementation of embedding column with varying-length
<add>inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/hashed_crossing_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of categorical cross hash columns with dense inputs."""
<add>"""Benchmark for KPL implementation of categorical cross hash columns with dense
<add>inputs."""
<ide>
<ide>
<ide> import keras
<ide><path>keras/layers/preprocessing/benchmarks/weighted_embedding_varlen_benchmark.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del>"""Benchmark for KPL implementation of weighted embedding column with varying-length inputs."""
<add>"""Benchmark for KPL implementation of weighted embedding column with
<add>varying-length inputs."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/category_encoding.py
<ide> class CategoryEncoding(base_layer.Layer):
<ide> This layer provides options for condensing data into a categorical encoding
<ide> when the total number of tokens are known in advance. It accepts integer
<ide> values as inputs, and it outputs a dense or sparse representation of those
<del> inputs. For integer inputs where the total number of tokens is not known, use
<del> `tf.keras.layers.IntegerLookup` instead.
<add> inputs. For integer inputs where the total number of tokens is not known,
<add> use `tf.keras.layers.IntegerLookup` instead.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide> class CategoryEncoding(base_layer.Layer):
<ide> [0. , 0.2, 0. , 0.4]], dtype=float32)>
<ide>
<ide> Args:
<del> num_tokens: The total number of tokens the layer should support. All inputs
<del> to the layer must integers in the range `0 <= value < num_tokens`, or an
<del> error will be thrown.
<add> num_tokens: The total number of tokens the layer should support. All
<add>         inputs to the layer must be integers in the range `0 <= value <
<add> num_tokens`, or an error will be thrown.
<ide> output_mode: Specification for the output of the layer.
<ide> Defaults to `"multi_hot"`. Values can be `"one_hot"`, `"multi_hot"` or
<ide> `"count"`, configuring the layer as follows:
<ide> class CategoryEncoding(base_layer.Layer):
<ide> last dimension is not size 1, will append a new dimension for the
<ide> encoded output.
<ide> - `"multi_hot"`: Encodes each sample in the input into a single array
<del> of `num_tokens` size, containing a 1 for each vocabulary term present
<del> in the sample. Treats the last dimension as the sample dimension, if
<del> input shape is `(..., sample_length)`, output shape will be
<del> `(..., num_tokens)`.
<add> of `num_tokens` size, containing a 1 for each vocabulary term
<add> present in the sample. Treats the last dimension as the sample
<add> dimension, if input shape is `(..., sample_length)`, output shape
<add> will be `(..., num_tokens)`.
<ide> - `"count"`: Like `"multi_hot"`, but the int array contains a count of
<ide> the number of times the token at that index appeared in the sample.
<ide> For all output modes, currently only output up to rank 2 is supported.
<ide> class CategoryEncoding(base_layer.Layer):
<ide> Call arguments:
<ide> inputs: A 1D or 2D tensor of integer inputs.
<ide> count_weights: A tensor in the same shape as `inputs` indicating the
<del> weight for each sample value when summing up in `count` mode. Not used in
<del> `"multi_hot"` or `"one_hot"` modes.
<add> weight for each sample value when summing up in `count` mode. Not used
<add> in `"multi_hot"` or `"one_hot"` modes.
<ide> """
<ide>
<ide> def __init__(
<ide> self, num_tokens=None, output_mode="multi_hot", sparse=False, **kwargs
<ide> ):
<del> # max_tokens is an old name for the num_tokens arg we continue to support
<del> # because of usage.
<add> # max_tokens is an old name for the num_tokens arg we continue to
<add> # support because of usage.
<ide> if "max_tokens" in kwargs:
<ide> logging.warning(
<ide> "max_tokens is deprecated, please use num_tokens instead."
<ide> def call(self, inputs, count_weights=None):
<ide> if count_weights is not None:
<ide> if self.output_mode != COUNT:
<ide> raise ValueError(
<del> "`count_weights` is not used when `output_mode` is not `'count'`. "
<del> "Received `count_weights={}`.".format(count_weights)
<add> "`count_weights` is not used when `output_mode` is not "
<add>                     f"`'count'`. Received `count_weights={count_weights}`."
<ide> )
<ide> count_weights = utils.ensure_tensor(
<ide> count_weights, self.compute_dtype
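# A minimal sketch of the output modes documented above; token ids must lie
# in [0, num_tokens).
import tensorflow as tf

layer = tf.keras.layers.CategoryEncoding(num_tokens=4, output_mode="multi_hot")
print(layer([[0, 1], [0, 0], [1, 2], [3, 1]]))  # one row of size 4 per sample

layer = tf.keras.layers.CategoryEncoding(num_tokens=4, output_mode="count")
# count_weights scales each occurrence before summing, per the call arguments.
print(layer([[0, 1], [0, 0]], count_weights=[[0.1, 0.2], [0.1, 0.1]]))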
<ide><path>keras/layers/preprocessing/category_encoding_test.py
<ide> def test_dense_oov_input(self):
<ide> int_data = encoder_layer(input_data)
<ide> self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
<ide> model = keras.Model(inputs=input_data, outputs=int_data)
<del> # Call predict once on valid input to compile a graph and test control flow.
<add> # Call predict once on valid input to compile a graph and test control
<add> # flow.
<ide> _ = model.predict(valid_array, steps=1)
<ide> with self.assertRaisesRegex(
<ide> tf.errors.InvalidArgumentError,
<ide> def test_dense_negative(self):
<ide> int_data = encoder_layer(input_data)
<ide> self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
<ide> model = keras.Model(inputs=input_data, outputs=int_data)
<del> # Call predict once on valid input to compile a graph and test control flow.
<add> # Call predict once on valid input to compile a graph and test control
<add> # flow.
<ide> _ = model.predict(valid_array, steps=1)
<ide> with self.assertRaisesRegex(
<ide> tf.errors.InvalidArgumentError,
<ide><path>keras/layers/preprocessing/discretization.py
<ide> def summarize(values, epsilon):
<ide>
<ide> Args:
<ide> values: 1D `np.ndarray` to be summarized.
<del> epsilon: A `'float32'` that determines the approximate desired precision.
<add> epsilon: A `'float32'` that determines the approximate desired
<add> precision.
<ide>
<ide> Returns:
<ide> A 2D `np.ndarray` that is a summary of the inputs. First column is the
<ide> def summarize(values, epsilon):
<ide> def compress(summary, epsilon):
<ide> """Compress a summary to within `epsilon` accuracy.
<ide>
<del> The compression step is needed to keep the summary sizes small after merging,
<del> and also used to return the final target boundaries. It finds the new bins
<del> based on interpolating cumulative weight percentages from the large summary.
<del> Taking the difference of the cumulative weights from the previous bin's
<del> cumulative weight will give the new weight for that bin.
<add> The compression step is needed to keep the summary sizes small after
<add> merging, and also used to return the final target boundaries. It finds the
<add> new bins based on interpolating cumulative weight percentages from the large
<add> summary. Taking the difference of the cumulative weights from the previous
<add> bin's cumulative weight will give the new weight for that bin.
<ide>
<ide> Args:
<ide> summary: 2D `np.ndarray` summary to be compressed.
<del> epsilon: A `'float32'` that determines the approxmiate desired precision.
<add>         epsilon: A `'float32'` that determines the approximate desired
<add> precision.
<ide>
<ide> Returns:
<ide> A 2D `np.ndarray` that is a compressed summary. First column is the
<ide> class Discretization(base_preprocessing_layer.PreprocessingLayer):
<ide> Arguments:
<ide> bin_boundaries: A list of bin boundaries. The leftmost and rightmost bins
<ide> will always extend to `-inf` and `inf`, so `bin_boundaries=[0., 1., 2.]`
<del> generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`. If
<del> this option is set, `adapt()` should not be called.
<add> generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`.
<add> If this option is set, `adapt()` should not be called.
<ide> num_bins: The integer number of bins to compute. If this option is set,
<ide> `adapt()` should be called to learn the bin boundaries.
<ide> epsilon: Error tolerance, typically a small fraction close to zero (e.g.
<ide> 0.01). Higher values of epsilon increase the quantile approximation, and
<ide> hence result in more unequal buckets, but could improve performance
<ide> and resource consumption.
<del> output_mode: Specification for the output of the layer. Defaults to `"int"`.
<del> Values can be `"int"`, `"one_hot"`, `"multi_hot"`, or `"count"`
<del> configuring the layer as follows:
<add> output_mode: Specification for the output of the layer. Defaults to
<add> `"int"`. Values can be `"int"`, `"one_hot"`, `"multi_hot"`, or
<add> `"count"` configuring the layer as follows:
<ide> - `"int"`: Return the discritized bin indices directly.
<ide> - `"one_hot"`: Encodes each individual element in the input into an
<ide> array the same size as `num_bins`, containing a 1 at the input's bin
<del> index. If the last dimension is size 1, will encode on that dimension.
<del> If the last dimension is not size 1, will append a new dimension for
<del> the encoded output.
<add> index. If the last dimension is size 1, will encode on that
<add> dimension. If the last dimension is not size 1, will append a new
<add> dimension for the encoded output.
<ide> - `"multi_hot"`: Encodes each sample in the input into a single array
<ide> the same size as `num_bins`, containing a 1 for each bin index
<ide> index present in the sample. Treats the last dimension as the sample
<del> dimension, if input shape is `(..., sample_length)`, output shape will
<del> be `(..., num_tokens)`.
<del> - `"count"`: As `"multi_hot"`, but the int array contains a count of the
<del> number of times the bin index appeared in the sample.
<add> dimension, if input shape is `(..., sample_length)`, output shape
<add> will be `(..., num_tokens)`.
<add> - `"count"`: As `"multi_hot"`, but the int array contains a count of
<add> the number of times the bin index appeared in the sample.
<ide> sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
<ide> and `"count"` output modes. If True, returns a `SparseTensor` instead of
<ide> a dense `Tensor`. Defaults to False.
<ide> def __init__(
<ide> sparse=False,
<ide> **kwargs,
<ide> ):
<del> # bins is a deprecated arg for setting bin_boundaries or num_bins that still
<del> # has some usage.
<add> # bins is a deprecated arg for setting bin_boundaries or num_bins that
<add> # still has some usage.
<ide> if "bins" in kwargs:
<ide> logging.warning(
<del> "bins is deprecated, please use bin_boundaries or num_bins instead."
<add> "bins is deprecated, "
<add> "please use bin_boundaries or num_bins instead."
<ide> )
<ide> if isinstance(kwargs["bins"], int) and num_bins is None:
<ide> num_bins = kwargs["bins"]
<ide> def __init__(
<ide> elif (
<ide> output_mode == "int" and not tf.as_dtype(kwargs["dtype"]).is_integer
<ide> ):
<del> # Compat for when dtype was always floating and ignored by the layer.
<add> # Compat for when dtype was always floating and ignored by the
<add> # layer.
<ide> kwargs["dtype"] = tf.int64
<ide>
<ide> super().__init__(**kwargs)
<ide> def build(self, input_shape):
<ide> if self.input_bin_boundaries is not None:
<ide> return
<ide>
<del> # Summary contains two equal length vectors of bins at index 0 and weights
<del> # at index 1.
<add> # Summary contains two equal length vectors of bins at index 0 and
<add> # weights at index 1.
<ide> self.summary = self.add_weight(
<ide> name="summary",
<ide> shape=(2, None),
<ide> def build(self, input_shape):
<ide> def adapt(self, data, batch_size=None, steps=None):
<ide> """Computes bin boundaries from quantiles in a input dataset.
<ide>
<del> Calling `adapt()` on a `Discretization` layer is an alternative to passing
<del> in a `bin_boundaries` argument during construction. A `Discretization` layer
<del> should always be either adapted over a dataset or passed `bin_boundaries`.
<add> Calling `adapt()` on a `Discretization` layer is an alternative to
<add> passing in a `bin_boundaries` argument during construction. A
<add> `Discretization` layer should always be either adapted over a dataset or
<add> passed `bin_boundaries`.
<ide>
<ide> During `adapt()`, the layer will estimate the quantile boundaries of the
<del> input dataset. The number of quantiles can be controlled via the `num_bins`
<del> argument, and the error tolerance for quantile boundaries can be controlled
<del> via the `epsilon` argument.
<del>
<del> In order to make `Discretization` efficient in any distribution context, the
<del> computed boundaries are kept static with respect to any compiled `tf.Graph`s
<del> that call the layer. As a consequence, if the layer is adapted a second
<del> time, any models using the layer should be re-compiled. For more information
<del> see `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<del>
<del> `adapt()` is meant only as a single machine utility to compute layer state.
<del> To analyze a dataset that cannot fit on a single machine, see
<del> [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)
<del> for a multi-machine, map-reduce solution.
<add> input dataset. The number of quantiles can be controlled via the
<add> `num_bins` argument, and the error tolerance for quantile boundaries can
<add> be controlled via the `epsilon` argument.
<add>
<add> In order to make `Discretization` efficient in any distribution context,
<add> the computed boundaries are kept static with respect to any compiled
<add> `tf.Graph`s that call the layer. As a consequence, if the layer is
<add> adapted a second time, any models using the layer should be re-compiled.
<add> For more information see
<add> `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<add>
<add> `adapt()` is meant only as a single machine utility to compute layer
<add> state. To analyze a dataset that cannot fit on a single machine, see
<add> [Tensorflow Transform](
<add> https://www.tensorflow.org/tfx/transform/get_started) for a
<add> multi-machine, map-reduce solution.
<ide>
<ide> Arguments:
<ide> data: The data to train on. It can be passed either as a
<ide> def adapt(self, data, batch_size=None, steps=None):
<ide> def update_state(self, data):
<ide> if self.input_bin_boundaries is not None:
<ide> raise ValueError(
<del> "Cannot adapt a Discretization layer that has been initialized with "
<del> "`bin_boundaries`, use `num_bins` instead. You passed "
<add> "Cannot adapt a Discretization layer that has been initialized "
<add> "with `bin_boundaries`, use `num_bins` instead. You passed "
<ide> "`bin_boundaries={}`.".format(self.input_bin_boundaries)
<ide> )
<ide>
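# A minimal sketch of the two configuration styles documented above.
import tensorflow as tf

# Explicit boundaries: bins are (-inf, 0), [0, 1), [1, 2), [2, +inf).
layer = tf.keras.layers.Discretization(bin_boundaries=[0.0, 1.0, 2.0])
print(layer([[-0.5, 0.5, 1.5, 2.5]]))  # [[0, 1, 2, 3]]

# Learned boundaries: adapt() estimates quantiles to within `epsilon`.
layer = tf.keras.layers.Discretization(num_bins=4, epsilon=0.01)
layer.adapt(tf.constant([[-1.0, 0.2, 0.7, 1.1, 2.4, 3.5, 4.0, 4.5]]))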
<ide><path>keras/layers/preprocessing/hashed_crossing.py
<ide> class HashedCrossing(base_layer.Layer):
<ide> """A preprocessing layer which crosses features using the "hashing trick".
<ide>
<del> This layer performs crosses of categorical features using the "hasing trick".
<del> Conceptually, the transformation can be thought of as:
<add>     This layer performs crosses of categorical features using the "hashing
<add> trick". Conceptually, the transformation can be thought of as:
<ide> hash(concatenation of features) % `num_bins`.
<ide>
<ide> This layer currently only performs crosses of scalar inputs and batches of
<ide> class HashedCrossing(base_layer.Layer):
<ide>
<ide> Args:
<ide> num_bins: Number of hash bins.
<del> output_mode: Specification for the output of the layer. Defaults to `"int"`.
<del> Values can be `"int"`, or `"one_hot"` configuring the layer as follows:
<add> output_mode: Specification for the output of the layer. Defaults to
<add> `"int"`. Values can be `"int"`, or `"one_hot"` configuring the layer as
<add> follows:
<ide> - `"int"`: Return the integer bin indices directly.
<ide> - `"one_hot"`: Encodes each individual element in the input into an
<ide> array the same size as `num_bins`, containing a 1 at the input's bin
<ide> def __init__(self, num_bins, output_mode="int", sparse=False, **kwargs):
<ide> self.sparse = sparse
<ide>
<ide> def call(self, inputs):
<del> # Convert all inputs to tensors and check shape. This layer only supports
<del> # sclars and batches of scalars for the initial version.
<add> # Convert all inputs to tensors and check shape. This layer only
<add>         # supports scalars and batches of scalars for the initial version.
<ide> self._check_at_least_two_inputs(inputs)
<ide> inputs = [utils.ensure_tensor(x) for x in inputs]
<ide> self._check_input_shape_and_type(inputs)
<ide> def call(self, inputs):
<ide>
<ide> # Fix output shape and downrank to match input rank.
<ide> if rank == 2:
<del> # tf.sparse.cross_hashed output shape will always be None on the last
<del> # dimension. Given our input shape restrictions, we want to force shape 1
<del> # instead.
<add> # tf.sparse.cross_hashed output shape will always be None on the
<add> # last dimension. Given our input shape restrictions, we want to
<add> # force shape 1 instead.
<ide> outputs = tf.reshape(outputs, [-1, 1])
<ide> elif rank == 1:
<ide> outputs = tf.reshape(outputs, [-1])
<ide> def get_config(self):
<ide> def _check_at_least_two_inputs(self, inputs):
<ide> if not isinstance(inputs, (list, tuple)):
<ide> raise ValueError(
<del> "`HashedCrossing` should be called on a list or tuple of inputs. "
<del> f"Received: inputs={inputs}"
<add> "`HashedCrossing` should be called on a list or tuple of "
<add> f"inputs. Received: inputs={inputs}"
<ide> )
<ide> if len(inputs) < 2:
<ide> raise ValueError(
<ide> def _check_input_shape_and_type(self, inputs):
<ide> rank = len(first_shape)
<ide> if rank > 2 or (rank == 2 and first_shape[-1] != 1):
<ide> raise ValueError(
<del> "All `HashedCrossing` inputs should have shape `[]`, `[batch_size]` "
<del> f"or `[batch_size, 1]`. Received: inputs={inputs}"
<add> "All `HashedCrossing` inputs should have shape `[]`, "
<add> "`[batch_size]` or `[batch_size, 1]`. "
<add> f"Received: inputs={inputs}"
<ide> )
<ide> if not all(x.shape.as_list() == first_shape for x in inputs[1:]):
<ide> raise ValueError(
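# A minimal sketch of crossing two scalar features, per the description
# above: conceptually hash(concatenation of features) % num_bins. This
# assumes the layer is exported as tf.keras.layers.HashedCrossing.
import tensorflow as tf

layer = tf.keras.layers.HashedCrossing(num_bins=5)
feat1 = tf.constant(["a", "b", "c"])
feat2 = tf.constant([1, 2, 3])
print(layer((feat1, feat2)))  # three bin indices in [0, 5)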
<ide><path>keras/layers/preprocessing/hashing.py
<ide> class Hashing(base_layer.Layer):
<ide> stable across invocations, regardless of device and context, by mixing the
<ide> input bits thoroughly.
<ide>
<del> If you want to obfuscate the hashed output, you can also pass a random `salt`
<del> argument in the constructor. In that case, the layer will use the
<add> If you want to obfuscate the hashed output, you can also pass a random
<add> `salt` argument in the constructor. In that case, the layer will use the
<ide> [SipHash64](https://github.com/google/highwayhash) hash function, with
<ide> the `salt` value serving as additional input to the hash function.
<ide>
<ide> class Hashing(base_layer.Layer):
<ide> [0]])>
<ide>
<ide> Args:
<del> num_bins: Number of hash bins. Note that this includes the `mask_value` bin,
<del> so the effective number of bins is `(num_bins - 1)` if `mask_value` is
<del> set.
<add> num_bins: Number of hash bins. Note that this includes the `mask_value`
<add> bin, so the effective number of bins is `(num_bins - 1)` if `mask_value`
<add> is set.
<ide> mask_value: A value that represents masked inputs, which are mapped to
<ide> index 0. Defaults to None, meaning no mask term will be added and the
<ide> hashing will start at index 0.
<ide> class Hashing(base_layer.Layer):
<ide> used as an additional input (known as a "salt" in cryptography).
<ide> These should be non-zero. Defaults to `None` (in that
<ide> case, the FarmHash64 hash function is used). It also supports
<del> tuple/list of 2 unsigned integer numbers, see reference paper for details.
<del> output_mode: Specification for the output of the layer. Defaults to `"int"`.
<del> Values can be `"int"`, `"one_hot"`, `"multi_hot"`, or `"count"`
<del> configuring the layer as follows:
<add> tuple/list of 2 unsigned integer numbers, see reference paper for
<add> details.
<add> output_mode: Specification for the output of the layer. Defaults to
<add> `"int"`. Values can be `"int"`, `"one_hot"`, `"multi_hot"`, or
<add> `"count"` configuring the layer as follows:
<ide> - `"int"`: Return the integer bin indices directly.
<ide> - `"one_hot"`: Encodes each individual element in the input into an
<ide> array the same size as `num_bins`, containing a 1 at the input's bin
<del> index. If the last dimension is size 1, will encode on that dimension.
<del> If the last dimension is not size 1, will append a new dimension for
<del> the encoded output.
<add> index. If the last dimension is size 1, will encode on that
<add> dimension. If the last dimension is not size 1, will append a new
<add> dimension for the encoded output.
<ide> - `"multi_hot"`: Encodes each sample in the input into a single array
<ide> the same size as `num_bins`, containing a 1 for each bin index
<ide> index present in the sample. Treats the last dimension as the sample
<del> dimension, if input shape is `(..., sample_length)`, output shape will
<del> be `(..., num_tokens)`.
<del> - `"count"`: As `"multi_hot"`, but the int array contains a count of the
<del> number of times the bin index appeared in the sample.
<add> dimension, if input shape is `(..., sample_length)`, output shape
<add> will be `(..., num_tokens)`.
<add> - `"count"`: As `"multi_hot"`, but the int array contains a count of
<add> the number of times the bin index appeared in the sample.
<ide> sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
<ide> and `"count"` output modes. If True, returns a `SparseTensor` instead of
<ide> a dense `Tensor`. Defaults to False.
<ide> def __init__(
<ide> ):
<ide> if num_bins is None or num_bins <= 0:
<ide> raise ValueError(
<del> f"The `num_bins` for `Hashing` cannot be `None` or non-positive "
<del> f"values. Received: num_bins={num_bins}."
<add> f"The `num_bins` for `Hashing` cannot be `None` or "
<add> f"non-positive values. Received: num_bins={num_bins}."
<ide> )
<ide>
<ide> # By default, output int64 when output_mode='int' and floats otherwise.
<ide> def __init__(
<ide> elif (
<ide> output_mode == "int" and not tf.as_dtype(kwargs["dtype"]).is_integer
<ide> ):
<del> # Compat for when dtype was always floating and ignored by the layer.
<add> # Compat for when dtype was always floating and ignored by the
<add> # layer.
<ide> kwargs["dtype"] = tf.int64
<ide>
<ide> super().__init__(**kwargs)
<ide> def __init__(
<ide> self.salt = [salt, salt]
<ide> else:
<ide> raise ValueError(
<del> f"The `salt` argument for `Hashing` can only be a tuple of size 2 "
<del> f"integers, or a single integer. Received: salt={salt}."
<add> "The `salt` argument for `Hashing` can only be a tuple of "
<add> "size 2 integers, or a single integer. "
<add> f"Received: salt={salt}."
<ide> )
<ide>
<ide> def call(self, inputs):
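# A minimal sketch of stable hashing into bins, with and without a salt; the
# concrete bin values depend on the hash function and are not reproduced here.
import tensorflow as tf

layer = tf.keras.layers.Hashing(num_bins=3)
print(layer([["A"], ["B"], ["C"], ["D"], ["E"]]))   # FarmHash64, deterministic

salted = tf.keras.layers.Hashing(num_bins=3, salt=[133, 137])
print(salted([["A"], ["B"], ["C"], ["D"], ["E"]]))  # SipHash64 with this salt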
<ide><path>keras/layers/preprocessing/hashing_test.py
<ide> def test_hash_dense_input_mask_value_farmhash(self):
<ide> )
<ide> empty_mask_output = empty_mask_layer(inp)
<ide> omar_mask_output = omar_mask_layer(inp)
<del> # Outputs should be one more than test_hash_dense_input_farmhash (the zeroth
<del> # bin is now reserved for masks).
<add> # Outputs should be one more than test_hash_dense_input_farmhash (the
<add> # zeroth bin is now reserved for masks).
<ide> self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
<ide> # 'omar' should map to 0.
<ide> self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)
<ide><path>keras/layers/preprocessing/image_preprocessing.py
<ide> class Resizing(base_layer.Layer):
<ide> """A preprocessing layer which resizes images.
<ide>
<ide> This layer resizes an image input to a target height and width. The input
<del> should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"` format.
<del> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of
<del> interger or floating point dtype. By default, the layer will output floats.
<add> should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"`
<add> format. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0,
<add> 255]`) and of interger or floating point dtype. By default, the layer will
<add> output floats.
<ide>
<ide> This layer can be called on tf.RaggedTensor batches of input images of
<del> distinct sizes, and will resize the outputs to dense tensors of uniform size.
<add> distinct sizes, and will resize the outputs to dense tensors of uniform
<add> size.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide> class Resizing(base_layer.Layer):
<ide> `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
<ide> crop_to_aspect_ratio: If True, resize the images without aspect
<ide> ratio distortion. When the original aspect ratio differs from the target
<del> aspect ratio, the output image will be cropped so as to return the largest
<del> possible window in the image (of size `(height, width)`) that matches
<del> the target aspect ratio. By default (`crop_to_aspect_ratio=False`),
<del> aspect ratio may not be preserved.
<add> aspect ratio, the output image will be cropped so as to return the
<add> largest possible window in the image (of size `(height, width)`) that
<add> matches the target aspect ratio. By default
<add> (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
<ide> """
<ide>
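# A minimal sketch of resizing batched images, with and without the
# aspect-ratio-preserving crop described above.
import tensorflow as tf

images = tf.random.uniform((2, 300, 400, 3))  # channels_last
resized = tf.keras.layers.Resizing(height=224, width=224)(images)
cropped = tf.keras.layers.Resizing(224, 224, crop_to_aspect_ratio=True)(images)
print(resized.shape, cropped.shape)  # (2, 224, 224, 3) for both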
<ide> def __init__(
<ide> def __init__(
<ide> base_preprocessing_layer.keras_kpl_gauge.get_cell("Resizing").set(True)
<ide>
<ide> def call(self, inputs):
<del> # tf.image.resize will always output float32 and operate more efficiently on
<del> # float32 unless interpolation is nearest, in which case ouput type matches
<del> # input type.
<add> # tf.image.resize will always output float32 and operate more
<add> # efficiently on float32 unless interpolation is nearest, in which case
<add>         # output type matches input type.
<ide> if self.interpolation == "nearest":
<ide> input_dtype = self.compute_dtype
<ide> else:
<ide> class CenterCrop(base_layer.Layer):
<ide> """A preprocessing layer which crops images.
<ide>
<ide> This layers crops the central portion of the images to a target size. If an
<del> image is smaller than the target size, it will be resized and cropped so as to
<del> return the largest possible window in the image that matches the target aspect
<del> ratio.
<add> image is smaller than the target size, it will be resized and cropped so as
<add> to return the largest possible window in the image that matches the target
<add> aspect ratio.
<ide>
<ide> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
<del> of interger or floating point dtype. By default, the layer will output floats.
<add>     of integer or floating point dtype. By default, the layer will output
<add> floats.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide> class BaseImageAugmentationLayer(base_layer.BaseRandomLayer):
<ide> """Abstract base layer for image augmentaion.
<ide>
<ide> This layer contains base functionalities for preprocessing layers which
<del> augment image related data, eg. image and in future, label and bounding boxes.
<del> The subclasses could avoid making certain mistakes and reduce code
<add>     augment image related data, e.g. image and, in future, label and bounding
<add> boxes. The subclasses could avoid making certain mistakes and reduce code
<ide> duplications.
<ide>
<ide> This layer requires you to implement one method: `augment_image()`, which
<ide> class BaseImageAugmentationLayer(base_layer.BaseRandomLayer):
<ide> `augment_label()`, which handles label augmentation if the layer supports
<ide> that.
<ide>
<del> `augment_bounding_boxes()`, which handles the bounding box augmentation, if the
<del> layer supports that.
<add> `augment_bounding_boxes()`, which handles the bounding box augmentation, if
<add> the layer supports that.
<ide>
<ide> `get_random_transformation()`, which should produce a random transformation
<del> setting. The tranformation object, which could be any type, will be passed to
<del> `augment_image`, `augment_label` and `augment_bounding_boxes`, to coodinate
<del> the randomness behavior, eg, in the RandomFlip layer, the image and
<del> bounding_boxes should be changed in the same way.
<add>     setting. The transformation object, which could be any type, will be passed
<add>     to `augment_image`, `augment_label` and `augment_bounding_boxes`, to
<add>     coordinate the randomness behavior, e.g., in the RandomFlip layer, the image
<add>     and bounding_boxes should be changed in the same way.
<ide>
<ide> The `call()` method support two formats of inputs:
<ide> 1. Single image tensor with 3D (HWC) or 4D (NHWC) format.
<ide> class BaseImageAugmentationLayer(base_layer.BaseRandomLayer):
<ide> The output of the `call()` will be in two formats, which will be the same
<ide> structure as the inputs.
<ide>
<del> The `call()` will handle the logic detecting the training/inference
<del> mode, unpack the inputs, forward to the correct function, and pack the output
<del> back to the same structure as the inputs.
<add> The `call()` will handle the logic detecting the training/inference mode,
<add> unpack the inputs, forward to the correct function, and pack the output back
<add> to the same structure as the inputs.
<ide>
<ide> By default the `call()` method leverages the `tf.vectorized_map()` function.
<ide> Auto-vectorization can be disabled by setting `self.auto_vectorize = False`
<ide> def augment_image(self, image, transformation):
<ide> ```
<ide>
<ide> Note that since the randomness is also a common functionnality, this layer
<del> also includes a tf.keras.backend.RandomGenerator, which can be used to produce
<del> the random numbers. The random number generator is stored in the
<add> also includes a tf.keras.backend.RandomGenerator, which can be used to
<add> produce the random numbers. The random number generator is stored in the
<ide> `self._random_generator` attribute.
<ide> """
<ide>
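# A hedged sketch of the subclassing contract described above: only
# augment_image() is mandatory, and the base class routes 3D/4D inputs and
# applies the augmentation with probability `rate`. The invert logic here is
# purely illustrative, not part of the library.
import tensorflow as tf

class RandomInvert(BaseImageAugmentationLayer):
    def augment_image(self, image, transformation):
        # `transformation` is unused; a real layer would draw it in
        # get_random_transformation() to keep image/label/boxes in sync.
        return 1.0 - image

layer = RandomInvert(rate=0.5)
augmented = layer(tf.random.uniform((2, 8, 8, 3)), training=True)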
<ide> def __init__(self, rate=1.0, seed=None, **kwargs):
<ide> def auto_vectorize(self):
<ide> """Control whether automatic vectorization occurs.
<ide>
<del> By default the `call()` method leverages the `tf.vectorized_map()` function.
<del> Auto-vectorization can be disabled by setting `self.auto_vectorize = False`
<del> in your `__init__()` method. When disabled, `call()` instead relies
<del> on `tf.map_fn()`. For example:
<add> By default the `call()` method leverages the `tf.vectorized_map()`
<add> function. Auto-vectorization can be disabled by setting
<add> `self.auto_vectorize = False` in your `__init__()` method. When
<add> disabled, `call()` instead relies on `tf.map_fn()`. For example:
<ide>
<ide> ```python
<ide> class SubclassLayer(BaseImageAugmentationLayer):
<ide> def augment_image(self, image, transformation):
<ide> """Augment a single image during training.
<ide>
<ide> Args:
<del> image: 3D image input tensor to the layer. Forwarded from `layer.call()`.
<add> image: 3D image input tensor to the layer. Forwarded from
<add> `layer.call()`.
<ide> transformation: The transformation object produced by
<del> `get_random_transformation`. Used to coordinate the randomness between
<del> image, label and bounding box.
<add> `get_random_transformation`. Used to coordinate the randomness
<add> between image, label and bounding box.
<ide>
<ide> Returns:
<ide> output 3D tensor, which will be forward to `layer.call()`.
<ide> def augment_label(self, label, transformation):
<ide> Args:
<ide> label: 1D label to the layer. Forwarded from `layer.call()`.
<ide> transformation: The transformation object produced by
<del> `get_random_transformation`. Used to coordinate the randomness between
<del> image, label and bounding box.
<add> `get_random_transformation`. Used to coordinate the randomness
<add> between image, label and bounding box.
<ide>
<ide> Returns:
<ide> output 1D tensor, which will be forward to `layer.call()`.
<ide> def augment_target(self, target, transformation):
<ide> Args:
<ide> target: 1D label to the layer. Forwarded from `layer.call()`.
<ide> transformation: The transformation object produced by
<del> `get_random_transformation`. Used to coordinate the randomness between
<del> image, label and bounding box.
<add> `get_random_transformation`. Used to coordinate the randomness
<add> between image, label and bounding box.
<ide>
<ide> Returns:
<ide> output 1D tensor, which will be forward to `layer.call()`.
<ide> def augment_bounding_boxes(
<ide> """Augment bounding boxes for one image during training.
<ide>
<ide> Args:
<del> image: 3D image input tensor to the layer. Forwarded from `layer.call()`.
<del> bounding_boxes: 2D bounding boxes to the layer. Forwarded from `call()`.
<add> image: 3D image input tensor to the layer. Forwarded from
<add> `layer.call()`.
<add> bounding_boxes: 2D bounding boxes to the layer. Forwarded from
<add> `call()`.
<ide> transformation: The transformation object produced by
<del> `get_random_transformation`. Used to coordinate the randomness between
<del> image, label and bounding box.
<add> `get_random_transformation`. Used to coordinate the randomness
<add> between image, label and bounding box.
<ide>
<ide> Returns:
<ide> output 2D tensor, which will be forward to `layer.call()`.
<ide> def get_random_transformation(
<ide> ):
<ide> """Produce random transformation config for one single input.
<ide>
<del> This is used to produce same randomness between image/label/bounding_box.
<add> This is used to produce same randomness between
<add> image/label/bounding_box.
<ide>
<ide> Args:
<ide> image: 3D image tensor from inputs.
<ide> class RandomCrop(BaseImageAugmentationLayer):
<ide> """A preprocessing layer which randomly crops images during training.
<ide>
<ide> During training, this layer will randomly choose a location to crop images
<del> down to a target size. The layer will crop all the images in the same batch to
<del> the same cropping location.
<add> down to a target size. The layer will crop all the images in the same batch
<add> to the same cropping location.
<ide>
<ide> At inference time, and during training if an input image is smaller than the
<del> target size, the input will be resized and cropped so as to return the largest
<del> possible window in the image that matches the target aspect ratio. If you need
<del> to apply random cropping at inference time, set `training` to True when
<del> calling the layer.
<add> target size, the input will be resized and cropped so as to return the
<add> largest possible window in the image that matches the target aspect ratio.
<add> If you need to apply random cropping at inference time, set `training` to
<add> True when calling the layer.
<ide>
<ide> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
<del> of interger or floating point dtype. By default, the layer will output floats.
<add>     of integer or floating point dtype. By default, the layer will output
<add> floats.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
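# A minimal sketch of the train/inference behavior described above.
import tensorflow as tf

layer = tf.keras.layers.RandomCrop(height=64, width=64)
images = tf.random.uniform((4, 100, 120, 3))
print(layer(images, training=True).shape)   # random 64x64 window: (4, 64, 64, 3)
print(layer(images, training=False).shape)  # resize-then-crop:     (4, 64, 64, 3)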
<ide> def call(self, inputs, training=True):
<ide> inputs = self._ensure_inputs_are_compute_dtype(inputs)
<ide> inputs, is_dict, targets = self._format_inputs(inputs)
<ide> output = inputs
<del> # self._resize() returns valid results for both batched and unbatched
<add> # self._resize() returns valid results for both batched and
<add> # unbatched
<ide> output["images"] = self._resize(inputs["images"])
<ide> return self._format_output(output, is_dict, targets)
<ide>
<ide> def get_config(self):
<ide> class Rescaling(base_layer.Layer):
<ide> """A preprocessing layer which rescales input values to a new range.
<ide>
<del> This layer rescales every value of an input (often an image) by multiplying by
<del> `scale` and adding `offset`.
<add> This layer rescales every value of an input (often an image) by multiplying
<add> by `scale` and adding `offset`.
<ide>
<ide> For instance:
<ide>
<ide> 1. To rescale an input in the ``[0, 255]`` range
<ide> to be in the `[0, 1]` range, you would pass `scale=1./255`.
<ide>
<del> 2. To rescale an input in the ``[0, 255]`` range to be in the `[-1, 1]` range,
<del> you would pass `scale=1./127.5, offset=-1`.
<add> 2. To rescale an input in the ``[0, 255]`` range to be in the `[-1, 1]`
<add> range, you would pass `scale=1./127.5, offset=-1`.
<ide>
<ide> The rescaling is applied both during training and inference. Inputs can be
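# A quick numeric check of the two recipes above (illustrative values):
import tensorflow as tf

to_unit = tf.keras.layers.Rescaling(scale=1.0 / 255)
to_symmetric = tf.keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)
x = tf.constant([[0.0, 127.5, 255.0]])
print(to_unit(x))       # [[0., 0.5, 1.]]
print(to_symmetric(x))  # [[-1., 0., 1.]]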
<ide> of integer or floating point dtype, and by default the layer will output
<ide> class RandomFlip(BaseImageAugmentationLayer):
<ide> input. Call the layer with `training=True` to flip the input.
<ide>
<ide> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
<del> of interger or floating point dtype. By default, the layer will output floats.
<add>     of integer or floating point dtype. By default, the layer will output
<add> floats.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide> class RandomTranslation(BaseImageAugmentationLayer):
<ide> filling empty space according to `fill_mode`.
<ide>
<ide> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
<del> of interger or floating point dtype. By default, the layer will output floats.
<add> of interger or floating point dtype. By default, the layer will output
<add> floats.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide>
<ide> Args:
<del> height_factor: a float represented as fraction of value, or a tuple of size
<del> 2 representing lower and upper bound for shifting vertically. A negative
<del> value means shifting image up, while a positive value means shifting image
<del> down. When represented as a single positive float, this value is used for
<del> both the upper and lower bound. For instance, `height_factor=(-0.2, 0.3)`
<del> results in an output shifted by a random amount in the range
<del> `[-20%, +30%]`.
<del> `height_factor=0.2` results in an output height shifted by a random amount
<del> in the range `[-20%, +20%]`.
<del> width_factor: a float represented as fraction of value, or a tuple of size 2
<del> representing lower and upper bound for shifting horizontally. A negative
<del> value means shifting image left, while a positive value means shifting
<del> image right. When represented as a single positive float, this value is
<del> used for both the upper and lower bound. For instance,
<add> height_factor: a float represented as fraction of value, or a tuple of
<add> size 2 representing lower and upper bound for shifting vertically. A
<add> negative value means shifting image up, while a positive value means
<add> shifting image down. When represented as a single positive float, this
<add> value is used for both the upper and lower bound. For instance,
<add> `height_factor=(-0.2, 0.3)` results in an output shifted by a random
<add> amount in the range `[-20%, +30%]`. `height_factor=0.2` results in an
<add> output height shifted by a random amount in the range `[-20%, +20%]`.
<add> width_factor: a float represented as fraction of value, or a tuple of size
<add> 2 representing lower and upper bound for shifting horizontally. A
<add> negative value means shifting image left, while a positive value means
<add> shifting image right. When represented as a single positive float, this
<add> value is used for both the upper and lower bound. For instance,
<ide> `width_factor=(-0.2, 0.3)` results in an output shifted left by 20%, and
<ide> shifted right by 30%. `width_factor=0.2` results in an output height
<ide> shifted left or right by 20%.
<ide> class RandomTranslation(BaseImageAugmentationLayer):
<ide> filling all values beyond the edge with the same constant value k = 0.
<ide> - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
<ide> wrapping around to the opposite edge.
<del> - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
<del> nearest pixel.
<add> - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
<add> the nearest pixel.
<ide> interpolation: Interpolation mode. Supported values: `"nearest"`,
<ide> `"bilinear"`.
<ide> seed: Integer. Used to create a random seed.
<del> fill_value: a float represents the value to be filled outside the boundaries
<del> when `fill_mode="constant"`.
<add> fill_value: a float represents the value to be filled outside the
<add> boundaries when `fill_mode="constant"`.
<ide>
<ide> Input shape:
<ide> 3D (unbatched) or 4D (batched) tensor with shape:
<ide> def __init__(
<ide> @tf.function
<ide> def augment_image(self, image, transformation):
<ide> """Translated inputs with random ops."""
<del> # The transform op only accepts rank 4 inputs, so if we have an unbatched
<del> # image, we need to temporarily expand dims to a batch.
<add> # The transform op only accepts rank 4 inputs, so if we have an
<add> # unbatched image, we need to temporarily expand dims to a batch.
<ide> original_shape = image.shape
<ide> inputs = tf.expand_dims(image, 0)
<ide>
<ide> def get_random_transformation(
<ide> }
<ide>
<ide> def _batch_augment(self, inputs):
<del> # Change to vectorized_map for better performance, as well as work around
<del> # issue for different tensorspec between inputs and outputs.
<add> # Change to vectorized_map for better performance, as well as work
<add> # around issue for different tensorspec between inputs and outputs.
<ide> return tf.vectorized_map(self._augment, inputs)
<ide>
<ide> def augment_label(self, label, transformation):
<ide> def get_translation_matrix(translations, name=None):
<ide> name: The name of the op.
<ide>
<ide> Returns:
<del> A tensor of shape `(num_images, 8)` projective transforms which can be given
<del> to `transform`.
<add> A tensor of shape `(num_images, 8)` projective transforms which can be
<add> given to `transform`.
<ide> """
<ide> with backend.name_scope(name or "translation_matrix"):
<ide> num_translations = tf.shape(translations)[0]
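# A worked example of the 8-parameter form, assuming the documented
# output->input mapping: to shift image content right by dx and down by dy,
# the offsets are negated.
import tensorflow as tf

dx, dy = 2.0, 3.0
matrix = tf.constant([[1.0, 0.0, -dx, 0.0, 1.0, -dy, 0.0, 0.0]])  # shape (1, 8)
# `matrix` can be fed to the `transform()` helper defined below.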
<ide> def transform(
<ide>
<ide> Args:
<ide> images: A tensor of shape
<del> `(num_images, num_rows, num_columns, num_channels)` (NHWC). The rank must
<del> be statically known (the shape is not `TensorShape(None)`).
<add> `(num_images, num_rows, num_columns, num_channels)` (NHWC). The rank
<add> must be statically known (the shape is not `TensorShape(None)`).
<ide> transforms: Projective transform matrix/matrices. A vector of length 8 or
<del> tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2,
<del> c0, c1], then it maps the *output* point `(x, y)` to a transformed *input*
<del> point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
<del> `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the
<del> transform mapping input points to output points. Note that gradients are
<del> not backpropagated into transformation parameters.
<add> tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1,
<add> b2, c0, c1], then it maps the *output* point `(x, y)` to a transformed
<add> *input* point
<add> `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
<add> where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared
<add> to the transform mapping input points to output points. Note that
<add> gradients are not backpropagated into transformation parameters.
<ide> fill_mode: Points outside the boundaries of the input are filled according
<ide> to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
<del> fill_value: a float represents the value to be filled outside the boundaries
<del> when `fill_mode="constant"`.
<add> fill_value: a float represents the value to be filled outside the
<add> boundaries when `fill_mode="constant"`.
<ide> interpolation: Interpolation mode. Supported values: `"nearest"`,
<ide> `"bilinear"`.
<ide> output_shape: Output dimension after the transform, `[height, width]`.
<ide> def get_rotation_matrix(angles, image_height, image_width, name=None):
<ide> """Returns projective transform(s) for the given angle(s).
<ide>
<ide> Args:
<del> angles: A scalar angle to rotate all images by, or (for batches of images) a
<del> vector with an angle to rotate each image in the batch. The rank must be
<del> statically known (the shape is not `TensorShape(None)`).
<add> angles: A scalar angle to rotate all images by, or (for batches of images)
<add> a vector with an angle to rotate each image in the batch. The rank must
<add> be statically known (the shape is not `TensorShape(None)`).
<ide> image_height: Height of the image(s) to be transformed.
<ide> image_width: Width of the image(s) to be transformed.
<ide> name: The name of the op.
<ide>
<ide> Returns:
<del> A tensor of shape (num_images, 8). Projective transforms which can be given
<del> to operation `image_projective_transform_v2`. If one row of transforms is
<del> [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
<del> `(x, y)` to a transformed *input* point
<del> `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
<del> where `k = c0 x + c1 y + 1`.
<add> A tensor of shape (num_images, 8). Projective transforms which can be
<add> given to operation `image_projective_transform_v2`. If one row of
<add> transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the
<add> *output* point `(x, y)` to a transformed *input* point
<add> `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
<add> where `k = c0 x + c1 y + 1`.
<ide> """
<ide> with backend.name_scope(name or "rotation_matrix"):
<ide> x_offset = (
<ide> class RandomRotation(BaseImageAugmentationLayer):
<ide> rotations at inference time, set `training` to True when calling the layer.
<ide>
<ide> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
<del> of interger or floating point dtype. By default, the layer will output floats.
<add> of integer or floating point dtype. By default, the layer will output
<add> floats.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide> class RandomRotation(BaseImageAugmentationLayer):
<ide> while a negative value means clock-wise. When represented as a single
<ide> float, this value is used for both the upper and lower bound. For
<ide> instance, `factor=(-0.2, 0.3)` results in an output rotation by a random
<del> amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in an
<del> output rotating by a random amount in the range `[-20% * 2pi, 20% * 2pi]`.
<add> amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in
<add> an output rotating by a random amount in the range
<add> `[-20% * 2pi, 20% * 2pi]`.
<ide> fill_mode: Points outside the boundaries of the input are filled according
<ide> to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
<ide> - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
<ide> class RandomRotation(BaseImageAugmentationLayer):
<ide> filling all values beyond the edge with the same constant value k = 0.
<ide> - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
<ide> wrapping around to the opposite edge.
<del> - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
<del> nearest pixel.
<add> - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
<add> the nearest pixel.
<ide> interpolation: Interpolation mode. Supported values: `"nearest"`,
<ide> `"bilinear"`.
<ide> seed: Integer. Used to create a random seed.
<del> fill_value: a float represents the value to be filled outside the boundaries
<del> when `fill_mode="constant"`.
<add> fill_value: a float represents the value to be filled outside the
<add> boundaries when `fill_mode="constant"`.
<ide> """
<ide>
<ide> def __init__(
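A short usage sketch for the layer documented above, assuming the public `tf.keras.layers.RandomRotation` export:

```python
import numpy as np
import tensorflow as tf

# Rotate each image by a random amount in [-20% * 2pi, 20% * 2pi].
layer = tf.keras.layers.RandomRotation(factor=0.2, fill_mode="reflect")
images = np.random.random((4, 32, 32, 3)).astype("float32")
print(layer(images, training=True).shape)  # (4, 32, 32, 3)
```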
<ide> def augment_bounding_boxes(self, image, bounding_boxes, transformation):
<ide> h = image_shape[H_AXIS]
<ide> w = image_shape[W_AXIS]
<ide> bbox_dtype = bounding_boxes.dtype
<del> # origin coordinates, all the points on the image are rotated around this
<del> # point
<add> # origin coordinates, all the points on the image are rotated around
<add> # this point
<ide> origin_x, origin_y = int(h / 2), int(w / 2)
<ide> angle = transformation["angle"]
<ide> angle = -angle
<ide> class RandomZoom(BaseImageAugmentationLayer):
<ide> independently, filling empty space according to `fill_mode`.
<ide>
<ide> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
<del> of interger or floating point dtype. By default, the layer will output floats.
<add> of integer or floating point dtype. By default, the layer will output
<add> floats.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide>
<ide> Args:
<del> height_factor: a float represented as fraction of value, or a tuple of size
<del> 2 representing lower and upper bound for zooming vertically. When
<add> height_factor: a float represented as fraction of value, or a tuple of
<add> size 2 representing lower and upper bound for zooming vertically. When
<ide> represented as a single float, this value is used for both the upper and
<ide> lower bound. A positive value means zooming out, while a negative value
<ide> means zooming in. For instance, `height_factor=(0.2, 0.3)` result in an
<ide> output zoomed out by a random amount in the range `[+20%, +30%]`.
<ide> `height_factor=(-0.3, -0.2)` result in an output zoomed in by a random
<ide> amount in the range `[+20%, +30%]`.
<del> width_factor: a float represented as fraction of value, or a tuple of size 2
<del> representing lower and upper bound for zooming horizontally. When
<add> width_factor: a float represented as fraction of value, or a tuple of size
<add> 2 representing lower and upper bound for zooming horizontally. When
<ide> represented as a single float, this value is used for both the upper and
<ide> lower bound. For instance, `width_factor=(0.2, 0.3)` result in an output
<ide> zooming out between 20% to 30%. `width_factor=(-0.3, -0.2)` result in an
<ide> class RandomZoom(BaseImageAugmentationLayer):
<ide> filling all values beyond the edge with the same constant value k = 0.
<ide> - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
<ide> wrapping around to the opposite edge.
<del> - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
<del> nearest pixel.
<add> - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
<add> the nearest pixel.
<ide> interpolation: Interpolation mode. Supported values: `"nearest"`,
<ide> `"bilinear"`.
<ide> seed: Integer. Used to create a random seed.
<del> fill_value: a float represents the value to be filled outside the boundaries
<del> when `fill_mode="constant"`.
<add> fill_value: a float represents the value to be filled outside the
<add> boundaries when `fill_mode="constant"`.
<ide>
<ide> Example:
<ide>
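A corresponding usage sketch, assuming the public `tf.keras.layers.RandomZoom` export:

```python
import numpy as np
import tensorflow as tf

# Zoom in vertically by 20%-30%; leaving width_factor unset preserves
# the aspect ratio by reusing the height zoom horizontally.
layer = tf.keras.layers.RandomZoom(height_factor=(-0.3, -0.2))
images = np.random.random((4, 32, 32, 3)).astype("float32")
print(layer(images, training=True).shape)  # (4, 32, 32, 3)
```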
<ide> def get_zoom_matrix(zooms, image_height, image_width, name=None):
<ide> """Returns projective transform(s) for the given zoom(s).
<ide>
<ide> Args:
<del> zooms: A matrix of 2-element lists representing `[zx, zy]` to zoom for each
<del> image (for a batch of images).
<add> zooms: A matrix of 2-element lists representing `[zx, zy]` to zoom for
<add> each image (for a batch of images).
<ide> image_height: Height of the image(s) to be transformed.
<ide> image_width: Width of the image(s) to be transformed.
<ide> name: The name of the op.
<ide> def get_zoom_matrix(zooms, image_height, image_width, name=None):
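Analogous to the rotation case, a hedged sketch of a zoom-about-center transform in the same 8-parameter layout (the offset convention is again an assumption):

```python
def zoom_transform(zx, zy, image_height, image_width):
    # Scale output coordinates about the image center; zx, zy > 1 zooms
    # out (a larger input area is sampled), < 1 zooms in.
    x_offset = ((image_width - 1) / 2.0) * (1.0 - zx)
    y_offset = ((image_height - 1) / 2.0) * (1.0 - zy)
    return [zx, 0.0, x_offset, 0.0, zy, y_offset, 0.0, 0.0]

print(zoom_transform(2.0, 2.0, 8, 8))  # zoom out 2x about the center
```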
<ide> class RandomContrast(BaseImageAugmentationLayer):
<ide> """A preprocessing layer which randomly adjusts contrast during training.
<ide>
<del> This layer will randomly adjust the contrast of an image or images by a random
<del> factor. Contrast is adjusted independently for each channel of each image
<del> during training.
<add> This layer will randomly adjust the contrast of an image or images by a
<add> random factor. Contrast is adjusted independently for each channel of each
<add> image during training.
<ide>
<ide> For each channel, this layer computes the mean of the image pixels in the
<ide> channel and then adjusts each component `x` of each pixel to
<ide> `(x - mean) * contrast_factor + mean`.
<ide>
<ide> Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
<del> in integer or floating point dtype. By default, the layer will output floats.
<del> The output value will be clipped to the range `[0, 255]`, the valid
<add> in integer or floating point dtype. By default, the layer will output
<add> floats. The output value will be clipped to the range `[0, 255]`, the valid
<ide> range of RGB colors.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> class RandomContrast(BaseImageAugmentationLayer):
<ide> Arguments:
<ide> factor: a positive float represented as fraction of value, or a tuple of
<ide> size 2 representing lower and upper bound. When represented as a single
<del> float, lower = upper. The contrast factor will be randomly picked between
<del> `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel, the output
<del> will be `(x - mean) * factor + mean` where `mean` is the mean value of the
<del> channel.
<add> float, lower = upper. The contrast factor will be randomly picked
<add> between `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
<add> the output will be `(x - mean) * factor + mean` where `mean` is the mean
<add> value of the channel.
<ide> seed: Integer. Used to create a random seed.
<ide> """
<ide>
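The adjustment formula above is easy to verify by hand; a tiny NumPy sketch:

```python
import numpy as np

# (x - mean) * factor + mean pushes values away from the channel mean
# when factor > 1 and toward it when factor < 1.
x = np.array([0.0, 64.0, 128.0, 192.0, 256.0])
factor = 1.5
print((x - x.mean()) * factor + x.mean())
# [-64.  32. 128. 224. 320.] -> the layer then clips to [0, 255]
```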
<ide> class RandomBrightness(BaseImageAugmentationLayer):
<ide> is provided, e.g., 0.2, then -0.2 will be used for lower bound and 0.2
<ide> will be used for upper bound.
<ide> value_range: Optional list/tuple of 2 floats for the lower and upper limit
<del> of the values of the input data. Defaults to [0.0, 255.0]. Can be changed
<del> to e.g. [0.0, 1.0] if the image input has been scaled before this layer.
<del> The brightness adjustment will be scaled to this range, and the
<del> output values will be clipped to this range.
<add> of the values of the input data. Defaults to [0.0, 255.0]. Can be
<add> changed to e.g. [0.0, 1.0] if the image input has been scaled before
<add> this layer. The brightness adjustment will be scaled to this range, and
<add> the output values will be clipped to this range.
<ide> seed: optional integer, for fixed RNG behavior.
<ide>
<ide> Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
<ide> class RandomHeight(BaseImageAugmentationLayer):
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide>
<ide> Args:
<del> factor: A positive float (fraction of original height), or a tuple of size 2
<del> representing lower and upper bound for resizing vertically. When
<add> factor: A positive float (fraction of original height), or a tuple of size
<add> 2 representing lower and upper bound for resizing vertically. When
<ide> represented as a single float, this value is used for both the upper and
<ide> lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
<ide> height changed by a random amount in the range `[20%, 30%]`.
<del> `factor=(-0.2, 0.3)` results in an output with height changed by a random
<del> amount in the range `[-20%, +30%]`. `factor=0.2` results in an output with
<del> height changed by a random amount in the range `[-20%, +20%]`.
<add> `factor=(-0.2, 0.3)` results in an output with height changed by a
<add> random amount in the range `[-20%, +30%]`. `factor=0.2` results in an
<add> output with height changed by a random amount in the range
<add> `[-20%, +20%]`.
<ide> interpolation: String, the interpolation method. Defaults to `"bilinear"`.
<ide> Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
<ide> `"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
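A usage sketch, assuming the public `tf.keras.layers.RandomHeight` export:

```python
import numpy as np
import tensorflow as tf

# Vary image height by up to +/-20% during training; width is kept.
layer = tf.keras.layers.RandomHeight(factor=0.2)
images = np.random.random((4, 32, 32, 3)).astype("float32")
print(layer(images, training=True).shape)  # (4, h, 32, 3), h random
```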
<ide> def _batch_augment(self, inputs):
<ide> return result
<ide>
<ide> def augment_image(self, image, transformation):
<del> # The batch dimension of the input=image is not modified. The output would
<del> # be accurate for both unbatched and batched input
<add> # The batch dimension of the input=image is not modified. The output
<add> # would be accurate for both unbatched and batched input
<ide> inputs_shape = tf.shape(image)
<ide> img_wd = inputs_shape[W_AXIS]
<ide> adjusted_height = transformation["height"]
<ide> class RandomWidth(BaseImageAugmentationLayer):
<ide> This layer will randomly adjust the width of a batch of images by a
<ide> random factor. The input should be a 3D (unbatched) or
<ide> 4D (batched) tensor in the `"channels_last"` image data format. Input pixel
<del> values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of interger or
<del> floating point dtype. By default, the layer will output floats.
<add> values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer
<add> or floating point dtype. By default, the layer will output floats.
<ide>
<ide> By default, this layer is inactive during inference.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide>
<ide> Args:
<del> factor: A positive float (fraction of original width), or a tuple of size 2
<del> representing lower and upper bound for resizing vertically. When
<add> factor: A positive float (fraction of original width), or a tuple of size
<add> 2 representing lower and upper bound for resizing horizontally. When
<ide> represented as a single float, this value is used for both the upper and
<ide> lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
<del> width changed by a random amount in the range `[20%, 30%]`. `factor=(-0.2,
<del> 0.3)` results in an output with width changed by a random amount in the
<del> range `[-20%, +30%]`. `factor=0.2` results in an output with width changed
<del> by a random amount in the range `[-20%, +20%]`.
<add> width changed by a random amount in the range `[20%, 30%]`.
<add> `factor=(-0.2, 0.3)` results in an output with width changed by a random
<add> amount in the range `[-20%, +30%]`. `factor=0.2` results in an output
<add> with width changed by a random amount in the range `[-20%, +20%]`.
<ide> interpolation: String, the interpolation method. Defaults to `bilinear`.
<ide> Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`, `"lanczos3"`,
<ide> `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
<ide> def _batch_augment(self, inputs):
<ide> return result
<ide>
<ide> def augment_image(self, image, transformation):
<del> # The batch dimension of the input=image is not modified. The output would
<del> # be accurate for both unbatched and batched input
<add> # The batch dimension of the input=image is not modified. The output
<add> # would be accurate for both unbatched and batched input
<ide> inputs = utils.ensure_tensor(image)
<ide> inputs_shape = tf.shape(inputs)
<ide> img_hd = inputs_shape[H_AXIS]
<ide><path>keras/layers/preprocessing/image_preprocessing_test.py
<ide> def test_input_smaller_than_crop_box(self):
<ide> with test_utils.use_gpu():
<ide> layer = image_preprocessing.CenterCrop(height, width)
<ide> actual_output = layer(inp)
<del> # In this case, output should equal resizing with crop_to_aspect ratio.
<add> # In this case, output should equal resizing with crop_to_aspect
<add> # ratio.
<ide> resize_layer = image_preprocessing.Resizing(
<ide> height, width, crop_to_aspect_ratio=True
<ide> )
<ide> def test_input_smaller_than_crop_box(self):
<ide> with test_utils.use_gpu():
<ide> layer = image_preprocessing.RandomCrop(height, width)
<ide> actual_output = layer(inp)
<del> # In this case, output should equal resizing with crop_to_aspect ratio.
<add> # In this case, output should equal resizing with crop_to_aspect
<add> # ratio.
<ide> resize_layer = image_preprocessing.Resizing(
<ide> height, width, crop_to_aspect_ratio=True
<ide> )
<ide> def test_config_with_custom_name(self):
<ide>
<ide> def test_output_value_clip(self):
<ide> input_images = np.random.random((5, 8, 3)).astype(np.float32) * 255.0
<del> # Give a factor range [1.0, 11.0] so that it will produce large contrast.
<add> # Give a factor range [1.0, 11.0] so that it will produce large
<add> # contrast.
<ide> layer = image_preprocessing.RandomContrast((0.0, 10.0))
<ide> output = layer(input_images)
<ide> self.assertLessEqual(tf.reduce_max(output), 255.0)
<ide> def test_random_rotation_inference(self):
<ide> self.assertAllClose(expected_output, actual_output)
<ide>
<ide> def test_distribution_strategy(self):
<del> """Tests that RandomRotation can be created within distribution strategies."""
<add> """Tests that RandomRotation can be created within distribution
<add> strategies."""
<ide> input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
<ide> with test_utils.use_gpu():
<ide> strat = tf.distribute.MirroredStrategy(devices=["cpu", "gpu"])
<ide> def test_random_height_longer_numeric(self):
<ide> dtype
<ide> )
<ide> layer = image_preprocessing.RandomHeight(factor=(1.0, 1.0))
<del> # Return type of RandomHeight() is float32 if `interpolation` is not
<del> # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired dtype.
<add> # Return type of RandomHeight() is float32 if `interpolation` is
<add> # not set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to
<add> # desired dtype.
<ide> output_image = tf.cast(
<ide> layer(np.expand_dims(input_image, axis=0)), dtype=dtype
<ide> )
<ide> def test_random_width_longer_numeric(self):
<ide> dtype
<ide> )
<ide> layer = image_preprocessing.RandomWidth(factor=(1.0, 1.0))
<del> # Return type of RandomWidth() is float32 if `interpolation` is not
<del> # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired dtype.
<add> # Return type of RandomWidth() is float32 if `interpolation` is
<add> # not set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to
<add> # desired dtype.
<ide> output_image = tf.cast(
<ide> layer(np.expand_dims(input_image, axis=0)), dtype=dtype
<ide> )
<ide><path>keras/layers/preprocessing/index_lookup.py
<ide> def get_tensors(self):
<ide> class IndexLookup(base_preprocessing_layer.PreprocessingLayer):
<ide> """Maps values from a vocabulary to integer indices.
<ide>
<del> This layer translates a set of arbitrary hashables into an integer output via
<del> a table-based lookup, with optional out-of-vocabulary handling. This is the
<del> basis layer for both IntegerLookup and StringLookup; it holds the common
<add> This layer translates a set of arbitrary hashables into an integer output
<add> via a table-based lookup, with optional out-of-vocabulary handling. This is
<add> the basis layer for both IntegerLookup and StringLookup; it holds the common
<ide> logic but is not intended to be exported as part of the Keras API.
<ide>
<ide> Args:
<ide> max_tokens: The maximum size of the vocabulary for this layer. If None,
<ide> there is no cap on the size of the vocabulary. Note that this size
<ide> includes the OOV and mask tokens.
<ide> num_oov_indices: The number of out-of-vocabulary tokens to use. If this
<del> value is more than 1, OOV inputs are hashed to determine their OOV value.
<del> If this value is 0, OOV inputs will cause an error when calling the layer.
<add> value is more than 1, OOV inputs are hashed to determine their OOV
<add> value. If this value is 0, OOV inputs will cause an error when calling
<add> the layer.
<ide> mask_token: A token that represents masked inputs. When `output_mode` is
<ide> `"int"`, the token is included in vocabulary and mapped to index 0. In
<ide> other output modes, the token will not appear in the vocabulary and
<del> instances of the mask token in the input will be dropped. If set to None,
<del> no mask term will be added.
<add> instances of the mask token in the input will be dropped. If set to
<add> None, no mask term will be added.
<ide> oov_token: Only used when `invert` is True. The token to return for OOV
<ide> indices.
<ide> vocabulary: Optional. Either an array or a string path to a text file. If
<ide> passing an array, can pass a tuple, list, 1D numpy array, or 1D tensor
<ide> containing the vocabulary terms. If passing a file path, the file should
<ide> contain one line per term in the vocabulary. If this argument is set,
<ide> there is no need to `adapt` the layer.
<del> vocabulary_dtype: The dtype of the vocabulary terms. For example, `"int64"`
<del> or `"string"`.
<del> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list, 1D
<del> numpy array, or 1D tensor or the same length as the vocabulary, containing
<del> the floating point inverse document frequency weights, which will be
<del> multiplied by per sample term counts for the final `tf_idf` weight. If the
<del> `vocabulary` argument is set, and `output_mode` is `"tf_idf"`, this
<del> argument must be supplied.
<add> vocabulary_dtype: The dtype of the vocabulary terms. For example,
<add> `"int64"` or `"string"`.
<add> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
<add> 1D numpy array, or 1D tensor or the same length as the vocabulary,
<add> containing the floating point inverse document frequency weights, which
<add> will be multiplied by per sample term counts for the final `tf_idf`
<add> weight. If the `vocabulary` argument is set, and `output_mode` is
<add> `"tf_idf"`, this argument must be supplied.
<ide> invert: Only valid when `output_mode` is `"int"`. If True, this layer will
<ide> map indices to vocabulary items instead of mapping vocabulary items to
<ide> indices. Defaults to False.
<del> output_mode: Specification for the output of the layer. Defaults to `"int"`.
<del> Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
<del> `"tf_idf"` configuring the layer as follows:
<add> output_mode: Specification for the output of the layer. Defaults to
<add> `"int"`. Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`,
<add> or `"tf_idf"` configuring the layer as follows:
<ide> - `"int"`: Return the raw integer indices of the input tokens.
<ide> - `"one_hot"`: Encodes each individual element in the input into an
<ide> array the same size as the vocabulary, containing a 1 at the element
<del> index. If the last dimension is size 1, will encode on that dimension.
<del> If the last dimension is not size 1, will append a new dimension for
<del> the encoded output.
<add> index. If the last dimension is size 1, will encode on that
<add> dimension. If the last dimension is not size 1, will append a new
<add> dimension for the encoded output.
<ide> - `"multi_hot"`: Encodes each sample in the input into a single array
<ide> the same size as the vocabulary, containing a 1 for each vocabulary
<ide> term present in the sample. Treats the last dimension as the sample
<ide> dimension, if input shape is (..., sample_length), output shape will
<ide> be (..., num_tokens).
<del> - `"count"`: As `"multi_hot"`, but the int array contains a count of the
<del> number of times the token at that index appeared in the sample.
<add> - `"count"`: As `"multi_hot"`, but the int array contains a count of
<add> the number of times the token at that index appeared in the sample.
<ide> - `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
<ide> find the value in each token slot.
<ide> pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
<ide> `"count"`, or `"tf_idf"`. If True, the output will have its feature axis
<ide> padded to `max_tokens` even if the number of unique tokens in the
<ide> vocabulary is less than max_tokens, resulting in a tensor of shape
<del> [batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
<add> [batch_size, max_tokens] regardless of vocabulary size. Defaults to
<add> False.
<ide> sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`, `"count"`
<del> and `"tf-idf"` output modes. If True, returns a `SparseTensor` instead of
<del> a dense `Tensor`. Defaults to False.
<add> and `"tf-idf"` output modes. If True, returns a `SparseTensor` instead
<add> of a dense `Tensor`. Defaults to False.
<ide> """
<ide>
<ide> def __init__(
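`IndexLookup` itself is not exported, but the behavior described above can be sketched through its public `StringLookup` subclass (assuming the defaults of one OOV bucket and no mask token):

```python
import tensorflow as tf

vocab = ["earth", "wind", "and", "fire"]
int_lookup = tf.keras.layers.StringLookup(vocabulary=vocab)  # "int" mode
data = tf.constant([["earth", "wind"], ["fire", "other"]])
# With no mask token, index 0 is the single OOV bucket: "other" -> 0.
print(int_lookup(data))  # [[1, 2], [4, 0]]

# The encoded modes map samples against the same vocabulary instead:
multi_hot = tf.keras.layers.StringLookup(
    vocabulary=vocab, output_mode="multi_hot"
)
print(multi_hot(data))  # shape (2, 5): one slot per OOV/vocab term
```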
<ide> def __init__(
<ide>
<ide> self.input_vocabulary = vocabulary
<ide> self.input_idf_weights = idf_weights
<del> # VocabularySavedModelSaver will clear the config vocabulary to restore the
<del> # lookup table ops directly. We persist this hidden option to persist the
<del> # fact that we have have a non-adaptable layer with a manually set vocab.
<add> # VocabularySavedModelSaver will clear the config vocabulary to restore
<add> # the lookup table ops directly. We persist this hidden option to
<add> # persist the fact that we have a non-adaptable layer with a
<add> # manually set vocab.
<ide> self._has_input_vocabulary = kwargs.pop(
<ide> "has_input_vocabulary", (vocabulary is not None)
<ide> )
<ide> def __init__(
<ide> self._key_dtype = tf.as_dtype(self.vocabulary_dtype)
<ide> self._value_dtype = self.dtype if output_mode == INT else tf.int64
<ide> mask_key = mask_token
<del> # Masks should map to 0 for int output and be dropped otherwise. Max ints
<del> # will be dropped from the bincount op.
<add> # Masks should map to 0 for int output and be dropped otherwise. Max
<add> # ints will be dropped from the bincount op.
<ide> mask_value = 0 if self.output_mode == INT else self._value_dtype.max
<ide> if self.num_oov_indices == 0:
<del> # If there are no OOV indices, we map OOV tokens to -1 and error out
<del> # during call if we find a negative index.
<add> # If there are no OOV indices, we map OOV tokens to -1 and error
<add> # out during call if we find a negative index.
<ide> self._default_value = -1
<ide> elif self.num_oov_indices == 1:
<del> # If there is only one OOV index, we can set that index as the default
<del> # value of the index_lookup table.
<add> # If there is only one OOV index, we can set that index as the
<add> # default value of the index_lookup table.
<ide> self._default_value = self._oov_start_index()
<ide> else:
<del> # If we have multiple OOV values, we need to do a further hashing step;
<del> # to make this easier, we set the OOV value to -1. (This lets us do a
<del> # vectorized add and cast to boolean to determine locations where we
<del> # need to do extra hashing.)
<add> # If we have multiple OOV values, we need to do a further
<add> # hashing step; to make this easier, we set the OOV value to -1.
<add> # (This lets us do a vectorized add and cast to boolean to
<add> # determine locations where we need to do extra hashing.)
<ide> self._default_value = -1
<ide> if self.mask_token is not None:
<ide> self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
<ide> def __init__(
<ide> if vocabulary is not None:
<ide> self.set_vocabulary(vocabulary, idf_weights)
<ide> else:
<del> # When restoring from a keras SavedModel, the loading code will expect to
<del> # find and restore a lookup_table attribute on the layer. This table needs
<del> # to be uninitialized as a StaticHashTable cannot be initialized twice.
<add> # When restoring from a keras SavedModel, the loading code will
<add> # expect to find and restore a lookup_table attribute on the layer.
<add> # This table needs to be uninitialized as a StaticHashTable cannot
<add> # be initialized twice.
<ide> self.lookup_table = self._uninitialized_lookup_table()
<ide>
<ide> # Only set up adapt state if we did not receive a vocab on construction.
<ide> if not self._has_input_vocabulary:
<del> # Add a custom weight handler to return the layers vocab as it's weight.
<add> # Add a custom weight handler to return the layer's vocab as its
<add> # weight.
<ide> self._add_trackable(VocabWeightHandler(self), False)
<ide> # Set adapt state.
<ide> self.token_counts = tf.lookup.experimental.MutableHashTable(
<ide> def get_vocabulary(self, include_special_tokens=True):
<ide> """Returns the current vocabulary of the layer.
<ide>
<ide> Args:
<del> include_special_tokens: If True, the returned vocabulary will include mask
<del> and OOV tokens, and a term's index in the vocabulary will equal the
<del> term's index when calling the layer. If False, the returned vocabulary
<del> will not include any mask or OOV tokens.
<add> include_special_tokens: If True, the returned vocabulary will include
<add> mask and OOV tokens, and a term's index in the vocabulary will equal
<add> the term's index when calling the layer. If False, the returned
<add> vocabulary will not include any mask or OOV tokens.
<ide> """
<ide> # The lookup table data will not be sorted, so we will create an inverted
<del> # lookup here, and use that to lookup a range of indices [0, vocab_size).
<add> # lookup here, and use that to lookup a range of indices [0,
<add> # vocab_size).
<ide> if self.lookup_table.size() == 0:
<ide> vocab, indices = [], []
<ide> else:
<ide> def vocabulary_size(self):
<ide> """Gets the current size of the layer's vocabulary.
<ide>
<ide> Returns:
<del> The integer size of the vocabulary, including optional mask and oov indices.
<add> The integer size of the vocabulary, including optional mask and oov
<add> indices.
<ide> """
<ide> if tf.executing_eagerly():
<ide> return (
<ide> def set_vocabulary(self, vocabulary, idf_weights=None):
<ide> """Sets vocabulary (and optionally document frequency) data for this layer.
<ide>
<ide> This method sets the vocabulary and idf weights for this layer directly,
<del> instead of analyzing a dataset through `adapt`. It should be used whenever
<del> the vocab (and optionally document frequency) information is already known.
<del> If vocabulary data is already present in the layer, this method will replace
<del> it.
<add> instead of analyzing a dataset through `adapt`. It should be used
<add> whenever the vocab (and optionally document frequency) information is
<add> already known. If vocabulary data is already present in the layer, this
<add> method will replace it.
<ide>
<ide> Args:
<del> vocabulary: Either an array or a string path to a text file. If passing an
<del> array, can pass a tuple, list, 1D numpy array, or 1D tensor containing
<del> the vocbulary terms. If passing a file path, the file should contain one
<del> line per term in the vocabulary.
<add> vocabulary: Either an array or a string path to a text file. If
<add> passing an array, can pass a tuple, list, 1D numpy array, or 1D
<add> tensor containing the vocabulary terms. If passing a file path, the
<add> file should contain one line per term in the vocabulary.
<ide> idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse
<del> document frequency weights with equal length to vocabulary. Must be set
<del> if `output_mode` is `"tf_idf"`. Should not be set otherwise.
<add> document frequency weights with equal length to vocabulary. Must be
<add> set if `output_mode` is `"tf_idf"`. Should not be set otherwise.
<ide>
<ide> Raises:
<ide> ValueError: If there are too many inputs, the inputs do not match, or
<ide> input data is missing.
<ide> RuntimeError: If the vocabulary cannot be set when this function is
<ide> called. This happens when `"multi_hot"`, `"count"`, and `"tf_idf"`
<del> modes, if `pad_to_max_tokens` is False and the layer itself has already
<del> been called.
<del> RuntimeError: If a tensor vocabulary is passed outside of eager execution.
<add> modes, if `pad_to_max_tokens` is False and the layer itself has
<add> already been called.
<add> RuntimeError: If a tensor vocabulary is passed outside of eager
<add> execution.
<ide> """
<ide> if self.output_mode != TF_IDF and idf_weights is not None:
<ide> raise ValueError(
<ide> def set_vocabulary(self, vocabulary, idf_weights=None):
<ide> tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)
<ide> ):
<ide> raise RuntimeError(
<del> "Cannot set a tensor vocabulary on {} layer {} when not executing "
<del> "eagerly. Create this layer or call `set_vocabulary` outside of "
<del> "any `tf.function`s and with eager execution enabled.".format(
<del> self.__class__.__name__, self.name
<del> )
<add> "Cannot set a tensor vocabulary on {} layer {} when not "
<add> "executing eagerly. Create this layer or call `set_vocabulary` "
<add> "outside of any `tf.function`s and with eager execution "
<add> "enabled.".format(self.__class__.__name__, self.name)
<ide> )
<ide>
<del> # TODO(mattdangerw): for better performance we should rewrite this entire
<del> # function to operate on tensors and convert vocabulary to a tensor here.
<add> # TODO(mattdangerw): for better performance we should rewrite this
<add> # entire function to operate on tensors and convert vocabulary to a
<add> # tensor here.
<ide> if tf.is_tensor(vocabulary):
<ide> vocabulary = self._tensor_vocab_to_numpy(vocabulary)
<ide> elif isinstance(vocabulary, (list, tuple)):
<ide> def set_vocabulary(self, vocabulary, idf_weights=None):
<ide> if self.mask_token is not None and self.mask_token in tokens:
<ide> mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
<ide> raise ValueError(
<del> "Found reserved mask token at unexpected location in `vocabulary`. "
<del> "Note that passed `vocabulary` does not need to include the OOV and "
<del> "mask tokens. Either remove all mask and OOV tokens, or include them "
<del> "only at the start of the vocabulary in precisely this order: "
<del> f"{special_tokens}. Received: mask_token={self.mask_token} at "
<add> "Found reserved mask token at unexpected location in "
<add> "`vocabulary`. Note that passed `vocabulary` does not need to "
<add> "include the OOV and mask tokens. Either remove all mask and "
<add> "OOV tokens, or include them only at the start of the "
<add> f"vocabulary in precisely this order: {special_tokens}. "
<add> f"Received: mask_token={self.mask_token} at "
<ide> f"vocabulary index {mask_index}"
<ide> )
<ide> # Only error out for oov_token when invert=True. When invert=False,
<ide> def set_vocabulary(self, vocabulary, idf_weights=None):
<ide> ):
<ide> oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
<ide> raise ValueError(
<del> "Found reserved OOV token at unexpected location in `vocabulary`. "
<del> "Note that passed `vocabulary` does not need to include the OOV and "
<del> "mask tokens. Either remove all mask and OOV tokens, or include them "
<del> "only at the start of the vocabulary in precisely this order: "
<del> f"{special_tokens}. Received: oov_token={self.oov_token} at "
<add> "Found reserved OOV token at unexpected location in "
<add> "`vocabulary`. Note that passed `vocabulary` does not need to "
<add> "include the OOV and mask tokens. Either remove all mask and "
<add> "OOV tokens, or include them only at the start of the "
<add> f"vocabulary in precisely this order: {special_tokens}. "
<add> f"Received: oov_token={self.oov_token} at "
<ide> f"vocabulary index {oov_index}"
<ide> )
<ide>
<ide> new_vocab_size = token_start + len(tokens)
<ide> if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
<ide> raise ValueError(
<del> "Attempted to set a vocabulary larger than the maximum vocab size. "
<del> "Passed vocab size is {}, max vocab size is {}.".format(
<add> "Attempted to set a vocabulary larger than the maximum vocab "
<add> "size. Passed vocab size is {}, max vocab size is {}.".format(
<ide> new_vocab_size, self.max_tokens
<ide> )
<ide> )
<ide> def set_vocabulary(self, vocabulary, idf_weights=None):
<ide> idf_weights = self._convert_to_ndarray(idf_weights)
<ide> if idf_weights.ndim != 1:
<ide> raise ValueError(
<del> "TF-IDF data must be a 1-index array, but received {}".format(
<del> type(idf_weights)
<del> )
<add> "TF-IDF data must be a 1-index array, "
<add> "but received {}".format(type(idf_weights))
<ide> )
<ide>
<del> # If the passed vocabulary has no special tokens, we need to pad the front
<del> # of idf_weights. We don't have real document frequencies for these tokens
<del> # so we will use an average of all idf_weights passed in as a reasonable
<del> # default.
<add> # If the passed vocabulary has no special tokens, we need to pad the
<add> # front of idf_weights. We don't have real document frequencies for
<add> # these tokens so we will use an average of all idf_weights passed
<add> # in as a reasonable default.
<ide> if found_special_tokens:
<ide> front_padding = 0
<ide> front_padding_value = 0
<ide> else:
<ide> front_padding = token_start
<ide> front_padding_value = np.average(idf_weights)
<del> # If pad_to_max_tokens is true, and max_tokens is greater than our total
<del> # vocab size, we need to pad the back of idf_weights with zeros as well.
<add> # If pad_to_max_tokens is true, and max_tokens is greater than our
<add> # total vocab size, we need to pad the back of idf_weights with
<add> # zeros as well.
<ide> back_padding_value = 0
<ide> if self.pad_to_max_tokens and self.max_tokens is not None:
<ide> back_padding = (
<ide> def set_vocabulary(self, vocabulary, idf_weights=None):
<ide> def update_state(self, data):
<ide> if self._has_input_vocabulary:
<ide> raise ValueError(
<del> "Cannot adapt {} layer after setting a static vocabulary via init "
<del> "argument or `set_vocabulary`.".format(self.__class__.__name__)
<add> "Cannot adapt {} layer after setting a static vocabulary via "
<add> "init argument "
<add> "or `set_vocabulary`.".format(self.__class__.__name__)
<ide> )
<ide>
<ide> data = utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
<ide> if data.shape.rank == 0:
<ide> data = tf.expand_dims(data, 0)
<ide> if data.shape.rank == 1:
<del> # Expand dims on axis 0 for tf-idf. A 1-d tensor is a single document.
<add> # Expand dims on axis 0 for tf-idf. A 1-d tensor is a single
<add> # document.
<ide> data = tf.expand_dims(data, 0)
<ide>
<ide> tokens, counts = self._num_tokens(data)
<ide> def finalize_state(self):
<ide> )
<ide>
<ide> tokens, counts = self.token_counts.export()
<del> # To keep vocabs deterministic, we sort our tokens by count and break ties
<del> # by sorting the tokens themselves. Tensorflow has no ops for sorting
<del> # strings, so we need to use numpy for the sort.
<add> # To keep vocabs deterministic, we sort our tokens by count and break
<add> # ties by sorting the tokens themselves. Tensorflow has no ops for
<add> # sorting strings, so we need to use numpy for the sort.
<ide> sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
<ide> token_start = self._token_start_index()
<ide> if self.max_tokens:
<ide> def finalize_state(self):
<ide> token_document_counts, self.num_documents
<ide> )
<ide> idf_weights = tf.cast(idf_weights, self.compute_dtype)
<del> # Pad the front of idf_weights with the average idf weight for OOV tokens.
<del> # We cannot compute the real idf weight of OOV in a single pass.
<add> # Pad the front of idf_weights with the average idf weight for OOV
<add> # tokens. We cannot compute the real idf weight of OOV in a single
<add> # pass.
<ide> idf_weights = tf.pad(
<ide> idf_weights,
<ide> [[self._token_start_index(), 0]],
<ide> def finalize_state(self):
<ide> self.idf_weights.assign(idf_weights)
<ide> self.idf_weights_const = self.idf_weights.value()
<ide>
<del> # We call this here to save memory, now that we've built our vocabulary, we
<del> # don't want to keep every token we've seen in separate lookup tables.
<add> # We call this here to save memory, now that we've built our vocabulary,
<add> # we don't want to keep every token we've seen in separate lookup
<add> # tables.
<ide> self.reset_state()
<ide>
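The adapt path described above (accumulate token counts, sort by frequency with deterministic tie-breaking, then freeze) can be exercised through the public subclasses:

```python
import tensorflow as tf

layer = tf.keras.layers.StringLookup()
layer.adapt(tf.constant(["earth", "wind", "and", "fire", "fire"]))
# Most frequent tokens come first, after the OOV slot.
print(layer.get_vocabulary())  # ['[UNK]', 'fire', ...]
```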
<ide> def reset_state(self): # pylint: disable=method-hidden
<ide> def call(self, inputs):
<ide>
<ide> def _lookup_dense(self, inputs):
<ide> """Lookup table values for a dense Tensor, handling masking and OOV."""
<del> # When executing eagerly and tracing keras.Inputs, do not call lookup. This
<del> # is critical for restoring SavedModel, which will first trace layer.call
<del> # and then attempt to restore the table. We need the table to be
<del> # uninitialized for the restore to work, but calling the table uninitialized
<del> # would error.
<add> # When executing eagerly and tracing keras.Inputs, do not call lookup.
<add> # This is critical for restoring SavedModel, which will first trace
<add> # layer.call and then attempt to restore the table. We need the table to
<add> # be uninitialized for the restore to work, but calling the table
<add> # uninitialized would error.
<ide> if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
<ide> lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
<ide> else:
<ide> def _maybe_freeze_vocab_size(self):
<ide> with tf.init_scope():
<ide> if not tf.executing_eagerly():
<ide> raise RuntimeError(
<del> "When using `output_mode={}` eager execution must be enabled.".format(
<del> self.output_mode
<del> )
<add> "When using `output_mode={}` eager execution must "
<add> "be enabled.".format(self.output_mode)
<ide> )
<ide> new_vocab_size = self.vocabulary_size()
<ide> if new_vocab_size == self._token_start_index():
<ide> raise RuntimeError(
<del> "When using `output_mode={}` and `pad_to_max_tokens=False`, you "
<del> "must set the layer's vocabulary before calling it. Either pass "
<del> "a `vocabulary` argument to the layer, or call `adapt` with some "
<del> "sample data.".format(self.output_mode)
<add> "When using `output_mode={}` and `pad_to_max_tokens=False`, "
<add> "you must set the layer's vocabulary before calling it. Either "
<add> "pass a `vocabulary` argument to the layer, or call `adapt` "
<add> "with some sample data.".format(self.output_mode)
<ide> )
<ide> elif (
<ide> self._frozen_vocab_size is not None
<ide> and new_vocab_size != self._frozen_vocab_size
<ide> ):
<ide> raise RuntimeError(
<del> "When using `output_mode={}` and `pad_to_max_tokens=False`, the "
<del> "vocabulary size cannot be changed after the layer is called. "
<del> "Vocab size is {}, new vocab size is {}".format(
<add> "When using `output_mode={}` and `pad_to_max_tokens=False`, "
<add> "the vocabulary size cannot be changed after the layer is "
<add> "called. Vocab size is {}, new vocab size is {}".format(
<ide> self.output_mode, self._frozen_vocab_size, new_vocab_size
<ide> )
<ide> )
<ide> def _inverse_document_frequency(self, token_document_counts, num_documents):
<ide> https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
<ide>
<ide> Args:
<del> token_document_counts: An array of the # of documents each token appears
<del> in.
<add> token_document_counts: An array of the # of documents each token
<add> appears in.
<ide> num_documents: An int representing the total number of documents
<ide>
<ide> Returns:
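As an end-to-end sketch of the tf_idf path, precomputed idf weights can be supplied alongside a fixed vocabulary; the `log(1 + N / (1 + df))` smoothing below is a common convention and an assumption here, not a guaranteed match for the internal helper:

```python
import numpy as np
import tensorflow as tf

vocab = ["earth", "wind", "and", "fire"]
doc_counts = np.array([10, 5, 2, 1])  # documents containing each token
num_documents = 20
idf_weights = np.log(1 + num_documents / (1 + doc_counts))

layer = tf.keras.layers.StringLookup(
    vocabulary=vocab, idf_weights=idf_weights, output_mode="tf_idf"
)
# Per-sample term counts weighted by idf, one slot per OOV/vocab term.
print(layer(tf.constant([["earth", "fire", "fire"]])))
```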
<ide><path>keras/layers/preprocessing/index_lookup_distribution_test.py
<ide> def test_strategy_with_file(self, strategy):
<ide> self.assertAllEqual(expected_output, output_dataset)
<ide>
<ide> def test_tpu_with_multiple_oov(self, strategy):
<del> # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
<add> # TODO(b/180614455): remove this check when MLIR bridge is always
<add> # enabled.
<ide> if backend.is_tpu_strategy(strategy):
<ide> self.skipTest("This test needs MLIR bridge on TPU.")
<ide>
<ide><path>keras/layers/preprocessing/index_lookup_test.py
<ide> def _get_end_to_end_test_cases():
<ide> test_cases = (
<ide> {
<ide> "testcase_name": "test_strings_soft_vocab_cap",
<del> # Create an array where 'earth' is the most frequent term, followed by
<del> # 'wind', then 'and', then 'fire'. This ensures that the vocab
<add> # Create an array where 'earth' is the most frequent term, followed
<add> # by 'wind', then 'and', then 'fire'. This ensures that the vocab
<ide> # accumulator is sorting by frequency.
<ide> "vocab_data": np.array(
<ide> [
<ide> def _get_end_to_end_test_cases():
<ide> },
<ide> {
<ide> "testcase_name": "test_inverse_strings_soft_vocab_cap",
<del> # Create an array where 'earth' is the most frequent term, followed by
<del> # 'wind', then 'and', then 'fire'. This ensures that the vocab
<add> # Create an array where 'earth' is the most frequent term, followed
<add> # by 'wind', then 'and', then 'fire'. This ensures that the vocab
<ide> # accumulator is sorting by frequency.
<ide> "vocab_data": np.array(
<ide> [
<ide> def _get_end_to_end_test_cases():
<ide> },
<ide> {
<ide> "testcase_name": "test_strings_with_special_tokens",
<del> # Mask and oov values in the vocab data should be dropped, and mapped
<del> # to 0 and 1 respectively when calling the layer.
<add> # Mask and oov values in the vocab data should be dropped, and
<add> # mapped to 0 and 1 respectively when calling the layer.
<ide> "vocab_data": np.array(
<ide> [
<ide> ["fire"],
<ide> def _get_end_to_end_test_cases():
<ide> },
<ide> {
<ide> "testcase_name": "test_ints_with_special_tokens",
<del> # Mask and oov values in the vocab data should be dropped, and mapped
<del> # to 0 and 1 respectively when calling the layer.
<add> # Mask and oov values in the vocab data should be dropped, and
<add> # mapped to 0 and 1 respectively when calling the layer.
<ide> "vocab_data": np.array(
<ide> [
<ide> [42],
<ide> def _get_end_to_end_test_cases():
<ide> },
<ide> {
<ide> "testcase_name": "test_strings_hard_vocab_cap",
<del> # Create an array where 'earth' is the most frequent term, followed by
<del> # 'wind', then 'and', then 'fire'. This ensures that the vocab
<add> # Create an array where 'earth' is the most frequent term, followed
<add> # by 'wind', then 'and', then 'fire'. This ensures that the vocab
<ide> # accumulator is sorting by frequency.
<ide> "vocab_data": np.array(
<ide> [
<ide> def _get_end_to_end_test_cases():
<ide> },
<ide> {
<ide> "testcase_name": "test_inverse_strings_hard_vocab_cap",
<del> # Create an array where 'earth' is the most frequent term, followed by
<del> # 'wind', then 'and', then 'fire'. This ensures that the vocab
<add> # Create an array where 'earth' is the most frequent term, followed
<add> # by 'wind', then 'and', then 'fire'. This ensures that the vocab
<ide> # accumulator is sorting by frequency.
<ide> "vocab_data": np.array(
<ide> [
<ide> def test_layer_end_to_end_with_adapt(
<ide> # together. When the results have different shapes on the non-concat
<ide> # axis (which can happen in the output_mode = INT case for
<ide> # IndexLookup), the concatenation fails. In real use cases, this may
<del> # not be an issue because users are likely to pipe the preprocessing layer
<del> # into other keras layers instead of predicting it directly. A workaround
<del> # for these unit tests is to have the dataset only contain one batch, so
<del> # no concatenation needs to happen with the result. For consistency with
<del> # numpy input, we should make `predict` join differently shaped results
<del> # together sensibly, with 0 padding.
<add> # not be an issue because users are likely to pipe the preprocessing
<add> # layer into other keras layers instead of predicting it directly. A
<add> # workaround for these unit tests is to have the dataset only
<add> # contain one batch, so no concatenation needs to happen with the
<add> # result. For consistency with numpy input, we should make `predict`
<add> # join differently shaped results together sensibly, with 0 padding.
<ide> input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
<ide> input_shape[0]
<ide> )
<ide> def test_vocabulary_persistence_across_saving(self):
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = keras.models.load_model(
<ide> def test_persistence_file_vocabs_tf_save_tf_load(self):
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> tf.saved_model.save(obj=model, export_dir=output_path)
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = tf.saved_model.load(output_path)
<ide> def test_vocabulary_persistence_file_vocab_keras_save_tf_load(self):
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = tf.saved_model.load(output_path)
<ide> def test_persistence_file_vocab_keras_save_keras_load(self):
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide> tf.io.gfile.remove(vocab_file)
<ide>
<ide> def test_persistence_file_vocab_keras_save_keras_load(self):
<ide> )
<ide> model_2.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = keras.models.load_model(
<ide> def test_persistence_file_vocab_keras_save_keras_load_tf_save_tf_load(self):
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide> tf.io.gfile.remove(vocab_file)
<ide>
<ide> def test_persistence_file_vocab_keras_save_keras_load_tf_save_tf_load(self):
<ide> )
<ide> tf.saved_model.save(model_2, output_path)
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = tf.saved_model.load(output_path)
<ide> def test_persistence_file_vocab_keras_save_keras_load_keras_save_keras_load(
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide> tf.io.gfile.remove(vocab_file)
<ide>
<ide> def test_persistence_file_vocab_keras_save_keras_load_keras_save_keras_load(
<ide> )
<ide> model_2.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = keras.models.load_model(
<ide> class EagerExecutionDisabled(
<ide> test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
<ide> ):
<ide> def test_lookup(self):
<del> # We need this test for model_to_estimator followed by export_saved_model,
<del> # which will call the layer in a legacy session. This could also happen
<del> # directly if a user calls disable_v2_behavior or disable_eager_execution.
<add> # We need this test for model_to_estimator followed by
<add> # export_saved_model, which will call the layer in a legacy session.
<add> # This could also happen directly if a user calls disable_v2_behavior or
<add> # disable_eager_execution.
<ide> with tf.compat.v1.Session():
<ide> with test_utils.run_eagerly_scope(False):
<ide> vocab_data = ["earth", "wind", "and", "fire"]
<ide> def test_lookup(self):
<ide> )
<ide> int_data = layer(input_data)
<ide> model = keras.Model(inputs=input_data, outputs=int_data)
<del> # In a TF1 session the user will need to make sure all tables are
<del> # initialized themselves.
<add> # In a TF1 session the user will need to initialize all tables
<add> # themselves.
<ide> tf.compat.v1.tables_initializer().run()
<ide> output_dataset = model(input_array)
<ide> self.assertAllEqual(output_dataset, expected_output)
<ide><path>keras/layers/preprocessing/integer_lookup.py
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide> """A preprocessing layer which maps integer features to contiguous ranges.
<ide>
<del> This layer maps a set of arbitrary integer input tokens into indexed
<del> integer output via a table-based vocabulary lookup. The layer's output indices
<del> will be contiguously arranged up to the maximum vocab size, even if the input
<add> This layer maps a set of arbitrary integer input tokens into indexed integer
<add> output via a table-based vocabulary lookup. The layer's output indices will
<add> be contiguously arranged up to the maximum vocab size, even if the input
<ide> tokens are non-continguous or unbounded. The layer supports multiple options
<ide> for encoding the output via `output_mode`, and has optional support for
<ide> out-of-vocabulary (OOV) tokens and masking.
<ide>
<ide> The vocabulary for the layer must be either supplied on construction or
<ide> learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
<del> determine the frequency of individual integer tokens, and create a vocabulary
<del> from them. If the vocabulary is capped in size, the most frequent tokens will
<del> be used to create the vocabulary and all others will be treated as OOV.
<add> determine the frequency of individual integer tokens, and create a
<add> vocabulary from them. If the vocabulary is capped in size, the most frequent
<add> tokens will be used to create the vocabulary and all others will be treated
<add> as OOV.
<ide>
<del> There are two possible output modes for the layer.
<del> When `output_mode` is `"int"`,
<del> input integers are converted to their index in the vocabulary (an integer).
<del> When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`, input integers
<del> are encoded into an array where each dimension corresponds to an element in
<del> the vocabulary.
<add> There are two possible output modes for the layer. When `output_mode` is
<add> `"int"`, input integers are converted to their index in the vocabulary (an
<add> integer). When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`,
<add> input integers are encoded into an array where each dimension corresponds to
<add> an element in the vocabulary.
<ide>
<ide> The vocabulary can optionally contain a mask token as well as an OOV token
<ide> (which can optionally occupy multiple indices in the vocabulary, as set
<ide> by `num_oov_indices`).
<del> The position of these tokens in the vocabulary is fixed. When `output_mode` is
<del> `"int"`, the vocabulary will begin with the mask token at index 0, followed by
<del> OOV indices, followed by the rest of the vocabulary. When `output_mode` is
<del> `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with OOV
<del> indices and instances of the mask token will be dropped.
<add> The position of these tokens in the vocabulary is fixed. When `output_mode`
<add> is `"int"`, the vocabulary will begin with the mask token at index 0,
<add> followed by OOV indices, followed by the rest of the vocabulary. When
<add> `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will
<add> begin with OOV indices and instances of the mask token will be dropped.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide>
<ide> Args:
<del> max_tokens: Maximum size of the vocabulary for this layer. This should only
<del> be specified when adapting the vocabulary or when setting
<add> max_tokens: Maximum size of the vocabulary for this layer. This should
<add> only be specified when adapting the vocabulary or when setting
<ide> `pad_to_max_tokens=True`. If None, there is no cap on the size of the
<del> vocabulary. Note that this size includes the OOV and mask tokens. Defaults
<del> to None.
<add> vocabulary. Note that this size includes the OOV and mask tokens.
<add> Defaults to None.
<ide> num_oov_indices: The number of out-of-vocabulary tokens to use. If this
<ide> value is more than 1, OOV inputs are modulated to determine their OOV
<del> value. If this value is 0, OOV inputs will cause an error when calling the
<del> layer. Defaults to 1.
<add> value. If this value is 0, OOV inputs will cause an error when calling
<add> the layer. Defaults to 1.
<ide> mask_token: An integer token that represents masked inputs. When
<ide> `output_mode` is `"int"`, the token is included in vocabulary and mapped
<ide> to index 0. In other output modes, the token will not appear in the
<ide> vocabulary and instances of the mask token in the input will be dropped.
<ide> If set to None, no mask term will be added. Defaults to None.
<ide> oov_token: Only used when `invert` is True. The token to return for OOV
<ide> indices. Defaults to -1.
<del> vocabulary: Optional. Either an array of integers or a string path to a text
<del> file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D
<del> tensor containing the integer vocbulary terms. If passing a file path, the
<del> file should contain one line per term in the vocabulary. If this argument
<del> is set, there is no need to `adapt()` the layer.
<add> vocabulary: Optional. Either an array of integers or a string path to a
<add> text file. If passing an array, can pass a tuple, list, 1D numpy array,
<add> or 1D tensor containing the integer vocabulary terms. If passing a file
<add> path, the file should contain one line per term in the vocabulary. If
<add> this argument is set, there is no need to `adapt()` the layer.
<ide> vocabulary_dtype: The dtype of the vocabulary terms, for example
<ide> `"int64"` or `"int32"`. Defaults to `"int64"`.
<del> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list, 1D
<del> numpy array, or 1D tensor or the same length as the vocabulary, containing
<del> the floating point inverse document frequency weights, which will be
<del> multiplied by per sample term counts for the final `tf_idf` weight. If the
<del> `vocabulary` argument is set, and `output_mode` is `"tf_idf"`, this
<del> argument must be supplied.
<add> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
<add> 1D numpy array, or 1D tensor of the same length as the vocabulary,
<add> containing the floating point inverse document frequency weights, which
<add> will be multiplied by per sample term counts for the final `tf_idf`
<add> weight. If the `vocabulary` argument is set, and `output_mode` is
<add> `"tf_idf"`, this argument must be supplied.
<ide> invert: Only valid when `output_mode` is `"int"`. If True, this layer will
<ide> map indices to vocabulary items instead of mapping vocabulary items to
<ide> indices. Defaults to False.
<del> output_mode: Specification for the output of the layer. Defaults to `"int"`.
<del> Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
<del> `"tf_idf"` configuring the layer as follows:
<add> output_mode: Specification for the output of the layer. Defaults to
<add> `"int"`. Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`,
<add> or `"tf_idf"` configuring the layer as follows:
<ide> - `"int"`: Return the vocabulary indices of the input tokens.
<ide> - `"one_hot"`: Encodes each individual element in the input into an
<ide> array the same size as the vocabulary, containing a 1 at the element
<del> index. If the last dimension is size 1, will encode on that dimension.
<del> If the last dimension is not size 1, will append a new dimension for
<del> the encoded output.
<add> index. If the last dimension is size 1, will encode on that
<add> dimension. If the last dimension is not size 1, will append a new
<add> dimension for the encoded output.
<ide> - `"multi_hot"`: Encodes each sample in the input into a single array
<ide> the same size as the vocabulary, containing a 1 for each vocabulary
<ide> term present in the sample. Treats the last dimension as the sample
<ide> dimension, if input shape is (..., sample_length), output shape will
<ide> be (..., num_tokens).
<del> - `"count"`: As `"multi_hot"`, but the int array contains a count of the
<del> number of times the token at that index appeared in the sample.
<add> - `"count"`: As `"multi_hot"`, but the int array contains a count of
<add> the number of times the token at that index appeared in the sample.
<ide> - `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
<ide> find the value in each token slot.
<ide> For `"int"` output, any shape of input and output is supported. For all
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide> `"count"`, or `"tf_idf"`. If True, the output will have its feature axis
<ide> padded to `max_tokens` even if the number of unique tokens in the
<ide> vocabulary is less than max_tokens, resulting in a tensor of shape
<del> [batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
<add> [batch_size, max_tokens] regardless of vocabulary size. Defaults to
<add> False.
<ide> sparse: Boolean. Only applicable when `output_mode` is `"multi_hot"`,
<ide> `"count"`, or `"tf_idf"`. If True, returns a `SparseTensor` instead of a
<ide> dense `Tensor`. Defaults to False.
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide>
<ide> **Creating a lookup layer with an adapted vocabulary**
<ide>
<del> This example creates a lookup layer and generates the vocabulary by analyzing
<del> the dataset.
<add> This example creates a lookup layer and generates the vocabulary by
<add> analyzing the dataset.
<ide>
<ide> >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])
<ide> >>> layer = tf.keras.layers.IntegerLookup()
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide>
<ide> **Lookups with multiple OOV indices**
<ide>
<del> This example demonstrates how to use a lookup layer with multiple OOV indices.
<del> When a layer is created with more than one OOV index, any OOV tokens are
<del> hashed into the number of OOV buckets, distributing OOV tokens in a
<del> deterministic fashion across the set.
<add> This example demonstrates how to use a lookup layer with multiple OOV
<add> indices. When a layer is created with more than one OOV index, any OOV
<add> tokens are hashed into the number of OOV buckets, distributing OOV tokens in
<add> a deterministic fashion across the set.
<ide>
<ide> >>> vocab = [12, 36, 1138, 42]
<ide> >>> data = tf.constant([[12, 1138, 42], [37, 1000, 36]])
<del> >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab, num_oov_indices=2)
<add> >>> layer = tf.keras.layers.IntegerLookup(
<add> ... vocabulary=vocab, num_oov_indices=2)
<ide> >>> layer(data)
<ide> <tf.Tensor: shape=(2, 3), dtype=int64, numpy=
<ide> array([[2, 4, 5],
<ide> [1, 0, 3]])>
<ide>
<ide> Note that the output for OOV token 37 is 1, while the output for OOV token
<ide> 1000 is 0. The in-vocab terms have their output index increased by 1 from
<del> earlier examples (12 maps to 2, etc) in order to make space for the extra OOV
<del> token.
<add> earlier examples (12 maps to 2, etc) in order to make space for the extra
<add> OOV token.
<ide>
<ide> **One-hot output**
<ide>
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide> `num_oov_indices` dimensions in the multi_hot encoding represent OOV tokens
<ide>
<ide> >>> vocab = [12, 36, 1138, 42]
<del> >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens
<add> >>> data = tf.constant([[12, 1138, 42, 42],
<add> ... [42, 7, 36, 7]]) # Note OOV tokens
<ide> >>> layer = tf.keras.layers.IntegerLookup(
<ide> ... vocabulary=vocab, output_mode='multi_hot')
<ide> >>> layer(data)
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide>
<ide> **Token count output**
<ide>
<del> Configure the layer with `output_mode='count'`. As with multi_hot output, the
<del> first `num_oov_indices` dimensions in the output represent OOV tokens.
<add> Configure the layer with `output_mode='count'`. As with multi_hot output,
<add> the first `num_oov_indices` dimensions in the output represent OOV tokens.
<ide>
<ide> >>> vocab = [12, 36, 1138, 42]
<del> >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens
<add> >>> data = tf.constant([[12, 1138, 42, 42],
<add> ... [42, 7, 36, 7]]) # Note OOV tokens
<ide> >>> layer = tf.keras.layers.IntegerLookup(
<ide> ... vocabulary=vocab, output_mode='count')
<ide> >>> layer(data)
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide>
<ide> **TF-IDF output**
<ide>
<del> Configure the layer with `output_mode='tf_idf'`. As with multi_hot output, the
<del> first `num_oov_indices` dimensions in the output represent OOV tokens.
<add> Configure the layer with `output_mode='tf_idf'`. As with multi_hot output,
<add> the first `num_oov_indices` dimensions in the output represent OOV tokens.
<ide>
<ide> Each token bin will output `token_count * idf_weight`, where the idf weights
<del> are the inverse document frequency weights per token. These should be provided
<del> along with the vocabulary. Note that the `idf_weight` for OOV tokens will
<del> default to the average of all idf weights passed in.
<add> are the inverse document frequency weights per token. These should be
<add> provided along with the vocabulary. Note that the `idf_weight` for OOV
<add> tokens will default to the average of all idf weights passed in.
<ide>
<ide> >>> vocab = [12, 36, 1138, 42]
<ide> >>> idf_weights = [0.25, 0.75, 0.6, 0.4]
<del> >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens
<add> >>> data = tf.constant([[12, 1138, 42, 42],
<add> ... [42, 7, 36, 7]]) # Note OOV tokens
<ide> >>> layer = tf.keras.layers.IntegerLookup(
<ide> ... output_mode='tf_idf', vocabulary=vocab, idf_weights=idf_weights)
<ide> >>> layer(data)
<ide> class IntegerLookup(index_lookup.IndexLookup):
<ide>
<ide> >>> vocab = [-1, 12, 36, 1138, 42]
<ide> >>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
<del> >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens
<add> >>> data = tf.constant([[12, 1138, 42, 42],
<add> ... [42, 7, 36, 7]]) # Note OOV tokens
<ide> >>> layer = tf.keras.layers.IntegerLookup(
<ide> ... output_mode='tf_idf', vocabulary=vocab, idf_weights=idf_weights)
<ide> >>> layer(data)
<ide> <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
<ide> array([[0. , 0.25, 0. , 0.6 , 0.8 ],
<ide> [1.8 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)>
<ide>
<del> When adapting the layer in tf_idf mode, each input sample will be considered a
<del> document, and idf weight per token will be calculated as
<add> When adapting the layer in tf_idf mode, each input sample will be considered
<add> a document, and idf weight per token will be calculated as
<ide> `log(1 + num_documents / (1 + token_document_count))`.
<ide>
<ide> **Inverse lookup**
<ide>
<del> This example demonstrates how to map indices to tokens using this layer. (You
<del> can also use `adapt()` with `inverse=True`, but for simplicity we'll pass the
<del> vocab in this example.)
<add> This example demonstrates how to map indices to tokens using this layer.
<add> (You can also use `adapt()` with `invert=True`, but for simplicity we'll
<add> pass the vocab in this example.)
<ide>
<ide> >>> vocab = [12, 36, 1138, 42]
<ide> >>> data = tf.constant([[1, 3, 4], [4, 0, 2]])
<ide> def __init__(
<ide> )
<ide>
<ide> # Legacy versions of the IntegerLookup layer set layer dtype to int64,
<del> # instead of the output type. If we see this and output mode is not "int",
<del> # clear the setting so we don't switch types for old SavedModels.
<add> # instead of the output type. If we see this and output mode is not
<add> # "int", clear the setting so we don't switch types for old SavedModels.
<ide> if (
<ide> output_mode != "int"
<ide> and "dtype" in kwargs
<ide> def __init__(
<ide> def adapt(self, data, batch_size=None, steps=None):
<ide> """Computes a vocabulary of interger terms from tokens in a dataset.
<ide>
<del> Calling `adapt()` on an `IntegerLookup` layer is an alternative to passing
<del> in a precomputed vocabulary on construction via the `vocabulary` argument.
<del> An `IntegerLookup` layer should always be either adapted over a dataset or
<del> supplied with a vocabulary.
<del>
<del> During `adapt()`, the layer will build a vocabulary of all integer tokens
<del> seen in the dataset, sorted by occurrence count, with ties broken by sort
<del> order of the tokens (high to low). At the end of `adapt()`, if `max_tokens`
<del> is set, the vocabulary wil be truncated to `max_tokens` size. For example,
<del> adapting a layer with `max_tokens=1000` will compute the 1000 most frequent
<del> tokens occurring in the input dataset. If `output_mode='tf-idf'`, `adapt()`
<del> will also learn the document frequencies of each token in the input dataset.
<del>
<del> In order to make `StringLookup` efficient in any distribution context, the
<del> vocabulary is kept static with respect to any compiled `tf.Graph`s that
<del> call the layer. As a consequence, if the layer is adapted a second time,
<del> any models using the layer should be re-compiled. For more information
<del> see `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<del>
<del> `adapt()` is meant only as a single machine utility to compute layer state.
<del> To analyze a dataset that cannot fit on a single machine, see
<del> [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)
<del> for a multi-machine, map-reduce solution.
<add> Calling `adapt()` on an `IntegerLookup` layer is an alternative to
<add> passing in a precomputed vocabulary on construction via the
<add> `vocabulary` argument. An `IntegerLookup` layer should always be either
<add> adapted over a dataset or supplied with a vocabulary.
<add>
<add> During `adapt()`, the layer will build a vocabulary of all integer
<add> tokens seen in the dataset, sorted by occurrence count, with ties broken
<add> by sort order of the tokens (high to low). At the end of `adapt()`, if
<add> `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
<add> size. For example, adapting a layer with `max_tokens=1000` will compute
<add> the 1000 most frequent tokens occurring in the input dataset. If
<add> `output_mode='tf-idf'`, `adapt()` will also learn the document
<add> frequencies of each token in the input dataset.
<add>
<add> In order to make `IntegerLookup` efficient in any distribution context,
<add> the vocabulary is kept static with respect to any compiled `tf.Graph`s
<add> that call the layer. As a consequence, if the layer is adapted a second
<add> time, any models using the layer should be re-compiled. For more
<add> information see
<add> `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<add>
<add> `adapt()` is meant only as a single machine utility to compute layer
<add> state. To analyze a dataset that cannot fit on a single machine, see
<add> [Tensorflow Transform](
<add> https://www.tensorflow.org/tfx/transform/get_started) for a
<add> multi-machine, map-reduce solution.
<ide>
<ide> Arguments:
<ide> data: The data to train on. It can be passed either as a
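For reference, a minimal sketch of the adapt-then-lookup workflow this docstring describes (assumes TensorFlow 2.x; the exact vocabulary ordering noted in comments is illustrative):

import tensorflow as tf

data = tf.constant([[12, 1138, 42], [42, 1000, 36]])
layer = tf.keras.layers.IntegerLookup(max_tokens=5)
layer.adapt(data)  # builds the vocabulary, most frequent tokens first
print(layer.get_vocabulary())  # OOV token -1 first, then e.g. 42, 1138, ...
print(layer(data))  # maps each input token to its vocabulary index

As the docstring notes, adapting the layer a second time means any compiled model using it should be re-compiled.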
<ide><path>keras/layers/preprocessing/integer_lookup_test.py
<ide> def test_layer_end_to_end_with_adapt(
<ide> # dataset batch separately, then tries to concatenate the results
<ide> # together. When the results have different shapes on the non-concat
<ide> # axis (which can happen in the output_mode = INT case for
<del> # IntegerLookup), the concatenation fails. In real use cases, this may
<del> # not be an issue because users are likely to pipe the preprocessing layer
<del> # into other keras layers instead of predicting it directly. A workaround
<del> # for these unit tests is to have the dataset only contain one batch, so
<del> # no concatenation needs to happen with the result. For consistency with
<del> # numpy input, we should make `predict` join differently shaped results
<del> # together sensibly, with 0 padding.
<add> # IntegerLookup), the concatenation fails. In real use cases, this
<add> # may not be an issue because users are likely to pipe the
<add> # preprocessing layer into other keras layers instead of predicting
<add> # it directly. A workaround for these unit tests is to have the
<add> # dataset only contain one batch, so no concatenation needs to
<add> # happen with the result. For consistency with numpy input, we
<add> # should make `predict` join differently shaped results together
<add> # sensibly, with 0 padding.
<ide> input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
<ide> input_shape[0]
<ide> )
<ide> def test_vocabulary_persistence_across_saving(self):
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> # TODO(b/149526183): Can't clear session when TF2 is disabled.
<ide> if tf.__internal__.tf2.enabled():
<ide> keras.backend.clear_session()
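The vocabulary-persistence behavior exercised by this test can be reproduced with a short sketch (assumes TensorFlow 2.x; the save path is illustrative):

import tensorflow as tf

vocab = [12, 36, 1138, 42]
inputs = tf.keras.Input(shape=(None,), dtype="int64")
outputs = tf.keras.layers.IntegerLookup(vocabulary=vocab)(inputs)
model = tf.keras.Model(inputs, outputs)
model.save("/tmp/lookup_model", save_format="tf")

# The lookup table state survives the SavedModel round trip.
loaded = tf.keras.models.load_model("/tmp/lookup_model")
print(loaded.predict(tf.constant([[12, 1138, 42]])))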
<ide><path>keras/layers/preprocessing/normalization.py
<ide> class Normalization(base_preprocessing_layer.PreprocessingLayer):
<ide> """A preprocessing layer which normalizes continuous features.
<ide>
<ide> This layer will shift and scale inputs into a distribution centered around
<del> 0 with standard deviation 1. It accomplishes this by precomputing the mean and
<del> variance of the data, and calling `(input - mean) / sqrt(var)` at runtime.
<add> 0 with standard deviation 1. It accomplishes this by precomputing the mean
<add> and variance of the data, and calling `(input - mean) / sqrt(var)` at
<add> runtime.
<ide>
<ide> The mean and variance values for the layer must be either supplied on
<ide> construction or learned via `adapt()`. `adapt()` will compute the mean and
<ide> class Normalization(base_preprocessing_layer.PreprocessingLayer):
<ide> axis: Integer, tuple of integers, or None. The axis or axes that should
<ide> have a separate mean and variance for each index in the shape. For
<ide> example, if shape is `(None, 5)` and `axis=1`, the layer will track 5
<del> separate mean and variance values for the last axis. If `axis` is set to
<del> `None`, the layer will normalize all elements in the input by a scalar
<del> mean and variance. Defaults to -1, where the last axis of the input is
<del> assumed to be a feature dimension and is normalized per index. Note that
<del> in the specific case of batched scalar inputs where the only axis is the
<del> batch axis, the default will normalize each index in the batch
<del> separately. In this case, consider passing `axis=None`.
<add> separate mean and variance values for the last axis. If `axis` is set
<add> to `None`, the layer will normalize all elements in the input by a
<add> scalar mean and variance. Defaults to -1, where the last axis of the
<add> input is assumed to be a feature dimension and is normalized per
<add> index. Note that in the specific case of batched scalar inputs where
<add> the only axis is the batch axis, the default will normalize each index
<add> in the batch separately. In this case, consider passing `axis=None`.
<ide> mean: The mean value(s) to use during normalization. The passed value(s)
<ide> will be broadcast to the shape of the kept axes above; if the value(s)
<del> cannot be broadcast, an error will be raised when this layer's `build()`
<del> method is called.
<add> cannot be broadcast, an error will be raised when this layer's
<add> `build()` method is called.
<ide> variance: The variance value(s) to use during normalization. The passed
<ide> value(s) will be broadcast to the shape of the kept axes above; if the
<del> value(s) cannot be broadcast, an error will be raised when this layer's
<del> `build()` method is called.
<add> value(s) cannot be broadcast, an error will be raised when this
<add> layer's `build()` method is called.
<ide> invert: If True, this layer will apply the inverse transformation
<ide> to its inputs: it would turn a normalized input back into its
<ide> original form.
<ide> def build(self, input_shape):
<ide> for d in self._keep_axis:
<ide> if input_shape[d] is None:
<ide> raise ValueError(
<del> "All `axis` values to be kept must have known shape. Got axis: {}, "
<add> "All `axis` values to be kept must have known shape. "
<add> "Got axis: {}, "
<ide> "input shape: {}, with unknown axis at index: {}".format(
<ide> self.axis, input_shape, d
<ide> )
<ide> def build(self, input_shape):
<ide> )
<ide> self.finalize_state()
<ide> else:
<del> # In the no adapt case, make constant tensors for mean and variance with
<del> # proper broadcast shape for use during call.
<add> # In the no adapt case, make constant tensors for mean and variance
<add> # with proper broadcast shape for use during call.
<ide> mean = self.input_mean * np.ones(mean_and_var_shape)
<ide> variance = self.input_variance * np.ones(mean_and_var_shape)
<ide> mean = tf.reshape(mean, self._broadcast_shape)
<ide> def build(self, input_shape):
<ide> def adapt(self, data, batch_size=None, steps=None):
<ide> """Computes the mean and variance of values in a dataset.
<ide>
<del> Calling `adapt()` on a `Normalization` layer is an alternative to passing in
<del> `mean` and `variance` arguments during layer construction. A `Normalization`
<del> layer should always either be adapted over a dataset or passed `mean` and
<del> `variance`.
<del>
<del> During `adapt()`, the layer will compute a `mean` and `variance` separately
<del> for each position in each axis specified by the `axis` argument. To
<del> calculate a single `mean` and `variance` over the input data, simply pass
<del> `axis=None`.
<del>
<del> In order to make `Normalization` efficient in any distribution context, the
<del> computed mean and variance are kept static with respect to any compiled
<del> `tf.Graph`s that call the layer. As a consequence, if the layer is adapted a
<del> second time, any models using the layer should be re-compiled. For more
<del> information see
<add> Calling `adapt()` on a `Normalization` layer is an alternative to
<add> passing in `mean` and `variance` arguments during layer construction. A
<add> `Normalization` layer should always either be adapted over a dataset or
<add> passed `mean` and `variance`.
<add>
<add> During `adapt()`, the layer will compute a `mean` and `variance`
<add> separately for each position in each axis specified by the `axis`
<add> argument. To calculate a single `mean` and `variance` over the input
<add> data, simply pass `axis=None`.
<add>
<add> In order to make `Normalization` efficient in any distribution context,
<add> the computed mean and variance are kept static with respect to any
<add> compiled `tf.Graph`s that call the layer. As a consequence, if the layer
<add> is adapted a second time, any models using the layer should be
<add> re-compiled. For more information see
<ide> `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<ide>
<del> `adapt()` is meant only as a single machine utility to compute layer state.
<del> To analyze a dataset that cannot fit on a single machine, see
<del> [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)
<add> `adapt()` is meant only as a single machine utility to compute layer
<add> state. To analyze a dataset that cannot fit on a single machine, see
<add> [Tensorflow Transform](
<add> https://www.tensorflow.org/tfx/transform/get_started)
<ide> for a multi-machine, map-reduce solution.
<ide>
<ide> Arguments:
<ide> def update_state(self, data):
<ide> if self.input_mean is not None:
<ide> raise ValueError(
<ide> "Cannot `adapt` a Normalization layer that is initialized with "
<del> "static `mean` and `variance`, you passed mean {} and variance {}.".format(
<add> "static `mean` and `variance`, "
<add> "you passed mean {} and variance {}.".format(
<ide> self.input_mean, self.input_variance
<ide> )
<ide> )
<ide> def update_state(self, data):
<ide> self.adapt_mean * existing_weight + batch_mean * batch_weight
<ide> )
<ide> # The variance is computed using the lack-of-fit sum of squares
<del> # formula (see https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares).
<add> # formula (see
<add> # https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares).
<ide> total_variance = (
<ide> self.adapt_variance + (self.adapt_mean - total_mean) ** 2
<ide> ) * existing_weight + (
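The lack-of-fit update above can be sanity-checked numerically: merging per-batch moments must reproduce the moments of the concatenated data. A small NumPy-only sketch (shapes and values are illustrative):

import numpy as np

a = np.random.randn(32, 3)
b = np.random.randn(48, 3)
w_a = len(a) / (len(a) + len(b))
w_b = 1.0 - w_a

mean_a, var_a = a.mean(axis=0), a.var(axis=0)
mean_b, var_b = b.mean(axis=0), b.var(axis=0)

total_mean = w_a * mean_a + w_b * mean_b
# Within-batch variance plus the squared offset of each batch mean from
# the combined mean, weighted by relative batch size.
total_var = w_a * (var_a + (mean_a - total_mean) ** 2) + w_b * (
    var_b + (mean_b - total_mean) ** 2
)

full = np.concatenate([a, b])
assert np.allclose(total_mean, full.mean(axis=0))
assert np.allclose(total_var, full.var(axis=0))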
<ide><path>keras/layers/preprocessing/normalization_test.py
<ide> def test_scalar_input(self):
<ide> def test_output_dtype(self):
<ide> if not tf.__internal__.tf2.enabled():
<ide> self.skipTest("set_global_policy only supported in TF2.")
<del> # Output should respect an explicit dtype, and default to the global policy.
<add> # Output should respect an explicit dtype, and default to the global
<add> # policy.
<ide> policy.set_global_policy("float64")
<ide> input_data = keras.Input(batch_size=16, shape=(1,))
<ide> layer = normalization.Normalization(
<ide><path>keras/layers/preprocessing/preprocessing_stage.py
<ide> class PreprocessingStage(
<ide> a single `adapt()` call on the preprocessing stage.
<ide>
<ide> Args:
<del> layers: List of layers. Can include layers that aren't preprocessing layers.
<add> layers: List of layers. Can include layers that aren't preprocessing
<add> layers.
<ide> name: String. Optional name for the preprocessing stage object.
<ide> """
<ide>
<ide> def adapt(self, data, reset_state=True):
<ide> data, (tf.data.Dataset, np.ndarray, tf.__internal__.EagerTensor)
<ide> ):
<ide> raise ValueError(
<del> f"`adapt()` requires a batched Dataset, an EagerTensor, or a Numpy "
<del> f"array as input. Received data={data}"
<add> f"`adapt()` requires a batched Dataset, an EagerTensor, or a "
<add> f"Numpy array as input. Received data={data}"
<ide> )
<ide> if isinstance(data, tf.data.Dataset):
<del> # Validate the datasets to try and ensure we haven't been passed one with
<del> # infinite size. That would cause an infinite loop here.
<add> # Validate the datasets to try and ensure we haven't been passed one
<add> # with infinite size. That would cause an infinite loop here.
<ide> if tf_utils.dataset_is_infinite(data):
<ide> raise ValueError(
<ide> "The dataset passed to `adapt()` has an infinite number of "
<ide> def map_fn(x):
<ide> """Maps `PreprocessingStage` inputs to inputs at `current_layer_index`.
<ide>
<ide> Args:
<del> x: Batch of inputs seen in entry of the `PreprocessingStage` instance.
<add> x: Batch of inputs seen at the entry of the `PreprocessingStage`
<add> instance.
<ide>
<ide> Returns:
<ide> Batch of inputs to be processed by layer
<ide> class FunctionalPreprocessingStage(
<ide> >>> stage = FunctionalPreprocessingStage(inputs, outputs)
<ide>
<ide> Args:
<del> inputs: An input tensor (must be created via `tf.keras.Input()`), or a list,
<del> a dict, or a nested structure of input tensors.
<del> outputs: An output tensor, or a list, a dict or a nested structure of output
<del> tensors.
<add> inputs: An input tensor (must be created via `tf.keras.Input()`), or a
<add> list, a dict, or a nested structure of input tensors.
<add> outputs: An output tensor, or a list, a dict or a nested structure of
<add> output tensors.
<ide> name: String, optional. Name of the preprocessing stage.
<ide> """
<ide>
<ide> def fit(self, *args, **kwargs):
<ide> raise ValueError(
<del> "Preprocessing stage is not a complete model, and hence should not be "
<del> "`fit`. Instead, you may feed data to `adapt` the stage to set "
<add> "Preprocessing stage is not a complete model, and hence should not "
<add> "be `fit`. Instead, you may feed data to `adapt` the stage to set "
<ide> "appropriate states of the layers in the stage."
<ide> )
<ide>
<ide> def adapt(self, data, reset_state=True):
<ide> """Adapt the state of the layers of the preprocessing stage to the data.
<ide>
<ide> Args:
<del> data: A batched Dataset object, a NumPy array, an EagerTensor, or a list,
<del> dict or nested structure of Numpy Arrays or EagerTensors. The elements
<del> of Dataset object need to conform with inputs of the stage. The first
<del> dimension of NumPy arrays or EagerTensors are understood to be batch
<del> dimension. Data to be iterated over to adapt the state of the layers in
<del> this preprocessing stage.
<del> reset_state: Whether this call to `adapt` should reset the state of the
<del> layers in this preprocessing stage.
<add> data: A batched Dataset object, a NumPy array, an EagerTensor, or a
<add> list, dict or nested structure of NumPy arrays or EagerTensors. The
<add> elements of a Dataset object need to conform with the inputs of the
<add> stage. The first dimension of NumPy arrays or EagerTensors is
<add> understood to be the batch dimension. Data to be iterated over to
<add> adapt the state of the layers in this preprocessing stage.
<add> reset_state: Whether this call to `adapt` should reset the state of
<add> the layers in this preprocessing stage.
<ide>
<ide> Examples:
<ide>
<ide> def adapt(self, data, reset_state=True):
<ide> for datum in data
<ide> ):
<ide> raise ValueError(
<del> "`adapt()` requires a batched Dataset, a list of EagerTensors "
<del> "or Numpy arrays as input, got {}".format(type(data))
<add> "`adapt()` requires a batched Dataset, a list of "
<add> f"EagerTensors or Numpy arrays as input, got {type(data)}"
<ide> )
<ide> ds_input = [
<ide> tf.data.Dataset.from_tensor_slices(x).batch(1) for x in data
<ide> ]
<ide>
<ide> if isinstance(data, tf.data.Dataset):
<del> # Validate the datasets to try and ensure we haven't been passed one with
<del> # infinite size. That would cause an infinite loop here.
<add> # Validate the datasets to try and ensure we haven't been passed one
<add> # with infinite size. That would cause an infinite loop here.
<ide> if tf_utils.dataset_is_infinite(data):
<ide> raise ValueError(
<ide> "The dataset passed to `adapt()` has an infinite number of "
<ide><path>keras/layers/preprocessing/preprocessing_test_utils.py
<ide> def assert_extracted_output_equal(self, combiner, acc1, acc2, msg=None):
<ide> compare_accumulators = assertAllCloseOrEqual
<ide>
<ide> def validate_accumulator_computation(self, combiner, data, expected):
<del> """Validate that various combinations of compute and merge are identical."""
<add> """Validate that various combinations of compute and merge are
<add> identical."""
<ide> if len(data) < 4:
<ide> raise AssertionError(
<ide> f"Data must have at least 4 elements. Received "
<ide> def validate_accumulator_computation(self, combiner, data, expected):
<ide> self.compare_accumulators(
<ide> all_merge,
<ide> single_merge,
<del> msg="Calling merge with a data length of 1 should not change the data "
<del> "output.",
<add> msg="Calling merge with a data length of 1 should not change "
<add> "the data output.",
<ide> )
<ide>
<ide> self.compare_accumulators(
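The property these helpers verify (chunked compute-then-merge must equal a single pass over all the data) can be illustrated with a toy combiner whose accumulator is a plain running sum; this is illustrative only, not the real combiner API:

data = [[1, 2], [3], [4, 5, 6], [7]]
accumulators = [sum(chunk) for chunk in data]  # compute() per chunk
merged = sum(accumulators)  # merge() across accumulators
assert merged == sum(x for chunk in data for x in chunk)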
<ide><path>keras/layers/preprocessing/preprocessing_utils.py
<ide> def encode_categorical_inputs(
<ide> # TODO(b/190445202): remove output rank restriction.
<ide> if inputs.shape.rank > 2:
<ide> raise ValueError(
<del> f"When output_mode is not `'int'`, maximum supported output rank is 2. "
<del> f"Received output_mode {output_mode} and input shape {original_shape}, "
<add> f"When output_mode is not `'int'`, maximum supported output rank "
<add> f"is 2. Received output_mode {output_mode} and input shape "
<add> f"{original_shape}, "
<ide> f"which would result in output rank {inputs.shape.rank}."
<ide> )
<ide>
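A quick sketch of the rank restriction enforced here. In current Keras, `IntegerLookup` is one caller whose non-`"int"` output modes reach this helper (treat that wiring as an assumption; TensorFlow 2.x assumed):

import tensorflow as tf

layer = tf.keras.layers.IntegerLookup(
    vocabulary=[12, 36, 1138, 42], output_mode="multi_hot"
)
print(layer(tf.constant([[12, 42], [36, 36]])))  # rank-2 input is supported

try:
    layer(tf.constant([[[12], [42]]]))  # rank-3 input
except ValueError as err:
    print(err)  # the "maximum supported output rank is 2" error above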
<ide><path>keras/layers/preprocessing/string_lookup.py
<ide> class StringLookup(index_lookup.IndexLookup):
<ide>
<ide> The vocabulary for the layer must be either supplied on construction or
<ide> learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
<del> determine the frequency of individual strings tokens, and create a vocabulary
<del> from them. If the vocabulary is capped in size, the most frequent tokens will
<del> be used to create the vocabulary and all others will be treated as
<del> out-of-vocabulary (OOV).
<add> determine the frequency of individual string tokens, and create a
<add> vocabulary from them. If the vocabulary is capped in size, the most frequent
<add> tokens will be used to create the vocabulary and all others will be treated
<add> as out-of-vocabulary (OOV).
<ide>
<ide> There are two possible output modes for the layer.
<ide> When `output_mode` is `"int"`,
<ide> class StringLookup(index_lookup.IndexLookup):
<ide> The vocabulary can optionally contain a mask token as well as an OOV token
<ide> (which can optionally occupy multiple indices in the vocabulary, as set
<ide> by `num_oov_indices`).
<del> The position of these tokens in the vocabulary is fixed. When `output_mode` is
<del> `"int"`, the vocabulary will begin with the mask token (if set), followed by
<del> OOV indices, followed by the rest of the vocabulary. When `output_mode` is
<del> `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with OOV
<del> indices and instances of the mask token will be dropped.
<add> The position of these tokens in the vocabulary is fixed. When `output_mode`
<add> is `"int"`, the vocabulary will begin with the mask token (if set), followed
<add> by OOV indices, followed by the rest of the vocabulary. When `output_mode`
<add> is `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with
<add> OOV indices and instances of the mask token will be dropped.
<ide>
<ide> For an overview and full list of preprocessing layers, see the preprocessing
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide>
<ide> Args:
<del> max_tokens: Maximum size of the vocabulary for this layer. This should only
<del> be specified when adapting the vocabulary or when setting
<add> max_tokens: Maximum size of the vocabulary for this layer. This should
<add> only be specified when adapting the vocabulary or when setting
<ide> `pad_to_max_tokens=True`. If None, there is no cap on the size of the
<del> vocabulary. Note that this size includes the OOV and mask tokens. Defaults
<del> to None.
<add> vocabulary. Note that this size includes the OOV and mask tokens.
<add> Defaults to None.
<ide> num_oov_indices: The number of out-of-vocabulary tokens to use. If this
<del> value is more than 1, OOV inputs are hashed to determine their OOV value.
<del> If this value is 0, OOV inputs will cause an error when calling the layer.
<del> Defaults to 1.
<add> value is more than 1, OOV inputs are hashed to determine their OOV
<add> value. If this value is 0, OOV inputs will cause an error when calling
<add> the layer. Defaults to 1.
<ide> mask_token: A token that represents masked inputs. When `output_mode` is
<ide> `"int"`, the token is included in vocabulary and mapped to index 0. In
<ide> other output modes, the token will not appear in the vocabulary and
<del> instances of the mask token in the input will be dropped. If set to None,
<del> no mask term will be added. Defaults to `None`.
<add> instances of the mask token in the input will be dropped. If set to
<add> None, no mask term will be added. Defaults to `None`.
<ide> oov_token: Only used when `invert` is True. The token to return for OOV
<ide> indices. Defaults to `"[UNK]"`.
<del> vocabulary: Optional. Either an array of strings or a string path to a text
<del> file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D
<del> tensor containing the string vocbulary terms. If passing a file path, the
<del> file should contain one line per term in the vocabulary. If this argument
<del> is set, there is no need to `adapt()` the layer.
<del> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list, 1D
<del> numpy array, or 1D tensor or the same length as the vocabulary, containing
<del> the floating point inverse document frequency weights, which will be
<del> multiplied by per sample term counts for the final `tf_idf` weight. If the
<del> `vocabulary` argument is set, and `output_mode` is `"tf_idf"`, this
<del> argument must be supplied.
<add> vocabulary: Optional. Either an array of strings or a string path to a
<add> text file. If passing an array, can pass a tuple, list, 1D numpy array,
<add> or 1D tensor containing the string vocabulary terms. If passing a file
<add> path, the file should contain one line per term in the vocabulary. If
<add> this argument is set, there is no need to `adapt()` the layer.
<add> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
<add> 1D numpy array, or 1D tensor of the same length as the vocabulary,
<add> containing the floating point inverse document frequency weights, which
<add> will be multiplied by per sample term counts for the final `tf_idf`
<add> weight. If the `vocabulary` argument is set, and `output_mode` is
<add> `"tf_idf"`, this argument must be supplied.
<ide> invert: Only valid when `output_mode` is `"int"`. If True, this layer will
<ide> map indices to vocabulary items instead of mapping vocabulary items to
<ide> indices. Defaults to False.
<del> output_mode: Specification for the output of the layer. Defaults to `"int"`.
<del> Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
<del> `"tf_idf"` configuring the layer as follows:
<add> output_mode: Specification for the output of the layer. Defaults to
<add> `"int"`. Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`,
<add> or `"tf_idf"` configuring the layer as follows:
<ide> - `"int"`: Return the raw integer indices of the input tokens.
<ide> - `"one_hot"`: Encodes each individual element in the input into an
<ide> array the same size as the vocabulary, containing a 1 at the element
<del> index. If the last dimension is size 1, will encode on that dimension.
<del> If the last dimension is not size 1, will append a new dimension for
<del> the encoded output.
<add> index. If the last dimension is size 1, will encode on that
<add> dimension. If the last dimension is not size 1, will append a new
<add> dimension for the encoded output.
<ide> - `"multi_hot"`: Encodes each sample in the input into a single array
<ide> the same size as the vocabulary, containing a 1 for each vocabulary
<ide> term present in the sample. Treats the last dimension as the sample
<ide> dimension, if input shape is (..., sample_length), output shape will
<ide> be (..., num_tokens).
<del> - `"count"`: As `"multi_hot"`, but the int array contains a count of the
<del> number of times the token at that index appeared in the sample.
<add> - `"count"`: As `"multi_hot"`, but the int array contains a count of
<add> the number of times the token at that index appeared in the sample.
<ide> - `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
<ide> find the value in each token slot.
<ide> For `"int"` output, any shape of input and output is supported. For all
<ide> class StringLookup(index_lookup.IndexLookup):
<ide> `"count"`, or `"tf_idf"`. If True, the output will have its feature axis
<ide> padded to `max_tokens` even if the number of unique tokens in the
<ide> vocabulary is less than max_tokens, resulting in a tensor of shape
<del> [batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
<add> [batch_size, max_tokens] regardless of vocabulary size. Defaults to
<add> False.
<ide> sparse: Boolean. Only applicable when `output_mode` is `"multi_hot"`,
<ide> `"count"`, or `"tf_idf"`. If True, returns a `SparseTensor` instead of a
<ide> dense `Tensor`. Defaults to False.
<ide> class StringLookup(index_lookup.IndexLookup):
<ide>
<ide> **Creating a lookup layer with an adapted vocabulary**
<ide>
<del> This example creates a lookup layer and generates the vocabulary by analyzing
<del> the dataset.
<add> This example creates a lookup layer and generates the vocabulary by
<add> analyzing the dataset.
<ide>
<ide> >>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
<ide> >>> layer = tf.keras.layers.StringLookup()
<ide> class StringLookup(index_lookup.IndexLookup):
<ide>
<ide> **Lookups with multiple OOV indices**
<ide>
<del> This example demonstrates how to use a lookup layer with multiple OOV indices.
<del> When a layer is created with more than one OOV index, any OOV values are
<del> hashed into the number of OOV buckets, distributing OOV values in a
<del> deterministic fashion across the set.
<add> This example demonstrates how to use a lookup layer with multiple OOV
<add> indices. When a layer is created with more than one OOV index, any OOV
<add> values are hashed into the number of OOV buckets, distributing OOV values in
<add> a deterministic fashion across the set.
<ide>
<ide> >>> vocab = ["a", "b", "c", "d"]
<ide> >>> data = tf.constant([["a", "c", "d"], ["m", "z", "b"]])
<del> >>> layer = tf.keras.layers.StringLookup(vocabulary=vocab, num_oov_indices=2)
<add> >>> layer = tf.keras.layers.StringLookup(vocabulary=vocab,
<add> ... num_oov_indices=2)
<ide> >>> layer(data)
<ide> <tf.Tensor: shape=(2, 3), dtype=int64, numpy=
<ide> array([[2, 4, 5],
<ide> class StringLookup(index_lookup.IndexLookup):
<ide>
<ide> **Token count output**
<ide>
<del> Configure the layer with `output_mode='count'`. As with multi_hot output, the
<del> first `num_oov_indices` dimensions in the output represent OOV values.
<add> Configure the layer with `output_mode='count'`. As with multi_hot output,
<add> the first `num_oov_indices` dimensions in the output represent OOV values.
<ide>
<ide> >>> vocab = ["a", "b", "c", "d"]
<ide> >>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
<ide> class StringLookup(index_lookup.IndexLookup):
<ide>
<ide> **TF-IDF output**
<ide>
<del> Configure the layer with `output_mode="tf_idf"`. As with multi_hot output, the
<del> first `num_oov_indices` dimensions in the output represent OOV values.
<add> Configure the layer with `output_mode="tf_idf"`. As with multi_hot output,
<add> the first `num_oov_indices` dimensions in the output represent OOV values.
<ide>
<ide> Each token bin will output `token_count * idf_weight`, where the idf weights
<del> are the inverse document frequency weights per token. These should be provided
<del> along with the vocabulary. Note that the `idf_weight` for OOV values will
<del> default to the average of all idf weights passed in.
<add> are the inverse document frequency weights per token. These should be
<add> provided along with the vocabulary. Note that the `idf_weight` for OOV
<add> values will default to the average of all idf weights passed in.
<ide>
<ide> >>> vocab = ["a", "b", "c", "d"]
<ide> >>> idf_weights = [0.25, 0.75, 0.6, 0.4]
<ide> class StringLookup(index_lookup.IndexLookup):
<ide>
<ide> **Inverse lookup**
<ide>
<del> This example demonstrates how to map indices to strings using this layer. (You
<del> can also use `adapt()` with `inverse=True`, but for simplicity we'll pass the
<del> vocab in this example.)
<add> This example demonstrates how to map indices to strings using this layer.
<add> (You can also use `adapt()` with `inverse=True`, but for simplicity we'll
<add> pass the vocab in this example.)
<ide>
<ide> >>> vocab = ["a", "b", "c", "d"]
<ide> >>> data = tf.constant([[1, 3, 4], [4, 0, 2]])
<ide> class StringLookup(index_lookup.IndexLookup):
<ide> since "z" was not in the vocabulary - it got represented as an OOV, and all
<ide> OOV values are returned as `"[UNK]"` in the inverse layer. Also, note that
<ide> for the inverse to work, you must have already set the forward layer
<del> vocabulary either directly or via `adapt()` before calling `get_vocabulary()`.
<add> vocabulary either directly or via `adapt()` before calling
<add> `get_vocabulary()`.
<ide> """
<ide>
<ide> def __init__(
<ide> def get_config(self):
<ide> def adapt(self, data, batch_size=None, steps=None):
<ide> """Computes a vocabulary of string terms from tokens in a dataset.
<ide>
<del> Calling `adapt()` on a `StringLookup` layer is an alternative to passing in
<del> a precomputed vocabulary on construction via the `vocabulary` argument. A
<del> `StringLookup` layer should always be either adapted over a dataset or
<del> supplied with a vocabulary.
<add> Calling `adapt()` on a `StringLookup` layer is an alternative to passing
<add> in a precomputed vocabulary on construction via the `vocabulary`
<add> argument. A `StringLookup` layer should always be either adapted over a
<add> dataset or supplied with a vocabulary.
<ide>
<ide> During `adapt()`, the layer will build a vocabulary of all string tokens
<del> seen in the dataset, sorted by occurrence count, with ties broken by sort
<del> order of the tokens (high to low). At the end of `adapt()`, if `max_tokens`
<del> is set, the vocabulary wil be truncated to `max_tokens` size. For example,
<del> adapting a layer with `max_tokens=1000` will compute the 1000 most frequent
<del> tokens occurring in the input dataset. If `output_mode='tf-idf'`, `adapt()`
<del> will also learn the document frequencies of each token in the input dataset.
<del>
<del> In order to make `StringLookup` efficient in any distribution context, the
<del> vocabulary is kept static with respect to any compiled `tf.Graph`s that
<del> call the layer. As a consequence, if the layer is adapted a second time,
<del> any models using the layer should be re-compiled. For more information
<del> see `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<del>
<del> `adapt()` is meant only as a single machine utility to compute layer state.
<del> To analyze a dataset that cannot fit on a single machine, see
<del> [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)
<del> for a multi-machine, map-reduce solution.
<add> seen in the dataset, sorted by occurrence count, with ties broken by
<add> sort order of the tokens (high to low). At the end of `adapt()`, if
<add> `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
<add> size. For example, adapting a layer with `max_tokens=1000` will compute
<add> the 1000 most frequent tokens occurring in the input dataset. If
<add> `output_mode='tf-idf'`, `adapt()` will also learn the document
<add> frequencies of each token in the input dataset.
<add>
<add> In order to make `StringLookup` efficient in any distribution context,
<add> the vocabulary is kept static with respect to any compiled `tf.Graph`s
<add> that call the layer. As a consequence, if the layer is adapted a second
<add> time, any models using the layer should be re-compiled. For more
<add> information see
<add> `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<add>
<add> `adapt()` is meant only as a single machine utility to compute layer
<add> state. To analyze a dataset that cannot fit on a single machine, see
<add> [Tensorflow Transform](
<add> https://www.tensorflow.org/tfx/transform/get_started) for a
<add> multi-machine, map-reduce solution.
<ide>
<ide> Arguments:
<ide> data: The data to train on. It can be passed either as a
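A compact sketch of the forward/inverse pairing described in the class docstring (assumes TensorFlow 2.x; values are illustrative):

import tensorflow as tf

vocab = ["a", "b", "c", "d"]
to_index = tf.keras.layers.StringLookup(vocabulary=vocab)
to_string = tf.keras.layers.StringLookup(vocabulary=vocab, invert=True)

tokens = tf.constant([["a", "c", "z"]])  # "z" is out of vocabulary
indices = to_index(tokens)
print(to_string(indices))  # [['a', 'c', '[UNK]']]; OOV comes back as [UNK]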
<ide><path>keras/layers/preprocessing/string_lookup_test.py
<ide> def _get_end_to_end_test_cases():
<ide> test_cases = (
<ide> {
<ide> "testcase_name": "test_strings_soft_vocab_cap",
<del> # Create an array where 'earth' is the most frequent term, followed by
<del> # 'wind', then 'and', then 'fire'. This ensures that the vocab
<add> # Create an array where 'earth' is the most frequent term, followed
<add> # by 'wind', then 'and', then 'fire'. This ensures that the vocab
<ide> # accumulator is sorting by frequency.
<ide> "vocab_data": np.array(
<ide> [
<ide> def test_layer_end_to_end_with_adapt(
<ide> # dataset batch separately, then tries to concatenate the results
<ide> # together. When the results have different shapes on the non-concat
<ide> # axis (which can happen in the output_mode = INT case for
<del> # StringLookup), the concatenation fails. In real use cases, this may
<del> # not be an issue because users are likely to pipe the preprocessing layer
<del> # into other keras layers instead of predicting it directly. A workaround
<del> # for these unit tests is to have the dataset only contain one batch, so
<del> # no concatenation needs to happen with the result. For consistency with
<del> # numpy input, we should make `predict` join differently shaped results
<del> # together sensibly, with 0 padding.
<add> # StringLookup), the concatenation fails. In real use cases, this
<add> # may not be an issue because users are likely to pipe the
<add> # preprocessing layer into other keras layers instead of predicting
<add> # it directly. A workaround for these unit tests is to have the
<add> # dataset only contain one batch, so no concatenation needs to
<add> # happen with the result. For consistency with numpy input, we
<add> # should make `predict` join differently shaped results together
<add> # sensibly, with 0 padding.
<ide> input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
<ide> input_shape[0]
<ide> )
<ide><path>keras/layers/preprocessing/text_vectorization.py
<ide> class TextVectorization(base_preprocessing_layer.PreprocessingLayer):
<ide> """A preprocessing layer which maps text features to integer sequences.
<ide>
<del> This layer has basic options for managing text in a Keras model. It transforms
<del> a batch of strings (one example = one string) into either a list of token
<del> indices (one example = 1D tensor of integer token indices) or a dense
<del> representation (one example = 1D tensor of float values representing data
<del> about the example's tokens). This layer is meant to handle natural language
<del> inputs. To handle simple string inputs (categorical strings or pre-tokenized
<del> strings) see `tf.keras.layers.StringLookup`.
<add> This layer has basic options for managing text in a Keras model. It
<add> transforms a batch of strings (one example = one string) into either a list
<add> of token indices (one example = 1D tensor of integer token indices) or a
<add> dense representation (one example = 1D tensor of float values representing
<add> data about the example's tokens). This layer is meant to handle natural
<add> language inputs. To handle simple string inputs (categorical strings or
<add> pre-tokenized strings) see `tf.keras.layers.StringLookup`.
<ide>
<ide> The vocabulary for the layer must be either supplied on construction or
<ide> learned via `adapt()`. When this layer is adapted, it will analyze the
<ide> dataset, determine the frequency of individual string values, and create a
<ide> vocabulary from them. This vocabulary can have unlimited size or be capped,
<ide> depending on the configuration options for this layer; if there are more
<del> unique values in the input than the maximum vocabulary size, the most frequent
<del> terms will be used to create the vocabulary.
<add> unique values in the input than the maximum vocabulary size, the most
<add> frequent terms will be used to create the vocabulary.
<ide>
<ide> The processing of each example contains the following steps:
<ide>
<ide> class TextVectorization(base_preprocessing_layer.PreprocessingLayer):
<ide> [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide>
<ide> Args:
<del> max_tokens: Maximum size of the vocabulary for this layer. This should only
<del> be specified when adapting a vocabulary or when setting
<add> max_tokens: Maximum size of the vocabulary for this layer. This should
<add> only be specified when adapting a vocabulary or when setting
<ide> `pad_to_max_tokens=True`. Note that this vocabulary
<del> contains 1 OOV token, so the effective number of tokens is `(max_tokens -
<del> 1 - (1 if output_mode == "int" else 0))`.
<add> contains 1 OOV token, so the effective number of tokens is
<add> `(max_tokens - 1 - (1 if output_mode == "int" else 0))`.
<ide> standardize: Optional specification for standardization to apply to the
<ide> input text. Values can be:
<ide> - `None`: No standardization.
<ide> class TextVectorization(base_preprocessing_layer.PreprocessingLayer):
<ide> - `"character"`: Split on each unicode character.
<ide> - Callable: Standardized inputs will be passed to the callable
<ide> function, which should split and return them.
<del> ngrams: Optional specification for ngrams to create from the possibly-split
<del> input text. Values can be None, an integer or tuple of integers; passing
<del> an integer will create ngrams up to that integer, and passing a tuple of
<del> integers will create ngrams for the specified values in the tuple. Passing
<del> None means that no ngrams will be created.
<del> output_mode: Optional specification for the output of the layer. Values can
<del> be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`, configuring the layer
<del> as follows:
<add> ngrams: Optional specification for ngrams to create from the
<add> possibly-split input text. Values can be None, an integer or tuple of
<add> integers; passing an integer will create ngrams up to that integer, and
<add> passing a tuple of integers will create ngrams for the specified values
<add> in the tuple. Passing None means that no ngrams will be created.
<add> output_mode: Optional specification for the output of the layer. Values
<add> can be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`, configuring the
<add> layer as follows:
<ide> - `"int"`: Outputs integer indices, one integer index per split string
<ide> token. When `output_mode == "int"`, 0 is reserved for masked
<ide> locations; this reduces the vocab size to
<ide> `max_tokens - 2` instead of `max_tokens - 1`.
<ide> - `"multi_hot"`: Outputs a single int array per batch, of either
<del> vocab_size or max_tokens size, containing 1s in all elements where the
<del> token mapped to that index exists at least once in the batch item.
<add> vocab_size or max_tokens size, containing 1s in all elements where
<add> the token mapped to that index exists at least once in the batch
<add> item.
<ide> - `"count"`: Like `"multi_hot"`, but the int array contains a count of
<ide> the number of times the token at that index appeared in the
<ide> batch item.
<del> - `"tf_idf"`: Like `"multi_hot"`, but the TF-IDF algorithm is applied to
<del> find the value in each token slot.
<add> - `"tf_idf"`: Like `"multi_hot"`, but the TF-IDF algorithm is applied
<add> to find the value in each token slot.
<ide> For `"int"` output, any shape of input and output is supported. For all
<del> other output modes, currently only rank 1 inputs (and rank 2 outputs after
<del> splitting) are supported.
<del> output_sequence_length: Only valid in INT mode. If set, the output will have
<del> its time dimension padded or truncated to exactly `output_sequence_length`
<del> values, resulting in a tensor of shape
<add> other output modes, currently only rank 1 inputs (and rank 2 outputs
<add> after splitting) are supported.
<add> output_sequence_length: Only valid in INT mode. If set, the output will
<add> have its time dimension padded or truncated to exactly
<add> `output_sequence_length` values, resulting in a tensor of shape
<ide> `(batch_size, output_sequence_length)` regardless of how many tokens
<ide> resulted from the splitting step. Defaults to None.
<ide> pad_to_max_tokens: Only valid in `"multi_hot"`, `"count"`, and `"tf_idf"`
<ide> modes. If True, the output will have its feature axis padded to
<del> `max_tokens` even if the number of unique tokens in the vocabulary is less
<del> than max_tokens, resulting in a tensor of shape `(batch_size, max_tokens)`
<del> regardless of vocabulary size. Defaults to False.
<del> vocabulary: Optional. Either an array of strings or a string path to a text
<del> file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D
<del> tensor containing the string vocbulary terms. If passing a file path, the
<del> file should contain one line per term in the vocabulary. If this argument
<del> is set, there is no need to `adapt()` the layer.
<del> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list, 1D
<del> numpy array, or 1D tensor or the same length as the vocabulary, containing
<del> the floating point inverse document frequency weights, which will be
<del> multiplied by per sample term counts for the final `tf_idf` weight. If the
<del> `vocabulary` argument is set, and `output_mode` is `"tf_idf"`, this
<del> argument must be supplied.
<del> ragged: Boolean. Only applicable to `"int"` output mode. If True, returns a
<del> `RaggedTensor` instead of a dense `Tensor`, where each sequence may have a
<del> different length after string splitting. Defaults to False.
<add> `max_tokens` even if the number of unique tokens in the vocabulary is
<add> less than max_tokens, resulting in a tensor of shape `(batch_size,
<add> max_tokens)` regardless of vocabulary size. Defaults to False.
<add> vocabulary: Optional. Either an array of strings or a string path to a
<add> text file. If passing an array, can pass a tuple, list, 1D numpy array,
<add> or 1D tensor containing the string vocabulary terms. If passing a file
<add> path, the file should contain one line per term in the vocabulary. If
<add> this argument is set, there is no need to `adapt()` the layer.
<add> idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
<add> 1D numpy array, or 1D tensor of the same length as the vocabulary,
<add> containing the floating point inverse document frequency weights, which
<add> will be multiplied by per sample term counts for the final `tf_idf`
<add> weight. If the `vocabulary` argument is set, and `output_mode` is
<add> `"tf_idf"`, this argument must be supplied.
<add> ragged: Boolean. Only applicable to `"int"` output mode. If True, returns
<add> a `RaggedTensor` instead of a dense `Tensor`, where each sequence may
<add> have a different length after string splitting. Defaults to False.
<ide> sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
<ide> `"tf_idf"` output modes. If True, returns a `SparseTensor` instead of a
<ide> dense `Tensor`. Defaults to False.
<ide> class TextVectorization(base_preprocessing_layer.PreprocessingLayer):
<ide> ... output_mode='int',
<ide> ... output_sequence_length=max_len)
<ide> >>>
<del> >>> # Now that the vocab layer has been created, call `adapt` on the text-only
<del> >>> # dataset to create the vocabulary. You don't have to batch, but for large
<del> >>> # datasets this means we're not keeping spare copies of the dataset.
<add> >>> # Now that the vocab layer has been created, call `adapt` on the
<add> >>> # text-only dataset to create the vocabulary. You don't have to batch,
<add> >>> # but for large datasets this means we're not keeping spare copies of
<add> >>> # the dataset.
<ide> >>> vectorize_layer.adapt(text_dataset.batch(64))
<ide> >>>
<ide> >>> # Create the model that uses the vectorize text layer
<ide> class TextVectorization(base_preprocessing_layer.PreprocessingLayer):
<ide> >>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
<ide> >>>
<ide> >>> # The first layer in our model is the vectorization layer. After this
<del> >>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab
<del> >>> # indices.
<add> >>> # layer, we have a tensor of shape (batch_size, max_len) containing
<add> >>> # vocab indices.
<ide> >>> model.add(vectorize_layer)
<ide> >>>
<del> >>> # Now, the model can map strings to integers, and you can add an embedding
<del> >>> # layer to map these integers to learned embeddings.
<add> >>> # Now, the model can map strings to integers, and you can add an
<add> >>> # embedding layer to map these integers to learned embeddings.
<ide> >>> input_data = [["foo qux bar"], ["qux baz"]]
<ide> >>> model.predict(input_data)
<ide> array([[2, 1, 4, 0],
<ide> class TextVectorization(base_preprocessing_layer.PreprocessingLayer):
<ide> >>>
<ide> >>> # Because we've passed the vocabulary directly, we don't need to adapt
<ide> >>> # the layer - the vocabulary is already set. The vocabulary contains the
<del> >>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.
<add> >>> # padding token ('') and OOV token ('[UNK]') as well as the passed
<add> >>> # tokens.
<ide> >>> vectorize_layer.get_vocabulary()
<ide> ['', '[UNK]', 'earth', 'wind', 'and', 'fire']
<ide>
<ide> def __init__(
<ide> kwargs["dtype"] = tf.string
<ide>
<ide> # 'standardize' must be one of
<del> # (None, LOWER_AND_STRIP_PUNCTUATION, LOWER, STRIP_PUNCTUATION, callable)
<add> # (None, LOWER_AND_STRIP_PUNCTUATION, LOWER, STRIP_PUNCTUATION,
<add> # callable)
<ide> layer_utils.validate_string_arg(
<ide> standardize,
<ide> allowable_strings=(
<ide> def __init__(
<ide>
<ide> if output_mode != INT and output_sequence_length is not None:
<ide> raise ValueError(
<del> f"`output_sequence_length` must not be set if `output_mode` is not "
<del> f"'int'. Received output_sequence_length={output_sequence_length}."
<add> f"`output_sequence_length` must not be set if `output_mode` is "
<add> f"not 'int'. "
<add> f"Received output_sequence_length={output_sequence_length}."
<ide> )
<ide>
<ide> if ragged and output_mode != INT:
<ide> def __init__(
<ide> self._output_mode = output_mode
<ide> self._output_sequence_length = output_sequence_length
<ide>
<del> # VocabularySavedModelSaver will clear the config vocabulary to restore the
<del> # lookup table ops directly. We persist this hidden option to persist the
<del> # fact that we have have a non-adaptable layer with a manually set vocab.
<add> # VocabularySavedModelSaver will clear the config vocabulary to restore
<add> # the lookup table ops directly. We persist this hidden option to
<add>        # persist the fact that we have a non-adaptable layer with a
<add> # manually set vocab.
<ide> self._has_input_vocabulary = kwargs.pop(
<ide> "has_input_vocabulary", (vocabulary is not None)
<ide> )
<ide> def adapt(self, data, batch_size=None, steps=None):
<ide>
<ide> Calling `adapt()` on a `TextVectorization` layer is an alternative to
<ide> passing in a precomputed vocabulary on construction via the `vocabulary`
<del> argument. A `TextVectorization` layer should always be either adapted over a
<del> dataset or supplied with a vocabulary.
<add> argument. A `TextVectorization` layer should always be either adapted
<add> over a dataset or supplied with a vocabulary.
<ide>
<ide> During `adapt()`, the layer will build a vocabulary of all string tokens
<del> seen in the dataset, sorted by occurrence count, with ties broken by sort
<del> order of the tokens (high to low). At the end of `adapt()`, if `max_tokens`
<del> is set, the vocabulary wil be truncated to `max_tokens` size. For example,
<del> adapting a layer with `max_tokens=1000` will compute the 1000 most frequent
<del> tokens occurring in the input dataset. If `output_mode='tf-idf'`, `adapt()`
<del> will also learn the document frequencies of each token in the input dataset.
<del>
<del> In order to make `TextVectorization` efficient in any distribution context,
<del> the vocabulary is kept static with respect to any compiled `tf.Graph`s that
<del> call the layer. As a consequence, if the layer is adapted a second time,
<del> any models using the layer should be re-compiled. For more information
<del> see `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<del>
<del> `adapt()` is meant only as a single machine utility to compute layer state.
<del> To analyze a dataset that cannot fit on a single machine, see
<del> [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)
<del> for a multi-machine, map-reduce solution.
<add> seen in the dataset, sorted by occurrence count, with ties broken by
<add> sort order of the tokens (high to low). At the end of `adapt()`, if
<add>        `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
<add> size. For example, adapting a layer with `max_tokens=1000` will compute
<add> the 1000 most frequent tokens occurring in the input dataset. If
<add> `output_mode='tf-idf'`, `adapt()` will also learn the document
<add> frequencies of each token in the input dataset.
<add>
<add> In order to make `TextVectorization` efficient in any distribution
<add> context, the vocabulary is kept static with respect to any compiled
<add> `tf.Graph`s that call the layer. As a consequence, if the layer is
<add> adapted a second time, any models using the layer should be re-compiled.
<add> For more information see
<add> `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
<add>
<add> `adapt()` is meant only as a single machine utility to compute layer
<add> state. To analyze a dataset that cannot fit on a single machine, see
<add> [Tensorflow Transform](
<add> https://www.tensorflow.org/tfx/transform/get_started) for a
<add> multi-machine, map-reduce solution.
<ide>
<ide> Arguments:
<ide> data: The data to train on. It can be passed either as a
<ide> def get_vocabulary(self, include_special_tokens=True):
<ide>
<ide> Args:
<ide> include_special_tokens: If True, the returned vocabulary will include
<del> the padding and OOV tokens, and a term's index in the vocabulary will
<del> equal the term's index when calling the layer. If False, the returned
<del> vocabulary will not include any padding or OOV tokens.
<add> the padding and OOV tokens, and a term's index in the vocabulary
<add> will equal the term's index when calling the layer. If False, the
<add> returned vocabulary will not include any padding or OOV tokens.
<ide> """
<ide> return self._lookup_layer.get_vocabulary(include_special_tokens)
<ide>
<ide> def set_vocabulary(self, vocabulary, idf_weights=None):
<ide> """Sets vocabulary (and optionally document frequency) data for this layer.
<ide>
<ide> This method sets the vocabulary and idf weights for this layer directly,
<del> instead of analyzing a dataset through 'adapt'. It should be used whenever
<del> the vocab (and optionally document frequency) information is already known.
<del> If vocabulary data is already present in the layer, this method will replace
<del> it.
<add> instead of analyzing a dataset through 'adapt'. It should be used
<add> whenever the vocab (and optionally document frequency) information is
<add> already known. If vocabulary data is already present in the layer, this
<add> method will replace it.
<ide>
<ide> Args:
<del> vocabulary: Either an array or a string path to a text file. If passing an
<del> array, can pass a tuple, list, 1D numpy array, or 1D tensor containing
<del> the vocbulary terms. If passing a file path, the file should contain one
<del> line per term in the vocabulary.
<add> vocabulary: Either an array or a string path to a text file. If
<add> passing an array, can pass a tuple, list, 1D numpy array, or 1D
<add>            tensor containing the vocabulary terms. If passing a file path, the
<add> file should contain one line per term in the vocabulary.
<ide> idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse
<del> document frequency weights with equal length to vocabulary. Must be set
<del> if `output_mode` is `"tf_idf"`. Should not be set otherwise.
<add> document frequency weights with equal length to vocabulary. Must be
<add> set if `output_mode` is `"tf_idf"`. Should not be set otherwise.
<ide>
<ide> Raises:
<ide> ValueError: If there are too many inputs, the inputs do not match, or
<ide> input data is missing.
<ide> RuntimeError: If the vocabulary cannot be set when this function is
<del> called. This happens when `"multi_hot"`, `"count"`, and "tf_idf" modes,
<del> if `pad_to_max_tokens` is False and the layer itself has already been
<del> called.
<add>            called. This happens with `"multi_hot"`, `"count"`, and `"tf_idf"`
<add> modes, if `pad_to_max_tokens` is False and the layer itself has
<add> already been called.
<ide> """
<ide> self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
<ide>
<ide> def _preprocess(self, inputs):
<ide> inputs = self._standardize(inputs)
<ide>
<ide> if self._split is not None:
<del> # If we are splitting, we validate that the 1st axis is of dimension 1 and
<del> # so can be squeezed out. We do this here instead of after splitting for
<del> # performance reasons - it's more expensive to squeeze a ragged tensor.
<add> # If we are splitting, we validate that the 1st axis is of dimension
<add> # 1 and so can be squeezed out. We do this here instead of after
<add> # splitting for performance reasons - it's more expensive to squeeze
<add> # a ragged tensor.
<ide> if inputs.shape.rank > 1:
<ide> if inputs.shape[-1] != 1:
<ide> raise ValueError(
<del> "When using `TextVectorization` to tokenize strings, the input "
<del> "rank must be 1 or the last shape dimension must be 1. Received: "
<del> f"inputs.shape={inputs.shape} with rank={inputs.shape.rank}"
<add> "When using `TextVectorization` to tokenize strings, "
<add> "the input rank must be 1 or the last shape dimension "
<add> f"must be 1. Received: inputs.shape={inputs.shape} "
<add> f"with rank={inputs.shape.rank}"
<ide> )
<ide> else:
<ide> inputs = tf.squeeze(inputs, axis=-1)
<ide> if self._split == WHITESPACE:
<del> # This treats multiple whitespaces as one whitespace, and strips leading
<del> # and trailing whitespace.
<add> # This treats multiple whitespaces as one whitespace, and strips
<add> # leading and trailing whitespace.
<ide> inputs = tf.strings.split(inputs)
<ide> elif self._split == CHARACTER:
<ide> inputs = tf.strings.unicode_split(inputs, "UTF-8")
<ide> def _preprocess(self, inputs):
<ide> )
<ide>
<ide> # Note that 'inputs' here can be either ragged or dense depending on the
<del> # configuration choices for this Layer. The strings.ngrams op, however, does
<del> # support both ragged and dense inputs.
<add> # configuration choices for this Layer. The strings.ngrams op, however,
<add> # does support both ragged and dense inputs.
<ide> if self._ngrams is not None:
<ide> inputs = tf.strings.ngrams(
<ide> inputs, ngram_width=self._ngrams, separator=" "
<ide> def call(self, inputs):
<ide>
<ide> lookup_data = self._lookup_layer(inputs)
<ide>
<del> # For any non-int output, we can return directly from the underlying layer.
<add> # For any non-int output, we can return directly from the underlying
<add> # layer.
<ide> if self._output_mode != INT:
<ide> return lookup_data
<ide>
<ide> def call(self, inputs):
<ide> # If we have a ragged tensor, we can pad during the conversion to dense.
<ide> if tf_utils.is_ragged(lookup_data):
<ide> shape = lookup_data.shape.as_list()
<del> # If output sequence length is None, to_tensor will pad the last dimension
<del> # to the bounding shape of the ragged dimension.
<add> # If output sequence length is None, to_tensor will pad the last
<add> # dimension to the bounding shape of the ragged dimension.
<ide> shape[-1] = self._output_sequence_length
<ide> return lookup_data.to_tensor(default_value=0, shape=shape)
<ide>
<ide> def call(self, inputs):
<ide> # Maybe trim the output.
<ide> lookup_data = lookup_data[..., : self._output_sequence_length]
<ide>
<del> # Maybe pad the output. We need to be careful to use dynamic shape here as
<del> # required_space_to_batch_paddings requires a fully known shape.
<add> # Maybe pad the output. We need to be careful to use dynamic shape
<add> # here as required_space_to_batch_paddings requires a fully known
<add> # shape.
<ide> shape = tf.shape(lookup_data)
<ide> padded_shape = tf.concat(
<ide> (shape[:-1], [self._output_sequence_length]), 0
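
For illustration, a minimal sketch (assuming TensorFlow 2.x, where this
layer is exposed as `tf.keras.layers.TextVectorization`; the toy vocabulary
and strings are invented) of the padding and trimming behavior handled
above:

import tensorflow as tf

# Indices 0 ('') and 1 ('[UNK]') are reserved for padding and OOV tokens.
layer = tf.keras.layers.TextVectorization(
    vocabulary=["earth", "wind", "and", "fire"],
    output_mode="int",
    output_sequence_length=4,
)
# "earth wind" (2 tokens) is padded with zeros to length 4, while
# "earth wind and fire fire" (5 tokens) is trimmed to length 4.
out = layer(tf.constant([["earth wind"], ["earth wind and fire fire"]]))
print(out.shape)  # (2, 4)
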
<ide><path>keras/layers/preprocessing/text_vectorization_distribution_test.py
<ide> def test_distribution_strategy_output(self, strategy):
<ide> self.assertAllEqual(expected_output, output_dataset)
<ide>
<ide> def test_distribution_strategy_output_with_adapt(self, strategy):
<del> # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
<add> # TODO(b/180614455): remove this check when MLIR bridge is always
<add> # enabled.
<ide> if backend.is_tpu_strategy(strategy):
<ide> self.skipTest("This test needs MLIR bridge on TPU.")
<ide>
<ide><path>keras/layers/preprocessing/text_vectorization_test.py
<ide> def _get_end_to_end_test_cases():
<ide> test_cases = (
<ide> {
<ide> "testcase_name": "test_simple_tokens_int_mode",
<del> # Create an array where 'earth' is the most frequent term, followed by
<del> # 'wind', then 'and', then 'fire'. This ensures that the vocab
<del> # is sorting by frequency.
<add> # Create an array where 'earth' is the most frequent term, followed
<add> # by 'wind', then 'and', then 'fire'. This ensures that the vocab is
<add> # sorting by frequency.
<ide> "vocab_data": np.array(
<ide> [
<ide> ["fire"],
<ide> def _get_end_to_end_test_cases():
<ide> },
<ide> {
<ide> "testcase_name": "test_simple_tokens_int_mode_hard_cap",
<del> # Create an array where 'earth' is the most frequent term, followed by
<del> # 'wind', then 'and', then 'fire'. This ensures that the vocab
<del> # is sorting by frequency.
<add> # Create an array where 'earth' is the most frequent term, followed
<add> # by 'wind', then 'and', then 'fire'. This ensures that the vocab is
<add> # sorting by frequency.
<ide> "vocab_data": np.array(
<ide> [
<ide> ["fire"],
<ide> def _get_end_to_end_test_cases():
<ide> },
<ide> {
<ide> "testcase_name": "test_special_tokens_int_mode",
<del> # Mask tokens in the vocab data should be ignored, and mapped to 0 in
<del> # from the input data.
<add> # Mask tokens in the vocab data should be ignored, and mapped to 0
<add>        # in the input data.
<ide> "vocab_data": np.array(
<ide> [
<ide> ["fire"],
<ide> def test_layer_end_to_end_with_adapt(
<ide> # dataset batch separately, then tries to concatenate the results
<ide> # together. When the results have different shapes on the non-concat
<ide> # axis (which can happen in the output_mode = INT case for
<del> # TextVectorization), the concatenation fails. In real use cases, this may
<del> # not be an issue because users are likely to pipe the preprocessing layer
<del> # into other keras layers instead of predicting it directly. A workaround
<del> # for these unit tests is to have the dataset only contain one batch, so
<del> # no concatenation needs to happen with the result. For consistency with
<del> # numpy input, we should make `predict` join differently shaped results
<del> # together sensibly, with 0 padding.
<add> # TextVectorization), the concatenation fails. In real use cases,
<add> # this may not be an issue because users are likely to pipe the
<add> # preprocessing layer into other keras layers instead of predicting
<add> # it directly. A workaround for these unit tests is to have the
<add> # dataset only contain one batch, so no concatenation needs to
<add> # happen with the result. For consistency with numpy input, we
<add> # should make `predict` join differently shaped results together
<add> # sensibly, with 0 padding.
<ide> input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
<ide> input_shape[0]
<ide> )
<ide> def test_summary_before_adapt(self):
<ide> )
<ide> int_data = layer(input_data)
<ide> model = keras.Model(inputs=input_data, outputs=int_data)
<del> # We are testing that model.summary() can be called without erroring out.
<del> # (b/145726907)
<add> # We are testing that model.summary() can be called without erroring
<add> # out. (b/145726907)
<ide> model.summary()
<ide>
<ide> @parameterized.parameters([list, np.array, tf.constant, tf.ragged.constant])
<ide> def test_int_output(self):
<ide>
<ide> def test_int_output_densifies_with_zeros(self):
<ide> vocab_data = ["earth", "wind", "and", "fire"]
<del> # Create an input array that has 5 elements in the first example and 4 in
<del> # the second. This should output a 2x5 tensor with a padding value in the
<del> # second example.
<add> # Create an input array that has 5 elements in the first example and 4
<add> # in the second. This should output a 2x5 tensor with a padding value in
<add> # the second example.
<ide> input_array = np.array(
<ide> [["earth wind and also fire"], ["fire and earth michigan"]]
<ide> )
<ide> def test_int_output_densifies_with_zeros(self):
<ide>
<ide> def test_int_output_ragged(self):
<ide> vocab_data = ["earth", "wind", "and", "fire"]
<del> # Create an input array that has 5 elements in the first example and 4 in
<del> # the second.
<add> # Create an input array that has 5 elements in the first example and 4
<add> # in the second.
<ide> input_array = np.array(
<ide> [["earth wind and also fire"], ["fire and earth michigan"]]
<ide> )
<ide> def test_int_output_ragged(self):
<ide>
<ide> def test_int_output_densifies_with_zeros_and_pads(self):
<ide> vocab_data = ["earth", "wind", "and", "fire"]
<del> # Create an input array that has 5 elements in the first example and 4 in
<del> # the second. This should output a 2x6 tensor with a padding value in the
<del> # second example, since output_sequence_length is set to 6.
<add> # Create an input array that has 5 elements in the first example and 4
<add> # in the second. This should output a 2x6 tensor with a padding value in
<add> # the second example, since output_sequence_length is set to 6.
<ide> input_array = np.array(
<ide> [["earth wind and also fire"], ["fire and earth michigan"]]
<ide> )
<ide> def test_int_output_densifies_with_zeros_and_pads(self):
<ide>
<ide> def test_int_output_densifies_with_zeros_and_strips(self):
<ide> vocab_data = ["earth", "wind", "and", "fire"]
<del> # Create an input array that has 5 elements in the first example and 4 in
<del> # the second. This should output a 2x3 tensor with a padding value in the
<del> # second example, since output_sequence_length is set to 3.
<add> # Create an input array that has 5 elements in the first example and 4
<add> # in the second. This should output a 2x3 tensor with a padding value in
<add> # the second example, since output_sequence_length is set to 3.
<ide> input_array = np.array(
<ide> [["earth wind and also fire"], ["fire and earth michigan"]]
<ide> )
<ide> def test_int_output_densifies_with_zeros_and_strips(self):
<ide>
<ide> def test_int_output_dynamically_strips_and_pads(self):
<ide> vocab_data = ["earth", "wind", "and", "fire"]
<del> # Create an input array that has 5 elements in the first example and 4 in
<del> # the second. This should output a 2x3 tensor with a padding value in the
<del> # second example, since output_sequence_length is set to 3.
<add> # Create an input array that has 5 elements in the first example and 4
<add> # in the second. This should output a 2x3 tensor with a padding value in
<add> # the second example, since output_sequence_length is set to 3.
<ide> input_array = np.array(
<ide> [["earth wind and also fire"], ["fire and earth michigan"]]
<ide> )
<ide> def test_int_output_dynamically_strips_and_pads(self):
<ide> self.assertAllEqual(expected_output, output_dataset)
<ide>
<ide> # Create an input array that has 1 element in the first example and 2 in
<del> # the second. This should output a 2x3 tensor with a padding value in the
<del> # second example, since output_sequence_length is set to 3.
<add> # the second. This should output a 2x3 tensor with a padding value in
<add> # the second example, since output_sequence_length is set to 3.
<ide> input_array_2 = np.array([["wind"], ["fire and"]])
<ide> expected_output_2 = [[3, 0, 0], [5, 4, 0]]
<ide> output_dataset = model.predict(input_array_2)
<ide> def test_saving(self, init_vocab):
<ide>
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = keras.models.load_model(output_path)
<ide> def test_saving_when_nested(self, init_vocab):
<ide> output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
<ide> outer_model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = keras.models.load_model(output_path)
<ide> def test_saving_when_adapted(self):
<ide>
<ide> model.save(output_path, save_format="tf")
<ide>
<del> # Delete the session and graph to ensure that the loaded model is generated
<del> # from scratch.
<add> # Delete the session and graph to ensure that the loaded model is
<add> # generated from scratch.
<ide> keras.backend.clear_session()
<ide>
<ide> loaded_model = keras.models.load_model(output_path)
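
The saving tests above all follow the same round-trip pattern; a condensed
sketch (assuming TensorFlow 2.x; the save path is illustrative):

import numpy as np
import tensorflow as tf

layer = tf.keras.layers.TextVectorization(output_mode="int")
layer.adapt(tf.constant(["earth wind and fire"]))

inputs = tf.keras.Input(shape=(1,), dtype=tf.string)
model = tf.keras.Model(inputs, layer(inputs))
expected = model.predict([["earth fire"]])
model.save("/tmp/tv_model", save_format="tf")

# Clear the session so the loaded model is generated from scratch.
tf.keras.backend.clear_session()
loaded = tf.keras.models.load_model("/tmp/tv_model")
np.testing.assert_array_equal(expected, loaded.predict([["earth fire"]]))
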
<ide><path>keras/layers/regularization/dropout.py
<ide> def build(self, input_shape):
<ide> self._random_generator._maybe_init() # pylint: disable=protected-access
<ide>
<ide> def _get_noise_shape(self, inputs):
<del> # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
<del> # which will override `self.noise_shape`, and allows for custom noise
<del> # shapes with dynamically sized inputs.
<add> # Subclasses of `Dropout` may implement `_get_noise_shape(self,
<add> # inputs)`, which will override `self.noise_shape`, and allows for
<add> # custom noise shapes with dynamically sized inputs.
<ide> if self.noise_shape is None:
<ide> return None
<ide>
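
As the comment notes, subclasses customize the noise shape by overriding
`_get_noise_shape`; a hypothetical subclass (the name and behavior are
invented for illustration, mirroring how the SpatialDropout layers
specialize Dropout) that makes one keep/drop decision per timestep:

import tensorflow as tf

class TimestepDropout(tf.keras.layers.Dropout):
    def _get_noise_shape(self, inputs):
        # Size 1 on the feature axis: the mask is broadcast across
        # features, so whole vectors are kept or dropped together.
        shape = tf.shape(inputs)
        return (shape[0], shape[1], 1)

layer = TimestepDropout(0.5)
out = layer(tf.ones((2, 4, 8)), training=True)
# Each (batch, timestep) row of `out` is either all zeros or scaled by 2.
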
<ide><path>keras/layers/regularization/dropout_test.py
<ide> def test_dropout_with_savemodel(self):
<ide> model = keras.Model(inputs, outputs)
<ide> train = model(np.ones((20, 5, 10)), training=True)
<ide> predict = model(np.ones((20, 5, 10)))
<del> # Make sure the weights from tf.random.Generator is not present in the model
<del> # which will cause weight loading issue for existing application models if
<del> # it contains dropout layer.
<add> # Make sure the weights from tf.random.Generator is not present in the
<add> # model which will cause weight loading issue for existing application
<add> # models if it contains dropout layer.
<ide> self.assertEmpty(layer.get_weights())
<ide> self.assertEmpty(model.get_weights())
<ide>
<ide><path>keras/layers/regularization/spatial_dropout2d.py
<ide> class SpatialDropout2D(Dropout):
<ide>
<ide> Args:
<ide> rate: Float between 0 and 1. Fraction of the input units to drop.
<del> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode,
<del> the channels dimension (the depth) is at index 1, in 'channels_last' mode
<del> is it at index 3. It defaults to the `image_data_format` value found in
<del> your Keras config file at `~/.keras/keras.json`. If you never set it, then
<del> it will be "channels_last".
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first'
<add> mode, the channels dimension (the depth) is at index 1, in
<add>      'channels_last' mode it is at index 3. It defaults to the
<add> `image_data_format` value found in your Keras config file at
<add> `~/.keras/keras.json`. If you never set it, then it will be
<add> "channels_last".
<ide> Call arguments:
<ide> inputs: A 4D tensor.
<ide> training: Python boolean indicating whether the layer should behave in
<ide><path>keras/layers/regularization/spatial_dropout3d.py
<ide> class SpatialDropout3D(Dropout):
<ide>
<ide> Args:
<ide> rate: Float between 0 and 1. Fraction of the input units to drop.
<del> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode,
<del> the channels dimension (the depth) is at index 1, in 'channels_last' mode
<del> is it at index 4. It defaults to the `image_data_format` value found in
<del> your Keras config file at `~/.keras/keras.json`. If you never set it, then
<del> it will be "channels_last".
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first'
<add> mode, the channels dimension (the depth) is at index 1, in
<add>      'channels_last' mode it is at index 4. It defaults to the
<add> `image_data_format` value found in your Keras config file at
<add> `~/.keras/keras.json`. If you never set it, then it will be
<add> "channels_last".
<ide> Call arguments:
<ide> inputs: A 5D tensor.
<ide> training: Python boolean indicating whether the layer should behave in
<ide><path>keras/layers/reshaping/cropping3d.py
<ide> class Cropping3D(Layer):
<ide> Input shape:
<ide> 5D tensor with shape:
<ide> - If `data_format` is `"channels_last"`:
<del> `(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
<del> depth)`
<add> `(batch_size, first_axis_to_crop, second_axis_to_crop,
<add> third_axis_to_crop, depth)`
<ide> - If `data_format` is `"channels_first"`:
<ide> `(batch_size, depth, first_axis_to_crop, second_axis_to_crop,
<ide> third_axis_to_crop)`
<ide>
<ide> Output shape:
<ide> 5D tensor with shape:
<ide> - If `data_format` is `"channels_last"`:
<del> `(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis,
<del> depth)`
<add> `(batch_size, first_cropped_axis, second_cropped_axis,
<add> third_cropped_axis, depth)`
<ide> - If `data_format` is `"channels_first"`:
<ide> `(batch_size, depth, first_cropped_axis, second_cropped_axis,
<ide> third_cropped_axis)`
<ide> def __init__(
<ide> raise ValueError(
<ide> "`cropping` should be either an int, "
<ide> "a tuple of 3 ints "
<del> "(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), "
<add> "(symmetric_dim1_crop, symmetric_dim2_crop, "
<add> "symmetric_dim3_crop), "
<ide> "or a tuple of 3 tuples of 2 ints "
<ide> "((left_dim1_crop, right_dim1_crop),"
<ide> " (left_dim2_crop, right_dim2_crop),"
<ide> def call(self, inputs):
<ide> :,
<ide> self.cropping[0][0] : -self.cropping[0][1],
<ide> self.cropping[1][0] : -self.cropping[1][1],
<del> self.cropping[2][
<del> 0
<del> ] : -self.cropping[ # pylint: disable=invalid-unary-operand-type
<del> 2
<del> ][
<del> 1
<del> ],
<add> self.cropping[2][0] : -self.cropping[2][1],
<ide> :,
<ide> ] # pylint: disable=invalid-unary-operand-type
<ide> # pylint: enable=invalid-unary-operand-type
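
A quick shape check for the slicing above (assuming TensorFlow 2.x and
channels_last data; the sizes are arbitrary):

import tensorflow as tf

# ((1, 1), (2, 2), (3, 1)): crop 1 plane from each side of dim1, 2 from
# each side of dim2, and 3/1 planes from the start/end of dim3.
layer = tf.keras.layers.Cropping3D(cropping=((1, 1), (2, 2), (3, 1)))
x = tf.zeros((4, 10, 12, 14, 3))
print(layer(x).shape)  # (4, 8, 8, 10, 3)
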
<ide><path>keras/layers/reshaping/flatten.py
<ide> def call(self, inputs):
<ide>
<ide> if tf.executing_eagerly():
<ide> # Full static shape is guaranteed to be available.
<del> # Performance: Using `constant_op` is much faster than passing a list.
<add> # Performance: Using `constant_op` is much faster than passing a
<add> # list.
<ide> flattened_shape = tf.constant([inputs.shape[0], -1])
<ide> return tf.reshape(inputs, flattened_shape)
<ide> else:
<ide> def call(self, inputs):
<ide> else:
<ide> batch_dim = tf.compat.dimension_value(input_shape[0])
<ide> non_batch_dims = input_shape[1:]
<del> # Reshape in a way that preserves as much shape info as possible.
<add> # Reshape in a way that preserves as much shape info as
<add> # possible.
<ide> if non_batch_dims.is_fully_defined():
<ide> last_dim = int(
<ide> functools.reduce(operator.mul, non_batch_dims)
<ide><path>keras/layers/reshaping/permute.py
<ide> def __init__(self, dims, **kwargs):
<ide> if sorted(dims) != list(range(1, len(dims) + 1)):
<ide> raise ValueError(
<ide> "Invalid permutation argument `dims` for Permute Layer. "
<del> "The set of indices in `dims` must be consecutive and start from 1. "
<del> f"Received dims={dims}"
<add>                "The set of indices in `dims` must be consecutive and start "
<add>                f"from 1. Received dims={dims}"
<ide> )
<ide> self.input_spec = InputSpec(ndim=len(self.dims) + 1)
<ide>
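
A small sketch of the constraint enforced above (assuming TensorFlow 2.x):
`dims` must be a permutation of the non-batch axes, indexed from 1:

import tensorflow as tf

layer = tf.keras.layers.Permute((2, 1))  # swap the two non-batch axes
print(layer(tf.zeros((32, 10, 64))).shape)  # (32, 64, 10)

# Permute((0, 1)) would raise the ValueError above, since index 0 (the
# batch axis) is not allowed and the indices must start from 1.
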
<ide><path>keras/layers/reshaping/reshape.py
<ide> class Reshape(Layer):
<ide>
<ide> Input shape:
<ide> Arbitrary, although all dimensions in the input shape must be known/fixed.
<del> Use the keyword argument `input_shape` (tuple of integers, does not include
<del> the samples/batch size axis) when using this layer as the first layer
<del> in a model.
<add> Use the keyword argument `input_shape` (tuple of integers, does not
<add> include the samples/batch size axis) when using this layer as the first
<add> layer in a model.
<ide>
<ide> Output shape:
<ide> `(batch_size,) + target_shape`
<ide> def _fix_unknown_dimension(self, input_shape, output_shape):
<ide>
<ide> Args:
<ide> input_shape: Shape of array being reshaped
<del> output_shape: Desired shape of the array with at most a single -1 which
<del> indicates a dimension that should be derived from the input shape.
<add> output_shape: Desired shape of the array with at most a single -1
<add> which indicates a dimension that should be derived from the input
<add> shape.
<ide>
<ide> Returns:
<ide> The new output shape with a -1 replaced with its computed value.
<ide> def _fix_unknown_dimension(self, input_shape, output_shape):
<ide> unknown = index
<ide> else:
<ide> raise ValueError(
<del> f"There must be at most one unknown dimension in output_shape. "
<del> f"Received: output_shape={output_shape}."
<add>                    "There must be at most one unknown dimension in "
<add> f"output_shape. Received: output_shape={output_shape}."
<ide> )
<ide> else:
<ide> known *= dim
<ide> def compute_output_shape(self, input_shape):
<ide> def call(self, inputs):
<ide> result = tf.reshape(inputs, (tf.shape(inputs)[0],) + self.target_shape)
<ide> if not tf.executing_eagerly():
<del> # Set the static shape for the result since it might lost during array_ops
<del> # reshape, eg, some `None` dim in the result could be inferred.
<add>            # Set the static shape for the result since it might be lost
<add>            # during array_ops reshape, e.g., some `None` dim in the result
<add>            # could be inferred.
<ide> result.set_shape(self.compute_output_shape(inputs.shape))
<ide> return result
<ide>
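
A worked example of the unknown-dimension inference through the public API
(assuming TensorFlow 2.x): the single -1 entry is derived from the known
sizes:

import tensorflow as tf

x = tf.zeros((2, 3, 4))                   # 12 elements per sample
layer = tf.keras.layers.Reshape((6, -1))  # -1 is inferred as 12 // 6 = 2
print(layer(x).shape)                     # (2, 6, 2)

# Reshape((-1, -1)) would raise the "at most one unknown dimension"
# error above; Reshape((5, -1)) fails because 12 is not divisible by 5.
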
<ide><path>keras/layers/reshaping/up_sampling2d.py
<ide> class UpSampling2D(Layer):
<ide> Keras config file at `~/.keras/keras.json`.
<ide> If you never set it, then it will be "channels_last".
<ide> interpolation: A string, one of `"area"`, `"bicubic"`, `"bilinear"`,
<del> `"gaussian"`, `"lanczos3"`, `"lanczos5"`, `"mitchellcubic"`, `"nearest"`.
<add> `"gaussian"`, `"lanczos3"`, `"lanczos5"`, `"mitchellcubic"`,
<add> `"nearest"`.
<ide>
<ide> Input shape:
<ide> 4D tensor with shape:
<ide><path>keras/layers/reshaping/up_sampling3d.py
<ide> class UpSampling3D(Layer):
<ide> Output shape:
<ide> 5D tensor with shape:
<ide> - If `data_format` is `"channels_last"`:
<del> `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
<add> `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3,
<add> channels)`
<ide> - If `data_format` is `"channels_first"`:
<del> `(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
<add> `(batch_size, channels, upsampled_dim1, upsampled_dim2,
<add> upsampled_dim3)`
<ide> """
<ide>
<ide> def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
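
A shape check for the upsampling factors (assuming TensorFlow 2.x,
channels_last data): each spatial dimension is multiplied by the matching
`size` entry:

import tensorflow as tf

layer = tf.keras.layers.UpSampling3D(size=(2, 2, 3))
x = tf.zeros((1, 4, 5, 6, 8))  # (batch, dim1, dim2, dim3, channels)
print(layer(x).shape)          # (1, 8, 10, 18, 8)
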
<ide><path>keras/layers/reshaping/zero_padding3d.py
<ide> class ZeroPadding3D(Layer):
<ide> Input shape:
<ide> 5D tensor with shape:
<ide> - If `data_format` is `"channels_last"`:
<del> `(batch_size, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
<del> depth)`
<add> `(batch_size, first_axis_to_pad, second_axis_to_pad,
<add> third_axis_to_pad, depth)`
<ide> - If `data_format` is `"channels_first"`:
<ide> `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,
<del> third_axis_to_pad)`
<add> third_axis_to_pad)`
<ide>
<ide> Output shape:
<ide> 5D tensor with shape:
<ide> - If `data_format` is `"channels_last"`:
<del> `(batch_size, first_padded_axis, second_padded_axis, third_axis_to_pad,
<del> depth)`
<add> `(batch_size, first_padded_axis, second_padded_axis,
<add> third_axis_to_pad, depth)`
<ide> - If `data_format` is `"channels_first"`:
<ide> `(batch_size, depth, first_padded_axis, second_padded_axis,
<ide> third_axis_to_pad)`
<ide><path>keras/layers/rnn/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras recurrent layers."""
<del># pylint: disable=g-bad-import-order,g-direct-tensorflow-import,disable=g-import-not-at-top
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/rnn/abstract_rnn_cell.py
<ide> def call(self, inputs, states):
<ide> """The function that contains the logic for one RNN step calculation.
<ide>
<ide> Args:
<del> inputs: the input tensor, which is a slide from the overall RNN input by
<del> the time dimension (usually the second dimension).
<add>          inputs: the input tensor, which is a slice from the overall RNN input
<add>            along the time dimension (usually the second dimension).
<ide> states: the state tensor from previous step, which has the same shape
<ide> as `(batch, state_size)`. In the case of timestep 0, it will be the
<ide> initial state user specified, or zero filled tensor otherwise.
<ide> def call(self, inputs, states):
<ide> def state_size(self):
<ide> """size(s) of state(s) used by this cell.
<ide>
<del> It can be represented by an Integer, a TensorShape or a tuple of Integers
<del> or TensorShapes.
<add> It can be represented by an Integer, a TensorShape or a tuple of
<add> Integers or TensorShapes.
<ide> """
<ide> raise NotImplementedError
<ide>
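
A minimal sketch of the cell contract described above (assuming
TensorFlow 2.x; the cell itself is invented for illustration): implement
`state_size` plus a `call(inputs, states)` that returns
`(output_at_t, states_at_t_plus_1)`, then wrap it in `tf.keras.layers.RNN`:

import tensorflow as tf

class MinimalCell(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.state_size = units   # single state of shape (batch, units)
        self.output_size = units

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name="kernel", shape=(input_shape[-1], self.units))

    def call(self, inputs, states):
        # `inputs` is one time slice; `states[0]` is the previous state
        # (zero-filled at timestep 0 when no initial state is given).
        output = tf.matmul(inputs, self.kernel) + states[0]
        return output, [output]

layer = tf.keras.layers.RNN(MinimalCell(8))
print(layer(tf.zeros((2, 5, 4))).shape)  # (2, 8)
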
<ide><path>keras/layers/rnn/base_conv_lstm.py
<ide> class ConvLSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
<ide>
<ide> Args:
<ide> rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions.
<del> filters: Integer, the dimensionality of the output space (i.e. the number of
<del> output filters in the convolution).
<add> filters: Integer, the dimensionality of the output space (i.e. the number
<add> of output filters in the convolution).
<ide> kernel_size: An integer or tuple/list of n integers, specifying the
<ide> dimensions of the convolution window.
<ide> strides: An integer or tuple/list of n integers, specifying the strides of
<ide> the convolution. Specifying any stride value != 1 is incompatible with
<ide> specifying any `dilation_rate` value != 1.
<del> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no
<del> padding. `"same"` results in padding evenly to the left/right or up/down
<del> of the input such that output has the same height/width dimension as the
<del> input.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> It defaults to the `image_data_format` value found in your Keras config
<del> file at `~/.keras/keras.json`. If you never set it, then it will be
<del> "channels_last".
<add> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
<add> no padding. `"same"` results in padding evenly to the left/right or
<add> up/down of the input such that output has the same height/width
<add> dimension as the input.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. It defaults to the `image_data_format` value found in
<add> your Keras config file at `~/.keras/keras.json`. If you never set it,
<add> then it will be "channels_last".
<ide> dilation_rate: An integer or tuple/list of n integers, specifying the
<ide> dilation rate to use for dilated convolution. Currently, specifying any
<ide> `dilation_rate` value != 1 is incompatible with specifying any `strides`
<ide> class ConvLSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
<ide> recurrent_initializer: Initializer for the `recurrent_kernel` weights
<ide> matrix, used for the linear transformation of the recurrent state.
<ide> bias_initializer: Initializer for the bias vector.
<del> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at
<del> initialization. Use in combination with `bias_initializer="zeros"`. This
<del> is recommended in [Jozefowicz et al., 2015](
<del> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<add> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
<add> at initialization. Use in combination with `bias_initializer="zeros"`.
<add> This is recommended in [Jozefowicz et al., 2015](
<add> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> matrix.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> `recurrent_kernel` weights matrix.
<ide> bias_regularizer: Regularizer function applied to the bias vector.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix.
<ide> bias_constraint: Constraint function applied to the bias vector.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state.
<ide> Call arguments:
<ide> inputs: A (2+ `rank`)D tensor.
<ide> states: List of state tensors corresponding to the previous timestep.
<ide> def build(self, input_shape):
<ide> channel_axis = -1
<ide> if input_shape[channel_axis] is None:
<ide> raise ValueError(
<del> "The channel dimension of the inputs (last axis) should be defined. "
<del> f"Found None. Full input shape received: input_shape={input_shape}"
<add> "The channel dimension of the inputs (last axis) should be "
<add> "defined. Found None. Full input shape received: "
<add> f"input_shape={input_shape}"
<ide> )
<ide> input_dim = input_shape[channel_axis]
<ide> self.kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
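
The cell is normally used through the ConvLSTM layers rather than
directly; a usage sketch (assuming TensorFlow 2.x, channels_last data):

import tensorflow as tf

layer = tf.keras.layers.ConvLSTM2D(
    filters=16, kernel_size=(3, 3), padding="same")
x = tf.zeros((2, 10, 32, 32, 1))  # (batch, time, rows, cols, channels)
print(layer(x).shape)             # (2, 32, 32, 16) - last timestep only
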
<ide><path>keras/layers/rnn/base_conv_rnn.py
<ide> class ConvRNN(RNN):
<ide> `call(input_at_t, states_at_t)` method, returning `(output_at_t,
<ide> states_at_t_plus_1)`. The call method of the cell can also take the
<ide> optional argument `constants`, see section "Note on passing external
<del> constants" below. - a `state_size` attribute. This can be a single integer
<del> (single state) in which case it is the number of channels of the recurrent
<del> state (which should be the same as the number of channels of the cell
<del> output). This can also be a list/tuple of integers (one size per state).
<del> In this case, the first entry (`state_size[0]`) should be the same as the
<del> size of the cell output.
<del> return_sequences: Boolean. Whether to return the last output. in the output
<del> sequence, or the full sequence.
<add> constants" below. - a `state_size` attribute. This can be a single
<add> integer (single state) in which case it is the number of channels of the
<add> recurrent state (which should be the same as the number of channels of
<add> the cell output). This can also be a list/tuple of integers (one size
<add> per state). In this case, the first entry (`state_size[0]`) should be
<add> the same as the size of the cell output.
<add>      return_sequences: Boolean. Whether to return the last output in the
<add>        output sequence, or the full sequence.
<ide> return_state: Boolean. Whether to return the last state in addition to the
<ide> output.
<ide> go_backwards: Boolean (default False). If True, process the input sequence
<ide> class ConvRNN(RNN):
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. This argument is passed to the cell
<ide> when calling it. This is for use with cells that use dropout.
<del> initial_state: List of initial state tensors to be passed to the first call
<del> of the cell.
<add> initial_state: List of initial state tensors to be passed to the first
<add> call of the cell.
<ide> constants: List of constant tensors to be passed to the cell at each
<ide> timestep.
<ide> Input shape:
<ide> class ConvRNN(RNN):
<ide> if data_format='channels_first' or shape: `(samples, timesteps,
<ide> img_dimensions..., channels)` if data_format='channels_last'.
<ide> Output shape:
<del> - If `return_state`: a list of tensors. The first tensor is the output. The
<del> remaining tensors are the last states,
<add> - If `return_state`: a list of tensors. The first tensor is the output.
<add> The remaining tensors are the last states,
<ide> each (2 + `rank`)D tensor with shape: `(samples, filters,
<ide> new_img_dimensions...)` if data_format='channels_first'
<ide> or shape: `(samples, new_img_dimensions..., filters)` if
<del> data_format='channels_last'. img_dimension values might have changed due
<del> to padding.
<add> data_format='channels_last'. img_dimension values might have changed
<add> due to padding.
<ide> - If `return_sequences`: (3 + `rank`)D tensor with shape: `(samples,
<ide> timesteps, filters, new_img_dimensions...)` if
<ide> data_format='channels_first'
<ide> class ConvRNN(RNN):
<ide> new_img_dimensions...)` if data_format='channels_first'
<ide> or shape: `(samples, new_img_dimensions..., filters)` if
<ide> data_format='channels_last'.
<del> Masking: This layer supports masking for input data with a variable number of
<del> timesteps.
<add> Masking: This layer supports masking for input data with a variable number
<add> of timesteps.
<ide> Note on using statefulness in RNNs: You can set RNN layers to be 'stateful',
<ide> which means that the states computed for the samples in one batch will be
<ide> reused as initial states for the samples in the next batch. This assumes a
<ide> one-to-one mapping between samples in different successive batches.
<del> To enable statefulness: - Specify `stateful=True` in the layer constructor.
<add> To enable statefulness: - Specify `stateful=True` in the layer
<add> constructor.
<ide> - Specify a fixed batch size for your model, by passing
<del> - If sequential model: `batch_input_shape=(...)` to the first layer in
<del> your model.
<del> - If functional model with 1 or more Input layers: `batch_shape=(...)`
<del> to all the first layers in your model. This is the expected shape of
<del> your inputs *including the batch size*. It should be a tuple of
<del> integers, e.g. `(32, 10, 100, 100, 32)`. for rank 2 convolution Note
<del> that the image dimensions should be specified too. - Specify
<del> `shuffle=False` when calling fit(). To reset the states of your
<del> model, call `.reset_states()` on either a specific layer, or on your
<del> entire model.
<add> - If sequential model: `batch_input_shape=(...)` to the first layer
<add> in your model.
<add> - If functional model with 1 or more Input layers:
<add> `batch_shape=(...)` to all the first layers in your model. This is
<add> the expected shape of your inputs *including the batch size*. It
<add>          should be a tuple of integers, e.g. `(32, 10, 100, 100, 32)` for
<add>          rank 2 convolution. Note that the image dimensions should be
<add> specified too. - Specify `shuffle=False` when calling fit(). To
<add> reset the states of your model, call `.reset_states()` on either a
<add> specific layer, or on your entire model.
<ide> Note on specifying the initial state of RNNs: You can specify the initial
<ide> state of RNN layers symbolically by calling them with the keyword argument
<del> `initial_state`. The value of `initial_state` should be a tensor or list of
<del> tensors representing the initial state of the RNN layer. You can specify the
<del> initial state of RNN layers numerically by calling `reset_states` with the
<del> keyword argument `states`. The value of `states` should be a numpy array or
<del> list of numpy arrays representing the initial state of the RNN layer.
<del> Note on passing external constants to RNNs: You can pass "external" constants
<del> to the cell using the `constants` keyword argument of `RNN.__call__` (as
<del> well as `RNN.call`) method. This requires that the `cell.call` method
<del> accepts the same keyword argument `constants`. Such constants can be used to
<del> condition the cell transformation on additional static inputs (not changing
<del> over time), a.k.a. an attention mechanism.
<add> `initial_state`. The value of `initial_state` should be a tensor or list
<add> of tensors representing the initial state of the RNN layer. You can
<add> specify the initial state of RNN layers numerically by calling
<add> `reset_states` with the keyword argument `states`. The value of `states`
<add> should be a numpy array or list of numpy arrays representing the initial
<add> state of the RNN layer.
<add> Note on passing external constants to RNNs: You can pass "external"
<add> constants to the cell using the `constants` keyword argument of
<add> `RNN.__call__` (as well as `RNN.call`) method. This requires that the
<add> `cell.call` method accepts the same keyword argument `constants`. Such
<add> constants can be used to condition the cell transformation on additional
<add> static inputs (not changing over time), a.k.a. an attention mechanism.
<ide> """
<ide>
<ide> def __init__(
<ide> def compute_output_shape(self, input_shape):
<ide>
<ide> norm_img_dims = tuple(
<ide> [
<del> conv_utils.conv_output_length( # pylint: disable=g-complex-comprehension
<add> conv_utils.conv_output_length(
<ide> img_dims[idx],
<ide> cell.kernel_size[idx],
<ide> padding=cell.padding,
<ide> def get_tuple_shape(nb_channels):
<ide> dim = self.cell.state_size
<ide> if value.shape != get_tuple_shape(dim):
<ide> raise ValueError(
<del> f"State {index} is incompatible with layer {self.name}: "
<del> f"expected shape={get_tuple_shape(dim)}, "
<add>                    f"State {index} is incompatible with layer "
<add> f"{self.name}: expected shape={get_tuple_shape(dim)}, "
<ide> f"found shape={value.shape}"
<ide> )
<ide> backend.set_value(state, value)
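
A sketch of the statefulness recipe from the docstring above (assuming
TensorFlow 2.x; the sizes are arbitrary): fix the batch size via
`batch_input_shape`, then reset states explicitly between sequences:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.ConvLSTM2D(
        8, (3, 3), padding="same", stateful=True,
        batch_input_shape=(4, 10, 16, 16, 1)),  # fixed batch size of 4
])
model.predict(np.zeros((4, 10, 16, 16, 1)))  # states carry over...
model.reset_states()                         # ...until reset explicitly
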
<ide><path>keras/layers/rnn/base_cudnn_rnn.py
<ide> class _CuDNNRNN(RNN):
<ide> stateful: Boolean (default False). If True, the last state
<ide> for each sample at index i in a batch will be used as initial
<ide> state for the sample of index i in the following batch.
<del> time_major: Boolean (default False). If true, the inputs and outputs will be
<del> in shape `(timesteps, batch, ...)`, whereas in the False case, it will
<del> be `(batch, timesteps, ...)`.
<add> time_major: Boolean (default False). If true, the inputs and outputs will
<add> be in shape `(timesteps, batch, ...)`, whereas in the False case, it
<add> will be `(batch, timesteps, ...)`.
<ide> """
<ide>
<ide> def __init__(
<ide><path>keras/layers/rnn/base_rnn.py
<ide> class RNN(base_layer.Layer):
<ide> for each sample at index i in a batch will be used as initial
<ide> state for the sample of index i in the following batch.
<ide> unroll: Boolean (default `False`).
<del> If True, the network will be unrolled, else a symbolic loop will be used.
<del> Unrolling can speed-up a RNN, although it tends to be more
<add> If True, the network will be unrolled, else a symbolic loop will be
<add> used. Unrolling can speed-up a RNN, although it tends to be more
<ide> memory-intensive. Unrolling is only suitable for short sequences.
<ide> time_major: The shape format of the `inputs` and `outputs` tensors.
<ide> If True, the inputs and outputs will be in shape
<ide> def __init__(
<ide> f"Received: cell={cell}"
<ide> )
<ide> # If True, the output for masked timestep will be zeros, whereas in the
<del> # False case, output from previous timestep is returned for masked timestep.
<add> # False case, output from previous timestep is returned for masked
<add> # timestep.
<ide> self.zero_output_for_mask = kwargs.pop("zero_output_for_mask", False)
<ide>
<ide> if "input_shape" not in kwargs and (
<ide> def __init__(
<ide> self.time_major = time_major
<ide>
<ide> self.supports_masking = True
<del> # The input shape is unknown yet, it could have nested tensor inputs, and
<del> # the input spec will be the list of specs for nested inputs, the structure
<del> # of the input_spec will be the same as the input.
<add>        # The input shape is not known yet; it could have nested tensor
<add>        # inputs, and the input spec will be the list of specs for nested
<add>        # inputs; the structure of the input_spec will be the same as the
<add>        # input.
<ide> self.input_spec = None
<ide> self.state_spec = None
<ide> self._states = None
<ide> def __init__(
<ide> @property
<ide> def _use_input_spec_as_call_signature(self):
<ide> if self.unroll:
<del> # When the RNN layer is unrolled, the time step shape cannot be unknown.
<del> # The input spec does not define the time step (because this layer can be
<del> # called with any time step value, as long as it is not None), so it
<del> # cannot be used as the call function signature when saving to SavedModel.
<add> # When the RNN layer is unrolled, the time step shape cannot be
<add> # unknown. The input spec does not define the time step (because
<add> # this layer can be called with any time step value, as long as it
<add> # is not None), so it cannot be used as the call function signature
<add> # when saving to SavedModel.
<ide> return False
<ide> return super()._use_input_spec_as_call_signature
<ide>
<ide> def compute_output_shape(self, input_shape):
<ide> if isinstance(input_shape, list):
<ide> input_shape = input_shape[0]
<ide> # Check whether the input shape contains any nested shapes. It could be
<del> # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
<del> # inputs.
<add> # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from
<add> # numpy inputs.
<ide> try:
<ide> input_shape = tf.TensorShape(input_shape)
<ide> except (ValueError, TypeError):
<ide> def build(self, input_shape):
<ide> input_shape = input_shape[0]
<ide> # The input_shape here could be a nest structure.
<ide>
<del> # do the tensor_shape to shapes here. The input could be single tensor, or a
<del> # nested structure of tensors.
<add> # do the tensor_shape to shapes here. The input could be single tensor,
<add> # or a nested structure of tensors.
<ide> def get_input_spec(shape):
<ide> """Convert input shape to InputSpec."""
<ide> if isinstance(shape, tf.TensorShape):
<ide> def get_state_spec(shape):
<ide> return InputSpec(shape=tuple(state_spec_shape))
<ide>
<ide> # Check whether the input shape contains any nested shapes. It could be
<del> # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
<del> # inputs.
<add> # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from
<add> # numpy inputs.
<ide> try:
<ide> input_shape = tf.TensorShape(input_shape)
<ide> except (ValueError, TypeError):
<ide> def _validate_state_spec(cell_state_sizes, init_state_specs):
<ide>
<ide> Args:
<ide> cell_state_sizes: list, the `state_size` attribute from the cell.
<del> init_state_specs: list, the `state_spec` from the initial_state that is
<del> passed in `call()`.
<add> init_state_specs: list, the `state_spec` from the initial_state that
<add> is passed in `call()`.
<ide>
<ide> Raises:
<del> ValueError: When initial state spec is not compatible with the state size.
<add> ValueError: When initial state spec is not compatible with the state
<add> size.
<ide> """
<ide> validation_error = ValueError(
<ide> "An `initial_state` was passed that is not compatible with "
<ide> def get_initial_state(self, inputs):
<ide> get_initial_state_fn = getattr(self.cell, "get_initial_state", None)
<ide>
<ide> if tf.nest.is_nested(inputs):
<del> # The input are nested sequences. Use the first element in the seq to get
<del> # batch size and dtype.
<add>            # The input is a nested sequence. Use the first element in the
<add>            # seq to get the batch size and dtype.
<ide> inputs = tf.nest.flatten(inputs)[0]
<ide>
<ide> input_shape = tf.shape(inputs)
<ide> def get_initial_state(self, inputs):
<ide> init_state = rnn_utils.generate_zero_filled_state(
<ide> batch_size, self.cell.state_size, dtype
<ide> )
<del> # Keras RNN expect the states in a list, even if it's a single state tensor.
<add>        # Keras RNNs expect the states in a list, even if it's a single state
<add> # tensor.
<ide> if not tf.nest.is_nested(init_state):
<ide> init_state = [init_state]
<del> # Force the state to be a list in case it is a namedtuple eg LSTMStateTuple.
<add>        # Force the state to be a list in case it is a namedtuple, e.g.
<add> # LSTMStateTuple.
<ide> return list(init_state)
<ide>
<ide> def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
<ide> def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
<ide> ]
<ide> self._num_constants = len(constants)
<ide> additional_specs += self.constants_spec
<del> # additional_inputs can be empty if initial_state or constants are provided
<del> # but empty (e.g. the cell is stateless).
<add> # additional_inputs can be empty if initial_state or constants are
<add> # provided but empty (e.g. the cell is stateless).
<ide> flat_additional_inputs = tf.nest.flatten(additional_inputs)
<ide> is_keras_tensor = (
<ide> backend.is_keras_tensor(flat_additional_inputs[0])
<ide> def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
<ide> if backend.is_keras_tensor(tensor) != is_keras_tensor:
<ide> raise ValueError(
<ide> "The initial state or constants of an RNN layer cannot be "
<del> "specified via a mix of Keras tensors and non-Keras tensors "
<del> '(a "Keras tensor" is a tensor that was returned by a Keras layer '
<del> " or by `Input` during Functional model construction). "
<del> f"Received: initial_state={initial_state}, constants={constants}"
<add> "specified via a mix of Keras tensors and non-Keras "
<add> 'tensors (a "Keras tensor" is a tensor that was returned '
<add> "by a Keras layer or by `Input` during Functional "
<add> "model construction). Received: "
<add> f"initial_state={initial_state}, constants={constants}"
<ide> )
<ide>
<ide> if is_keras_tensor:
<ide> # Compute the full input spec, including state and constants
<ide> full_input = [inputs] + additional_inputs
<ide> if self.built:
<del> # Keep the input_spec since it has been populated in build() method.
<add> # Keep the input_spec since it has been populated in build()
<add> # method.
<ide> full_input_spec = self.input_spec + additional_specs
<ide> else:
<del> # The original input_spec is None since there could be a nested tensor
<del> # input. Update the input_spec to match the inputs.
<add> # The original input_spec is None since there could be a nested
<add> # tensor input. Update the input_spec to match the inputs.
<ide> full_input_spec = (
<ide> generic_utils.to_list(
<ide> tf.nest.map_structure(lambda _: None, inputs)
<ide> def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
<ide> # Perform the call with temporarily replaced input_spec
<ide> self.input_spec = full_input_spec
<ide> output = super().__call__(full_input, **kwargs)
<del> # Remove the additional_specs from input spec and keep the rest. It is
<del> # important to keep since the input spec was populated by build(), and
<del> # will be reused in the stateful=True.
<add> # Remove the additional_specs from input spec and keep the rest. It
<add> # is important to keep since the input spec was populated by
<add> # build(), and will be reused in the stateful=True.
<ide> self.input_spec = self.input_spec[: -len(additional_specs)]
<ide> return output
<ide> else:
<ide> def call(
<ide> mask = tf.nest.flatten(mask)[0]
<ide>
<ide> if tf.nest.is_nested(inputs):
<del> # In the case of nested input, use the first element for shape check.
<add> # In the case of nested input, use the first element for shape
<add> # check.
<ide> input_shape = backend.int_shape(tf.nest.flatten(inputs)[0])
<ide> else:
<ide> input_shape = backend.int_shape(inputs)
<ide> def call(
<ide> if generic_utils.has_arg(self.cell.call, "training"):
<ide> kwargs["training"] = training
<ide>
<del> # TF RNN cells expect single tensor as state instead of list wrapped tensor.
<add>        # TF RNN cells expect a single tensor as state instead of a
<add>        # list-wrapped tensor.
<ide> is_tf_rnn_cell = getattr(self.cell, "_is_tf_rnn_cell", None) is not None
<ide> # Use the __call__ function for callable objects, eg layers, so that it
<ide> # will have the proper name scopes for the ops, etc.
<ide> def _process_inputs(self, inputs, initial_state, constants):
<ide>
<ide> if self.stateful:
<ide> if initial_state is not None:
<del> # When layer is stateful and initial_state is provided, check if the
<del> # recorded state is same as the default value (zeros). Use the recorded
<del> # state if it is not same as the default.
<add> # When the layer is stateful and initial_state is provided, check
<add> # if the recorded state is the same as the default value (zeros).
<add> # Use the recorded state if it is not the same as the default.
<ide> non_zero_count = tf.add_n(
<ide> [
<ide> tf.math.count_nonzero(s)
<ide> def _process_inputs(self, inputs, initial_state, constants):
<ide> else:
<ide> initial_state = self.states
<ide> initial_state = tf.nest.map_structure(
<del> # When the layer has a inferred dtype, use the dtype from the cell.
<add> # When the layer has an inferred dtype, use the dtype from the
<add> # cell.
<ide> lambda v: tf.cast(
<ide> v, self.compute_dtype or self.cell.compute_dtype
<ide> ),
<ide> def reset_states(self, states=None):
<ide>
<ide> Can only be used when RNN layer is constructed with `stateful` = `True`.
<ide> Args:
<del> states: Numpy arrays that contains the value for the initial state, which
<del> will be feed to cell at the first time step. When the value is None,
<del> zero filled numpy array will be created based on the cell state size.
<add> states: Numpy arrays that contain the values for the initial state,
<add> which will be fed to the cell at the first time step. When the value
<add> is None, a zero-filled numpy array will be created based on the cell
<add> state size.
<ide>
<ide> Raises:
<ide> AttributeError: When the RNN layer is not stateful.
<ide> def reset_states(self, states=None):
<ide> if self.input_spec is not None:
<ide> spec_shape = tf.nest.flatten(self.input_spec[0])[0].shape
<ide> if spec_shape is None:
<del> # It is possible to have spec shape to be None, eg when construct a RNN
<del> # with a custom cell, or standard RNN layers (LSTM/GRU) which we only know
<del> # it has 3 dim input, but not its full shape spec before build().
<add> # It is possible for the spec shape to be None, e.g. when
<add> # constructing an RNN with a custom cell, or standard RNN layers
<add> # (LSTM/GRU) for which we only know the input has 3 dims, but not
<add> # its full shape spec before build().
<ide> batch_size = None
<ide> else:
<ide> batch_size = spec_shape[1] if self.time_major else spec_shape[0]
<ide> def reset_states(self, states=None):
<ide> self.cell.get_initial_state(
<ide> inputs=None,
<ide> batch_size=batch_size,
<del> # Use variable_dtype instead of compute_dtype, since the state is
<del> # stored in a variable
<add> # Use variable_dtype instead of compute_dtype, since the
<add> # state is stored in a variable
<ide> dtype=self.variable_dtype or backend.floatx(),
<ide> )
<ide> )
<ide><path>keras/layers/rnn/base_rnn_test.py
<ide> def test_stacked_rnn_dropout(self, cell, unroll):
<ide>
<ide> def test_dropout_mask_reuse(self):
<ide> # The layer is created with recurrent_initializer = zero, so that the
<del> # the recurrent state won't affect the output. By doing this, we can verify
<del> # the output and see if the same mask is applied to for each timestep.
<add> # recurrent state won't affect the output. By doing this, we can
<add> # verify the output and see if the same mask is applied for each
<add> # timestep.
<ide> layer_1 = keras.layers.SimpleRNN(
<ide> 3,
<ide> dropout=0.5,
<ide> def test_zero_output_for_masking(self):
<ide> self.assertAllClose(result_1, result_2)
<ide>
<ide> def test_unroll_single_step(self):
<del> """Even if the time dimension is only one, we should be able to unroll."""
<add> """Even if the time dimension is only one, we should be able to
<add> unroll."""
<ide> cell = keras.layers.SimpleRNNCell(5)
<ide> x = keras.Input((1, 5))
<ide> layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
<ide> def make_model(stateful=False, with_initial_state=False):
<ide> model.reset_states()
<ide> predict_3 = model.predict(test_inputs)
<ide>
<del> # predict 1 and 2 should be different since the batch 2 should use the state
<del> # from batch 1 as the initial state.
<add> # predict 1 and 2 should be different since batch 2 should use the
<add> # state from batch 1 as the initial state.
<ide> self.assertNotAllClose(predict_1, predict_2)
<ide> self.assertAllClose(predict_1, predict_3)
<ide>
<del> # Create a new model with same weights but without initial states. Make sure
<del> # the predict value is different from the model with non-zero initial state.
<add> # Create a new model with same weights but without initial states. Make
<add> # sure the predict value is different from the model with non-zero
<add> # initial state.
<ide> model_2 = make_model(stateful=True, with_initial_state=False)
<ide> model_2.layers[1].set_weights(layer_weights)
<ide>
<ide> def make_model(stateful=False, with_initial_state=False):
<ide> self.assertNotAllClose(predict_1, predict_4)
<ide> self.assertNotAllClose(predict_4, predict_5)
<ide>
<del> # Create models with stateful=False, and make sure they handle init state
<del> # correctly.
<add> # Create models with stateful=False, and make sure they handle init
<add> # state correctly.
<ide> model_3 = make_model(stateful=False, with_initial_state=True)
<ide> model_3.layers[1].set_weights(layer_weights)
<ide>
<ide> def test_input_dim_length(self):
<ide> ]
<ide> )
<ide> def test_state_spec_with_stack_cell(self, cell):
<del> # See https://github.com/tensorflow/tensorflow/issues/27817 for more detail.
<add> # See https://github.com/tensorflow/tensorflow/issues/27817 for more
<add> # detail.
<ide> batch = 12
<ide> timesteps = 10
<ide> input_dim = 8
<ide> def test_rnn_with_ragged_input(self, layer):
<ide> dense_data = ragged_data.to_tensor()
<ide> output_dense = model_2.predict(dense_data, steps=1)
<ide>
<del> # Note that the raw output for dense and ragged input when go_backward=True
<del> # will be different. Consider following input
<add> # Note that the raw output for dense and ragged input when
<add> # go_backward=True will be different. Consider following input
<ide> # [[a, b, 0], [c, 0, 0], [d, e, f]] where 0s are masked value.
<del> # The dense output will be [[0, b, a], [0, 0, c], [f, e, d]] since it will
<del> # process the whole sequence from the end.
<del> # While ragged output will be [[b, a], [c], [f, e, d]] since it just ignore
<del> # the 0s. And if we densify the ragged output, it will by default inserting
<del> # 0s to the end (rather than from the beginning), which make the output to
<del> # be [[b, a, 0], [c, 0, 0], [f, e, d]]. With this, we need to verify that
<del> # reverse(ragged_output.to_tensor()) == reverse(dense_output)
<add> # The dense output will be [[0, b, a], [0, 0, c], [f, e, d]] since it
<add> # will process the whole sequence from the end.
<add> # While the ragged output will be [[b, a], [c], [f, e, d]] since it
<add> # just ignores the 0s. And if we densify the ragged output, it will by
<add> # default insert 0s at the end (rather than at the beginning), which
<add> # makes the output [[b, a, 0], [c, 0, 0], [f, e, d]]. With this, we need
<add> # to verify that reverse(ragged_output.to_tensor()) ==
<add> # reverse(dense_output)
<ide> output_dense = keras.backend.reverse(output_dense, [1])
<ide> output_dense = tf.RaggedTensor.from_tensor(
<ide> output_dense, lengths=row_lengths
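
For reference, the padding behavior that forces the reverse-and-re-ragg comparison above can be seen in isolation; a small sketch with made-up values:

```python
import tensorflow as tf

ragged = tf.ragged.constant([[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]])
print(ragged.to_tensor())
# [[1. 2. 0.]
#  [3. 0. 0.]
#  [4. 5. 6.]]
# to_tensor() appends zeros at the *end* of short rows, while a
# go_backwards RNN leaves its padding at the front of the dense output,
# so the test reverses and re-raggs before comparing.
```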
<ide><path>keras/layers/rnn/bidirectional.py
<ide> class Bidirectional(Wrapper):
<ide> Note that the recommended way to create new RNN layers is to write a
<ide> custom RNN cell and use it with `keras.layers.RNN`, instead of
<ide> subclassing `keras.layers.Layer` directly.
<del> - When the `returns_sequences` is true, the output of the masked timestep
<del> will be zero regardless of the layer's original `zero_output_for_mask`
<del> value.
<add> - When `return_sequences` is `True`, the output of the masked
<add> timestep will be zero regardless of the layer's original
<add> `zero_output_for_mask` value.
<ide> merge_mode: Mode by which outputs of the forward and backward RNNs will be
<ide> combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the
<ide> outputs will not be combined, they will be returned as a list. Default
<ide> class Bidirectional(Wrapper):
<ide>
<ide> ```python
<ide> model = Sequential()
<del> model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10)))
<add> model.add(Bidirectional(LSTM(10, return_sequences=True),
<add> input_shape=(5, 10)))
<ide> model.add(Bidirectional(LSTM(10)))
<ide> model.add(Dense(5))
<ide> model.add(Activation('softmax'))
<ide> model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
<ide>
<del> # With custom backward layer
<del> model = Sequential()
<del> forward_layer = LSTM(10, return_sequences=True)
<del> backward_layer = LSTM(10, activation='relu', return_sequences=True,
<del> go_backwards=True)
<del> model.add(Bidirectional(forward_layer, backward_layer=backward_layer,
<del> input_shape=(5, 10)))
<del> model.add(Dense(5))
<del> model.add(Activation('softmax'))
<del> model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
<add> # With custom backward layer
<add> model = Sequential()
<add> forward_layer = LSTM(10, return_sequences=True)
<add> backward_layer = LSTM(10, activation='relu', return_sequences=True,
<add> go_backwards=True)
<add> model.add(Bidirectional(forward_layer, backward_layer=backward_layer,
<add> input_shape=(5, 10)))
<add> model.add(Dense(5))
<add> model.add(Activation('softmax'))
<add> model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
<ide> ```
<ide> """
<ide>
<ide> def __init__(
<ide> )
<ide> if backward_layer is not None and not isinstance(backward_layer, Layer):
<ide> raise ValueError(
<del> "`backward_layer` need to be a `tf.keras.layers.Layer` instance. "
<del> f"Received: {backward_layer}"
<add> "`backward_layer` need to be a `tf.keras.layers.Layer` "
<add> f"instance. Received: {backward_layer}"
<ide> )
<ide> if merge_mode not in ["sum", "mul", "ave", "concat", None]:
<ide> raise ValueError(
<ide> f"Invalid merge mode. Received: {merge_mode}. "
<ide> "Merge mode should be one of "
<ide> '{"sum", "mul", "ave", "concat", None}'
<ide> )
<del> # We don't want to track `layer` since we're already tracking the two copies
<del> # of it we actually run.
<add> # We don't want to track `layer` since we're already tracking the two
<add> # copies of it we actually run.
<ide> self._setattr_tracking = False
<ide> super().__init__(layer, **kwargs)
<ide> self._setattr_tracking = True
<ide>
<del> # Recreate the forward layer from the original layer config, so that it will
<del> # not carry over any state from the layer.
<add> # Recreate the forward layer from the original layer config, so that it
<add> # will not carry over any state from the layer.
<ide> self.forward_layer = self._recreate_layer_from_config(layer)
<ide>
<ide> if backward_layer is None:
<ide> def __init__(
<ide> )
<ide> else:
<ide> self.backward_layer = backward_layer
<del> # Keep the custom backward layer config, so that we can save it later. The
<del> # layer's name might be updated below with prefix 'backward_', and we want
<del> # to preserve the original config.
<add> # Keep the custom backward layer config, so that we can save it
<add> # later. The layer's name might be updated below with prefix
<add> # 'backward_', and we want to preserve the original config.
<ide> self._backward_layer_config = generic_utils.serialize_keras_object(
<ide> backward_layer
<ide> )
<ide> def _verify_layer_config(self):
<ide> raise ValueError(
<ide> "Forward layer and backward layer should have different "
<ide> "`go_backwards` value."
<del> f"forward_layer.go_backwards = {self.forward_layer.go_backwards},"
<del> f"backward_layer.go_backwards = {self.backward_layer.go_backwards}"
<add> f"forward_layer.go_backwards = "
<add> f"{self.forward_layer.go_backwards},"
<add> f"backward_layer.go_backwards = "
<add> f"{self.backward_layer.go_backwards}"
<ide> )
<ide>
<ide> common_attributes = ("stateful", "return_sequences", "return_state")
<ide> def _verify_layer_config(self):
<ide> backward_value = getattr(self.backward_layer, a)
<ide> if forward_value != backward_value:
<ide> raise ValueError(
<del> "Forward layer and backward layer are expected to have the same "
<del> f'value for attribute "{a}", got "{forward_value}" for forward '
<del> f'layer and "{backward_value}" for backward layer'
<add> "Forward layer and backward layer are expected to have "
<add> f'the same value for attribute "{a}", got '
<add> f'"{forward_value}" for forward layer and '
<add> f'"{backward_value}" for backward layer'
<ide> )
<ide>
<ide> def _recreate_layer_from_config(self, layer, go_backwards=False):
<del> # When recreating the layer from its config, it is possible that the layer
<del> # is a RNN layer that contains custom cells. In this case we inspect the
<del> # layer and pass the custom cell class as part of the `custom_objects`
<del> # argument when calling `from_config`.
<del> # See https://github.com/tensorflow/tensorflow/issues/26581 for more detail.
<add> # When recreating the layer from its config, it is possible that the
<add> # layer is a RNN layer that contains custom cells. In this case we
<add> # inspect the layer and pass the custom cell class as part of the
<add> # `custom_objects` argument when calling `from_config`. See
<add> # https://github.com/tensorflow/tensorflow/issues/26581 for more detail.
<ide> config = layer.get_config()
<ide> if go_backwards:
<ide> config["go_backwards"] = not config["go_backwards"]
<ide> def compute_output_shape(self, input_shape):
<ide> return output_shape
<ide>
<ide> def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
<del> """`Bidirectional.__call__` implements the same API as the wrapped `RNN`."""
<add> """`Bidirectional.__call__` implements the same API as the wrapped
<add> `RNN`."""
<ide> inputs, initial_state, constants = rnn_utils.standardize_args(
<ide> inputs, initial_state, constants, self._num_constants
<ide> )
<ide> def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
<ide> if is_keras_tensor:
<ide> # Compute the full input spec, including state
<ide> full_input = [inputs] + additional_inputs
<del> # The original input_spec is None since there could be a nested tensor
<del> # input. Update the input_spec to match the inputs.
<add> # The original input_spec is None since there could be a nested
<add> # tensor input. Update the input_spec to match the inputs.
<ide> full_input_spec = [
<ide> None for _ in range(len(tf.nest.flatten(inputs)))
<ide> ] + additional_specs
<ide> def call(
<ide>
<ide> if generic_utils.has_arg(self.layer.call, "initial_state"):
<ide> if isinstance(inputs, list) and len(inputs) > 1:
<del> # initial_states are keras tensors, which means they are passed in
<del> # together with inputs as list. The initial_states need to be split into
<del> # forward and backward section, and be feed to layers accordingly.
<add> # initial_states are keras tensors, which means they are passed
<add> # in together with inputs as a list. The initial_states need to
<add> # be split into forward and backward sections, and be fed to the
<add> # layers accordingly.
<ide> forward_inputs = [inputs[0]]
<ide> backward_inputs = [inputs[0]]
<ide> pivot = (len(inputs) - self._num_constants) // 2 + 1
<ide> def call(
<ide> if "constants" in kwargs:
<ide> kwargs["constants"] = None
<ide> elif initial_state is not None:
<del> # initial_states are not keras tensors, eg eager tensor from np array.
<del> # They are only passed in from kwarg initial_state, and should be passed
<del> # to forward/backward layer via kwarg initial_state as well.
<add> # initial_states are not keras tensors, e.g. eager tensors from
<add> # an np array. They are only passed in from the kwarg
<add> # initial_state, and should be passed to the forward/backward
<add> # layers via the kwarg initial_state as well.
<ide> forward_inputs, backward_inputs = inputs, inputs
<ide> half = len(initial_state) // 2
<ide> forward_state = initial_state[:half]
<ide> def call(
<ide> output = [y, y_rev]
<ide> else:
<ide> raise ValueError(
<del> f"Unrecognized value for `merge_mode`. Received: {self.merge_mode}"
<add> "Unrecognized value for `merge_mode`. "
<add> f"Received: {self.merge_mode}"
<ide> 'Expected values are ["concat", "sum", "ave", "mul"]'
<ide> )
<ide>
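
The forward/backward state split in `call()` can be exercised like this; a hedged sketch with arbitrary sizes, using Keras-tensor initial states:

```python
from tensorflow import keras

inputs = keras.Input((5, 10))
# An LSTM carries [h, c] per direction, so Bidirectional expects 4 state
# tensors: the first half feeds the forward layer, the second half the
# backward layer.
states = [keras.Input((8,)) for _ in range(4)]
outputs = keras.layers.Bidirectional(keras.layers.LSTM(8))(
    inputs, initial_state=states
)
model = keras.Model([inputs] + states, outputs)
```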
<ide><path>keras/layers/rnn/bidirectional_test.py
<ide> def test_Bidirectional_merged_value(self, merge_mode):
<ide> def test_Bidirectional_with_time_major_input(self, time_major):
<ide> batch_size, time, input_dim = 2, 3, 1
<ide> inputs = tf.zeros((batch_size, time, input_dim))
<del> # length is [1 2]. Within the batch, the first element has 1 step, and the
<del> # second element as 2 steps.
<add> # length is [1 2]. Within the batch, the first element has 1 step, and
<add> # the second element has 2 steps.
<ide> lengths = tf.range(1, 1 + batch_size)
<ide> mask = tf.sequence_mask(lengths, maxlen=time, dtype=tf.float32)
<ide>
<ide> def test_Bidirectional_with_time_major_input(self, time_major):
<ide> if time_major:
<ide> keras_outputs = tf.transpose(keras_outputs, [1, 0, 2])
<ide>
<del> # expect the first element in batch has 1 step and second element in batch
<del> # has 2 steps.
<add> # expect the first element in batch has 1 step and second element in
<add> # batch has 2 steps.
<ide> expected_result = np.array(
<ide> [
<ide> [[1.0, 1.0], [0.0, 0.0], [0.0, 0.0]],
<ide> def test_Bidirectional_state_reuse(self):
<ide> model.predict(inputs)
<ide>
<ide> def test_Bidirectional_state_reuse_with_np_input(self):
<del> # See https://github.com/tensorflow/tensorflow/issues/28761 for more detail.
<add> # See https://github.com/tensorflow/tensorflow/issues/28761 for more
<add> # detail.
<ide> rnn = keras.layers.LSTM
<ide> samples = 2
<ide> dim = 5
<ide> def test_Bidirectional_output_shape(self, rnn):
<ide> rnn(3, return_state=True), merge_mode=None
<ide> )
<ide> output_shape = wrapper.compute_output_shape(input_shape)
<del> # 1 for forward output and 1 for backward output, and the rest for states
<add> # 1 for forward output and 1 for backward output, and the rest for
<add> # states
<ide> self.assertLen(output_shape, 2 + num_state)
<ide> for shape in output_shape:
<ide> self.assertEqual(shape.as_list(), [None, 3])
<ide> def compute_output_shape(self, input_shape):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_Bidirectional_last_output_with_masking(self):
<ide> rnn = keras.layers.LSTM
<ide> def test_Bidirectional_last_output_with_masking(self):
<ide> units = 3
<ide> merge_mode = "concat"
<ide> x = np.random.rand(samples, timesteps, dim)
<del> # clear the first record's timestep 2. Last output should be same as state,
<del> # not zeroed.
<add> # clear the first record's timestep 2. Last output should be the same
<add> # as the state, not zeroed.
<ide> x[0, 2] = 0
<ide>
<ide> with self.cached_session():
<ide> def test_Bidirectional_last_output_with_masking(self):
<ide> @parameterized.parameters([keras.layers.LSTM, keras.layers.GRU])
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_Bidirectional_sequence_output_with_masking(self, rnn):
<ide> samples = 2
<ide> def test_Bidirectional_sequence_output_with_masking(self, rnn):
<ide> units = 3
<ide> merge_mode = "concat"
<ide> x = np.random.rand(samples, timesteps, dim)
<del> # clear the first record's timestep 2, and expect the output of timestep 2
<del> # is also 0s.
<add> # clear the first record's timestep 2, and expect the output of
<add> # timestep 2 to also be 0s.
<ide> x[0, 2] = 0
<ide>
<ide> with self.cached_session():
<ide> def test_wrapped_rnn_cell(self):
<ide> @parameterized.parameters(["ave", "concat", "mul"])
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm RNN does not support ragged tensors yet.",
<add> skip_message="Skipping as ROCm RNN does not support ragged "
<add> "tensors yet.",
<ide> )
<ide> def test_Bidirectional_ragged_input(self, merge_mode):
<ide> np.random.seed(100)
<ide> def test_Bidirectional_ragged_input(self, merge_mode):
<ide> )
<ide>
<ide> # TODO(kaftan): after KerasTensor refactor TF op layers should work
<del> # with many composite tensors, and this shouldn't need to be a lambda
<del> # layer.
<add> # with many composite tensors, and this shouldn't need to be a
<add> # lambda layer.
<ide> reverse_layer = core.Lambda(tf.reverse, arguments=dict(axis=[1]))
<ide> f_backward = keras.backend.function(
<ide> [inputs], reverse_layer(layer.backward_layer(inputs))
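
The masking assertions above reduce to the following behavior; a minimal sketch with made-up shapes:

```python
import numpy as np
from tensorflow import keras

x = np.random.rand(2, 3, 4)
x[0, 2] = 0.0  # an all-zero timestep is masked by the Masking layer

model = keras.Sequential(
    [
        keras.layers.Masking(input_shape=(3, 4)),
        keras.layers.Bidirectional(
            keras.layers.LSTM(3, return_sequences=True)
        ),
    ]
)
y = model.predict(x)
assert np.allclose(y[0, 2], 0.0)  # masked timestep is zeroed in the output
```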
<ide><path>keras/layers/rnn/cell_wrappers.py
<ide> def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
<ide> Args:
<ide> inputs: A tensor with wrapped cell's input.
<ide> state: A tensor or tuple of tensors with wrapped cell's state.
<del> cell_call_fn: Wrapped cell's method to use for step computation (cell's
<del> `__call__` or 'call' method).
<add> cell_call_fn: Wrapped cell's method to use for step computation
<add> (cell's `__call__` or 'call' method).
<ide> **kwargs: Additional arguments.
<ide>
<ide> Returns:
<ide> A pair containing:
<ide> - Output: A tensor with cell's output.
<del> - New state: A tensor or tuple of tensors with new wrapped cell's state.
<add> - New state: A tensor or tuple of tensors with new wrapped cell's
<add> state.
<ide> """
<ide> raise NotImplementedError
<ide>
<ide> def call(self, inputs, state, **kwargs):
<ide> """Runs the RNN cell step computation.
<ide>
<del> When `call` is being used, we assume that the wrapper object has been built,
<del> and therefore the wrapped cells has been built via its `build` method and
<del> its `call` method can be used directly.
<add> When `call` is being used, we assume that the wrapper object has been
<add> built, and therefore the wrapped cell has been built via its `build`
<add> method and its `call` method can be used directly.
<ide>
<del> This allows to use the wrapped cell and the non-wrapped cell equivalently
<del> when using `call` and `build`.
<add> This allows using the wrapped cell and the non-wrapped cell
<add> equivalently when using `call` and `build`.
<ide>
<ide> Args:
<ide> inputs: A tensor with wrapped cell's input.
<ide> def call(self, inputs, state, **kwargs):
<ide> A pair containing:
<ide>
<ide> - Output: A tensor with cell's output.
<del> - New state: A tensor or tuple of tensors with new wrapped cell's state.
<add> - New state: A tensor or tuple of tensors with new wrapped cell's
<add> state.
<ide> """
<ide> return self._call_wrapped_cell(
<ide> inputs, state, cell_call_fn=self.cell.call, **kwargs
<ide> def __init__(
<ide> ):
<ide> """Create a cell with added input, state, and/or output dropout.
<ide>
<del> If `variational_recurrent` is set to `True` (**NOT** the default behavior),
<del> then the same dropout mask is applied at every step, as described in:
<del> [A Theoretically Grounded Application of Dropout in Recurrent
<del> Neural Networks. Y. Gal, Z. Ghahramani](https://arxiv.org/abs/1512.05287).
<add> If `variational_recurrent` is set to `True` (**NOT** the default
<add> behavior), then the same dropout mask is applied at every step, as
<add> described in: [A Theoretically Grounded Application of Dropout in
<add> Recurrent Neural Networks. Y. Gal, Z.
<add> Ghahramani](https://arxiv.org/abs/1512.05287).
<ide>
<ide> Otherwise a different dropout mask is applied at every time step.
<ide>
<ide> def __init__(
<ide> Args:
<ide> cell: an RNNCell, a projection to output_size is added to it.
<ide> input_keep_prob: unit Tensor or float between 0 and 1, input keep
<del> probability; if it is constant and 1, no input dropout will be added.
<add> probability; if it is constant and 1, no input dropout will be
<add> added.
<ide> output_keep_prob: unit Tensor or float between 0 and 1, output keep
<del> probability; if it is constant and 1, no output dropout will be added.
<add> probability; if it is constant and 1, no output dropout will be
<add> added.
<ide> state_keep_prob: unit Tensor or float between 0 and 1, state keep
<del> probability; if it is constant and 1, no output dropout will be added.
<del> State dropout is performed on the outgoing states of the cell. **Note**
<del> the state components to which dropout is applied when `state_keep_prob`
<del> is in `(0, 1)` are also determined by the argument
<del> `dropout_state_filter_visitor` (e.g. by default dropout is never applied
<del> to the `c` component of an `LSTMStateTuple`).
<add> probability; if it is constant and 1, no output dropout will be
<add> added. State dropout is performed on the outgoing states of the
<add> cell. **Note** the state components to which dropout is applied when
<add> `state_keep_prob` is in `(0, 1)` are also determined by the argument
<add> `dropout_state_filter_visitor` (e.g. by default dropout is never
<add> applied to the `c` component of an `LSTMStateTuple`).
<ide> variational_recurrent: Python bool. If `True`, then the same dropout
<del> pattern is applied across all time steps per run call. If this parameter
<del> is set, `input_size` **must** be provided.
<del> input_size: (optional) (possibly nested tuple of) `TensorShape` objects
<del> containing the depth(s) of the input tensors expected to be passed in to
<del> the `DropoutWrapper`. Required and used **iff** `variational_recurrent
<del> = True` and `input_keep_prob < 1`.
<add> pattern is applied across all time steps per run call. If this
<add> parameter is set, `input_size` **must** be provided.
<add> input_size: (optional) (possibly nested tuple of) `TensorShape`
<add> objects containing the depth(s) of the input tensors expected to be
<add> passed in to the `DropoutWrapper`. Required and used **iff**
<add> `variational_recurrent = True` and `input_keep_prob < 1`.
<ide> dtype: (optional) The `dtype` of the input, state, and output tensors.
<ide> Required and used **iff** `variational_recurrent = True`.
<ide> seed: (optional) integer, the randomness seed.
<del> dropout_state_filter_visitor: (optional), default: (see below). Function
<del> that takes any hierarchical level of the state and returns a scalar or
<del> depth=1 structure of Python booleans describing which terms in the state
<del> should be dropped out. In addition, if the function returns `True`,
<del> dropout is applied across this sublevel. If the function returns
<del> `False`, dropout is not applied across this entire sublevel.
<del> Default behavior: perform dropout on all terms except the memory (`c`)
<del> state of `LSTMCellState` objects, and don't try to apply dropout to
<del> `TensorArray` objects: ```
<add> dropout_state_filter_visitor: (optional), default: (see below).
<add> Function that takes any hierarchical level of the state and returns
<add> a scalar or depth=1 structure of Python booleans describing which
<add> terms in the state should be dropped out. In addition, if the
<add> function returns `True`, dropout is applied across this sublevel.
<add> If the function returns `False`, dropout is not applied across this
<add> entire sublevel. Default behavior: perform dropout on all terms
<add> except the memory (`c`) state of `LSTMCellState` objects, and don't
<add> try to apply dropout to
<add> `TensorArray` objects:
<add> ```
<ide> def dropout_state_filter_visitor(s):
<del> if isinstance(s, LSTMCellState): # Never perform dropout on the c
<del> state. return LSTMCellState(c=False, h=True)
<del> elif isinstance(s, TensorArray): return False return True ```
<add> # Never perform dropout on the c state.
<add> if isinstance(s, LSTMCellState):
<add> return LSTMCellState(c=False, h=True)
<add> elif isinstance(s, TensorArray):
<add> return False
<add> return True
<add> ```
<ide> **kwargs: dict of keyword arguments for base layer.
<ide>
<ide> Raises:
<del> TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
<del> but not `callable`.
<add> TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is
<add> provided but not `callable`.
<ide> ValueError: if any of the keep_probs are not between 0 and 1.
<ide> """
<ide> if isinstance(cell, lstm.LSTMCell):
<ide> def batch_noise(s, inner_seed):
<ide> ):
<ide> if input_size is None:
<ide> raise ValueError(
<del> "When variational_recurrent=True and input_keep_prob < 1.0 or "
<del> "is unknown, input_size must be provided"
<add> "When variational_recurrent=True and input_keep_prob < "
<add> "1.0 or is unknown, input_size must be provided"
<ide> )
<ide> self._recurrent_input_noise = _enumerated_map_structure_up_to(
<ide> input_size,
<ide> def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
<ide> Args:
<ide> inputs: A tensor with wrapped cell's input.
<ide> state: A tensor or tuple of tensors with wrapped cell's state.
<del> cell_call_fn: Wrapped cell's method to use for step computation (cell's
<del> `__call__` or 'call' method).
<add> cell_call_fn: Wrapped cell's method to use for step computation
<add> (cell's `__call__` or 'call' method).
<ide> **kwargs: Additional arguments.
<ide>
<ide> Returns:
<ide> A pair containing:
<ide>
<ide> - Output: A tensor with cell's output.
<del> - New state: A tensor or tuple of tensors with new wrapped cell's state.
<add> - New state: A tensor or tuple of tensors with new wrapped cell's
<add> state.
<ide> """
<ide>
<ide> def _should_dropout(p):
<ide> def __init__(self, cell, residual_fn=None, **kwargs):
<ide>
<ide> Args:
<ide> cell: An instance of `RNNCell`.
<del> residual_fn: (Optional) The function to map raw cell inputs and raw cell
<del> outputs to the actual cell outputs of the residual network.
<del> Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
<del> and outputs.
<add> residual_fn: (Optional) The function to map raw cell inputs and raw
<add> cell outputs to the actual cell outputs of the residual network.
<add> Defaults to calling nest.map_structure on (lambda i, o: i + o),
<add> inputs and outputs.
<ide> **kwargs: dict of keyword arguments for base layer.
<ide> """
<ide> super().__init__(cell, **kwargs)
<ide> def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
<ide> Args:
<ide> inputs: cell inputs.
<ide> state: cell state.
<del> cell_call_fn: Wrapped cell's method to use for step computation (cell's
<del> `__call__` or 'call' method).
<add> cell_call_fn: Wrapped cell's method to use for step computation
<add> (cell's `__call__` or 'call' method).
<ide> **kwargs: Additional arguments passed to the wrapped cell's `call`.
<ide>
<ide> Returns:
<ide> Tuple of cell outputs and new state.
<ide>
<ide> Raises:
<ide> TypeError: If cell inputs and outputs have different structure (type).
<del> ValueError: If cell inputs and outputs have different structure (value).
<add> ValueError: If cell inputs and outputs have different structure
<add> (value).
<ide> """
<ide> outputs, new_state = cell_call_fn(inputs, state, **kwargs)
<ide>
<ide><path>keras/layers/rnn/conv_lstm1d.py
<ide> class ConvLSTM1D(ConvLSTM):
<ide> and recurrent transformations are both convolutional.
<ide>
<ide> Args:
<del> filters: Integer, the dimensionality of the output space (i.e. the number of
<del> output filters in the convolution).
<add> filters: Integer, the dimensionality of the output space (i.e. the number
<add> of output filters in the convolution).
<ide> kernel_size: An integer or tuple/list of n integers, specifying the
<ide> dimensions of the convolution window.
<ide> strides: An integer or tuple/list of n integers, specifying the strides of
<ide> the convolution. Specifying any stride value != 1 is incompatible with
<ide> specifying any `dilation_rate` value != 1.
<del> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no
<del> padding. `"same"` results in padding evenly to the left/right or up/down
<del> of the input such that output has the same height/width dimension as the
<del> input.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `(batch, time, ..., channels)` while `channels_first`
<del> corresponds to inputs with shape `(batch, time, channels, ...)`. It
<del> defaults to the `image_data_format` value found in your Keras config file
<del> at `~/.keras/keras.json`. If you never set it, then it will be
<del> "channels_last".
<add> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
<add> no padding. `"same"` results in padding evenly to the left/right or
<add> up/down of the input such that output has the same height/width
<add> dimension as the input.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `(batch, time, ...,
<add> channels)` while `channels_first` corresponds to inputs with shape
<add> `(batch, time, channels, ...)`. It defaults to the `image_data_format`
<add> value found in your Keras config file at `~/.keras/keras.json`. If you
<add> never set it, then it will be "channels_last".
<ide> dilation_rate: An integer or tuple/list of n integers, specifying the
<ide> dilation rate to use for dilated convolution. Currently, specifying any
<ide> `dilation_rate` value != 1 is incompatible with specifying any `strides`
<ide> class ConvLSTM1D(ConvLSTM):
<ide> recurrent_initializer: Initializer for the `recurrent_kernel` weights
<ide> matrix, used for the linear transformation of the recurrent state.
<ide> bias_initializer: Initializer for the bias vector.
<del> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at
<del> initialization. Use in combination with `bias_initializer="zeros"`. This
<del> is recommended in [Jozefowicz et al., 2015](
<del> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<add> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
<add> at initialization. Use in combination with `bias_initializer="zeros"`.
<add> This is recommended in [Jozefowicz et al., 2015](
<add> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> matrix.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> class ConvLSTM1D(ConvLSTM):
<ide> activity_regularizer: Regularizer function applied to.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix.
<ide> bias_constraint: Constraint function applied to the bias vector.
<ide> return_sequences: Boolean. Whether to return the last output in the output
<ide> sequence, or the full sequence. (default False)
<ide> class ConvLSTM1D(ConvLSTM):
<ide> stateful: Boolean (default False). If True, the last state for each sample
<ide> at index i in a batch will be used as initial state for the sample of
<ide> index i in the following batch.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state.
<ide> Call arguments:
<ide> inputs: A 4D tensor.
<ide> mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
<ide> given timestep should be masked.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. This argument is passed to the cell
<del> when calling it. This is only relevant if `dropout` or `recurrent_dropout`
<del> are set.
<del> initial_state: List of initial state tensors to be passed to the first call
<del> of the cell.
<add> when calling it. This is only relevant if `dropout` or
<add> `recurrent_dropout` are set.
<add> initial_state: List of initial state tensors to be passed to the first
<add> call of the cell.
<ide> Input shape: - If data_format='channels_first'
<ide> 4D tensor with shape: `(samples, time, channels, rows)` - If
<ide> data_format='channels_last'
<ide> 4D tensor with shape: `(samples, time, rows, channels)`
<ide> Output shape:
<del> - If `return_state`: a list of tensors. The first tensor is the output. The
<del> remaining tensors are the last states,
<add> - If `return_state`: a list of tensors. The first tensor is the output.
<add> The remaining tensors are the last states,
<ide> each 3D tensor with shape: `(samples, filters, new_rows)` if
<ide> data_format='channels_first'
<ide> or shape: `(samples, new_rows, filters)` if data_format='channels_last'.
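
A shape sketch for the 1D case under `channels_last`; sizes are arbitrary demonstration values:

```python
import tensorflow as tf
from tensorflow import keras

x = tf.random.normal([4, 10, 32, 3])  # (samples, time, rows, channels)
layer = keras.layers.ConvLSTM1D(filters=8, kernel_size=3, padding="same")
print(layer(x).shape)  # (4, 32, 8): (samples, new_rows, filters)
```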
<ide><path>keras/layers/rnn/conv_lstm2d.py
<ide> class ConvLSTM2D(ConvLSTM):
<ide> and recurrent transformations are both convolutional.
<ide>
<ide> Args:
<del> filters: Integer, the dimensionality of the output space (i.e. the number of
<del> output filters in the convolution).
<add> filters: Integer, the dimensionality of the output space (i.e. the number
<add> of output filters in the convolution).
<ide> kernel_size: An integer or tuple/list of n integers, specifying the
<ide> dimensions of the convolution window.
<ide> strides: An integer or tuple/list of n integers, specifying the strides of
<ide> the convolution. Specifying any stride value != 1 is incompatible with
<ide> specifying any `dilation_rate` value != 1.
<del> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no
<del> padding. `"same"` results in padding evenly to the left/right or up/down
<del> of the input such that output has the same height/width dimension as the
<del> input.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `(batch, time, ..., channels)` while `channels_first`
<del> corresponds to inputs with shape `(batch, time, channels, ...)`. It
<del> defaults to the `image_data_format` value found in your Keras config file
<del> at `~/.keras/keras.json`. If you never set it, then it will be
<del> "channels_last".
<add> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
<add> no padding. `"same"` results in padding evenly to the left/right or
<add> up/down of the input such that output has the same height/width
<add> dimension as the input.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `(batch, time, ...,
<add> channels)` while `channels_first` corresponds to inputs with shape
<add> `(batch, time, channels, ...)`. It defaults to the `image_data_format`
<add> value found in your Keras config file at `~/.keras/keras.json`. If you
<add> never set it, then it will be "channels_last".
<ide> dilation_rate: An integer or tuple/list of n integers, specifying the
<ide> dilation rate to use for dilated convolution. Currently, specifying any
<ide> `dilation_rate` value != 1 is incompatible with specifying any `strides`
<ide> class ConvLSTM2D(ConvLSTM):
<ide> recurrent_initializer: Initializer for the `recurrent_kernel` weights
<ide> matrix, used for the linear transformation of the recurrent state.
<ide> bias_initializer: Initializer for the bias vector.
<del> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at
<del> initialization. Use in combination with `bias_initializer="zeros"`. This
<del> is recommended in [Jozefowicz et al., 2015](
<del> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<add> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
<add> at initialization. Use in combination with `bias_initializer="zeros"`.
<add> This is recommended in [Jozefowicz et al., 2015](
<add> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> matrix.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> class ConvLSTM2D(ConvLSTM):
<ide> activity_regularizer: Regularizer function applied to.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix.
<ide> bias_constraint: Constraint function applied to the bias vector.
<ide> return_sequences: Boolean. Whether to return the last output in the output
<ide> sequence, or the full sequence. (default False)
<ide> class ConvLSTM2D(ConvLSTM):
<ide> stateful: Boolean (default False). If True, the last state for each sample
<ide> at index i in a batch will be used as initial state for the sample of
<ide> index i in the following batch.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state.
<ide> Call arguments:
<ide> inputs: A 5D tensor.
<ide> mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
<ide> given timestep should be masked.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. This argument is passed to the cell
<del> when calling it. This is only relevant if `dropout` or `recurrent_dropout`
<del> are set.
<del> initial_state: List of initial state tensors to be passed to the first call
<del> of the cell.
<add> when calling it. This is only relevant if `dropout` or
<add> `recurrent_dropout` are set.
<add> initial_state: List of initial state tensors to be passed to the first
<add> call of the cell.
<ide> Input shape: - If data_format='channels_first'
<ide> 5D tensor with shape: `(samples, time, channels, rows, cols)` - If
<ide> data_format='channels_last'
<ide> 5D tensor with shape: `(samples, time, rows, cols, channels)`
<ide> Output shape:
<del> - If `return_state`: a list of tensors. The first tensor is the output. The
<del> remaining tensors are the last states,
<add> - If `return_state`: a list of tensors. The first tensor is the output.
<add> The remaining tensors are the last states,
<ide> each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
<ide> data_format='channels_first'
<ide> or shape: `(samples, new_rows, new_cols, filters)` if
<del> data_format='channels_last'. `rows` and `cols` values might have changed
<del> due to padding.
<add> data_format='channels_last'. `rows` and `cols` values might have
<add> changed due to padding.
<ide> - If `return_sequences`: 5D tensor with shape: `(samples, timesteps,
<ide> filters, new_rows, new_cols)` if data_format='channels_first'
<ide> or shape: `(samples, timesteps, new_rows, new_cols, filters)` if
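
The analogous 2D shape sketch under `channels_last`, here with `return_sequences=True`; sizes are again arbitrary:

```python
import tensorflow as tf
from tensorflow import keras

# (samples, time, rows, cols, channels) under channels_last
x = tf.random.normal([4, 10, 28, 28, 3])
layer = keras.layers.ConvLSTM2D(
    filters=8, kernel_size=(3, 3), padding="same", return_sequences=True
)
print(layer(x).shape)  # (4, 10, 28, 28, 8)
```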
<ide><path>keras/layers/rnn/conv_lstm3d.py
<ide> class ConvLSTM3D(ConvLSTM):
<ide> and recurrent transformations are both convolutional.
<ide>
<ide> Args:
<del> filters: Integer, the dimensionality of the output space (i.e. the number of
<del> output filters in the convolution).
<add> filters: Integer, the dimensionality of the output space (i.e. the number
<add> of output filters in the convolution).
<ide> kernel_size: An integer or tuple/list of n integers, specifying the
<ide> dimensions of the convolution window.
<ide> strides: An integer or tuple/list of n integers, specifying the strides of
<ide> the convolution. Specifying any stride value != 1 is incompatible with
<ide> specifying any `dilation_rate` value != 1.
<del> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no
<del> padding. `"same"` results in padding evenly to the left/right or up/down
<del> of the input such that output has the same height/width dimension as the
<del> input.
<del> data_format: A string, one of `channels_last` (default) or `channels_first`.
<del> The ordering of the dimensions in the inputs. `channels_last` corresponds
<del> to inputs with shape `(batch, time, ..., channels)` while `channels_first`
<del> corresponds to inputs with shape `(batch, time, channels, ...)`. It
<del> defaults to the `image_data_format` value found in your Keras config file
<del> at `~/.keras/keras.json`. If you never set it, then it will be
<del> "channels_last".
<add> padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
<add> no padding. `"same"` results in padding evenly to the left/right or
<add> up/down of the input such that output has the same height/width
<add> dimension as the input.
<add> data_format: A string, one of `channels_last` (default) or
<add> `channels_first`. The ordering of the dimensions in the inputs.
<add> `channels_last` corresponds to inputs with shape `(batch, time, ...,
<add> channels)` while `channels_first` corresponds to inputs with shape
<add> `(batch, time, channels, ...)`. It defaults to the `image_data_format`
<add> value found in your Keras config file at `~/.keras/keras.json`. If you
<add> never set it, then it will be "channels_last".
<ide> dilation_rate: An integer or tuple/list of n integers, specifying the
<ide> dilation rate to use for dilated convolution. Currently, specifying any
<ide> `dilation_rate` value != 1 is incompatible with specifying any `strides`
<ide> class ConvLSTM3D(ConvLSTM):
<ide> recurrent_initializer: Initializer for the `recurrent_kernel` weights
<ide> matrix, used for the linear transformation of the recurrent state.
<ide> bias_initializer: Initializer for the bias vector.
<del> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at
<del> initialization. Use in combination with `bias_initializer="zeros"`. This
<del> is recommended in [Jozefowicz et al., 2015](
<del> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<add> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
<add> at initialization. Use in combination with `bias_initializer="zeros"`.
<add> This is recommended in [Jozefowicz et al., 2015](
<add> http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> matrix.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> class ConvLSTM3D(ConvLSTM):
<ide> activity_regularizer: Regularizer function applied to.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix.
<ide> bias_constraint: Constraint function applied to the bias vector.
<ide> return_sequences: Boolean. Whether to return the last output in the output
<ide> sequence, or the full sequence. (default False)
<ide> class ConvLSTM3D(ConvLSTM):
<ide> stateful: Boolean (default False). If True, the last state for each sample
<ide> at index i in a batch will be used as initial state for the sample of
<ide> index i in the following batch.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state.
<ide> Call arguments:
<ide> inputs: A 6D tensor.
<ide> mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
<ide> given timestep should be masked.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. This argument is passed to the cell
<del> when calling it. This is only relevant if `dropout` or `recurrent_dropout`
<del> are set.
<del> initial_state: List of initial state tensors to be passed to the first call
<del> of the cell.
<add> when calling it. This is only relevant if `dropout` or
<add> `recurrent_dropout` are set.
<add> initial_state: List of initial state tensors to be passed to the first
<add> call of the cell.
<ide> Input shape: - If data_format='channels_first'
<ide> 6D tensor with shape: `(samples, time, channels, rows, cols, depth)` -
<ide> If data_format='channels_last'
<ide> 5D tensor with shape: `(samples, time, rows, cols, depth, channels)`
<ide> Output shape:
<del> - If `return_state`: a list of tensors. The first tensor is the output. The
<del> remaining tensors are the last states,
<add> - If `return_state`: a list of tensors. The first tensor is the output.
<add> The remaining tensors are the last states,
<ide> each 5D tensor with shape: `(samples, filters, new_rows, new_cols,
<ide> new_depth)` if data_format='channels_first'
<ide> or shape: `(samples, new_rows, new_cols, new_depth, filters)` if
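
And the 3D case, demonstrating the `return_state` contract described above (output first, last states after); a sketch with arbitrary sizes:

```python
import tensorflow as tf
from tensorflow import keras

# (samples, time, rows, cols, depth, channels) under channels_last
x = tf.random.normal([2, 5, 16, 16, 16, 1])
layer = keras.layers.ConvLSTM3D(
    filters=4, kernel_size=3, padding="same", return_state=True
)
output, h, c = layer(x)
# output, h and c each come out as (2, 16, 16, 16, 4) here.
```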
<ide><path>keras/layers/rnn/cudnn_gru.py
<ide> class CuDNNGRU(_CuDNNRNN):
<ide>
<ide> Args:
<ide> units: Positive integer, dimensionality of the output space.
<del> kernel_initializer: Initializer for the `kernel` weights matrix, used for
<del> the linear transformation of the inputs.
<add> kernel_initializer: Initializer for the `kernel` weights matrix, used
<add> for the linear transformation of the inputs.
<ide> recurrent_initializer: Initializer for the `recurrent_kernel` weights
<ide> matrix, used for the linear transformation of the recurrent state.
<ide> bias_initializer: Initializer for the bias vector.
<ide> class CuDNNGRU(_CuDNNRNN):
<ide> recurrent_constraint: Constraint function applied to the
<ide> `recurrent_kernel` weights matrix.
<ide> bias_constraint: Constraint function applied to the bias vector.
<del> return_sequences: Boolean. Whether to return the last output in the output
<del> sequence, or the full sequence.
<del> return_state: Boolean. Whether to return the last state in addition to the
<del> output.
<del> go_backwards: Boolean (default False). If True, process the input sequence
<del> backwards and return the reversed sequence.
<del> stateful: Boolean (default False). If True, the last state for each sample
<del> at index i in a batch will be used as initial state for the sample of
<del> index i in the following batch.
<add> return_sequences: Boolean. Whether to return the last output in the
<add> output sequence, or the full sequence.
<add> return_state: Boolean. Whether to return the last state in addition to
<add> the output.
<add> go_backwards: Boolean (default False). If True, process the input
<add> sequence backwards and return the reversed sequence.
<add> stateful: Boolean (default False). If True, the last state for each
<add> sample at index i in a batch will be used as initial state for the
<add> sample of index i in the following batch.
<ide> """
<ide>
<ide> def __init__(
<ide><path>keras/layers/rnn/cudnn_lstm.py
<ide> class CuDNNLSTM(_CuDNNRNN):
<ide>
<ide> Args:
<ide> units: Positive integer, dimensionality of the output space.
<del> kernel_initializer: Initializer for the `kernel` weights matrix, used for
<del> the linear transformation of the inputs.
<add> kernel_initializer: Initializer for the `kernel` weights matrix, used
<add> for the linear transformation of the inputs.
<ide> unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
<ide> at initialization. Setting it to true will also force
<ide> `bias_initializer="zeros"`. This is recommended in [Jozefowicz et
<ide> class CuDNNLSTM(_CuDNNRNN):
<ide> bias_constraint: Constraint function applied to the bias vector.
<ide> return_sequences: Boolean. Whether to return the last output in the
<ide> output sequence, or the full sequence.
<del> return_state: Boolean. Whether to return the last state in addition to the
<del> output.
<del> go_backwards: Boolean (default False). If True, process the input sequence
<del> backwards and return the reversed sequence.
<del> stateful: Boolean (default False). If True, the last state for each sample
<del> at index i in a batch will be used as initial state for the sample of
<del> index i in the following batch.
<add> return_state: Boolean. Whether to return the last state in addition to
<add> the output.
<add> go_backwards: Boolean (default False). If True, process the input
<add> sequence backwards and return the reversed sequence.
<add> stateful: Boolean (default False). If True, the last state for each
<add> sample at index i in a batch will be used as initial state for the
<add> sample of index i in the following batch.
<ide> """
<ide>
<ide> def __init__(
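
Both CuDNN layers are v1-era; in TF2 they are reachable under the compat namespace (an assumption about the export path) and need a GPU at run time, so the calls are left commented:

```python
import tensorflow as tf

gru = tf.compat.v1.keras.layers.CuDNNGRU(8, return_state=True)
lstm = tf.compat.v1.keras.layers.CuDNNLSTM(8, return_state=True)
# outputs, state = gru(inputs)   # inputs: (batch, time, features), on GPU
# outputs, h, c = lstm(inputs)   # the LSTM additionally returns the c state
```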
<ide><path>keras/layers/rnn/cudnn_test.py
<ide> def _convert_model_weights(self, source_model, target_model):
<ide> def test_load_weights_between_noncudnn_rnn_time_distributed(
<ide> self, rnn_type, to_cudnn
<ide> ):
<del> # Similar test as test_load_weights_between_noncudnn_rnn() but has different
<del> # rank of input due to usage of TimeDistributed. Issue: #10356.
<add> # Similar test as test_load_weights_between_noncudnn_rnn() but has
<add> # different rank of input due to usage of TimeDistributed. Issue:
<add> # #10356.
<ide> input_size = 10
<ide> steps = 6
<ide> timesteps = 6
<ide><path>keras/layers/rnn/dropout_rnn_cell_mixin.py
<ide> class DropoutRNNCellMixin:
<ide> """Object that hold dropout related fields for RNN Cell.
<ide>
<del> This class is not a standalone RNN cell. It suppose to be used with a RNN cell
<del> by multiple inheritance. Any cell that mix with class should have following
<del> fields:
<add> This class is not a standalone RNN cell. It is supposed to be used with
<add> an RNN cell by multiple inheritance. Any cell that mixes with this class
<add> should have the following fields:
<ide> dropout: a float number within range [0, 1). The ratio that the input
<ide> tensor need to dropout.
<ide> recurrent_dropout: a float number within range [0, 1). The ratio that the
<ide> def _create_non_trackable_mask_cache(self):
<ide> tensors will be generated differently than in the "graph function" case,
<ide> and they will be cached.
<ide>
<del> Also note that in graph mode, we still cache those masks only because the
<del> RNN could be created with `unroll=True`. In that case, the `cell.call()`
<del> function will be invoked multiple times, and we want to ensure same mask
<del> is used every time.
<add> Also note that in graph mode, we still cache those masks only because
<add> the RNN could be created with `unroll=True`. In that case, the
<add> `cell.call()` function will be invoked multiple times, and we want to
<add> ensure the same mask is used every time.
<ide>
<del> Also the caches are created without tracking. Since they are not picklable
<del> by python when deepcopy, we don't want `layer._obj_reference_counts_dict`
<del> to track it by default.
<add> Also, the caches are created without tracking. Since they are not
<add> picklable by Python when deepcopying, we don't want
<add> `layer._obj_reference_counts_dict` to track them by default.
<ide> """
<ide> self._dropout_mask_cache = backend.ContextValueCache(
<ide> self._create_dropout_mask
<ide> def _create_non_trackable_mask_cache(self):
<ide> def reset_dropout_mask(self):
<ide> """Reset the cached dropout masks if any.
<ide>
<del> This is important for the RNN layer to invoke this in it `call()` method so
<del> that the cached mask is cleared before calling the `cell.call()`. The mask
<del> should be cached across the timestep within the same batch, but shouldn't
<del> be cached between batches. Otherwise it will introduce unreasonable bias
<del> against certain index of data within the batch.
<add> This is important for the RNN layer to invoke this in its `call()` method
<add> so that the cached mask is cleared before calling the `cell.call()`. The
<add> mask should be cached across the timestep within the same batch, but
<add> shouldn't be cached between batches. Otherwise it will introduce
<add> unreasonable bias against certain index of data within the batch.
<ide> """
<ide> self._dropout_mask_cache.clear()
<ide>
<ide> def reset_recurrent_dropout_mask(self):
<ide> """Reset the cached recurrent dropout masks if any.
<ide>
<del> This is important for the RNN layer to invoke this in it call() method so
<del> that the cached mask is cleared before calling the cell.call(). The mask
<del> should be cached across the timestep within the same batch, but shouldn't
<del> be cached between batches. Otherwise it will introduce unreasonable bias
<del> against certain index of data within the batch.
<add> This is important for the RNN layer to invoke this in its call() method
<add> so that the cached mask is cleared before calling the cell.call(). The
<add> mask should be cached across the timestep within the same batch, but
<add> shouldn't be cached between batches. Otherwise it will introduce
<add> unreasonable bias against certain index of data within the batch.
<ide> """
<ide> self._recurrent_dropout_mask_cache.clear()
<ide>
<ide> def get_dropout_mask_for_cell(self, inputs, training, count=1):
<ide> Args:
<ide> inputs: The input tensor whose shape will be used to generate dropout
<ide> mask.
<del> training: Boolean tensor, whether its in training mode, dropout will be
<del> ignored in non-training mode.
<del> count: Int, how many dropout mask will be generated. It is useful for cell
<del> that has internal weights fused together.
<add> training: Boolean tensor, whether it is in training mode; dropout will
<add> be ignored in non-training mode.
<add> count: Int, how many dropout mask will be generated. It is useful for
<add> cell that has internal weights fused together.
<ide> Returns:
<ide> List of mask tensor, generated or cached mask based on context.
<ide> """
<ide> def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
<ide> Args:
<ide> inputs: The input tensor whose shape will be used to generate dropout
<ide> mask.
<del> training: Boolean tensor, whether its in training mode, dropout will be
<del> ignored in non-training mode.
<del> count: Int, how many dropout mask will be generated. It is useful for cell
<del> that has internal weights fused together.
<add> training: Boolean tensor, whether it is in training mode; dropout will
<add> be ignored in non-training mode.
<add> count: Int, how many dropout mask will be generated. It is useful for
<add> cell that has internal weights fused together.
<ide> Returns:
<ide> List of mask tensor, generated or cached mask based on context.
<ide> """
<ide> def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
<ide> return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)
<ide>
<ide> def __getstate__(self):
<del> # Used for deepcopy. The caching can't be pickled by python, since it will
<del> # contain tensor and graph.
<add> # Used for deepcopy. The caching can't be pickled by python, since it
<add> # will contain tensors and graphs.
<ide> state = super().__getstate__()
<ide> state.pop("_dropout_mask_cache", None)
<ide> state.pop("_recurrent_dropout_mask_cache", None)
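
The caching contract above can be observed from the outside through the public `GRUCell`, which mixes in this class. A minimal sketch, assuming eager execution; the shapes and the 0.5 dropout rate are illustrative:

```python
import tensorflow as tf

cell = tf.keras.layers.GRUCell(4, dropout=0.5)
x = tf.ones([2, 8])            # [batch, feature]
states = [tf.zeros([2, 4])]    # [batch, units]
cell(x, states, training=True)  # build the cell once

# Within one batch, repeated calls return the same cached mask(s).
masks_a = cell.get_dropout_mask_for_cell(x, training=True, count=3)
masks_b = cell.get_dropout_mask_for_cell(x, training=True, count=3)

# The RNN layer invokes this between batches so a fresh mask is sampled.
cell.reset_dropout_mask()
masks_c = cell.get_dropout_mask_for_cell(x, training=True, count=3)
```
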
<ide><path>keras/layers/rnn/gru.py
<ide> class GRUCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
<ide> used for the linear transformation of the inputs. Default:
<ide> `glorot_uniform`.
<ide> recurrent_initializer: Initializer for the `recurrent_kernel`
<del> weights matrix, used for the linear transformation of the recurrent state.
<del> Default: `orthogonal`.
<add> weights matrix, used for the linear transformation of the recurrent
<add> state. Default: `orthogonal`.
<ide> bias_initializer: Initializer for the bias vector. Default: `zeros`.
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> `recurrent_kernel` weights matrix. Default: `None`.
<del> bias_regularizer: Regularizer function applied to the bias vector. Default:
<del> `None`.
<add> bias_regularizer: Regularizer function applied to the bias vector.
<add> Default: `None`.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix. Default: `None`.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix. Default: `None`.
<ide> bias_constraint: Constraint function applied to the bias vector. Default:
<ide> `None`.
<ide> dropout: Float between 0 and 1. Fraction of the units to drop for the
<ide> linear transformation of the inputs. Default: 0.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state. Default: 0.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state. Default: 0.
<ide> reset_after: GRU convention (whether to apply reset gate after or
<ide> before matrix multiplication). False = "before",
<ide> True = "after" (default and cuDNN compatible).
<ide>
<ide> Call arguments:
<ide> inputs: A 2D tensor, with shape of `[batch, feature]`.
<del> states: A 2D tensor with shape of `[batch, units]`, which is the state from
<del> the previous time step. For timestep 0, the initial state provided by user
<del> will be feed to cell.
<add> states: A 2D tensor with shape of `[batch, units]`, which is the state
<add> from the previous time step. For timestep 0, the initial state provided
<add> by the user will be fed to the cell.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. Only relevant when `dropout` or
<ide> `recurrent_dropout` is used.
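
A minimal usage sketch of the call signature above (shapes are illustrative): the cell processes one timestep, and wrapping it in `keras.layers.RNN` applies it across the time axis.

```python
import tensorflow as tf

cell = tf.keras.layers.GRUCell(32)
x_step = tf.random.normal([8, 16])       # [batch, feature]
state = [tf.zeros([8, 32])]              # [batch, units]
output, new_state = cell(x_step, state)  # one timestep

# The RNN layer applies the cell across the time dimension.
layer = tf.keras.layers.RNN(cell)
x_seq = tf.random.normal([8, 10, 16])    # [batch, time, feature]
last_output = layer(x_seq)               # [batch, 32]
```
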
<ide> def build(self, input_shape):
<ide> bias_shape = (3 * self.units,)
<ide> else:
<ide> # separate biases for input and recurrent kernels
<del> # Note: the shape is intentionally different from CuDNNGRU biases
<del> # `(2 * 3 * self.units,)`, so that we can distinguish the classes
<del> # when loading and converting saved weights.
<add> # Note: the shape is intentionally different from CuDNNGRU
<add> # biases `(2 * 3 * self.units,)`, so that we can distinguish the
<add> # classes when loading and converting saved weights.
<ide> bias_shape = (2, 3 * self.units)
<ide> self.bias = self.add_weight(
<ide> shape=bias_shape,
<ide> class GRU(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
<ide> 7. Inputs, if use masking, are strictly right-padded.
<ide> 8. Eager execution is enabled in the outermost context.
<ide>
<del> There are two variants of the GRU implementation. The default one is based on
<del> [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden
<del> state before matrix multiplication. The other one is based on
<add> There are two variants of the GRU implementation. The default one is based
<add> on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to
<add> hidden state before matrix multiplication. The other one is based on
<ide> [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.
<ide>
<ide> The second variant is compatible with CuDNNGRU (GPU-only) and allows
<ide> class GRU(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
<ide> matrix. Default: `None`.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> `recurrent_kernel` weights matrix. Default: `None`.
<del> bias_regularizer: Regularizer function applied to the bias vector. Default:
<del> `None`.
<add> bias_regularizer: Regularizer function applied to the bias vector.
<add> Default: `None`.
<ide> activity_regularizer: Regularizer function applied to the output of the
<ide> layer (its "activation"). Default: `None`.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix. Default: `None`.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix. Default: `None`.
<ide> bias_constraint: Constraint function applied to the bias vector. Default:
<ide> `None`.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs. Default: 0.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state. Default: 0.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs. Default: 0.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state. Default: 0.
<ide> return_sequences: Boolean. Whether to return the last output
<ide> in the output sequence, or the full sequence. Default: `False`.
<ide> return_state: Boolean. Whether to return the last state in addition to the
<ide> def __init__(
<ide> and tf.compat.v1.executing_eagerly_outside_functions()
<ide> )
<ide> if tf.config.list_logical_devices("GPU"):
<del> # Only show the message when there is GPU available, user will not care
<del> # about the cuDNN if there isn't any GPU.
<add> # Only show the message when a GPU is available; the user will not
<add> # care about cuDNN if there isn't any GPU.
<ide> if self._could_use_gpu_kernel:
<ide> logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name)
<ide> else:
<ide> def _defun_gru_call(
<ide> ):
<ide> # Use the new defun approach for backend implementation swap.
<ide> # Note that different implementations need to have same function
<del> # signature, eg, the tensor parameters need to have same shape and dtypes.
<add> # signature, eg, the tensor parameters need to have same shape and
<add> # dtypes.
<ide>
<ide> self.reset_dropout_mask()
<ide> dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
<ide> def _defun_gru_call(
<ide> if tf.executing_eagerly():
<ide> device_type = gru_lstm_utils.get_context_device_type()
<ide> can_use_gpu = (
<del> # Either user specified GPU or unspecified but GPU is available.
<add> # Either the user specified a GPU, or it is unspecified but a
<add> # GPU is available.
<ide> (
<ide> device_type == gru_lstm_utils.GPU_DEVICE_NAME
<ide> or (
<ide> def standard_gru(
<ide> init_h: Initial state tensor for the cell output.
<ide> kernel: Weights for cell kernel.
<ide> recurrent_kernel: Weights for cell recurrent kernel.
<del> bias: Weights for cell kernel bias and recurrent bias. The bias contains the
<del> combined input_bias and recurrent_bias.
<add> bias: Weights for cell kernel bias and recurrent bias. The bias contains
<add> the combined input_bias and recurrent_bias.
<ide> mask: Binary tensor of shape `(samples, timesteps)` indicating whether
<ide> a given timestep should be masked. An individual `True` entry indicates
<del> that the corresponding timestep should be utilized, while a `False` entry
<del> indicates that the corresponding timestep should be ignored.
<add> that the corresponding timestep should be utilized, while a `False`
<add> entry indicates that the corresponding timestep should be ignored.
<ide> time_major: Boolean, whether the inputs are in the format of
<ide> [time, batch, feature] or [batch, time, feature].
<ide> go_backwards: Boolean (default False). If True, process the input sequence
<ide> backwards and return the reversed sequence.
<del> sequence_lengths: The lengths of all sequences coming from a variable length
<del> input, such as ragged tensors. If the input has a fixed timestep size,
<del> this should be None.
<add> sequence_lengths: The lengths of all sequences coming from a variable
<add> length input, such as ragged tensors. If the input has a fixed timestep
<add> size, this should be None.
<ide> zero_output_for_mask: Boolean, whether to output zero for masked timestep.
<ide> return_sequences: Boolean. If True, return the recurrent outputs for all
<ide> timesteps in the sequence. If False, only return the output for the
<ide> def gpu_gru(
<ide> bias = tf.split(backend.flatten(bias), 6)
<ide>
<ide> if tf.sysconfig.get_build_info()["is_cuda_build"]:
<del> # Note that the gate order for cuDNN is different from the canonical format.
<del> # canonical format is [z, r, h], whereas cuDNN is [r, z, h]. The swap need
<del> # to be done for kernel, recurrent_kernel, input_bias, recurrent_bias.
<add> # Note that the gate order for cuDNN is different from the canonical
<add> # format. The canonical format is [z, r, h], whereas cuDNN uses
<add> # [r, z, h]. The swap needs to be done for the kernel,
<add> # recurrent_kernel, input_bias, and recurrent_bias.
<ide> # z is update gate weights.
<ide> # r is reset gate weights.
<ide> # h is output gate weights.
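
A small sketch of the reorder this comment describes, with illustrative shapes; the real code operates on the flattened weights, but splitting along the gate axis is equivalent:

```python
import tensorflow as tf

units = 4
kernel = tf.random.normal([16, 3 * units])   # canonical order [z | r | h]
z, r, h = tf.split(kernel, 3, axis=1)
kernel_cudnn = tf.concat([r, z, h], axis=1)  # cuDNN order [r | z | h]
```
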
<ide> def gpu_gru(
<ide> h = tf.squeeze(h, axis=seq_axis)
<ide>
<ide> # In the case of variable length input, the cudnn kernel will fill zeros for
<del> # the output, whereas the default keras behavior is to bring over the previous
<del> # output for t-1, so that in the return_sequence=False case, user can quickly
<del> # get the final effect output instead just 0s at the last timestep.
<del> # In order to mimic the default keras behavior, we copy the final h state as
<del> # the last_output, since it is numerically same as the output.
<add> # the output, whereas the default keras behavior is to bring over the
<add> # previous output for t-1, so that in the return_sequence=False case, user
<add> # can quickly get the final effect output instead just 0s at the last
<add> # timestep. In order to mimic the default keras behavior, we copy the final
<add> # h state as the last_output, since it is numerically same as the output.
<ide> if sequence_lengths is not None:
<ide> last_output = h
<ide>
<ide> def gru_with_backend_selection(
<ide> is used in this case.
<ide> mask: Boolean tensor for masking out the steps within sequence.
<ide> An individual `True` entry indicates that the corresponding timestep
<del> should be utilized, while a `False` entry indicates that the corresponding
<del> timestep should be ignored.
<add> should be utilized, while a `False` entry indicates that the
<add> corresponding timestep should be ignored.
<ide> time_major: Boolean, whether the inputs are in the format of
<ide> [time, batch, feature] or [batch, time, feature].
<ide> go_backwards: Boolean (default False). If True, process the input sequence
<ide> backwards and return the reversed sequence.
<del> sequence_lengths: The lengths of all sequences coming from a variable length
<del> input, such as ragged tensors. If the input has a fixed timestep size,
<del> this should be None.
<add> sequence_lengths: The lengths of all sequences coming from a variable
<add> length input, such as ragged tensors. If the input has a fixed timestep
<add> size, this should be None.
<ide> zero_output_for_mask: Boolean, whether to output zero for masked timestep.
<ide> return_sequences: Boolean. If True, return the recurrent outputs for all
<ide> timesteps in the sequence. If False, only return the output for the
<ide><path>keras/layers/rnn/gru_lstm_test.py
<ide> def test_device_placement(self, layer):
<ide>
<ide> @parameterized.parameters([lstm.LSTM, gru.GRU])
<ide> def test_reset_dropout_mask_between_batch(self, layer):
<del> # See https://github.com/tensorflow/tensorflow/issues/29187 for more details
<add> # See https://github.com/tensorflow/tensorflow/issues/29187 for more
<add> # details
<ide> batch_size = 8
<ide> timestep = 12
<ide> embedding_dim = 10
<ide><path>keras/layers/rnn/gru_lstm_utils.py
<ide> def __deepcopy__(self, memo):
<ide> def canonical_to_params(weights, biases, shape, transpose_weights=False):
<ide> """Utility function convert variable to cuDNN compatible parameter.
<ide>
<del> Note that Keras weights for kernels are different from the cuDNN format. Eg.:
<add> Note that Keras weights for kernels are different from the cuDNN format.
<add> Eg.:
<ide>
<ide> ```
<ide> Keras cuDNN
<ide> def is_sequence_right_padded(mask):
<ide> Mixture of mask/unmasked data: [[True, False, True, False, False]].
<ide>
<ide> Note that for the mixed data example above, the actual data the RNN should see
<del> are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not
<del> pollute the internal states.
<add> are those 2 Trues (indices 0 and 2); the index 1 False should be ignored
<add> and not pollute the internal states.
<ide>
<ide> Args:
<ide> mask: the Boolean tensor with shape [batch, timestep]
<ide> def is_sequence_right_padded(mask):
<ide>
<ide>
<ide> def has_fully_masked_sequence(mask):
<del> # See https://github.com/tensorflow/tensorflow/issues/33148 for more details.
<del> # Cudnn kernel will error out if the input sequence contains any fully masked
<del> # data. We walk around this issue by rerouting the computation to standard
<del> # kernel, until the issue on cudnn side has been fixed.
<del> # For a fully masked sequence, it will contain all Falses. To make it easy to
<add> # See https://github.com/tensorflow/tensorflow/issues/33148 for more
<add> # details. Cudnn kernel will error out if the input sequence contains any
<add> # fully masked data. We work around this issue by rerouting the
<add> # computation to the standard kernel until the issue on the cudnn side
<add> # has been fixed. For a
<add> # fully masked sequence, it will contain all Falses. To make it easy to
<ide> # check, we inverse the boolean, check if any of the sequence has all True.
<ide> return tf.reduce_any(tf.reduce_all(tf.logical_not(mask), axis=1))
<ide>
<ide> def calculate_sequence_by_mask(mask, time_major):
<ide> Consider the following example:
<ide> a = [[True, True, False, False],
<ide> [True, True, True, False]]
<del> It is a (2, 4) tensor, and the corresponding sequence length result should be
<del> 1D tensor with value [2, 3]. Note that the masking tensor must be right
<add> It is a (2, 4) tensor, and the corresponding sequence length result should
<add> be a 1D tensor with value [2, 3]. Note that the masking tensor must be right
<ide> padded that could be checked by, e.g., `is_sequence_right_padded()`.
<ide>
<ide> Args:
<ide> mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if
<ide> time_major=True.
<del> time_major: Boolean, which indicates whether the mask is time major or batch
<del> major.
<add> time_major: Boolean, which indicates whether the mask is time major or
<add> batch major.
<ide> Returns:
<ide> sequence_length: 1D int32 tensor.
<ide> """
<ide> def function_register(func, *args, **kwargs):
<ide> a `ConcreteFunction` object specialized to inputs and execution context.
<ide>
<ide> Raises:
<del> ValueError: When the input function is not a defun wrapped python function.
<add> ValueError: When the input function is not a defun wrapped python
<add> function.
<ide> """
<ide> concrete_func = func.get_concrete_function(*args, **kwargs)
<ide> concrete_func.add_to_graph()
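
The registration boils down to obtaining a `ConcreteFunction` specialized to a signature. A standalone sketch with a public `tf.function`; the `step` function is illustrative, and the utility above additionally adds the result to the current graph:

```python
import tensorflow as tf

@tf.function
def step(x):
    return x * 2.0

# Specialize the function to a concrete input signature.
concrete = step.get_concrete_function(tf.TensorSpec([None], tf.float32))
print(concrete(tf.constant([1.0, 2.0])))  # [2.0, 4.0]
```
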
<ide><path>keras/layers/rnn/gru_test.py
<ide> def test_gru_v2_output_on_multiple_kernel(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_with_masking_layer_GRU(self):
<ide> layer_class = keras.layers.GRU
<ide> def test_with_masking_layer_GRU(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_masking_with_stacking_GRU(self):
<ide> inputs = np.random.random((2, 3, 4))
<ide> def test_float64_GRU(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_return_states_GRU(self):
<ide> layer_class = keras.layers.GRU
<ide> def test_regularizers_GRU(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_statefulness_GRU(self):
<ide> num_samples = 2
<ide> def test_stateful_GRU_training(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_explicit_device_with_go_backward_and_mask(self):
<ide> def test_GRU_runtime(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_GRU_runtime_with_mask(self):
<del> # Masking will affect which backend is selected based on whether the mask
<del> # is strictly right padded.
<add> # Masking will affect which backend is selected based on whether the
<add> # mask is strictly right padded.
<ide> layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)
<ide>
<ide> inputs = keras.layers.Input(
<ide> def test_dropout_gru(self):
<ide>
<ide> def test_recurrent_dropout_with_implementation_restriction(self):
<ide> layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2)
<del> # The implementation is force to 1 due to the limit of recurrent_dropout.
<add> # The implementation is forced to 1 due to the limitation of
<add> # recurrent_dropout.
<ide> self.assertEqual(layer.implementation, 1)
<ide>
<ide> @parameterized.parameters([0, 1, 2])
<ide><path>keras/layers/rnn/gru_v1.py
<ide> class GRUCell(gru.GRUCell):
<ide> recurrent_constraint: Constraint function applied to
<ide> the `recurrent_kernel` weights matrix.
<ide> bias_constraint: Constraint function applied to the bias vector.
<del> dropout: Float between 0 and 1.
<del> Fraction of the units to drop for the linear transformation of the inputs.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs.
<ide> recurrent_dropout: Float between 0 and 1.
<ide> Fraction of the units to drop for
<ide> the linear transformation of the recurrent state.
<ide> class GRU(RNN):
<ide> use_bias: Boolean, whether the layer uses a bias vector.
<ide> kernel_initializer: Initializer for the `kernel` weights matrix,
<ide> used for the linear transformation of the inputs.
<del> recurrent_initializer: Initializer for the `recurrent_kernel`
<del> weights matrix, used for the linear transformation of the recurrent state.
<add> recurrent_initializer: Initializer for the `recurrent_kernel` weights
<add> matrix, used for the linear transformation of the recurrent state.
<ide> bias_initializer: Initializer for the bias vector.
<ide> kernel_regularizer: Regularizer function applied to
<ide> the `kernel` weights matrix.
<ide><path>keras/layers/rnn/gru_v1_test.py
<ide> class GRUGraphRewriteTest(test_combinations.TestCase):
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_gru_feature_parity_v1_v2(self):
<ide> def build_model(layer_cls):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_explicit_device_with_go_backward_and_mask_v1(self):
<ide><path>keras/layers/rnn/legacy_cell_wrappers.py
<ide> def assert_like_rnncell(cell_name, cell):
<ide> class _RNNCellWrapperV1(RNNCell):
<ide> """Base class for cells wrappers V1 compatibility.
<ide>
<del> This class along with `_RNNCellWrapperV2` allows to define cells wrappers that
<del> are compatible with V1 and V2, and defines helper methods for this purpose.
<add> This class, along with `_RNNCellWrapperV2`, allows defining cell wrappers
<add> that are compatible with V1 and V2, and defines helper methods for this
<add> purpose.
<ide> """
<ide>
<ide> def __init__(self, cell, *args, **kwargs):
<ide> def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
<ide> Args:
<ide> inputs: A tensor with wrapped cell's input.
<ide> state: A tensor or tuple of tensors with wrapped cell's state.
<del> cell_call_fn: Wrapped cell's method to use for step computation (cell's
<del> `__call__` or 'call' method).
<add> cell_call_fn: Wrapped cell's method to use for step computation
<add> (cell's `__call__` or 'call' method).
<ide> **kwargs: Additional arguments.
<ide>
<ide> Returns:
<ide> A pair containing:
<ide> - Output: A tensor with cell's output.
<del> - New state: A tensor or tuple of tensors with new wrapped cell's state.
<add> - New state: A tensor or tuple of tensors with new wrapped cell's
<add> state.
<ide> """
<ide> raise NotImplementedError
<ide>
<ide> def __call__(self, inputs, state, scope=None):
<ide> method. We directly use the wrapped cell's `__call__` in the overridden
<ide> wrapper `__call__` method.
<ide>
<del> This allows to use the wrapped cell and the non-wrapped cell equivalently
<del> when using `__call__`.
<add> This allows using the wrapped cell and the non-wrapped cell
<add> equivalently when using `__call__`.
<ide>
<ide> Args:
<ide> inputs: A tensor with wrapped cell's input.
<ide> def __call__(self, inputs, state, scope=None):
<ide> A pair containing:
<ide>
<ide> - Output: A tensor with cell's output.
<del> - New state: A tensor or tuple of tensors with new wrapped cell's state.
<add> - New state: A tensor or tuple of tensors with new wrapped cell's
<add> state.
<ide> """
<ide> return self._call_wrapped_cell(
<ide> inputs, state, cell_call_fn=self.cell.__call__, scope=scope
<ide> def __init__(
<ide> ):
<ide> """Create a cell with added input, state, and/or output dropout.
<ide>
<del> If `variational_recurrent` is set to `True` (**NOT** the default behavior),
<del> then the same dropout mask is applied at every step, as described in:
<del> [A Theoretically Grounded Application of Dropout in Recurrent
<del> Neural Networks. Y. Gal, Z. Ghahramani](https://arxiv.org/abs/1512.05287).
<add> If `variational_recurrent` is set to `True` (**NOT** the default
<add> behavior), then the same dropout mask is applied at every step, as
<add> described in: [A Theoretically Grounded Application of Dropout in
<add> Recurrent Neural Networks. Y. Gal, Z.
<add> Ghahramani](https://arxiv.org/abs/1512.05287).
<ide>
<ide> Otherwise a different dropout mask is applied at every time step.
<ide>
<ide> def __init__(
<ide> Args:
<ide> cell: an RNNCell, a projection to output_size is added to it.
<ide> input_keep_prob: unit Tensor or float between 0 and 1, input keep
<del> probability; if it is constant and 1, no input dropout will be added.
<add> probability; if it is constant and 1, no input dropout will be
<add> added.
<ide> output_keep_prob: unit Tensor or float between 0 and 1, output keep
<del> probability; if it is constant and 1, no output dropout will be added.
<add> probability; if it is constant and 1, no output dropout will be
<add> added.
<ide> state_keep_prob: unit Tensor or float between 0 and 1, output keep
<del> probability; if it is constant and 1, no output dropout will be added.
<del> State dropout is performed on the outgoing states of the cell. **Note**
<del> the state components to which dropout is applied when `state_keep_prob`
<del> is in `(0, 1)` are also determined by the argument
<del> `dropout_state_filter_visitor` (e.g. by default dropout is never applied
<del> to the `c` component of an `LSTMStateTuple`).
<add> probability; if it is constant and 1, no output dropout will be
<add> added. State dropout is performed on the outgoing states of the
<add> cell. **Note** the state components to which dropout is applied when
<add> `state_keep_prob` is in `(0, 1)` are also determined by the argument
<add> `dropout_state_filter_visitor` (e.g. by default dropout is never
<add> applied to the `c` component of an `LSTMStateTuple`).
<ide> variational_recurrent: Python bool. If `True`, then the same dropout
<del> pattern is applied across all time steps per run call. If this parameter
<del> is set, `input_size` **must** be provided.
<del> input_size: (optional) (possibly nested tuple of) `TensorShape` objects
<del> containing the depth(s) of the input tensors expected to be passed in to
<del> the `DropoutWrapper`. Required and used **iff** `variational_recurrent
<del> = True` and `input_keep_prob < 1`.
<add> pattern is applied across all time steps per run call. If this
<add> parameter is set, `input_size` **must** be provided.
<add> input_size: (optional) (possibly nested tuple of) `TensorShape`
<add> objects containing the depth(s) of the input tensors expected to be
<add> passed in to the `DropoutWrapper`. Required and used **iff**
<add> `variational_recurrent = True` and `input_keep_prob < 1`.
<ide> dtype: (optional) The `dtype` of the input, state, and output tensors.
<ide> Required and used **iff** `variational_recurrent = True`.
<ide> seed: (optional) integer, the randomness seed.
<del> dropout_state_filter_visitor: (optional), default: (see below). Function
<del> that takes any hierarchical level of the state and returns a scalar or
<del> depth=1 structure of Python booleans describing which terms in the state
<del> should be dropped out. In addition, if the function returns `True`,
<del> dropout is applied across this sublevel. If the function returns
<del> `False`, dropout is not applied across this entire sublevel.
<del> Default behavior: perform dropout on all terms except the memory (`c`)
<del> state of `LSTMCellState` objects, and don't try to apply dropout to
<del> `TensorArray` objects: ```
<add> dropout_state_filter_visitor: (optional), default: (see below).
<add> Function that takes any hierarchical level of the state and returns
<add> a scalar or depth=1 structure of Python booleans describing which
<add> terms in the state should be dropped out. In addition, if the
<add> function returns `True`, dropout is applied across this sublevel.
<add> If the function returns `False`, dropout is not applied across this
<add> entire sublevel. Default behavior: perform dropout on all terms
<add> except the memory (`c`) state of `LSTMCellState` objects, and don't
<add> try to apply dropout to `TensorArray` objects:
<add> ```
<ide> def dropout_state_filter_visitor(s):
<del> if isinstance(s, LSTMCellState): # Never perform dropout on the c
<del> state. return LSTMCellState(c=False, h=True)
<del> elif isinstance(s, TensorArray): return False return True ```
<add> # Never perform dropout on the c state.
<add> if isinstance(s, LSTMCellState):
<add> return LSTMCellState(c=False, h=True)
<add> elif isinstance(s, TensorArray):
<add> return False
<add> return True
<add> ```
<ide> **kwargs: dict of keyword arguments for base layer.
<ide>
<ide> Raises:
<del> TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
<del> but not `callable`.
<add> TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is
<add> provided but not `callable`.
<ide> ValueError: if any of the keep_probs are not between 0 and 1.
<ide> """
<ide> super().__init__(cell, dtype=dtype, **kwargs)
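
A hedged usage sketch via the `tf.compat.v1` endpoint (exact availability depends on the TF version); the keep probabilities are illustrative:

```python
import tensorflow as tf

base_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(32)
wrapped = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
    base_cell,
    input_keep_prob=0.8,   # dropout on cell inputs
    output_keep_prob=0.8,  # dropout on cell outputs
    state_keep_prob=1.0,   # no state dropout
)
# For variational dropout (same mask at every step), also pass
# variational_recurrent=True together with input_size and dtype.
```
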
<ide> def batch_noise(s, inner_seed):
<ide> ):
<ide> if input_size is None:
<ide> raise ValueError(
<del> "When variational_recurrent=True and input_keep_prob < 1.0 or "
<del> "is unknown, input_size must be provided"
<add> "When variational_recurrent=True and input_keep_prob "
<add> "< 1.0 or is unknown, input_size must be provided"
<ide> )
<ide> self._recurrent_input_noise = _enumerated_map_structure_up_to(
<ide> input_size,
<ide> def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
<ide> Args:
<ide> inputs: A tensor with wrapped cell's input.
<ide> state: A tensor or tuple of tensors with wrapped cell's state.
<del> cell_call_fn: Wrapped cell's method to use for step computation (cell's
<del> `__call__` or 'call' method).
<add> cell_call_fn: Wrapped cell's method to use for step computation
<add> (cell's `__call__` or 'call' method).
<ide> **kwargs: Additional arguments.
<ide>
<ide> Returns:
<ide> A pair containing:
<ide>
<ide> - Output: A tensor with cell's output.
<del> - New state: A tensor or tuple of tensors with new wrapped cell's state.
<add> - New state: A tensor or tuple of tensors with new wrapped cell's
<add> state.
<ide> """
<ide>
<ide> def _should_dropout(p):
<ide> def __init__(self, cell, residual_fn=None, **kwargs):
<ide>
<ide> Args:
<ide> cell: An instance of `RNNCell`.
<del> residual_fn: (Optional) The function to map raw cell inputs and raw cell
<del> outputs to the actual cell outputs of the residual network.
<del> Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
<del> and outputs.
<add> residual_fn: (Optional) The function to map raw cell inputs and raw
<add> cell outputs to the actual cell outputs of the residual network.
<add> Defaults to calling nest.map_structure on (lambda i, o: i + o),
<add> inputs and outputs.
<ide> **kwargs: dict of keyword arguments for base layer.
<ide> """
<ide> super().__init__(cell, **kwargs)
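
A sketch of the default combination described above: when `residual_fn` is `None`, raw inputs and raw outputs are summed elementwise, so the wrapped cell's output size must match its input size.

```python
import tensorflow as tf

def default_residual_fn(inputs, outputs):
    # Equivalent to the documented default.
    return tf.nest.map_structure(lambda i, o: i + o, inputs, outputs)

cell = tf.compat.v1.nn.rnn_cell.ResidualWrapper(
    tf.compat.v1.nn.rnn_cell.GRUCell(16))  # expects 16-dim inputs
```
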
<ide> def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
<ide> Args:
<ide> inputs: cell inputs.
<ide> state: cell state.
<del> cell_call_fn: Wrapped cell's method to use for step computation (cell's
<del> `__call__` or 'call' method).
<add> cell_call_fn: Wrapped cell's method to use for step computation
<add> (cell's `__call__` or 'call' method).
<ide> **kwargs: Additional arguments passed to the wrapped cell's `call`.
<ide>
<ide> Returns:
<ide> Tuple of cell outputs and new state.
<ide>
<ide> Raises:
<ide> TypeError: If cell inputs and outputs have different structure (type).
<del> ValueError: If cell inputs and outputs have different structure (value).
<add> ValueError: If cell inputs and outputs have different structure
<add> (value).
<ide> """
<ide> outputs, new_state = cell_call_fn(inputs, state, **kwargs)
<ide>
<ide><path>keras/layers/rnn/legacy_cells.py
<ide> class RNNCell(base_layer.Layer):
<ide>
<ide> def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
<ide> super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)
<del> # Attribute that indicates whether the cell is a TF RNN cell, due the slight
<del> # difference between TF and Keras RNN cell. Notably the state is not wrapped
<del> # in a list for TF cell where they are single tensor state, whereas keras
<del> # cell will wrap the state into a list, and call() will have to unwrap them.
<add> # Attribute that indicates whether the cell is a TF RNN cell, due to the
<add> # slight difference between TF and Keras RNN cells. Notably, the state
<add> # is not wrapped in a list for a TF cell with a single tensor state,
<add> # whereas a keras cell will wrap the state into a list, and call() will
<add> # have to unwrap it.
<ide> self._is_tf_rnn_cell = True
<ide>
<ide> def __call__(self, inputs, state, scope=None):
<ide> """Run this RNN cell on inputs, starting from the given state.
<ide>
<ide> Args:
<ide> inputs: `2-D` tensor with shape `[batch_size, input_size]`.
<del> state: if `self.state_size` is an integer, this should be a `2-D Tensor`
<del> with shape `[batch_size, self.state_size]`. Otherwise, if
<del> `self.state_size` is a tuple of integers, this should be a tuple with
<del> shapes `[batch_size, s] for s in self.state_size`.
<add> state: if `self.state_size` is an integer, this should be a
<add> `2-D Tensor` with shape `[batch_size, self.state_size]`. Otherwise,
<add> if `self.state_size` is a tuple of integers, this should be a tuple
<add> with shapes `[batch_size, s] for s in self.state_size`.
<ide> scope: VariableScope for the created subgraph; defaults to class name.
<ide>
<ide> Returns:
<ide> A pair containing:
<ide>
<ide> - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
<del> - New state: Either a single `2-D` tensor, or a tuple of tensors matching
<del> the arity and shapes of `state`.
<add> - New state: Either a single `2-D` tensor, or a tuple of tensors
<add> matching the arity and shapes of `state`.
<ide> """
<ide> if scope is not None:
<ide> with tf.compat.v1.variable_scope(
<ide> def _rnn_get_variable(self, getter, *args, **kwargs):
<ide> def state_size(self):
<ide> """size(s) of state(s) used by this cell.
<ide>
<del> It can be represented by an Integer, a TensorShape or a tuple of Integers
<del> or TensorShapes.
<add> It can be represented by an Integer, a TensorShape or a tuple of
<add> Integers or TensorShapes.
<ide> """
<ide> raise NotImplementedError("Abstract method")
<ide>
<ide> def build(self, _):
<ide>
<ide> def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
<ide> if inputs is not None:
<del> # Validate the given batch_size and dtype against inputs if provided.
<add> # Validate the given batch_size and dtype against inputs if
<add> # provided.
<ide> inputs = tf.convert_to_tensor(inputs, name="inputs")
<ide> if batch_size is not None:
<ide> if tf.is_tensor(batch_size):
<ide> def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
<ide> if inputs.shape.dims[0].value != static_batch_size:
<ide> raise ValueError(
<ide> "batch size from input tensor is different from the "
<del> f"input param. Input tensor batch: {inputs.shape.dims[0].value}, "
<add> f"input param. Input tensor batch: "
<add> f"{inputs.shape.dims[0].value}, "
<ide> f"batch_size: {batch_size}"
<ide> )
<ide>
<ide> if dtype is not None and inputs.dtype != dtype:
<ide> raise ValueError(
<ide> "dtype from input tensor is different from the "
<del> f"input param. Input tensor dtype: {inputs.dtype}, dtype: {dtype}"
<add> f"input param. Input tensor dtype: {inputs.dtype}, "
<add> f"dtype: {dtype}"
<ide> )
<ide>
<ide> batch_size = (
<ide> def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
<ide> dtype = inputs.dtype
<ide> if batch_size is None or dtype is None:
<ide> raise ValueError(
<del> "batch_size and dtype cannot be None while constructing initial "
<del> f"state: batch_size={batch_size}, dtype={dtype}"
<add> "batch_size and dtype cannot be None while constructing "
<add> f"initial state: batch_size={batch_size}, dtype={dtype}"
<ide> )
<ide> return self.zero_state(batch_size, dtype)
<ide>
<ide> def zero_state(self, batch_size, dtype):
<ide> a nested list or tuple (of the same structure) of `2-D` tensors with
<ide> the shapes `[batch_size, s]` for each s in `state_size`.
<ide> """
<del> # Try to use the last cached zero_state. This is done to avoid recreating
<del> # zeros, especially when eager execution is enabled.
<add> # Try to use the last cached zero_state. This is done to avoid
<add> # recreating zeros, especially when eager execution is enabled.
<ide> state_size = self.state_size
<ide> is_eager = tf.executing_eagerly()
<ide> if is_eager and _hasattr(self, "_last_zero_state"):
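
A hedged sketch of the call being cached here, using a legacy cell whose `state_size` is an `LSTMStateTuple`:

```python
import tensorflow as tf

cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(32)
init_state = cell.zero_state(batch_size=8, dtype=tf.float32)
# init_state is an LSTMStateTuple with c and h of shape [8, 32].
```
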
<ide> def get_config(self): # pylint: disable=useless-super-delegation
<ide>
<ide> @property
<ide> def _use_input_spec_as_call_signature(self):
<del> # We do not store the shape information for the state argument in the call
<del> # function for legacy RNN cells, so do not generate an input signature.
<add> # We do not store the shape information for the state argument in the
<add> # call function for legacy RNN cells, so do not generate an input
<add> # signature.
<ide> return False
<ide>
<ide>
<ide> class LayerRNNCell(RNNCell):
<ide> """Subclass of RNNCells that act like proper `tf.Layer` objects.
<ide>
<ide> For backwards compatibility purposes, most `RNNCell` instances allow their
<del> `call` methods to instantiate variables via `tf.compat.v1.get_variable`. The
<del> underlying
<del> variable scope thus keeps track of any variables, and returning cached
<del> versions. This is atypical of `tf.layer` objects, which separate this
<del> part of layer building into a `build` method that is only called once.
<add> `call` methods to instantiate variables via `tf.compat.v1.get_variable`.
<add> The underlying variable scope thus keeps track of any variables, and
<add> returns cached versions. This is atypical of `tf.layer` objects, which
<add> separate this part of layer building into a `build` method that is only
<add> called once.
<ide>
<ide> Here we provide a subclass for `RNNCell` objects that act exactly as
<ide> `Layer` objects do. They must provide a `build` method and their
<ide> def __call__(self, inputs, state, scope=None, *args, **kwargs):
<ide>
<ide> Args:
<ide> inputs: `2-D` tensor with shape `[batch_size, input_size]`.
<del> state: if `self.state_size` is an integer, this should be a `2-D Tensor`
<del> with shape `[batch_size, self.state_size]`. Otherwise, if
<del> `self.state_size` is a tuple of integers, this should be a tuple with
<del> shapes `[batch_size, s] for s in self.state_size`.
<add> state: if `self.state_size` is an integer, this should be a `2-D
<add> Tensor` with shape `[batch_size, self.state_size]`. Otherwise, if
<add> `self.state_size` is a tuple of integers, this should be a tuple
<add> with shapes `[batch_size, s] for s in self.state_size`.
<ide> scope: optional cell scope.
<ide> *args: Additional positional arguments.
<ide> **kwargs: Additional keyword arguments.
<ide> def __call__(self, inputs, state, scope=None, *args, **kwargs):
<ide> A pair containing:
<ide>
<ide> - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
<del> - New state: Either a single `2-D` tensor, or a tuple of tensors matching
<del> the arity and shapes of `state`.
<add> - New state: Either a single `2-D` tensor, or a tuple of tensors
<add> matching the arity and shapes of `state`.
<ide> """
<ide> # Bypass RNNCell's variable capturing semantics for LayerRNNCell.
<ide> # Instead, it is up to subclasses to provide a proper build
<ide> class BasicRNNCell(LayerRNNCell):
<ide> num_units: int, The number of units in the RNN cell.
<ide> activation: Nonlinearity to use. Default: `tanh`. It could also be string
<ide> that is within Keras activation function names.
<del> reuse: (optional) Python boolean describing whether to reuse variables in an
<del> existing scope. If not `True`, and the existing scope already has the
<add> reuse: (optional) Python boolean describing whether to reuse variables in
<add> an existing scope. If not `True`, and the existing scope already has the
<ide> given variables, an error is raised.
<ide> name: String, the name of the layer. Layers with the same name will share
<ide> weights, but to avoid mistakes we require reuse=True in such cases.
<ide> def build(self, inputs_shape):
<ide> self.built = True
<ide>
<ide> def call(self, inputs, state):
<del> """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
<add> """Most basic RNN: output = new_state = act(W * input + U * state +
<add> B)."""
<ide> _check_rnn_cell_input_dtypes([inputs, state])
<ide> gate_inputs = tf.matmul(tf.concat([inputs, state], 1), self._kernel)
<ide> gate_inputs = tf.nn.bias_add(gate_inputs, self._bias)
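
A worked sketch of the step above with concrete shapes: concatenating `[inputs, state]` and multiplying by a vertically stacked kernel `[W; U]` equals `W * input + U * state`.

```python
import tensorflow as tf

batch, input_size, units = 2, 3, 4
inputs = tf.random.normal([batch, input_size])
state = tf.random.normal([batch, units])
kernel = tf.random.normal([input_size + units, units])  # [W; U] stacked
bias = tf.zeros([units])

gate_inputs = tf.matmul(tf.concat([inputs, state], 1), kernel)
output = new_state = tf.tanh(tf.nn.bias_add(gate_inputs, bias))
```
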
<ide> class GRUCell(LayerRNNCell):
<ide> Args:
<ide> num_units: int, The number of units in the GRU cell.
<ide> activation: Nonlinearity to use. Default: `tanh`.
<del> reuse: (optional) Python boolean describing whether to reuse variables in an
<del> existing scope. If not `True`, and the existing scope already has the
<del> given variables, an error is raised.
<add> reuse: (optional) Python boolean describing whether to reuse variables in
<add> an existing scope. If not `True`, and the existing scope already has
<add> the given variables, an error is raised.
<ide> kernel_initializer: (optional) The initializer to use for the weight and
<ide> projection matrices.
<ide> bias_initializer: (optional) The initializer to use for the bias.
<ide> class GRUCell(LayerRNNCell):
<ide> the first input). Required when `build` is called before `call`.
<ide> **kwargs: Dict, keyword named properties for common layer attributes, like
<ide> `trainable` etc when constructing the cell from configs of get_config().
<del> References: Learning Phrase Representations using RNN Encoder Decoder for
<del> Statistical
<del> Machine Translation: [Cho et al., 2014]
<add> References: Learning Phrase Representations using RNN Encoder Decoder
<add> for Statistical Machine Translation: [Cho et al., 2014]
<ide> (https://aclanthology.coli.uni-saarland.de/papers/D14-1179/d14-1179)
<ide> ([pdf](http://emnlp2014.org/papers/pdf/EMNLP2014179.pdf))
<ide> """
<ide> def __init__(
<ide>
<ide> Args:
<ide> num_units: int, The number of units in the LSTM cell.
<del> forget_bias: float, The bias added to forget gates (see above). Must set
<del> to `0.0` manually when restoring from CudnnLSTM-trained checkpoints.
<del> state_is_tuple: If True, accepted and returned states are 2-tuples of the
<del> `c_state` and `m_state`. If False, they are concatenated along the
<del> column axis. The latter behavior will soon be deprecated.
<del> activation: Activation function of the inner states. Default: `tanh`. It
<del> could also be string that is within Keras activation function names.
<del> reuse: (optional) Python boolean describing whether to reuse variables in
<del> an existing scope. If not `True`, and the existing scope already has
<del> the given variables, an error is raised.
<del> name: String, the name of the layer. Layers with the same name will share
<del> weights, but to avoid mistakes we require reuse=True in such cases.
<del> dtype: Default dtype of the layer (default of `None` means use the type of
<del> the first input). Required when `build` is called before `call`.
<del> **kwargs: Dict, keyword named properties for common layer attributes, like
<del> `trainable` etc when constructing the cell from configs of get_config().
<del> When restoring from CudnnLSTM-trained checkpoints, must use
<del> `CudnnCompatibleLSTMCell` instead.
<add> forget_bias: float, The bias added to forget gates (see above). Must
<add> be set to `0.0` manually when restoring from CudnnLSTM-trained
<add> checkpoints.
<add> state_is_tuple: If True, accepted and returned states are 2-tuples of
<add> the `c_state` and `m_state`. If False, they are concatenated along
<add> the column axis. The latter behavior will soon be deprecated.
<add> activation: Activation function of the inner states. Default: `tanh`.
<add> It could also be string that is within Keras activation function
<add> names.
<add> reuse: (optional) Python boolean describing whether to reuse variables
<add> in an existing scope. If not `True`, and the existing scope already
<add> has the given variables, an error is raised.
<add> name: String, the name of the layer. Layers with the same name will
<add> share weights, but to avoid mistakes we require reuse=True in such
<add> cases.
<add> dtype: Default dtype of the layer (default of `None` means use the
<add> type of the first input). Required when `build` is called before
<add> `call`.
<add> **kwargs: Dict, keyword named properties for common layer attributes,
<add> like `trainable` etc when constructing the cell from configs of
<add> get_config(). When restoring from CudnnLSTM-trained checkpoints,
<add> must use `CudnnCompatibleLSTMCell` instead.
<ide> """
<ide> warnings.warn(
<ide> "`tf.nn.rnn_cell.BasicLSTMCell` is deprecated and will be "
<ide> def call(self, inputs, state):
<ide> Args:
<ide> inputs: `2-D` tensor with shape `[batch_size, input_size]`.
<ide> state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size,
<del> num_units]`, if `state_is_tuple` has been set to `True`. Otherwise, a
<del> `Tensor` shaped `[batch_size, 2 * num_units]`.
<add> num_units]`, if `state_is_tuple` has been set to `True`. Otherwise,
<add> a `Tensor` shaped `[batch_size, 2 * num_units]`.
<ide>
<ide> Returns:
<ide> A pair containing the new hidden state, and the new state (either a
<ide> def __init__(
<ide> Args:
<ide> num_units: int, The number of units in the LSTM cell.
<ide> use_peepholes: bool, set True to enable diagonal/peephole connections.
<del> cell_clip: (optional) A float value, if provided the cell state is clipped
<del> by this value prior to the cell output activation.
<add> cell_clip: (optional) A float value, if provided the cell state is
<add> clipped by this value prior to the cell output activation.
<ide> initializer: (optional) The initializer to use for the weight and
<ide> projection matrices.
<ide> num_proj: (optional) int, The output dimensionality for the projection
<ide> matrices. If None, no projection is performed.
<del> proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
<del> provided, then the projected values are clipped elementwise to within
<del> `[-proj_clip, proj_clip]`.
<add> proj_clip: (optional) A float value. If `num_proj > 0` and
<add> `proj_clip` is provided, then the projected values are clipped
<add> elementwise to within `[-proj_clip, proj_clip]`.
<ide> num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
<ide> variable_scope partitioner instead.
<ide> num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
<ide> variable_scope partitioner instead.
<del> forget_bias: Biases of the forget gate are initialized by default to 1 in
<del> order to reduce the scale of forgetting at the beginning of the
<del> training. Must set it manually to `0.0` when restoring from CudnnLSTM
<del> trained checkpoints.
<del> state_is_tuple: If True, accepted and returned states are 2-tuples of the
<del> `c_state` and `m_state`. If False, they are concatenated along the
<del> column axis. This latter behavior will soon be deprecated.
<del> activation: Activation function of the inner states. Default: `tanh`. It
<del> could also be string that is within Keras activation function names.
<del> reuse: (optional) Python boolean describing whether to reuse variables in
<del> an existing scope. If not `True`, and the existing scope already has
<del> the given variables, an error is raised.
<del> name: String, the name of the layer. Layers with the same name will share
<del> weights, but to avoid mistakes we require reuse=True in such cases.
<del> dtype: Default dtype of the layer (default of `None` means use the type of
<del> the first input). Required when `build` is called before `call`.
<del> **kwargs: Dict, keyword named properties for common layer attributes, like
<del> `trainable` etc when constructing the cell from configs of get_config().
<del> When restoring from CudnnLSTM-trained checkpoints, use
<del> `CudnnCompatibleLSTMCell` instead.
<add> forget_bias: Biases of the forget gate are initialized by default to 1
<add> in order to reduce the scale of forgetting at the beginning of the
<add> training. Must set it manually to `0.0` when restoring from
<add> CudnnLSTM trained checkpoints.
<add> state_is_tuple: If True, accepted and returned states are 2-tuples of
<add> the `c_state` and `m_state`. If False, they are concatenated along
<add> the column axis. This latter behavior will soon be deprecated.
<add> activation: Activation function of the inner states. Default: `tanh`.
<add> It could also be string that is within Keras activation function
<add> names.
<add> reuse: (optional) Python boolean describing whether to reuse variables
<add> in an existing scope. If not `True`, and the existing scope already
<add> has the given variables, an error is raised.
<add> name: String, the name of the layer. Layers with the same name will
<add> share weights, but to avoid mistakes we require reuse=True in such
<add> cases.
<add> dtype: Default dtype of the layer (default of `None` means use the
<add> type of the first input). Required when `build` is called before
<add> `call`.
<add> **kwargs: Dict, keyword named properties for common layer attributes,
<add> like `trainable` etc when constructing the cell from configs of
<add> get_config(). When restoring from CudnnLSTM-trained checkpoints,
<add> use `CudnnCompatibleLSTMCell` instead.
<ide> """
<ide> warnings.warn(
<ide> "`tf.nn.rnn_cell.LSTMCell` is deprecated and will be "
<ide> def call(self, inputs, state):
<ide>
<ide> Args:
<ide> inputs: input Tensor, must be 2-D, `[batch, input_size]`.
<del> state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
<del> [batch, state_size]`. If `state_is_tuple` is True, this must be a tuple
<del> of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.
<add> state: if `state_is_tuple` is False, this must be a state Tensor,
<add> `2-D, [batch, state_size]`. If `state_is_tuple` is True, this must
<add> be a tuple of state Tensors, both `2-D`, with column sizes `c_state`
<add> and `m_state`.
<ide>
<ide> Returns:
<ide> A tuple containing:
<ide> def call(self, inputs, state):
<ide> Here output_dim is:
<ide> num_proj if num_proj was set,
<ide> num_units otherwise.
<del> - Tensor(s) representing the new state of LSTM after reading `inputs` when
<del> the previous state was `state`. Same type and shape(s) as `state`.
<add> - Tensor(s) representing the new state of LSTM after reading `inputs`
<add> when the previous state was `state`. Same type and shape(s) as
<add> `state`.
<ide>
<ide> Raises:
<ide> ValueError: If input size cannot be inferred from inputs via
<ide> def __init__(self, cells, state_is_tuple=True):
<ide>
<ide> Args:
<ide> cells: list of RNNCells that will be composed in this order.
<del> state_is_tuple: If True, accepted and returned states are n-tuples, where
<del> `n = len(cells)`. If False, the states are all concatenated along the
<del> column axis. This latter behavior will soon be deprecated.
<add> state_is_tuple: If True, accepted and returned states are n-tuples,
<add> where `n = len(cells)`. If False, the states are all concatenated
<add> along the column axis. This latter behavior will soon be
<add> deprecated.
<ide>
<ide> Raises:
<del> ValueError: if cells is empty (not allowed), or at least one of the cells
<del> returns a state tuple but the flag `state_is_tuple` is `False`.
<add> ValueError: if cells is empty (not allowed), or at least one of the
<add> cells returns a state tuple but the flag `state_is_tuple` is
<add> `False`.
<ide> """
<ide> logging.warning(
<ide> "`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class "
<ide> def zero_state(self, batch_size, dtype):
<ide> )
<ide> else:
<ide> # We know here that state_size of each cell is not a tuple and
<del> # presumably does not contain TensorArrays or anything else fancy
<add> # presumably does not contain TensorArrays or anything else
<add> # fancy
<ide> return super().zero_state(batch_size, dtype)
<ide>
<ide> @property
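
A hedged usage sketch of the deprecated stacking wrapper; new code can use `tf.keras.layers.StackedRNNCells` instead.

```python
import tensorflow as tf

stacked = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
    [tf.compat.v1.nn.rnn_cell.GRUCell(16) for _ in range(2)],
    state_is_tuple=True,  # state is a 2-tuple, one entry per layer
)
```
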
<ide> def call(self, inputs, state):
<ide> if self._state_is_tuple:
<ide> if not tf.nest.is_nested(state):
<ide> raise ValueError(
<del> f"Expected state to be a tuple of length {len(self.state_size)}"
<add> f"Expected state to be a tuple of length "
<add> f"{len(self.state_size)}"
<ide> f", but received: {state}"
<ide> )
<ide> cur_state = state[i]
<ide><path>keras/layers/rnn/lstm.py
<ide> class LSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
<ide> (`tanh`). If you pass `None`, no activation is applied (ie. "linear"
<ide> activation: `a(x) = x`).
<ide> recurrent_activation: Activation function to use for the recurrent step.
<del> Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied
<del> (ie. "linear" activation: `a(x) = x`).
<add> Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
<add> applied (ie. "linear" activation: `a(x) = x`).
<ide> use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
<ide> kernel_initializer: Initializer for the `kernel` weights matrix, used for
<ide> the linear transformation of the inputs. Default: `glorot_uniform`.
<ide> class LSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
<ide> matrix. Default: `None`.
<ide> recurrent_regularizer: Regularizer function applied to
<ide> the `recurrent_kernel` weights matrix. Default: `None`.
<del> bias_regularizer: Regularizer function applied to the bias vector. Default:
<del> `None`.
<add> bias_regularizer: Regularizer function applied to the bias vector.
<add> Default: `None`.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix. Default: `None`.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix. Default: `None`.
<ide> bias_constraint: Constraint function applied to the bias vector. Default:
<ide> `None`.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs. Default: 0.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state. Default: 0.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs. Default: 0.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state. Default: 0.
<ide>
<ide> Call arguments:
<ide> inputs: A 2D tensor, with shape of `[batch, feature]`.
<ide> class LSTM(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
<ide> matrix. Default: `None`.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> `recurrent_kernel` weights matrix. Default: `None`.
<del> bias_regularizer: Regularizer function applied to the bias vector. Default:
<del> `None`.
<add> bias_regularizer: Regularizer function applied to the bias vector.
<add> Default: `None`.
<ide> activity_regularizer: Regularizer function applied to the output of the
<ide> layer (its "activation"). Default: `None`.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix. Default: `None`.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix. Default: `None`.
<ide> bias_constraint: Constraint function applied to the bias vector. Default:
<ide> `None`.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs. Default: 0.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state. Default: 0.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs. Default: 0.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state. Default: 0.
<ide> return_sequences: Boolean. Whether to return the last output in the output
<ide> sequence, or the full sequence. Default: `False`.
<ide> return_state: Boolean. Whether to return the last state in addition to the
<ide> output. Default: `False`.
<del> go_backwards: Boolean (default `False`). If True, process the input sequence
<del> backwards and return the reversed sequence.
<del> stateful: Boolean (default `False`). If True, the last state for each sample
<del> at index i in a batch will be used as initial state for the sample of
<del> index i in the following batch.
<add> go_backwards: Boolean (default `False`). If True, process the input
<add> sequence backwards and return the reversed sequence.
<add> stateful: Boolean (default `False`). If True, the last state for each
<add> sample at index i in a batch will be used as initial state for the sample
<add> of index i in the following batch.
<ide> time_major: The shape format of the `inputs` and `outputs` tensors.
<ide> If True, the inputs and outputs will be in shape
<ide> `[timesteps, batch, feature]`, whereas in the False case, it will be
<ide> class LSTM(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
<ide> default this function accepts input and emits output in batch-major
<ide> form.
<ide> unroll: Boolean (default `False`). If True, the network will be unrolled,
<del> else a symbolic loop will be used. Unrolling can speed-up a RNN, although
<del> it tends to be more memory-intensive. Unrolling is only suitable for short
<del> sequences.
<add> else a symbolic loop will be used. Unrolling can speed-up a RNN,
<add> although it tends to be more memory-intensive. Unrolling is only
<add> suitable for short sequences.
<ide>
<ide> Call arguments:
<ide> inputs: A 3D tensor with shape `[batch, timesteps, feature]`.
<ide> mask: Binary tensor of shape `[batch, timesteps]` indicating whether
<ide> a given timestep should be masked (optional, defaults to `None`).
<ide> An individual `True` entry indicates that the corresponding timestep
<del> should be utilized, while a `False` entry indicates that the corresponding
<del> timestep should be ignored.
<add> should be utilized, while a `False` entry indicates that the
<add> corresponding timestep should be ignored.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. This argument is passed to the cell
<ide> when calling it. This is only relevant if `dropout` or
<ide> def __init__(
<ide> and tf.compat.v1.executing_eagerly_outside_functions()
<ide> )
<ide> if tf.config.list_logical_devices("GPU"):
<del> # Only show the message when there is GPU available, user will not care
<del> # about the cuDNN if there isn't any GPU.
<add> # Only show the message when there is a GPU available; the user will
<add> # not care about the cuDNN if there isn't any GPU.
<ide> if self._could_use_gpu_kernel:
<ide> logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name)
<ide> else:
<ide> def step(inputs, states):
<ide> else:
<ide> # Use the new defun approach for backend implementation swap.
<ide> # Note that different implementations need to have same function
<del> # signature, eg, the tensor parameters need to have same shape and dtypes.
<del> # Since the cuDNN has an extra set of bias, those bias will be passed to
<del> # both normal and cuDNN implementations.
<add> # signature, eg, the tensor parameters need to have same shape and
<add> # dtypes. Since the cuDNN has an extra set of bias, those biases will
<add> # be passed to both normal and cuDNN implementations.
<ide> self.reset_dropout_mask()
<ide> dropout_mask = self.get_dropout_mask_for_cell(
<ide> inputs, training, count=4
<ide> def step(inputs, states):
<ide> if tf.executing_eagerly():
<ide> device_type = gru_lstm_utils.get_context_device_type()
<ide> can_use_gpu = (
<del> # Either user specified GPU or unspecified but GPU is available.
<add> # Either user specified GPU or unspecified but GPU is
<add> # available.
<ide> (
<ide> device_type == gru_lstm_utils.GPU_DEVICE_NAME
<ide> or (
<ide> def step(inputs, states):
<ide> )
<ide> )
<ide> )
<del> # Under eager context, check the device placement and prefer the
<del> # GPU implementation when GPU is available.
<add> # Under eager context, check the device placement and prefer
<add> # the GPU implementation when GPU is available.
<ide> if can_use_gpu:
<ide> last_output, outputs, new_h, new_c, runtime = gpu_lstm(
<ide> **gpu_lstm_kwargs
<ide> def standard_lstm(
<ide> removed since cuDNN implementation does not support that.
<ide>
<ide> Note that the first half of the bias tensor should be ignored by this impl.
<del> The cuDNN impl need an extra set of input gate bias. In order to make the both
<del> function take same shape of parameter, that extra set of bias is also feed
<add> The cuDNN impl needs an extra set of input gate bias. In order to make
<add> both functions take the same shape of parameters, that extra set of bias
<add> is also fed
<ide> here.
<ide>
<ide> Args:
<ide> def standard_lstm(
<ide> is used in this case.
<ide> mask: Boolean tensor for mask out the steps within sequence.
<ide> An individual `True` entry indicates that the corresponding timestep
<del> should be utilized, while a `False` entry indicates that the corresponding
<del> timestep should be ignored.
<add> should be utilized, while a `False` entry indicates that the
<add> corresponding timestep should be ignored.
<ide> time_major: boolean, whether the inputs are in the format of
<ide> [time, batch, feature] or [batch, time, feature].
<ide> go_backwards: Boolean (default False). If True, process the input sequence
<ide> backwards and return the reversed sequence.
<del> sequence_lengths: The lengths of all sequences coming from a variable length
<del> input, such as ragged tensors. If the input has a fixed timestep size,
<del> this should be None.
<add> sequence_lengths: The lengths of all sequences coming from a variable
<add> length input, such as ragged tensors. If the input has a fixed timestep
<add> size, this should be None.
<ide> zero_output_for_mask: Boolean, whether to output zero for masked timestep.
<ide> return_sequences: Boolean. If True, return the recurrent outputs for all
<ide> timesteps in the sequence. If False, only return the output for the
<ide> def gpu_lstm(
<ide> sequence_lengths,
<ide> return_sequences,
<ide> ):
<del> """LSTM with either cuDNN or ROCm implementation which is only available for GPU.
<add> """LSTM with either cuDNN or ROCm implementation which is only available for
<add> GPU.
<ide>
<del> Note that currently only right padded data is supported, or the result will be
<del> polluted by the unmasked data which should be filtered.
<add> Note that currently only right padded data is supported; otherwise the
<add> result will be polluted by the unmasked data which should be filtered.
<ide>
<ide> Args:
<ide> inputs: Input tensor of LSTM layer.
<ide> def gpu_lstm(
<ide> bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
<ide> is used in this case.
<ide> mask: Boolean tensor for mask out the steps within sequence. An individual
<del> `True` entry indicates that the corresponding timestep should be utilized,
<del> while a `False` entry indicates that the corresponding timestep should be
<del> ignored.
<add> `True` entry indicates that the corresponding timestep should be
<add> utilized, while a `False` entry indicates that the corresponding
<add> timestep should be ignored.
<ide> time_major: Boolean, whether the inputs are in the format of [time, batch,
<ide> feature] or [batch, time, feature].
<ide> go_backwards: Boolean (default False). If True, process the input sequence
<ide> backwards and return the reversed sequence.
<del> sequence_lengths: The lengths of all sequences coming from a variable length
<del> input, such as ragged tensors. If the input has a fixed timestep size,
<del> this should be None.
<add> sequence_lengths: The lengths of all sequences coming from a variable
<add> length input, such as ragged tensors. If the input has a fixed timestep
<add> size, this should be None.
<ide> return_sequences: Boolean. If True, return the recurrent outputs for all
<ide> timesteps in the sequence. If False, only return the output for the
<ide> last timestep, matching the CPU function output format.
<ide> def gpu_lstm(
<ide> full_bias = tf.concat((tf.zeros_like(bias), bias), 0)
<ide>
<ide> if tf.sysconfig.get_build_info()["is_rocm_build"]:
<del> # ROCm MIOpen's weight sequence for LSTM is different from both canonical
<del> # and Cudnn format
<add> # ROCm MIOpen's weight sequence for LSTM is different from both
<add> # canonical and Cudnn format
<ide> # MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o]
<ide> # i is input gate weights.
<ide> # f is forget gate weights.
<ide> def gpu_lstm(
<ide> c = tf.squeeze(c, axis=seq_axis)
<ide>
<ide> # In the case of variable length input, the cudnn kernel will fill zeros for
<del> # the output, whereas the default keras behavior is to bring over the previous
<del> # output for t-1, so that in the return_sequence=False case, user can quickly
<del> # get the final effect output instead just 0s at the last timestep.
<del> # In order to mimic the default keras behavior, we copy the final h state as
<del> # the last_output, since it is numerically same as the output.
<add> # the output, whereas the default keras behavior is to bring over the
<add> # previous output for t-1, so that in the return_sequence=False case, user
<add> # can quickly get the final effect output instead of just 0s at the last
<add> # timestep. In order to mimic the default keras behavior, we copy the final
<add> # h state as the last_output, since it is numerically the same as the output.
<ide> if sequence_lengths is not None:
<ide> last_output = h
<ide>
<ide> def lstm_with_backend_selection(
<ide> is used in this case.
<ide> mask: Boolean tensor for mask out the steps within sequence.
<ide> An individual `True` entry indicates that the corresponding timestep
<del> should be utilized, while a `False` entry indicates that the corresponding
<del> timestep should be ignored.
<add> should be utilized, while a `False` entry indicates that the
<add> corresponding timestep should be ignored.
<ide> time_major: Boolean, whether the inputs are in the format of
<ide> [time, batch, feature] or [batch, time, feature].
<ide> go_backwards: Boolean (default False). If True, process the input sequence
<ide> backwards and return the reversed sequence.
<del> sequence_lengths: The lengths of all sequences coming from a variable length
<del> input, such as ragged tensors. If the input has a fixed timestep size,
<del> this should be None.
<add> sequence_lengths: The lengths of all sequences coming from a variable
<add> length input, such as ragged tensors. If the input has a fixed timestep
<add> size, this should be None.
<ide> zero_output_for_mask: Boolean, whether to output zero for masked timestep.
<ide> return_sequences: Boolean. If True, return the recurrent outputs for all
<ide> timesteps in the sequence. If False, only return the output for the
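
The backend-selection docstrings above can be exercised end to end; a small hedged sketch (which kernel actually runs depends on the installed TF build and available devices):

```python
import numpy as np
import tensorflow as tf

# With a strictly right-padded mask the layer may pick the fused GPU
# kernel; otherwise it falls back to the generic implementation.
layer = tf.keras.layers.LSTM(8, return_sequences=True)
x = np.random.rand(2, 5, 3).astype("float32")
mask = tf.constant([[True, True, True, False, False],
                    [True, True, True, True, True]])  # right padded
y = layer(x, mask=mask)
print(y.shape)  # (2, 5, 8)
```
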
<ide><path>keras/layers/rnn/lstm_test.py
<ide> def test_specify_state_with_masking(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_return_state(self):
<ide> num_states = 2
<ide> def test_initial_states_as_other_inputs(self):
<ide> @parameterized.named_parameters(("v0", 0), ("v1", 1), ("v2", 2))
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_implementation_mode_LSTM(self, implementation_mode):
<ide> num_samples = 2
<ide> def test_implementation_mode_LSTM(self, implementation_mode):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_masking_with_stacking_LSTM(self):
<ide> inputs = np.random.random((2, 3, 4))
<ide> def test_regularizers_LSTM(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> def test_statefulness_LSTM(self):
<ide> num_samples = 2
<ide> def test_bidirectional(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_explicit_device_with_go_backward_and_mask(self):
<ide> def test_LSTM_runtime(self):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_LSTM_runtime_with_mask(self):
<del> # Masking will affect which backend is selected based on whether the mask
<del> # is strictly right padded.
<add> # Masking will affect which backend is selected based on whether the
<add> # mask is strictly right padded.
<ide> layer = keras.layers.LSTM(self.rnn_state_size, return_runtime=True)
<ide>
<ide> inputs = keras.layers.Input(
<ide> def test_dropout_LSTM(self):
<ide>
<ide> def test_recurrent_dropout_with_implementation_restriction(self):
<ide> layer = keras.layers.LSTM(2, recurrent_dropout=0.1, implementation=2)
<del> # The implementation is force to 1 due to the limit of recurrent_dropout.
<add> # The implementation is forced to 1 due to the limit of
<add> # recurrent_dropout.
<ide> self.assertEqual(layer.implementation, 1)
<ide>
<ide> @parameterized.parameters([0, 1, 2])
<ide><path>keras/layers/rnn/lstm_v1_test.py
<ide> class LSTMGraphRewriteTest(test_combinations.TestCase):
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_lstm_feature_parity_v1_v2(self):
<ide> def build_model(layer_cls):
<ide>
<ide> @tf.test.disable_with_predicate(
<ide> pred=tf.test.is_built_with_rocm,
<del> skip_message="Skipping as ROCm MIOpen does not support padded input yet.",
<add> skip_message="Skipping as ROCm MIOpen does not support padded "
<add> "input yet.",
<ide> )
<ide> @test_utils.run_v2_only
<ide> def test_explicit_device_with_go_backward_and_mask_v1(self):
<ide><path>keras/layers/rnn/rnn_utils.py
<ide> def standardize_args(inputs, initial_state, constants, num_constants):
<ide> """
<ide> if isinstance(inputs, list):
<ide> # There are several situations here:
<del> # In the graph mode, __call__ will be only called once. The initial_state
<del> # and constants could be in inputs (from file loading).
<add> # In the graph mode, __call__ will be only called once. The
<add> # initial_state and constants could be in inputs (from file loading).
<ide> # In the eager mode, __call__ will be called twice, once during
<ide> # rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be
<del> # model.fit/train_on_batch/predict with real np data. In the second case,
<del> # the inputs will contain initial_state and constants as eager tensor.
<add> # model.fit/train_on_batch/predict with real np data. In the second
<add> # case, the inputs will contain initial_state and constants as eager
<add> # tensor.
<ide> #
<ide> # For either case, the real input is the first item in the list, which
<del> # could be a nested structure itself. Then followed by initial_states, which
<del> # could be a list of items, or list of list if the initial_state is complex
<del> # structure, and finally followed by constants which is a flat list.
<add> # could be a nested structure itself. Then followed by initial_states,
<add> # which could be a list of items, or list of list if the initial_state
<add> # is complex structure, and finally followed by constants which is a
<add> # flat list.
<ide> assert initial_state is None and constants is None
<ide> if num_constants:
<ide> constants = inputs[-num_constants:]
<ide> def generate_zero_filled_state(batch_size_tensor, state_size, dtype):
<ide> """Generate a zero filled tensor with shape [batch_size, state_size]."""
<ide> if batch_size_tensor is None or dtype is None:
<ide> raise ValueError(
<del> "batch_size and dtype cannot be None while constructing initial state. "
<del> f"Received: batch_size={batch_size_tensor}, dtype={dtype}"
<add> "batch_size and dtype cannot be None while constructing initial "
<add> f"state. Received: batch_size={batch_size_tensor}, dtype={dtype}"
<ide> )
<ide>
<ide> def create_zeros(unnested_state_size):
<ide> def create_zeros(unnested_state_size):
<ide> def caching_device(rnn_cell):
<ide> """Returns the caching device for the RNN variable.
<ide>
<del> This is useful for distributed training, when variable is not located as same
<del> device as the training worker. By enabling the device cache, this allows
<del> worker to read the variable once and cache locally, rather than read it every
<del> time step from remote when it is needed.
<add> This is useful for distributed training, when the variable is not located
<add> on the same device as the training worker. By enabling the device cache,
<add> the worker can read the variable once and cache it locally, rather than
<add> read it from the remote device at every time step.
<ide>
<del> Note that this is assuming the variable that cell needs for each time step is
<del> having the same value in the forward path, and only gets updated in the
<del> backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the
<del> cell body relies on any variable that gets updated every time step, then
<add> Note that this assumes the variable that the cell needs for each time step
<add> has the same value in the forward path, and only gets updated in the
<add> backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If
<add> the cell body relies on any variable that gets updated every time step, then
<ide> caching device will cause it to read the stale value.
<ide>
<ide> Args:
<ide> def caching_device(rnn_cell):
<ide> return None
<ide> if not getattr(rnn_cell, "_enable_caching_device", False):
<ide> return None
<del> # Don't set a caching device when running in a loop, since it is possible that
<del> # train steps could be wrapped in a tf.while_loop. In that scenario caching
<del> # prevents forward computations in loop iterations from re-reading the
<del> # updated weights.
<add> # Don't set a caching device when running in a loop, since it is possible
<add> # that train steps could be wrapped in a tf.while_loop. In that scenario
<add> # caching prevents forward computations in loop iterations from re-reading
<add> # the updated weights.
<ide> if control_flow_util.IsInWhileLoop(tf.compat.v1.get_default_graph()):
<ide> logging.warning(
<ide> "Variable read device caching has been disabled because the "
<ide> def config_for_enable_caching_device(rnn_cell):
<ide>
<ide> Returns:
<ide> A dict which contains the JSON config for enable_caching_device value or
<del> empty dict if the enable_caching_device value is same as the default value.
<add> empty dict if the enable_caching_device value is same as the default
<add> value.
<ide> """
<ide> default_enable_caching_device = (
<ide> tf.compat.v1.executing_eagerly_outside_functions()
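
A simplified model of the zero-state helper documented above (the real `generate_zero_filled_state` also validates dtypes and handles TensorShape entries):

```python
import tensorflow as tf

# One zero tensor of shape [batch_size, size] per entry of a possibly
# nested state_size, mirroring the nesting (e.g. an LSTM's [units, units]).
def zero_filled_state(batch_size, state_size, dtype=tf.float32):
    return tf.nest.map_structure(
        lambda size: tf.zeros([batch_size, size], dtype=dtype), state_size)

print(zero_filled_state(4, [8, 8]))  # two (4, 8) zero tensors
```
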
<ide><path>keras/layers/rnn/simple_rnn.py
<ide> class SimpleRNNCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
<ide> used for the linear transformation of the inputs. Default:
<ide> `glorot_uniform`.
<ide> recurrent_initializer: Initializer for the `recurrent_kernel`
<del> weights matrix, used for the linear transformation of the recurrent state.
<del> Default: `orthogonal`.
<add> weights matrix, used for the linear transformation of the recurrent
<add> state. Default: `orthogonal`.
<ide> bias_initializer: Initializer for the bias vector. Default: `zeros`.
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> `recurrent_kernel` weights matrix. Default: `None`.
<del> bias_regularizer: Regularizer function applied to the bias vector. Default:
<del> `None`.
<add> bias_regularizer: Regularizer function applied to the bias vector.
<add> Default: `None`.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix. Default: `None`.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix. Default: `None`.
<ide> bias_constraint: Constraint function applied to the bias vector. Default:
<ide> `None`.
<del> dropout: Float between 0 and 1. Fraction of the units to drop for the linear
<del> transformation of the inputs. Default: 0.
<del> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
<del> the linear transformation of the recurrent state. Default: 0.
<add> dropout: Float between 0 and 1. Fraction of the units to drop for the
<add> linear transformation of the inputs. Default: 0.
<add> recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
<add> for the linear transformation of the recurrent state. Default: 0.
<ide>
<ide> Call arguments:
<ide> inputs: A 2D tensor, with shape of `[batch, feature]`.
<del> states: A 2D tensor with shape of `[batch, units]`, which is the state from
<del> the previous time step. For timestep 0, the initial state provided by user
<del> will be feed to cell.
<add> states: A 2D tensor with shape of `[batch, units]`, which is the state
<add> from the previous time step. For timestep 0, the initial state provided
<add> by user will be feed to cell.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. Only relevant when `dropout` or
<ide> `recurrent_dropout` is used.
<ide> class SimpleRNN(RNN):
<ide> used for the linear transformation of the inputs. Default:
<ide> `glorot_uniform`.
<ide> recurrent_initializer: Initializer for the `recurrent_kernel`
<del> weights matrix, used for the linear transformation of the recurrent state.
<del> Default: `orthogonal`.
<add> weights matrix, used for the linear transformation of the recurrent
<add> state. Default: `orthogonal`.
<ide> bias_initializer: Initializer for the bias vector. Default: `zeros`.
<ide> kernel_regularizer: Regularizer function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<ide> recurrent_regularizer: Regularizer function applied to the
<ide> `recurrent_kernel` weights matrix. Default: `None`.
<del> bias_regularizer: Regularizer function applied to the bias vector. Default:
<del> `None`.
<add> bias_regularizer: Regularizer function applied to the bias vector.
<add> Default: `None`.
<ide> activity_regularizer: Regularizer function applied to the output of the
<ide> layer (its "activation"). Default: `None`.
<ide> kernel_constraint: Constraint function applied to the `kernel` weights
<ide> matrix. Default: `None`.
<del> recurrent_constraint: Constraint function applied to the `recurrent_kernel`
<del> weights matrix. Default: `None`.
<add> recurrent_constraint: Constraint function applied to the
<add> `recurrent_kernel` weights matrix. Default: `None`.
<ide> bias_constraint: Constraint function applied to the bias vector. Default:
<ide> `None`.
<ide> dropout: Float between 0 and 1.
<del> Fraction of the units to drop for the linear transformation of the inputs.
<del> Default: 0.
<add> Fraction of the units to drop for the linear transformation of the
<add> inputs. Default: 0.
<ide> recurrent_dropout: Float between 0 and 1.
<ide> Fraction of the units to drop for the linear transformation of the
<ide> recurrent state. Default: 0.
<ide> class SimpleRNN(RNN):
<ide> inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
<ide> mask: Binary tensor of shape `[batch, timesteps]` indicating whether
<ide> a given timestep should be masked. An individual `True` entry indicates
<del> that the corresponding timestep should be utilized, while a `False` entry
<del> indicates that the corresponding timestep should be ignored.
<add> that the corresponding timestep should be utilized, while a `False`
<add> entry indicates that the corresponding timestep should be ignored.
<ide> training: Python boolean indicating whether the layer should behave in
<ide> training mode or in inference mode. This argument is passed to the cell
<ide> when calling it. This is only relevant if `dropout` or
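
The cell call contract described in the docstring above, as a short runnable sketch:

```python
import tensorflow as tf

# A 2D [batch, feature] input plus the previous [batch, units] state
# produce the next output/state pair.
cell = tf.keras.layers.SimpleRNNCell(5)
x = tf.zeros([2, 3])
prev_state = [tf.zeros([2, 5])]
output, new_state = cell(x, prev_state)
print(output.shape)  # (2, 5)
```
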
<ide><path>keras/layers/rnn/stacked_rnn_cells.py
<ide> def __init__(self, cells, **kwargs):
<ide> f"Received cell without a `state_size`: {cell}"
<ide> )
<ide> self.cells = cells
<del> # reverse_state_order determines whether the state size will be in a reverse
<del> # order of the cells' state. User might want to set this to True to keep the
<del> # existing behavior. This is only useful when use RNN(return_state=True)
<del> # since the state will be returned as the same order of state_size.
<add> # reverse_state_order determines whether the state size will be in a
<add> # reverse order of the cells' state. User might want to set this to True
<add> # to keep the existing behavior. This is only useful when using
<add> # RNN(return_state=True) since the state will be returned in the same
<add> # order as state_size.
<ide> self.reverse_state_order = kwargs.pop("reverse_state_order", False)
<ide> if self.reverse_state_order:
<ide> logging.warning(
<ide> def call(self, inputs, states, constants=None, training=None, **kwargs):
<ide> new_nested_states = []
<ide> for cell, states in zip(self.cells, nested_states):
<ide> states = states if tf.nest.is_nested(states) else [states]
<del> # TF cell does not wrap the state into list when there is only one state.
<add> # TF cell does not wrap the state into list when there is only one
<add> # state.
<ide> is_tf_rnn_cell = getattr(cell, "_is_tf_rnn_cell", None) is not None
<ide> states = (
<ide> states[0] if len(states) == 1 and is_tf_rnn_cell else states
<ide> def call(self, inputs, states, constants=None, training=None, **kwargs):
<ide> kwargs["training"] = training
<ide> else:
<ide> kwargs.pop("training", None)
<del> # Use the __call__ function for callable objects, eg layers, so that it
<del> # will have the proper name scopes for the ops, etc.
<add> # Use the __call__ function for callable objects, eg layers, so that
<add> # it will have the proper name scopes for the ops, etc.
<ide> cell_call_fn = cell.__call__ if callable(cell) else cell.call
<ide> if generic_utils.has_arg(cell.call, "constants"):
<ide> inputs, states = cell_call_fn(
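
A hedged sketch of the per-cell loop above: `StackedRNNCells` feeds each cell's output into the next one, carrying one state entry per cell:

```python
import tensorflow as tf

cells = [tf.keras.layers.LSTMCell(4), tf.keras.layers.LSTMCell(4)]
stacked = tf.keras.layers.StackedRNNCells(cells)
layer = tf.keras.layers.RNN(stacked)
y = layer(tf.zeros([2, 7, 3]))  # [batch, timesteps, feature]
print(y.shape)  # (2, 4)
```
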
<ide><path>keras/layers/rnn/time_distributed.py
<ide> class TimeDistributed(Wrapper):
<ide> Every input should be at least 3D, and the dimension of index one of the
<ide> first input will be considered to be the temporal dimension.
<ide>
<del> Consider a batch of 32 video samples, where each sample is a 128x128 RGB image
<del> with `channels_last` data format, across 10 timesteps.
<add> Consider a batch of 32 video samples, where each sample is a 128x128 RGB
<add> image with `channels_last` data format, across 10 timesteps.
<ide> The batch input shape is `(32, 10, 128, 128, 3)`.
<ide>
<ide> You can then use `TimeDistributed` to apply the same `Conv2D` layer to each
<ide> class TimeDistributed(Wrapper):
<ide> >>> outputs.shape
<ide> TensorShape([None, 10, 126, 126, 64])
<ide>
<del> Because `TimeDistributed` applies the same instance of `Conv2D` to each of the
<del> timestamps, the same set of weights are used at each timestamp.
<add> Because `TimeDistributed` applies the same instance of `Conv2D` to each of
<add> the timestamps, the same set of weights are used at each timestamp.
<ide>
<ide> Args:
<ide> layer: a `tf.keras.layers.Layer` instance.
<ide> def __init__(self, layer, **kwargs):
<ide> def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):
<ide> """Finds non-specific dimensions in the static shapes.
<ide>
<del> The static shapes are replaced with the corresponding dynamic shapes of the
<del> tensor.
<add> The static shapes are replaced with the corresponding dynamic shapes of
<add> the tensor.
<ide> Args:
<ide> init_tuple: a tuple, the first part of the output shape
<ide> tensor: the tensor from which to get the (static and dynamic) shapes
<ide> def step(x, _):
<ide> y, tf.reshape, y, output_shape
<ide> )
<ide> if not tf.executing_eagerly():
<del> # Set the static shape for the result since it might be lost during
<del> # array_ops reshape, eg, some `None` dim in the result could be
<del> # inferred.
<add> # Set the static shape for the result since it might be lost
<add> # during array_ops reshape, eg, some `None` dim in the
<add> # result could be inferred.
<ide> tf.__internal__.nest.map_structure_up_to(
<ide> y,
<ide> lambda tensor, shape: tensor.set_shape(shape),
<ide> def compute_mask(self, inputs, mask=None):
<ide>
<ide> Args:
<ide> inputs: Tensor with shape [batch size, timesteps, ...] indicating the
<del> input to TimeDistributed. If static shape information is available for
<del> "batch size", `mask` is returned unmodified.
<add> input to TimeDistributed. If static shape information is available
<add> for "batch size", `mask` is returned unmodified.
<ide> mask: Either None (indicating no masking) or a Tensor indicating the
<ide> input mask for TimeDistributed. The shape can be static or dynamic.
<ide>
<ide> Returns:
<del> Either None (no masking), or a [batch size, timesteps, ...] Tensor with
<del> an output mask for the TimeDistributed layer with the shape beyond the
<del> second dimension being the value of the input mask shape(if the computed
<del> output mask is none), an output mask with the shape beyond the first
<del> dimension being the value of the mask shape(if mask is not None) or
<del> output mask with the shape beyond the first dimension being the
<del> value of the computed output shape.
<add> Either None (no masking), or a [batch size, timesteps, ...] Tensor
<add> with an output mask for the TimeDistributed layer with the shape
<add> beyond the second dimension being the value of the input mask shape (if
<add> the computed output mask is None), an output mask with the shape
<add> beyond the first dimension being the value of the mask shape (if mask
<add> is not None) or output mask with the shape beyond the first dimension
<add> being the value of the computed output shape.
<ide>
<ide> """
<ide> # cases need to call the layer.compute_mask when input_mask is None:
<ide> def compute_mask(self, inputs, mask=None):
<ide> tf.nest.flatten(is_ragged_input)
<ide> )
<ide> if batch_size and not self._always_use_reshape or any(is_ragged_input):
<del> # batch size matters, we currently do not handle mask explicitly, or if
<del> # the layer always uses reshape approach, or the input is a ragged tensor.
<add> # batch size matters, we currently do not handle mask explicitly, or
<add> # if the layer always uses reshape approach, or the input is a
<add> # ragged tensor.
<ide> return mask
<ide> inner_mask = mask
<ide> if inner_mask is not None:
<ide><path>keras/layers/rnn/time_distributed_test.py
<ide> def call(self, inputs):
<ide> td1 = keras.layers.TimeDistributed(keras.layers.Dense(5))
<ide> self.assertTrue(td1._always_use_reshape)
<ide>
<del> # Built-in layers that are stateful don't use the reshape implementation.
<add> # Built-in layers that are stateful don't use the reshape
<add> # implementation.
<ide> td2 = keras.layers.TimeDistributed(
<ide> keras.layers.RNN(keras.layers.SimpleRNNCell(10), stateful=True)
<ide> )
<ide><path>keras/layers/serialization.py
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<del># pylint: disable=g-bad-import-order,g-direct-tensorflow-import,unused-import,wildcard-import
<del>
<ide> import threading
<ide> from keras.engine import base_layer
<ide> from keras.engine import input_layer
<ide><path>keras/layers/serialization_test.py
<ide> def test_implicit_serialize_deserialize_fails_without_object(self):
<ide> bias_regularizer="l2",
<ide> )
<ide> config = keras.layers.serialize(layer)
<del> # Because we're passing an unknown class here, deserialization should fail
<del> # unless we add SerializableInt to the custom object dict.
<add> # Because we're passing an unknown class here, deserialization should
<add> # fail unless we add SerializableInt to the custom object dict.
<ide> with self.assertRaisesRegex(
<ide> ValueError, "Unknown config_item: SerializableInt.*"
<ide> ):
<ide> def test_implicit_serialize_deserialize_succeeds_with_object(self):
<ide> bias_regularizer="l2",
<ide> )
<ide> config = keras.layers.serialize(layer)
<del> # Because we're passing an unknown class here, deserialization should fail
<del> # unless we add SerializableInt to the custom object dict.
<add> # Because we're passing an unknown class here, deserialization should
<add> # fail unless we add SerializableInt to the custom object dict.
<ide> new_layer = keras.layers.deserialize(
<ide> config, custom_objects={"SerializableInt": SerializableInt}
<ide> )
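
A sketch of the round trip these tests exercise; `SerializableInt` here is the test suite's own helper (an `int` subclass with `get_config`/`from_config`), not a public Keras class:

```python
import tensorflow as tf

class SerializableInt(int):
    def __new__(cls, value):
        return int.__new__(cls, value)

    def get_config(self):
        return {"value": int(self)}

    @classmethod
    def from_config(cls, config):
        return cls(**config)

layer = tf.keras.layers.Dense(SerializableInt(3))
config = tf.keras.layers.serialize(layer)
# Fails with "Unknown config_item: SerializableInt" without custom_objects:
restored = tf.keras.layers.deserialize(
    config, custom_objects={"SerializableInt": SerializableInt})
```
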
<ide><path>keras/layers/tensorflow_op_layer_test.py
<ide> def test_getitem_complex_slicing(self):
<ide> tf.constant(stop, shape=(batch_size,)),
<ide> tf.constant(step, shape=(batch_size,)),
<ide> ]
<del> # Slice the innermost dim. only grab one index from the second-to-innermost
<del> # dim, removing that dim from the shape.
<add> # Slice the innermost dim. Only grab one index from the
<add> # second-to-innermost dim, removing that dim from the shape.
<ide> expected = tf.stack(
<ide> [
<ide> tf.stack([tf.range(8)[start:stop:step] for _ in range(4)])
<ide> def test_identity(self):
<ide> x = keras.Input(shape=(1,))
<ide> ident = tf.identity(x)
<ide>
<del> # This is now a graph tensor, and should be able to continue in graphland
<add> # This is now a graph tensor, and should be able to continue in
<add> # graphland
<ide> self.assertIn("Identity", ident.name)
<ide>
<ide> def test_size(self):
<ide> x = keras.Input(shape=(3,))
<ide> self.assertAllEqual(x.get_shape().as_list(), [None, 3])
<ide> sz = tf.size(x)
<ide>
<del> # This is now a graph tensor, and should be able to continue in graphland
<add> # This is now a graph tensor, and should be able to continue in
<add> # graphland
<ide> self.assertIn("Size", sz.name)
<ide>
<ide> | 133 |
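
The op-layer tests above rely on plain TF ops staying symbolic when applied to Keras inputs; a minimal sketch:

```python
import tensorflow as tf

x = tf.keras.Input(shape=(3,))
ident = tf.identity(x)   # still a graph tensor ("graphland")
model = tf.keras.Model(x, ident)
print(model(tf.zeros([2, 3])).shape)  # (2, 3)
```
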
Javascript | Javascript | add missing variable declaration | 30e919a3cfd663ec3f1d971dcdb9970ff8a4779e | <ide><path>lib/repl.js
<ide> function complete(line, callback) {
<ide> var completions;
<ide> // List of completion lists, one for each inheritance "level"
<ide> var completionGroups = [];
<del> var completeOn, i, group, c;
<add> var completeOn, group, c;
<ide>
<ide> // REPL commands (e.g. ".break").
<ide> var filter;
<ide> function complete(line, callback) {
<ide> completeOn = match[1];
<ide> var subdir = match[2] || '';
<ide> filter = match[1];
<del> var dir, files, f, name, base, ext, abs, subfiles, s, isDirectory;
<add> var dir, files, name, base, ext, abs, subfiles, isDirectory;
<ide> group = [];
<ide> let paths = [];
<ide>
<ide> function complete(line, callback) {
<ide> paths = module.paths.concat(CJSModule.globalPaths);
<ide> }
<ide>
<del> for (i = 0; i < paths.length; i++) {
<add> for (let i = 0; i < paths.length; i++) {
<ide> dir = path.resolve(paths[i], subdir);
<ide> try {
<ide> files = fs.readdirSync(dir);
<ide> } catch {
<ide> continue;
<ide> }
<del> for (f = 0; f < files.length; f++) {
<add> for (let f = 0; f < files.length; f++) {
<ide> name = files[f];
<ide> ext = path.extname(name);
<ide> base = name.slice(0, -ext.length);
<ide> function complete(line, callback) {
<ide> } catch {
<ide> continue;
<ide> }
<del> for (s = 0; s < subfiles.length; s++) {
<add> for (let s = 0; s < subfiles.length; s++) {
<ide> if (indexRe.test(subfiles[s])) {
<ide> group.push(subdir + name);
<ide> }
<ide> function complete(line, callback) {
<ide> }
<ide>
<ide> if (memberGroups.length) {
<del> for (i = 0; i < memberGroups.length; i++) {
<add> for (let i = 0; i < memberGroups.length; i++) {
<ide> completionGroups.push(
<ide> memberGroups[i].map((member) => `${expr}.${member}`));
<ide> }
<ide> function complete(line, callback) {
<ide> // Filter, sort (within each group), uniq and merge the completion groups.
<ide> if (completionGroups.length && filter) {
<ide> var newCompletionGroups = [];
<del> for (i = 0; i < completionGroups.length; i++) {
<add> for (let i = 0; i < completionGroups.length; i++) {
<ide> group = completionGroups[i]
<ide> .filter((elem) => elem.indexOf(filter) === 0);
<ide> if (group.length) {
<ide> function complete(line, callback) {
<ide> // Completion group 0 is the "closest"
<ide> // (least far up the inheritance chain)
<ide> // so we put its completions last: to be closest in the REPL.
<del> for (i = 0; i < completionGroups.length; i++) {
<add> for (let i = 0; i < completionGroups.length; i++) {
<ide> group = completionGroups[i];
<ide> group.sort();
<ide> for (var j = group.length - 1; j >= 0; j--) { | 1 |
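
For consistency with the other examples, a Python paraphrase of the filter/sort/merge scheme in the JavaScript above (each completion group is one inheritance level, filtered by the typed prefix and deduplicated):

```python
def merge_completions(completion_groups, prefix):
    merged, seen = [], set()
    for group in completion_groups:
        for candidate in sorted(c for c in group if c.startswith(prefix)):
            if candidate not in seen:
                seen.add(candidate)
                merged.append(candidate)
    return merged

print(merge_completions([["repl", "require"], ["return"]], "re"))
# ['repl', 'require', 'return']
```
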
Text | Text | edit paragraph 1 for clarity and material. | 00c7b70ab56a4c8f1177286ea6cafc0440fa464d | <ide><path>guide/english/computer-hardware/cooling/index.md
<ide> ---
<ide> title: Cooling Systems
<ide> ---
<del>Your computer contains many parts that generate heat. Excessive heat can cause your computer to behave erratically or its hardware to fail prematurely. The more you overclock the graphics or the CPU, the hotter the computer will run. Cooling systems assure that your computer is stable and extends the life of your investment.
<add>Your computer contains many parts that generate heat. Excessive heat can cause your computer to behave erratically or its hardware to fail prematurely. Overclocking, changing the default hardware settings for maximum performance, will also cause components to run hotter. The more you overclock a graphics card, CPU, etc., the hotter the component will run. Cooling systems ensure that your computer stays stable and extend the life of your investment.
<ide>
<ide> ## Case Cooling
<ide> Case cooling can be a combination of a well-ventilated case and the use of fans to circulate air within the case. Most cases come with at least one fan. This is the minimum configuration. For best results, there should be at least two fans in the case. One acts as an intake to draw in cold air, while the other serves as an exhaust to expel hot air. It is important to consider the direction of the fan and whether it is drawing air in or exhausting it. It is typical to place exhaust fans towards the top of the case as hot air rises, and intake fans towards the bottom of the case as cool air sinks. There are many different types of fans and configurations. Cable management also plays a huge part in cooling the system. Bad cable management and tangled wires can disrupt airflow and cause dust to accumulate inside the case.
PHP | PHP | add docs for hour | 9ddc70bfcdeff3b339aba4a821733cf8c24a8f4e | <ide><path>src/View/Input/DateTime.php
<ide> public function __construct($templates, $selectBox) {
<ide> * The `month` option accepts the `name` option which allows you to get month
<ide> * names instead of month numbers.
<ide> *
<del> * The `hour` option allows you to set the `format` option which accepts
<del> * 12 or 24, allowing you to indicate which hour format you want.
<add> * The `hour` option allows you to set the following options:
<add> *
<add> * - `format` option which accepts 12 or 24, allowing
<add> * you to indicate which hour format you want.
<add> * - `start` The hour to start the options at.
<add> * - `end` The hour to stop the options at.
<add> *
<add> * The start and end options are dependent on the format used. If the
<add> * value is out of the start/end range, it will not be included.
<ide> *
<ide> * The `minute` option allows you to define the following options:
<ide> * | 1 |
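
A hedged Python model of the documented hour behavior (illustration only, not the PHP implementation): options come from the chosen clock format, and out-of-range values are dropped.

```python
def hour_options(fmt=24, start=None, end=None):
    hours = list(range(1, 13)) if fmt == 12 else list(range(0, 24))
    lo = hours[0] if start is None else start
    hi = hours[-1] if end is None else end
    return [h for h in hours if lo <= h <= hi]

print(hour_options(fmt=12, start=9, end=17))  # [9, 10, 11, 12]
```
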
Javascript | Javascript | add example usage | 342d064345ea802c0fba82d1f389e3ae5800d7fe | <ide><path>src/objects/Mesh.js
<ide> THREE.Mesh.prototype.raycast = ( function () {
<ide> var vA = new THREE.Vector3();
<ide> var vB = new THREE.Vector3();
<ide> var vC = new THREE.Vector3();
<add>
<add> var tempA = new THREE.Vector3();
<add> var tempB = new THREE.Vector3();
<add> var tempC = new THREE.Vector3();
<add>
<ide> return function ( raycaster, intersects ) {
<ide>
<ide> THREE.Mesh.prototype.raycast = ( function () {
<ide> var vertices = geometry.vertices;
<ide> var faces = geometry.faces;
<ide>
<ide> for ( var f = 0, fl = faces.length; f < fl; f ++ ) {
<ide>
<ide> var face = faces[ f ];
<ide> THREE.Mesh.prototype.raycast = ( function () {
<ide> if ( influence === 0 ) continue;
<ide>
<ide> var targets = morphTargets[ t ].vertices;
<del>
<del> vA.x += ( targets[ face.a ].x - a.x ) * influence;
<del> vA.y += ( targets[ face.a ].y - a.y ) * influence;
<del> vA.z += ( targets[ face.a ].z - a.z ) * influence;
<del>
<del> vB.x += ( targets[ face.b ].x - b.x ) * influence;
<del> vB.y += ( targets[ face.b ].y - b.y ) * influence;
<del> vB.z += ( targets[ face.b ].z - b.z ) * influence;
<del>
<del> vC.x += ( targets[ face.c ].x - c.x ) * influence;
<del> vC.y += ( targets[ face.c ].y - c.y ) * influence;
<del> vC.z += ( targets[ face.c ].z - c.z ) * influence;
<add>
<add> vA.addScaledVector( tempA.subVectors( targets[ face.a ], a ), influence );
<add> vB.addScaledVector( tempB.subVectors( targets[ face.b ], b ), influence );
<add> vC.addScaledVector( tempC.subVectors( targets[ face.c ], c ), influence );
<ide>
<ide> }
<ide> | 1 |
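
The refactor above is just the weighted morph-target delta sum written with vector helpers; a numpy restatement of the math:

```python
import numpy as np

base = np.array([1.0, 2.0, 3.0])
targets = [np.array([2.0, 2.0, 3.0]), np.array([1.0, 4.0, 3.0])]
influences = [0.5, 0.25]

v = base.copy()
for t, w in zip(targets, influences):
    v += (t - base) * w  # == addScaledVector(subVectors(t, base), w)
print(v)  # [1.5 2.5 3. ]
```
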
PHP | PHP | escape pre tag in markdown block | a8a2aad3dd9507db5c73c9f484f72fb8cac03fd9 | <ide><path>src/Core/functions.php
<ide> function namespaceSplit(string $class): array
<ide> * print_r() convenience function.
<ide> *
<ide> * In terminals this will act similar to using print_r() directly, when not run on CLI
<del> * print_r() will also wrap <pre> tags around the output of given variable. Similar to debug().
<add> * print_r() will also wrap `<pre>` tags around the output of given variable. Similar to debug().
<ide> *
<ide> * This function returns the same variable that was passed.
<ide> * | 1 |
Javascript | Javascript | follow style conventions | fc23239b677939f6b43787044a7613589f4bcf56 | <ide><path>src/renderers/dom/client/wrappers/ReactDOMInput.js
<ide> var ReactDOMInput = {
<ide> onChange: inst._wrapperState.onChange,
<ide> });
<ide>
<del> return nativeProps
<add> return nativeProps;
<ide> },
<ide>
<ide> mountWrapper: function(inst, props) { | 1 |
Javascript | Javascript | remove unnecessary import | 416c109665f067237f6ce7dc1dfdbb0df088d104
<ide> import {
<ide> zero3,
<ide> one3
<ide> } from './Constants.tests';
<del>import { Cache } from '../../../../build/three';
<ide>
<ide> function comparePlane( a, b, threshold ) {
<ide> | 1 |
Python | Python | improve the speed and stability of kpo tests | a032b8ab93a95fc660ffbe7aa3531cd4614c1d1c | <ide><path>kubernetes_tests/test_kubernetes_pod_operator.py
<ide> import textwrap
<ide> import unittest
<ide> from copy import copy
<add>from typing import Optional
<ide> from unittest import mock
<ide> from unittest.mock import ANY, MagicMock
<ide>
<ide> def get_kubeconfig_path():
<ide> return kubeconfig_path if kubeconfig_path else os.path.expanduser('~/.kube/config')
<ide>
<ide>
<add>def get_label():
<add> test = os.environ.get('PYTEST_CURRENT_TEST')
<add> label = ''.join(filter(str.isalnum, test)).lower()
<add> return label[-63:]
<add>
<add>
<ide> class TestKubernetesPodOperatorSystem(unittest.TestCase):
<ide> def get_current_task_name(self):
<ide> # reverse test name to make pod name unique (it has limited length)
<ide> def get_current_task_name(self):
<ide> def setUp(self):
<ide> self.maxDiff = None
<ide> self.api_client = ApiClient()
<add> self.labels = {"test_label": get_label()}
<ide> self.expected_pod = {
<ide> 'apiVersion': 'v1',
<ide> 'kind': 'Pod',
<ide> def setUp(self):
<ide> 'name': ANY,
<ide> 'annotations': {},
<ide> 'labels': {
<del> 'foo': 'bar',
<add> 'test_label': get_label(),
<ide> 'kubernetes_pod_operator': 'True',
<ide> 'airflow_version': airflow_version.replace('+', '-'),
<ide> 'airflow_kpo_in_cluster': 'False',
<ide> def setUp(self):
<ide> },
<ide> }
<ide>
<add> def _get_labels_selector(self) -> Optional[str]:
<add> if not self.labels:
<add> return None
<add> return ",".join([f'{key}={value}' for key, value in enumerate(self.labels)])
<add>
<ide> def tearDown(self) -> None:
<ide> hook = KubernetesHook(conn_id=None, in_cluster=False)
<ide> client = hook.core_v1_client
<del> client.delete_collection_namespaced_pod(namespace="default")
<del> import time
<del>
<del> time.sleep(1)
<add> client.delete_collection_namespaced_pod(namespace="default", grace_period_seconds=0)
<ide>
<ide> def test_do_xcom_push_defaults_false(self):
<ide> new_config_path = '/tmp/kube_config'
<ide> old_config_path = get_kubeconfig_path()
<ide> shutil.copy(old_config_path, new_config_path)
<del>
<ide> k = KubernetesPodOperator(
<ide> namespace='default',
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_config_path_move(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test1",
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_working_pod(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_delete_operator_pod(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_already_checked_on_success(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name=pod_name,
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_already_checked_on_failure(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["lalala"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name=pod_name,
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_hostnetwork(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_dnspolicy(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_schedulername(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_node_selectors(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_resources(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_affinity(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_port(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_volume_mount(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=args,
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> volume_mounts=[volume_mount],
<ide> volumes=[volume],
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> def test_run_as_user_root(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_run_as_user_non_root(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_fs_group(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-fs-group",
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_faulty_image(self):
<ide> image=bad_image_name,
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_faulty_service_account(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_failure(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=bad_internal_command,
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_failure(self):
<ide>
<ide> @mock.patch("airflow.models.taskinstance.TaskInstance.xcom_push")
<ide> def test_xcom_push(self, xcom_push):
<del> return_value = '{"foo": "bar"\n, "buzz": 2}'
<add> return_value = f'{{"test_label": "{get_label()}"\n, "buzz": 2}}'
<ide> args = [f'echo \'{return_value}\' > /airflow/xcom/return.json']
<ide> k = KubernetesPodOperator(
<ide> namespace='default',
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=args,
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_envs_from_secrets(self, await_pod_completion_mock, create_pod):
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<ide> secrets=secrets,
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_env_vars(self):
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<ide> env_vars=env_vars,
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_template_file_system(self):
<ide> k = KubernetesPodOperator(
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<add> labels=self.labels,
<ide> pod_template_file=fixture,
<ide> do_xcom_push=True,
<ide> )
<ide> def test_pod_template_file_with_overrides_system(self):
<ide> fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
<ide> k = KubernetesPodOperator(
<ide> task_id="task" + self.get_current_task_name(),
<del> labels={"foo": "bar", "fizz": "buzz"},
<add> labels=self.labels,
<ide> env_vars=[k8s.V1EnvVar(name="env_name", value="value")],
<ide> in_cluster=False,
<ide> pod_template_file=fixture,
<ide> def test_pod_template_file_with_overrides_system(self):
<ide> result = k.execute(context)
<ide> assert result is not None
<ide> assert k.pod.metadata.labels == {
<del> 'fizz': 'buzz',
<del> 'foo': 'bar',
<add> 'test_label': get_label(),
<ide> 'airflow_version': mock.ANY,
<ide> 'airflow_kpo_in_cluster': 'False',
<ide> 'dag_id': 'dag',
<ide> def test_pod_template_file_with_full_pod_spec(self):
<ide> fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
<ide> pod_spec = k8s.V1Pod(
<ide> metadata=k8s.V1ObjectMeta(
<del> labels={"foo": "bar", "fizz": "buzz"},
<add> labels={"test_label": get_label(), "fizz": "buzz"},
<ide> ),
<ide> spec=k8s.V1PodSpec(
<ide> containers=[
<ide> def test_pod_template_file_with_full_pod_spec(self):
<ide> )
<ide> k = KubernetesPodOperator(
<ide> task_id="task" + self.get_current_task_name(),
<add> labels=self.labels,
<ide> in_cluster=False,
<ide> pod_template_file=fixture,
<ide> full_pod_spec=pod_spec,
<ide> def test_pod_template_file_with_full_pod_spec(self):
<ide> assert result is not None
<ide> assert k.pod.metadata.labels == {
<ide> 'fizz': 'buzz',
<del> 'foo': 'bar',
<add> 'test_label': get_label(),
<ide> 'airflow_version': mock.ANY,
<ide> 'airflow_kpo_in_cluster': 'False',
<ide> 'dag_id': 'dag',
<ide> def test_pod_template_file_with_full_pod_spec(self):
<ide> def test_full_pod_spec(self):
<ide> pod_spec = k8s.V1Pod(
<ide> metadata=k8s.V1ObjectMeta(
<del> labels={"foo": "bar", "fizz": "buzz"}, namespace="default", name="test-pod"
<add> labels={"test_label": get_label(), "fizz": "buzz"}, namespace="default", name="test-pod"
<ide> ),
<ide> spec=k8s.V1PodSpec(
<ide> containers=[
<ide> def test_full_pod_spec(self):
<ide> k = KubernetesPodOperator(
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<add> labels=self.labels,
<ide> full_pod_spec=pod_spec,
<ide> do_xcom_push=True,
<ide> is_delete_operator_pod=False,
<ide> def test_full_pod_spec(self):
<ide> assert result is not None
<ide> assert k.pod.metadata.labels == {
<ide> 'fizz': 'buzz',
<del> 'foo': 'bar',
<add> 'test_label': get_label(),
<ide> 'airflow_version': mock.ANY,
<ide> 'airflow_kpo_in_cluster': 'False',
<ide> 'dag_id': 'dag',
<ide> def test_init_container(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> volumes=[volume],
<ide> def test_pod_template_file(
<ide> path = sys.path[0] + '/tests/kubernetes/pod.yaml'
<ide> k = KubernetesPodOperator(
<ide> task_id="task" + self.get_current_task_name(),
<add> labels=self.labels,
<ide> random_name_suffix=False,
<ide> pod_template_file=path,
<ide> do_xcom_push=True,
<ide> def test_pod_template_file(
<ide> 'metadata': {
<ide> 'annotations': {},
<ide> 'labels': {
<add> "test_label": get_label(),
<ide> 'airflow_kpo_in_cluster': 'False',
<ide> 'dag_id': 'dag',
<ide> 'run_id': 'manual__2016-01-01T0100000100-da4d1ce7b',
<ide> def test_pod_priority_class_name(self, hook_mock, await_pod_completion_mock):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test-" + str(random.randint(0, 1000000)),
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_pod_name(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["echo 10"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name=pod_name_too_long,
<ide> task_id="task" + self.get_current_task_name(),
<ide> in_cluster=False,
<ide> def test_on_kill(self):
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["sleep 1000"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test",
<ide> task_id=name,
<ide> in_cluster=False,
<ide> def get_op():
<ide> image="ubuntu:16.04",
<ide> cmds=["bash", "-cx"],
<ide> arguments=["exit 1"],
<del> labels={"foo": "bar"},
<add> labels=self.labels,
<ide> name="test",
<ide> task_id=name,
<ide> in_cluster=False, | 1 |
Javascript | Javascript | add deprecated annotation to api docs | 44eb0488467cf4ea7cd84f8f4ba5b6dbf47edd8b | <ide><path>packages/ember/lib/index.js
<ide> Object.defineProperty(Ember, 'onerror', {
<ide> @method K
<ide> @return {Object}
<ide> @public
<add> @deprecated
<ide> */
<ide> function deprecatedEmberK() { return this; }
<ide> | 1 |
Javascript | Javascript | remove dollar symbol for private function | 3a4521a4a2af30bac7f67b5a02b4433a51e9d169 | <ide><path>lib/events.js
<ide> EventEmitter.prototype.setMaxListeners = function setMaxListeners(n) {
<ide> return this;
<ide> };
<ide>
<del>function $getMaxListeners(that) {
<add>function _getMaxListeners(that) {
<ide> if (that._maxListeners === undefined)
<ide> return EventEmitter.defaultMaxListeners;
<ide> return that._maxListeners;
<ide> }
<ide>
<ide> EventEmitter.prototype.getMaxListeners = function getMaxListeners() {
<del> return $getMaxListeners(this);
<add> return _getMaxListeners(this);
<ide> };
<ide>
<ide> // Returns the length and line number of the first sequence of `a` that fully
<ide> function _addListener(target, type, listener, prepend) {
<ide> }
<ide>
<ide> // Check for listener leak
<del> m = $getMaxListeners(target);
<add> m = _getMaxListeners(target);
<ide> if (m > 0 && existing.length > m && !existing.warned) {
<ide> existing.warned = true;
<ide> // No error code for this since it is a Warning | 1 |
PHP | PHP | fix comments on session | 452956551b701bfe40631894cf4e6c0568c5fd2f | <ide><path>app/config/session.php
<ide> | requests. By default we will use the light-weight cookie driver but
<ide> | you may specify any of the other wonderful drivers provided here.
<ide> |
<del> | Supported: "native", "file", "database", "apc",
<add> | Supported: "native", "database", "apc",
<ide> | "memcached", "redis", "array"
<ide> |
<ide> */
<ide> | Session File Location
<ide> |--------------------------------------------------------------------------
<ide> |
<del> | When using the "file" session driver, we need a location where session
<add> | When using the native session driver, we need a location where session
<ide> | files may be stored. A default has been set for you but a different
<ide> | location may be specified. This is only needed for file sessions.
<ide> | | 1 |
PHP | PHP | remove defunct method from "implementedmethods" | ca2b7b576466e20d16a4f9bad9a76de81495b8af | <ide><path>src/ORM/Behavior/TranslateBehavior.php
<ide> class TranslateBehavior extends Behavior implements PropertyMarshalInterface
<ide> 'implementedMethods' => [
<ide> 'setLocale' => 'setLocale',
<ide> 'getLocale' => 'getLocale',
<del> 'locale' => 'locale',
<ide> 'translationField' => 'translationField'
<ide> ],
<ide> 'fields' => [], | 1 |
Go | Go | generalize consumeslow and add stop support | 417e48e4a00c891e8fe5614ac6a1ef12de951f72 | <ide><path>integration-cli/docker_cli_run_test.go
<ide> import (
<ide> "bufio"
<ide> "bytes"
<ide> "fmt"
<del> "io"
<ide> "io/ioutil"
<ide> "net"
<ide> "os"
<ide> func TestRunSlowStdoutConsumer(t *testing.T) {
<ide> if err := c.Start(); err != nil {
<ide> t.Fatal(err)
<ide> }
<del> n, err := consumeSlow(stdout, 10000, 5*time.Millisecond)
<add> n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide><path>integration-cli/utils.go
<ide> func makeRandomString(n int) string {
<ide> return string(b)
<ide> }
<ide>
<del>func consumeSlow(reader io.Reader, chunkSize int, interval time.Duration) (n int, err error) {
<add>// Reads chunkSize bytes from reader after every interval until EOF,
<add>// or until a value is received on the stop channel.
<add>// Returns total read bytes.
<add>func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) {
<ide> buffer := make([]byte, chunkSize)
<ide> for {
<del> var readBytes int
<del> readBytes, err = reader.Read(buffer)
<del> n += readBytes
<del> if err != nil {
<del> if err == io.EOF {
<del> err = nil
<del> }
<add> select {
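<add> // a value received on stop ends consumption before the next read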
<add> case <-stop:
<ide> return
<add> default:
<add> var readBytes int
<add> readBytes, err = reader.Read(buffer)
<add> n += readBytes
<add> if err != nil {
<add> if err == io.EOF {
<add> err = nil
<add> }
<add> return
<add> }
<add> time.Sleep(interval)
<ide> }
<del> time.Sleep(interval)
<ide> }
<ide> } | 2 |
Text | Text | fix grammatical errors | c09aa7a83c2632309203a130362bf5c737a12e19 | <ide><path>threejs/lessons/fr/threejs-primitives.md
<ide> Title: Primitives de Three.js
<del>Description: Tour d'horizon des primitives de Three.js
<add>Description: Un tour des primitives de Three.js
<ide> TOC: Primitives
<ide>
<ide> Cet article fait partie d'une série consacrée à Three.js. | 1 |
Text | Text | add explanation about loops | 6bbf1c0c531f9f89b5035a4cf9931ef5991ffe8d | <ide><path>guide/english/php/loops/index.md
<ide> A loop will continue running until the defined condition returns `false`.
<ide>
<ide> You can type `php for`, `php while` or `php do while` to get more info on any of these.
<ide>
<add>
<add>## PHP Loops
<add>Often when you write code, you want the same block of code to run over and over again in a row. Instead of adding several almost equal code-lines in a script, we can use loops to perform a task like this.
<add>
<add>In PHP, we have the following looping statements (a short example follows the list):
<add>
<add>- `while` - loops through a block of code as long as the specified condition is true
<add>- `do...while` - loops through a block of code once, and then repeats the loop as long as the specified condition is true
<add>- `for` - loops through a block of code a specified number of times
<add>- `foreach` - loops through a block of code for each element in an array
<add>
<add>
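<add>As a minimal sketch (the counter `$i` and the message text are just for illustration), a `for` loop and an equivalent `while` loop that print the numbers 1 through 5 look like this:
<add>
<add>```php
<add><?php
<add>// for: the counter is declared, tested and incremented in the loop header.
<add>for ($i = 1; $i <= 5; $i++) {
<add>  echo "The number is: $i\n";
<add>}
<add>
<add>// while: same output, but the counter is managed by hand.
<add>$i = 1;
<add>while ($i <= 5) {
<add>  echo "The number is: $i\n";
<add>  $i++;
<add>}
<add>```
<add>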
<ide> ### More Information
<ide>
<ide> - <a href='https://secure.php.net/manual/control-structures.for.php' target='_blank' rel='nofollow'>PHP.net - For Loops</a> | 1 |
Mixed | Go | move tar copy-up for tmpfs mounts | ae8ec4860e68e945cf6b2c157fa4e243c35c54a5 | <ide><path>daemon/execdriver/native/create.go
<ide> func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) e
<ide> flags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
<ide> err error
<ide> )
<del> fulldest := filepath.Join(c.Rootfs, m.Destination)
<ide> if m.Data != "" {
<ide> flags, data, err = mount.ParseTmpfsOptions(m.Data)
<ide> if err != nil {
<ide> func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) e
<ide> Data: data,
<ide> Device: "tmpfs",
<ide> Flags: flags,
<del> PremountCmds: genTmpfsPremountCmd(c.TmpDir, fulldest, m.Destination),
<del> PostmountCmds: genTmpfsPostmountCmd(c.TmpDir, fulldest, m.Destination),
<ide> PropagationFlags: []int{mountPropagationMap[volume.DefaultPropagationMode]},
<ide> })
<ide> continue
<ide><path>daemon/execdriver/native/tmpfs.go
<del>package native
<del>
<del>import (
<del> "fmt"
<del> "os"
<del> "os/exec"
<del> "strings"
<del>
<del> "github.com/Sirupsen/logrus"
<del> "github.com/opencontainers/runc/libcontainer/configs"
<del>)
<del>
<del>func genTmpfsPremountCmd(tmpDir string, fullDest string, dest string) []configs.Command {
<del> var premount []configs.Command
<del> tarPath, err := exec.LookPath("tar")
<del> if err != nil {
<del> logrus.Warn("tar command is not available for tmpfs mount: %s", err)
<del> return premount
<del> }
<del> if _, err = exec.LookPath("rm"); err != nil {
<del> logrus.Warn("rm command is not available for tmpfs mount: %s", err)
<del> return premount
<del> }
<del> tarFile := fmt.Sprintf("%s/%s.tar", tmpDir, strings.Replace(dest, "/", "_", -1))
<del> if _, err := os.Stat(fullDest); err == nil {
<del> premount = append(premount, configs.Command{
<del> Path: tarPath,
<del> Args: []string{"-cf", tarFile, "-C", fullDest, "."},
<del> })
<del> }
<del> return premount
<del>}
<del>
<del>func genTmpfsPostmountCmd(tmpDir string, fullDest string, dest string) []configs.Command {
<del> var postmount []configs.Command
<del> tarPath, err := exec.LookPath("tar")
<del> if err != nil {
<del> return postmount
<del> }
<del> rmPath, err := exec.LookPath("rm")
<del> if err != nil {
<del> return postmount
<del> }
<del> if _, err := os.Stat(fullDest); os.IsNotExist(err) {
<del> return postmount
<del> }
<del> tarFile := fmt.Sprintf("%s/%s.tar", tmpDir, strings.Replace(dest, "/", "_", -1))
<del> postmount = append(postmount, configs.Command{
<del> Path: tarPath,
<del> Args: []string{"-xf", tarFile, "-C", fullDest, "."},
<del> })
<del> return append(postmount, configs.Command{
<del> Path: rmPath,
<del> Args: []string{"-f", tarFile},
<del> })
<del>}
<ide><path>man/docker-create.1.md
<ide> unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
<ide>
<ide> $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image
<ide>
<del> This command mounts a `tmpfs` at `/tmp` within the container. The mount copies
<del>the underlying content of `my_image` into `/tmp`. For example if there was a
<del>directory `/tmp/content` in the base image, docker will copy this directory and
<del>all of its content on top of the tmpfs mounted on `/tmp`. The supported mount
<add> This command mounts a `tmpfs` at `/tmp` within the container. The supported mount
<ide> options are the same as the Linux default `mount` flags. If you do not specify
<ide> any options, the system uses the following options:
<ide> `rw,noexec,nosuid,nodev,size=65536k`.
<ide><path>man/docker-run.1.md
<ide> standard input.
<ide>
<ide> $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image
<ide>
<del> This command mounts a `tmpfs` at `/tmp` within the container. The mount copies
<del>the underlying content of `my_image` into `/tmp`. For example if there was a
<del>directory `/tmp/content` in the base image, docker will copy this directory and
<del>all of its content on top of the tmpfs mounted on `/tmp`. The supported mount
<add> This command mounts a `tmpfs` at `/tmp` within the container. The supported mount
<ide> options are the same as the Linux default `mount` flags. If you do not specify
<ide> any options, the system uses the following options:
<ide> `rw,noexec,nosuid,nodev,size=65536k`. | 4 |
Text | Text | fix return type of server.address() | 049c0464ce40cef17e8476b3e6b909bb10de469b | <ide><path>doc/api/net.md
<ide> Emitted when the server has been bound after calling [`server.listen()`][].
<ide> added: v0.1.90
<ide> -->
<ide>
<del>* Returns: {Object}
<add>* Returns: {Object|string}
<ide>
<ide> Returns the bound `address`, the address `family` name, and `port` of the server
<ide> as reported by the operating system if listening on an IP socket | 1 |
Text | Text | fix a typo by removing extra article [ci skip] | 6fd43bd07170d54e90625081bf21c36d3666b8c6 | <ide><path>guides/source/contributing_to_ruby_on_rails.md
<ide> changes to the master branch.
<ide>
<ide> When working with documentation, please take into account the [API Documentation Guidelines](api_documentation_guidelines.html) and the [Ruby on Rails Guides Guidelines](ruby_on_rails_guides_guidelines.html).
<ide>
<del>NOTE: For documentation changes, your commit message should include [ci skip]. This will skip the running the test suite, helping us to cut down on our server costs. Keep in mind that you should only skip CI when your change touches documentation exclusively.
<add>NOTE: For documentation changes, your commit message should include [ci skip]. This will skip running the test suite, helping us to cut down on our server costs. Keep in mind that you should only skip CI when your change touches documentation exclusively.
<ide>
<ide> Translating Rails Guides
<ide> ------------------------ | 1 |
PHP | PHP | relax typehints in fixturemanager | c6cbc43229153fd3ea75e6c9ab0ea6337d253687 | <ide><path>src/TestSuite/Fixture/FixtureManager.php
<ide> protected function _loadFixtures($test)
<ide> * @param bool $drop whether to drop the fixture if it is already created or not
<ide> * @return void
<ide> */
<del> protected function _setupTable(TestFixture $fixture, Connection $db, array $sources, $drop = true)
<add> protected function _setupTable($fixture, $db, array $sources, $drop = true)
<ide> {
<ide> if (!empty($fixture->created) && in_array($db->configName(), $fixture->created)) {
<ide> return; | 1 |
PHP | PHP | update apcengine & tests for cache changes | 4f325a7fbe06b433202db5371e1283dd0ab77e28 | <ide><path>lib/Cake/Cache/Engine/ApcEngine.php
<ide> <?php
<ide> /**
<del> * APC storage engine for cache.
<del> *
<del> *
<del> * PHP 5
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide> * @license MIT License (http://www.opensource.org/licenses/mit-license.php)
<ide> */
<ide> namespace Cake\Cache\Engine;
<add>
<ide> use Cake\Cache\CacheEngine;
<ide> use Cake\Utility\Inflector;
<ide>
<ide> class ApcEngine extends CacheEngine {
<ide>
<ide> /**
<ide> * Contains the compiled group names
<del> * (prefixed witht the global configuration prefix)
<add> * (prefixed with the global configuration prefix)
<ide> *
<ide> * @var array
<ide> **/
<ide> class ApcEngine extends CacheEngine {
<ide> * Initialize the Cache Engine
<ide> *
<ide> * Called automatically by the cache frontend
<del> * To reinitialize the settings call Cache::engine('EngineName', [optional] settings = array());
<ide> *
<ide> * @param array $settings array of setting for the engine
<ide> * @return boolean True if the engine has been successfully initialized, false if not
<del> * @see CacheEngine::__defaults
<ide> */
<ide> public function init($settings = array()) {
<ide> if (!isset($settings['prefix'])) {
<ide> public function clear($check) {
<ide> * the group accordingly.
<ide> *
<ide> * @return array
<del> **/
<add> */
<ide> public function groups() {
<ide> if (empty($this->_compiledGroupNames)) {
<ide> foreach ($this->settings['groups'] as $group) {
<ide> public function groups() {
<ide> * old values will remain in storage until they expire.
<ide> *
<ide> * @return boolean success
<del> **/
<add> */
<ide> public function clearGroup($group) {
<ide> apc_inc($this->settings['prefix'] . $group, 1, $success);
<ide> return $success;
<ide><path>lib/Cake/Test/TestCase/Cache/Engine/ApcEngineTest.php
<ide> <?php
<ide> /**
<del> * ApcEngineTest file
<del> *
<del> * PHP 5
<del> *
<del> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<add> * CakePHP(tm) <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide> * Licensed under The MIT License
<ide> * @license MIT License (http://www.opensource.org/licenses/mit-license.php)
<ide> */
<ide> namespace Cake\Test\TestCase\Cache\Engine;
<add>
<ide> use Cake\Cache\Cache;
<ide> use Cake\Core\Configure;
<ide> use Cake\TestSuite\TestCase;
<ide> public function setUp() {
<ide> parent::setUp();
<ide> $this->skipIf(!function_exists('apc_store'), 'Apc is not installed or configured properly.');
<ide>
<del> $this->_cacheDisable = Configure::read('Cache.disable');
<ide> Configure::write('Cache.disable', false);
<del> Cache::config('apc', array('engine' => 'Apc', 'prefix' => 'cake_'));
<add> Configure::write('Cache.apc', ['engine' => 'Apc', 'prefix' => 'cake_']);
<ide> }
<ide>
<ide> /**
<ide> public function setUp() {
<ide> */
<ide> public function tearDown() {
<ide> parent::tearDown();
<del> Configure::write('Cache.disable', $this->_cacheDisable);
<ide> Cache::drop('apc');
<ide> Cache::drop('apc_groups');
<del> Cache::config('default');
<ide> }
<ide>
<ide> /**
<ide> public function tearDown() {
<ide> * @return void
<ide> */
<ide> public function testReadAndWriteCache() {
<del> Cache::set(array('duration' => 1), 'apc');
<add> Cache::set(['duration' => 1], 'apc');
<ide>
<ide> $result = Cache::read('test', 'apc');
<ide> $expecting = '';
<ide> public function testReadAndWriteCache() {
<ide> * @return void
<ide> */
<ide> public function testReadWriteDurationZero() {
<del> Cache::config('apc', array('engine' => 'Apc', 'duration' => 0, 'prefix' => 'cake_'));
<add> Configure::write('Cache.apc', ['engine' => 'Apc', 'duration' => 0, 'prefix' => 'cake_']);
<ide> Cache::write('zero', 'Should save', 'apc');
<ide> sleep(1);
<ide>
<ide> public function testReadWriteDurationZero() {
<ide> * @return void
<ide> */
<ide> public function testExpiry() {
<del> Cache::set(array('duration' => 1), 'apc');
<add> Cache::set(['duration' => 1], 'apc');
<ide>
<ide> $result = Cache::read('test', 'apc');
<ide> $this->assertFalse($result);
<ide> public function testExpiry() {
<ide> $result = Cache::read('other_test', 'apc');
<ide> $this->assertFalse($result);
<ide>
<del> Cache::set(array('duration' => 1), 'apc');
<add> Cache::set(['duration' => 1], 'apc');
<ide>
<ide> $data = 'this is a test of the emergency broadcasting system';
<ide> $result = Cache::write('other_test', $data, 'apc');
<ide> public function testClear() {
<ide> * @return void
<ide> */
<ide> public function testGroupsReadWrite() {
<del> Cache::config('apc_groups', array(
<add> Configure::write('Cache.apc_groups', [
<ide> 'engine' => 'Apc',
<ide> 'duration' => 0,
<ide> 'groups' => array('group_a', 'group_b'),
<ide> 'prefix' => 'test_'
<del> ));
<add> ]);
<ide> $this->assertTrue(Cache::write('test_groups', 'value', 'apc_groups'));
<ide> $this->assertEquals('value', Cache::read('test_groups', 'apc_groups'));
<ide>
<ide> public function testGroupsReadWrite() {
<ide> * @return void
<ide> */
<ide> public function testGroupDelete() {
<del> Cache::config('apc_groups', array(
<add> Configure::write('Cache.apc_groups', array(
<ide> 'engine' => 'Apc',
<ide> 'duration' => 0,
<ide> 'groups' => array('group_a', 'group_b'),
<ide> public function testGroupDelete() {
<ide> * @return void
<ide> **/
<ide> public function testGroupClear() {
<del> Cache::config('apc_groups', array(
<add> Configure::write('Cache.apc_groups', array(
<ide> 'engine' => 'Apc',
<ide> 'duration' => 0,
<ide> 'groups' => array('group_a', 'group_b'),
<ide> public function testGroupClear() {
<ide> $this->assertTrue(Cache::clearGroup('group_b', 'apc_groups'));
<ide> $this->assertFalse(Cache::read('test_groups', 'apc_groups'));
<ide> }
<add>
<ide> } | 2 |
Text | Text | fix broken link to dive into python 3 website | 167d63af315df96e2463b89b963e006b20da900a | <ide><path>.github/contributors/amitness.md
<add># spaCy contributor agreement
<add>
<add>This spaCy Contributor Agreement (**"SCA"**) is based on the
<add>[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
<add>The SCA applies to any contribution that you make to any product or project
<add>managed by us (the **"project"**), and sets out the intellectual property rights
<add>you grant to us in the contributed materials. The term **"us"** shall mean
<add>[ExplosionAI GmbH](https://explosion.ai/legal). The term
<add>**"you"** shall mean the person or entity identified below.
<add>
<add>If you agree to be bound by these terms, fill in the information requested
<add>below and include the filled-in version with your first pull request, under the
<add>folder [`.github/contributors/`](/.github/contributors/). The name of the file
<add>should be your GitHub username, with the extension `.md`. For example, the user
<add>example_user would create the file `.github/contributors/example_user.md`.
<add>
<add>Read this agreement carefully before signing. These terms and conditions
<add>constitute a binding legal agreement.
<add>
<add>## Contributor Agreement
<add>
<add>1. The term "contribution" or "contributed materials" means any source code,
<add>object code, patch, tool, sample, graphic, specification, manual,
<add>documentation, or any other material posted or submitted by you to the project.
<add>
<add>2. With respect to any worldwide copyrights, or copyright applications and
<add>registrations, in your contribution:
<add>
<add> * you hereby assign to us joint ownership, and to the extent that such
<add> assignment is or becomes invalid, ineffective or unenforceable, you hereby
<add> grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
<add> royalty-free, unrestricted license to exercise all rights under those
<add> copyrights. This includes, at our option, the right to sublicense these same
<add> rights to third parties through multiple levels of sublicensees or other
<add> licensing arrangements;
<add>
<add> * you agree that each of us can do all things in relation to your
<add> contribution as if each of us were the sole owners, and if one of us makes
<add> a derivative work of your contribution, the one who makes the derivative
<add> work (or has it made will be the sole owner of that derivative work;
<add>
<add> * you agree that you will not assert any moral rights in your contribution
<add> against us, our licensees or transferees;
<add>
<add> * you agree that we may register a copyright in your contribution and
<add> exercise all ownership rights associated with it; and
<add>
<add> * you agree that neither of us has any duty to consult with, obtain the
<add> consent of, pay or render an accounting to the other for any use or
<add> distribution of your contribution.
<add>
<add>3. With respect to any patents you own, or that you can license without payment
<add>to any third party, you hereby grant to us a perpetual, irrevocable,
<add>non-exclusive, worldwide, no-charge, royalty-free license to:
<add>
<add> * make, have made, use, sell, offer to sell, import, and otherwise transfer
<add> your contribution in whole or in part, alone or in combination with or
<add> included in any product, work or materials arising out of the project to
<add> which your contribution was submitted, and
<add>
<add> * at our option, to sublicense these same rights to third parties through
<add> multiple levels of sublicensees or other licensing arrangements.
<add>
<add>4. Except as set out above, you keep all right, title, and interest in your
<add>contribution. The rights that you grant to us under these terms are effective
<add>on the date you first submitted a contribution to us, even if your submission
<add>took place before the date you sign these terms.
<add>
<add>5. You covenant, represent, warrant and agree that:
<add>
<add> * Each contribution that you submit is and shall be an original work of
<add> authorship and you can legally grant the rights set out in this SCA;
<add>
<add> * to the best of your knowledge, each contribution will not violate any
<add> third party's copyrights, trademarks, patents, or other intellectual
<add> property rights; and
<add>
<add> * each contribution shall be in compliance with U.S. export control laws and
<add> other applicable export and import laws. You agree to notify us if you
<add> become aware of any circumstance which would make any of the foregoing
<add> representations inaccurate in any respect. We may publicly disclose your
<add> participation in the project, including the fact that you have signed the SCA.
<add>
<add>6. This SCA is governed by the laws of the State of California and applicable
<add>U.S. Federal law. Any choice of law rules will not apply.
<add>
<add>7. Please place an “x” on one of the applicable statements below. Please do NOT
<add>mark both statements:
<add>
<add> * [X] I am signing on behalf of myself as an individual and no other person
<add> or entity, including my employer, has or will have rights with respect to my
<add> contributions.
<add>
<add> * [ ] I am signing on behalf of my employer or a legal entity and I have the
<add> actual authority to contractually bind that entity.
<add>
<add>## Contributor Details
<add>
<add>| Field | Entry |
<add>|------------------------------- | -------------------- |
<add>| Name | Amit Chaudhary |
<add>| Company name (if applicable) | |
<add>| Title or role (if applicable) | |
<add>| Date | April 29, 2019 |
<add>| GitHub username | amitness |
<add>| Website (optional) | https://amitness.com |
<ide><path>website/docs/usage/101/_serialization.md
<ide> example, everything that's in your `nlp` object. This means you'll have to
<ide> translate its contents and structure into a format that can be saved, like a
<ide> file or a byte string. This process is called serialization. spaCy comes with
<ide> **built-in serialization methods** and supports the
<del>[Pickle protocol](http://www.diveintopython3.net/serializing.html#dump).
<add>[Pickle protocol](https://www.diveinto.org/python3/serializing.html#dump).
<ide>
<ide> > #### What's pickle?
<ide> > | 2 |
Python | Python | add bp neural network with 3 layers | e1befed97603c84e2daae43254610d695794f7b8 | <ide><path>Neural_Network/neuralnetwork_bp3.py
<add>#-*- coding:utf-8 -*-
<add>'''
<add>Author: Stephen Lee
<add>Date: 2017.9.21
<add>
<add>BP neural network with three layers
<add>'''
<add>
<add>import numpy as np
<add>import matplotlib.pyplot as plt
<add>
<add>class Bpnw():
<add>
<add> def __init__(self,n_layer1,n_layer2,n_layer3,rate_w=0.3,rate_t=0.3):
<add> '''
<add> :param n_layer1: number of neurons in the input layer
<add> :param n_layer2: number of neurons in the hidden layer
<add> :param n_layer3: number of neurons in the output layer
<add> :param rate_w: learning rate for the weights
<add> :param rate_t: learning rate for the thresholds
<add> '''
<add> self.num1 = n_layer1
<add> self.num2 = n_layer2
<add> self.num3 = n_layer3
<add> self.rate_weight = rate_w
<add> self.rate_thre = rate_t
<add> self.thre2 = -2*np.random.rand(self.num2)+1
<add> self.thre3 = -2*np.random.rand(self.num3)+1
<add> self.vji = np.mat(-2*np.random.rand(self.num2, self.num1)+1)
<add> self.wkj = np.mat(-2*np.random.rand(self.num3, self.num2)+1)
<add>
<add> def sig(self,x):
<add> return 1 / (1 + np.exp(-1*x))
<add>
<add> def sig_plain(self,x):
<add> return 1 / (1 + np.exp(-1*x))
<add>
<add> def do_round(self,x):
<add> return round(x, 3)
<add>
<add> def train(self, patterns, data_train, data_teach, n_repeat, error_accuracy, draw_e=True):
<add> '''
<add> :param patterns: the number of patterns
<add> :param data_train: training data x; numpy.ndarray
<add> :param data_teach: training data y; numpy.ndarray
<add> :param n_repeat: maximum number of training epochs
<add> :param error_accuracy: target mean error; training stops once the error falls below it
<add> :return: None
<add> '''
<add> data_train = np.asarray(data_train)
<add> data_teach = np.asarray(data_teach)
<add> print('-------------------Start Training-------------------------')
<add> print(' - - Shape: Train_Data ',np.shape(data_train))
<add> print(' - - Shape: Teach_Data ',np.shape(data_teach))
<add> rp = 0
<add> all_mse = []
<add> mse = 10000
<add> while rp < n_repeat and mse >= error_accuracy:
<add> alle = 0
<add> final_out = []
<add> for g in range(np.shape(data_train)[0]):
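<add> # forward pass: input -> hidden -> output (sigmoid activations)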
<add> net_i = data_train[g]
<add> out1 = net_i
<add>
<add> net_j = out1 * self.vji.T - self.thre2
<add> out2=self.sig(net_j)
<add>
<add> net_k = out2 * self.wkj.T - self.thre3
<add> out3 = self.sig(net_k)
<add>
<add> # learning process
<add> pd_k_all = np.multiply(np.multiply(out3,(1 - out3)),(data_teach[g]-out3))
<add> pd_j_all = np.multiply(pd_k_all * self.wkj,np.multiply(out2,1-out2))
<add> # update weights
<add> self.wkj = self.wkj + pd_k_all.T * out2 *self.rate_weight
<add> self.vji = self.vji + pd_j_all.T * out1 * self.rate_weight
<add> # update thresholds
<add> self.thre3 = self.thre3 - pd_k_all * self.rate_thre
<add> self.thre2 = self.thre2 - pd_j_all * self.rate_thre
<add> # accumulate the absolute error for this pattern
<add> errors = np.sum(abs((data_teach[g] - out3)))
<add>
<add> alle = alle + errors
<add> final_out.extend(out3.getA().tolist())
<add> final_out3 = [list(map(self.do_round,each)) for each in final_out]
<add>
<add> rp = rp + 1
<add> mse = alle/patterns
<add> all_mse.append(mse)
<add> def draw_error():
<add> yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
<add> plt.plot(all_mse, '+-')
<add> plt.plot(yplot, 'r--')
<add> plt.xlabel('Learning Times')
<add> plt.ylabel('All_mse')
<add> plt.grid(True,alpha = 0.7)
<add> plt.show()
<add> print('------------------Training Completed---------------------')
<add> print(' - - Training epoch: ', rp, ' - - Mse: %.6f'%mse)
<add> print(' - - Last Output: ', final_out3)
<add> if draw_e:
<add> draw_error()
<add>
<add> def predict(self,data_test):
<add> '''
<add> :param data_test: data test, numpy.ndarray
<add> :return: predict output data
<add> '''
<add> data_test = np.asarray(data_test)
<add> produce_out = []
<add> print('-------------------Start Testing-------------------------')
<add> print(' - - Shape: Test_Data ',np.shape(data_test))
<add> for g in range(np.shape(data_test)[0]):
<add>
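<add> # forward pass only; weights and thresholds are not updated here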
<add> net_i = data_test[g]
<add> out1 = net_i
<add>
<add> net_j = out1 * self.vji.T - self.thre2
<add> out2 = self.sig(net_j)
<add>
<add> net_k = out2 * self.wkj.T - self.thre3
<add> out3 = self.sig(net_k)
<add> produce_out.extend(out3.getA().tolist())
<add> res = [list(map(self.do_round,each)) for each in produce_out]
<add> return np.asarray(res)
<add>
<add>
<add>def main():
<add> # I will finish the main function later
<add> pass
<add>
<add>if __name__ == '__main__':
<add> main() | 1 |
Ruby | Ruby | fix actionview tests for missing helper | 4642ca964081179cdd3e3e739ed6e0271e5ff6d3 | <ide><path>actionview/test/abstract_unit.rb
<ide> # frozen_string_literal: true
<ide>
<ide> $:.unshift File.expand_path("lib", __dir__)
<del>$:.unshift File.expand_path("fixtures/helpers", __dir__)
<del>$:.unshift File.expand_path("fixtures/alternate_helpers", __dir__)
<ide>
<ide> ENV["TMPDIR"] = File.expand_path("tmp", __dir__)
<ide>
<ide> require "active_support/dependencies"
<ide> require "active_model"
<ide>
<add>module ActionViewTestSuiteUtils
<add> def self.require_helpers(helpers_dirs)
<add> Array(helpers_dirs).each do |helpers_dir|
<add> Dir.glob("#{helpers_dir}/**/*_helper.rb") do |helper_file|
<add> require helper_file
<add> end
<add> end
<add> end
<add>end
<add>
<add>ActionViewTestSuiteUtils.require_helpers("#{__dir__}/fixtures/helpers")
<add>ActionViewTestSuiteUtils.require_helpers("#{__dir__}/fixtures/alternate_helpers")
<add>
<ide> ActiveSupport::Dependencies.hook!
<ide>
<ide> Thread.abort_on_exception = true
<ide><path>actionview/test/actionpack/abstract/helper_test.rb
<ide> def test_helpers_with_symbol
<ide> end
<ide>
<ide> def test_declare_missing_helper
<del> e = assert_raise AbstractController::Helpers::MissingHelperError do
<add> e = assert_raise NameError do
<ide> AbstractHelpers.helper :missing
<ide> end
<del> assert_equal "helpers/missing_helper.rb", e.path
<add> assert_equal "uninitialized constant MissingHelper", e.message
<ide> end
<ide>
<ide> def test_helpers_with_module_through_block
<ide> def test_includes_controller_default_helper
<ide> end
<ide>
<ide> class InvalidHelpersTest < ActiveSupport::TestCase
<del> def test_controller_raise_error_about_real_require_problem
<del> e = assert_raise(LoadError) { AbstractInvalidHelpers.helper(:invalid_require) }
<del> assert_equal "No such file to load -- very_invalid_file_name.rb", e.message
<del> end
<del>
<ide> def test_controller_raise_error_about_missing_helper
<del> e = assert_raise(AbstractController::Helpers::MissingHelperError) { AbstractInvalidHelpers.helper(:missing) }
<del> assert_equal "Missing helper file helpers/missing_helper.rb", e.message
<del> end
<del>
<del> def test_missing_helper_error_has_the_right_path
<del> e = assert_raise(AbstractController::Helpers::MissingHelperError) { AbstractInvalidHelpers.helper(:missing) }
<del> assert_equal "helpers/missing_helper.rb", e.path
<add> e = assert_raise(NameError) { AbstractInvalidHelpers.helper(:missing) }
<add> assert_equal "uninitialized constant MissingHelper", e.message
<ide> end
<ide> end
<ide> end | 2 |
Ruby | Ruby | fix rubocop violation | 4cb0ce2e794f42c144629834d6f82cf5f21fcb75 | <ide><path>railties/lib/rails/generators/rails/app/app_generator.rb
<ide> module Generators
<ide> RAILS_DEV_PATH = File.expand_path("../../../../../..", __dir__)
<ide>
<ide> class AppGenerator < AppBase
<del>
<ide> # :stopdoc:
<ide>
<ide> WEBPACKS = %w( react vue angular elm stimulus )
<ide> def self.banner
<ide> "rails new #{arguments.map(&:usage).join(' ')} [options]"
<ide> end
<ide>
<del> # :startdoc:
<add> # :startdoc:
<ide>
<ide> private
<ide> | 1 |
Python | Python | add cursor to the browser client | 5a950e6a931229bcbc16480617f218c71c0b77f2 | <ide><path>glances/core/glances_client_browser.py
<ide> def serve_forever(self):
<ide> logger.warning(
<ide> _("Can not grab stats form {0}: {1}").format(uri, e))
<ide> except RuntimeError:
<del> logger.debug(_("Server list dictionnary change inside the loop (wait next update)"))
<add> logger.debug(
<add> _("Server list dictionnary change inside the loop (wait next update)"))
<ide>
<ide> # Update the screen
<ide> self.screen.update_browser(self.get_servers_list())
<ide><path>glances/outputs/glances_curses.py
<ide>
<ide> """Curses interface class."""
<ide>
<add># !!! TODO: split GlancesCurses for client and client_browser
<add>
<ide> # Import system lib
<ide> import sys
<ide>
<ide> def __init__(self, args=None):
<ide> logger.error(
<ide> 'Stats history disabled because MatPlotLib is not installed')
<ide>
<add> # Init the cursor position for the client browser
<add> self.cursor_init()
<add>
<ide> def set_cursor(self, value):
<ide> """Configure the cursor
<ide> 0: invisible
<ide> def __get_key(self, window):
<ide> keycode[0] = window.getch()
<ide> keycode[1] = window.getch()
<ide>
<add> if keycode != [-1, -1]:
<add> logger.debug("Keypressed ! Code: %s" % keycode)
<add>
<ide> if keycode[0] == 27 and keycode[1] != -1:
<ide> # Do not escape on specials keys
<ide> return -1
<ide> def __get_key(self, window):
<ide>
<ide> def __catch_key(self):
<ide> # Catch the pressed key
<del> # ~ self.pressedkey = self.term_window.getch()
<ide> self.pressedkey = self.__get_key(self.term_window)
<ide>
<ide> # Actions...
<ide> def __catch_key(self):
<ide> # Return the key code
<ide> return self.pressedkey
<ide>
<add> def cursor_init(self):
<add> """Init the cursor position to the top of the list"""
<add> self.cursor_position = 0
<add>
<add> def cursor_get(self):
<add> """Return the cursor position"""
<add> return self.cursor_position
<add>
<add> def cursor_up(self):
<add> """Set the cursor to position N-1 in the list"""
<add> if self.cursor_position > 0:
<add> self.cursor_position -= 1
<add>
<add> def cursor_down(self, servers_list):
<add> """Set the cursor to position N-1 in the list"""
<add> if self.cursor_position < len(servers_list) - 1:
<add> self.cursor_position += 1
<add>
<add> def __catch_key_browser(self, servers_list):
<add> # Catch the browser pressed key
<add> self.pressedkey = self.__get_key(self.term_window)
<add>
<add> # Actions...
<add> if self.pressedkey == ord('\x1b') or self.pressedkey == ord('q'):
<add> # 'ESC'|'q' > Quit
<add> self.end()
<add> logger.info("Stop Glances client browser")
<add> sys.exit(0)
<add> elif self.pressedkey == 10:
<add> # 'ENTER' > Run Glances on the selected server
<add> logger.debug("Line %s selected in the server list" % self.cursor_get())
<add> elif self.pressedkey == 259:
<add> # 'UP' > Up in the server list
<add> self.cursor_up()
<add> elif self.pressedkey == 258:
<add> # 'DOWN' > Down in the server list
<add> self.cursor_down(servers_list)
<add>
<add> # Return the key code
<add> return self.pressedkey
<add>
<ide> def end(self):
<ide> """Shutdown the curses window."""
<ide> if hasattr(curses, 'echo'):
<ide> def display_browser(self, servers_list):
<ide> screen_x = self.screen.getmaxyx()[1]
<ide> screen_y = self.screen.getmaxyx()[0]
<ide>
<del> # Display top header
<add> # Init position
<ide> x = 0
<ide> y = 0
<add>
<add> # Display top header
<ide> if len(servers_list) == 0:
<ide> msg = _("No Glances server detected on your network")
<ide> elif len(servers_list) == 1:
<ide> def display_browser(self, servers_list):
<ide>
<ide> # Display table header
<ide> cpt = 0
<del> xc = x
<add> xc = x + 2
<ide> for c in column_def:
<del> # Display server name
<ide> self.term_window.addnstr(y, xc,
<ide> c[1],
<ide> screen_x - x,
<ide> def display_browser(self, servers_list):
<ide> y += 1
<ide>
<ide> # Display table
<add> line = 0
<ide> try:
<ide> iteritems = servers_list.iteritems()
<ide> except AttributeError:
<ide> def display_browser(self, servers_list):
<ide> server_stat[c[0]] = servers_list[k][c[0]]
<ide> except KeyError as e:
<ide> logger.debug(_("Can not grab stats %s from server (KeyError: %s)") % (c[0], e))
<del> continue
<add> continue
<add>
<ide> # Display line for server stats
<ide> cpt = 0
<ide> xc = x
<add>
<add> # Is the line selected ?
<add> if line == self.cursor_get():
<add> # Display cursor
<add> self.term_window.addnstr(y, xc,
<add> ">",
<add> screen_x - x,
<add> self.__colors_list['BOLD'])
<add>
<add> xc += 2
<ide> for c in column_def:
<del> # Display server name
<add> # Display server stats
<ide> self.term_window.addnstr(y, xc,
<ide> "%s" % server_stat[c[0]],
<ide> screen_x - x,
<ide> def display_browser(self, servers_list):
<ide> cpt += 1
<ide> # Next line, next server...
<ide> y += 1
<add> line += 1
<ide>
<ide> return True
<ide>
<ide> def update_browser(self, servers_list):
<ide> countdown = Timer(self.__refresh_time)
<ide> while not countdown.finished():
<ide> # Getkey
<del> if self.__catch_key() > -1:
<add> if self.__catch_key_browser(servers_list) > -1:
<ide> # Redraw display
<ide> self.flush_browser(servers_list)
<ide> # Wait 100ms...
<ide><path>glances/plugins/glances_now.py
<ide> def __init__(self, args=None):
<ide>
<ide> # We want to display the stat in the curse interface
<ide> self.display_curse = True
<del>
<add>
<ide> # Set the message position
<ide> self.set_align('bottom')
<ide>
<ide><path>glances/plugins/glances_uptime.py
<ide> def __init__(self, args=None):
<ide>
<ide> # Set the message position
<ide> self.set_align('right')
<del>
<add>
<ide> # Init the stats
<ide> self.reset()
<ide>
<ide> def update(self):
<ide>
<ide> if self.get_input() == 'local':
<ide> # Update stats using the standard system lib
<del> uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
<add> uptime = datetime.now() - \
<add> datetime.fromtimestamp(psutil.boot_time())
<ide>
<ide> # Convert uptime to string (because datetime is not JSON serializable)
<ide> self.stats = str(uptime).split('.')[0] | 4 |
Javascript | Javascript | reduce number of calls to mapper when sorting | 1e5c16b78dcc25be56f0ba49fef055db32d39a19 | <ide><path>dist/immutable.js
<ide> function interposeFactory(iterable, separator) {
<ide> return interposedSequence;
<ide> }
<ide> function sortFactory(iterable, comparator, mapper) {
<del> var sortFn = mapper ? (function(a, b) {
<del> return comparator(mapper(a[1][1], a[1][0], iterable), mapper(b[1][1], b[1][0], iterable)) || a[0] - b[0];
<del> }) : (function(a, b) {
<del> return comparator(a[1][1], b[1][1]) || a[0] - b[0];
<del> });
<del> var entries = [];
<del> iterable.forEach((function(v, k) {
<del> entries.push([entries.length, [k, v]]);
<del> }));
<del> entries.sort(sortFn);
<ide> var isKeyedIterable = isKeyed(iterable);
<del> entries.forEach(isKeyedIterable ? (function(v, i) {
<del> entries[i] = v[1];
<add> var index = 0;
<add> var entries = iterable.toSeq().map((function(v, k) {
<add> return [k, v, index++, mapper ? mapper(v, k, iterable) : v];
<add> })).toArray();
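<add> // sort on the cached mapped value (index 3), breaking ties by original position (index 2) to keep the sort stable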
<add> entries.sort((function(a, b) {
<add> return comparator(a[3], b[3]) || a[2] - b[2];
<add> })).forEach(isKeyedIterable ? (function(v, i) {
<add> entries[i].length = 2;
<ide> }) : (function(v, i) {
<del> entries[i] = v[1][1];
<add> entries[i] = v[1];
<ide> }));
<ide> return isKeyedIterable ? KeyedSeq(entries) : isIndexed(iterable) ? IndexedSeq(entries) : SetSeq(entries);
<ide> }
<ide><path>dist/immutable.min.js
<del>Yr[Pr]=!0,Yr[Ur]=Vr.entries,Yr.__toJS=Vr.toObject,Yr.__toStringMapper=function(t,e){return e+": "+U(t)};var Qr=function(t){return O(t)?t:$r(t)};vr.createClass(Qr,{toKeyedSeq:function(){return new An(this,!1)},filter:function(t,e){return Ce(this,we(this,t,e,!1))},findIndex:function(t,e){var r=this.toKeyedSeq().findKey(t,e);return void 0===r?-1:r},indexOf:function(t){var e=this.toKeyedSeq().keyOf(t);return void 0===e?-1:e},lastIndexOf:function(t){var e=this.toKeyedSeq().lastKeyOf(t);return void 0===e?-1:e},reverse:function(){return Ce(this,ge(this,!1))},splice:function(t,e){var r=arguments.length;if(e=Math.max(0|e,0),0===r||2===r&&!e)return this;t=v(t,this.size);var n=this.slice(0,t);return Ce(this,1===r?n:n.concat(a(arguments,2),this.slice(t+e)))},findLastIndex:function(t,e){var r=this.toKeyedSeq().findLastKey(t,e);return void 0===r?-1:r},first:function(){return this.get(0)},flatten:function(t){return Ce(this,Ee(this,t,!1))},get:function(t,e){return t=f(this,t),0>t||1/0===this.size||void 0!==this.size&&t>this.size?e:this.find(function(e,r){return r===t},void 0,e)},has:function(t){return t=f(this,t),t>=0&&(void 0!==this.size?1/0===this.size||this.size>t:-1!==this.indexOf(t))},interpose:function(t){return Ce(this,Oe(this,t))},last:function(){return this.get(-1)},skip:function(t){var e=this,r=qe(e,t,!1);return T(e)&&r!==e&&(r.get=function(r,n){return r=f(this,r),r>=0?e.get(r+t,n):n}),Ce(this,r)},skipWhile:function(t,e){return Ce(this,xe(this,t,e,!1))},take:function(t){var e=this,r=Ie(e,t);return T(e)&&r!==e&&(r.get=function(r,n){return r=f(this,r),r>=0&&t>r?e.get(r,n):n}),Ce(this,r)}},{},Tr),Qr.prototype[Jr]=!0;var Xr=function(t){return E(t)&&!D(t)?t:en(t)};vr.createClass(Xr,{get:function(t,e){return this.has(t)?t:e},contains:function(t){return this.has(t)},keySeq:function(){return this.valueSeq()}},{},Tr),Xr.prototype.has=Vr.contains,Tr.isIterable=E,Tr.isKeyed=k,Tr.isIndexed=O,Tr.isAssociative=D,Tr.Keyed=Nr,Tr.Indexed=Qr,Tr.Set=Xr,Tr.Iterator=Kr;var Fr=function(t){return null===t||void 0===t?W():E(t)?t.toSeq():J(t)},Gr=Fr;
<del>vr.createClass(Fr,{toSeq:function(){return this},toString:function(){return this.__toString("Seq {","}")},cacheResult:function(){return!this._cache&&this.__iterateUncached&&(this._cache=this.entrySeq().toArray(),this.size=this._cache.length),this},__iterate:function(t,e){return Y(this,t,e,!0)},__iterator:function(t,e){return Q(this,t,e,!0)}},{of:function(){return Gr(arguments)}},Tr);var Hr=function(t){return null===t||void 0===t?W().toKeyedSeq():E(t)?k(t)?t.toSeq():t.fromEntrySeq():B(t)},Zr=Hr;vr.createClass(Hr,{toKeyedSeq:function(){return this},toSeq:function(){return this}},{of:function(){return Zr(arguments)}},Fr),L(Hr,Nr.prototype);var $r=function(t){return null===t||void 0===t?W():E(t)?k(t)?t.entrySeq():t.toIndexedSeq():P(t)},tn=$r;vr.createClass($r,{toIndexedSeq:function(){return this},toString:function(){return this.__toString("Seq [","]")},__iterate:function(t,e){return Y(this,t,e,!1)},__iterator:function(t,e){return Q(this,t,e,!1)}},{of:function(){return tn(arguments)}},Fr),L($r,Qr.prototype);var en=function(t){return(null===t||void 0===t?W():E(t)?k(t)?t.entrySeq():t:P(t)).toSetSeq()},rn=en;vr.createClass(en,{toSetSeq:function(){return this}},{of:function(){return rn(arguments)}},Fr),L(en,Xr.prototype),Fr.isSeq=T,Fr.Keyed=Hr,Fr.Set=en,Fr.Indexed=$r;var nn="";Fr.prototype[nn]=!0;var un=function(t){this._array=t,this.size=t.length};vr.createClass(un,{get:function(t,e){return this.has(t)?this._array[f(this,t)]:e},__iterate:function(t,e){for(var r=this._array,n=r.length-1,i=0;n>=i;i++)if(t(r[e?n-i:i],i,this)===!1)return i+1;return i},__iterator:function(t,e){var r=this._array,n=r.length-1,i=0;return new Kr(function(){return i>n?I():z(t,i,r[e?n-i++:i++])})}},{},$r);var sn=function(t){var e=Object.keys(t);this._object=t,this._keys=e,this.size=e.length};vr.createClass(sn,{get:function(t,e){return void 0===e||this.has(t)?this._object[t]:e},has:function(t){return this._object.hasOwnProperty(t)},__iterate:function(t,e){for(var r=this._object,n=this._keys,i=n.length-1,u=0;i>=u;u++){var s=n[e?i-u:u];
<del>if(t(r[s],s,this)===!1)return u+1}return u},__iterator:function(t,e){var r=this._object,n=this._keys,i=n.length-1,u=0;return new Kr(function(){var s=n[e?i-u:u];return u++>i?I():z(t,s,r[s])})}},{},Hr);var on=function(t){this._iterable=t,this.size=t.length||t.size};vr.createClass(on,{__iterateUncached:function(t,e){if(e)return this.cacheResult().__iterate(t,e);var r=this._iterable,n=x(r),i=0;if(q(n))for(var u;!(u=n.next()).done&&t(u.value,i++,this)!==!1;);return i},__iteratorUncached:function(t,e){if(e)return this.cacheResult().__iterator(t,e);var r=this._iterable,n=x(r);if(!q(n))return new Kr(I);var i=0;return new Kr(function(){var e=n.next();return e.done?e:z(t,i++,e.value)})}},{},$r);var an=function(t){this._iterator=t,this._iteratorCache=[]};vr.createClass(an,{__iterateUncached:function(t,e){if(e)return this.cacheResult().__iterate(t,e);for(var r=this._iterator,n=this._iteratorCache,i=0;n.length>i;)if(t(n[i],i++,this)===!1)return i;for(var u;!(u=r.next()).done;){var s=u.value;if(n[i]=s,t(s,i++,this)===!1)break}return i},__iteratorUncached:function(t,e){if(e)return this.cacheResult().__iterator(t,e);var r=this._iterator,n=this._iteratorCache,i=0;return new Kr(function(){if(i>=n.length){var e=r.next();if(e.done)return e;n[i]=e.value}return z(t,i,n[i++])})}},{},$r);var hn,cn=function(){throw TypeError("Abstract")};vr.createClass(cn,{},{},Tr);var fn=function(){vr.defaultSuperCall(this,_n.prototype,arguments)},_n=fn;vr.createClass(fn,{},{},cn),L(fn,Nr.prototype);var ln=function(){vr.defaultSuperCall(this,vn.prototype,arguments)},vn=ln;vr.createClass(ln,{},{},cn),L(ln,Qr.prototype);var pn=function(){vr.defaultSuperCall(this,dn.prototype,arguments)},dn=pn;vr.createClass(pn,{},{},cn),L(pn,Xr.prototype),cn.Keyed=fn,cn.Indexed=ln,cn.Set=pn;var yn=function(t){return null===t||void 0===t?re():Z(t)?t:re().merge(Nr(t))};vr.createClass(yn,{toString:function(){return this.__toString("Map {","}")},get:function(t,e){return this._root?this._root.get(0,y(t),t,e):e},set:function(t,e){return ne(this,t,e)},setIn:function(t,e){return i(t.length>0,"Requires non-empty key path."),this.updateIn(t,function(){return e
<del>})},remove:function(t){return ne(this,t,gr)},removeIn:function(t){return i(t.length>0,"Requires non-empty key path."),this.updateIn(t,function(){return gr})},update:function(t,e,r){return 1===arguments.length?t(this):this.updateIn([t],e,r)},updateIn:function(t,e,r){return r||(r=e,e=void 0),0===t.length?r(this):_e(this,t,e,r,0)},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._root=null,this.__hash=void 0,this.__altered=!0,this):re()},merge:function(){return he(this,void 0,arguments)},mergeWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return he(this,t,e)},mergeDeep:function(){return he(this,ce(void 0),arguments)},mergeDeepWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return he(this,ce(t),e)},withMutations:function(t){var e=this.asMutable();return t(e),e.wasAltered()?e.__ensureOwner(this.__ownerID):this},asMutable:function(){return this.__ownerID?this:this.__ensureOwner(new o)},asImmutable:function(){return this.__ensureOwner()},wasAltered:function(){return this.__altered},__iterator:function(t,e){return new En(this,t,e)},__iterate:function(t,e){var r=this,n=0;return this._root&&this._root.iterate(function(e){return n++,t(e[1],e[0],r)},e),n},__ensureOwner:function(t){return t===this.__ownerID?this:t?ee(this.size,this._root,t,this.__hash):(this.__ownerID=t,this.__altered=!1,this)}},{},fn),yn.isMap=Z;var mn="",gn=yn.prototype;gn[mn]=!0,gn[pr]=gn.remove;var wn=function(t,e,r){this.ownerID=t,this.bitmap=e,this.nodes=r},Sn=wn;vr.createClass(wn,{get:function(t,e,r,n){var i=1<<((0===t?e:e>>>t)&mr),u=this.bitmap;return 0===(u&i)?n:this.nodes[le(u&i-1)].get(t+dr,e,r,n)},update:function(t,e,r,n,i,u,s){var o=(0===e?r:r>>>e)&mr,a=1<<o,h=this.bitmap,c=0!==(h&a);if(!c&&i===gr)return this;var f=le(h&a-1),_=this.nodes,l=c?_[f]:void 0,v=ie(l,t,e+dr,r,n,i,u,s);if(v===l)return this;if(!c&&v&&_.length>=On)return ae(t,_,h,o,v);if(c&&!v&&2===_.length&&ue(_[1^f]))return _[1^f];if(c&&v&&1===_.length&&ue(v))return v;var p=t&&t===this.ownerID,d=c?v?h:h^a:h|a,y=c?v?ve(_,f,v,p):de(_,f,p):pe(_,f,v,p);
<del>return p?(this.bitmap=d,this.nodes=y,this):new Sn(t,d,y)},iterate:function(t,e){for(var r=this.nodes,n=0,i=r.length-1;i>=n;n++)if(r[e?i-n:n].iterate(t,e)===!1)return!1}},{});var zn=function(t,e,r){this.ownerID=t,this.count=e,this.nodes=r},In=zn;vr.createClass(zn,{get:function(t,e,r,n){var i=(0===t?e:e>>>t)&mr,u=this.nodes[i];return u?u.get(t+dr,e,r,n):n},update:function(t,e,r,n,i,u,s){var o=(0===e?r:r>>>e)&mr,a=i===gr,h=this.nodes,c=h[o];if(a&&!c)return this;var f=ie(c,t,e+dr,r,n,i,u,s);if(f===c)return this;var _=this.count;if(c){if(!f&&(_--,Dn>_))return oe(t,h,_,o)}else _++;var l=t&&t===this.ownerID,v=ve(h,o,f,l);return l?(this.count=_,this.nodes=v,this):new In(t,_,v)},iterate:function(t,e){for(var r=this.nodes,n=0,i=r.length-1;i>=n;n++){var u=r[e?i-n:n];if(u&&u.iterate(t,e)===!1)return!1}}},{});var bn=function(t,e,r){this.ownerID=t,this.hash=e,this.entries=r},qn=bn;vr.createClass(bn,{get:function(t,e,r,i){for(var u=this.entries,s=0,o=u.length;o>s;s++)if(n(r,u[s][0]))return u[s][1];return i},update:function(t,e,r,i,u,o,h){var c=u===gr;if(r!==this.hash)return c?this:(s(h),s(o),se(this,t,e,r,[i,u]));for(var f=this.entries,_=0,l=f.length;l>_&&!n(i,f[_][0]);_++);var v=l>_;if(c&&!v)return this;if(s(h),(c||!v)&&s(o),c&&2===l)return new xn(t,this.hash,f[1^_]);var p=t&&t===this.ownerID,d=p?f:a(f);return v?c?_===l-1?d.pop():d[_]=d.pop():d[_]=[i,u]:d.push([i,u]),p?(this.entries=d,this):new qn(t,this.hash,d)},iterate:function(t,e){for(var r=this.entries,n=0,i=r.length-1;i>=n;n++)if(t(r[e?i-n:n])===!1)return!1}},{});var xn=function(t,e,r){this.ownerID=t,this.hash=e,this.entry=r},Mn=xn;vr.createClass(xn,{get:function(t,e,r,i){return n(r,this.entry[0])?this.entry[1]:i},update:function(t,e,r,i,u,o,a){var h=u===gr,c=n(i,this.entry[0]);return(c?u===this.entry[1]:h)?this:(s(a),h?void s(o):c?t&&t===this.ownerID?(this.entry[1]=u,this):new Mn(t,r,[i,u]):(s(o),se(this,t,e,r,[i,u])))},iterate:function(t){return t(this.entry)}},{});var En=function(t,e,r){this._type=e,this._reverse=r,this._stack=t._root&&te(t._root)};vr.createClass(En,{next:function(){for(var t=this._type,e=this._stack;e;){var r,n=e.node,i=e.index++;
<del>if(n.entry){if(0===i)return $(t,n.entry)}else if(n.entries){if(r=n.entries.length-1,r>=i)return $(t,n.entries[this._reverse?r-i:i])}else if(r=n.nodes.length-1,r>=i){var u=n.nodes[this._reverse?r-i:i];if(u){if(u.entry)return $(t,u.entry);e=this._stack=te(u,e)}continue}e=this._stack=this._stack.__prev}return I()}},{},Kr);var kn,On=yr/2,Dn=yr/4,An=function(t,e){this._iter=t,this._useKeys=e,this.size=t.size};vr.createClass(An,{get:function(t,e){return this._iter.get(t,e)},has:function(t){return this._iter.has(t)},valueSeq:function(){return this._iter.valueSeq()},reverse:function(){var t=this,e=ge(this,!0);return this._useKeys||(e.valueSeq=function(){return t._iter.toSeq().reverse()}),e},map:function(t,e){var r=this,n=me(this,t,e);return this._useKeys||(n.valueSeq=function(){return r._iter.toSeq().map(t,e)}),n},__iterate:function(t,e){var r,n=this;return this._iter.__iterate(this._useKeys?function(e,r){return t(e,r,n)}:(r=e?Re(this):0,function(i){return t(i,e?--r:r++,n)}),e)},__iterator:function(t,e){if(this._useKeys)return this._iter.__iterator(t,e);var r=this._iter.__iterator(Ar,e),n=e?Re(this):0;return new Kr(function(){var i=r.next();return i.done?i:z(t,e?--n:n++,i.value,i)})}},{},Hr);var Cn=function(t){this._iter=t,this.size=t.size};vr.createClass(Cn,{contains:function(t){return this._iter.contains(t)},__iterate:function(t,e){var r=this,n=0;return this._iter.__iterate(function(e){return t(e,n++,r)},e)},__iterator:function(t,e){var r=this._iter.__iterator(Ar,e),n=0;return new Kr(function(){var e=r.next();return e.done?e:z(t,n++,e.value,e)})}},{},$r);var jn=function(t){this._iter=t,this.size=t.size};vr.createClass(jn,{has:function(t){return this._iter.contains(t)},__iterate:function(t,e){var r=this;return this._iter.__iterate(function(e){return t(e,e,r)},e)},__iterator:function(t,e){var r=this._iter.__iterator(Ar,e);return new Kr(function(){var e=r.next();return e.done?e:z(t,e.value,e.value,e)})}},{},en);var Rn=function(t){this._iter=t,this.size=t.size};vr.createClass(Rn,{entrySeq:function(){return this._iter.toSeq()
<del>},__iterate:function(t,e){var r=this;return this._iter.__iterate(function(e){return e?(je(e),t(e[1],e[0],r)):void 0},e)},__iterator:function(t,e){var r=this._iter.__iterator(Ar,e);return new Kr(function(){for(;;){var e=r.next();if(e.done)return e;var n=e.value;if(n)return je(n),t===Cr?e:z(t,n[0],n[1],e)}})}},{},Hr),Cn.prototype.cacheResult=An.prototype.cacheResult=jn.prototype.cacheResult=Rn.prototype.cacheResult=Le;var Un=function(t){var e=Je();if(null===t||void 0===t)return e;if(Te(t))return t;t=Qr(t);var r=t.size;return 0===r?e:r>0&&yr>r?Pe(0,r,dr,null,new Tn(t.toArray())):e.merge(t)};vr.createClass(Un,{toString:function(){return this.__toString("List [","]")},get:function(t,e){if(t=f(this,t),0>t||t>=this.size)return e;t+=this._origin;var r=Qe(this,t);return r&&r.array[t&mr]},set:function(t,e){return Ve(this,t,e)},remove:function(t){return this.has(t)?0===t?this.shift():t===this.size-1?this.pop():this.splice(t,1):this},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=this._origin=this._capacity=0,this._level=dr,this._root=this._tail=null,this.__hash=void 0,this.__altered=!0,this):Je()},push:function(){var t=arguments,e=this.size;return this.withMutations(function(r){Xe(r,0,e+t.length);for(var n=0;t.length>n;n++)r.set(e+n,t[n])})},pop:function(){return Xe(this,0,-1)},unshift:function(){var t=arguments;return this.withMutations(function(e){Xe(e,-t.length);for(var r=0;t.length>r;r++)e.set(r,t[r])})},shift:function(){return Xe(this,1)},merge:function(){return Fe(this,void 0,arguments)},mergeWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return Fe(this,t,e)},mergeDeep:function(){return Fe(this,ce(void 0),arguments)},mergeDeepWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return Fe(this,ce(t),e)},setSize:function(t){return Xe(this,0,t)},slice:function(t,e){var r=this.size;return l(t,e,r)?this:Xe(this,v(t,r),p(e,r))},__iterator:function(t,e){return new Bn(this,t,e)},__iterate:function(t,e){var r=this,n=0,i=function(e){return t(e,n++,r)},u=Ge(this._capacity);
<del>return e?We(this._tail,0,u-this._origin,this._capacity-this._origin,i,e)&&We(this._root,this._level,-this._origin,u-this._origin,i,e):We(this._root,this._level,-this._origin,u-this._origin,i,e)&&We(this._tail,0,u-this._origin,this._capacity-this._origin,i,e),n},__ensureOwner:function(t){return t===this.__ownerID?this:t?Pe(this._origin,this._capacity,this._level,this._root,this._tail,t,this.__hash):(this.__ownerID=t,this)}},{of:function(){return this(arguments)}},ln),Un.isList=Te;var Kn="",Ln=Un.prototype;Ln[Kn]=!0,Ln[pr]=Ln.remove,Ln.setIn=gn.setIn,Ln.removeIn=gn.removeIn,Ln.update=gn.update,Ln.updateIn=gn.updateIn,Ln.withMutations=gn.withMutations,Ln.asMutable=gn.asMutable,Ln.asImmutable=gn.asImmutable,Ln.wasAltered=gn.wasAltered;var Tn=function(t,e){this.array=t,this.ownerID=e},Wn=Tn;vr.createClass(Tn,{removeBefore:function(t,e,r){if(r===e?1<<e:0||0===this.array.length)return this;var n=r>>>e&mr;if(n>=this.array.length)return new Wn([],t);var i,u=0===n;if(e>0){var s=this.array[n];if(i=s&&s.removeBefore(t,e-dr,r),i===s&&u)return this}if(u&&!i)return this;var o=Ye(this,t);if(!u)for(var a=0;n>a;a++)o.array[a]=void 0;return i&&(o.array[n]=i),o},removeAfter:function(t,e,r){if(r===e?1<<e:0||0===this.array.length)return this;var n=r-1>>>e&mr;if(n>=this.array.length)return this;var i,u=n===this.array.length-1;if(e>0){var s=this.array[n];if(i=s&&s.removeAfter(t,e-dr,r),i===s&&u)return this}if(u&&!i)return this;var o=Ye(this,t);return u||o.array.pop(),i&&(o.array[n]=i),o}},{});var Bn=function(t,e,r){this._type=e,this._reverse=!!r,this._maxIndex=t.size-1;var n=Ge(t._capacity),i=Be(t._root&&t._root.array,t._level,-t._origin,n-t._origin-1),u=Be(t._tail&&t._tail.array,0,n-t._origin,t._capacity-t._origin-1);this._stack=r?u:i,this._stack.__prev=r?i:u};vr.createClass(Bn,{next:function(){for(var t=this._stack;t;){var e=t.array,r=t.index++;if(this._reverse&&(r=mr-r,r>t.rawMax&&(r=t.rawMax,t.index=yr-r)),r>=0&&yr>r&&t.rawMax>=r){var n=e&&e[r];if(0===t.level){var i,u=this._type;return 1!==u&&(i=t.offset+(r<<t.level),this._reverse&&(i=this._maxIndex-i)),z(u,i,n)
<del>}this._stack=t=Be(n&&n.array,t.level-dr,t.offset+(r<<t.level),t.max,t)}else t=this._stack=this._stack.__prev}return I()}},{},Kr);var Pn,Jn=function(t){return null===t||void 0===t?$e():He(t)?t:$e().merge(Nr(t))};vr.createClass(Jn,{toString:function(){return this.__toString("OrderedMap {","}")},get:function(t,e){var r=this._map.get(t);return void 0!==r?this._list.get(r)[1]:e},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._map.clear(),this._list.clear(),this):$e()},set:function(t,e){return tr(this,t,e)},remove:function(t){return tr(this,t,gr)},wasAltered:function(){return this._map.wasAltered()||this._list.wasAltered()},__iterate:function(t,e){var r=this;return this._list.__iterate(function(e){return e&&t(e[1],e[0],r)},e)},__iterator:function(t,e){return this._list.fromEntrySeq().__iterator(t,e)},__ensureOwner:function(t){if(t===this.__ownerID)return this;var e=this._map.__ensureOwner(t),r=this._list.__ensureOwner(t);return t?Ze(e,r,t,this.__hash):(this.__ownerID=t,this._map=e,this._list=r,this)}},{of:function(){return this(arguments)}},yn),Jn.isOrderedMap=He;var Vn="";Jn.prototype[Vn]=!0,Jn.prototype[pr]=Jn.prototype.remove;var Nn,Yn=function(t){return null===t||void 0===t?nr():er(t)?t:nr().unshiftAll(t)},Qn=Yn;vr.createClass(Yn,{toString:function(){return this.__toString("Stack [","]")},get:function(t,e){for(var r=this._head;r&&t--;)r=r.next;return r?r.value:e},peek:function(){return this._head&&this._head.value},push:function(){if(0===arguments.length)return this;for(var t=this.size+arguments.length,e=this._head,r=arguments.length-1;r>=0;r--)e={value:arguments[r],next:e};return this.__ownerID?(this.size=t,this._head=e,this.__hash=void 0,this.__altered=!0,this):rr(t,e)},pushAll:function(t){if(t=Qr(t),0===t.size)return this;var e=this.size,r=this._head;return t.reverse().forEach(function(t){e++,r={value:t,next:r}}),this.__ownerID?(this.size=e,this._head=r,this.__hash=void 0,this.__altered=!0,this):rr(e,r)},pop:function(){return this.slice(1)},unshift:function(){return this.push.apply(this,arguments)
<del>},unshiftAll:function(t){return this.pushAll(t)},shift:function(){return this.pop.apply(this,arguments)},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._head=void 0,this.__hash=void 0,this.__altered=!0,this):nr()},slice:function(t,e){if(l(t,e,this.size))return this;var r=v(t,this.size),n=p(e,this.size);if(n!==this.size)return vr.superCall(this,Qn.prototype,"slice",[t,e]);for(var i=this.size-r,u=this._head;r--;)u=u.next;return this.__ownerID?(this.size=i,this._head=u,this.__hash=void 0,this.__altered=!0,this):rr(i,u)},__ensureOwner:function(t){return t===this.__ownerID?this:t?rr(this.size,this._head,t,this.__hash):(this.__ownerID=t,this.__altered=!1,this)},__iterate:function(t,e){if(e)return this.toSeq().cacheResult.__iterate(t,e);for(var r=0,n=this._head;n&&t(n.value,r++,this)!==!1;)n=n.next;return r},__iterator:function(t,e){if(e)return this.toSeq().cacheResult().__iterator(t,e);var r=0,n=this._head;return new Kr(function(){if(n){var e=n.value;return n=n.next,z(t,r++,e)}return I()})}},{of:function(){return this(arguments)}},ln),Yn.isStack=er;var Xn="",Fn=Yn.prototype;Fn[Xn]=!0,Fn.withMutations=gn.withMutations,Fn.asMutable=gn.asMutable,Fn.asImmutable=gn.asImmutable,Fn.wasAltered=gn.wasAltered;var Gn,Hn=function(t){return null===t||void 0===t?or():ir(t)?t:or().union(Xr(t))};vr.createClass(Hn,{toString:function(){return this.__toString("Set {","}")},has:function(t){return this._map.has(t)},add:function(t){return ur(this,this._map.set(t,!0))},remove:function(t){return ur(this,this._map.remove(t))},clear:function(){return ur(this,this._map.clear())},union:function(){var t=arguments;return 0===t.length?this:this.withMutations(function(e){for(var r=0;t.length>r;r++)Xr(t[r]).forEach(function(t){return e.add(t)})})},intersect:function(){for(var t=[],e=0;arguments.length>e;e++)t[e]=arguments[e];if(0===t.length)return this;t=t.map(function(t){return Xr(t)});var r=this;return this.withMutations(function(e){r.forEach(function(r){t.every(function(t){return t.contains(r)})||e.remove(r)
<del>})})},subtract:function(){for(var t=[],e=0;arguments.length>e;e++)t[e]=arguments[e];if(0===t.length)return this;t=t.map(function(t){return Xr(t)});var r=this;return this.withMutations(function(e){r.forEach(function(r){t.some(function(t){return t.contains(r)})&&e.remove(r)})})},merge:function(){return this.union.apply(this,arguments)},mergeWith:function(){for(var t=[],e=1;arguments.length>e;e++)t[e-1]=arguments[e];return this.union.apply(this,t)},wasAltered:function(){return this._map.wasAltered()},__iterate:function(t,e){var r=this;return this._map.__iterate(function(e,n){return t(n,n,r)},e)},__iterator:function(t,e){return this._map.map(function(t,e){return e}).__iterator(t,e)},__ensureOwner:function(t){if(t===this.__ownerID)return this;var e=this._map.__ensureOwner(t);return t?this.__make(e,t):(this.__ownerID=t,this._map=e,this)}},{of:function(){return this(arguments)},fromKeys:function(t){return this(Nr(t).keySeq())}},pn),Hn.isSet=ir;var Zn="",$n=Hn.prototype;$n[Zn]=!0,$n[pr]=$n.remove,$n.mergeDeep=$n.merge,$n.mergeDeepWith=$n.mergeWith,$n.withMutations=gn.withMutations,$n.asMutable=gn.asMutable,$n.asImmutable=gn.asImmutable,$n.__empty=or,$n.__make=sr;var ti,ei=function(t){return null===t||void 0===t?cr():ar(t)?t:cr().union(Xr(t))};vr.createClass(ei,{toString:function(){return this.__toString("OrderedSet {","}")}},{of:function(){return this(arguments)},fromKeys:function(t){return this(Nr(t).keySeq())}},Hn),ei.isOrderedSet=ar;var ri="",ni=ei.prototype;ni[ri]=!0,ni.__empty=cr,ni.__make=hr;var ii,ui=function(t,e){var r=function(t){return this instanceof r?void(this._map=yn(t)):new r(t)},n=Object.keys(t),u=r.prototype=Object.create(si);u.constructor=r,e&&(u._name=e),u._defaultValues=t,u._keys=n,u.size=n.length;try{n.forEach(function(t){Object.defineProperty(r.prototype,t,{get:function(){return this.get(t)},set:function(e){i(this.__ownerID,"Cannot set on an immutable record."),this.set(t,e)}})})}catch(s){}return r};vr.createClass(ui,{toString:function(){return this.__toString(_r(this)+" {","}")
<del>},has:function(t){return this._defaultValues.hasOwnProperty(t)},get:function(t,e){if(!this.has(t))return e;var r=this._defaultValues[t];return this._map?this._map.get(t,r):r},clear:function(){if(this.__ownerID)return this._map&&this._map.clear(),this;var t=Object.getPrototypeOf(this).constructor;return t._empty||(t._empty=fr(this,re()))},set:function(t,e){if(!this.has(t))throw Error('Cannot set unknown key "'+t+'" on '+_r(this));var r=this._map&&this._map.set(t,e);return this.__ownerID||r===this._map?this:fr(this,r)},remove:function(t){if(!this.has(t))return this;var e=this._map&&this._map.remove(t);return this.__ownerID||e===this._map?this:fr(this,e)},wasAltered:function(){return this._map.wasAltered()},__iterator:function(t,e){var r=this;return Nr(this._defaultValues).map(function(t,e){return r.get(e)}).__iterator(t,e)},__iterate:function(t,e){var r=this;return Nr(this._defaultValues).map(function(t,e){return r.get(e)}).__iterate(t,e)},__ensureOwner:function(t){if(t===this.__ownerID)return this;var e=this._map&&this._map.__ensureOwner(t);return t?fr(this,e,t):(this.__ownerID=t,this._map=e,this)}},{},fn);var si=ui.prototype;si[pr]=si.remove,si.merge=gn.merge,si.mergeWith=gn.mergeWith,si.mergeDeep=gn.mergeDeep,si.mergeDeepWith=gn.mergeDeepWith,si.update=gn.update,si.updateIn=gn.updateIn,si.withMutations=gn.withMutations,si.asMutable=gn.asMutable,si.asImmutable=gn.asImmutable;var oi=function(t,e,r){return this instanceof ai?(i(0!==r,"Cannot step a Range by 0"),t=t||0,void 0===e&&(e=1/0),t===e&&ci?ci:(r=void 0===r?1:Math.abs(r),t>e&&(r=-r),this._start=t,this._end=e,this._step=r,void(this.size=Math.max(0,Math.ceil((e-t)/r-1)+1)))):new ai(t,e,r)},ai=oi;vr.createClass(oi,{toString:function(){return 0===this.size?"Range []":"Range [ "+this._start+"..."+this._end+(this._step>1?" by "+this._step:"")+" ]"},get:function(t,e){return this.has(t)?this._start+f(this,t)*this._step:e},contains:function(t){var e=(t-this._start)/this._step;return e>=0&&this.size>e&&e===Math.floor(e)},slice:function(t,e){return l(t,e,this.size)?this:(t=v(t,this.size),e=p(e,this.size),t>=e?ci:new ai(this.get(t,this._end),this.get(e,this._end),this._step))
<del>},indexOf:function(t){var e=t-this._start;if(e%this._step===0){var r=e/this._step;if(r>=0&&this.size>r)return r}return-1},lastIndexOf:function(t){return this.indexOf(t)},take:function(t){return this.slice(0,Math.max(0,t))},skip:function(t){return this.slice(Math.max(0,t))},__iterate:function(t,e){for(var r=this.size-1,n=this._step,i=e?this._start+r*n:this._start,u=0;r>=u;u++){if(t(i,u,this)===!1)return u+1;i+=e?-n:n}return u},__iterator:function(t,e){var r=this.size-1,n=this._step,i=e?this._start+r*n:this._start,u=0;return new Kr(function(){var s=i;return i+=e?-n:n,u>r?I():z(t,u++,s)})},__deepEquals:function(t){return t instanceof ai?this._start===t._start&&this._end===t._end&&this._step===t._step:vr.superCall(this,ai.prototype,"__deepEquals",[t])}},{},$r);var hi=oi.prototype;hi.__toJS=hi.toArray,hi.first=Ln.first,hi.last=Ln.last;var ci=oi(0,0),fi=function(t,e){return 0>=e&&vi?vi:this instanceof _i?(this._value=t,this.size=void 0===e?1/0:Math.max(0,e),void(0===this.size&&(vi=this))):new _i(t,e)},_i=fi;vr.createClass(fi,{toString:function(){return 0===this.size?"Repeat []":"Repeat [ "+this._value+" "+this.size+" times ]"},get:function(t,e){return this.has(t)?this._value:e},contains:function(t){return n(this._value,t)},slice:function(t,e){var r=this.size;return l(t,e,r)?this:new _i(this._value,p(e,r)-v(t,r))},reverse:function(){return this},indexOf:function(t){return n(this._value,t)?0:-1},lastIndexOf:function(t){return n(this._value,t)?this.size:-1},__iterate:function(t){for(var e=0;this.size>e;e++)if(t(this._value,e,this)===!1)return e+1;return e},__iterator:function(t){var e=this,r=0;return new Kr(function(){return e.size>r?z(t,r++,e._value):I()})},__deepEquals:function(t){return t instanceof _i?n(this._value,t._value):vr.superCall(this,_i.prototype,"__deepEquals",[t])}},{},$r);var li=fi.prototype;li.last=li.first,li.has=hi.has,li.take=hi.take,li.skip=hi.skip,li.__toJS=hi.__toJS;var vi,pi={Iterable:Tr,Seq:Fr,Collection:cn,Map:yn,OrderedMap:Jn,List:Un,Stack:Yn,Set:Hn,OrderedSet:ei,Record:ui,Range:oi,Repeat:fi,is:n,fromJS:X};
<del>return pi}"object"==typeof exports?module.exports=t():"function"==typeof define&&define.amd?define(t):Immutable=t();
<ide>\ No newline at end of file
<add>return r.size=t.size&&2*t.size-1,r.__iterateUncached=function(r,n){var i=this,u=0;return t.__iterate(function(t){return(!u||r(e,u++,i)!==!1)&&r(t,u++,i)!==!1},n),u},r.__iteratorUncached=function(r,n){var i,u=t.__iterator(Ar,n),s=0;return new Kr(function(){return(!i||s%2)&&(i=u.next(),i.done)?i:s%2?z(r,s++,e):z(r,s++,i.value,i)})},r}function De(t,e,r){var n=k(t),i=0,u=t.toSeq().map(function(e,n){return[n,e,i++,r?r(e,n,t):e]}).toArray();return u.sort(function(t,r){return e(t[3],r[3])||t[2]-r[2]}).forEach(n?function(t,e){u[e].length=2}:function(t,e){u[e]=t[1]}),n?Hr(u):O(t)?$r(u):en(u)}function Ae(t,e,r){if(r){var n=t.toSeq().map(function(e,n){return[e,r(e,n,t)]}).reduce(function(t,r){return e(r[1],t[1])>0?r:t});return n&&n[0]}return t.reduce(function(t,r){return e(r,t)>0?r:t})}function Ce(t,e){return T(t)?e:t.constructor(e)}function je(t){if(t!==Object(t))throw new TypeError("Expected [K, V] tuple: "+t)}function Re(t){return h(t.size),c(t)}function Ue(t){return k(t)?Nr:O(t)?Qr:Xr}function Ke(t){return Object.create((k(t)?Hr:O(t)?$r:en).prototype)}function Le(){return this._iter.cacheResult?(this._iter.cacheResult(),this.size=this._iter.size,this):Fr.prototype.cacheResult.call(this)}function Te(t){return!(!t||!t[Kn])}function We(t,e,r,n,i,u){var s,o=t&&t.array;if(0===e){var a=0>r?-r:0,h=n-r;for(h>yr&&(h=yr),s=a;h>s;s++)if(i(o&&o[u?a+h-1-s:s])===!1)return!1}else{var c=1<<e,f=e-dr;for(s=0;mr>=s;s++){var _=u?mr-s:s,l=r+(_<<e);if(n>l&&l+c>0){var v=o&&o[_];if(!We(v,f,l,n,i,u))return!1}}}return!0}function Be(t,e,r,n,i){return{array:t,level:e,offset:r,max:n,rawMax:n-r>>e,index:0,__prev:i}}function Pe(t,e,r,n,i,u,s){var o=Object.create(Ln);return o.size=e-t,o._origin=t,o._capacity=e,o._level=r,o._root=n,o._tail=i,o.__ownerID=u,o.__hash=s,o.__altered=!1,o}function Je(){return Pn||(Pn=Pe(0,0,dr))}function Ve(t,e,r){if(e=f(t,e),e>=t.size||0>e)return t.withMutations(function(t){0>e?Xe(t,e).set(0,r):Xe(t,0,e+1).set(e,r)});e+=t._origin;var n=t._tail,i=t._root,s=u(Sr);return e>=Ge(t._capacity)?n=Ne(n,t.__ownerID,0,e,r,s):i=Ne(i,t.__ownerID,t._level,e,r,s),s.value?t.__ownerID?(t._root=i,t._tail=n,t.__hash=void 0,t.__altered=!0,t):Pe(t._origin,t._capacity,t._level,i,n):t
<add>}function Ne(t,e,r,n,i,u){var o=n>>>r&mr,a=t&&t.array.length>o;if(!a&&void 0===i)return t;var h;if(r>0){var c=t&&t.array[o],f=Ne(c,e,r-dr,n,i,u);return f===c?t:(h=Ye(t,e),h.array[o]=f,h)}return a&&t.array[o]===i?t:(s(u),h=Ye(t,e),void 0===i&&o===h.array.length-1?h.array.pop():h.array[o]=i,h)}function Ye(t,e){return e&&t&&e===t.ownerID?t:new Tn(t?t.array.slice():[],e)}function Qe(t,e){if(e>=Ge(t._capacity))return t._tail;if(1<<t._level+dr>e){for(var r=t._root,n=t._level;r&&n>0;)r=r.array[e>>>n&mr],n-=dr;return r}}function Xe(t,e,r){var n=t.__ownerID||new o,i=t._origin,u=t._capacity,s=i+e,a=void 0===r?u:0>r?u+r:i+r;if(s===i&&a===u)return t;if(s>=a)return t.clear();for(var h=t._level,c=t._root,f=0;0>s+f;)c=new Tn(c&&c.array.length?[void 0,c]:[],n),h+=dr,f+=1<<h;f&&(s+=f,i+=f,a+=f,u+=f);for(var _=Ge(u),l=Ge(a);l>=1<<h+dr;)c=new Tn(c&&c.array.length?[c]:[],n),h+=dr;var v=t._tail,p=_>l?Qe(t,a-1):l>_?new Tn([],n):v;if(v&&l>_&&u>s&&v.array.length){c=Ye(c,n);for(var d=c,y=h;y>dr;y-=dr){var m=_>>>y&mr;d=d.array[m]=Ye(d.array[m],n)}d.array[_>>>dr&mr]=v}if(u>a&&(p=p&&p.removeAfter(n,0,a)),s>=l)s-=l,a-=l,h=dr,c=null,p=p&&p.removeBefore(n,0,s);else if(s>i||_>l){for(f=0;c;){var g=s>>>h&mr;if(g!==l>>>h&mr)break;g&&(f+=(1<<h)*g),h-=dr,c=c.array[g]}c&&s>i&&(c=c.removeBefore(n,h,s-f)),c&&_>l&&(c=c.removeAfter(n,h,l-f)),f&&(s-=f,a-=f)}return t.__ownerID?(t.size=a-s,t._origin=s,t._capacity=a,t._level=h,t._root=c,t._tail=p,t.__hash=void 0,t.__altered=!0,t):Pe(s,a,h,c,p)}function Fe(t,e,r){for(var n=[],i=0,u=0;r.length>u;u++){var s=r[u],o=Qr(s);o.size>i&&(i=o.size),E(s)||(o=o.map(function(t){return X(t)})),n.push(o)}return i>t.size&&(t=t.setSize(i)),fe(t,e,n)}function Ge(t){return yr>t?0:t-1>>>dr<<dr}function He(t){return!(!t||!t[Vn])}function Ze(t,e,r,n){var i=Object.create(Jn.prototype);return i.size=t?t.size:0,i._map=t,i._list=e,i.__ownerID=r,i.__hash=n,i}function $e(){return Nn||(Nn=Ze(re(),Je()))}function tr(t,e,r){var n=t._map,i=t._list,u=n.get(e),s=void 0!==u,o=r===gr;if(!s&&o||s&&r===i.get(u)[1])return t;s||(u=i.size);var a=o?n.remove(e):s?n:n.set(e,u),h=o?i.set(u,void 0):i.set(u,[e,r]);
<add>return t.__ownerID?(t.size=a.size,t._map=a,t._list=h,t.__hash=void 0,t):Ze(a,h)}function er(t){return!(!t||!t[Xn])}function rr(t,e,r,n){var i=Object.create(Fn);return i.size=t,i._head=e,i.__ownerID=r,i.__hash=n,i.__altered=!1,i}function nr(){return Gn||(Gn=rr(0))}function ir(t){return!(!t||!t[Zn])}function ur(t,e){return t.__ownerID?(t.size=e.size,t._map=e,t):e===t._map?t:0===e.size?t.__empty():t.__make(e)}function sr(t,e){var r=Object.create($n);return r.size=t?t.size:0,r._map=t,r.__ownerID=e,r}function or(){return ti||(ti=sr(re()))}function ar(t){return!(!t||!t[ri])}function hr(t,e){var r=Object.create(ni);return r.size=t?t.size:0,r._map=t,r.__ownerID=e,r}function cr(){return ii||(ii=hr($e()))}function fr(t,e,r){var n=Object.create(Object.getPrototypeOf(t));return n._map=e,n.__ownerID=r,n}function _r(t){return t._name||t.constructor.name}var lr=Object,vr={};vr.createClass=t,vr.superCall=e,vr.defaultSuperCall=r;var pr="delete",dr=5,yr=1<<dr,mr=yr-1,gr={},wr={value:!1},Sr={value:!1},zr=function(){try{return Object.defineProperty({},"x",{}),!0}catch(t){return!1}}(),Ir="function"==typeof WeakMap&&new WeakMap,br=2147483647,qr=0,xr="__immutablehash__";"function"==typeof Symbol&&(xr=Symbol(xr));var Mr=16,Er=255,kr=0,Or={},Dr=0,Ar=1,Cr=2,jr="@@iterator",Rr="function"==typeof Symbol&&Symbol.iterator,Ur=Rr||jr,Kr=function(t){this.next=t};vr.createClass(Kr,{toString:function(){return"[Iterator]"}},{}),Kr.KEYS=Dr,Kr.VALUES=Ar,Kr.ENTRIES=Cr;var Lr=Kr.prototype;Lr.inspect=Lr.toSource=function(){return""+this},Lr[Ur]=function(){return this};var Tr=function(t){return E(t)?t:Fr(t)},Wr=Tr;vr.createClass(Tr,{toArray:function(){h(this.size);var t=Array(this.size||0);return this.valueSeq().__iterate(function(e,r){t[r]=e}),t},toIndexedSeq:function(){return new Cn(this)},toJS:function(){return this.toSeq().map(function(t){return t&&"function"==typeof t.toJS?t.toJS():t}).__toJS()},toKeyedSeq:function(){return new An(this,!0)},toMap:function(){return h(this.size),yn(this.toKeyedSeq())},toObject:function(){h(this.size);var t={};return this.__iterate(function(e,r){t[r]=e
<add>}),t},toOrderedMap:function(){return h(this.size),Jn(this.toKeyedSeq())},toOrderedSet:function(){return h(this.size),ei(k(this)?this.valueSeq():this)},toSet:function(){return h(this.size),Hn(k(this)?this.valueSeq():this)},toSetSeq:function(){return new jn(this)},toSeq:function(){return O(this)?this.toIndexedSeq():k(this)?this.toKeyedSeq():this.toSetSeq()},toStack:function(){return h(this.size),Yn(k(this)?this.valueSeq():this)},toList:function(){return h(this.size),Un(k(this)?this.valueSeq():this)},toString:function(){return"[Iterable]"},__toString:function(t,e){return 0===this.size?t+e:t+" "+this.toSeq().map(this.__toStringMapper).join(", ")+" "+e},concat:function(){for(var t=[],e=0;arguments.length>e;e++)t[e]=arguments[e];return Ce(this,Me(this,t))},contains:function(t){return this.some(function(e){return n(e,t)})},entries:function(){return this.__iterator(Cr)},every:function(t,e){var r=!0;return this.__iterate(function(n,i,u){return t.call(e,n,i,u)?void 0:(r=!1,!1)}),r},filter:function(t,e){return Ce(this,we(this,t,e,!0))},find:function(t,e,r){var n=r;return this.__iterate(function(r,i,u){return t.call(e,r,i,u)?(n=r,!1):void 0}),n},forEach:function(t,e){return this.__iterate(e?t.bind(e):t)},join:function(t){t=void 0!==t?""+t:",";var e="",r=!0;return this.__iterate(function(n){r?r=!1:e+=t,e+=null!==n&&void 0!==n?n:""}),e},keys:function(){return this.__iterator(Dr)},map:function(t,e){return Ce(this,me(this,t,e))},reduce:function(t,e,r){var n,i;return 2>arguments.length?i=!0:n=e,this.__iterate(function(e,u,s){i?(i=!1,n=e):n=t.call(r,n,e,u,s)}),n},reduceRight:function(){var t=this.toKeyedSeq().reverse();return t.reduce.apply(t,arguments)},reverse:function(){return Ce(this,ge(this,!0))},slice:function(t,e){if(l(t,e,this.size))return this;var r=v(t,this.size),n=p(e,this.size);if(r!==r||n!==n)return this.toSeq().cacheResult().slice(t,e);var i=0===r?this:this.skip(r);return Ce(this,void 0===n||n===this.size?i:i.take(n-r))},some:function(t,e){return!this.every(j(t),e)},sort:function(t){return Ce(this,De(this,t||K))},values:function(){return this.__iterator(Ar)
<add>},butLast:function(){return this.slice(0,-1)},count:function(t,e){return c(t?this.toSeq().filter(t,e):this)},countBy:function(t,e){return Se(this,t,e)},equals:function(t){if(this===t)return!0;if(!t||"function"!=typeof t.equals)return!1;if(void 0!==this.size&&void 0!==t.size){if(this.size!==t.size)return!1;if(0===this.size&&0===t.size)return!0}return void 0!==this.__hash&&void 0!==t.__hash&&this.__hash!==t.__hash?!1:this.__deepEquals(t)},__deepEquals:function(t){var e=this.entries();return"function"==typeof t.every&&t.every(function(t,r){var i=e.next().value;return i&&n(i[0],r)&&n(i[1],t)})&&e.next().done},entrySeq:function(){var t=this;if(t._cache)return new un(t._cache);var e=t.toSeq().map(C).toIndexedSeq();return e.fromEntrySeq=function(){return t.toSeq()},e},filterNot:function(t,e){return this.filter(j(t),e)},findLast:function(t,e,r){return this.toKeyedSeq().reverse().find(t,e,r)},first:function(){return this.find(_)},flatMap:function(t,e){return Ce(this,ke(this,t,e))},flatten:function(t){return Ce(this,Ee(this,t,!0))},fromEntrySeq:function(){return new Rn(this)},get:function(t,e){return this.find(function(e,r){return n(r,t)},void 0,e)},getIn:function(t,e){var r=this;if(t)for(var n=0;t.length>n;n++)if(r=r&&r.get?r.get(t[n],gr):gr,r===gr)return e;return r},groupBy:function(t,e){return ze(this,t,e)},has:function(t){return this.get(t,gr)!==gr},isSubset:function(t){return t="function"==typeof t.contains?t:Wr(t),this.every(function(e){return t.contains(e)})},isSuperset:function(t){return t.isSubset(this)},keySeq:function(){return this.toSeq().map(A).toIndexedSeq()},last:function(){return this.toSeq().reverse().first()},max:function(t){return Ae(this,t||K)},maxBy:function(t,e){return Ae(this,e||K,t)},min:function(t){return Ae(this,R(t||K))},minBy:function(t,e){return Ae(this,R(e||K),t)},rest:function(){return this.slice(1)},skip:function(t){return Ce(this,qe(this,t,!0))},skipLast:function(t){return Ce(this,this.toSeq().reverse().skip(t).reverse())},skipWhile:function(t,e){return Ce(this,xe(this,t,e,!0))},skipUntil:function(t,e){return this.skipWhile(j(t),e)
<add>},sortBy:function(t,e){return Ce(this,De(this,e||K,t))},take:function(t){return Ce(this,Ie(this,t))},takeLast:function(t){return Ce(this,this.toSeq().reverse().take(t).reverse())},takeWhile:function(t,e){return Ce(this,be(this,t,e))},takeUntil:function(t,e){return this.takeWhile(j(t),e)},valueSeq:function(){return this.toIndexedSeq()},hashCode:function(){return this.__hash||(this.__hash=1/0===this.size?0:this.reduce(function(t,e,r){return t+(y(e)^(e===r?0:y(r)))&br},0))}},{});var Br="",Pr="",Jr="",Vr=Tr.prototype;Vr[Br]=!0,Vr[Ur]=Vr.values,Vr.toJSON=Vr.toJS,Vr.__toJS=Vr.toArray,Vr.__toStringMapper=U,Vr.inspect=Vr.toSource=function(){return""+this},Vr.chain=Vr.flatMap,function(){try{Object.defineProperty(Vr,"length",{get:function(){if(!Tr.noLengthWarning){var t;try{throw Error()}catch(e){t=e.stack}if(-1===t.indexOf("_wrapObject"))return console&&console.warn&&console.warn("iterable.length has been deprecated, use iterable.size or iterable.count(). This warning will become a silent error in a future version. "+t),this.size}}})}catch(t){}}();var Nr=function(t){return k(t)?t:Hr(t)};vr.createClass(Nr,{flip:function(){return Ce(this,ye(this))},findKey:function(t,e){var r;return this.__iterate(function(n,i,u){return t.call(e,n,i,u)?(r=i,!1):void 0}),r},findLastKey:function(t,e){return this.toSeq().reverse().findKey(t,e)},keyOf:function(t){return this.findKey(function(e){return n(e,t)})},lastKeyOf:function(t){return this.toSeq().reverse().keyOf(t)},mapEntries:function(t,e){var r=this,n=0;return Ce(this,this.toSeq().map(function(i,u){return t.call(e,[u,i],n++,r)}).fromEntrySeq())},mapKeys:function(t,e){var r=this;return Ce(this,this.toSeq().flip().map(function(n,i){return t.call(e,n,i,r)}).flip())}},{},Tr);var Yr=Nr.prototype;Yr[Pr]=!0,Yr[Ur]=Vr.entries,Yr.__toJS=Vr.toObject,Yr.__toStringMapper=function(t,e){return e+": "+U(t)};var Qr=function(t){return O(t)?t:$r(t)};vr.createClass(Qr,{toKeyedSeq:function(){return new An(this,!1)},filter:function(t,e){return Ce(this,we(this,t,e,!1))
<add>},findIndex:function(t,e){var r=this.toKeyedSeq().findKey(t,e);return void 0===r?-1:r},indexOf:function(t){var e=this.toKeyedSeq().keyOf(t);return void 0===e?-1:e},lastIndexOf:function(t){var e=this.toKeyedSeq().lastKeyOf(t);return void 0===e?-1:e},reverse:function(){return Ce(this,ge(this,!1))},splice:function(t,e){var r=arguments.length;if(e=Math.max(0|e,0),0===r||2===r&&!e)return this;t=v(t,this.size);var n=this.slice(0,t);return Ce(this,1===r?n:n.concat(a(arguments,2),this.slice(t+e)))},findLastIndex:function(t,e){var r=this.toKeyedSeq().findLastKey(t,e);return void 0===r?-1:r},first:function(){return this.get(0)},flatten:function(t){return Ce(this,Ee(this,t,!1))},get:function(t,e){return t=f(this,t),0>t||1/0===this.size||void 0!==this.size&&t>this.size?e:this.find(function(e,r){return r===t},void 0,e)},has:function(t){return t=f(this,t),t>=0&&(void 0!==this.size?1/0===this.size||this.size>t:-1!==this.indexOf(t))},interpose:function(t){return Ce(this,Oe(this,t))},last:function(){return this.get(-1)},skip:function(t){var e=this,r=qe(e,t,!1);return T(e)&&r!==e&&(r.get=function(r,n){return r=f(this,r),r>=0?e.get(r+t,n):n}),Ce(this,r)},skipWhile:function(t,e){return Ce(this,xe(this,t,e,!1))},take:function(t){var e=this,r=Ie(e,t);return T(e)&&r!==e&&(r.get=function(r,n){return r=f(this,r),r>=0&&t>r?e.get(r,n):n}),Ce(this,r)}},{},Tr),Qr.prototype[Jr]=!0;var Xr=function(t){return E(t)&&!D(t)?t:en(t)};vr.createClass(Xr,{get:function(t,e){return this.has(t)?t:e},contains:function(t){return this.has(t)},keySeq:function(){return this.valueSeq()}},{},Tr),Xr.prototype.has=Vr.contains,Tr.isIterable=E,Tr.isKeyed=k,Tr.isIndexed=O,Tr.isAssociative=D,Tr.Keyed=Nr,Tr.Indexed=Qr,Tr.Set=Xr,Tr.Iterator=Kr;var Fr=function(t){return null===t||void 0===t?W():E(t)?t.toSeq():J(t)},Gr=Fr;vr.createClass(Fr,{toSeq:function(){return this},toString:function(){return this.__toString("Seq {","}")},cacheResult:function(){return!this._cache&&this.__iterateUncached&&(this._cache=this.entrySeq().toArray(),this.size=this._cache.length),this},__iterate:function(t,e){return Y(this,t,e,!0)
<add>},__iterator:function(t,e){return Q(this,t,e,!0)}},{of:function(){return Gr(arguments)}},Tr);var Hr=function(t){return null===t||void 0===t?W().toKeyedSeq():E(t)?k(t)?t.toSeq():t.fromEntrySeq():B(t)},Zr=Hr;vr.createClass(Hr,{toKeyedSeq:function(){return this},toSeq:function(){return this}},{of:function(){return Zr(arguments)}},Fr),L(Hr,Nr.prototype);var $r=function(t){return null===t||void 0===t?W():E(t)?k(t)?t.entrySeq():t.toIndexedSeq():P(t)},tn=$r;vr.createClass($r,{toIndexedSeq:function(){return this},toString:function(){return this.__toString("Seq [","]")},__iterate:function(t,e){return Y(this,t,e,!1)},__iterator:function(t,e){return Q(this,t,e,!1)}},{of:function(){return tn(arguments)}},Fr),L($r,Qr.prototype);var en=function(t){return(null===t||void 0===t?W():E(t)?k(t)?t.entrySeq():t:P(t)).toSetSeq()},rn=en;vr.createClass(en,{toSetSeq:function(){return this}},{of:function(){return rn(arguments)}},Fr),L(en,Xr.prototype),Fr.isSeq=T,Fr.Keyed=Hr,Fr.Set=en,Fr.Indexed=$r;var nn="";Fr.prototype[nn]=!0;var un=function(t){this._array=t,this.size=t.length};vr.createClass(un,{get:function(t,e){return this.has(t)?this._array[f(this,t)]:e},__iterate:function(t,e){for(var r=this._array,n=r.length-1,i=0;n>=i;i++)if(t(r[e?n-i:i],i,this)===!1)return i+1;return i},__iterator:function(t,e){var r=this._array,n=r.length-1,i=0;return new Kr(function(){return i>n?I():z(t,i,r[e?n-i++:i++])})}},{},$r);var sn=function(t){var e=Object.keys(t);this._object=t,this._keys=e,this.size=e.length};vr.createClass(sn,{get:function(t,e){return void 0===e||this.has(t)?this._object[t]:e},has:function(t){return this._object.hasOwnProperty(t)},__iterate:function(t,e){for(var r=this._object,n=this._keys,i=n.length-1,u=0;i>=u;u++){var s=n[e?i-u:u];if(t(r[s],s,this)===!1)return u+1}return u},__iterator:function(t,e){var r=this._object,n=this._keys,i=n.length-1,u=0;return new Kr(function(){var s=n[e?i-u:u];return u++>i?I():z(t,s,r[s])})}},{},Hr);var on=function(t){this._iterable=t,this.size=t.length||t.size};vr.createClass(on,{__iterateUncached:function(t,e){if(e)return this.cacheResult().__iterate(t,e);
<add>var r=this._iterable,n=x(r),i=0;if(q(n))for(var u;!(u=n.next()).done&&t(u.value,i++,this)!==!1;);return i},__iteratorUncached:function(t,e){if(e)return this.cacheResult().__iterator(t,e);var r=this._iterable,n=x(r);if(!q(n))return new Kr(I);var i=0;return new Kr(function(){var e=n.next();return e.done?e:z(t,i++,e.value)})}},{},$r);var an=function(t){this._iterator=t,this._iteratorCache=[]};vr.createClass(an,{__iterateUncached:function(t,e){if(e)return this.cacheResult().__iterate(t,e);for(var r=this._iterator,n=this._iteratorCache,i=0;n.length>i;)if(t(n[i],i++,this)===!1)return i;for(var u;!(u=r.next()).done;){var s=u.value;if(n[i]=s,t(s,i++,this)===!1)break}return i},__iteratorUncached:function(t,e){if(e)return this.cacheResult().__iterator(t,e);var r=this._iterator,n=this._iteratorCache,i=0;return new Kr(function(){if(i>=n.length){var e=r.next();if(e.done)return e;n[i]=e.value}return z(t,i,n[i++])})}},{},$r);var hn,cn=function(){throw TypeError("Abstract")};vr.createClass(cn,{},{},Tr);var fn=function(){vr.defaultSuperCall(this,_n.prototype,arguments)},_n=fn;vr.createClass(fn,{},{},cn),L(fn,Nr.prototype);var ln=function(){vr.defaultSuperCall(this,vn.prototype,arguments)},vn=ln;vr.createClass(ln,{},{},cn),L(ln,Qr.prototype);var pn=function(){vr.defaultSuperCall(this,dn.prototype,arguments)},dn=pn;vr.createClass(pn,{},{},cn),L(pn,Xr.prototype),cn.Keyed=fn,cn.Indexed=ln,cn.Set=pn;var yn=function(t){return null===t||void 0===t?re():Z(t)?t:re().merge(Nr(t))};vr.createClass(yn,{toString:function(){return this.__toString("Map {","}")},get:function(t,e){return this._root?this._root.get(0,y(t),t,e):e},set:function(t,e){return ne(this,t,e)},setIn:function(t,e){return i(t.length>0,"Requires non-empty key path."),this.updateIn(t,function(){return e})},remove:function(t){return ne(this,t,gr)},removeIn:function(t){return i(t.length>0,"Requires non-empty key path."),this.updateIn(t,function(){return gr})},update:function(t,e,r){return 1===arguments.length?t(this):this.updateIn([t],e,r)},updateIn:function(t,e,r){return r||(r=e,e=void 0),0===t.length?r(this):_e(this,t,e,r,0)
<add>},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._root=null,this.__hash=void 0,this.__altered=!0,this):re()},merge:function(){return he(this,void 0,arguments)},mergeWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return he(this,t,e)},mergeDeep:function(){return he(this,ce(void 0),arguments)},mergeDeepWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return he(this,ce(t),e)},withMutations:function(t){var e=this.asMutable();return t(e),e.wasAltered()?e.__ensureOwner(this.__ownerID):this},asMutable:function(){return this.__ownerID?this:this.__ensureOwner(new o)},asImmutable:function(){return this.__ensureOwner()},wasAltered:function(){return this.__altered},__iterator:function(t,e){return new En(this,t,e)},__iterate:function(t,e){var r=this,n=0;return this._root&&this._root.iterate(function(e){return n++,t(e[1],e[0],r)},e),n},__ensureOwner:function(t){return t===this.__ownerID?this:t?ee(this.size,this._root,t,this.__hash):(this.__ownerID=t,this.__altered=!1,this)}},{},fn),yn.isMap=Z;var mn="",gn=yn.prototype;gn[mn]=!0,gn[pr]=gn.remove;var wn=function(t,e,r){this.ownerID=t,this.bitmap=e,this.nodes=r},Sn=wn;vr.createClass(wn,{get:function(t,e,r,n){var i=1<<((0===t?e:e>>>t)&mr),u=this.bitmap;return 0===(u&i)?n:this.nodes[le(u&i-1)].get(t+dr,e,r,n)},update:function(t,e,r,n,i,u,s){var o=(0===e?r:r>>>e)&mr,a=1<<o,h=this.bitmap,c=0!==(h&a);if(!c&&i===gr)return this;var f=le(h&a-1),_=this.nodes,l=c?_[f]:void 0,v=ie(l,t,e+dr,r,n,i,u,s);if(v===l)return this;if(!c&&v&&_.length>=On)return ae(t,_,h,o,v);if(c&&!v&&2===_.length&&ue(_[1^f]))return _[1^f];if(c&&v&&1===_.length&&ue(v))return v;var p=t&&t===this.ownerID,d=c?v?h:h^a:h|a,y=c?v?ve(_,f,v,p):de(_,f,p):pe(_,f,v,p);return p?(this.bitmap=d,this.nodes=y,this):new Sn(t,d,y)},iterate:function(t,e){for(var r=this.nodes,n=0,i=r.length-1;i>=n;n++)if(r[e?i-n:n].iterate(t,e)===!1)return!1}},{});var zn=function(t,e,r){this.ownerID=t,this.count=e,this.nodes=r},In=zn;vr.createClass(zn,{get:function(t,e,r,n){var i=(0===t?e:e>>>t)&mr,u=this.nodes[i];
<add>return u?u.get(t+dr,e,r,n):n},update:function(t,e,r,n,i,u,s){var o=(0===e?r:r>>>e)&mr,a=i===gr,h=this.nodes,c=h[o];if(a&&!c)return this;var f=ie(c,t,e+dr,r,n,i,u,s);if(f===c)return this;var _=this.count;if(c){if(!f&&(_--,Dn>_))return oe(t,h,_,o)}else _++;var l=t&&t===this.ownerID,v=ve(h,o,f,l);return l?(this.count=_,this.nodes=v,this):new In(t,_,v)},iterate:function(t,e){for(var r=this.nodes,n=0,i=r.length-1;i>=n;n++){var u=r[e?i-n:n];if(u&&u.iterate(t,e)===!1)return!1}}},{});var bn=function(t,e,r){this.ownerID=t,this.hash=e,this.entries=r},qn=bn;vr.createClass(bn,{get:function(t,e,r,i){for(var u=this.entries,s=0,o=u.length;o>s;s++)if(n(r,u[s][0]))return u[s][1];return i},update:function(t,e,r,i,u,o,h){var c=u===gr;if(r!==this.hash)return c?this:(s(h),s(o),se(this,t,e,r,[i,u]));for(var f=this.entries,_=0,l=f.length;l>_&&!n(i,f[_][0]);_++);var v=l>_;if(c&&!v)return this;if(s(h),(c||!v)&&s(o),c&&2===l)return new xn(t,this.hash,f[1^_]);var p=t&&t===this.ownerID,d=p?f:a(f);return v?c?_===l-1?d.pop():d[_]=d.pop():d[_]=[i,u]:d.push([i,u]),p?(this.entries=d,this):new qn(t,this.hash,d)},iterate:function(t,e){for(var r=this.entries,n=0,i=r.length-1;i>=n;n++)if(t(r[e?i-n:n])===!1)return!1}},{});var xn=function(t,e,r){this.ownerID=t,this.hash=e,this.entry=r},Mn=xn;vr.createClass(xn,{get:function(t,e,r,i){return n(r,this.entry[0])?this.entry[1]:i},update:function(t,e,r,i,u,o,a){var h=u===gr,c=n(i,this.entry[0]);return(c?u===this.entry[1]:h)?this:(s(a),h?void s(o):c?t&&t===this.ownerID?(this.entry[1]=u,this):new Mn(t,r,[i,u]):(s(o),se(this,t,e,r,[i,u])))},iterate:function(t){return t(this.entry)}},{});var En=function(t,e,r){this._type=e,this._reverse=r,this._stack=t._root&&te(t._root)};vr.createClass(En,{next:function(){for(var t=this._type,e=this._stack;e;){var r,n=e.node,i=e.index++;if(n.entry){if(0===i)return $(t,n.entry)}else if(n.entries){if(r=n.entries.length-1,r>=i)return $(t,n.entries[this._reverse?r-i:i])}else if(r=n.nodes.length-1,r>=i){var u=n.nodes[this._reverse?r-i:i];if(u){if(u.entry)return $(t,u.entry);e=this._stack=te(u,e)
<add>}continue}e=this._stack=this._stack.__prev}return I()}},{},Kr);var kn,On=yr/2,Dn=yr/4,An=function(t,e){this._iter=t,this._useKeys=e,this.size=t.size};vr.createClass(An,{get:function(t,e){return this._iter.get(t,e)},has:function(t){return this._iter.has(t)},valueSeq:function(){return this._iter.valueSeq()},reverse:function(){var t=this,e=ge(this,!0);return this._useKeys||(e.valueSeq=function(){return t._iter.toSeq().reverse()}),e},map:function(t,e){var r=this,n=me(this,t,e);return this._useKeys||(n.valueSeq=function(){return r._iter.toSeq().map(t,e)}),n},__iterate:function(t,e){var r,n=this;return this._iter.__iterate(this._useKeys?function(e,r){return t(e,r,n)}:(r=e?Re(this):0,function(i){return t(i,e?--r:r++,n)}),e)},__iterator:function(t,e){if(this._useKeys)return this._iter.__iterator(t,e);var r=this._iter.__iterator(Ar,e),n=e?Re(this):0;return new Kr(function(){var i=r.next();return i.done?i:z(t,e?--n:n++,i.value,i)})}},{},Hr);var Cn=function(t){this._iter=t,this.size=t.size};vr.createClass(Cn,{contains:function(t){return this._iter.contains(t)},__iterate:function(t,e){var r=this,n=0;return this._iter.__iterate(function(e){return t(e,n++,r)},e)},__iterator:function(t,e){var r=this._iter.__iterator(Ar,e),n=0;return new Kr(function(){var e=r.next();return e.done?e:z(t,n++,e.value,e)})}},{},$r);var jn=function(t){this._iter=t,this.size=t.size};vr.createClass(jn,{has:function(t){return this._iter.contains(t)},__iterate:function(t,e){var r=this;return this._iter.__iterate(function(e){return t(e,e,r)},e)},__iterator:function(t,e){var r=this._iter.__iterator(Ar,e);return new Kr(function(){var e=r.next();return e.done?e:z(t,e.value,e.value,e)})}},{},en);var Rn=function(t){this._iter=t,this.size=t.size};vr.createClass(Rn,{entrySeq:function(){return this._iter.toSeq()},__iterate:function(t,e){var r=this;return this._iter.__iterate(function(e){return e?(je(e),t(e[1],e[0],r)):void 0},e)},__iterator:function(t,e){var r=this._iter.__iterator(Ar,e);return new Kr(function(){for(;;){var e=r.next();if(e.done)return e;var n=e.value;
<add>if(n)return je(n),t===Cr?e:z(t,n[0],n[1],e)}})}},{},Hr),Cn.prototype.cacheResult=An.prototype.cacheResult=jn.prototype.cacheResult=Rn.prototype.cacheResult=Le;var Un=function(t){var e=Je();if(null===t||void 0===t)return e;if(Te(t))return t;t=Qr(t);var r=t.size;return 0===r?e:r>0&&yr>r?Pe(0,r,dr,null,new Tn(t.toArray())):e.merge(t)};vr.createClass(Un,{toString:function(){return this.__toString("List [","]")},get:function(t,e){if(t=f(this,t),0>t||t>=this.size)return e;t+=this._origin;var r=Qe(this,t);return r&&r.array[t&mr]},set:function(t,e){return Ve(this,t,e)},remove:function(t){return this.has(t)?0===t?this.shift():t===this.size-1?this.pop():this.splice(t,1):this},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=this._origin=this._capacity=0,this._level=dr,this._root=this._tail=null,this.__hash=void 0,this.__altered=!0,this):Je()},push:function(){var t=arguments,e=this.size;return this.withMutations(function(r){Xe(r,0,e+t.length);for(var n=0;t.length>n;n++)r.set(e+n,t[n])})},pop:function(){return Xe(this,0,-1)},unshift:function(){var t=arguments;return this.withMutations(function(e){Xe(e,-t.length);for(var r=0;t.length>r;r++)e.set(r,t[r])})},shift:function(){return Xe(this,1)},merge:function(){return Fe(this,void 0,arguments)},mergeWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return Fe(this,t,e)},mergeDeep:function(){return Fe(this,ce(void 0),arguments)},mergeDeepWith:function(t){for(var e=[],r=1;arguments.length>r;r++)e[r-1]=arguments[r];return Fe(this,ce(t),e)},setSize:function(t){return Xe(this,0,t)},slice:function(t,e){var r=this.size;return l(t,e,r)?this:Xe(this,v(t,r),p(e,r))},__iterator:function(t,e){return new Bn(this,t,e)},__iterate:function(t,e){var r=this,n=0,i=function(e){return t(e,n++,r)},u=Ge(this._capacity);return e?We(this._tail,0,u-this._origin,this._capacity-this._origin,i,e)&&We(this._root,this._level,-this._origin,u-this._origin,i,e):We(this._root,this._level,-this._origin,u-this._origin,i,e)&&We(this._tail,0,u-this._origin,this._capacity-this._origin,i,e),n
<add>},__ensureOwner:function(t){return t===this.__ownerID?this:t?Pe(this._origin,this._capacity,this._level,this._root,this._tail,t,this.__hash):(this.__ownerID=t,this)}},{of:function(){return this(arguments)}},ln),Un.isList=Te;var Kn="",Ln=Un.prototype;Ln[Kn]=!0,Ln[pr]=Ln.remove,Ln.setIn=gn.setIn,Ln.removeIn=gn.removeIn,Ln.update=gn.update,Ln.updateIn=gn.updateIn,Ln.withMutations=gn.withMutations,Ln.asMutable=gn.asMutable,Ln.asImmutable=gn.asImmutable,Ln.wasAltered=gn.wasAltered;var Tn=function(t,e){this.array=t,this.ownerID=e},Wn=Tn;vr.createClass(Tn,{removeBefore:function(t,e,r){if(r===e?1<<e:0||0===this.array.length)return this;var n=r>>>e&mr;if(n>=this.array.length)return new Wn([],t);var i,u=0===n;if(e>0){var s=this.array[n];if(i=s&&s.removeBefore(t,e-dr,r),i===s&&u)return this}if(u&&!i)return this;var o=Ye(this,t);if(!u)for(var a=0;n>a;a++)o.array[a]=void 0;return i&&(o.array[n]=i),o},removeAfter:function(t,e,r){if(r===e?1<<e:0||0===this.array.length)return this;var n=r-1>>>e&mr;if(n>=this.array.length)return this;var i,u=n===this.array.length-1;if(e>0){var s=this.array[n];if(i=s&&s.removeAfter(t,e-dr,r),i===s&&u)return this}if(u&&!i)return this;var o=Ye(this,t);return u||o.array.pop(),i&&(o.array[n]=i),o}},{});var Bn=function(t,e,r){this._type=e,this._reverse=!!r,this._maxIndex=t.size-1;var n=Ge(t._capacity),i=Be(t._root&&t._root.array,t._level,-t._origin,n-t._origin-1),u=Be(t._tail&&t._tail.array,0,n-t._origin,t._capacity-t._origin-1);this._stack=r?u:i,this._stack.__prev=r?i:u};vr.createClass(Bn,{next:function(){for(var t=this._stack;t;){var e=t.array,r=t.index++;if(this._reverse&&(r=mr-r,r>t.rawMax&&(r=t.rawMax,t.index=yr-r)),r>=0&&yr>r&&t.rawMax>=r){var n=e&&e[r];if(0===t.level){var i,u=this._type;return 1!==u&&(i=t.offset+(r<<t.level),this._reverse&&(i=this._maxIndex-i)),z(u,i,n)}this._stack=t=Be(n&&n.array,t.level-dr,t.offset+(r<<t.level),t.max,t)}else t=this._stack=this._stack.__prev}return I()}},{},Kr);var Pn,Jn=function(t){return null===t||void 0===t?$e():He(t)?t:$e().merge(Nr(t))
<add>};vr.createClass(Jn,{toString:function(){return this.__toString("OrderedMap {","}")},get:function(t,e){var r=this._map.get(t);return void 0!==r?this._list.get(r)[1]:e},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._map.clear(),this._list.clear(),this):$e()},set:function(t,e){return tr(this,t,e)},remove:function(t){return tr(this,t,gr)},wasAltered:function(){return this._map.wasAltered()||this._list.wasAltered()},__iterate:function(t,e){var r=this;return this._list.__iterate(function(e){return e&&t(e[1],e[0],r)},e)},__iterator:function(t,e){return this._list.fromEntrySeq().__iterator(t,e)},__ensureOwner:function(t){if(t===this.__ownerID)return this;var e=this._map.__ensureOwner(t),r=this._list.__ensureOwner(t);return t?Ze(e,r,t,this.__hash):(this.__ownerID=t,this._map=e,this._list=r,this)}},{of:function(){return this(arguments)}},yn),Jn.isOrderedMap=He;var Vn="";Jn.prototype[Vn]=!0,Jn.prototype[pr]=Jn.prototype.remove;var Nn,Yn=function(t){return null===t||void 0===t?nr():er(t)?t:nr().unshiftAll(t)},Qn=Yn;vr.createClass(Yn,{toString:function(){return this.__toString("Stack [","]")},get:function(t,e){for(var r=this._head;r&&t--;)r=r.next;return r?r.value:e},peek:function(){return this._head&&this._head.value},push:function(){if(0===arguments.length)return this;for(var t=this.size+arguments.length,e=this._head,r=arguments.length-1;r>=0;r--)e={value:arguments[r],next:e};return this.__ownerID?(this.size=t,this._head=e,this.__hash=void 0,this.__altered=!0,this):rr(t,e)},pushAll:function(t){if(t=Qr(t),0===t.size)return this;var e=this.size,r=this._head;return t.reverse().forEach(function(t){e++,r={value:t,next:r}}),this.__ownerID?(this.size=e,this._head=r,this.__hash=void 0,this.__altered=!0,this):rr(e,r)},pop:function(){return this.slice(1)},unshift:function(){return this.push.apply(this,arguments)},unshiftAll:function(t){return this.pushAll(t)},shift:function(){return this.pop.apply(this,arguments)},clear:function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._head=void 0,this.__hash=void 0,this.__altered=!0,this):nr()
<add>},slice:function(t,e){if(l(t,e,this.size))return this;var r=v(t,this.size),n=p(e,this.size);if(n!==this.size)return vr.superCall(this,Qn.prototype,"slice",[t,e]);for(var i=this.size-r,u=this._head;r--;)u=u.next;return this.__ownerID?(this.size=i,this._head=u,this.__hash=void 0,this.__altered=!0,this):rr(i,u)},__ensureOwner:function(t){return t===this.__ownerID?this:t?rr(this.size,this._head,t,this.__hash):(this.__ownerID=t,this.__altered=!1,this)},__iterate:function(t,e){if(e)return this.toSeq().cacheResult.__iterate(t,e);for(var r=0,n=this._head;n&&t(n.value,r++,this)!==!1;)n=n.next;return r},__iterator:function(t,e){if(e)return this.toSeq().cacheResult().__iterator(t,e);var r=0,n=this._head;return new Kr(function(){if(n){var e=n.value;return n=n.next,z(t,r++,e)}return I()})}},{of:function(){return this(arguments)}},ln),Yn.isStack=er;var Xn="",Fn=Yn.prototype;Fn[Xn]=!0,Fn.withMutations=gn.withMutations,Fn.asMutable=gn.asMutable,Fn.asImmutable=gn.asImmutable,Fn.wasAltered=gn.wasAltered;var Gn,Hn=function(t){return null===t||void 0===t?or():ir(t)?t:or().union(Xr(t))};vr.createClass(Hn,{toString:function(){return this.__toString("Set {","}")},has:function(t){return this._map.has(t)},add:function(t){return ur(this,this._map.set(t,!0))},remove:function(t){return ur(this,this._map.remove(t))},clear:function(){return ur(this,this._map.clear())},union:function(){var t=arguments;return 0===t.length?this:this.withMutations(function(e){for(var r=0;t.length>r;r++)Xr(t[r]).forEach(function(t){return e.add(t)})})},intersect:function(){for(var t=[],e=0;arguments.length>e;e++)t[e]=arguments[e];if(0===t.length)return this;t=t.map(function(t){return Xr(t)});var r=this;return this.withMutations(function(e){r.forEach(function(r){t.every(function(t){return t.contains(r)})||e.remove(r)})})},subtract:function(){for(var t=[],e=0;arguments.length>e;e++)t[e]=arguments[e];if(0===t.length)return this;t=t.map(function(t){return Xr(t)});var r=this;return this.withMutations(function(e){r.forEach(function(r){t.some(function(t){return t.contains(r)
<add>})&&e.remove(r)})})},merge:function(){return this.union.apply(this,arguments)},mergeWith:function(){for(var t=[],e=1;arguments.length>e;e++)t[e-1]=arguments[e];return this.union.apply(this,t)},wasAltered:function(){return this._map.wasAltered()},__iterate:function(t,e){var r=this;return this._map.__iterate(function(e,n){return t(n,n,r)},e)},__iterator:function(t,e){return this._map.map(function(t,e){return e}).__iterator(t,e)},__ensureOwner:function(t){if(t===this.__ownerID)return this;var e=this._map.__ensureOwner(t);return t?this.__make(e,t):(this.__ownerID=t,this._map=e,this)}},{of:function(){return this(arguments)},fromKeys:function(t){return this(Nr(t).keySeq())}},pn),Hn.isSet=ir;var Zn="",$n=Hn.prototype;$n[Zn]=!0,$n[pr]=$n.remove,$n.mergeDeep=$n.merge,$n.mergeDeepWith=$n.mergeWith,$n.withMutations=gn.withMutations,$n.asMutable=gn.asMutable,$n.asImmutable=gn.asImmutable,$n.__empty=or,$n.__make=sr;var ti,ei=function(t){return null===t||void 0===t?cr():ar(t)?t:cr().union(Xr(t))};vr.createClass(ei,{toString:function(){return this.__toString("OrderedSet {","}")}},{of:function(){return this(arguments)},fromKeys:function(t){return this(Nr(t).keySeq())}},Hn),ei.isOrderedSet=ar;var ri="",ni=ei.prototype;ni[ri]=!0,ni.__empty=cr,ni.__make=hr;var ii,ui=function(t,e){var r=function(t){return this instanceof r?void(this._map=yn(t)):new r(t)},n=Object.keys(t),u=r.prototype=Object.create(si);u.constructor=r,e&&(u._name=e),u._defaultValues=t,u._keys=n,u.size=n.length;try{n.forEach(function(t){Object.defineProperty(r.prototype,t,{get:function(){return this.get(t)},set:function(e){i(this.__ownerID,"Cannot set on an immutable record."),this.set(t,e)}})})}catch(s){}return r};vr.createClass(ui,{toString:function(){return this.__toString(_r(this)+" {","}")},has:function(t){return this._defaultValues.hasOwnProperty(t)},get:function(t,e){if(!this.has(t))return e;var r=this._defaultValues[t];return this._map?this._map.get(t,r):r},clear:function(){if(this.__ownerID)return this._map&&this._map.clear(),this;
<add>var t=Object.getPrototypeOf(this).constructor;return t._empty||(t._empty=fr(this,re()))},set:function(t,e){if(!this.has(t))throw Error('Cannot set unknown key "'+t+'" on '+_r(this));var r=this._map&&this._map.set(t,e);return this.__ownerID||r===this._map?this:fr(this,r)},remove:function(t){if(!this.has(t))return this;var e=this._map&&this._map.remove(t);return this.__ownerID||e===this._map?this:fr(this,e)},wasAltered:function(){return this._map.wasAltered()},__iterator:function(t,e){var r=this;return Nr(this._defaultValues).map(function(t,e){return r.get(e)}).__iterator(t,e)},__iterate:function(t,e){var r=this;return Nr(this._defaultValues).map(function(t,e){return r.get(e)}).__iterate(t,e)},__ensureOwner:function(t){if(t===this.__ownerID)return this;var e=this._map&&this._map.__ensureOwner(t);return t?fr(this,e,t):(this.__ownerID=t,this._map=e,this)}},{},fn);var si=ui.prototype;si[pr]=si.remove,si.merge=gn.merge,si.mergeWith=gn.mergeWith,si.mergeDeep=gn.mergeDeep,si.mergeDeepWith=gn.mergeDeepWith,si.update=gn.update,si.updateIn=gn.updateIn,si.withMutations=gn.withMutations,si.asMutable=gn.asMutable,si.asImmutable=gn.asImmutable;var oi=function(t,e,r){return this instanceof ai?(i(0!==r,"Cannot step a Range by 0"),t=t||0,void 0===e&&(e=1/0),t===e&&ci?ci:(r=void 0===r?1:Math.abs(r),t>e&&(r=-r),this._start=t,this._end=e,this._step=r,void(this.size=Math.max(0,Math.ceil((e-t)/r-1)+1)))):new ai(t,e,r)},ai=oi;vr.createClass(oi,{toString:function(){return 0===this.size?"Range []":"Range [ "+this._start+"..."+this._end+(this._step>1?" by "+this._step:"")+" ]"},get:function(t,e){return this.has(t)?this._start+f(this,t)*this._step:e},contains:function(t){var e=(t-this._start)/this._step;return e>=0&&this.size>e&&e===Math.floor(e)},slice:function(t,e){return l(t,e,this.size)?this:(t=v(t,this.size),e=p(e,this.size),t>=e?ci:new ai(this.get(t,this._end),this.get(e,this._end),this._step))},indexOf:function(t){var e=t-this._start;if(e%this._step===0){var r=e/this._step;if(r>=0&&this.size>r)return r}return-1},lastIndexOf:function(t){return this.indexOf(t)
<add>},take:function(t){return this.slice(0,Math.max(0,t))},skip:function(t){return this.slice(Math.max(0,t))},__iterate:function(t,e){for(var r=this.size-1,n=this._step,i=e?this._start+r*n:this._start,u=0;r>=u;u++){if(t(i,u,this)===!1)return u+1;i+=e?-n:n}return u},__iterator:function(t,e){var r=this.size-1,n=this._step,i=e?this._start+r*n:this._start,u=0;return new Kr(function(){var s=i;return i+=e?-n:n,u>r?I():z(t,u++,s)})},__deepEquals:function(t){return t instanceof ai?this._start===t._start&&this._end===t._end&&this._step===t._step:vr.superCall(this,ai.prototype,"__deepEquals",[t])}},{},$r);var hi=oi.prototype;hi.__toJS=hi.toArray,hi.first=Ln.first,hi.last=Ln.last;var ci=oi(0,0),fi=function(t,e){return 0>=e&&vi?vi:this instanceof _i?(this._value=t,this.size=void 0===e?1/0:Math.max(0,e),void(0===this.size&&(vi=this))):new _i(t,e)},_i=fi;vr.createClass(fi,{toString:function(){return 0===this.size?"Repeat []":"Repeat [ "+this._value+" "+this.size+" times ]"},get:function(t,e){return this.has(t)?this._value:e},contains:function(t){return n(this._value,t)},slice:function(t,e){var r=this.size;return l(t,e,r)?this:new _i(this._value,p(e,r)-v(t,r))},reverse:function(){return this},indexOf:function(t){return n(this._value,t)?0:-1},lastIndexOf:function(t){return n(this._value,t)?this.size:-1},__iterate:function(t){for(var e=0;this.size>e;e++)if(t(this._value,e,this)===!1)return e+1;return e},__iterator:function(t){var e=this,r=0;return new Kr(function(){return e.size>r?z(t,r++,e._value):I()})},__deepEquals:function(t){return t instanceof _i?n(this._value,t._value):vr.superCall(this,_i.prototype,"__deepEquals",[t])}},{},$r);var li=fi.prototype;li.last=li.first,li.has=hi.has,li.take=hi.take,li.skip=hi.skip,li.__toJS=hi.__toJS;var vi,pi={Iterable:Tr,Seq:Fr,Collection:cn,Map:yn,OrderedMap:Jn,List:Un,Stack:Yn,Set:Hn,OrderedSet:ei,Record:ui,Range:oi,Repeat:fi,is:n,fromJS:X};return pi}"object"==typeof exports?module.exports=t():"function"==typeof define&&define.amd?define(t):Immutable=t();
<ide>\ No newline at end of file
<ide><path>src/Operations.js
<ide> function interposeFactory(iterable, separator) {
<ide> }
<ide>
<ide> function sortFactory(iterable, comparator, mapper) {
<del> var sortFn = mapper ?
<del> (a, b) => comparator(
<del> mapper(a[1][1], a[1][0], iterable),
<del> mapper(b[1][1], b[1][0], iterable)
<del> ) || a[0] - b[0] :
<del> (a, b) => comparator(a[1][1], b[1][1]) || a[0] - b[0];
<del> var entries = [];
<del> iterable.forEach((v, k) => { entries.push([entries.length, [k, v]]); });
<del> entries.sort(sortFn);
<ide> var isKeyedIterable = isKeyed(iterable);
<del> entries.forEach(
<add> var index = 0;
<add> var entries = iterable.toSeq().map(
<add> (v, k) => [k, v, index++, mapper ? mapper(v, k, iterable) : v]
<add> ).toArray();
<add> entries.sort((a, b) => comparator(a[3], b[3]) || a[2] - b[2]).forEach(
<ide> isKeyedIterable ?
<del> (v, i) => { entries[i] = v[1] } :
<del> (v, i) => { entries[i] = v[1][1] }
<add> (v, i) => { entries[i].length = 2; } :
<add> (v, i) => { entries[i] = v[1]; }
<ide> );
<ide> return isKeyedIterable ? KeyedSeq(entries) :
<ide> isIndexed(iterable) ? IndexedSeq(entries) : | 3 |
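The rewritten `sortFactory` above is a decorate-sort-undecorate pass: each entry is tagged with its original index and a precomputed sort key, so the mapper runs once per element and the `|| a[2] - b[2]` fallback keeps `Array.prototype.sort` (which the spec does not require to be stable) behaving stably. A minimal standalone sketch of the same pattern — `stableSortBy` and its arguments are illustrative names, not Immutable.js API:

```js
// Decorate-sort-undecorate with an index tiebreaker for a stable sort.
// `keyFn` runs once per element, mirroring the mapper in the diff above.
function stableSortBy(values, keyFn, comparator = (a, b) => (a < b ? -1 : a > b ? 1 : 0)) {
  const decorated = values.map((value, index) => [value, index, keyFn(value)]);
  decorated.sort((a, b) => comparator(a[2], b[2]) || a[1] - b[1]); // index breaks ties
  return decorated.map((entry) => entry[0]); // undecorate
}

// Equal keys keep their original relative order:
console.log(stableSortBy(['bb', 'a', 'cc', 'd'], (s) => s.length));
// -> ['a', 'd', 'bb', 'cc']
```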
Javascript | Javascript | ignore .next from hot reload watcher | 4620cd8cd683b084480a793e54375c9be3d0b200 | <ide><path>server/hot-reloader.js
<ide> export default class HotReloader {
<ide>
<ide> const ignored = [
<ide> /(^|[/\\])\../, // .dotfiles
<del> /node_modules/
<add> /node_modules/,
<add> /\.next/
<ide> ]
<ide> const windowsSettings = isWindowsBash() ? {
<ide> lazy: false, | 1 |
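The one-line fix above adds `/\.next/` to the watcher's ignore patterns so Next.js's own build output can no longer retrigger a rebuild loop. A hedged sketch of how such a regex ignore list is typically consulted — `shouldIgnore` is an illustrative helper, not the actual watcher API:

```js
// Illustrative: a path is skipped if any pattern in the ignore list matches.
const ignored = [
  /(^|[/\\])\../, // .dotfiles
  /node_modules/,
  /\.next/        // build output must not retrigger the build
];

function shouldIgnore(filePath) {
  return ignored.some((pattern) => pattern.test(filePath));
}

console.log(shouldIgnore('.next/static/chunk.js')); // true
console.log(shouldIgnore('pages/index.js'));        // false
```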
Python | Python | restore libplatform headers in distribution | c476bb844c9db5539fd4104a3f4fec053d7e778c | <ide><path>tools/install.py
<ide> def headers(action):
<ide> def wanted_v8_headers(files_arg, dest):
<ide> v8_headers = [
<ide> 'deps/v8/include/cppgc/common.h',
<add> 'deps/v8/include/libplatform/libplatform.h',
<add> 'deps/v8/include/libplatform/libplatform-export.h',
<add> 'deps/v8/include/libplatform/v8-tracing.h',
<ide> 'deps/v8/include/v8.h',
<ide> 'deps/v8/include/v8-internal.h',
<ide> 'deps/v8/include/v8-platform.h', | 1 |
Mixed | Javascript | run all tests, fix indentation | e1457a14fdea8ae77148147dc8b4611142710cea | <ide><path>docs/docs/flux-todo-list.md
<ide> Dispatcher.prototype = merge(Dispatcher.prototype, {
<ide> var resolves = [];
<ide> var rejects = [];
<ide> _promises = _callbacks.map(function(_, i) {
<del> return new Promise(function(resolve, reject) {
<del> resolves[i] = resolve;
<del> rejects[i] = reject;
<del> });
<add> return new Promise(function(resolve, reject) {
<add> resolves[i] = resolve;
<add> rejects[i] = reject;
<add> });
<ide> });
<ide> // Dispatch to callbacks and resolve/reject promises.
<ide> _callbacks.forEach(function(callback, i) {
<ide><path>examples/todomvc-flux/js/dispatcher/Dispatcher.js
<ide> Dispatcher.prototype = merge(Dispatcher.prototype, {
<ide> var resolves = [];
<ide> var rejects = [];
<ide> _promises = _callbacks.map(function(_, i) {
<del> return new Promise(function(resolve, reject) {
<del> resolves[i] = resolve;
<del> rejects[i] = reject;
<del> });
<add> return new Promise(function(resolve, reject) {
<add> resolves[i] = resolve;
<add> rejects[i] = reject;
<add> });
<ide> });
<ide> // Dispatch to callbacks and resolve/reject promises.
<ide> _callbacks.forEach(function(callback, i) {
<ide><path>examples/todomvc-flux/js/dispatcher/__tests__/AppDispatcher-test.js
<ide> describe('AppDispatcher', function() {
<ide> expect(listener.mock.calls[0][0]).toBe(payload);
<ide> });
<ide>
<del> it.only('waits with chained dependencies properly', function() {
<add> it('waits with chained dependencies properly', function() {
<ide> var payload = {};
<ide>
<ide> var listener1Done = false; | 3 |
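The dispatcher hunk, repeated in the docs and the TodoMVC example, builds one promise per registered callback and captures each `resolve`/`reject` pair by index, so the dispatcher can settle callback `i`'s promise once that callback finishes — the mechanism behind `waitFor` dependency chaining. A standalone sketch of this captured-deferred pattern (simplified; not the full Flux dispatcher):

```js
// Illustrative: one externally-settleable promise per registered callback.
function createDeferreds(count) {
  const resolves = [];
  const rejects = [];
  const promises = Array.from({ length: count }, (_, i) =>
    new Promise((resolve, reject) => {
      resolves[i] = resolve; // settled later, from outside the executor
      rejects[i] = reject;
    })
  );
  return { promises, resolves, rejects };
}

const { promises, resolves } = createDeferreds(2);
promises[1].then(() => console.log('callback 1 finished'));
resolves[1](); // the dispatcher would call this when callback 1 completes
```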
PHP | PHP | remove interactive mode | 577870b19f91d1cb4f50d681824ed399deac55ca | <ide><path>src/Shell/PluginShell.php
<ide> class PluginShell extends Shell {
<ide> public $tasks = ['Assets'];
<ide>
<ide> /**
<del> * Override main() for help message hook
<add> * Symlink / copy plugin assets to app's webroot
<ide> *
<ide> * @return void
<ide> */
<del> public function main() {
<del> $this->out('<info>Plugin Shell</info>');
<del> $this->hr();
<del> $this->out('[A]ssets symlink / copy to app\'s webroot');
<del> $this->out('[H]elp');
<del> $this->out('[Q]uit');
<del>
<del> $choice = strtolower($this->in('What would you like to do?', ['A', 'H', 'Q']));
<del> switch ($choice) {
<del> case 'a':
<del> $this->Assets->main();
<del> break;
<del> case 'h':
<del> $this->out($this->OptionParser->help());
<del> break;
<del> case 'q':
<del> return $this->_stop();
<del> default:
<del> $this->out('You have made an invalid selection. Please choose a command to execute by entering A, H, or Q.');
<del> }
<del> $this->hr();
<del> $this->main();
<add> public function assets() {
<add> $this->Assets->main();
<ide> }
<ide>
<ide> /**
<ide> public function main() {
<ide> public function getOptionParser() {
<ide> $parser = parent::getOptionParser();
<ide>
<del> $parser->description(
<del> 'Plugin Shell symlinks your plugin assets to app\'s webroot.'
<del> )->addSubcommand('assets', [
<del> 'help' => 'Symlink / copy assets to app\'s webroot',
<del> 'parser' => $this->Assets->getOptionParser()
<add> $parser->addSubcommand('assets', [
<add> 'help' => 'Symlink / copy assets to app\'s webroot'
<ide> ]);
<ide>
<ide> return $parser; | 1 |
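The refactor replaces the interactive menu loop with a declared `assets` subcommand, so `cake plugin assets` runs non-interactively and can be scripted. A sketch of the same dispatch-table idea in JavaScript (hypothetical names; the real code relies on CakePHP's Shell and option parser):

```js
// Illustrative: route a CLI subcommand straight to its handler,
// instead of prompting the user through a menu.
const commands = {
  assets: () => console.log('symlink / copy plugin assets to webroot'),
};

function run(argv) {
  const handler = commands[argv[0]];
  if (!handler) {
    console.error(`Unknown subcommand: ${argv[0]}`);
    process.exitCode = 1;
    return;
  }
  handler();
}

run(['assets']); // -> symlink / copy plugin assets to webroot
```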
Text | Text | fix new ssh documentation | 3927723263def56efd3a9b6d77074825198821cf | <ide><path>UPDATING.md
<ide> assists people when migrating to a new version.
<ide>
<ide> ## Master
<ide>
<add>### SSH Hook updates, along with new SSH Operator & SFTP Operator
<add> SSH Hook now uses the Paramiko library to create an ssh client connection, instead of the sub-process based ssh command execution used previously (<1.9.0), so this is backward incompatible.
<add> - update the SSHHook constructor
<add> - use the SSHOperator class in place of SSHExecuteOperator, which is removed now. Refer to test_ssh_operator.py for usage info.
<add> - SFTPOperator is added to perform secure file transfer from serverA to serverB. Refer to test_sftp_operator.py for usage info.
<add> - No updates are required if you are using ftpHook; it will continue to work as is.
<add>
<ide> ### New Features
<ide>
<ide> #### Dask Executor
<ide> If you experience problems connecting with your operator make sure you set the c
<ide>
<ide> Also the old P12 key file type is not supported anymore and only the new JSON key files are supported as a service
<ide> account.
<del>
<del>### SSH Hook updates, along with new SSH Operator & SFTP Operator
<del> SSH Hook now uses Paramiko library to create ssh client connection, instead of sub-process based ssh command execution previously (<1.9.0), so this is backward incompatible.
<del> - update SSHHook constructor
<del> - use SSHOperator class in place of SSHExecuteOperator which is removed now. Refer test_ssh_operator.py for usage info.
<del> - SFTPOperator is added to perform secure file transfer from serverA to serverB. Refer test_sftp_operator.py.py for usage info.
<del> - No updates are required if you are using ftpHook, it will continue work as is.
<ide>
<ide> ### Deprecated Features
<ide> These features are marked for deprecation. They may still work (and raise a `DeprecationWarning`), but are no longer | 1 |
Javascript | Javascript | see value of "haderror" in tls test | 90972d5cb6477adfa907f2a8e4326908ef93c7ef | <ide><path>test/parallel/test-tls-hello-parser-failure.js
<ide> const server = tls.createServer(options, function(c) {
<ide> }));
<ide>
<ide> client.on('close', common.mustCall(function(hadError) {
<del> assert.strictEqual(hadError, true, 'Client never errored');
<add> // Confirm that client errored
<add> assert.strictEqual(hadError, true);
<ide> }));
<ide> })); | 1 |
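The test leans on the fact that a Node socket's `'close'` event passes a boolean `hadError`, true when the socket was torn down by a transmission error — here proving that the malformed hello actually errored the client. A minimal plain-`net` illustration of that event signature (a deliberately forced error, not the TLS setup from the test):

```js
// Illustrative: the 'close' listener's argument reports whether the
// socket closed because of an error.
const net = require('net');

const server = net.createServer(() => {});
server.listen(0, () => {
  const client = net.connect(server.address().port, () => {
    client.destroy(new Error('boom')); // force an error-path teardown
  });
  client.on('error', () => {});        // swallow so the process doesn't throw
  client.on('close', (hadError) => {
    console.log('closed, hadError =', hadError); // -> true
    server.close();
  });
});
```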
Python | Python | remove default options | 5b91d3fb8911a9d826ff26285ffe1a1f06f23ed8 | <ide><path>libcloud/container/drivers/lxd.py
<ide> def _ex_create_exec_configuration(input, **config):
<ide>
<ide> if "width" in config.keys():
<ide> input["width"] = config["width"]
<del> else:
<del> input["width"] = 80
<ide>
<ide> if "height" in config.keys():
<ide> input["width"] = config["height"]
<del> else:
<del> input["height"] = 25
<ide>
<ide> if "user" in config.keys():
<ide> input["user"] = config["user"]
<del> else:
<del> input["user"] = 1000
<ide>
<ide> if "group" in config.keys():
<ide> input["group"] = config["group"]
<del> else:
<del> input["group"] = 1000
<ide>
<ide> if "cwd" in config.keys():
<ide> input["cwd"] = config["cwd"]
<del> else:
<del> input["cwd"] = "/tmp"
<ide>
<ide> if "wait-for-websocket" in config.keys():
<ide> input["wait-for-websocket"] = config["wait-for-websocket"]
<ide> def _ex_create_exec_configuration(input, **config):
<ide>
<ide> if "record-output" in config.keys():
<ide> input["record-output"] = config["record-output"]
<del> else:
<del> input["record-output"] = False
<ide>
<ide> if "interactive" in config.keys():
<ide> input["interactive"] = config["interactive"] | 1 |