content_type (stringclasses, 8 values) | main_lang (stringclasses, 7 values) | message (stringlengths 1-50) | sha (stringlengths 40) | patch (stringlengths 52-962k) | file_count (int64, 1-300)
---|---|---|---|---|---
PHP | PHP | fix parse error | 9df2d77f48c5134a125ff2def78d48f0e9c81a04 | <ide><path>src/Console/ConsoleOptionParser.php
<ide> public function addOption($name, array $options = [])
<ide> $options['default'],
<ide> $options['choices'],
<ide> $options['multiple'],
<del> $options['required'],
<add> $options['required']
<ide> );
<ide> }
<ide> $this->_options[$name] = $option; | 1 |
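A note on the patch above: PHP only began accepting trailing commas in function and method *call* argument lists in PHP 7.3, so the comma after `$options['required']` was a hard parse error on earlier runtimes. A minimal self-contained sketch of the same class of error and its fix — the `addOption` helper here is a hypothetical stand-in, not the CakePHP method:

```php
<?php
// On PHP < 7.3 a trailing comma in a call argument list fails to parse:
//
//     addOption(
//         'verbose',
//         true,   // <- "syntax error, unexpected ')'" before PHP 7.3
//     );
//
// Dropping the comma, as the patch does, keeps the file parseable on
// every PHP version. (Trailing commas in *array literals* have always
// been legal, which is how this slip usually creeps in.)

function addOption(string $name, bool $required): string
{
    return $required ? "--$name (required)" : "--$name";
}

echo addOption('verbose', true) . PHP_EOL; // --verbose (required)
```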
Go | Go | allow anonymous endpoint | e41a9cf59d4b65fef352cc9cbf5f8c75ec0f111a | <ide><path>libnetwork/endpoint.go
<ide> type endpoint struct {
<ide> joinInfo *endpointJoinInfo
<ide> sandboxID string
<ide> exposedPorts []types.TransportPort
<add> anonymous bool
<ide> generic map[string]interface{}
<ide> joinLeaveDone chan struct{}
<ide> dbIndex uint64
<ide> func (ep *endpoint) MarshalJSON() ([]byte, error) {
<ide> epMap["generic"] = ep.generic
<ide> }
<ide> epMap["sandbox"] = ep.sandboxID
<add> epMap["anonymous"] = ep.anonymous
<ide> return json.Marshal(epMap)
<ide> }
<ide>
<ide> func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
<ide> if v, ok := epMap["generic"]; ok {
<ide> ep.generic = v.(map[string]interface{})
<ide> }
<add>
<add> if v, ok := epMap["anonymous"]; ok {
<add> ep.anonymous = v.(bool)
<add> }
<ide> return nil
<ide> }
<ide>
<ide> func (ep *endpoint) CopyTo(o datastore.KVObject) error {
<ide> dstEp.sandboxID = ep.sandboxID
<ide> dstEp.dbIndex = ep.dbIndex
<ide> dstEp.dbExists = ep.dbExists
<add> dstEp.anonymous = ep.anonymous
<ide>
<ide> if ep.iface != nil {
<ide> dstEp.iface = &endpointInterface{}
<ide> func (ep *endpoint) Network() string {
<ide> return ep.network.name
<ide> }
<ide>
<add>func (ep *endpoint) isAnonymous() bool {
<add> ep.Lock()
<add> defer ep.Unlock()
<add> return ep.anonymous
<add>}
<add>
<ide> // endpoint Key structure : endpoint/network-id/endpoint-id
<ide> func (ep *endpoint) Key() []string {
<ide> if ep.network == nil {
<ide> func CreateOptionPortMapping(portBindings []types.PortBinding) EndpointOption {
<ide> }
<ide> }
<ide>
<add>// CreateOptionAnonymous function returns an option setter for setting
<add>// this endpoint as anonymous
<add>func CreateOptionAnonymous() EndpointOption {
<add> return func(ep *endpoint) {
<add> ep.anonymous = true
<add> }
<add>}
<add>
<ide> // JoinOptionPriority function returns an option setter for priority option to
<ide> // be passed to the endpoint.Join() method.
<ide> func JoinOptionPriority(ep Endpoint, prio int) EndpointOption {
<ide><path>libnetwork/libnetwork_internal_test.go
<ide> func TestEndpointMarshalling(t *testing.T) {
<ide> name: "Bau",
<ide> id: "efghijklmno",
<ide> sandboxID: "ambarabaciccicocco",
<add> anonymous: true,
<ide> iface: &endpointInterface{
<ide> mac: []byte{11, 12, 13, 14, 15, 16},
<ide> addr: &net.IPNet{
<ide> func TestEndpointMarshalling(t *testing.T) {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> if e.name != ee.name || e.id != ee.id || e.sandboxID != ee.sandboxID || !compareEndpointInterface(e.iface, ee.iface) {
<add> if e.name != ee.name || e.id != ee.id || e.sandboxID != ee.sandboxID || !compareEndpointInterface(e.iface, ee.iface) || e.anonymous != ee.anonymous {
<ide> t.Fatalf("JSON marsh/unmarsh failed.\nOriginal:\n%#v\nDecoded:\n%#v\nOriginal iface: %#v\nDecodediface:\n%#v", e, ee, e.iface, ee.iface)
<ide> }
<ide> }
<ide> func TestAuxAddresses(t *testing.T) {
<ide> }
<ide> defer c.Stop()
<ide>
<del> n := &network{ipamType: ipamapi.DefaultIPAM, ctrlr: c.(*controller)}
<add> n := &network{ipamType: ipamapi.DefaultIPAM, networkType: "bridge", ctrlr: c.(*controller)}
<ide>
<ide> input := []struct {
<ide> masterPool string
<ide><path>libnetwork/network.go
<ide> func (n *network) EndpointByID(id string) (Endpoint, error) {
<ide> }
<ide>
<ide> func (n *network) updateSvcRecord(ep *endpoint, localEps []*endpoint, isAdd bool) {
<add> if ep.isAnonymous() {
<add> return
<add> }
<add>
<ide> c := n.getController()
<ide> sr, ok := c.svcDb[n.ID()]
<ide> if !ok { | 3 |
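The Go patch above wires the new `anonymous` flag through libnetwork's functional-options pattern: `CreateOptionAnonymous()` returns a closure that mutates the endpoint while its options are applied, and `updateSvcRecord` then skips anonymous endpoints when service records are written. Below is a runnable sketch of just that pattern; the struct and constructor are simplified stand-ins for the real libnetwork types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

// endpoint is a pared-down stand-in for libnetwork's endpoint struct.
type endpoint struct {
	sync.Mutex
	name      string
	anonymous bool
}

// EndpointOption mirrors the option-setter type used in the patch: each
// option is a closure applied to the endpoint during creation.
type EndpointOption func(*endpoint)

// CreateOptionAnonymous marks the endpoint as anonymous, i.e. one that
// should be excluded from service (DNS) record updates.
func CreateOptionAnonymous() EndpointOption {
	return func(ep *endpoint) {
		ep.anonymous = true
	}
}

// newEndpoint applies each option in order, the same shape of flow the
// real libnetwork constructor uses.
func newEndpoint(name string, opts ...EndpointOption) *endpoint {
	ep := &endpoint{name: name}
	for _, opt := range opts {
		opt(ep)
	}
	return ep
}

func (ep *endpoint) isAnonymous() bool {
	ep.Lock()
	defer ep.Unlock()
	return ep.anonymous
}

func main() {
	ep := newEndpoint("web", CreateOptionAnonymous())
	out, _ := json.Marshal(map[string]interface{}{
		"name":      ep.name,
		"anonymous": ep.isAnonymous(),
	})
	fmt.Println(string(out)) // {"anonymous":true,"name":"web"}
}
```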
Text | Text | update docs to conform with new translation flow | c7955c4fbc7679784592912a7591e4e7bc101563 | <ide><path>docs/FAQ.md
<ide> freeCodeCamp runs on a modern JavaScript stack. If you're interested in contribu
<ide>
<ide> Yes - You can contribute to any of the 30+ languages we have enabled on our translation platform.
<ide>
<del>We have user-contributed translations live in some languages. We intend to localize freeCodeCamp into several major world languages. You can read all about this in our [announcement here](https://www.freecodecamp.org/news/world-language-translation-effort).
<add>We have user-contributed translations live in some languages. We intend to localize freeCodeCamp into several major world languages. You can read all about this in our [announcement here](https://www.freecodecamp.org/news/help-translate-freecodecamp-language/).
<ide>
<ide> If you are interested in contributing to translations please makes sure you [read this guide](how-to-translate-files.md) first.
<ide>
<ide><path>docs/how-to-translate-files.md
<ide> # How to Translate freeCodeCamp's resources
<ide>
<del>It's our dream to provide you with the resources to learn, no matter the world language you speak. To help us with this massive effort, we have integrated our open-source code-base & curriculum with [Crowdin](https://crowdin.com/) - A tool to help us localize our code-base.
<del>
<del>The translation workflow is split into two main activities:
<del>
<del>- **Translating** curriculum files, documentation and UI elements like buttons, labels, etc.:
<del>
<del> As a translator you can sign up on [our translation platform](https://translate.freecodecamp.org) and contribute translations in any of the 30+ languages enabled in there.
<del>
<del>- **Proofreading** the translations for all of the above.
<del>
<del> Proofreaders verify that the community contributed translations are uniform in tone and free of common issues like typos, etc. In short, they ensure that the quality of translations is high. Note that we do not use machine translations for a reason.
<del>
<del>> [!WARNING]
<del>> We are no longer using GitHub to translate files directly, if you are a returning contributor head to our [translation platform](https://translate.freecodecamp.org/) instead.
<del>
<ide> ## Prepare yourself for contributions
<ide>
<ide> > The freeCodeCamp Localization Roadmap – There Are No Speed Limits
<ide> We just ask that you understand the following:
<ide>
<ide> Translating freeCodeCamp's resources is one of the most fun and rewarding experiences as a contributor, and it works best if you involve your friends and colleagues who speak the same world language as you.
<ide>
<del> We recommend joining [our community forum](https://forum.freecodecamp.org/c/contributors/3) and [contributors chat room](https://discord.gg/PRyKn3Vbay) with your friends and showing your interest before starting off with translations. Crowdin makes it easy to contribute translations, but it's still a lot of work.
<add> You can start by reading [this announcement](https://www.freecodecamp.org/news/help-translate-freecodecamp-language/). We recommend joining [our community forum](https://forum.freecodecamp.org/c/contributors/3) and [Discord chat server](https://discord.gg/PRyKn3Vbay) with your friends and showing your interest before starting off with translations. Crowdin and other tools make it easy to contribute translations, but it's still a lot of work.
<ide>
<ide> We want you to enjoy contributing and not burn out or lose interest.
<ide>
<ide> We just ask that you understand the following:
<ide>
<ide> freeCodeCamp.org is committed to providing these for free as always, however we need to prioritize resources for those who need them the most. The last thing we want is to shutdown servers for a language if the translation activity dies off & things become outdated.
<ide>
<del> Once a language reaches at least a few certifications on the curriculum we can begin deploying the language live on [`/learn`](https://www.freecodecamp.org/learn), while you continue to translate the remaining certifications.
<add> For translating the curriculum, once a language reaches at least a few certifications we can begin deploying the language live on [`/learn`](https://www.freecodecamp.org/learn), while you continue to translate the remaining certifications.
<ide>
<ide> For example, we would want to deploy at least the entire front-end certifications suite when we ship a new world language for the first time.
<ide>
<ide> We just ask that you understand the following:
<ide>
<ide> Once you have a small group of people (at least 4-5) interested and committed, we can hop on a call. We will explain all the details and walk you through some of the tools and processes.
<ide>
<add>## Overview of Crowdin
<add>
<add>It's our dream to provide you with the resources to learn, no matter the world language you speak. To help us with this massive effort, we have integrated our open-source code-base & curriculum with [Crowdin](https://crowdin.com/) - A tool to help us localize our code-base.
<add>
<add>> [!NOTE]
<add>> We use a different tool and workflow for translating [news articles](https://www.freecodecamp.org/news). If you are interested in translating articles, read [this announcement](https://www.freecodecamp.org/news/help-translate-freecodecamp-language/) and reach out to your Language Lead.
<add>
<add>The translation workflow is split into two main activities:
<add>
<add>- **Translating** curriculum files, documentation and UI elements like buttons, labels, etc.:
<add>
<add> As a translator you can sign up on [our translation platform](https://translate.freecodecamp.org) and contribute translations in any of the 30+ languages enabled in there.
<add>
<add>- **Proofreading** the translations for all of the above.
<add>
<add> Proofreaders verify that the community contributed translations are uniform in tone and free of common issues like typos, etc. In short, they ensure that the quality of translations is high. Note that we do not use machine translations for a reason.
<add>
<add>> [!WARNING]
<add>> We are no longer using GitHub to translate files directly, if you are a returning contributor head to our [translation platform](https://translate.freecodecamp.org/) instead.
<add>
<ide> ## Getting started
<ide>
<del>First, make sure you come say "Hi" in our [contributors chat room](https://discord.gg/PRyKn3Vbay). We post regular updates about translating resources and answer a lot of your queries in there.
<add>First, make sure you come say "Hi" in our [Discord](https://discord.gg/PRyKn3Vbay). We post regular updates about translating resources and answer a lot of your queries in there.
<ide>
<ide> Next, head to our [translation platform](https://translate.freecodecamp.org/) and login (if you have not contributed to translations before, you will need to create an account).
<ide>
<ide> Crowdin separates a document into translatable "strings", usually sentences. Eac
<ide> 10. These two "pane" buttons will hide the left (document) and right (comments) views.
<ide>
<ide> > [!NOTE]
<del>> If you see a hidden string that includes translations, please notify us in the [contributors chat room](https://discord.gg/PRyKn3Vbay) so we can remove the translation from memory.
<add>> If you see a hidden string that includes translations, please notify us in the [Discord](https://discord.gg/PRyKn3Vbay) so we can remove the translation from memory.
<ide>
<ide> When you have completed a translation for a string, select the `Save` button to store your translation on Crowdin. Other contributors will then be able to vote on your translation and proofreaders will be able to approve it.
<ide>
<ide> Follow these guidelines to ensure our translations are as accurate as possible:
<ide> - Do not add additional content. If you feel a challenge requires changes in the text content or additional information, you should propose the changes through a GitHub issue or a pull request that modifies the English file.
<ide> - Do not change the order of content.
<ide>
<del>If you have any questions, feel free to reach out to us in our [contributors chat room](https://discord.gg/PRyKn3Vbay) and we will be happy to assist you.
<add>If you have any questions, feel free to reach out to us in our [Discord](https://discord.gg/PRyKn3Vbay) and we will be happy to assist you.
<ide><path>docs/index.md
<ide> You can help expand and improve the curriculum. You can also update project user
<ide>
<ide> We are localizing freeCodeCamp.org to major world languages.
<ide>
<del>Certifications are already live in some major world languages like [Chinese (中文)](https://chinese.freecodecamp.org/learn), [Spanish (Español)](https://www.freecodecamp.org/espanol/learn), [Italian (Italiano)](https://www.freecodecamp.org/italian/learn), [Portuguese (Português)](https://www.freecodecamp.org/portuguese/learn). We encourage you to read the [announcement here](https://www.freecodecamp.org/news/world-language-translation-effort) and share it with your friends to get them excited about this.
<add>Certifications are already live in some major world languages like below:
<add>
<add>- [Chinese (中文)](https://chinese.freecodecamp.org/learn)
<add>- [Spanish (Español)](https://www.freecodecamp.org/espanol/learn)
<add>- [Italian (Italiano)](https://www.freecodecamp.org/italian/learn)
<add>- [Portuguese (Português)](https://www.freecodecamp.org/portuguese/learn)
<add>- [Ukrainian (Українська)](https://www.freecodecamp.org/ukrainian/learn)
<add>- [Japanese (日本語)](https://www.freecodecamp.org/japanese/learn)
<add>
<add>We encourage you to read the [announcement here](https://www.freecodecamp.org/news/help-translate-freecodecamp-language/) and share it with your friends to get them excited about this.
<ide>
<ide> **If you're interested in translating, here's [how to translate freeCodeCamp's resources](how-to-translate-files.md).**
<ide> | 3 |
Text | Text | fix mode and flags being mistaken in fs | a45c1aa39f593cc584f1895dd0b7309fb12e1b02 | <ide><path>doc/api/fs.md
<ide> Returns an object containing commonly used constants for file system
<ide> operations. The specific constants currently defined are described in
<ide> [FS Constants][].
<ide>
<del>## `fs.copyFile(src, dest[, flags], callback)`
<add>## `fs.copyFile(src, dest[, mode], callback)`
<ide> <!-- YAML
<ide> added: v8.5.0
<ide> -->
<ide>
<ide> * `src` {string|Buffer|URL} source filename to copy
<ide> * `dest` {string|Buffer|URL} destination filename of the copy operation
<del>* `flags` {number} modifiers for copy operation. **Default:** `0`.
<add>* `mode` {integer} modifiers for copy operation. **Default:** `0`.
<ide> * `callback` {Function}
<ide>
<ide> Asynchronously copies `src` to `dest`. By default, `dest` is overwritten if it
<ide> callback function. Node.js makes no guarantees about the atomicity of the copy
<ide> operation. If an error occurs after the destination file has been opened for
<ide> writing, Node.js will attempt to remove the destination.
<ide>
<del>`flags` is an optional integer that specifies the behavior
<add>`mode` is an optional integer that specifies the behavior
<ide> of the copy operation. It is possible to create a mask consisting of the bitwise
<ide> OR of two or more values (e.g.
<ide> `fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
<ide> fs.copyFile('source.txt', 'destination.txt', (err) => {
<ide> });
<ide> ```
<ide>
<del>If the third argument is a number, then it specifies `flags`:
<add>If the third argument is a number, then it specifies `mode`:
<ide>
<ide> ```js
<ide> const fs = require('fs');
<ide> const { COPYFILE_EXCL } = fs.constants;
<ide> fs.copyFile('source.txt', 'destination.txt', COPYFILE_EXCL, callback);
<ide> ```
<ide>
<del>## `fs.copyFileSync(src, dest[, flags])`
<add>## `fs.copyFileSync(src, dest[, mode])`
<ide> <!-- YAML
<ide> added: v8.5.0
<ide> -->
<ide>
<ide> * `src` {string|Buffer|URL} source filename to copy
<ide> * `dest` {string|Buffer|URL} destination filename of the copy operation
<del>* `flags` {number} modifiers for copy operation. **Default:** `0`.
<add>* `mode` {integer} modifiers for copy operation. **Default:** `0`.
<ide>
<ide> Synchronously copies `src` to `dest`. By default, `dest` is overwritten if it
<ide> already exists. Returns `undefined`. Node.js makes no guarantees about the
<ide> atomicity of the copy operation. If an error occurs after the destination file
<ide> has been opened for writing, Node.js will attempt to remove the destination.
<ide>
<del>`flags` is an optional integer that specifies the behavior
<add>`mode` is an optional integer that specifies the behavior
<ide> of the copy operation. It is possible to create a mask consisting of the bitwise
<ide> OR of two or more values (e.g.
<ide> `fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
<ide> fs.copyFileSync('source.txt', 'destination.txt');
<ide> console.log('source.txt was copied to destination.txt');
<ide> ```
<ide>
<del>If the third argument is a number, then it specifies `flags`:
<add>If the third argument is a number, then it specifies `mode`:
<ide>
<ide> ```js
<ide> const fs = require('fs');
<ide> changes:
<ide> * `fs` {Object|null} **Default:** `null`
<ide> * Returns: {fs.WriteStream} See [Writable Stream][].
<ide>
<del>`options` may also include a `start` option to allow writing data at
<del>some position past the beginning of the file, allowed values are in the
<del>[0, [`Number.MAX_SAFE_INTEGER`][]] range. Modifying a file rather
<del>than replacing it may require a `flags` mode of `r+` rather than the
<del>default mode `w`. The `encoding` can be any one of those accepted by
<del>[`Buffer`][].
<add>`options` may also include a `start` option to allow writing data at some
<add>position past the beginning of the file, allowed values are in the
<add>[0, [`Number.MAX_SAFE_INTEGER`][]] range. Modifying a file rather than replacing
<add>it may require the `flags` option to be set to `r+` rather than the default `w`.
<add>The `encoding` can be any one of those accepted by [`Buffer`][].
<ide>
<ide> If `autoClose` is set to true (default behavior) on `'error'` or `'finish'`
<ide> the file descriptor will be closed automatically. If `autoClose` is false,
<ide> changes:
<ide> Asynchronously creates a directory. No arguments other than a possible exception
<ide> are given to the completion callback.
<ide>
<del>The optional `options` argument can be an integer specifying mode (permission
<add>The optional `options` argument can be an integer specifying `mode` (permission
<ide> and sticky bits), or an object with a `mode` property and a `recursive`
<ide> property indicating whether parent folders should be created. Calling
<ide> `fs.mkdir()` when `path` is a directory that exists results in an error only
<ide> changes:
<ide> description: The `flags` argument is now optional and defaults to `'r'`.
<ide> - version: v9.9.0
<ide> pr-url: https://github.com/nodejs/node/pull/18801
<del> description: The `as` and `as+` modes are supported now.
<add> description: The `as` and `as+` flags are supported now.
<ide> - version: v7.6.0
<ide> pr-url: https://github.com/nodejs/node/pull/10739
<ide> description: The `path` parameter can be a WHATWG `URL` object using `file:`
<ide> changes:
<ide> description: The `flags` argument is now optional and defaults to `'r'`.
<ide> - version: v9.9.0
<ide> pr-url: https://github.com/nodejs/node/pull/18801
<del> description: The `as` and `as+` modes are supported now.
<add> description: The `as` and `as+` flags are supported now.
<ide> - version: v7.6.0
<ide> pr-url: https://github.com/nodejs/node/pull/10739
<ide> description: The `path` parameter can be a WHATWG `URL` object using `file:`
<ide> changes:
<ide> * `path` {string|Buffer|URL}
<ide> * `options` {Object}
<ide> * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
<del> `EPERM` error is encountered, Node.js will retry the operation with a linear
<del> backoff wait of `retryDelay` ms longer on each try. This option represents the
<del> number of retries. This option is ignored if the `recursive` option is not
<del> `true`. **Default:** `0`.
<add> `EPERM` error is encountered, Node.js will retry the operation with a linear
<add> backoff wait of `retryDelay` ms longer on each try. This option represents
<add> the number of retries. This option is ignored if the `recursive` option is
<add> not `true`. **Default:** `0`.
<ide> * `recursive` {boolean} If `true`, perform a recursive directory removal. In
<del> recursive mode, errors are not reported if `path` does not exist, and
<del> operations are retried on failure. **Default:** `false`.
<add> recursive mode, errors are not reported if `path` does not exist, and
<add> operations are retried on failure. **Default:** `false`.
<ide> * `retryDelay` {integer} The amount of time in milliseconds to wait between
<del> retries. This option is ignored if the `recursive` option is not `true`.
<del> **Default:** `100`.
<add> retries. This option is ignored if the `recursive` option is not `true`.
<add> **Default:** `100`.
<ide> * `callback` {Function}
<ide> * `err` {Error}
<ide>
<ide> changes:
<ide> * `path` {string|Buffer|URL}
<ide> * `options` {Object}
<ide> * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
<del> `EPERM` error is encountered, Node.js will retry the operation with a linear
<del> backoff wait of `retryDelay` ms longer on each try. This option represents the
<del> number of retries. This option is ignored if the `recursive` option is not
<del> `true`. **Default:** `0`.
<add> `EPERM` error is encountered, Node.js will retry the operation with a linear
<add> backoff wait of `retryDelay` ms longer on each try. This option represents
<add> the number of retries. This option is ignored if the `recursive` option is
<add> not `true`. **Default:** `0`.
<ide> * `recursive` {boolean} If `true`, perform a recursive directory removal. In
<del> recursive mode, errors are not reported if `path` does not exist, and
<del> operations are retried on failure. **Default:** `false`.
<add> recursive mode, errors are not reported if `path` does not exist, and
<add> operations are retried on failure. **Default:** `false`.
<ide> * `retryDelay` {integer} The amount of time in milliseconds to wait between
<del> retries. This option is ignored if the `recursive` option is not `true`.
<del> **Default:** `100`.
<add> retries. This option is ignored if the `recursive` option is not `true`.
<add> **Default:** `100`.
<ide>
<ide> Synchronous rmdir(2). Returns `undefined`.
<ide>
<ide> added: v10.0.0
<ide> Changes the ownership of a file then resolves the `Promise` with no arguments
<ide> upon success.
<ide>
<del>### `fsPromises.copyFile(src, dest[, flags])`
<add>### `fsPromises.copyFile(src, dest[, mode])`
<ide> <!-- YAML
<ide> added: v10.0.0
<ide> -->
<ide>
<ide> * `src` {string|Buffer|URL} source filename to copy
<ide> * `dest` {string|Buffer|URL} destination filename of the copy operation
<del>* `flags` {number} modifiers for copy operation. **Default:** `0`.
<add>* `mode` {integer} modifiers for copy operation. **Default:** `0`.
<ide> * Returns: {Promise}
<ide>
<ide> Asynchronously copies `src` to `dest`. By default, `dest` is overwritten if it
<ide> Node.js makes no guarantees about the atomicity of the copy operation. If an
<ide> error occurs after the destination file has been opened for writing, Node.js
<ide> will attempt to remove the destination.
<ide>
<del>`flags` is an optional integer that specifies the behavior
<add>`mode` is an optional integer that specifies the behavior
<ide> of the copy operation. It is possible to create a mask consisting of the bitwise
<ide> OR of two or more values (e.g.
<ide> `fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
<ide> fsPromises.copyFile('source.txt', 'destination.txt')
<ide> .catch(() => console.log('The file could not be copied'));
<ide> ```
<ide>
<del>If the third argument is a number, then it specifies `flags`:
<add>If the third argument is a number, then it specifies `mode`:
<ide>
<ide> ```js
<ide> const fs = require('fs');
<ide> added: v10.0.0
<ide> Asynchronously creates a directory then resolves the `Promise` with no
<ide> arguments upon success.
<ide>
<del>The optional `options` argument can be an integer specifying mode (permission
<add>The optional `options` argument can be an integer specifying `mode` (permission
<ide> and sticky bits), or an object with a `mode` property and a `recursive`
<ide> property indicating whether parent folders should be created. Calling
<ide> `fsPromises.mkdir()` when `path` is a directory that exists results in a
<ide> changes:
<ide> * `path` {string|Buffer|URL}
<ide> * `options` {Object}
<ide> * `maxRetries` {integer} If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
<del> `EPERM` error is encountered, Node.js will retry the operation with a linear
<del> backoff wait of `retryDelay` ms longer on each try. This option represents the
<del> number of retries. This option is ignored if the `recursive` option is not
<del> `true`. **Default:** `0`.
<add> `EPERM` error is encountered, Node.js will retry the operation with a linear
<add> backoff wait of `retryDelay` ms longer on each try. This option represents
<add> the number of retries. This option is ignored if the `recursive` option is
<add> not `true`. **Default:** `0`.
<ide> * `recursive` {boolean} If `true`, perform a recursive directory removal. In
<del> recursive mode, errors are not reported if `path` does not exist, and
<del> operations are retried on failure. **Default:** `false`.
<add> recursive mode, errors are not reported if `path` does not exist, and
<add> operations are retried on failure. **Default:** `false`.
<ide> * `retryDelay` {integer} The amount of time in milliseconds to wait between
<del> retries. This option is ignored if the `recursive` option is not `true`.
<del> **Default:** `100`.
<add> retries. This option is ignored if the `recursive` option is not `true`.
<add> **Default:** `100`.
<ide> * Returns: {Promise}
<ide>
<ide> Removes the directory identified by `path` then resolves the `Promise` with
<ide> the file contents.
<ide> [`fs.access()`]: #fs_fs_access_path_mode_callback
<ide> [`fs.chmod()`]: #fs_fs_chmod_path_mode_callback
<ide> [`fs.chown()`]: #fs_fs_chown_path_uid_gid_callback
<del>[`fs.copyFile()`]: #fs_fs_copyfile_src_dest_flags_callback
<add>[`fs.copyFile()`]: #fs_fs_copyfile_src_dest_mode_callback
<ide> [`fs.createWriteStream()`]: #fs_fs_createwritestream_path_options
<ide> [`fs.exists()`]: fs.html#fs_fs_exists_path_callback
<ide> [`fs.fstat()`]: #fs_fs_fstat_fd_options_callback | 1 |
Python | Python | add sparsity modules | e4c07faf0ae125559c30998c3513dd85f012c88e | <ide><path>examples/movement-pruning/emmental/__init__.py
<add>from .modules import *
<add>
<add>from .configuration_bert_masked import MaskedBertConfig
<add>
<add>from .modeling_bert_masked import (
<add> MaskedBertModel,
<add> MaskedBertForQuestionAnswering,
<add> MaskedBertForSequenceClassification,
<add> MaskedBertForTokenClassification,
<add> MaskedBertForMultipleChoice,
<add>)
<ide><path>examples/movement-pruning/emmental/configuration_bert_masked.py
<add># coding=utf-8
<add># Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
<add># Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>""" Masked BERT model configuration. It replicates the class `~transformers.BertConfig`
<add>and adapts it to the specificities of MaskedBert (`pruning_method`, `mask_init` and `mask_scale`."""
<add>
<add>
<add>import logging
<add>
<add>from transformers.configuration_utils import PretrainedConfig
<add>from transformers.configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
<add>
<add>logger = logging.getLogger(__name__)
<add>
<add>
<add>class MaskedBertConfig(PretrainedConfig):
<add> """
<add> A class replicating the `~transformers.BertConfig` with additional parameters for pruning/masking configuration.
<add> """
<add>
<add> pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
<add> model_type = "masked_bert"
<add>
<add> def __init__(
<add> self,
<add> vocab_size=30522,
<add> hidden_size=768,
<add> num_hidden_layers=12,
<add> num_attention_heads=12,
<add> intermediate_size=3072,
<add> hidden_act="gelu",
<add> hidden_dropout_prob=0.1,
<add> attention_probs_dropout_prob=0.1,
<add> max_position_embeddings=512,
<add> type_vocab_size=2,
<add> initializer_range=0.02,
<add> layer_norm_eps=1e-12,
<add> pad_token_id=0,
<add> pruning_method="topK",
<add> mask_init="constant",
<add> mask_scale=0.0,
<add> **kwargs
<add> ):
<add> super().__init__(pad_token_id=pad_token_id, **kwargs)
<add>
<add> self.vocab_size = vocab_size
<add> self.hidden_size = hidden_size
<add> self.num_hidden_layers = num_hidden_layers
<add> self.num_attention_heads = num_attention_heads
<add> self.hidden_act = hidden_act
<add> self.intermediate_size = intermediate_size
<add> self.hidden_dropout_prob = hidden_dropout_prob
<add> self.attention_probs_dropout_prob = attention_probs_dropout_prob
<add> self.max_position_embeddings = max_position_embeddings
<add> self.type_vocab_size = type_vocab_size
<add> self.initializer_range = initializer_range
<add> self.layer_norm_eps = layer_norm_eps
<add> self.pruning_method = pruning_method
<add> self.mask_init = mask_init
<add> self.mask_scale = mask_scale
<ide><path>examples/movement-pruning/emmental/modeling_bert_masked.py
<add># coding=utf-8
<add># Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
<add># Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>"""Masked Version of BERT. It replaces the `torch.nn.Linear` layers with
<add>:class:`~emmental.MaskedLinear` and add an additional parameters in the forward pass to
<add>compute the adaptive mask.
<add>Built on top of `transformers.modeling_bert`"""
<add>
<add>
<add>import logging
<add>import math
<add>
<add>import torch
<add>from torch import nn
<add>from torch.nn import CrossEntropyLoss, MSELoss
<add>
<add>from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
<add>from transformers.modeling_utils import PreTrainedModel, prune_linear_layer
<add>from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
<add>from transformers.modeling_bert import load_tf_weights_in_bert, ACT2FN, BertLayerNorm
<add>
<add>from emmental import MaskedLinear
<add>from emmental import MaskedBertConfig
<add>
<add>logger = logging.getLogger(__name__)
<add>
<add>
<add>class BertEmbeddings(nn.Module):
<add> """Construct the embeddings from word, position and token_type embeddings.
<add> """
<add>
<add> def __init__(self, config):
<add> super().__init__()
<add> self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
<add> self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
<add> self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
<add>
<add> # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
<add> # any TensorFlow checkpoint file
<add> self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
<add> self.dropout = nn.Dropout(config.hidden_dropout_prob)
<add>
<add> def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
<add> if input_ids is not None:
<add> input_shape = input_ids.size()
<add> else:
<add> input_shape = inputs_embeds.size()[:-1]
<add>
<add> seq_length = input_shape[1]
<add> device = input_ids.device if input_ids is not None else inputs_embeds.device
<add> if position_ids is None:
<add> position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
<add> position_ids = position_ids.unsqueeze(0).expand(input_shape)
<add> if token_type_ids is None:
<add> token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
<add>
<add> if inputs_embeds is None:
<add> inputs_embeds = self.word_embeddings(input_ids)
<add> position_embeddings = self.position_embeddings(position_ids)
<add> token_type_embeddings = self.token_type_embeddings(token_type_ids)
<add>
<add> embeddings = inputs_embeds + position_embeddings + token_type_embeddings
<add> embeddings = self.LayerNorm(embeddings)
<add> embeddings = self.dropout(embeddings)
<add> return embeddings
<add>
<add>
<add>class BertSelfAttention(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
<add> raise ValueError(
<add> "The hidden size (%d) is not a multiple of the number of attention "
<add> "heads (%d)" % (config.hidden_size, config.num_attention_heads)
<add> )
<add> self.output_attentions = config.output_attentions
<add>
<add> self.num_attention_heads = config.num_attention_heads
<add> self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
<add> self.all_head_size = self.num_attention_heads * self.attention_head_size
<add>
<add> self.query = MaskedLinear(
<add> config.hidden_size,
<add> self.all_head_size,
<add> pruning_method=config.pruning_method,
<add> mask_init=config.mask_init,
<add> mask_scale=config.mask_scale,
<add> )
<add> self.key = MaskedLinear(
<add> config.hidden_size,
<add> self.all_head_size,
<add> pruning_method=config.pruning_method,
<add> mask_init=config.mask_init,
<add> mask_scale=config.mask_scale,
<add> )
<add> self.value = MaskedLinear(
<add> config.hidden_size,
<add> self.all_head_size,
<add> pruning_method=config.pruning_method,
<add> mask_init=config.mask_init,
<add> mask_scale=config.mask_scale,
<add> )
<add>
<add> self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
<add>
<add> def transpose_for_scores(self, x):
<add> new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
<add> x = x.view(*new_x_shape)
<add> return x.permute(0, 2, 1, 3)
<add>
<add> def forward(
<add> self,
<add> hidden_states,
<add> attention_mask=None,
<add> head_mask=None,
<add> encoder_hidden_states=None,
<add> encoder_attention_mask=None,
<add> threshold=None,
<add> ):
<add> mixed_query_layer = self.query(hidden_states, threshold=threshold)
<add>
<add> # If this is instantiated as a cross-attention module, the keys
<add> # and values come from an encoder; the attention mask needs to be
<add> # such that the encoder's padding tokens are not attended to.
<add> if encoder_hidden_states is not None:
<add> mixed_key_layer = self.key(encoder_hidden_states, threshold=threshold)
<add> mixed_value_layer = self.value(encoder_hidden_states, threshold=threshold)
<add> attention_mask = encoder_attention_mask
<add> else:
<add> mixed_key_layer = self.key(hidden_states, threshold=threshold)
<add> mixed_value_layer = self.value(hidden_states, threshold=threshold)
<add>
<add> query_layer = self.transpose_for_scores(mixed_query_layer)
<add> key_layer = self.transpose_for_scores(mixed_key_layer)
<add> value_layer = self.transpose_for_scores(mixed_value_layer)
<add>
<add> # Take the dot product between "query" and "key" to get the raw attention scores.
<add> attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
<add> attention_scores = attention_scores / math.sqrt(self.attention_head_size)
<add> if attention_mask is not None:
<add> # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
<add> attention_scores = attention_scores + attention_mask
<add>
<add> # Normalize the attention scores to probabilities.
<add> attention_probs = nn.Softmax(dim=-1)(attention_scores)
<add>
<add> # This is actually dropping out entire tokens to attend to, which might
<add> # seem a bit unusual, but is taken from the original Transformer paper.
<add> attention_probs = self.dropout(attention_probs)
<add>
<add> # Mask heads if we want to
<add> if head_mask is not None:
<add> attention_probs = attention_probs * head_mask
<add>
<add> context_layer = torch.matmul(attention_probs, value_layer)
<add>
<add> context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
<add> new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
<add> context_layer = context_layer.view(*new_context_layer_shape)
<add>
<add> outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
<add> return outputs
<add>
<add>
<add>class BertSelfOutput(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> self.dense = MaskedLinear(
<add> config.hidden_size,
<add> config.hidden_size,
<add> pruning_method=config.pruning_method,
<add> mask_init=config.mask_init,
<add> mask_scale=config.mask_scale,
<add> )
<add> self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
<add> self.dropout = nn.Dropout(config.hidden_dropout_prob)
<add>
<add> def forward(self, hidden_states, input_tensor, threshold):
<add> hidden_states = self.dense(hidden_states, threshold=threshold)
<add> hidden_states = self.dropout(hidden_states)
<add> hidden_states = self.LayerNorm(hidden_states + input_tensor)
<add> return hidden_states
<add>
<add>
<add>class BertAttention(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> self.self = BertSelfAttention(config)
<add> self.output = BertSelfOutput(config)
<add> self.pruned_heads = set()
<add>
<add> def prune_heads(self, heads):
<add> if len(heads) == 0:
<add> return
<add> mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
<add> heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads
<add> for head in heads:
<add> # Compute how many pruned heads are before the head and move the index accordingly
<add> head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
<add> mask[head] = 0
<add> mask = mask.view(-1).contiguous().eq(1)
<add> index = torch.arange(len(mask))[mask].long()
<add>
<add> # Prune linear layers
<add> self.self.query = prune_linear_layer(self.self.query, index)
<add> self.self.key = prune_linear_layer(self.self.key, index)
<add> self.self.value = prune_linear_layer(self.self.value, index)
<add> self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
<add>
<add> # Update hyper params and store pruned heads
<add> self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
<add> self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
<add> self.pruned_heads = self.pruned_heads.union(heads)
<add>
<add> def forward(
<add> self,
<add> hidden_states,
<add> attention_mask=None,
<add> head_mask=None,
<add> encoder_hidden_states=None,
<add> encoder_attention_mask=None,
<add> threshold=None,
<add> ):
<add> self_outputs = self.self(
<add> hidden_states,
<add> attention_mask,
<add> head_mask,
<add> encoder_hidden_states,
<add> encoder_attention_mask,
<add> threshold=threshold,
<add> )
<add> attention_output = self.output(self_outputs[0], hidden_states, threshold=threshold)
<add> outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
<add> return outputs
<add>
<add>
<add>class BertIntermediate(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> self.dense = MaskedLinear(
<add> config.hidden_size,
<add> config.intermediate_size,
<add> pruning_method=config.pruning_method,
<add> mask_init=config.mask_init,
<add> mask_scale=config.mask_scale,
<add> )
<add> if isinstance(config.hidden_act, str):
<add> self.intermediate_act_fn = ACT2FN[config.hidden_act]
<add> else:
<add> self.intermediate_act_fn = config.hidden_act
<add>
<add> def forward(self, hidden_states, threshold):
<add> hidden_states = self.dense(hidden_states, threshold=threshold)
<add> hidden_states = self.intermediate_act_fn(hidden_states)
<add> return hidden_states
<add>
<add>
<add>class BertOutput(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> self.dense = MaskedLinear(
<add> config.intermediate_size,
<add> config.hidden_size,
<add> pruning_method=config.pruning_method,
<add> mask_init=config.mask_init,
<add> mask_scale=config.mask_scale,
<add> )
<add> self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
<add> self.dropout = nn.Dropout(config.hidden_dropout_prob)
<add>
<add> def forward(self, hidden_states, input_tensor, threshold):
<add> hidden_states = self.dense(hidden_states, threshold=threshold)
<add> hidden_states = self.dropout(hidden_states)
<add> hidden_states = self.LayerNorm(hidden_states + input_tensor)
<add> return hidden_states
<add>
<add>
<add>class BertLayer(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> self.attention = BertAttention(config)
<add> self.is_decoder = config.is_decoder
<add> if self.is_decoder:
<add> self.crossattention = BertAttention(config)
<add> self.intermediate = BertIntermediate(config)
<add> self.output = BertOutput(config)
<add>
<add> def forward(
<add> self,
<add> hidden_states,
<add> attention_mask=None,
<add> head_mask=None,
<add> encoder_hidden_states=None,
<add> encoder_attention_mask=None,
<add> threshold=None,
<add> ):
<add> self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, threshold=threshold)
<add> attention_output = self_attention_outputs[0]
<add> outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
<add>
<add> if self.is_decoder and encoder_hidden_states is not None:
<add> cross_attention_outputs = self.crossattention(
<add> attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
<add> )
<add> attention_output = cross_attention_outputs[0]
<add> outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
<add>
<add> intermediate_output = self.intermediate(attention_output, threshold=threshold)
<add> layer_output = self.output(intermediate_output, attention_output, threshold=threshold)
<add> outputs = (layer_output,) + outputs
<add> return outputs
<add>
<add>
<add>class BertEncoder(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> self.output_attentions = config.output_attentions
<add> self.output_hidden_states = config.output_hidden_states
<add> self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
<add>
<add> def forward(
<add> self,
<add> hidden_states,
<add> attention_mask=None,
<add> head_mask=None,
<add> encoder_hidden_states=None,
<add> encoder_attention_mask=None,
<add> threshold=None,
<add> ):
<add> all_hidden_states = ()
<add> all_attentions = ()
<add> for i, layer_module in enumerate(self.layer):
<add> if self.output_hidden_states:
<add> all_hidden_states = all_hidden_states + (hidden_states,)
<add>
<add> layer_outputs = layer_module(
<add> hidden_states,
<add> attention_mask,
<add> head_mask[i],
<add> encoder_hidden_states,
<add> encoder_attention_mask,
<add> threshold=threshold,
<add> )
<add> hidden_states = layer_outputs[0]
<add>
<add> if self.output_attentions:
<add> all_attentions = all_attentions + (layer_outputs[1],)
<add>
<add> # Add last layer
<add> if self.output_hidden_states:
<add> all_hidden_states = all_hidden_states + (hidden_states,)
<add>
<add> outputs = (hidden_states,)
<add> if self.output_hidden_states:
<add> outputs = outputs + (all_hidden_states,)
<add> if self.output_attentions:
<add> outputs = outputs + (all_attentions,)
<add> return outputs # last-layer hidden state, (all hidden states), (all attentions)
<add>
<add>
<add>class BertPooler(nn.Module):
<add> def __init__(self, config):
<add> super().__init__()
<add> self.dense = nn.Linear(config.hidden_size, config.hidden_size)
<add> self.activation = nn.Tanh()
<add>
<add> def forward(self, hidden_states):
<add> # We "pool" the model by simply taking the hidden state corresponding
<add> # to the first token.
<add> first_token_tensor = hidden_states[:, 0]
<add> pooled_output = self.dense(first_token_tensor)
<add> pooled_output = self.activation(pooled_output)
<add> return pooled_output
<add>
<add>
<add>class MaskedBertPreTrainedModel(PreTrainedModel):
<add> """ An abstract class to handle weights initialization and
<add> a simple interface for downloading and loading pretrained models.
<add> """
<add>
<add> config_class = MaskedBertConfig
<add> pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
<add> load_tf_weights = load_tf_weights_in_bert
<add> base_model_prefix = "bert"
<add>
<add> def _init_weights(self, module):
<add> """ Initialize the weights """
<add> if isinstance(module, (nn.Linear, nn.Embedding)):
<add> # Slightly different from the TF version which uses truncated_normal for initialization
<add> # cf https://github.com/pytorch/pytorch/pull/5617
<add> module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
<add> elif isinstance(module, BertLayerNorm):
<add> module.bias.data.zero_()
<add> module.weight.data.fill_(1.0)
<add> if isinstance(module, nn.Linear) and module.bias is not None:
<add> module.bias.data.zero_()
<add>
<add>
<add>MASKED_BERT_START_DOCSTRING = r"""
<add> This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
<add> Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
<add> usage and behavior.
<add>
<add> Parameters:
<add> config (:class:`~emmental.MaskedBertConfig`): Model configuration class with all the parameters of the model.
<add> Initializing with a config file does not load the weights associated with the model, only the configuration.
<add> Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
<add>"""
<add>
<add>MASKED_BERT_INPUTS_DOCSTRING = r"""
<add> Args:
<add> input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
<add> Indices of input sequence tokens in the vocabulary.
<add>
<add> Indices can be obtained using :class:`transformers.BertTokenizer`.
<add> See :func:`transformers.PreTrainedTokenizer.encode` and
<add> :func:`transformers.PreTrainedTokenizer.encode_plus` for details.
<add>
<add> `What are input IDs? <../glossary.html#input-ids>`__
<add> attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<add> Mask to avoid performing attention on padding token indices.
<add> Mask values selected in ``[0, 1]``:
<add> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
<add>
<add> `What are attention masks? <../glossary.html#attention-mask>`__
<add> token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<add> Segment token indices to indicate first and second portions of the inputs.
<add> Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
<add> corresponds to a `sentence B` token
<add>
<add> `What are token type IDs? <../glossary.html#token-type-ids>`_
<add> position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<add> Indices of positions of each input sequence tokens in the position embeddings.
<add> Selected in the range ``[0, config.max_position_embeddings - 1]``.
<add>
<add> `What are position IDs? <../glossary.html#position-ids>`_
<add> head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
<add> Mask to nullify selected heads of the self-attention modules.
<add> Mask values selected in ``[0, 1]``:
<add> :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
<add> inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
<add> Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
<add> This is useful if you want more control over how to convert `input_ids` indices into associated vectors
<add> than the model's internal embedding lookup matrix.
<add> encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
<add> Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
<add> if the model is configured as a decoder.
<add> encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<add> Mask to avoid performing attention on the padding token indices of the encoder input. This mask
<add> is used in the cross-attention if the model is configured as a decoder.
<add> Mask values selected in ``[0, 1]``:
<add> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
<add>"""
<add>
<add>
<add>@add_start_docstrings(
<add> "The bare Masked Bert Model transformer outputting raw hidden-states without any specific head on top.",
<add> MASKED_BERT_START_DOCSTRING,
<add>)
<add>class MaskedBertModel(MaskedBertPreTrainedModel):
<add> """
<add> The `MaskedBertModel` class replicates the :class:`~transformers.BertModel` class
<add> and adds specific inputs to compute the adaptive mask on the fly.
<add> Note that we freeze the embeddings modules from their pre-trained values.
<add> """
<add>
<add> def __init__(self, config):
<add> super().__init__(config)
<add> self.config = config
<add>
<add> self.embeddings = BertEmbeddings(config)
<add> self.embeddings.requires_grad_(requires_grad=False)
<add> self.encoder = BertEncoder(config)
<add> self.pooler = BertPooler(config)
<add>
<add> self.init_weights()
<add>
<add> def get_input_embeddings(self):
<add> return self.embeddings.word_embeddings
<add>
<add> def set_input_embeddings(self, value):
<add> self.embeddings.word_embeddings = value
<add>
<add> def _prune_heads(self, heads_to_prune):
<add> """ Prunes heads of the model.
<add> heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
<add> See base class PreTrainedModel
<add> """
<add> for layer, heads in heads_to_prune.items():
<add> self.encoder.layer[layer].attention.prune_heads(heads)
<add>
<add> @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING)
<add> def forward(
<add> self,
<add> input_ids=None,
<add> attention_mask=None,
<add> token_type_ids=None,
<add> position_ids=None,
<add> head_mask=None,
<add> inputs_embeds=None,
<add> encoder_hidden_states=None,
<add> encoder_attention_mask=None,
<add> threshold=None,
<add> ):
<add> r"""
<add> threshold (:obj:`float`):
<add> Threshold value (see :class:`~emmental.MaskedLinear`).
<add>
<add> Return:
<add> :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:
<add> last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
<add> Sequence of hidden-states at the output of the last layer of the model.
<add> pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
<add> Last layer hidden-state of the first token of the sequence (classification token)
<add> further processed by a Linear layer and a Tanh activation function. The Linear
<add> layer weights are trained from the next sentence prediction (classification)
<add> objective during pre-training.
<add>
<add> This output is usually *not* a good summary
<add> of the semantic content of the input, you're often better with averaging or pooling
<add> the sequence of hidden-states for the whole input sequence.
<add> hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
<add> of shape :obj:`(batch_size, sequence_length, hidden_size)`.
<add>
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
<add> :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
<add>
<add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
<add> heads.
<add> """
<add>
<add> if input_ids is not None and inputs_embeds is not None:
<add> raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
<add> elif input_ids is not None:
<add> input_shape = input_ids.size()
<add> elif inputs_embeds is not None:
<add> input_shape = inputs_embeds.size()[:-1]
<add> else:
<add> raise ValueError("You have to specify either input_ids or inputs_embeds")
<add>
<add> device = input_ids.device if input_ids is not None else inputs_embeds.device
<add>
<add> if attention_mask is None:
<add> attention_mask = torch.ones(input_shape, device=device)
<add> if token_type_ids is None:
<add> token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
<add>
<add> # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
<add> # ourselves in which case we just need to make it broadcastable to all heads.
<add> if attention_mask.dim() == 3:
<add> extended_attention_mask = attention_mask[:, None, :, :]
<add> elif attention_mask.dim() == 2:
<add> # Provided a padding mask of dimensions [batch_size, seq_length]
<add> # - if the model is a decoder, apply a causal mask in addition to the padding mask
<add> # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
<add> if self.config.is_decoder:
<add> batch_size, seq_length = input_shape
<add> seq_ids = torch.arange(seq_length, device=device)
<add> causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
<add> causal_mask = causal_mask.to(
<add> attention_mask.dtype
<add> ) # causal and attention masks must have same type with pytorch version < 1.3
<add> extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
<add> else:
<add> extended_attention_mask = attention_mask[:, None, None, :]
<add> else:
<add> raise ValueError(
<add> "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
<add> input_shape, attention_mask.shape
<add> )
<add> )
<add>
<add> # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
<add> # masked positions, this operation will create a tensor which is 0.0 for
<add> # positions we want to attend and -10000.0 for masked positions.
<add> # Since we are adding it to the raw scores before the softmax, this is
<add> # effectively the same as removing these entirely.
<add> extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
<add> extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
<add>
<add> # If a 2D or 3D attention mask is provided for the cross-attention
<add> # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
<add> if self.config.is_decoder and encoder_hidden_states is not None:
<add> encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
<add> encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
<add> if encoder_attention_mask is None:
<add> encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
<add>
<add> if encoder_attention_mask.dim() == 3:
<add> encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
<add> elif encoder_attention_mask.dim() == 2:
<add> encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
<add> else:
<add> raise ValueError(
<add> "Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format(
<add> encoder_hidden_shape, encoder_attention_mask.shape
<add> )
<add> )
<add>
<add> encoder_extended_attention_mask = encoder_extended_attention_mask.to(
<add> dtype=next(self.parameters()).dtype
<add> ) # fp16 compatibility
<add> encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
<add> else:
<add> encoder_extended_attention_mask = None
<add>
<add> # Prepare head mask if needed
<add> # 1.0 in head_mask indicates we keep the head
<add> # attention_probs has shape bsz x n_heads x N x N
<add> # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
<add> # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
<add> if head_mask is not None:
<add> if head_mask.dim() == 1:
<add> head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
<add> head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
<add> elif head_mask.dim() == 2:
<add> head_mask = (
<add> head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
<add> ) # We can specify head_mask for each layer
<add> head_mask = head_mask.to(
<add> dtype=next(self.parameters()).dtype
<add> ) # switch to float if needed + fp16 compatibility
<add> else:
<add> head_mask = [None] * self.config.num_hidden_layers
<add>
<add> embedding_output = self.embeddings(
<add> input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
<add> )
<add> encoder_outputs = self.encoder(
<add> embedding_output,
<add> attention_mask=extended_attention_mask,
<add> head_mask=head_mask,
<add> encoder_hidden_states=encoder_hidden_states,
<add> encoder_attention_mask=encoder_extended_attention_mask,
<add> threshold=threshold,
<add> )
<add> sequence_output = encoder_outputs[0]
<add> pooled_output = self.pooler(sequence_output)
<add>
<add> outputs = (sequence_output, pooled_output,) + encoder_outputs[
<add> 1:
<add> ] # add hidden_states and attentions if they are here
<add> return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
<add>
<add>
<add>@add_start_docstrings(
<add> """Masked Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
<add> the pooled output) e.g. for GLUE tasks. """,
<add> MASKED_BERT_START_DOCSTRING,
<add>)
<add>class MaskedBertForSequenceClassification(MaskedBertPreTrainedModel):
<add> def __init__(self, config):
<add> super().__init__(config)
<add> self.num_labels = config.num_labels
<add>
<add> self.bert = MaskedBertModel(config)
<add> self.dropout = nn.Dropout(config.hidden_dropout_prob)
<add> self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
<add>
<add> self.init_weights()
<add>
<add> @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING)
<add> def forward(
<add> self,
<add> input_ids=None,
<add> attention_mask=None,
<add> token_type_ids=None,
<add> position_ids=None,
<add> head_mask=None,
<add> inputs_embeds=None,
<add> labels=None,
<add> threshold=None,
<add> ):
<add> r"""
<add> labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
<add> Labels for computing the sequence classification/regression loss.
<add> Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
<add> If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
<add> If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
<add> threshold (:obj:`float`):
<add> Threshold value (see :class:`~emmental.MaskedLinear`).
<add>
<add> Returns:
<add> :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:
<add> loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
<add> Classification (or regression if config.num_labels==1) loss.
<add> logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
<add> Classification (or regression if config.num_labels==1) scores (before SoftMax).
<add> hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
<add> of shape :obj:`(batch_size, sequence_length, hidden_size)`.
<add>
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
<add> :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
<add>
<add> Attention weights after the attention softmax, used to compute the weighted average in the self-attention
<add> heads.
<add> """
<add>
<add> outputs = self.bert(
<add> input_ids,
<add> attention_mask=attention_mask,
<add> token_type_ids=token_type_ids,
<add> position_ids=position_ids,
<add> head_mask=head_mask,
<add> inputs_embeds=inputs_embeds,
<add> threshold=threshold,
<add> )
<add>
<add> pooled_output = outputs[1]
<add>
<add> pooled_output = self.dropout(pooled_output)
<add> logits = self.classifier(pooled_output)
<add>
<add> outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
<add>
<add> if labels is not None:
<add> if self.num_labels == 1:
<add> # We are doing regression
<add> loss_fct = MSELoss()
<add> loss = loss_fct(logits.view(-1), labels.view(-1))
<add> else:
<add> loss_fct = CrossEntropyLoss()
<add> loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
<add> outputs = (loss,) + outputs
<add>
<add> return outputs # (loss), logits, (hidden_states), (attentions)
<add>
<add>
<add>@add_start_docstrings(
<add> """Masked Bert Model with a multiple choice classification head on top (a linear layer on top of
<add> the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
<add> MASKED_BERT_START_DOCSTRING,
<add>)
<add>class MaskedBertForMultipleChoice(MaskedBertPreTrainedModel):
<add> def __init__(self, config):
<add> super().__init__(config)
<add>
<add> self.bert = MaskedBertModel(config)
<add> self.dropout = nn.Dropout(config.hidden_dropout_prob)
<add> self.classifier = nn.Linear(config.hidden_size, 1)
<add>
<add> self.init_weights()
<add>
<add> @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING)
<add> def forward(
<add> self,
<add> input_ids=None,
<add> attention_mask=None,
<add> token_type_ids=None,
<add> position_ids=None,
<add> head_mask=None,
<add> inputs_embeds=None,
<add> labels=None,
<add> threshold=None,
<add> ):
<add> r"""
<add> labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
<add> Labels for computing the multiple choice classification loss.
<add> Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension
<add> of the input tensors. (see `input_ids` above)
<add> threshold (:obj:`float`):
<add> Threshold value (see :class:`~emmental.MaskedLinear`).
<add>
<add> Returns:
<add> :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:
<add> loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
<add> Classification loss.
<add> classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
<add> `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
<add>
<add> Classification scores (before SoftMax).
<add> hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
<add> of shape :obj:`(batch_size, sequence_length, hidden_size)`.
<add>
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
<add> :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
<add>
<add> Attention weights after the attention softmax, used to compute the weighted average in the self-attention
<add> heads.
<add>
<add> """
<add> num_choices = input_ids.shape[1]
<add>
<add> input_ids = input_ids.view(-1, input_ids.size(-1))
<add> attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
<add> token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
<add> position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
<add>
<add> outputs = self.bert(
<add> input_ids,
<add> attention_mask=attention_mask,
<add> token_type_ids=token_type_ids,
<add> position_ids=position_ids,
<add> head_mask=head_mask,
<add> inputs_embeds=inputs_embeds,
<add> threshold=threshold,
<add> )
<add>
<add> pooled_output = outputs[1]
<add>
<add> pooled_output = self.dropout(pooled_output)
<add> logits = self.classifier(pooled_output)
<add> reshaped_logits = logits.view(-1, num_choices)
<add>
<add> outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
<add>
<add> if labels is not None:
<add> loss_fct = CrossEntropyLoss()
<add> loss = loss_fct(reshaped_logits, labels)
<add> outputs = (loss,) + outputs
<add>
<add> return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
<add>
<add>
<add>@add_start_docstrings(
<add> """Masked Bert Model with a token classification head on top (a linear layer on top of
<add> the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
<add> MASKED_BERT_START_DOCSTRING,
<add>)
<add>class MaskedBertForTokenClassification(MaskedBertPreTrainedModel):
<add> def __init__(self, config):
<add> super().__init__(config)
<add> self.num_labels = config.num_labels
<add>
<add> self.bert = MaskedBertModel(config)
<add> self.dropout = nn.Dropout(config.hidden_dropout_prob)
<add> self.classifier = nn.Linear(config.hidden_size, config.num_labels)
<add>
<add> self.init_weights()
<add>
<add> @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING)
<add> def forward(
<add> self,
<add> input_ids=None,
<add> attention_mask=None,
<add> token_type_ids=None,
<add> position_ids=None,
<add> head_mask=None,
<add> inputs_embeds=None,
<add> labels=None,
<add> threshold=None,
<add> ):
<add> r"""
<add> labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<add> Labels for computing the token classification loss.
<add> Indices should be in ``[0, ..., config.num_labels - 1]``.
<add> threshold (:obj:`float`):
<add> Threshold value (see :class:`~emmental.MaskedLinear`).
<add>
<add> Returns:
<add> :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:
<add> loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
<add> Classification loss.
<add> scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
<add> Classification scores (before SoftMax).
<add> hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
<add> of shape :obj:`(batch_size, sequence_length, hidden_size)`.
<add>
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
<add> :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
<add>
<add> Attention weights after the attention softmax, used to compute the weighted average in the self-attention
<add> heads.
<add> """
<add>
<add> outputs = self.bert(
<add> input_ids,
<add> attention_mask=attention_mask,
<add> token_type_ids=token_type_ids,
<add> position_ids=position_ids,
<add> head_mask=head_mask,
<add> inputs_embeds=inputs_embeds,
<add> threshold=threshold,
<add> )
<add>
<add> sequence_output = outputs[0]
<add>
<add> sequence_output = self.dropout(sequence_output)
<add> logits = self.classifier(sequence_output)
<add>
<add> outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
<add> if labels is not None:
<add> loss_fct = CrossEntropyLoss()
<add> # Only keep active parts of the loss
<add> if attention_mask is not None:
<add> active_loss = attention_mask.view(-1) == 1
<add> active_logits = logits.view(-1, self.num_labels)
<add> active_labels = torch.where(
<add> active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
<add> )
<add> loss = loss_fct(active_logits, active_labels)
<add> else:
<add> loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
<add> outputs = (loss,) + outputs
<add>
<add> return outputs # (loss), scores, (hidden_states), (attentions)
<add>
<add>
<add>@add_start_docstrings(
<add> """Masked Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
<add> layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
<add> MASKED_BERT_START_DOCSTRING,
<add>)
<add>class MaskedBertForQuestionAnswering(MaskedBertPreTrainedModel):
<add> def __init__(self, config):
<add> super().__init__(config)
<add> self.num_labels = config.num_labels
<add>
<add> self.bert = MaskedBertModel(config)
<add> self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
<add>
<add> self.init_weights()
<add>
<add> @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING)
<add> def forward(
<add> self,
<add> input_ids=None,
<add> attention_mask=None,
<add> token_type_ids=None,
<add> position_ids=None,
<add> head_mask=None,
<add> inputs_embeds=None,
<add> start_positions=None,
<add> end_positions=None,
<add> threshold=None,
<add> ):
<add> r"""
<add> start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
<add> Labels for position (index) of the start of the labelled span for computing the token classification loss.
<add> Positions are clamped to the length of the sequence (`sequence_length`).
<add> Position outside of the sequence are not taken into account for computing the loss.
<add> end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
<add> Labels for position (index) of the end of the labelled span for computing the token classification loss.
<add> Positions are clamped to the length of the sequence (`sequence_length`).
<add> Position outside of the sequence are not taken into account for computing the loss.
<add> threshold (:obj:`float`):
<add> Threshold value (see :class:`~emmental.MaskedLinear`).
<add>
<add> Returns:
<add> :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:
<add> loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`start_positions` and :obj:`end_positions` are provided):
<add> Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
<add> start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
<add> Span-start scores (before SoftMax).
<add> end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
<add> Span-end scores (before SoftMax).
<add> hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
<add> of shape :obj:`(batch_size, sequence_length, hidden_size)`.
<add>
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
<add> Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
<add> :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
<add>
<add> Attention weights after the attention softmax, used to compute the weighted average in the self-attention
<add> heads.
<add> """
<add>
<add> outputs = self.bert(
<add> input_ids,
<add> attention_mask=attention_mask,
<add> token_type_ids=token_type_ids,
<add> position_ids=position_ids,
<add> head_mask=head_mask,
<add> inputs_embeds=inputs_embeds,
<add> threshold=threshold,
<add> )
<add>
<add> sequence_output = outputs[0]
<add>
<add> logits = self.qa_outputs(sequence_output)
<add> start_logits, end_logits = logits.split(1, dim=-1)
<add> start_logits = start_logits.squeeze(-1)
<add> end_logits = end_logits.squeeze(-1)
<add>
<add> outputs = (start_logits, end_logits,) + outputs[2:]
<add> if start_positions is not None and end_positions is not None:
<add> # If we are on multi-GPU, split adds a dimension
<add> if len(start_positions.size()) > 1:
<add> start_positions = start_positions.squeeze(-1)
<add> if len(end_positions.size()) > 1:
<add> end_positions = end_positions.squeeze(-1)
<add> # sometimes the start/end positions are outside our model inputs, we ignore these terms
<add> ignored_index = start_logits.size(1)
<add> start_positions.clamp_(0, ignored_index)
<add> end_positions.clamp_(0, ignored_index)
<add>
<add> loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
<add> start_loss = loss_fct(start_logits, start_positions)
<add> end_loss = loss_fct(end_logits, end_positions)
<add> total_loss = (start_loss + end_loss) / 2
<add> outputs = (total_loss,) + outputs
<add>
<add> return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
<ide><path>examples/movement-pruning/emmental/modules/__init__.py
<add>from .binarizer import ThresholdBinarizer, TopKBinarizer, MagnitudeBinarizer
<add>from .masked_nn import MaskedLinear
<ide><path>examples/movement-pruning/emmental/modules/binarizer.py
<add># coding=utf-8
<add># Copyright 2020-present, AllenAI Authors, University of Illinois Urbana-Champaign,
<add># Intel Nervana Systems and the HuggingFace Inc. team.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>"""
<add>Binarizers take a (real value) matrix as input and produce a binary (values in {0,1}) mask of the same shape.
<add>"""
<add>
<add>import torch
<add>from torch import autograd
<add>
<add>
<add>class ThresholdBinarizer(autograd.Function):
<add> """
<add> Threshold binarizer.
<add> Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j} > \tau`
<add> where `\tau` is a real value threshold.
<add>
<add> Implementation is inspired from:
<add> https://github.com/arunmallya/piggyback
<add> Piggyback: Adapting a Single Network to Multiple Tasks by Learning to Mask Weights
<add> Arun Mallya, Dillon Davis, Svetlana Lazebnik
<add> """
<add>
<add> @staticmethod
<add> def forward(ctx, inputs: torch.Tensor, threshold: float, sigmoid: bool):
<add> """
<add> Args:
<add> inputs (`torch.FloatTensor`)
<add> The input matrix from which the binarizer computes the binary mask.
<add> threshold (`float`)
<add> The threshold value (in R).
<add> sigmoid (`bool`)
<add> If set to ``True``, we apply the sigmoid function to the `inputs` matrix before comparing to `threshold`.
<add> In this case, `threshold` should be a value between 0 and 1.
<add> Returns:
<add> mask (`torch.FloatTensor`)
<add> Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
<add> retained, 0 - the associated weight is pruned).
<add> """
<add> nb_elems = inputs.numel()
<add> nb_min = int(0.005 * nb_elems) + 1
<add> if sigmoid:
<add> mask = (torch.sigmoid(inputs) > threshold).type(inputs.type())
<add> else:
<add> mask = (inputs > threshold).type(inputs.type())
<add> if mask.sum() < nb_min:
<add> # We limit the pruning so that at least 0.5% (half a percent) of the weights are remaining
<add> k_threshold = inputs.flatten().kthvalue(max(nb_elems - nb_min, 1)).values
<add> mask = (inputs > k_threshold).type(inputs.type())
<add> return mask
<add>
<add> @staticmethod
<add> def backward(ctx, gradOutput):
<add> return gradOutput, None, None
<add>
<add>
<add>class TopKBinarizer(autograd.Function):
<add> """
<add> Top-k Binarizer.
<add> Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
<add> is among the k% highest values of S.
<add>
<add> Implementation is inspired from:
<add> https://github.com/allenai/hidden-networks
<add> What's hidden in a randomly weighted neural network?
<add> Vivek Ramanujan*, Mitchell Wortsman*, Aniruddha Kembhavi, Ali Farhadi, Mohammad Rastegari
<add> """
<add>
<add> @staticmethod
<add> def forward(ctx, inputs: torch.Tensor, threshold: float):
<add> """
<add> Args:
<add> inputs (`torch.FloatTensor`)
<add> The input matrix from which the binarizer computes the binary mask.
<add> threshold (`float`)
<add> The percentage of weights to keep (the rest is pruned).
<add> `threshold` is a float between 0 and 1.
<add> Returns:
<add> mask (`torch.FloatTensor`)
<add> Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
<add> retained, 0 - the associated weight is pruned).
<add> """
<add> # Get the subnetwork by sorting the inputs and using the top threshold %
<add> mask = inputs.clone()
<add> _, idx = inputs.flatten().sort(descending=True)
<add> j = int(threshold * inputs.numel())
<add>
<add> # flat_out and mask access the same memory.
<add> flat_out = mask.flatten()
<add> flat_out[idx[j:]] = 0
<add> flat_out[idx[:j]] = 1
<add> return mask
<add>
<add> @staticmethod
<add> def backward(ctx, gradOutput):
<add> return gradOutput, None
<add>
<add>
<add>class MagnitudeBinarizer(object):
<add> """
<add> Magnitude Binarizer.
<add> Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
<add> is among the k% highest values of |S| (absolute value).
<add>
<add> Implementation is inspired from https://github.com/NervanaSystems/distiller/blob/2291fdcc2ea642a98d4e20629acb5a9e2e04b4e6/distiller/pruning/automated_gradual_pruner.py#L24
<add> """
<add>
<add> @staticmethod
<add> def apply(inputs: torch.Tensor, threshold: float):
<add> """
<add> Args:
<add> inputs (`torch.FloatTensor`)
<add> The input matrix from which the binarizer computes the binary mask.
<add> This input matrix is typically the weight matrix.
<add> threshold (`float`)
<add> The percentage of weights to keep (the rest is pruned).
<add> `threshold` is a float between 0 and 1.
<add> Returns:
<add> mask (`torch.FloatTensor`)
<add> Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
<add> retained, 0 - the associated weight is pruned).
<add> """
<add> # Get the subnetwork by sorting the inputs and using the top threshold %
<add> mask = inputs.clone()
<add> _, idx = inputs.abs().flatten().sort(descending=True)
<add> j = int(threshold * inputs.numel())
<add>
<add> # flat_out and mask access the same memory.
<add> flat_out = mask.flatten()
<add> flat_out[idx[j:]] = 0
<add> flat_out[idx[:j]] = 1
<add> return mask
<ide><path>examples/movement-pruning/emmental/modules/masked_nn.py
<add># coding=utf-8
<add># Copyright 2020-present, the HuggingFace Inc. team.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>"""
<add>Masked Linear module: A fully connected layer that computes an adaptive binary mask on the fly.
<add>The mask (binary or not) is computed at each forward pass and multiplied against
<add>the weight matrix to prune a portion of the weights.
<add>The pruned weight matrix is then multiplied against the inputs (and if necessary, the bias is added).
<add>"""
<add>
<add>import math
<add>
<add>import torch
<add>from torch import nn
<add>from torch.nn import functional as F
<add>from torch.nn import init
<add>
<add>from .binarizer import ThresholdBinarizer, TopKBinarizer, MagnitudeBinarizer
<add>
<add>
<add>class MaskedLinear(nn.Linear):
<add> """
<add> Fully Connected layer with on the fly adaptive mask.
<add> If needed, a score matrix is created to store the importance of each associated weight.
<add> """
<add>
<add> def __init__(
<add> self,
<add> in_features: int,
<add> out_features: int,
<add> bias: bool = True,
<add> mask_init: str = "constant",
<add> mask_scale: float = 0.0,
<add> pruning_method: str = "topK",
<add> ):
<add> """
<add> Args:
<add> in_features (`int`)
<add> Size of each input sample
<add> out_features (`int`)
<add> Size of each output sample
<add> bias (`bool`)
<add> If set to ``False``, the layer will not learn an additive bias.
<add> Default: ``True``
<add> mask_init (`str`)
<add> The initialization method for the score matrix if a score matrix is needed.
<add> Choices: ["constant", "uniform", "kaiming"]
<add> Default: ``constant``
<add> mask_scale (`float`)
<add> The initialization parameter for the chosen initialization method `mask_init`.
<add> Default: ``0.``
<add> pruning_method (`str`)
<add> Method to compute the mask.
<add> Choices: ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"]
<add> Default: ``topK``
<add> """
<add> super(MaskedLinear, self).__init__(in_features=in_features, out_features=out_features, bias=bias)
<add> assert pruning_method in ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"]
<add> self.pruning_method = pruning_method
<add>
<add> if self.pruning_method in ["topK", "threshold", "sigmoied_threshold", "l0"]:
<add> self.mask_scale = mask_scale
<add> self.mask_init = mask_init
<add> self.mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
<add> self.init_mask()
<add>
<add> def init_mask(self):
<add> if self.mask_init == "constant":
<add> init.constant_(self.mask_scores, val=self.mask_scale)
<add> elif self.mask_init == "uniform":
<add> init.uniform_(self.mask_scores, a=-self.mask_scale, b=self.mask_scale)
<add> elif self.mask_init == "kaiming":
<add> init.kaiming_uniform_(self.mask_scores, a=math.sqrt(5))
<add>
<add> def forward(self, input: torch.Tensor, threshold: float):
<add> # Get the mask
<add> if self.pruning_method == "topK":
<add> mask = TopKBinarizer.apply(self.mask_scores, threshold)
<add> elif self.pruning_method in ["threshold", "sigmoied_threshold"]:
<add> sig = "sigmoied" in self.pruning_method
<add> mask = ThresholdBinarizer.apply(self.mask_scores, threshold, sig)
<add> elif self.pruning_method == "magnitude":
<add> mask = MagnitudeBinarizer.apply(self.weight, threshold)
<add> elif self.pruning_method == "l0":
<add> l, r, b = -0.1, 1.1, 2 / 3
<add> if self.training:
<add> u = torch.zeros_like(self.mask_scores).uniform_().clamp(0.0001, 0.9999)
<add> s = torch.sigmoid((u.log() - (1 - u).log() + self.mask_scores) / b)
<add> else:
<add> s = torch.sigmoid(self.mask_scores)
<add> s_bar = s * (r - l) + l
<add> mask = s_bar.clamp(min=0.0, max=1.0)
<add> # Mask weights with computed mask
<add> weight_thresholded = mask * self.weight
<add> # Compute output (linear layer) with masked weights
<add> return F.linear(input, weight_thresholded, self.bias) | 6 |
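Taken together, this commit implements movement pruning: each `MaskedLinear` learns real-valued `mask_scores`, binarizes them on every forward pass, and relies on the binarizers' straight-through `backward` passes (which return the incoming gradient unchanged) to keep the scores trainable. A small sketch exercising that behavior — the import path is an assumption based on the files above and requires the `emmental` package to be on `sys.path`:

```python
import torch
from emmental.modules import MaskedLinear  # assumed path from the patch above

layer = MaskedLinear(in_features=8, out_features=4, pruning_method="topK")
x = torch.randn(2, 8)

out = layer(x, threshold=0.5)  # keep the top 50% of mask scores
out.sum().backward()

# The hard top-k mask has zero gradient almost everywhere, but the
# straight-through backward still populates gradients on the scores:
print(layer.mask_scores.grad is not None)       # True
print((layer.weight.grad != 0).float().mean())  # ~0.5: only unmasked weights get grads
```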
Ruby | Ruby | fix tests for mail 2.8 | bf01cc6744a3593c6256ab6765d7d7a500696bd5 | <ide><path>actionmailer/test/base_test.rb
<ide> class BaseTest < ActiveSupport::TestCase
<ide>
<ide> test "can pass random headers in as a hash to mail" do
<ide> hash = { "X-Special-Domain-Specific-Header" => "SecretValue",
<del> "In-Reply-To" => "[email protected]" }
<add> "In-Reply-To" => "<[email protected]>" }
<ide> mail = BaseMailer.welcome(hash)
<ide> assert_equal("SecretValue", mail["X-Special-Domain-Specific-Header"].decoded)
<del> assert_equal("[email protected]", mail["In-Reply-To"].decoded)
<add> assert_equal("<[email protected]>", mail["In-Reply-To"].decoded)
<ide> end
<ide>
<ide> test "can pass random headers in as a hash to headers" do
<ide> hash = { "X-Special-Domain-Specific-Header" => "SecretValue",
<del> "In-Reply-To" => "[email protected]" }
<add> "In-Reply-To" => "<[email protected]>" }
<ide> mail = BaseMailer.welcome_with_headers(hash)
<ide> assert_equal("SecretValue", mail["X-Special-Domain-Specific-Header"].decoded)
<del> assert_equal("[email protected]", mail["In-Reply-To"].decoded)
<add> assert_equal("<[email protected]>", mail["In-Reply-To"].decoded)
<ide> end
<ide>
<ide> # Attachments | 1 |
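The updated fixtures wrap the message-ids in angle brackets, matching the RFC 5322 msg-id syntax that newer Mail releases expect when parsing `In-Reply-To`. Python's stdlib models the same convention — a small, hypothetical illustration:

```python
from email.message import EmailMessage
from email.utils import make_msgid

msg = EmailMessage()
# RFC 5322 msg-ids carry the surrounding angle brackets.
msg["In-Reply-To"] = "<[email protected]>"
msg["Message-ID"] = make_msgid(domain="example.com")  # generated with brackets
print(msg["In-Reply-To"])                 # <[email protected]>
print(msg["Message-ID"].startswith("<"))  # True
```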
Python | Python | improve otto example | b763f8964f60a12df64f373546bf3d884e26ea8b | <ide><path>examples/kaggle_otto_nn.py
<ide> - with smaller layers, larger layers
<ide> - with more layers, less layers
<ide> - with different optimizers (SGD+momentum+decay is probably better than Adam!)
<add>
<add> Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
<ide> '''
<ide>
<ide> np.random.seed(1337) # for reproducibility
<ide> def make_submission(y_prob, ids, encoder, fname):
<ide>
<ide> print("Training model...")
<ide>
<del>model.fit(X, y, nb_epoch=20, batch_size=16, validation_split=0.15)
<add>model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
<ide>
<ide> print("Generating submission...")
<ide> | 1 |
Java | Java | remove static import from invocablehandlermethod | 495ba2f58cdd195c4c0d16be5b43c22a8a193d81 | <ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/result/method/InvocableHandlerMethod.java
<ide> import org.springframework.web.reactive.HandlerResult;
<ide> import org.springframework.web.server.ServerWebExchange;
<ide>
<del>import static org.springframework.web.reactive.result.method.InvocableHandlerMethodKt.invokeHandlerMethod;
<del>
<ide> /**
<ide> * Extension of {@link HandlerMethod} that invokes the underlying method with
<ide> * argument values resolved from the current HTTP request through a list of
<ide> public Mono<HandlerResult> invoke(
<ide> ReflectionUtils.makeAccessible(getBridgedMethod());
<ide> Method method = getBridgedMethod();
<ide> if (KotlinDetector.isKotlinReflectPresent() && KotlinDetector.isKotlinType(method.getDeclaringClass())) {
<del> value = invokeHandlerMethod(method, getBean(), args);
<add> value = InvocableHandlerMethodKt.invokeHandlerMethod(method, getBean(), args);
<ide> }
<ide> else {
<ide> value = method.invoke(getBean(), args); | 1 |
Ruby | Ruby | push the formatter up to the route object | 620cb0167667e2f5d788c11b637db51d20496dcc | <ide><path>actionpack/lib/action_dispatch/journey/path/pattern.rb
<ide> def initialize(strexp)
<ide> @required_names = nil
<ide> @re = nil
<ide> @offsets = nil
<del> @format = Visitors::FormatBuilder.new.accept(spec)
<ide> end
<ide>
<del> def format_path(path_options)
<del> @format.evaluate path_options
<add> def build_formatter
<add> Visitors::FormatBuilder.new.accept(spec)
<ide> end
<ide>
<ide> def ast
<ide><path>actionpack/lib/action_dispatch/journey/route.rb
<ide> def initialize(name, app, path, constraints, defaults = {})
<ide> @parts = nil
<ide> @decorated_ast = nil
<ide> @precedence = 0
<add> @formatter = @path.build_formatter
<ide> end
<ide>
<ide> def ast
<ide> def format(path_options)
<ide> value.to_s == defaults[key].to_s && !required_parts.include?(key)
<ide> end
<ide>
<del> path.format_path path_options
<add> @formatter.evaluate path_options
<ide> end
<ide>
<ide> def optimized_path | 2 |
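The change moves formatter construction out of the per-call `format_path` and into the `Route` constructor, paying the pattern-compilation cost once per route instead of on every URL generation. A rough Python sketch of the same precompute-at-construction trade-off (not Rails' actual formatter algorithm):

```python
import re

class Route:
    """Sketch: compile the path pattern into a formatter once at
    construction instead of on every #format call."""

    def __init__(self, pattern):
        self.pattern = pattern
        # Precompute the template a single time; requests only interpolate.
        self._parts = re.split(r"(:\w+)", pattern)

    def format(self, options):
        return "".join(
            str(options[p[1:]]) if p.startswith(":") else p for p in self._parts
        )

r = Route("/articles/:id")
print(r.format({"id": 7}))  # /articles/7
```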
Text | Text | improve doc on unintended breaking changes | 3fe9267592cfef01b50f4f94eaa50b2dc495eb1a | <ide><path>COLLABORATOR_GUIDE.md
<ide> - [Breaking Changes](#breaking-changes)
<ide> - [Breaking Changes and Deprecations](#breaking-changes-and-deprecations)
<ide> - [Breaking Changes to Internal Elements](#breaking-changes-to-internal-elements)
<del> - [When Breaking Changes Actually Break Things](#when-breaking-changes-actually-break-things)
<add> - [Unintended Breaking Changes](#unintended-breaking-changes)
<ide> - [Reverting commits](#reverting-commits)
<ide> - [Introducing New Modules](#introducing-new-modules)
<ide> - [Additions to N-API](#additions-to-n-api)
<ide> an effort to determine the potential impact of the change in the ecosystem. Use
<ide> If a change will cause ecosystem breakage, then it is semver-major. Consider
<ide> providing a Public API in such cases.
<ide>
<del>#### When Breaking Changes Actually Break Things
<add>#### Unintended Breaking Changes
<ide>
<del>When any changes are landed on the master branch and it is determined that the
<del>changes *do* break existing code, a decision may be made to revert those
<del>changes either temporarily or permanently. However, the decision to revert or
<del>not can often be based on many complex factors that are not easily codified. It
<del>is also possible that the breaking commit can be labeled retroactively as a
<del>semver-major change that will not be backported to Current or LTS branches.
<add>Sometimes, a change intended to be non-breaking turns out to be a breaking
<add>change. If such a change lands on the master branch, a Collaborator may revert
<add>it. As an alternative to reverting, the TSC may apply the semver-major label
<add>after-the-fact.
<ide>
<ide> ##### Reverting commits
<ide> | 1 |
PHP | PHP | add cross version constants | 73a6ebea8d109e34c9218abb88983f78bce75830 | <ide><path>cake/basics.php
<ide> define('MONTH', 2592000);
<ide> define('YEAR', 31536000);
<ide>
<add>/**
<add> * Patch old versions of PHP4.
<add> */
<add>if (!defined('PHP_EOL')) {
<add> switch (strtoupper(substr(PHP_OS, 0, 3))) {
<add> case 'WIN':
<add> define('PHP_EOL', "\r\n");
<add> break;
<add> default:
<add> define('PHP_EOL', "\n");
<add> }
<add>}
<add>
<add>/**
<add> * Patch PHP4 and PHP5.0
<add> */
<add>if (!defined('DATE_RFC2822')) {
<add> define('DATE_RFC2822', 'D, d M Y H:i:s O');
<add>}
<add>
<ide> /**
<ide> * Patch for PHP < 5.0
<ide> */ | 1 |
Ruby | Ruby | remove unnecessary class | 93edfaa7b4a314c60a22f3e56fc07fabbec0c733 | <ide><path>activemodel/lib/active_model/errors.rb
<ide> def messages
<ide> end
<ide>
<ide> # Returns a Hash of attributes with an array of their error details.
<del> #
<del> # Updating this hash would still update errors state for backward
<del> # compatibility, but this behavior is deprecated.
<ide> def details
<ide> hash = group_by_attribute.transform_values do |errors|
<ide> errors.map(&:details)
<ide> end
<del> DeprecationHandlingDetailsHash.new(hash)
<add> hash.default = []
<add> hash.freeze
<add> hash
<ide> end
<ide>
<ide> # Returns a Hash of attributes with an array of their Error objects.
<ide> def prepare_content
<ide> end
<ide> end
<ide>
<del> class DeprecationHandlingDetailsHash < SimpleDelegator # :nodoc:
<del> def initialize(details)
<del> details.default = []
<del> details.freeze
<del> super(details)
<del> end
<del> end
<del>
<ide> # Raised when a validation cannot be corrected by end users and are considered
<ide> # exceptional.
<ide> # | 1 |
Ruby | Ruby | add support for screenshots | e9127f7aa082986952ffcc8331b675a3a99c3a83 | <ide><path>actionpack/lib/system_testing/driver_adapters/capybara_driver.rb
<ide> def initialize(name)
<ide> def call
<ide> Capybara.default_driver = @name
<ide> end
<add>
<add> def supports_screenshots?
<add> @name != :rack_test
<add> end
<ide> end
<ide> end
<ide> end
<ide><path>actionpack/lib/system_testing/driver_adapters/rails_selenium_driver.rb
<ide> def call # :nodoc:
<ide> setup
<ide> end
<ide>
<add> def supports_screenshots?
<add> true
<add> end
<add>
<ide> private
<ide> def registration
<ide> register_browser_driver
<ide><path>actionpack/lib/system_testing/test_helper.rb
<ide>
<ide> module SystemTesting
<ide> module TestHelper # :nodoc:
<del> include TestHelpers::FormHelper
<ide> include TestHelpers::Assertions
<add> include TestHelpers::FormHelper
<add> include TestHelpers::ScreenshotHelper
<ide> include Capybara::DSL
<ide>
<ide> Capybara.app = Rack::Builder.new do
<ide> module TestHelper # :nodoc:
<ide> end
<ide>
<ide> def after_teardown
<add> take_screenshot if supported?
<ide> Capybara.reset_sessions!
<ide> super
<ide> end
<ide><path>actionpack/lib/system_testing/test_helpers.rb
<ide> module SystemTesting
<ide> module TestHelpers
<ide> extend ActiveSupport::Autoload
<ide>
<del> autoload :FormHelper
<ide> autoload :Assertions
<add> autoload :FormHelper
<add> autoload :ScreenshotHelper
<ide> end
<ide> end
<ide><path>actionpack/lib/system_testing/test_helpers/screenshot_helper.rb
<add>module SystemTesting
<add> module TestHelpers
<add> # Screenshot helper for system testing
<add> module ScreenshotHelper
<add> # Takes a screenshot of the current page in the browser if the system
<add> # test driver supports screenshots and the test failed.
<add> #
<add> # Additionally, +take_screenshot+ can be used within your tests at points
<add> # where you want to take a screenshot if the driver supports screenshots. The
<add> # Rack Test driver does not support screenshots.
<add> #
<add> # You can check if the driver supports screenshots by running:
<add> #
<add> # Rails::SystemTestCase.driver.supports_screenshots?
<add> # => true
<add> def take_screenshot
<add> puts "[Screenshot]: #{image_path}"
<add> puts find_image
<add> end
<add>
<add> private
<add> def supported?
<add> Rails::SystemTestCase.driver.supports_screenshots? && !passed?
<add> end
<add>
<add> def image_path
<add> path = "tmp/screenshots/failures_#{method_name}.png"
<add> page.save_screenshot(Rails.root.join(path))
<add> path
<add> end
<add>
<add> def find_image
<add> if ENV['CAPYBARA_INLINE_SCREENSHOT'] == 'artifact'
<add> "\e]1338;url=artifact://#{image_path}\a"
<add> else
<add> name = inline_base64(File.basename(image_path))
<add> image = inline_base64(File.read(image_path))
<add> "\e]1337;File=name=#{name};height=400px;inline=1:#{image}\a"
<add> end
<add> end
<add>
<add> def inline_base64(path)
<add> Base64.encode64(path).gsub("\n",'')
<add> end
<add> end
<add> end
<add>end
<add>
<ide><path>actionpack/test/system_testing/screenshot_helper_test.rb
<add>require 'abstract_unit'
<add>
<add>class ScreenshotHelperTest < ActiveSupport::TestCase
<add> def test_driver_support_for_screenshots
<add> Rails::SystemTestCase.driver = :rails_selenium_driver
<add> assert Rails::SystemTestCase.driver.supports_screenshots?
<add>
<add> Rails::SystemTestCase.driver = :rack_test
<add> assert_not Rails::SystemTestCase.driver.supports_screenshots?
<add>
<add> Rails::SystemTestCase.driver = :selenium
<add> assert Rails::SystemTestCase.driver.supports_screenshots?
<add>
<add> Rails::SystemTestCase.driver = :webkit
<add> assert Rails::SystemTestCase.driver.supports_screenshots?
<add>
<add> Rails::SystemTestCase.driver = :poltergeist
<add> assert Rails::SystemTestCase.driver.supports_screenshots?
<add> end
<add>end | 6 |
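The `find_image` helper emits iTerm2's proprietary OSC 1337 inline-image escape sequence (plus an OSC 1338 `artifact://` variant used by some CI terminals). A hypothetical Python equivalent of the inline branch — the sequence only renders in terminals that implement the iTerm2 image protocol:

```python
import base64

def iterm2_inline_image(path: str, height: str = "400px") -> str:
    """Build the OSC 1337 'File=' escape that iTerm2 renders inline."""
    name = base64.b64encode(path.encode()).decode()
    with open(path, "rb") as f:
        data = base64.b64encode(f.read()).decode()
    return f"\033]1337;File=name={name};height={height};inline=1:{data}\a"

# print(iterm2_inline_image("tmp/screenshots/failures_test_login.png"))
```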
PHP | PHP | fix issues with '#first' as a url | d984ceb3a8a82bd7c34c7ff0bfe01efca17984cc | <ide><path>lib/Cake/Routing/Router.php
<ide> public static function url($url = null, $options = array()) {
<ide> $options = array();
<ide> }
<ide> $urlType = gettype($url);
<del> $hasColonSlash = $hasLeadingSlash = $isJavascript = $isMailto = false;
<add> $hasColonSlash = $hasLeadingSlash = $plainString = false;
<ide>
<ide> if ($urlType === 'string') {
<del> $isJavascript = strpos($url, 'javascript:') === 0;
<del> $isMailto = strpos($url, 'mailto:') === 0;
<add> $plainString = (
<add> strpos($url, 'javascript:') === 0 ||
<add> strpos($url, 'mailto:') === 0 ||
<add> strpos($url, '#') === 0
<add> );
<ide> $hasColonSlash = strpos($url, '://') !== false;
<ide> $hasLeadingSlash = isset($url[0]) ? $url[0] === '/' : false;
<ide> }
<ide> public static function url($url = null, $options = array()) {
<ide> $urlType === 'string' &&
<ide> strpos($url, ':') !== false &&
<ide> strpos($url, '/') === false &&
<del> !$isMailto &&
<del> !$isJavascript
<add> !$plainString
<ide> ) {
<ide> $url = self::_splitName($url, $options);
<ide> $urlType = 'array';
<ide> public static function url($url = null, $options = array()) {
<ide> $urlType === 'string' &&
<ide> !$hasLeadingSlash &&
<ide> !$hasColonSlash &&
<del> !$isMailto &&
<del> !$isJavascript
<add> !$plainString
<ide> ) {
<ide> // named route.
<ide> $route = self::$_routes->get($url);
<ide> public static function url($url = null, $options = array()) {
<ide> $output = self::$_routes->match($url);
<ide> } else {
<ide> // String urls.
<del> if (
<del> ($hasColonSlash || $isJavascript || $isMailto) ||
<del> (!strncmp($url, '#', 1))
<del> ) {
<add> if ($hasColonSlash || $plainString) {
<ide> return $url;
<ide> }
<ide> if ($hasLeadingSlash) {
<ide><path>lib/Cake/Test/TestCase/Routing/RouterTest.php
<ide> public function testUrlGenerationWithUrlFilter() {
<ide> }
<ide>
<ide> /**
<del> * Test that strings starting with mailto: etc are handled correctly.
<add> * Test that plain strings urls work
<ide> *
<ide> * @return void
<ide> */
<del> public function testUrlGenerationMailtoAndJavascript() {
<add> public function testUrlGenerationPlainString() {
<ide> $mailto = 'mailto:[email protected]';
<ide> $result = Router::url($mailto);
<ide> $this->assertEquals($mailto, $result);
<ide>
<ide> $js = 'javascript:alert("hi")';
<ide> $result = Router::url($js);
<ide> $this->assertEquals($js, $result);
<add>
<add> $hash = '#first';
<add> $result = Router::url($hash);
<add> $this->assertEquals($hash, $result);
<ide> }
<ide>
<ide> /** | 2 |
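The fix folds the fragment-only case (`#first`) into the same `$plainString` predicate that already short-circuits `javascript:` and `mailto:` URLs, so all three are returned untouched before any reverse routing runs. The predicate as a standalone Python sketch:

```python
def plain_string(url: str) -> bool:
    """True for URLs the router should pass through without route matching."""
    return url.startswith(("javascript:", "mailto:", "#"))

for u in ("mailto:[email protected]", 'javascript:alert("hi")', "#first", "/posts/1"):
    print(u, "->", plain_string(u))
```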
Text | Text | require two approvals to land changes | b01e617dcc4cf3f38f2b4516545aaf14d352f6e4 | <ide><path>COLLABORATOR_GUIDE.md
<ide> comment that explains why the PR does not require a CI run.
<ide>
<ide> ### Code Reviews
<ide>
<del>All pull requests must be reviewed and accepted by a Collaborator with
<del>sufficient expertise who is able to take full responsibility for the
<del>change. In the case of pull requests proposed by an existing
<del>Collaborator, an additional Collaborator is required for sign-off.
<add>At least two Collaborators must approve a pull request before the pull request
<add>lands. (One Collaborator approval is enough if the pull request has been open
<add>for more than 7 days.) Approving a pull request indicates that the Collaborator
<add>accepts responsibility for the change. Approval must be from Collaborators who
<add>are not authors of the change.
<ide>
<ide> In some cases, it may be necessary to summon a GitHub team to a pull request for
<ide> review by @-mention.
<ide><path>GOVERNANCE.md
<ide> Their privileges include but are not limited to:
<ide> Modifications of the contents of the nodejs/node repository are made on
<ide> a collaborative basis. Anybody with a GitHub account may propose a
<ide> modification via pull request and it will be considered by the project
<del>Collaborators. All pull requests must be reviewed and accepted by a
<del>Collaborator with sufficient expertise who is able to take full
<del>responsibility for the change. In the case of pull requests proposed
<del>by an existing Collaborator, an additional Collaborator is required
<del>for sign-off.
<add>Collaborators.
<add>
<add>At least two Collaborators must approve a pull request before the pull request
<add>lands. (One Collaborator approval is enough if the pull request has been open
<add>for more than 7 days.) Approving a pull request indicates that the Collaborator
<add>accepts responsibility for the change. Approval must be from Collaborators who
<add>are not authors of the change.
<ide>
<ide> If one or more Collaborators oppose a proposed change, then the change cannot
<ide> be accepted unless: | 2 |
Javascript | Javascript | fix typos detected by github.com/client9/misspell | e0204084a03e04103864ab36df9456c99bd4ae1b | <ide><path>packages/react-reconciler/src/ReactFiberCommitWork.js
<ide> export function logError(boundary: Fiber, errorInfo: CapturedValue<mixed>) {
<ide> const suppressLogging = e && e.suppressReactErrorLogging;
<ide> if (!suppressLogging) {
<ide> // Rethrow it from a clean stack because this function is assumed to never throw.
<del> // We can't safely call console.error() here because it could *also* throw if overriden.
<add> // We can't safely call console.error() here because it could *also* throw if overridden.
<ide> // https://github.com/facebook/react/issues/13188
<ide> setTimeout(() => {
<ide> throw e;
<ide><path>packages/react-reconciler/src/ReactFiberScheduler.js
<ide> function renderRoot(
<ide> let msUntilTimeout = nextLatestAbsoluteTimeoutMs - currentTimeMs;
<ide> msUntilTimeout = msUntilTimeout < 0 ? 0 : msUntilTimeout;
<ide>
<del> // TODO: Account for the Just Noticable Difference
<add> // TODO: Account for the Just Noticeable Difference
<ide>
<ide> const rootExpirationTime = root.expirationTime;
<ide> onSuspend(
<ide><path>packages/react-reconciler/src/ReactFiberUnwindWork.js
<ide> function throwException(
<ide> // time. First, find the earliest uncommitted expiration time in the
<ide> // tree, including work that is suspended. Then subtract the offset
<ide> // used to compute an async update's expiration time. This will cause
<del> // high priority (interactive) work to expire earlier than neccessary,
<del> // but we can account for this by adjusting for the Just Noticable
<add> // high priority (interactive) work to expire earlier than necessary,
<add> // but we can account for this by adjusting for the Just Noticeable
<ide> // Difference.
<ide> const earliestExpirationTime = findEarliestOutstandingPriorityLevel(
<ide> root,
<ide><path>packages/react-reconciler/src/__tests__/ReactIncrementalErrorReplay-test.internal.js
<ide> describe('ReactIncrementalErrorReplay-test', () => {
<ide> // Note: this test is fragile and relies on internals.
<ide> // We almost always try to avoid such tests, but here the cost of
<ide> // the list getting out of sync (and causing subtle bugs in rare cases)
<del> // is higher than the cost of maintaing the test.
<add> // is higher than the cost of maintaining the test.
<ide> const {
<ide> // Any Fiber factory function will do.
<ide> createHostRootFiber, | 4 |
PHP | PHP | add support for markdown in slack attachment | dc0b9def6e2e284f09775139b76bdd1577f75ea9 | <ide><path>src/Illuminate/Notifications/Channels/SlackWebhookChannel.php
<ide> protected function attachments(SlackMessage $message)
<ide> 'text' => $attachment->content,
<ide> 'title_link' => $attachment->url,
<ide> 'fields' => $this->fields($attachment),
<add> 'mrkdwn_in' => $attachment->markdown,
<ide> ]);
<ide> })->all();
<ide> }
<ide><path>src/Illuminate/Notifications/Messages/SlackAttachment.php
<ide> class SlackAttachment
<ide> */
<ide> public $fields;
<ide>
<add> /**
<add> * The fields containing markdown.
<add> *
<add> * @var array
<add> */
<add> public $markdown;
<add>
<ide> /**
<ide> * Set the title of the attachment.
<ide> *
<ide> public function fields(array $fields)
<ide>
<ide> return $this;
<ide> }
<add>
<add> /**
<add> * Set the fields containing markdown.
<add> *
<add> * @param array $fields
<add> * @return $this
<add> */
<add> public function markdown(array $fields)
<add> {
<add> $this->markdown = $fields;
<add>
<add> return $this;
<add> }
<ide> } | 2 |
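`mrkdwn_in` is the field in Slack's legacy attachment payload that lists which attachment parts (`text`, `pretext`, `fields`) Slack should parse as markdown. A hypothetical payload mirroring what the channel now produces:

```python
import json

payload = {
    "attachments": [
        {
            "title": "Deploy finished",
            "text": "*All* services restarted in `42s`.",
            "fields": [{"title": "Environment", "value": "_production_", "short": True}],
            # ask Slack to render markdown in these attachment parts
            "mrkdwn_in": ["text", "fields"],
        }
    ]
}
print(json.dumps(payload, indent=2))
```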
Text | Text | remove div_for from guides [ci skip] | d3da2080487391ee5a28f3167c21084107858f52 | <ide><path>guides/source/action_view_overview.md
<ide> You can also render a block of code within a partial layout instead of calling `
<ide>
<ide> ```html+erb
<ide> <% render(layout: 'box', locals: { article: @article }) do %>
<del> <%= div_for(article) do %>
<add> <div>
<ide> <p><%= article.body %></p>
<del> <% end %>
<add> </div>
<ide> <% end %>
<ide> ```
<ide> | 1 |
Javascript | Javascript | remove null checks that always evaluate to false | ea719ae805a032505074c01c70d12e951c633aad | <ide><path>src/core/jpg.js
<ide> var JpegImage = (function jpegImage() {
<ide>
<ide> function decodeHuffman(tree) {
<ide> var node = tree;
<del> var bit;
<del> while ((bit = readBit()) !== null) {
<del> node = node[bit];
<add> while (true) {
<add> node = node[readBit()];
<ide> if (typeof node === 'number') {
<ide> return node;
<ide> }
<ide> if (typeof node !== 'object') {
<ide> throw 'invalid huffman sequence';
<ide> }
<ide> }
<del> return null;
<ide> }
<ide>
<ide> function receive(length) {
<ide> var n = 0;
<ide> while (length > 0) {
<del> var bit = readBit();
<del> if (bit === null) {
<del> return;
<del> }
<del> n = (n << 1) | bit;
<add> n = (n << 1) | readBit();
<ide> length--;
<ide> }
<ide> return n;
<ide> var JpegImage = (function jpegImage() {
<ide> }
<ide> }
<ide>
<del> // decodeTransform will contains pairs of multiplier (-256..256) and
<del> // additive
<add> // decodeTransform contains pairs of multiplier (-256..256) and additive
<ide> var transform = this.decodeTransform;
<ide> if (transform) {
<ide> for (i = 0; i < dataLength;) { | 1 |
Javascript | Javascript | log unknown props only when we have a match | 4ed7b85ed8296259c61eeb4747a067afb35d4f52 | <ide><path>src/dom/DOMPropertyOperations.js
<ide> if (__DEV__) {
<ide> }
<ide>
<ide> warnedProperties[name] = true;
<del> var message = 'Unknown DOM property ' + name + '.';
<ide> var lowerCasedName = name.toLowerCase();
<ide>
<ide> // data-* attributes should be lowercase; suggest the lowercase version
<ide> var standardName = DOMProperty.isCustomAttribute(lowerCasedName) ?
<ide> lowerCasedName : DOMProperty.getPossibleStandardName[lowerCasedName];
<add>
<add> // For now, only warn when we have a suggested correction. This prevents
<add> // logging too much when using transferPropsTo.
<ide> if (standardName != null) {
<del> message += ' Did you mean ' + standardName + '?';
<add> console.warn(
<add> 'Unknown DOM property ' + name + '. Did you mean ' + standardName + '?'
<add> );
<ide> }
<ide>
<del> console.warn(message);
<ide> };
<ide> }
<ide>
<ide><path>src/dom/__tests__/DOMPropertyOperations-test.js
<ide> describe('DOMPropertyOperations', function() {
<ide>
<ide> describe('injectDOMPropertyConfig', function() {
<ide> it('should support custom attributes', function() {
<del> spyOn(console, 'warn');
<del>
<ide> // foobar does not exist yet
<ide> expect(DOMPropertyOperations.createMarkupForProperty(
<ide> 'foobar',
<ide> 'simple'
<ide> )).toBe(null);
<ide>
<del> expect(console.warn.argsForCall.length).toBe(1);
<del>
<ide> // foo-* does not exist yet
<ide> expect(DOMPropertyOperations.createMarkupForProperty(
<ide> 'foo-xyz',
<ide> 'simple'
<ide> )).toBe(null);
<ide>
<del> expect(console.warn.argsForCall.length).toBe(2);
<del>
<ide> // inject foobar DOM property
<ide> DOMProperty.injection.injectDOMPropertyConfig({
<ide> isCustomAttribute: function(name) { | 2 |
Python | Python | add a version switch when importing mapping | 7846aa339bb66e576dbdf474d9cc896962b70cc8 | <ide><path>celery/app/amqp.py
<ide> from __future__ import absolute_import, unicode_literals
<ide>
<ide> import numbers
<del>from collections import Mapping, namedtuple
<add>from collections import namedtuple
<ide> from datetime import timedelta
<ide> from weakref import WeakValueDictionary
<ide>
<ide>
<ide> from . import routes as _routes
<ide>
<add>try:
<add> from collections.abc import Mapping
<add>except ImportError:
<add> # TODO: Remove this when we drop Python 2.7 support
<add> from collections import Mapping
<add>
<ide> __all__ = ('AMQP', 'Queues', 'task_message')
<ide>
<ide> #: earliest date supported by time.mktime. | 1 |
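The shim keeps a single `Mapping` name working across interpreters: the ABCs gained their `collections.abc` home in Python 3.3 and the old `collections` aliases were removed in 3.10, while Python 2.7 only has `collections.Mapping`. The same guard in isolation, with a hypothetical normalization in the spirit of the routing code:

```python
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:                      # Python 2.7 fallback
    from collections import Mapping

def route_for(options):
    # distinguish a queue-options mapping from a plain queue name
    if isinstance(options, Mapping):
        return dict(options)
    return {"queue": str(options)}

print(route_for({"queue": "default", "priority": 5}))
print(route_for("default"))
```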
Javascript | Javascript | fix performance issue | c862c0348530824bfacef8eadfaf448120d91664 | <ide><path>lib/tls.js
<ide> CryptoStream.prototype._read = function read(size) {
<ide> if (this.ondata) {
<ide> this.ondata(pool, start, start + bytesRead);
<ide>
<del> // Deceive streams2
<del> var self = this;
<del>
<del> setImmediate(function() {
<del> // Force state.reading to set to false
<del> self.push('');
<add> // Force state.reading to set to false
<add> this.push('');
<ide>
<del> // Try reading more, we most likely have some data
<del> self.read(0);
<del> });
<add> // Try reading more, we most likely have some data
<add> this.read(0);
<ide> } else {
<ide> this.push(pool.slice(start, start + bytesRead));
<ide> } | 1 |
Text | Text | change image sizes docs to use em instead of px | 815abc5e05fe1230922b06b62aca287ce44da41e | <ide><path>docs/api-reference/next/future/image.md
<ide> const Example = () => (
<ide> <Image
<ide> src="/example.png"
<ide> layout="fill"
<del> sizes="(min-width: 75em) 33vw,
<del> (min-width: 48em) 50vw,
<add> sizes="(min-width: 1200px) 33vw,
<add> (min-width: 768px) 50vw,
<ide> 100vw"
<ide> />
<ide> </div>
<ide><path>docs/api-reference/next/image.md
<ide> const Example = () => (
<ide> <Image
<ide> src="/example.png"
<ide> layout="fill"
<del> sizes="(min-width: 75em) 33vw,
<del> (min-width: 48em) 50vw,
<add> sizes="(min-width: 1200px) 33vw,
<add> (min-width: 768px) 50vw,
<ide> 100vw"
<ide> />
<ide> </div> | 2 |
Ruby | Ruby | fix schema dumper for infinite dates in postgresql | 248b5851fdc7113226bc316472424da590787a86 | <ide><path>activerecord/lib/active_record/connection_adapters/postgresql/oid/date.rb
<ide> def cast_value(value)
<ide> super
<ide> end
<ide> end
<add>
<add> def type_cast_for_schema(value)
<add> case value
<add> when ::Float::INFINITY then "::Float::INFINITY"
<add> when -::Float::INFINITY then "-::Float::INFINITY"
<add> else super
<add> end
<add> end
<ide> end
<ide> end
<ide> end
<ide><path>activerecord/test/cases/schema_dumper_test.rb
<ide> class SchemaDumperDefaultsTest < ActiveRecord::TestCase
<ide> t.float :float_with_nan_default, default: Float::NAN
<ide> t.datetime :beginning_of_time, default: "-infinity"
<ide> t.datetime :end_of_time, default: "infinity"
<add> t.date :date_with_neg_inf_default, default: -::Float::INFINITY
<add> t.date :date_with_pos_inf_default, default: ::Float::INFINITY
<ide> end
<ide> end
<ide> end
<ide> def test_schema_dump_with_column_infinity_default
<ide> assert_match %r{t\.float\s+"float_with_nan_default",\s+default: ::Float::NAN}, output
<ide> assert_match %r{t\.datetime\s+"beginning_of_time",\s+default: -::Float::INFINITY}, output
<ide> assert_match %r{t\.datetime\s+"end_of_time",\s+default: ::Float::INFINITY}, output
<add> assert_match %r{t\.date\s+"date_with_neg_inf_default",\s+default: -::Float::INFINITY}, output
<add> assert_match %r{t\.date\s+"date_with_pos_inf_default",\s+default: ::Float::INFINITY}, output
<ide> end
<ide> end | 2 |
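`type_cast_for_schema` controls how a column default is rendered into `schema.rb`; special-casing the IEEE infinities makes the dumper emit the Ruby literals `::Float::INFINITY` / `-::Float::INFINITY` instead of an unparseable string. A sketch of the same dispatch in Python:

```python
import math

def type_cast_for_schema(value):
    """Render a column default as source code for a schema dump."""
    if value == math.inf:
        return "::Float::INFINITY"
    if value == -math.inf:
        return "-::Float::INFINITY"
    return repr(value)

for v in (math.inf, -math.inf, "2020-01-01"):
    print(type_cast_for_schema(v))
```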
Text | Text | add history for readline `crlfdelay` option | b0c5f7d24de3df722a6c33eb19ead0a09d63b867 | <ide><path>doc/api/readline.md
<ide> the current position of the cursor down.
<ide> <!-- YAML
<ide> added: v0.1.98
<ide> changes:
<add> - version: v8.3.0, 6.11.4
<add> pr-url: https://github.com/nodejs/node/pull/13497
<add> description: Remove max limit of `crlfDelay` option.
<add> - version: v6.6.0
<add> pr-url: https://github.com/nodejs/node/pull/8109
<add> description: The `crlfDelay` option is supported now.
<ide> - version: v6.3.0
<ide> pr-url: https://github.com/nodejs/node/pull/7125
<ide> description: The `prompt` option is supported now. | 1 |
Python | Python | fix a small error in summarization example | eb3e072a3b24806c72e35e9246e1cf972de1c77f | <ide><path>examples/pytorch/summarization/run_summarization_no_trainer.py
<ide> def main():
<ide> # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
<ide> # download model & vocab.
<ide> if args.config_name:
<del> config = AutoConfig.from_pretrained(args.model_name_or_path)
<add> config = AutoConfig.from_pretrained(args.config_name)
<ide> elif args.model_name_or_path:
<ide> config = AutoConfig.from_pretrained(args.model_name_or_path)
<ide> else: | 1 |
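An aside on the one-line fix above: with the old code, a run that passed a separate `--config_name` silently loaded the config from `--model_name_or_path` instead. A minimal sketch of the corrected branch, using placeholder model names:

```python
from transformers import AutoConfig

# Hypothetical stand-ins for the script's parsed CLI arguments.
config_name = "t5-small"        # --config_name
model_name_or_path = "t5-base"  # --model_name_or_path

if config_name:
    # The fix: honor the explicitly requested config...
    config = AutoConfig.from_pretrained(config_name)
elif model_name_or_path:
    # ...instead of always falling back to the model path.
    config = AutoConfig.from_pretrained(model_name_or_path)

print(config.model_type)  # "t5"
```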
Java | Java | remove viewisdescendantof from android code | cc79e971a3efa64d141284cb386cb7532dcd4322 | <ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/UIImplementation.java
<ide> public void findSubviewIn(int reactTag, float targetX, float targetY, Callback c
<ide> mOperationsQueue.enqueueFindTargetForTouch(reactTag, targetX, targetY, callback);
<ide> }
<ide>
<del> /**
<del> * Check if the first shadow node is the descendant of the second shadow node
<del> */
<del> public void viewIsDescendantOf(
<del> final int reactTag,
<del> final int ancestorReactTag,
<del> final Callback callback) {
<del> ReactShadowNode node = mShadowNodeRegistry.getNode(reactTag);
<del> ReactShadowNode ancestorNode = mShadowNodeRegistry.getNode(ancestorReactTag);
<del> if (node == null || ancestorNode == null) {
<del> callback.invoke(false);
<del> return;
<del> }
<del> callback.invoke(node.isDescendantOf(ancestorNode));
<del> }
<del>
<ide> /**
<ide> * Determines the location on screen, width, and height of the given view relative to the root
<ide> * view and returns the values via an async callback.
<ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/UIManagerModule.java
<ide> public void findSubviewIn(
<ide> callback);
<ide> }
<ide>
<del> /** Check if the first shadow node is the descendant of the second shadow node */
<del> @ReactMethod
<del> public void viewIsDescendantOf(
<del> final int reactTag, final int ancestorReactTag, final Callback callback) {
<del> mUIImplementation.viewIsDescendantOf(reactTag, ancestorReactTag, callback);
<del> }
<del>
<ide> @Override
<ide> @ReactMethod
<ide> public void setJSResponder(int reactTag, boolean blockNativeResponder) { | 2 |
Ruby | Ruby | add readable disk space, numbers methods | a96a9004f4600ed71b0d9f6a22fbc9b2a2fe41f8 | <ide><path>Library/Homebrew/utils.rb
<ide> def private_repo?(user, repo)
<ide> open(uri) { |json| json["private"] }
<ide> end
<ide> end
<add>
<add>def disk_usage_readable(size_in_bytes)
<add> len = size_in_bytes.to_s.length
<add> case
<add> when len > 9
<add> sym, unit = ["G", 1_073_741_824]
<add> when len > 6
<add> sym, unit = ["M", 1_048_576]
<add> when len > 3
<add> sym, unit = ["K", 1_024]
<add> else
<add> sym, unit = ["B", 1]
<add> end
<add>
<add> num = "%.1f" % [size_in_bytes.to_f / unit]
<add> # check whether the rounded value has a zero after decimal point,
<add> # if true, then display just the integer value.
<add> if num.split(".").last.to_i == 0
<add> "%d#{sym}" % num.to_i
<add> else
<add> "#{num}#{sym}"
<add> end
<add>end
<add>
<add>def number_readable(number)
<add> numstr = number.to_i.to_s
<add> (numstr.size - 3).step(1, -3) { |i| numstr.insert(i, ",") }
<add> numstr
<add>end | 1 |
Python | Python | fix tensorflow t5 with int64 input | 707105290b0825c8fd01c977d33ec8f8833c937f | <ide><path>src/transformers/models/t5/modeling_tf_t5.py
<ide> def _shift_right(self, input_ids):
<ide> ), "self.model.config.decoder_start_token_id has to be defined. In TF T5 it is usually set to the pad_token_id. See T5 docs for more information"
<ide>
<ide> start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
<add> start_tokens = tf.cast(start_tokens, input_ids.dtype) # Ensure compatible dtypes for concatenation
<ide> shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
<ide>
<ide> assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
<ide> # replace possible -100 values in labels by `pad_token_id`
<ide> shifted_input_ids = tf.where(
<del> shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
<add> shifted_input_ids == -100,
<add> tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype),
<add> shifted_input_ids,
<ide> )
<ide>
<ide> # "Verify that `labels` has only positive values and -100"
<del> assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0))
<add> assert_gte0 = tf.debugging.assert_greater_equal(
<add> shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype)
<add> )
<ide>
<ide> # Make sure the assertion op is called by wrapping the result in an identity no-op
<ide> with tf.control_dependencies([assert_gte0]): | 1 |
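A minimal sketch of the dtype clash this patch guards against, assuming TensorFlow 2.x: `tf.fill` yields int32 by default, so shifting int64 `input_ids` (as commonly produced by `tf.data` pipelines) failed before the casts were added.

```python
import tensorflow as tf

input_ids = tf.constant([[5, 6, 7]], dtype=tf.int64)
decoder_start_token_id = 0

start_tokens = tf.fill((1, 1), decoder_start_token_id)  # int32 by default
start_tokens = tf.cast(start_tokens, input_ids.dtype)   # the fix: align dtypes

# Without the cast, concat raises InvalidArgumentError (int32 vs int64).
shifted = tf.concat([start_tokens, input_ids[:, :-1]], axis=-1)
print(shifted)  # tf.Tensor([[0 5 6]], shape=(1, 3), dtype=int64)
```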
Javascript | Javascript | upgrade nodehotupdatechunktemplateplugin to es6 | 5aa522d5be9a207bfce7f2c97eed2d8ec6b77698 | <ide><path>lib/node/NodeHotUpdateChunkTemplatePlugin.js
<ide> MIT License http://www.opensource.org/licenses/mit-license.php
<ide> Author Tobias Koppers @sokra
<ide> */
<del>var ConcatSource = require("webpack-sources").ConcatSource;
<add>"use strict";
<ide>
<del>function NodeHotUpdateChunkTemplatePlugin() {}
<del>module.exports = NodeHotUpdateChunkTemplatePlugin;
<add>const ConcatSource = require("webpack-sources").ConcatSource;
<add>
<add>class NodeHotUpdateChunkTemplatePlugin {
<ide>
<del>NodeHotUpdateChunkTemplatePlugin.prototype.apply = function(hotUpdateChunkTemplate) {
<del> hotUpdateChunkTemplate.plugin("render", function(modulesSource, modules, removedModules, hash, id) {
<del> var source = new ConcatSource();
<del> source.add("exports.id = " + JSON.stringify(id) + ";\nexports.modules = ");
<del> source.add(modulesSource);
<del> source.add(";");
<del> return source;
<del> });
<del> hotUpdateChunkTemplate.plugin("hash", function(hash) {
<del> hash.update("NodeHotUpdateChunkTemplatePlugin");
<del> hash.update("3");
<del> hash.update(this.outputOptions.hotUpdateFunction + "");
<del> hash.update(this.outputOptions.library + "");
<del> });
<del>};
<add> apply(hotUpdateChunkTemplate) {
<add> hotUpdateChunkTemplate.plugin("render", (modulesSource, modules, removedModules, hash, id) => {
<add> const source = new ConcatSource();
<add> source.add("exports.id = " + JSON.stringify(id) + ";\nexports.modules = ");
<add> source.add(modulesSource);
<add> source.add(";");
<add> return source;
<add> });
<add> hotUpdateChunkTemplate.plugin("hash", function(hash) {
<add> hash.update("NodeHotUpdateChunkTemplatePlugin");
<add> hash.update("3");
<add> hash.update(this.outputOptions.hotUpdateFunction + "");
<add> hash.update(this.outputOptions.library + "");
<add> });
<add> }
<add>}
<add>module.exports = NodeHotUpdateChunkTemplatePlugin; | 1 |
Python | Python | use zip instead of range in piecewise | 3e261be81a5f2645ffc4c1e79a5dccc65a216669 | <ide><path>numpy/lib/function_base.py
<ide> def piecewise(x, condlist, funclist, *args, **kw):
<ide> )
<ide>
<ide> y = zeros(x.shape, x.dtype)
<del> for k in range(n):
<del> item = funclist[k]
<del> if not isinstance(item, collections.abc.Callable):
<del> y[condlist[k]] = item
<add> for cond, func in zip(condlist, funclist):
<add> if not isinstance(func, collections.abc.Callable):
<add> y[cond] = func
<ide> else:
<del> vals = x[condlist[k]]
<add> vals = x[cond]
<ide> if vals.size > 0:
<del> y[condlist[k]] = item(vals, *args, **kw)
<add> y[cond] = func(vals, *args, **kw)
<ide>
<ide> return y
<ide>
<ide> def select(condlist, choicelist, default=0):
<ide> choicelist = np.broadcast_arrays(*choicelist)
<ide>
<ide> # If cond array is not an ndarray in boolean format or scalar bool, abort.
<del> for i in range(len(condlist)):
<del> cond = condlist[i]
<add> for i, cond in enumerate(condlist):
<ide> if cond.dtype.type is not np.bool_:
<ide> raise TypeError(
<ide> 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) | 1 |
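For reference, a small `np.piecewise` usage example; the refactor above only changes how the condition/function pairs are iterated, not the behavior:

```python
import numpy as np

x = np.linspace(-2.0, 2.0, 5)  # [-2., -1., 0., 1., 2.]
# Callables receive x[cond]; plain values are assigned directly.
y = np.piecewise(x, [x < 0, x >= 0], [lambda v: -v, lambda v: v])
print(y)  # [2. 1. 0. 1. 2.]
```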
Python | Python | simplify the annotations of `np.version` | 0f6feaa9c619251a0c62fb7baa49c369069340d9 | <ide><path>numpy/version.py
<add>from __future__ import annotations
<add>
<ide> from ._version import get_versions
<ide>
<ide> __ALL__ = ['version', '__version__', 'full_version', 'git_revision', 'release']
<ide>
<del>vinfo = get_versions()
<del>version: str = vinfo["version"]
<add>vinfo: dict[str, str] = get_versions()
<add>version = vinfo["version"]
<ide> __version__ = vinfo.get("closest-tag", vinfo["version"])
<del>full_version: str = vinfo['version']
<del>git_revision: str = vinfo['full-revisionid']
<add>full_version = vinfo['version']
<add>git_revision = vinfo['full-revisionid']
<ide> release = 'dev0' not in version and '+' not in version
<del>short_version: str = vinfo['version'].split("+")[0]
<add>short_version = vinfo['version'].split("+")[0]
<ide>
<ide> del get_versions, vinfo | 1 |
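The module's public attributes are unchanged by the annotation cleanup; a quick check against any installed NumPy build:

```python
import numpy.version as v

# All strings except `release`, which is a bool.
print(v.version, v.short_version, v.release)
print(v.git_revision[:12])  # abbreviated build commit hash
```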
Java | Java | fix usage of java8 api | 37c04bd9c8d7a1269d1088af935a5571cb6887bc | <ide><path>spring-context/src/main/java/org/springframework/context/event/ApplicationListenerMethodAdapter.java
<ide> private String getInvocationErrorMessage(Object bean, String message, Object[] r
<ide>
<ide>
<ide> private ResolvableType resolveDeclaredEventType() {
<del> Parameter[] parameters = this.method.getParameters();
<del> if (parameters.length != 1) {
<add> int count = this.method.getParameterTypes().length;
<add> if (count != 1) {
<ide> throw new IllegalStateException("Only one parameter is allowed " +
<ide> "for event listener method: " + method);
<ide> } | 1 |
Javascript | Javascript | remove unused vars from http/https tests | bc5b89f9ca467f0bea81ef5af19e7db3f75f234c | <ide><path>test/parallel/test-http-client-timeout-agent.js
<ide> require('../common');
<ide> var assert = require('assert');
<ide> var http = require('http');
<ide>
<del>var request_number = 0;
<ide> var requests_sent = 0;
<ide> var requests_done = 0;
<ide> var options = {
<ide> var server = http.createServer(function(req, res) {
<ide> res.write(reqid.toString());
<ide> res.end();
<ide> }
<del> request_number += 1;
<ide> });
<ide>
<ide> server.listen(0, options.host, function() {
<ide><path>test/parallel/test-http-pipeline-flood.js
<ide> switch (process.argv[2]) {
<ide> function parent() {
<ide> const http = require('http');
<ide> const bigResponse = Buffer.alloc(10240, 'x');
<del> var requests = 0;
<ide> var connections = 0;
<ide> var backloggedReqs = 0;
<ide>
<ide> const server = http.createServer(function(req, res) {
<del> requests++;
<ide> res.setHeader('content-length', bigResponse.length);
<ide> if (!res.write(bigResponse)) {
<ide> if (backloggedReqs === 0) {
<ide><path>test/parallel/test-http-upgrade-agent.js
<ide> var net = require('net');
<ide>
<ide> // Create a TCP server
<ide> var srv = net.createServer(function(c) {
<del> var data = '';
<ide> c.on('data', function(d) {
<del> data += d.toString('utf8');
<del>
<ide> c.write('HTTP/1.1 101\r\n');
<ide> c.write('hello: world\r\n');
<ide> c.write('connection: upgrade\r\n');
<ide><path>test/parallel/test-http-upgrade-client.js
<ide> var net = require('net');
<ide>
<ide> // Create a TCP server
<ide> var srv = net.createServer(function(c) {
<del> var data = '';
<ide> c.on('data', function(d) {
<del> data += d.toString('utf8');
<del>
<ide> c.write('HTTP/1.1 101\r\n');
<ide> c.write('hello: world\r\n');
<ide> c.write('connection: upgrade\r\n');
<ide><path>test/parallel/test-https-foafssl.js
<ide> var options = {
<ide> requestCert: true
<ide> };
<ide>
<del>var reqCount = 0;
<ide> var CRLF = '\r\n';
<ide> var body = 'hello world\n';
<ide> var cert;
<ide> var modulus;
<ide> var exponent;
<ide>
<ide> var server = https.createServer(options, function(req, res) {
<del> reqCount++;
<ide> console.log('got request');
<ide>
<ide> cert = req.connection.getPeerCertificate(); | 5 |
Python | Python | add method get_record | 67aed49f927bc92bb2e4fd2e01fbd8a787115e35 | <ide><path>libcloud/dns/drivers/rcodezero.py
<ide> def get_zone(self, zone_id):
<ide>
<ide> return self._to_zone(response.object)
<ide>
<add> def get_record(self, zone_id, record_id):
<add> """
<add> Return a Record instance.
<add>
<add> :param zone_id: ID of the required zone
<add> :type zone_id: ``str``
<add>
<add> :param record_id: ID of the required record
<add> :type record_id: ``str``
<add>
<add> :rtype: :class:`Record`
<add> """
<add> records = self.list_records(Zone(id=zone_id, domain=zone_id,
<add> type=None, ttl=None, driver=self, extra=None))
<add>
<add>        foundrecords = list(filter(lambda x: x.id == record_id, records))
<add>
<add>        if len(foundrecords) > 0:
<add>            return foundrecords[0]
<add>        else:
<add>            return None
<add>
<ide> def list_records(self, zone):
<ide> """
<ide> Return a list of all record objects for the given zone. | 1 |
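A note on why `filter` is materialized with `list` in `get_record` above: on Python 3, `filter` returns a lazy iterator that supports neither `len()` nor indexing. The `Record` stand-in below is hypothetical, just to keep the sketch self-contained:

```python
from collections import namedtuple

Record = namedtuple("Record", "id name")  # stand-in for libcloud's Record
records = [Record("1", "www"), Record("2", "mail")]

lazy = filter(lambda r: r.id == "2", records)
# len(lazy)  # TypeError on Python 3: object of type 'filter' has no len()

found = list(filter(lambda r: r.id == "2", records))  # materialize first
print(found[0].name if found else None)  # mail
```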
Python | Python | maintain support for empty docbin span groups | 31a5d99efa39ad14fdefe4b48bb45d3f4d97c8fa | <ide><path>spacy/tokens/_serialize.py
<ide> def get_docs(self, vocab: Vocab) -> Iterator[Doc]:
<ide> doc = Doc(vocab, words=tokens[:, orth_col], spaces=spaces) # type: ignore
<ide> doc = doc.from_array(self.attrs, tokens) # type: ignore
<ide> doc.cats = self.cats[i]
<del> if self.span_groups[i] != SpanGroups._EMPTY_BYTES:
<add> # backwards-compatibility: may be b'' or serialized empty list
<add> if self.span_groups[i] and self.span_groups[i] != SpanGroups._EMPTY_BYTES:
<ide> doc.spans.from_bytes(self.span_groups[i])
<ide> else:
<ide> doc.spans.clear() | 1 |
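A round-trip sketch of the case being guarded, assuming a spaCy v3 install: a `DocBin` payload may carry `b''` for span groups (older serializations) or an explicitly empty group, and both should deserialize to empty `doc.spans` without raising:

```python
import spacy
from spacy.tokens import DocBin

nlp = spacy.blank("en")
doc = nlp("hello world")
doc.spans["sc"] = []  # explicitly empty span group

payload = DocBin(docs=[doc]).to_bytes()
restored = list(DocBin().from_bytes(payload).get_docs(nlp.vocab))[0]
print(len(restored.spans.get("sc", [])))  # 0, deserializes cleanly
```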
Javascript | Javascript | simplify emptyview and itemviewclass lookups | faed7a64417f5712ad356289a3c0b2068c50bf4e | <ide><path>packages/ember-views/lib/views/collection_view.js
<ide> Ember.CollectionView = Ember.ContainerView.extend(/** @scope Ember.CollectionVie
<ide> addedViews = [], view, item, idx, len;
<ide>
<ide> if ('string' === typeof itemViewClass) {
<del> var newItemViewClass = get(itemViewClass);
<del> if (!newItemViewClass) {
<del> newItemViewClass = this.container.lookupFactory('view:' + itemViewClass);
<del> }
<del> itemViewClass = newItemViewClass;
<add> itemViewClass = get(itemViewClass) || this.container.lookupFactory('view:' + itemViewClass);
<ide> }
<ide>
<ide> Ember.assert(fmt("itemViewClass must be a subclass of Ember.View, not %@", [itemViewClass]), Ember.View.detect(itemViewClass));
<ide>
<ide> len = content ? get(content, 'length') : 0;
<add>
<ide> if (len) {
<ide> for (idx = start; idx < start+added; idx++) {
<ide> item = content.objectAt(idx);
<ide> Ember.CollectionView = Ember.ContainerView.extend(/** @scope Ember.CollectionVie
<ide> if (!emptyView) { return; }
<ide>
<ide> if ('string' === typeof emptyView) {
<del> var newEmptyView = get(emptyView);
<del> if (!newEmptyView) {
<del> newEmptyView = this.container.lookupFactory('view:' + emptyView);
<del> }
<del> emptyView = newEmptyView;
<add> emptyView = get(emptyView) || this.container.lookupFactory('view:' + emptyView);
<ide> }
<ide>
<ide> var isClass = Ember.CoreView.detect(emptyView); | 1 |
Javascript | Javascript | add rudimentary test renderer | 50982cea99fcb12d9a2f4bda82ab6735b5fc0149 | <ide><path>src/renderers/testing/ReactTestMount.js
<add>/**
<add> * Copyright (c) 2015-present, Facebook, Inc.
<add> * All rights reserved.
<add> *
<add> * This source code is licensed under the BSD-style license found in the
<add> * LICENSE file in the root directory of this source tree. An additional grant
<add> * of patent rights can be found in the PATENTS file in the same directory.
<add> *
<add> * @providesModule ReactTestMount
<add> * @flow
<add> */
<add>'use strict';
<add>
<add>var ReactElement = require('ReactElement');
<add>var ReactInstrumentation = require('ReactInstrumentation');
<add>var ReactReconciler = require('ReactReconciler');
<add>var ReactUpdates = require('ReactUpdates');
<add>
<add>var emptyObject = require('emptyObject');
<add>var getHostComponentFromComposite = require('getHostComponentFromComposite');
<add>var instantiateReactComponent = require('instantiateReactComponent');
<add>
<add>/**
<add> * Temporary (?) hack so that we can store all top-level pending updates on
<add> * composites instead of having to worry about different types of components
<add> * here.
<add> */
<add>var TopLevelWrapper = function() {};
<add>TopLevelWrapper.prototype.isReactComponent = {};
<add>if (__DEV__) {
<add> TopLevelWrapper.displayName = 'TopLevelWrapper';
<add>}
<add>TopLevelWrapper.prototype.render = function() {
<add> // this.props is actually a ReactElement
<add> return this.props;
<add>};
<add>
<add>/**
<add> * Mounts this component and inserts it into the DOM.
<add> *
<add> * @param {ReactComponent} componentInstance The instance to mount.
<add> * @param {number} rootID ID of the root node.
<add> * @param {number} containerTag container element to mount into.
<add> * @param {ReactReconcileTransaction} transaction
<add> */
<add>function mountComponentIntoNode(
<add> componentInstance,
<add> transaction) {
<add> var image = ReactReconciler.mountComponent(
<add> componentInstance,
<add> transaction,
<add> null,
<add> null,
<add> emptyObject
<add> );
<add> componentInstance._renderedComponent._topLevelWrapper = componentInstance;
<add> return image;
<add>}
<add>
<add>/**
<add> * Batched mount.
<add> *
<add> * @param {ReactComponent} componentInstance The instance to mount.
<add> * @param {number} rootID ID of the root node.
<add> * @param {number} containerTag container element to mount into.
<add> */
<add>function batchedMountComponentIntoNode(
<add> componentInstance) {
<add> var transaction = ReactUpdates.ReactReconcileTransaction.getPooled();
<add> var image = transaction.perform(
<add> mountComponentIntoNode,
<add> null,
<add> componentInstance,
<add> transaction
<add> );
<add> ReactUpdates.ReactReconcileTransaction.release(transaction);
<add> return image;
<add>}
<add>
<add>var ReactTestInstance = function(component) {
<add> this._component = component;
<add>};
<add>ReactTestInstance.prototype.getInstance = function() {
<add> return this._component._renderedComponent.getPublicInstance();
<add>};
<add>ReactTestInstance.prototype.toJSON = function() {
<add> var inst = getHostComponentFromComposite(this._component);
<add> return inst.toJSON();
<add>};
<add>
<add>/**
<add> * As soon as `ReactMount` is refactored to not rely on the DOM, we can share
<add> * code between the two. For now, we'll hard code the ID logic.
<add> */
<add>var ReactHostMount = {
<add>
<add> render: function(
<add> nextElement: ReactElement
<add> ): ?ReactComponent {
<add> var nextWrappedElement = new ReactElement(
<add> TopLevelWrapper,
<add> null,
<add> null,
<add> null,
<add> null,
<add> null,
<add> nextElement
<add> );
<add>
<add> // var prevComponent = ReactHostMount._instancesByContainerID[containerTag];
<add> // if (prevComponent) {
<add> // var prevWrappedElement = prevComponent._currentElement;
<add> // var prevElement = prevWrappedElement.props;
<add> // if (shouldUpdateReactComponent(prevElement, nextElement)) {
<add> // ReactUpdateQueue.enqueueElementInternal(prevComponent, nextWrappedElement);
<add> // if (callback) {
<add> // ReactUpdateQueue.enqueueCallbackInternal(prevComponent, callback);
<add> // }
<add> // return prevComponent;
<add> // }
<add> // }
<add>
<add> var instance = instantiateReactComponent(nextWrappedElement);
<add>
<add> if (__DEV__) {
<add> // Mute future events from the top level wrapper.
<add> // It is an implementation detail that devtools should not know about.
<add> instance._debugID = 0;
<add>
<add> if (__DEV__) {
<add> ReactInstrumentation.debugTool.onBeginFlush();
<add> }
<add> }
<add>
<add> // The initial render is synchronous but any updates that happen during
<add> // rendering, in componentWillMount or componentDidMount, will be batched
<add> // according to the current batching strategy.
<add>
<add> ReactUpdates.batchedUpdates(
<add> batchedMountComponentIntoNode,
<add> instance
<add> );
<add> if (__DEV__) {
<add> // The instance here is TopLevelWrapper so we report mount for its child.
<add> ReactInstrumentation.debugTool.onMountRootComponent(
<add> instance._renderedComponent._debugID
<add> );
<add> ReactInstrumentation.debugTool.onEndFlush();
<add> }
<add> return new ReactTestInstance(instance);
<add> },
<add>
<add>};
<add>
<add>module.exports = ReactHostMount;
<ide><path>src/renderers/testing/ReactTestReconcileTransaction.js
<add>/**
<add> * Copyright (c) 2015-present, Facebook, Inc.
<add> * All rights reserved.
<add> *
<add> * This source code is licensed under the BSD-style license found in the
<add> * LICENSE file in the root directory of this source tree. An additional grant
<add> * of patent rights can be found in the PATENTS file in the same directory.
<add> *
<add> * @providesModule ReactTestReconcileTransaction
<add> * @flow
<add> */
<add>'use strict';
<add>
<add>var CallbackQueue = require('CallbackQueue');
<add>var PooledClass = require('PooledClass');
<add>var Transaction = require('Transaction');
<add>
<add>/**
<add> * Provides a `CallbackQueue` queue for collecting `onDOMReady` callbacks during
<add> * the performing of the transaction.
<add> */
<add>var ON_DOM_READY_QUEUEING = {
<add> /**
<add> * Initializes the internal `onDOMReady` queue.
<add> */
<add> initialize: function() {
<add> this.reactMountReady.reset();
<add> },
<add>
<add> /**
<add> * After DOM is flushed, invoke all registered `onDOMReady` callbacks.
<add> */
<add> close: function() {
<add> this.reactMountReady.notifyAll();
<add> },
<add>};
<add>
<add>/**
<add> * Executed within the scope of the `Transaction` instance. Consider these as
<add> * being member methods, but with an implied ordering while being isolated from
<add> * each other.
<add> */
<add>var TRANSACTION_WRAPPERS = [ON_DOM_READY_QUEUEING];
<add>
<add>/**
<add> * Currently:
<add> * - The order that these are listed in the transaction is critical:
<add> * - Suppresses events.
<add> * - Restores selection range.
<add> *
<add> * Future:
<add> * - Restore document/overflow scroll positions that were unintentionally
<add> * modified via DOM insertions above the top viewport boundary.
<add> * - Implement/integrate with customized constraint based layout system and keep
<add> * track of which dimensions must be remeasured.
<add> *
<add> * @class ReactTestReconcileTransaction
<add> */
<add>function ReactTestReconcileTransaction() {
<add> this.reinitializeTransaction();
<add> this.reactMountReady = CallbackQueue.getPooled(null);
<add>}
<add>
<add>var Mixin = {
<add> /**
<add> * @see Transaction
<add> * @abstract
<add> * @final
<add> * @return {array<object>} List of operation wrap procedures.
<add> * TODO: convert to array<TransactionWrapper>
<add> */
<add> getTransactionWrappers: function() {
<add> return TRANSACTION_WRAPPERS;
<add> },
<add>
<add> /**
<add> * @return {object} The queue to collect `onDOMReady` callbacks with.
<add> * TODO: convert to ReactMountReady
<add> */
<add> getReactMountReady: function() {
<add> return this.reactMountReady;
<add> },
<add>
<add> /**
<add> * `PooledClass` looks for this, and will invoke this before allowing this
<add> * instance to be reused.
<add> */
<add> destructor: function() {
<add> CallbackQueue.release(this.reactMountReady);
<add> this.reactMountReady = null;
<add> },
<add>};
<add>
<add>Object.assign(
<add> ReactTestReconcileTransaction.prototype,
<add> Transaction.Mixin,
<add> ReactTestReconcileTransaction,
<add> Mixin
<add>);
<add>
<add>PooledClass.addPoolingTo(ReactTestReconcileTransaction);
<add>
<add>module.exports = ReactTestReconcileTransaction;
<ide><path>src/renderers/testing/ReactTestRenderer.js
<add>/**
<add> * Copyright 2013-present, Facebook, Inc.
<add> * All rights reserved.
<add> *
<add> * This source code is licensed under the BSD-style license found in the
<add> * LICENSE file in the root directory of this source tree. An additional grant
<add> * of patent rights can be found in the PATENTS file in the same directory.
<add> *
<add> * @providesModule ReactTestRenderer
<add> */
<add>
<add>'use strict';
<add>
<add>var ReactDefaultBatchingStrategy = require('ReactDefaultBatchingStrategy');
<add>var ReactEmptyComponent = require('ReactEmptyComponent');
<add>var ReactMultiChild = require('ReactMultiChild');
<add>var ReactHostComponent = require('ReactHostComponent');
<add>var ReactTestMount = require('ReactTestMount');
<add>var ReactTestReconcileTransaction = require('ReactTestReconcileTransaction');
<add>var ReactUpdates = require('ReactUpdates');
<add>
<add>var renderSubtreeIntoContainer = require('renderSubtreeIntoContainer');
<add>
<add>/**
<add> * Drill down (through composites and empty components) until we get a native or
<add> * native text component.
<add> *
<add> * This is pretty polymorphic but unavoidable with the current structure we have
<add> * for `_renderedChildren`.
<add> */
<add>function getRenderedHostOrTextFromComponent(component) {
<add> var rendered;
<add> while ((rendered = component._renderedComponent)) {
<add> component = rendered;
<add> }
<add> return component;
<add>}
<add>
<add>
<add>// =============================================================================
<add>
<add>var ReactTestComponent = function(element) {
<add> this._currentElement = element;
<add> this._renderedChildren = null;
<add> this._topLevelWrapper = null;
<add>};
<add>ReactTestComponent.prototype.mountComponent = function(
<add> transaction,
<add> nativeParent,
<add> nativeContainerInfo,
<add> context
<add>) {
<add> var element = this._currentElement;
<add> this.mountChildren(element.props.children, transaction, context);
<add>};
<add>ReactTestComponent.prototype.receiveComponent = function(
<add> nextElement,
<add> transaction,
<add> context
<add>) {
<add> this._currentElement = nextElement;
<add> this.updateChildren(nextElement.props.children, transaction, context);
<add>};
<add>ReactTestComponent.prototype.getHostNode = function() {};
<add>ReactTestComponent.prototype.unmountComponent = function() {};
<add>ReactTestComponent.prototype.toJSON = function() {
<add> var {children, ...props} = this._currentElement.props;
<add> var childrenJSON = [];
<add> for (var key in this._renderedChildren) {
<add> var inst = this._renderedChildren[key];
<add> inst = getRenderedHostOrTextFromComponent(inst);
<add> var json = inst.toJSON();
<add> if (json !== undefined) {
<add> childrenJSON.push(json);
<add> }
<add> }
<add> return {
<add> type: this._currentElement.type,
<add> props: props,
<add> children: childrenJSON.length ? childrenJSON : null,
<add> };
<add>};
<add>Object.assign(ReactTestComponent.prototype, ReactMultiChild.Mixin);
<add>
<add>// =============================================================================
<add>
<add>var ReactTestTextComponent = function(element) {
<add> this._currentElement = element;
<add>};
<add>ReactTestTextComponent.prototype.mountComponent = function() {};
<add>ReactTestTextComponent.prototype.receiveComponent = function(nextElement) {
<add> this._currentElement = nextElement;
<add>};
<add>ReactTestTextComponent.prototype.getHostNode = function() {};
<add>ReactTestTextComponent.prototype.unmountComponent = function() {};
<add>ReactTestTextComponent.prototype.toJSON = function() {
<add> return this._currentElement;
<add>};
<add>
<add>// =============================================================================
<add>
<add>var ReactTestEmptyComponent = function(element) {
<add> this._currentElement = null;
<add>};
<add>ReactTestEmptyComponent.prototype.mountComponent = function() {};
<add>ReactTestEmptyComponent.prototype.receiveComponent = function() {};
<add>ReactTestEmptyComponent.prototype.getHostNode = function() {};
<add>ReactTestEmptyComponent.prototype.unmountComponent = function() {};
<add>ReactTestEmptyComponent.prototype.toJSON = function() {};
<add>
<add>// =============================================================================
<add>
<add>ReactUpdates.injection.injectReconcileTransaction(
<add> ReactTestReconcileTransaction
<add>);
<add>ReactUpdates.injection.injectBatchingStrategy(ReactDefaultBatchingStrategy);
<add>
<add>ReactHostComponent.injection.injectGenericComponentClass(ReactTestComponent);
<add>ReactHostComponent.injection.injectTextComponentClass(ReactTestTextComponent);
<add>ReactEmptyComponent.injection.injectEmptyComponentFactory(function() {
<add> return new ReactTestEmptyComponent();
<add>});
<add>
<add>var ReactTestRenderer = {
<add> create: ReactTestMount.render,
<add>
<add> /* eslint-disable camelcase */
<add> unstable_batchedUpdates: ReactUpdates.batchedUpdates,
<add> unstable_renderSubtreeIntoContainer: renderSubtreeIntoContainer,
<add> /* eslint-enable camelcase */
<add>};
<add>
<add>module.exports = ReactTestRenderer;
<ide><path>src/renderers/testing/__tests__/ReactTestRenderer-test.js
<add>/**
<add> * Copyright 2013-present, Facebook, Inc.
<add> * All rights reserved.
<add> *
<add> * This source code is licensed under the BSD-style license found in the
<add> * LICENSE file in the root directory of this source tree. An additional grant
<add> * of patent rights can be found in the PATENTS file in the same directory.
<add> *
<add> * @emails react-core
<add> */
<add>
<add>'use strict';
<add>
<add>var React = require('React');
<add>var ReactTestRenderer = require('ReactTestRenderer');
<add>
<add>describe('ReactTestRenderer', function() {
<add>
<add> it('renders a simple component', function() {
<add> function Link() {
<add> return <a role="link" />;
<add> }
<add> var renderer = ReactTestRenderer.create(<Link />);
<add> expect(renderer.toJSON()).toEqual({
<add> type: 'a',
<add> props: { role: 'link' },
<add> children: null,
<add> });
<add> });
<add>
<add> it('renders some basics with an update', function() {
<add> var renders = 0;
<add> var Component = React.createClass({
<add> getInitialState: function() {
<add> return {x: 3};
<add> },
<add> render: function() {
<add> renders++;
<add> return (
<add> <div className="purple">
<add> {this.state.x}
<add> <Child />
<add> <Null />
<add> </div>
<add> );
<add> },
<add> componentDidMount: function() {
<add> this.setState({x: 7});
<add> },
<add> });
<add>
<add> var Child = () => (renders++, <moo />);
<add> var Null = () => (renders++, null);
<add>
<add> var renderer = ReactTestRenderer.create(<Component />);
<add> expect(renderer.toJSON()).toEqual({
<add> type: 'div',
<add> props: { className: 'purple' },
<add> children: [
<add> 7,
<add> { type: 'moo', props: {}, children: null },
<add> ],
<add> });
<add> expect(renders).toBe(6);
<add> });
<add>
<add> it('exposes the instance', function() {
<add> class Mouse extends React.Component {
<add> constructor() {
<add> super();
<add> this.state = {mouse: 'mouse'};
<add> }
<add> handleMoose() {
<add> this.setState({mouse: 'moose'});
<add> }
<add> render() {
<add> return <div>{this.state.mouse}</div>;
<add> }
<add> }
<add> var renderer = ReactTestRenderer.create(<Mouse />);
<add>
<add> expect(renderer.toJSON()).toEqual({
<add> type: 'div',
<add> props: {},
<add> children: ['mouse'],
<add> });
<add>
<add> var mouse = renderer.getInstance();
<add> mouse.handleMoose();
<add> expect(renderer.toJSON()).toEqual({
<add> type: 'div',
<add> props: {},
<add> children: ['moose'],
<add> });
<add> });
<add>
<add>}); | 4 |
PHP | PHP | remove unused import | 0ec8c170c8604eb4ae55988b0f97dced8fef6a6f | <ide><path>src/Illuminate/Http/Resources/Json/JsonResource.php
<ide>
<ide> use ArrayAccess;
<ide> use JsonSerializable;
<del>use Illuminate\Support\Collection;
<ide> use Illuminate\Container\Container;
<ide> use Illuminate\Contracts\Support\Arrayable;
<ide> use Illuminate\Contracts\Routing\UrlRoutable; | 1 |
Python | Python | add cli to setup.py | afb94e5702d8a8c73498c1e9574b0525bed96ac3 | <ide><path>setup.py
<ide>
<ide> PACKAGES = [
<ide> 'spacy',
<add> 'spacy.cli',
<ide> 'spacy.tokens',
<ide> 'spacy.en',
<ide> 'spacy.de', | 1 |
Python | Python | remove duplicated sentence in numpy.multiply | 075270a145aed66514af1cd9662aef235dc99448 | <ide><path>numpy/core/code_generators/ufunc_docstrings.py
<ide> def add_newdoc(place, name, doc):
<ide> Returns
<ide> -------
<ide> y : ndarray
<del> The product of `x1` and `x2`, element-wise. Returns a scalar if
<del> both `x1` and `x2` are scalars.
<add> The product of `x1` and `x2`, element-wise.
<ide> $OUT_SCALAR_2
<ide>
<ide> Notes | 1 |
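The removed sentence merely duplicated what the `$OUT_SCALAR_2` template already documents; behavior is of course unchanged:

```python
import numpy as np

print(np.multiply(2.0, 4.0))       # 8.0, a scalar from scalar inputs
print(np.multiply([1, 2, 3], 10))  # [10 20 30], element-wise with broadcasting
```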
Ruby | Ruby | fix logger silencing for broadcasted loggers | 2518bda97cbbcb33dc9a92e70d5b01c09e64d12d | <ide><path>activesupport/lib/active_support/logger.rb
<ide> require 'active_support/logger_silence'
<add>require 'active_support/logger_thread_safe_level'
<ide> require 'logger'
<ide>
<ide> module ActiveSupport
<ide> class Logger < ::Logger
<add> include ActiveSupport::LoggerThreadSafeLevel
<ide> include LoggerSilence
<ide>
<ide> # Returns true if the logger destination matches one of the sources
<ide> def self.broadcast(logger) # :nodoc:
<ide> logger.level = level
<ide> super(level)
<ide> end
<add>
<add> define_method(:local_level=) do |level|
<add> logger.local_level = level if logger.respond_to?(:local_level=)
<add> super(level) if respond_to?(:local_level=)
<add> end
<ide> end
<ide> end
<ide>
<ide><path>activesupport/lib/active_support/logger_silence.rb
<ide> module LoggerSilence
<ide>
<ide> included do
<ide> cattr_accessor :silencer
<del> attr_reader :local_levels
<ide> self.silencer = true
<ide> end
<ide>
<del> def after_initialize
<del> @local_levels = Concurrent::Map.new(:initial_capacity => 2)
<del> end
<del>
<del> def local_log_id
<del> Thread.current.__id__
<del> end
<del>
<del> def level
<del> local_levels[local_log_id] || super
<del> end
<del>
<ide> # Silences the logger for the duration of the block.
<ide> def silence(temporary_level = Logger::ERROR)
<ide> if silencer
<ide> begin
<del> old_local_level = local_levels[local_log_id]
<del> local_levels[local_log_id] = temporary_level
<add> old_local_level = local_level
<add> self.local_level = temporary_level
<ide>
<ide> yield self
<ide> ensure
<del> if old_local_level
<del> local_levels[local_log_id] = old_local_level
<del> else
<del> local_levels.delete(local_log_id)
<del> end
<add> self.local_level = old_local_level
<ide> end
<ide> else
<ide> yield self
<ide> end
<ide> end
<del>end
<ide>\ No newline at end of file
<add>end
<ide><path>activesupport/lib/active_support/logger_thread_safe_level.rb
<add>require 'active_support/concern'
<add>
<add>module ActiveSupport
<add> module LoggerThreadSafeLevel
<add> extend ActiveSupport::Concern
<add>
<add> def after_initialize
<add> @local_levels = Concurrent::Map.new(:initial_capacity => 2)
<add> end
<add>
<add> def local_log_id
<add> Thread.current.__id__
<add> end
<add>
<add> def local_level
<add> @local_levels[local_log_id]
<add> end
<add>
<add> def local_level=(level)
<add> if level
<add> @local_levels[local_log_id] = level
<add> else
<add> @local_levels.delete(local_log_id)
<add> end
<add> end
<add>
<add> def level
<add> local_level || super
<add> end
<add> end
<add>end
<ide><path>activesupport/test/logger_test.rb
<ide> def test_silencing_everything_but_errors
<ide> assert @output.string.include?("THIS IS HERE")
<ide> end
<ide>
<add> def test_logger_silencing_works_for_broadcast
<add> another_output = StringIO.new
<add> another_logger = Logger.new(another_output)
<add>
<add> @logger.extend Logger.broadcast(another_logger)
<add>
<add> @logger.debug "CORRECT DEBUG"
<add> @logger.silence do
<add> @logger.debug "FAILURE"
<add> @logger.error "CORRECT ERROR"
<add> end
<add>
<add> assert @output.string.include?("CORRECT DEBUG")
<add> assert @output.string.include?("CORRECT ERROR")
<add> assert_not @output.string.include?("FAILURE")
<add>
<add> assert another_output.string.include?("CORRECT DEBUG")
<add> assert another_output.string.include?("CORRECT ERROR")
<add> assert_not another_output.string.include?("FAILURE")
<add> end
<add>
<add> def test_broadcast_silencing_does_not_break_plain_ruby_logger
<add> another_output = StringIO.new
<add> another_logger = ::Logger.new(another_output)
<add>
<add> @logger.extend Logger.broadcast(another_logger)
<add>
<add> @logger.debug "CORRECT DEBUG"
<add> @logger.silence do
<add> @logger.debug "FAILURE"
<add> @logger.error "CORRECT ERROR"
<add> end
<add>
<add> assert @output.string.include?("CORRECT DEBUG")
<add> assert @output.string.include?("CORRECT ERROR")
<add> assert_not @output.string.include?("FAILURE")
<add>
<add> assert another_output.string.include?("CORRECT DEBUG")
<add> assert another_output.string.include?("CORRECT ERROR")
<add> assert another_output.string.include?("FAILURE")
<add>    # We can't silence the plain Ruby Logger in a thread-safe way,
<add>    # but at least we don't break it
<add> end
<add>
<ide> def test_logger_level_per_object_thread_safety
<ide> logger1 = Logger.new(StringIO.new)
<ide> logger2 = Logger.new(StringIO.new) | 4 |
Go | Go | add the health check to mount_test | 137af244ee561bd350797c8ce6e159fbd46b6a02 | <ide><path>mount_test.go
<ide> import (
<ide> "testing"
<ide> )
<ide>
<add>// Look for inconsistencies in a store.
<add>func healthCheck(store *Store) error {
<add>	parents := make(map[string]bool)
<add>	allIDs := make(map[string]bool) // All registered image IDs, across paths
<add>	paths, err := store.Paths()
<add>	if err != nil {
<add>		return err
<add>	}
<add>	for _, path := range paths {
<add>		images, err := store.List(path)
<add>		if err != nil {
<add>			return err
<add>		}
<add>		IDs := make(map[string]bool) // All IDs for this path
<add>		for _, img := range images {
<add>			// Check for duplicate IDs per path
<add>			if _, exists := IDs[img.Id]; exists {
<add>				return fmt.Errorf("Duplicate ID: %s", img.Id)
<add>			}
<add>			IDs[img.Id] = true
<add>			allIDs[img.Id] = true
<add>			// Store parent for 2nd pass
<add>			if parent := img.Parent; parent != "" {
<add>				parents[parent] = true
<add>			}
<add>		}
<add>	}
<add>	// Check for references to non-registered parents
<add>	for parent := range parents {
<add>		if !allIDs[parent] {
<add>			return errors.New("Reference to non-registered parent: " + parent)
<add>		}
<add>	}
<add>	return nil
<add>}
<add>
<ide> // Note: This test is in the docker package because he needs to be run as root
<ide> func TestMount(t *testing.T) {
<ide> dir, err := ioutil.TempDir("", "docker-fs-test-mount")
<ide> func TestMount(t *testing.T) {
<ide> t.Fatal("Wrong number of mountpoints registered (should be %d, not %d)", 0, len(mps))
<ide> }
<ide> // General health check
<del> // if err := healthCheck(store); err != nil {
<del> // t.Fatal(err)
<del> // }
<add> if err := healthCheck(store); err != nil {
<add> t.Fatal(err)
<add> }
<ide> } | 1 |
Mixed | Python | simplify pipe analysis | b40f44419b03010d7eb14d255f9bfc99c3cad637 | <ide><path>spacy/errors.py
<ide> class Warnings:
<ide> "have the spacy-lookups-data package installed.")
<ide> W024 = ("Entity '{entity}' - Alias '{alias}' combination already exists in "
<ide> "the Knowledge Base.")
<del> W025 = ("'{name}' requires '{attr}' to be assigned, but none of the "
<del> "previous components in the pipeline declare that they assign it.")
<ide> W026 = ("Unable to set all sentence boundaries from dependency parses.")
<ide> W027 = ("Found a large training file of {size} bytes. Note that it may "
<ide> "be more efficient to split your training data into multiple "
<ide><path>spacy/language.py
<ide>
<ide> from .tokens.underscore import Underscore
<ide> from .vocab import Vocab, create_vocab
<del>from .pipe_analysis import validate_attrs, print_summary
<add>from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
<ide> from .gold import Example
<ide> from .scorer import Scorer
<ide> from .util import create_default_optimizer, registry
<ide> def analyze_pipes(
<ide> self,
<ide> *,
<ide> keys: List[str] = ["assigns", "requires", "scores", "retokenizes"],
<del> pretty: bool = True,
<del> no_print: bool = False,
<add> pretty: bool = False,
<ide> ) -> Optional[Dict[str, Any]]:
<ide> """Analyze the current pipeline components, print a summary of what
<ide> they assign or require and check that all requirements are met.
<ide>
<ide> keys (List[str]): The meta values to display in the table. Corresponds
<ide> to values in FactoryMeta, defined by @Language.factory decorator.
<del> pretty (bool): Pretty-print the results with colors and icons.
<del> no_print (bool): Don't print anything and return structured dict instead.
<del> RETURNS (dict): The data, if no_print is set to True.
<add> pretty (bool): Pretty-print the results.
<add> RETURNS (dict): The data.
<ide> """
<del> return print_summary(self, keys=keys, pretty=pretty, no_print=no_print)
<add> analysis = analyze_pipes(self, keys=keys)
<add> if pretty:
<add> print_pipe_analysis(analysis, keys=keys)
<add> return analysis
<ide>
<ide> def get_pipe(self, name: str) -> Callable[[Doc], Doc]:
<ide> """Get a pipeline component for a given component name.
<ide><path>spacy/pipe_analysis.py
<ide> from typing import List, Dict, Iterable, Optional, Union, TYPE_CHECKING
<del>from wasabi import Printer
<del>import warnings
<add>from wasabi import msg
<ide>
<ide> from .tokens import Doc, Token, Span
<del>from .errors import Errors, Warnings
<add>from .errors import Errors
<ide> from .util import dot_to_dict
<ide>
<ide> if TYPE_CHECKING:
<ide> # This lets us add type hints for mypy etc. without causing circular imports
<ide> from .language import Language # noqa: F401
<ide>
<ide>
<del>def analyze_pipes(
<del> nlp: "Language", name: str, index: int, warn: bool = True
<del>) -> List[str]:
<del> """Analyze a pipeline component with respect to its position in the current
<del> pipeline and the other components. Will check whether requirements are
<del> fulfilled (e.g. if previous components assign the attributes).
<del>
<del> nlp (Language): The current nlp object.
<del> name (str): The name of the pipeline component to analyze.
<del> index (int): The index of the component in the pipeline.
<del> warn (bool): Show user warning if problem is found.
<del> RETURNS (List[str]): The problems found for the given pipeline component.
<del> """
<del> assert nlp.pipeline[index][0] == name
<del> prev_pipes = nlp.pipeline[:index]
<del> meta = nlp.get_pipe_meta(name)
<del> requires = {annot: False for annot in meta.requires}
<del> if requires:
<del> for prev_name, prev_pipe in prev_pipes:
<del> prev_meta = nlp.get_pipe_meta(prev_name)
<del> for annot in prev_meta.assigns:
<del> requires[annot] = True
<del> problems = []
<del> for annot, fulfilled in requires.items():
<del> if not fulfilled:
<del> problems.append(annot)
<del> if warn:
<del> warnings.warn(Warnings.W025.format(name=name, attr=annot))
<del> return problems
<add>DEFAULT_KEYS = ["requires", "assigns", "scores", "retokenizes"]
<ide>
<ide>
<ide> def validate_attrs(values: Iterable[str]) -> Iterable[str]:
<ide> def validate_attrs(values: Iterable[str]) -> Iterable[str]:
<ide> return values
<ide>
<ide>
<del>def _get_feature_for_attr(nlp: "Language", attr: str, feature: str) -> List[str]:
<del> assert feature in ["assigns", "requires"]
<del> result = []
<add>def get_attr_info(nlp: "Language", attr: str) -> Dict[str, List[str]]:
<add> """Check which components in the pipeline assign or require an attribute.
<add>
<add> nlp (Language): The current nlp object.
<add> attr (str): The attribute, e.g. "doc.tensor".
<add> RETURNS (Dict[str, List[str]]): A dict keyed by "assigns" and "requires",
<add> mapped to a list of component names.
<add> """
<add> result = {"assigns": [], "requires": []}
<ide> for pipe_name in nlp.pipe_names:
<ide> meta = nlp.get_pipe_meta(pipe_name)
<del> pipe_assigns = getattr(meta, feature, [])
<del> if attr in pipe_assigns:
<del> result.append(pipe_name)
<add> if attr in meta.assigns:
<add> result["assigns"].append(pipe_name)
<add> if attr in meta.requires:
<add> result["requires"].append(pipe_name)
<ide> return result
<ide>
<ide>
<del>def get_assigns_for_attr(nlp: "Language", attr: str) -> List[str]:
<del> """Get all pipeline components that assign an attr, e.g. "doc.tensor".
<del>
<del> pipeline (Language): The current nlp object.
<del> attr (str): The attribute to check.
<del> RETURNS (List[str]): Names of components that require the attr.
<del> """
<del> return _get_feature_for_attr(nlp, attr, "assigns")
<del>
<del>
<del>def get_requires_for_attr(nlp: "Language", attr: str) -> List[str]:
<del> """Get all pipeline components that require an attr, e.g. "doc.tensor".
<del>
<del> pipeline (Language): The current nlp object.
<del> attr (str): The attribute to check.
<del> RETURNS (List[str]): Names of components that require the attr.
<del> """
<del> return _get_feature_for_attr(nlp, attr, "requires")
<del>
<del>
<del>def print_summary(
<del> nlp: "Language",
<del> *,
<del> keys: List[str] = ["requires", "assigns", "scores", "retokenizes"],
<del> pretty: bool = True,
<del> no_print: bool = False,
<del>) -> Optional[Dict[str, Union[List[str], Dict[str, List[str]]]]]:
<add>def analyze_pipes(
<add> nlp: "Language", *, keys: List[str] = DEFAULT_KEYS,
<add>) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
<del>    """Print a formatted summary for the current nlp object's pipeline. Shows
<del>    a table with the pipeline components and why they assign and require, as
<del>    well as any problems if available.
<add>    """Analyze the current nlp object's pipeline: summarize what the
<add>    components assign and require, and flag requirements that are not met by
<add>    a preceding component.
<ide>
<ide> nlp (Language): The nlp object.
<ide> keys (List[str]): The meta keys to show in the table.
<del> pretty (bool): Pretty-print the results (color etc).
<del> no_print (bool): Don't print anything, just return the data.
<del> RETURNS (dict): A dict with "overview" and "problems".
<add>    RETURNS (dict): A dict with "summary", "problems" and "attrs".
<ide> """
<del> msg = Printer(pretty=pretty, no_print=no_print)
<del> overview = {}
<del> problems = {}
<add> result = {"summary": {}, "problems": {}}
<add> all_attrs = set()
<ide> for i, name in enumerate(nlp.pipe_names):
<ide> meta = nlp.get_pipe_meta(name)
<del> overview[name] = {"i": i, "name": name}
<del> for key in keys:
<del> overview[name][key] = getattr(meta, key, None)
<del> problems[name] = analyze_pipes(nlp, name, i, warn=False)
<add> all_attrs.update(meta.assigns)
<add> all_attrs.update(meta.requires)
<add> result["summary"][name] = {key: getattr(meta, key, None) for key in keys}
<add> prev_pipes = nlp.pipeline[:i]
<add> requires = {annot: False for annot in meta.requires}
<add> if requires:
<add> for prev_name, prev_pipe in prev_pipes:
<add> prev_meta = nlp.get_pipe_meta(prev_name)
<add> for annot in prev_meta.assigns:
<add> requires[annot] = True
<add> result["problems"][name] = []
<add> for annot, fulfilled in requires.items():
<add> if not fulfilled:
<add> result["problems"][name].append(annot)
<add> result["attrs"] = {attr: get_attr_info(nlp, attr) for attr in all_attrs}
<add> return result
<add>
<add>
<add>def print_pipe_analysis(
<add> analysis: Dict[str, Union[List[str], Dict[str, List[str]]]],
<add> *,
<add> keys: List[str] = DEFAULT_KEYS,
<add>) -> None:
<add> """Print a formatted version of the pipe analysis produced by analyze_pipes.
<add>
<add> analysis (Dict[str, Union[List[str], Dict[str, List[str]]]]): The analysis.
<add> keys (List[str]): The meta keys to show in the table.
<add> """
<ide> msg.divider("Pipeline Overview")
<ide> header = ["#", "Component", *[key.capitalize() for key in keys]]
<del> body = [[info for info in entry.values()] for entry in overview.values()]
<add> summary = analysis["summary"].items()
<add> body = [[i, n, *[v for v in m.values()]] for i, (n, m) in enumerate(summary)]
<ide> msg.table(body, header=header, divider=True, multiline=True)
<del> n_problems = sum(len(p) for p in problems.values())
<del> if any(p for p in problems.values()):
<add> n_problems = sum(len(p) for p in analysis["problems"].values())
<add> if any(p for p in analysis["problems"].values()):
<ide> msg.divider(f"Problems ({n_problems})")
<del> for name, problem in problems.items():
<add> for name, problem in analysis["problems"].items():
<ide> if problem:
<ide> msg.warn(f"'{name}' requirements not met: {', '.join(problem)}")
<ide> else:
<ide> msg.good("No problems found.")
<del> if no_print:
<del> return {"overview": overview, "problems": problems}
<del>
<del>
<del>def count_pipeline_interdependencies(nlp: "Language") -> List[int]:
<del> """Count how many subsequent components require an annotation set by each
<del> component in the pipeline.
<del>
<del> nlp (Language): The current nlp object.
<del> RETURNS (List[int]): The interdependency counts.
<del> """
<del> pipe_assigns = []
<del> pipe_requires = []
<del> for name in nlp.pipe_names:
<del> meta = nlp.get_pipe_meta(name)
<del> pipe_assigns.append(set(meta.assigns))
<del> pipe_requires.append(set(meta.requires))
<del> counts = []
<del> for i, assigns in enumerate(pipe_assigns):
<del> count = 0
<del> for requires in pipe_requires[i + 1 :]:
<del> if assigns.intersection(requires):
<del> count += 1
<del> counts.append(count)
<del> return counts
<ide><path>spacy/tests/pipeline/test_analysis.py
<ide> from spacy.language import Language
<del>from spacy.pipe_analysis import get_assigns_for_attr, get_requires_for_attr
<del>from spacy.pipe_analysis import validate_attrs, count_pipeline_interdependencies
<add>from spacy.pipe_analysis import get_attr_info, validate_attrs
<ide> from mock import Mock
<ide> import pytest
<ide>
<ide> def test_component3(doc):
<ide> nlp = Language()
<ide> nlp.add_pipe("c1")
<ide> nlp.add_pipe("c2")
<del> problems = nlp.analyze_pipes(no_print=True)["problems"]
<add> problems = nlp.analyze_pipes()["problems"]
<ide> assert problems["c2"] == ["token.pos"]
<ide> nlp.add_pipe("c3")
<del> assert get_assigns_for_attr(nlp, "doc.tensor") == ["c1", "c2"]
<add> assert get_attr_info(nlp, "doc.tensor")["assigns"] == ["c1", "c2"]
<ide> nlp.add_pipe("c1", name="c4")
<ide> test_component4_meta = nlp.get_pipe_meta("c1")
<ide> assert test_component4_meta.factory == "c1"
<ide> assert nlp.pipe_names == ["c1", "c2", "c3", "c4"]
<ide> assert not Language.has_factory("c4")
<ide> assert nlp.pipe_factories["c1"] == "c1"
<ide> assert nlp.pipe_factories["c4"] == "c1"
<del> assert get_assigns_for_attr(nlp, "doc.tensor") == ["c1", "c2", "c4"]
<del> assert get_requires_for_attr(nlp, "token.pos") == ["c2"]
<add> assert get_attr_info(nlp, "doc.tensor")["assigns"] == ["c1", "c2", "c4"]
<add> assert get_attr_info(nlp, "token.pos")["requires"] == ["c2"]
<ide> assert nlp("hello world")
<ide>
<ide>
<ide> def c2(doc):
<ide> nlp = Language()
<ide> nlp.add_pipe("pipe_analysis_c6")
<ide> nlp.add_pipe("pipe_analysis_c7")
<del> problems = nlp.analyze_pipes(no_print=True)["problems"]
<add> problems = nlp.analyze_pipes()["problems"]
<ide> assert problems["pipe_analysis_c7"] == ["token.pos"]
<ide> nlp.remove_pipe("pipe_analysis_c7")
<del> problems = nlp.analyze_pipes(no_print=True)["problems"]
<add> problems = nlp.analyze_pipes()["problems"]
<ide> assert all(p == [] for p in problems.values())
<del>
<del>
<del>def test_pipe_interdependencies():
<del> prefix = "test_pipe_interdependencies"
<del>
<del> @Language.component(f"{prefix}.fancifier", assigns=("doc._.fancy",))
<del> def fancifier(doc):
<del> return doc
<del>
<del> @Language.component(f"{prefix}.needer", requires=("doc._.fancy",))
<del> def needer(doc):
<del> return doc
<del>
<del> nlp = Language()
<del> nlp.add_pipe(f"{prefix}.fancifier")
<del> nlp.add_pipe(f"{prefix}.needer")
<del> counts = count_pipeline_interdependencies(nlp)
<del> assert counts == [1, 0]
<ide><path>website/docs/api/language.md
<ide> decorator. For more details and examples, see the
<ide> | ----------------------- | -------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
<ide> | `name` | str | The name of the component factory. |
<ide> | _keyword-only_ | | |
<del>| `assigns` | `Iterable[str]` | `Doc` or `Token` attributes assigned by this component, e.g. `["token.ent_id"]`. Used for pipeline analysis. |
<del>| `requires` | `Iterable[str]` | `Doc` or `Token` attributes required by this component, e.g. `["token.ent_id"]`. Used for pipeline analysis. |
<del>| `retokenizes` | bool | Whether the component changes tokenization. Used for pipeline analysis. |
<del>| `scores` | `Iterable[str]` | All scores set by the components if it's trainable, e.g. `["ents_f", "ents_r", "ents_p"]`. |
<add>| `assigns` | `Iterable[str]` | `Doc` or `Token` attributes assigned by this component, e.g. `["token.ent_id"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `requires` | `Iterable[str]` | `Doc` or `Token` attributes required by this component, e.g. `["token.ent_id"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `retokenizes` | bool | Whether the component changes tokenization. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `scores` | `Iterable[str]` | All scores set by the components if it's trainable, e.g. `["ents_f", "ents_r", "ents_p"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<ide> | `default_score_weights` | `Dict[str, float]` | The scores to report during training, and their default weight towards the final score used to select the best model. Weights should sum to `1.0` per component and will be combined and normalized for the whole pipeline. |
<ide> | `func` | `Optional[Callable]` | Optional function if not used as a decorator. |
<ide>
<ide> examples, see the
<ide> | `name` | str | The name of the component factory. |
<ide> | _keyword-only_ | | |
<ide> | `default_config` | `Dict[str, any]` | The default config, describing the default values of the factory arguments. |
<del>| `assigns` | `Iterable[str]` | `Doc` or `Token` attributes assigned by this component, e.g. `["token.ent_id"]`. Used for pipeline analysis. |
<del>| `requires` | `Iterable[str]` | `Doc` or `Token` attributes required by this component, e.g. `["token.ent_id"]`. Used for pipeline analysis. |
<del>| `retokenizes` | bool | Whether the component changes tokenization. Used for pipeline analysis. |
<del>| `scores` | `Iterable[str]` | All scores set by the components if it's trainable, e.g. `["ents_f", "ents_r", "ents_p"]`. |
<add>| `assigns` | `Iterable[str]` | `Doc` or `Token` attributes assigned by this component, e.g. `["token.ent_id"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `requires` | `Iterable[str]` | `Doc` or `Token` attributes required by this component, e.g. `["token.ent_id"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `retokenizes` | bool | Whether the component changes tokenization. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `scores` | `Iterable[str]` | All scores set by the components if it's trainable, e.g. `["ents_f", "ents_r", "ents_p"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<ide> | `default_score_weights` | `Dict[str, float]` | The scores to report during training, and their default weight towards the final score used to select the best model. Weights should sum to `1.0` per component and will be combined and normalized for the whole pipeline. |
<ide> | `func` | `Optional[Callable]` | Optional function if not used as a decorator. |
<ide>
<ide> doesn't, the pipeline analysis won't catch that.
<ide> > nlp = spacy.blank("en")
<ide> > nlp.add_pipe("tagger")
<ide> > nlp.add_pipe("entity_linker")
<del>> nlp.analyze_pipes()
<add>> analysis = nlp.analyze_pipes()
<ide> > ```
<ide>
<ide> <Accordion title="Example output" spaced>
<ide>
<add>```json
<add>### Structured
<add>{
<add> "summary": {
<add> "tagger": {
<add> "assigns": ["token.tag"],
<add> "requires": [],
<add> "scores": ["tag_acc", "pos_acc", "lemma_acc"],
<add> "retokenizes": false
<add> },
<add> "entity_linker": {
<add> "assigns": ["token.ent_kb_id"],
<add> "requires": ["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"],
<add> "scores": [],
<add> "retokenizes": false
<add> }
<add> },
<add> "problems": {
<add> "tagger": [],
<add> "entity_linker": ["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"]
<add> },
<add> "attrs": {
<add> "token.ent_iob": { "assigns": [], "requires": ["entity_linker"] },
<add> "doc.ents": { "assigns": [], "requires": ["entity_linker"] },
<add> "token.ent_kb_id": { "assigns": ["entity_linker"], "requires": [] },
<add> "doc.sents": { "assigns": [], "requires": ["entity_linker"] },
<add> "token.tag": { "assigns": ["tagger"], "requires": [] },
<add> "token.ent_type": { "assigns": [], "requires": ["entity_linker"] }
<add> }
<add>}
<ide> ```
<add>
<add>```
<add>### Pretty
<ide> ============================= Pipeline Overview =============================
<ide>
<ide> # Component Assigns Requires Scores Retokenizes
<ide> token.ent_iob, token.ent_type
<ide>
<ide> </Accordion>
<ide>
<del>| Name | Type | Description |
<del>| -------------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
<del>| _keyword-only_ | | |
<del>| `keys` | `List[str]` | The values to display in the table. Corresponds to attributes of the [`FactoryMeta`](/api/language#factorymeta). Defaults to `["assigns", "requires", "scores", "retokenizes"]`. |
<del>| `pretty` | bool | Pretty-print the results with colors and icons. Defaults to `True`. |
<del>| `no_print` | bool | Don't print anything and return a structured dict instead. Defaults to `False`. |
<del>| **RETURNS** | dict | Optional dict, if `no_print` is set to `True`. |
<add>| Name | Type | Description |
<add>| -------------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
<add>| _keyword-only_ | | |
<add>| `keys` | `List[str]` | The values to display in the table. Corresponds to attributes of the [`FactoryMeta`](/api/language#factorymeta). Defaults to `["assigns", "requires", "scores", "retokenizes"]`. |
<add>| `pretty` | bool | Pretty-print the results as a table. Defaults to `False`. |
<add>| **RETURNS** | dict | Dictionary containing the pipe analysis, keyed by `"summary"` (component meta by pipe), `"problems"` (attribute names by pipe) and `"attrs"` (pipes that assign and require an attribute, keyed by attribute). |
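<add>
<add>For instance, a quick way to surface unmet requirements from the returned
<add>structure (an illustrative sketch, using only the keys documented above):
<add>
<add>```python
<add>analysis = nlp.analyze_pipes()
<add># "problems" maps each pipe name to the attributes it requires but that
<add># no earlier component assigns
<add>for pipe_name, missing in analysis["problems"].items():
<add>    if missing:
<add>        print(f"{pipe_name} is missing: {', '.join(missing)}")
<add>```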
<ide>
<ide> ## Language.meta {#meta tag="property"}
<ide>
<ide> instance and factory instance.
<ide> | ----------------------- | ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
<ide> | `factory` | str | The name of the registered component factory. |
<ide> | `default_config` | `Dict[str, Any]` | The default config, describing the default values of the factory arguments. |
<del>| `assigns` | `Iterable[str]` | `Doc` or `Token` attributes assigned by this component, e.g. `["token.ent_id"]`. Used for pipeline analysis. |
<del>| `requires` | `Iterable[str]` | `Doc` or `Token` attributes required by this component, e.g. `["token.ent_id"]`. Used for pipeline analysis. |
<del>| `retokenizes` | bool | Whether the component changes tokenization. Used for pipeline analysis. |
<del>| `scores` | `Iterable[str]` | All scores set by the components if it's trainable, e.g. `["ents_f", "ents_r", "ents_p"]`. |
<add>| `assigns` | `Iterable[str]` | `Doc` or `Token` attributes assigned by this component, e.g. `["token.ent_id"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `requires` | `Iterable[str]` | `Doc` or `Token` attributes required by this component, e.g. `["token.ent_id"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `retokenizes` | bool | Whether the component changes tokenization. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<add>| `scores` | `Iterable[str]` | All scores set by the components if it's trainable, e.g. `["ents_f", "ents_r", "ents_p"]`. Used for [pipe analysis](/usage/processing-pipelines#analysis). |
<ide> | `default_score_weights` | `Dict[str, float]` | The scores to report during training, and their default weight towards the final score used to select the best model. Weights should sum to `1.0` per component and will be combined and normalized for the whole pipeline. |
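<add>
<add>These attributes are typically declared via the `@Language.factory` decorator.
<add>A minimal sketch (illustrative only; the component name and extension
<add>attribute are made up for this example):
<add>
<add>```python
<add>import spacy
<add>from spacy.language import Language
<add>from spacy.tokens import Token
<add>
<add>Token.set_extension("checked", default=False)
<add>
<add>@Language.factory("checker", assigns=["token._.checked"], retokenizes=False)
<add>def create_checker(nlp, name):
<add>    def checker(doc):
<add>        # Mark every token so the declared "assigns" metadata is accurate
<add>        for token in doc:
<add>            token._.checked = True
<add>        return doc
<add>    return checker
<add>
<add>nlp = spacy.blank("en")
<add>nlp.add_pipe("checker")
<add>```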
<ide><path>website/docs/usage/processing-pipelines.md
<ide> attributes they set on the [`Doc`](/api/doc) and [`Token`](/api/token), whether
<ide> they retokenize the `Doc` and which scores they produce during training. It will
<ide> also show warnings if components require values that aren't set by a previous
<ide> component – for instance, if the entity linker is used but no component that
<del>runs before it sets named entities.
<add>runs before it sets named entities. Setting `pretty=True` will pretty-print a
<add>table instead of only returning the structured data.
<add>
<add>> #### ✏️ Things to try
<add>>
<add>> 1. Add the components `"ner"` and `"sentencizer"` _before_ the entity linker.
<add>> The analysis should now show no problems, because requirements are met.
<ide>
<ide> ```python
<add>### {executable="true"}
<add>import spacy
<add>
<ide> nlp = spacy.blank("en")
<ide> nlp.add_pipe("tagger")
<del>nlp.add_pipe("entity_linker") # this is a problem, because it needs entities
<del>nlp.analyze_pipes()
<add># This is a problem because it needs entities and sentence boundaries
<add>nlp.add_pipe("entity_linker")
<add>analysis = nlp.analyze_pipes(pretty=True)
<ide> ```
<ide>
<add><Accordion title="Example output">
<add>
<add>```json
<add>### Structured
<add>{
<add> "summary": {
<add> "tagger": {
<add> "assigns": ["token.tag"],
<add> "requires": [],
<add> "scores": ["tag_acc", "pos_acc", "lemma_acc"],
<add> "retokenizes": false
<add> },
<add> "entity_linker": {
<add> "assigns": ["token.ent_kb_id"],
<add> "requires": ["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"],
<add> "scores": [],
<add> "retokenizes": false
<add> }
<add> },
<add> "problems": {
<add> "tagger": [],
<add> "entity_linker": ["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"]
<add> },
<add> "attrs": {
<add> "token.ent_iob": { "assigns": [], "requires": ["entity_linker"] },
<add> "doc.ents": { "assigns": [], "requires": ["entity_linker"] },
<add> "token.ent_kb_id": { "assigns": ["entity_linker"], "requires": [] },
<add> "doc.sents": { "assigns": [], "requires": ["entity_linker"] },
<add> "token.tag": { "assigns": ["tagger"], "requires": [] },
<add> "token.ent_type": { "assigns": [], "requires": ["entity_linker"] }
<add> }
<add>}
<ide> ```
<del>### Example output
<add>
<add>```
<add>### Pretty
<ide> ============================= Pipeline Overview =============================
<ide>
<ide> # Component Assigns Requires Scores Retokenizes
<ide> nlp.analyze_pipes()
<ide> token.ent_iob, token.ent_type
<ide> ```
<ide>
<del>If you prefer a structured dictionary containing the component information and
<del>the problems, you can set `no_print=True`. This will return the data instead of
<del>printing it.
<del>
<del>```
<del>result = nlp.analyze_pipes(no_print=True)
<del>```
<add></Accordion>
<ide>
<ide> <Infobox variant="warning" title="Important note">
<ide> | 6 |
Text | Text | add section about doc testing | f45ac11fb32ba17e3e1fa831c30438ff9972a1ee | <ide><path>docs/README.md
<ide> check what they look like before committing, for instance). You don't have to comm
<ide>
<ide> ## Building the documentation
<ide>
<del>Once you have setup the `doc-builder` and additional packages, you can generate the documentation by typing th
<del>following command:
<add>Once you have setup the `doc-builder` and additional packages, you can generate the documentation by
<add>typing the following command:
<ide>
<ide> ```bash
<ide> doc-builder build transformers docs/source/ --build_dir ~/tmp/test-build
<ide> We have an automatic script running with the `make style` comment that will make
<ide> This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
<ide> recommended to commit your changes before running `make style`, so you can revert the changes done by that script
<ide> easily.
<add>
<add># Testing documentation examples
<add>
<add>Good documentation oftens comes with an example of how a specific function or class should be used.
<add>Each model class should contain at least one example showcasing
<add>how to use this model class in inference. *E.g.* the class [Wav2Vec2ForCTC](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC)
<add>includes an example of how to transcribe speech to text in the
<add>[docstring of its forward function](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC.forward).
<add>
<add>## Writing documentation examples
<add>
<add>The syntax for Example docstrings can look as follows:
<add>
<add>```
<add> Example:
<add>
<add> ```python
<add> >>> from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
<add> >>> from datasets import load_dataset
<add> >>> import torch
<add>
<add> >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
<add> >>> dataset = dataset.sort("id")
<add> >>> sampling_rate = dataset.features["audio"].sampling_rate
<add>
<add> >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
<add> >>> model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
<add>
<add> >>> # audio file is decoded on the fly
<add> >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
<add> >>> with torch.no_grad():
<add> ... logits = model(**inputs).logits
<add> >>> predicted_ids = torch.argmax(logits, dim=-1)
<add>
<add> >>> # transcribe speech
<add> >>> transcription = processor.batch_decode(predicted_ids)
<add> >>> transcription[0]
<add> 'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'
<add> ```
<add>```
<add>
<add>The docstring should give a minimal, clear example of how the respective model
<add>is to be used in inference and also include the expected (ideally sensible)
<add>output.
<add>Often, readers will try out the example before even going through the function
<add>or class definitions. Therefore it is of utmost importance that the example
<add>works as expected.
<add>
<add>## Docstring testing
<add>
<add>To do so, each example should be included in the doctests.
<add>We use pytest's [doctest integration](https://docs.pytest.org/doctest.html) to verify that all of our examples run correctly.
<add>For Transformers, the doctests are run on a daily basis via GitHub Actions as can be
<add>seen [here](https://github.com/huggingface/transformers/actions/workflows/doctests.yml).
<add>
<add>To include your example in the daily doctests, you need to add the filename that
<add>contains the example docstring to the [documentation_tests.txt](../utils/documentation_tests.txt).
<add>You can test the example locally as follows:
<add>
<add>- For Python files ending with *.py*:
<add>```
<add>pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py::transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward -sv --doctest-continue-on-failure
<add>```
<add>
<add>- For Markdown files ending with *.mdx*:
<add>```
<add>pytest --doctest-modules docs/source/quicktour.mdx -sv --doctest-continue-on-failure --doctest-glob="*.mdx"
<add>``` | 1 |
Text | Text | add return 0; in example | 67500917089daa1de994db4f9a4bf276fd5d74c7 | <ide><path>guide/english/c/for/index.md
<ide> int main() {
<ide> for (int i = 0; i < 5; ++i) {
<ide> printf("Item on index %d is %d\n", i, array[i]);
<ide> }
<del>
<add>
<ide> return 0;
<ide> }
<ide> ```
<ide> int main () {
<ide> for (int i = 0; i < 5; i--) {
<ide> printf("%d \n", i);
<ide> }
<add>
<add> return 0;
<ide> }
<ide> ```
<ide>
<ide> int main () {
<ide> for (int i = 0; i < 5; i++) { // The int i = 0 will show you an error on older compiler versions
<ide> printf("Item on index %d is %d\n", i, array[i]);
<ide> }
<add>
<add> return 0;
<ide> }
<ide> ```
<ide>
<ide> int main () {
<ide> for (i = 0; i < 5; i++) { // Now you won't have a problem
<ide> printf("Item on index %d is %d\n", i, array[i]);
<ide> }
<add>
<add> return 0;
<ide> }
<ide> ```
<ide> | 1 |
Ruby | Ruby | generate environment dependent asset digests | ed5c6d254c9ef5d44a11159561fddde7a3033874 | <ide><path>actionpack/lib/sprockets/railtie.rb
<ide> class Railtie < ::Rails::Railtie
<ide> app.assets = Sprockets::Environment.new(app.root.to_s) do |env|
<ide> env.static_root = File.join(app.root.join('public'), config.assets.prefix)
<ide> env.logger = ::Rails.logger
<add> env.version = ::Rails.env + '-' + env.version
<ide>
<ide> if config.assets.cache_store != false
<ide> env.cache = ActiveSupport::Cache.lookup_store(config.assets.cache_store) || ::Rails.cache
<ide><path>actionpack/test/template/sprockets_helper_test.rb
<ide> def url_for(*args)
<ide> stubs(:asset_environment).returns(assets)
<ide> assert_match %r{/assets/style-[0-9a-f]+.css}, asset_path("style", "css")
<ide> end
<add>
<add> test "alternate hash based on environment" do
<add> assets = Sprockets::Environment.new
<add> assets.version = 'development'
<add> assets.append_path(FIXTURES.join("sprockets/alternate/stylesheets"))
<add> stubs(:asset_environment).returns(assets)
<add> dev_path = asset_path("style", "css")
<add>
<add> assets.version = 'production'
<add> prod_path = asset_path("style", "css")
<add>
<add> assert_not_equal prod_path, dev_path
<add> end
<ide> end | 2 |
Javascript | Javascript | remove unused helper function | bc47ba53b499a968db1c7eae019b36769fa5e922 | <ide><path>packages/ember-handlebars/tests/controls/select_test.js
<ide> module("Ember.Select", {
<ide> }
<ide> });
<ide>
<del>function setAndFlush(view, key, value) {
<del> Ember.run(function() {
<del> Ember.set(view, key, value);
<del> });
<del>}
<del>
<ide> function append() {
<ide> Ember.run(function() {
<ide> select.appendTo('#qunit-fixture'); | 1 |
Go | Go | change forbidden error (403) to conflict(409) | 31e8fcc67808488e9aade681a831d748df1af8a5 | <ide><path>daemon/container_operations.go
<ide> func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN
<ide> networkName := n.Name()
<ide> containerName := strings.TrimPrefix(container.Name, "/")
<ide> if network, ok := container.NetworkSettings.Networks[networkName]; ok && network.EndpointID != "" {
<del> return n, nil, types.ForbiddenErrorf("%s is already attached to network %s", containerName, networkName)
<add> err := fmt.Errorf("%s is already attached to network %s", containerName, networkName)
<add> return n, nil, errdefs.Conflict(err)
<ide> }
<ide> }
<ide> } | 1 |
Text | Text | fix typo in man page | 3a4775cb8003324d9aed5bfbe23f58eb0ca5bb41 | <ide><path>man/docker-build.1.md
<ide> cloned locally and then sent as the context.
<ide> name and value of a **buildarg**.
<ide>
<ide> For example, if you want to pass a value for `http_proxy`, use
<del> `--bulid-arg=http_proxy="http://some.proxy.url"`
<add> `--build-arg=http_proxy="http://some.proxy.url"`
<ide>
<ide> Users pass these values at build-time. Docker uses the `buildargs` as the
<ide> environment context for command(s) run via the Dockerfile's `RUN` instruction | 1 |
Python | Python | use new lemmatizer data and remove file import | 1da29a7146f2a8b8f60e1dcf5599dddb67612b95 | <ide><path>spacy/lemmatizer.py
<ide>
<ide> import ujson as json
<ide>
<add>from .en.lemmatizer import INDEX, EXC, RULES
<ide> from .symbols import POS, NOUN, VERB, ADJ, PUNCT
<ide>
<ide>
<ide> class Lemmatizer(object):
<ide> @classmethod
<ide> def load(cls, path, rules=None):
<del> index = {}
<del> exc = {}
<del> for pos in ['adj', 'noun', 'verb']:
<del> pos_index_path = path / 'wordnet' / 'index.{pos}'.format(pos=pos)
<del> if pos_index_path.exists():
<del> with pos_index_path.open() as file_:
<del> index[pos] = read_index(file_)
<del> else:
<del> index[pos] = set()
<del> pos_exc_path = path / 'wordnet' / '{pos}.exc'.format(pos=pos)
<del> if pos_exc_path.exists():
<del> with pos_exc_path.open() as file_:
<del> exc[pos] = read_exc(file_)
<del> else:
<del> exc[pos] = {}
<del> if rules is None and (path / 'vocab' / 'lemma_rules.json').exists():
<del> with (path / 'vocab' / 'lemma_rules.json').open('r', encoding='utf8') as file_:
<del> rules = json.load(file_)
<del> elif rules is None:
<del> rules = {}
<add> index = dict(INDEX)
<add> exc = dict(EXC)
<add> rules = dict(RULES)
<ide> return cls(index, exc, rules)
<ide>
<ide> def __init__(self, index, exceptions, rules):
<ide> def lemmatize(string, index, exceptions, rules):
<ide> if not forms:
<ide> forms.append(string)
<ide> return set(forms)
<del>
<del>
<del>def read_index(fileobj):
<del> index = set()
<del> for line in fileobj:
<del> if line.startswith(' '):
<del> continue
<del> pieces = line.split()
<del> word = pieces[0]
<del> if word.count('_') == 0:
<del> index.add(word)
<del> return index
<del>
<del>
<del>def read_exc(fileobj):
<del> exceptions = {}
<del> for line in fileobj:
<del> if line.startswith(' '):
<del> continue
<del> pieces = line.split()
<del> exceptions[pieces[0]] = tuple(pieces[1:])
<del> return exceptions
<ide><path>spacy/tests/tagger/test_lemmatizer.py
<ide> # coding: utf-8
<ide> from __future__ import unicode_literals
<ide>
<del>from ...lemmatizer import read_index, read_exc
<del>
<ide> import pytest
<ide>
<ide>
<ide> def test_tagger_lemmatizer_punct(lemmatizer):
<ide> assert lemmatizer.punct('“') == set(['"'])
<ide>
<ide>
<del>@pytest.mark.models
<del>def test_tagger_lemmatizer_read_index(path):
<del> if path is not None:
<del> with (path / 'wordnet' / 'index.noun').open() as file_:
<del> index = read_index(file_)
<del> assert 'man' in index
<del> assert 'plantes' not in index
<del> assert 'plant' in index
<del>
<del>
<del>@pytest.mark.models
<del>@pytest.mark.parametrize('text,lemma', [("was", "be")])
<del>def test_tagger_lemmatizer_read_exc(path, text, lemma):
<del> if path is not None:
<del> with (path / 'wordnet' / 'verb.exc').open() as file_:
<del> exc = read_exc(file_)
<del> assert exc[text] == (lemma,)
<del>
<del>
<ide> @pytest.mark.models
<ide> def test_tagger_lemmatizer_lemma_assignment(EN):
<ide> text = "Bananas in pyjamas are geese." | 2 |
Mixed | Go | add token pass-thru for authconfig | 8dce8e9901501863a80caf47d71713d4d36925a3 | <ide><path>cliconfig/config.go
<ide> type AuthConfig struct {
<ide> Auth string `json:"auth"`
<ide> Email string `json:"email"`
<ide> ServerAddress string `json:"serveraddress,omitempty"`
<add> RegistryToken string `json:"registrytoken,omitempty"`
<ide> }
<ide>
<ide> // ConfigFile ~/.docker/config.json file info
<ide><path>distribution/registry.go
<ide> package distribution
<ide>
<ide> import (
<ide> "errors"
<add> "fmt"
<ide> "net"
<ide> "net/http"
<ide> "net/url"
<ide> func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEnd
<ide> return nil, err
<ide> }
<ide>
<del> creds := dumbCredentialStore{auth: authConfig}
<del> tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName.Name(), actions...)
<del> basicHandler := auth.NewBasicHandler(creds)
<del> modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
<add> if authConfig.RegistryToken != "" {
<add> passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
<add> modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
<add> } else {
<add> creds := dumbCredentialStore{auth: authConfig}
<add> tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName.Name(), actions...)
<add> basicHandler := auth.NewBasicHandler(creds)
<add> modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
<add> }
<ide> tr := transport.NewTransport(base, modifiers...)
<ide>
<ide> return client.NewRepository(ctx, repoName.Name(), endpoint.URL, tr)
<ide> func digestFromManifest(m *schema1.SignedManifest, localName string) (digest.Dig
<ide> }
<ide> return manifestDigest, len(payload), nil
<ide> }
<add>
<add>type existingTokenHandler struct {
<add> token string
<add>}
<add>
<add>func (th *existingTokenHandler) Scheme() string {
<add> return "bearer"
<add>}
<add>
<add>func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
<add> req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
<add> return nil
<add>}
<ide><path>distribution/registry_unit_test.go
<add>package distribution
<add>
<add>import (
<add> "net/http"
<add> "net/http/httptest"
<add> "os"
<add> "strings"
<add> "testing"
<add>
<add> "github.com/Sirupsen/logrus"
<add> "github.com/docker/distribution/reference"
<add> "github.com/docker/distribution/registry/client/auth"
<add> "github.com/docker/docker/cliconfig"
<add> "github.com/docker/docker/pkg/streamformatter"
<add> "github.com/docker/docker/registry"
<add> "github.com/docker/docker/utils"
<add>)
<add>
<add>func TestTokenPassThru(t *testing.T) {
<add> authConfig := &cliconfig.AuthConfig{
<add> RegistryToken: "mysecrettoken",
<add> }
<add> gotToken := false
<add> handler := func(w http.ResponseWriter, r *http.Request) {
<add> if strings.Contains(r.Header.Get("Authorization"), authConfig.RegistryToken) {
<add> logrus.Debug("Detected registry token in auth header")
<add> gotToken = true
<add> }
<add> if r.RequestURI == "/v2/" {
<add> w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`)
<add> w.WriteHeader(401)
<add> }
<add> }
<add> ts := httptest.NewServer(http.HandlerFunc(handler))
<add> defer ts.Close()
<add>
<add> tmp, err := utils.TestDirectory("")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> defer os.RemoveAll(tmp)
<add>
<add> endpoint := registry.APIEndpoint{
<add> Mirror: false,
<add> URL: ts.URL,
<add> Version: 2,
<add> Official: false,
<add> TrimHostname: false,
<add> TLSConfig: nil,
<add> //VersionHeader: "verheader",
<add> Versions: []auth.APIVersion{
<add> {
<add> Type: "registry",
<add> Version: "2",
<add> },
<add> },
<add> }
<add> n, _ := reference.ParseNamed("testremotename")
<add> repoInfo := ®istry.RepositoryInfo{
<add> Index: ®istry.IndexInfo{
<add> Name: "testrepo",
<add> Mirrors: nil,
<add> Secure: false,
<add> Official: false,
<add> },
<add> RemoteName: n,
<add> LocalName: n,
<add> CanonicalName: n,
<add> Official: false,
<add> }
<add> imagePullConfig := &ImagePullConfig{
<add> MetaHeaders: http.Header{},
<add> AuthConfig: authConfig,
<add> }
<add> sf := streamformatter.NewJSONStreamFormatter()
<add> puller, err := newPuller(endpoint, repoInfo, imagePullConfig, sf)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> p := puller.(*v2Puller)
<add> p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> logrus.Debug("About to pull")
<add> // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above
<add> tag, _ := reference.WithTag(n, "tag_goes_here")
<add> _ = p.pullV2Repository(tag)
<add>
<add> if !gotToken {
<add> t.Fatal("Failed to receive registry token")
<add> }
<add>
<add>}
<ide><path>docs/reference/api/docker_remote_api.md
<ide> This section lists each version from latest to oldest. Each listing includes a
<ide> * `GET /networks/(name)` now returns a `Name` field for each container attached to the network.
<ide> * `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it
<ide> consistent with other date/time values returned by the API.
<add>* `AuthConfig` now supports a `registrytoken` for token-based authentication
<ide>
<ide> ### v1.21 API changes
<ide>
<ide><path>docs/reference/api/docker_remote_api_v1.22.md
<ide> Query Parameters:
<ide>
<ide> Request Headers:
<ide>
<del>- **X-Registry-Auth** – base64-encoded AuthConfig object
<add>- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information or a token
<add> - Credential-based login:
<add>
<add> ```
<add> {
<add> "username": "jdoe",
<add> "password": "secret",
<add> "email": "[email protected]",
<add> }
<add> ```
<add>
<add> - Token-based login:
<add>
<add> ```
<add> {
<add> "registrytoken": "9cbaf023786cd7..."
<add> }
<add> ```
<ide>
<ide> Status Codes:
<ide>
<ide> Query Parameters:
<ide>
<ide> Request Headers:
<ide>
<del>- **X-Registry-Auth** – Include a base64-encoded AuthConfig.
<del> object.
<add>- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information or a token
<add> - Credential-based login:
<add>
<add> ```
<add> {
<add> "username": "jdoe",
<add> "password": "secret",
<add> "email": "[email protected]",
<add> }
<add> ```
<add>
<add> - Token-based login:
<add>
<add> ```
<add> {
<add> "registrytoken": "9cbaf023786cd7..."
<add> }
<add> ```
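<add>
<add> For illustration only (not part of the API itself), a client might build
<add> this header value by JSON-encoding the AuthConfig object and then
<add> base64-encoding the result:
<add>
<add> ```python
<add> import base64
<add> import json
<add>
<add> # Sketch of client-side header construction; the token value is a placeholder
<add> auth_config = {"registrytoken": "9cbaf023786cd7..."}
<add> encoded = base64.b64encode(json.dumps(auth_config).encode("utf-8"))
<add> headers = {"X-Registry-Auth": encoded.decode("ascii")}
<add> ```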
<ide>
<ide> Status Codes:
<ide> | 5 |
PHP | PHP | apply fixes from styleci | 6a42e1216210b3ad3f738ab27e51e850190d9a1f | <ide><path>src/Illuminate/Database/Eloquent/Builder.php
<ide> public function hydrate(array $items)
<ide>
<ide> $model->preventsLazyLoading = Model::preventsLazyLoading();
<ide>
<del>
<ide> return $model;
<ide> }, $items));
<ide> }
<ide><path>src/Illuminate/Database/Eloquent/Concerns/HasAttributes.php
<ide> use Illuminate\Database\Eloquent\JsonEncodingException;
<ide> use Illuminate\Database\Eloquent\Relations\Relation;
<ide> use Illuminate\Database\LazyLoadingViolationException;
<del>use Illuminate\Database\StrictLoadingViolationException;
<ide> use Illuminate\Support\Arr;
<ide> use Illuminate\Support\Carbon;
<ide> use Illuminate\Support\Collection as BaseCollection;
<ide><path>tests/Integration/Database/EloquentStrictLoadingTest.php
<ide>
<ide> use Illuminate\Database\Eloquent\Collection;
<ide> use Illuminate\Database\Eloquent\Model;
<del>use Illuminate\Database\Schema\Blueprint;
<ide> use Illuminate\Database\LazyLoadingViolationException;
<add>use Illuminate\Database\Schema\Blueprint;
<ide> use Illuminate\Support\Facades\Schema;
<ide>
<ide> /** | 3 |
Python | Python | remove obsolete defines | 4f22b8e666e1a4083ee9053c7d3516891b2e6c96 | <ide><path>numpy/core/setup.py
<ide> def generate_config_h(ext, build_dir):
<ide> else:
<ide> target_f.write('#define %s %s\n' % (d[0],d[1]))
<ide>
<del> # Keep those for backward compatibility for now
<del> target_f.write("""
<del>#ifdef HAVE_EXPL
<del>#define HAVE_LONGDOUBLE_FUNCS
<del>#endif
<del>
<del>#ifdef HAVE_EXPF
<del>#define HAVE_FLOAT_FUNCS
<del>#endif
<del>""")
<ide> target_f.close()
<ide> print 'File:',target
<ide> target_f = open(target) | 1 |
Javascript | Javascript | remove resetpose functionality | abdd1713c606135bc35028c6021698b52f27872b | <ide><path>examples/js/controls/VRControls.js
<ide> THREE.VRControls = function ( object, onError ) {
<ide>
<ide> };
<ide>
<del> this.resetPose = function () {
<del>
<del> if ( vrDisplay ) {
<del>
<del> vrDisplay.resetPose();
<del>
<del> }
<del>
<del> };
<del>
<del> this.resetSensor = function () {
<del>
<del> console.warn( 'THREE.VRControls: .resetSensor() is now .resetPose().' );
<del> this.resetPose();
<del>
<del> };
<del>
<del> this.zeroSensor = function () {
<del>
<del> console.warn( 'THREE.VRControls: .zeroSensor() is now .resetPose().' );
<del> this.resetPose();
<del>
<del> };
<del>
<ide> this.dispose = function () {
<ide>
<ide> vrDisplay = null; | 1 |
Python | Python | fix auto assigning of volume device in openstack | 25473a8fb9003ee15e5f4d17c2725a5be1540449 | <ide><path>libcloud/compute/drivers/openstack.py
<ide> def destroy_volume(self, volume):
<ide> def attach_volume(self, node, volume, device="auto"):
<ide> # when "auto" or None is provided for device, openstack will let
<ide> # the guest OS pick the next available device (fi. /dev/vdb)
<add> if device == "auto":
<add> device = None
<ide> return self.connection.request(
<ide> '/servers/%s/os-volume_attachments' % node.id,
<ide> method='POST', | 1 |
Go | Go | remove duplicated error judgement in cluster.go | 75c51ad76b0b5995e9349cb369c9e8664f40855c | <ide><path>daemon/cluster/cluster.go
<ide> func (c *Cluster) Inspect() (types.Swarm, error) {
<ide> return types.Swarm{}, err
<ide> }
<ide>
<del> if err != nil {
<del> return types.Swarm{}, err
<del> }
<del>
<ide> return convert.SwarmFromGRPC(*swarm), nil
<ide> }
<ide> | 1 |
Python | Python | fix begin_training if get_gold_tuples is none | bfe17b7df1192238b8c122cfd3b2c74bca9d0249 | <ide><path>examples/training/train_intent_parser.py
<ide> def main(model=None, output_dir=None, n_iter=100):
<ide>
<ide> other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'parser']
<ide> with nlp.disable_pipes(*other_pipes): # only train parser
<del> optimizer = nlp.begin_training(lambda: [])
<add> optimizer = nlp.begin_training()
<ide> for itn in range(n_iter):
<ide> random.shuffle(TRAIN_DATA)
<ide> losses = {}
<ide><path>examples/training/train_new_entity_type.py
<ide> def main(model=None, new_model_name='animal', output_dir=None, n_iter=50):
<ide> other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
<ide> with nlp.disable_pipes(*other_pipes): # only train NER
<ide> random.seed(0)
<del> optimizer = nlp.begin_training(lambda: [])
<add> optimizer = nlp.begin_training()
<ide> for itn in range(n_iter):
<ide> losses = {}
<ide> gold_parses = get_gold_parses(nlp.make_doc, TRAIN_DATA)
<ide><path>examples/training/train_parser.py
<ide> def main(model=None, output_dir=None, n_iter=1000):
<ide> # get names of other pipes to disable them during training
<ide> other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'parser']
<ide> with nlp.disable_pipes(*other_pipes): # only train parser
<del> optimizer = nlp.begin_training(lambda: [])
<add> optimizer = nlp.begin_training()
<ide> for itn in range(n_iter):
<ide> random.shuffle(TRAIN_DATA)
<ide> losses = {}
<ide><path>examples/training/train_tagger.py
<ide> def main(lang='en', output_dir=None, n_iter=25):
<ide> tagger = nlp.create_pipe('tagger')
<ide> nlp.add_pipe(tagger)
<ide>
<del> optimizer = nlp.begin_training(lambda: [])
<add> optimizer = nlp.begin_training()
<ide> for i in range(n_iter):
<ide> random.shuffle(TRAIN_DATA)
<ide> losses = {}
<ide><path>examples/training/train_textcat.py
<ide> def main(model=None, output_dir=None, n_iter=20):
<ide> # get names of other pipes to disable them during training
<ide> other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']
<ide> with nlp.disable_pipes(*other_pipes): # only train textcat
<del> optimizer = nlp.begin_training(lambda: [])
<add> optimizer = nlp.begin_training()
<ide> print("Training the model...")
<ide> print('{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F'))
<ide> for i in range(n_iter):
<ide><path>spacy/language.py
<ide> def begin_training(self, get_gold_tuples=None, **cfg):
<ide> **cfg: Config parameters.
<ide> RETURNS: An optimizer
<ide> """
<add> if get_gold_tuples is None:
<add> get_gold_tuples = lambda: []
<ide> # Populate vocab
<del> if get_gold_tuples is not None:
<add> else:
<ide> for _, annots_brackets in get_gold_tuples():
<ide> for annots, _ in annots_brackets:
<ide> for word in annots[1]: | 6 |
Text | Text | add information to the second paragraph. | 226de565f1ba229a07613ae465a72f7b07340339 | <ide><path>guide/english/html/index.md
<ide> HTML5 introduced a host of semantic elements. As discussed, HTML provides meanin
<ide> </body>
<ide> </html>
<ide> ```
<add>In HTML, tags come in pairs, as seen above. The first tag in a pair is called the *start tag* or the *opening tag*, and the second tag is called the *end tag* or the *closing tag*. The latter comes with a forward slash, which is inserted before the tag name.
<ide>
<ide> !DOCTYPE html: Defines this document to be HTML5
<ide> | 1 |
Javascript | Javascript | add tthey and xthey to ddescribe-iit check | 2404b77e481bf60e0c836107ca4e07aa8e2ad6d3 | <ide><path>Gruntfile.js
<ide> module.exports = function(grunt) {
<ide> 'test/**/*.js',
<ide> '!test/ngScenario/DescribeSpec.js',
<ide> '!src/ng/directive/attrs.js', // legitimate xit here
<del> '!src/ngScenario/**/*.js'
<del> ]
<add> '!src/ngScenario/**/*.js',
<add> '!test/helpers/privateMocks*.js'
<add> ],
<add> options: {
<add> disallowed: [
<add> 'iit',
<add> 'xit',
<add> 'tthey',
<add> 'xthey',
<add> 'ddescribe',
<add> 'xdescribe'
<add> ]
<add> }
<ide> },
<ide>
<ide> "merge-conflict": { | 1 |
Go | Go | change syslog format and facility | 05641ccffc5088a382fa3bfb21f1276ccb6c1fc0 | <ide><path>daemon/logger/syslog/syslog.go
<ide> type Syslog struct {
<ide> }
<ide>
<ide> func New(tag string) (logger.Logger, error) {
<del> log, err := syslog.New(syslog.LOG_USER, fmt.Sprintf("%s: <%s> ", path.Base(os.Args[0]), tag))
<add> log, err := syslog.New(syslog.LOG_DAEMON, fmt.Sprintf("%s/%s", path.Base(os.Args[0]), tag))
<ide> if err != nil {
<ide> return nil, err
<ide> } | 1 |
Mixed | Text | update docstrings and docs [ci skip] | 64f2f840983576f49f9c03f88a13e68a44650ea2 | <ide><path>spacy/pipeline/textcat.py
<ide> def predict(self, docs: Iterable[Doc]):
<ide> return scores
<ide>
<ide> def set_annotations(self, docs: Iterable[Doc], scores) -> None:
<del> """Modify a batch of documents, using pre-computed scores.
<add> """Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> docs (Iterable[Doc]): The documents to modify.
<ide> scores: The scores to set, produced by TextCategorizer.predict.
<ide><path>website/docs/api/dependencyparser.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## DependencyParser.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> >
<ide> Apply the pipeline's model to a batch of docs, without modifying them.
<ide>
<ide> ## DependencyParser.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> > #### Example
<ide> >
<ide><path>website/docs/api/entitylinker.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## EntityLinker.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them. Returns
<del>the KB IDs for each entity in each doc, including `NIL` if there is no
<del>prediction.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them. Returns the KB IDs for each entity in each doc, including `NIL`
<add>if there is no prediction.
<ide>
<ide> > #### Example
<ide> >
<ide><path>website/docs/api/entityrecognizer.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## EntityRecognizer.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> >
<ide> Apply the pipeline's model to a batch of docs, without modifying them.
<ide>
<ide> ## EntityRecognizer.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> > #### Example
<ide> >
<ide><path>website/docs/api/morphologizer.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## Morphologizer.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> >
<ide> Apply the pipeline's model to a batch of docs, without modifying them.
<ide>
<ide> ## Morphologizer.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> > #### Example
<ide> >
<ide> Modify a batch of documents, using pre-computed scores.
<ide>
<ide> ## Morphologizer.update {#update tag="method"}
<ide>
<del>Learn from a batch of documents and gold-standard information, updating the
<del>pipe's model. Delegates to [`predict`](/api/morphologizer#predict) and
<add>Learn from a batch of [`Example`](/api/example) objects containing the
<add>predictions and gold-standard annotations, and update the component's model.
<add>Delegates to [`predict`](/api/morphologizer#predict) and
<ide> [`get_loss`](/api/morphologizer#get_loss).
<ide>
<ide> > #### Example
<ide><path>website/docs/api/pipe.md
<ide> This class is a base class and **not instantiated directly**. Trainable pipeline
<ide> components like the [`EntityRecognizer`](/api/entityrecognizer) or
<ide> [`TextCategorizer`](/api/textcategorizer) inherit from it and it defines the
<ide> interface that components should follow to function as trainable components in a
<del>spaCy pipeline.
<add>spaCy pipeline. See the docs on
<add>[writing trainable components](/usage/processing-pipelines#trainable) for how to
<add>use the `Pipe` base class to implement custom components.
<add>
<add>> #### Why is Pipe implemented in Cython?
<add>>
<add>> The `Pipe` class is implemented in a `.pyx` module, the extension used by
<add>> [Cython](/api/cython). This is needed so that **other** Cython classes, like
<add>> the [`EntityRecognizer`](/api/entityrecognizer) can inherit from it. But it
<add>> doesn't mean you have to implement trainable components in Cython – pure
<add>> Python components like the [`TextCategorizer`](/api/textcategorizer) can also
<add>> inherit from `Pipe`.
<ide>
<ide> ```python
<ide> https://github.com/explosion/spaCy/blob/develop/spacy/pipeline/pipe.pyx
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## Pipe.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> <Infobox variant="danger">
<ide>
<ide> This method needs to be overwritten with your own custom `predict` method.
<ide>
<ide> ## Pipe.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> <Infobox variant="danger">
<ide>
<ide> method.
<ide>
<ide> ## Pipe.update {#update tag="method"}
<ide>
<del>Learn from a batch of documents and gold-standard information, updating the
<del>pipe's model. Delegates to [`predict`](/api/pipe#predict).
<add>Learn from a batch of [`Example`](/api/example) objects containing the
<add>predictions and gold-standard annotations, and update the component's model.
<ide>
<ide> <Infobox variant="danger">
<ide>
<ide><path>website/docs/api/sentencerecognizer.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## SentenceRecognizer.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> >
<ide> Apply the pipeline's model to a batch of docs, without modifying them.
<ide>
<ide> ## SentenceRecognizer.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> > #### Example
<ide> >
<ide> Modify a batch of documents, using pre-computed scores.
<ide>
<ide> ## SentenceRecognizer.update {#update tag="method"}
<ide>
<del>Learn from a batch of documents and gold-standard information, updating the
<del>pipe's model. Delegates to [`predict`](/api/sentencerecognizer#predict) and
<add>Learn from a batch of [`Example`](/api/example) objects containing the
<add>predictions and gold-standard annotations, and update the component's model.
<add>Delegates to [`predict`](/api/sentencerecognizer#predict) and
<ide> [`get_loss`](/api/sentencerecognizer#get_loss).
<ide>
<ide> > #### Example
<ide><path>website/docs/api/tagger.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## Tagger.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> >
<ide> Apply the pipeline's model to a batch of docs, without modifying them.
<ide>
<ide> ## Tagger.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> > #### Example
<ide> >
<ide> Modify a batch of documents, using pre-computed scores.
<ide>
<ide> ## Tagger.update {#update tag="method"}
<ide>
<del>Learn from a batch of documents and gold-standard information, updating the
<del>pipe's model. Delegates to [`predict`](/api/tagger#predict) and
<add>Learn from a batch of [`Example`](/api/example) objects containing the
<add>predictions and gold-standard annotations, and update the component's model.
<add>Delegates to [`predict`](/api/tagger#predict) and
<ide> [`get_loss`](/api/tagger#get_loss).
<ide>
<ide> > #### Example
<ide><path>website/docs/api/textcategorizer.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## TextCategorizer.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> >
<ide> Apply the pipeline's model to a batch of docs, without modifying them.
<ide>
<ide> ## TextCategorizer.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> > #### Example
<ide> >
<ide> Modify a batch of documents, using pre-computed scores.
<ide>
<ide> ## TextCategorizer.update {#update tag="method"}
<ide>
<del>Learn from a batch of documents and gold-standard information, updating the
<del>pipe's model. Delegates to [`predict`](/api/textcategorizer#predict) and
<add>Learn from a batch of [`Example`](/api/example) objects containing the
<add>predictions and gold-standard annotations, and update the component's model.
<add>Delegates to [`predict`](/api/textcategorizer#predict) and
<ide> [`get_loss`](/api/textcategorizer#get_loss).
<ide>
<ide> > #### Example
<ide><path>website/docs/api/tok2vec.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## Tok2Vec.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> >
<ide> Apply the pipeline's model to a batch of docs, without modifying them.
<ide>
<ide> ## Tok2Vec.set_annotations {#set_annotations tag="method"}
<ide>
<del>Modify a batch of documents, using pre-computed scores.
<add>Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores.
<ide>
<ide> > #### Example
<ide> >
<ide> Modify a batch of documents, using pre-computed scores.
<ide>
<ide> ## Tok2Vec.update {#update tag="method"}
<ide>
<del>Learn from a batch of documents and gold-standard information, updating the
<del>pipe's model. Delegates to [`predict`](/api/tok2vec#predict).
<add>Learn from a batch of [`Example`](/api/example) objects containing the
<add>predictions and gold-standard annotations, and update the component's model.
<add>Delegates to [`predict`](/api/tok2vec#predict).
<ide>
<ide> > #### Example
<ide> >
<ide><path>website/docs/api/transformer.md
<ide> Initialize the pipe for training, using data examples if available. Returns an
<ide>
<ide> ## Transformer.predict {#predict tag="method"}
<ide>
<del>Apply the pipeline's model to a batch of docs, without modifying them.
<add>Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
<add>modifying them.
<ide>
<ide> > #### Example
<ide> > | 11 |
Javascript | Javascript | translate statcmp.py to javascript | d7aba7b98b0a733a10c328bd62aefde915691732 | <ide><path>test/stats/statcmp.js
<add>/*jslint node: true */
<add>
<add>'use strict';
<add>
<add>var fs = require('fs');
<add>
<add>try {
<add> var ttest = require('ttest');
<add>} catch (e) {
<add> console.log('\nttest is not installed -- to install, run "npm install ttest"');
<add> console.log('Continuing without significance test...\n');
<add>}
<add>
<add>var VALID_GROUP_BYS = ['browser', 'pdf', 'page', 'round', 'stat'];
<add>
<add>function parseOptions() {
<add> var yargs = require('yargs')
<add> .usage('Compare the results of two stats files.\n' +
<add> 'Usage:\n $0 <BASELINE> <CURRENT> [options]')
<add> .demand(2)
<add> .string(['groupBy'])
<add> .describe('groupBy', 'How statistics should grouped. Valid options: ' +
<add> VALID_GROUP_BYS.join(' '))
<add> .default('groupBy', 'browser,stat');
<add> var result = yargs.argv;
<add> result.baseline = result._[0];
<add> result.current = result._[1];
<add> if (result.groupBy) {
<add> result.groupBy = result.groupBy.split(/[;, ]+/);
<add> }
<add> return result;
<add>}
<add>
<add>function group(stats, groupBy) {
<add> var vals = {};
<add> for (var i = 0; i < stats.length; i++) {
<add> var stat = stats[i];
<add> var keyArr = [];
<add> for (var j = 0; j < groupBy.length; j++) {
<add> keyArr.push(stat[groupBy[j]]);
<add> }
<add> var key = keyArr.join(',');
<add> if (vals[key] === undefined) {
<add> vals[key] = [];
<add> }
<add> vals[key].push(stat['time']);
<add> }
<add> return vals;
<add>}
<add>
<add>/*
<add> * Flatten the stats so that there's one row per stats entry.
<add> * Also, if results are not grouped by 'stat', keep only 'Overall' results.
<add> */
<add>function flatten(stats) {
<add> var rows = [];
<add> stats.forEach(function(stat) {
<add> stat['stats'].forEach(function(s) {
<add> rows.push({
<add> browser: stat['browser'],
<add> page: stat['page'],
<add> pdf: stat['pdf'],
<add> round: stat['round'],
<add> stat: s['name'],
<add> time: s['end'] - s['start']
<add> });
<add> });
<add> });
<add> // Use only overall results if not grouped by 'stat'
<add> if (options.groupBy.indexOf('stat') < 0) {
<add> rows = rows.filter(function(s) { return s.stat === 'Overall'; });
<add> }
<add> return rows;
<add>}
<add>
<add>function pad(s, length, dir /* default: 'right' */) {
<add> s = '' + s;
<add> var spaces = new Array(Math.max(0, length - s.length + 1)).join(' ');
<add> return dir === 'left' ? spaces + s : s + spaces;
<add>}
<add>
<add>function mean(array) {
<add> function add(a, b) {
<add> return a + b;
<add> }
<add> return array.reduce(add, 0) / array.length;
<add>}
<add>
<add>/* Comparator for row key sorting. */
<add>function compareRow(a, b) {
<add> a = a.split(',');
<add> b = b.split(',');
<add> for (var i = 0; i < Math.min(a.length, b.length); i++) {
<add> var intA = parseInt(a[i], 10);
<add> var intB = parseInt(b[i], 10);
<add> var ai = isNaN(intA) ? a[i] : intA;
<add> var bi = isNaN(intB) ? b[i] : intB;
<add> if (ai < bi) {
<add> return -1;
<add> }
<add> if (ai > bi) {
<add> return 1;
<add> }
<add> }
<add> return 0;
<add>}
<add>
<add>/*
<add> * Dump various stats in a table to compare the baseline and current results.
<add> * T-test Refresher:
<add> * If I understand t-test correctly, p is the probability that we'll observe
<add> * another test that is as extreme as the current result assuming the null
<add> * hypothesis is true. P is NOT the probability of the null hypothesis. The null
<add> * hypothesis in this case is that the baseline and current results will be the
<add> * same. It is generally accepted that you can reject the null hypothesis if the
<add> * p-value is less than 0.05. So if p < 0.05 we can reject the results are the
<add> * same which doesn't necessarily mean the results are faster/slower but it can
<add> * be implied.
<add> */
<add>function stat(baseline, current) {
<add> var baselineGroup = group(baseline, options.groupBy);
<add> var currentGroup = group(current, options.groupBy);
<add>
<add> var keys = Object.keys(baselineGroup);
<add> keys.sort(compareRow);
<add>
<add> var labels = options.groupBy.slice(0);
<add> labels.push('Count', 'Baseline(ms)', 'Current(ms)', '+/-', '% ');
<add> if (ttest) {
<add> labels.push('Result(P<.05)');
<add> }
<add> var i, row, rows = [];
<add> // collect rows and measure column widths
<add> var width = labels.map(function(s) { return s.length; });
<add> rows.push(labels);
<add> for (var k = 0; k < keys.length; k++) {
<add> var key = keys[k];
<add> var baselineMean = mean(baselineGroup[key]);
<add> var currentMean = mean(currentGroup[key]);
<add> row = key.split(',');
<add> row.push('' + baselineGroup[key].length,
<add> '' + Math.round(baselineMean),
<add> '' + Math.round(currentMean),
<add> '' + Math.round(currentMean - baselineMean),
<add> (100 * (currentMean - baselineMean) / baselineMean).toFixed(2));
<add> if (ttest) {
<add> var p = (baselineGroup[key].length < 2) ? 1 :
<add> ttest(baselineGroup[key], currentGroup[key]).pValue();
<add> if (p < 0.05) {
<add> row.push(currentMean < baselineMean ? 'faster' : 'slower');
<add> } else {
<add> row.push('');
<add> }
<add> }
<add> for (i = 0; i < row.length; i++) {
<add> width[i] = Math.max(width[i], row[i].length);
<add> }
<add> rows.push(row);
<add> }
<add>
<add> // add horizontal line
<add> var hline = width.map(function(w) { return new Array(w+1).join('-'); });
<add> rows.splice(1, 0, hline);
<add>
<add> // print output
<add> console.log('-- Grouped By ' + options.groupBy.join(', ') + ' --');
<add> var groupCount = options.groupBy.length;
<add> for (var r = 0; r < rows.length; r++) {
<add> row = rows[r];
<add> for (i = 0; i < row.length; i++) {
<add> row[i] = pad(row[i], width[i], (i < groupCount) ? 'right' : 'left');
<add> }
<add> console.log(row.join(' | '));
<add> }
<add>}
<add>
<add>function main() {
<add> var baseline, current;
<add> try {
<add> var baselineFile = fs.readFileSync(options.baseline).toString();
<add> baseline = flatten(JSON.parse(baselineFile));
<add> } catch(e) {
<add> console.log('Error reading file "' + options.baseline + '": ' + e);
<add> process.exit(0);
<add> }
<add> try {
<add> var currentFile = fs.readFileSync(options.current).toString();
<add> current = flatten(JSON.parse(currentFile));
<add> } catch(e) {
<add> console.log('Error reading file "' + options.current + '": ' + e);
<add> process.exit(0);
<add> }
<add> stat(baseline, current);
<add>}
<add>
<add>var options = parseOptions();
<add>main(); | 1 |
Javascript | Javascript | note limitations with ipv6 addresses | 0633d8f2b0ac6cbad1c637768258b1f72994a614 | <ide><path>src/ng/directive/input.js
<ide> var inputType = {
<ide> *
<ide> * <div class="alert alert-warning">
<ide> * **Note:** `input[email]` uses a regex to validate email addresses that is derived from the regex
<del> * used in Chromium. If you need stricter validation (e.g. requiring a top-level domain), you can
<del> * use `ng-pattern` or modify the built-in validators (see the {@link guide/forms Forms guide})
<add> * used in Chromium, which may not fulfill your app's requirements.
<add> * If you need stricter validation (e.g. requiring a top-level domain) or more
<add> * relaxed validation (e.g. allowing IPv6 address literals), you can use `ng-pattern` or
<add> * modify the built-in validators (see the {@link guide/forms Forms guide}).
<ide> * </div>
<ide> *
<ide> * @param {string} ngModel Assignable AngularJS expression to data-bind to. | 1 |
Javascript | Javascript | do nothing except disable time slicing | 48740429b4a74e984193e4e2d364d461e4fdc3be | <ide><path>packages/react-reconciler/src/ReactFiberLane.new.js
<ide> export function markStarvedLanesAsExpired(
<ide> // expiration time. If so, we'll assume the update is being starved and mark
<ide> // it as expired to force it to finish.
<ide> let lanes = pendingLanes;
<del> let expiredLanes = 0;
<ide> while (lanes > 0) {
<ide> const index = pickArbitraryLaneIndex(lanes);
<ide> const lane = 1 << index;
<ide> export function markStarvedLanesAsExpired(
<ide> }
<ide> } else if (expirationTime <= currentTime) {
<ide> // This lane expired
<del> expiredLanes |= lane;
<add> root.expiredLanes |= lane;
<ide> }
<ide>
<ide> lanes &= ~lane;
<ide> }
<del>
<del> if (expiredLanes !== 0) {
<del> markRootExpired(root, expiredLanes);
<del> }
<ide> }
<ide>
<ide> // This returns the highest priority pending lanes regardless of whether they
<ide> export function includesOnlyTransitions(lanes: Lanes) {
<ide> }
<ide>
<ide> export function shouldTimeSlice(root: FiberRoot, lanes: Lanes) {
<del> if (!enableSyncDefaultUpdates) {
<add> if ((lanes & root.expiredLanes) !== NoLanes) {
<add> // At least one of these lanes expired. To prevent additional starvation,
<add> // finish rendering without yielding execution.
<add> return false;
<add> }
<add> if (enableSyncDefaultUpdates) {
<add> const SyncDefaultLanes =
<add> InputContinuousHydrationLane |
<add> InputContinuousLane |
<add> DefaultHydrationLane |
<add> DefaultLane;
<add> // TODO: Check for root override, once that lands
<add> return (lanes & SyncDefaultLanes) === NoLanes;
<add> } else {
<ide> return true;
<ide> }
<del> const SyncDefaultLanes =
<del> InputContinuousHydrationLane |
<del> InputContinuousLane |
<del> DefaultHydrationLane |
<del> DefaultLane;
<del> // TODO: Check for root override, once that lands
<del> return (lanes & SyncDefaultLanes) === NoLanes;
<ide> }
<ide>
<ide> export function isTransitionLane(lane: Lane) {
<ide> export function markRootPinged(
<ide> root.pingedLanes |= root.suspendedLanes & pingedLanes;
<ide> }
<ide>
<del>export function markRootExpired(root: FiberRoot, expiredLanes: Lanes) {
<del> const entanglements = root.entanglements;
<del> const SyncLaneIndex = 0;
<del> entanglements[SyncLaneIndex] |= expiredLanes;
<del> root.entangledLanes |= SyncLane;
<del> root.pendingLanes |= SyncLane;
<del>}
<del>
<ide> export function markRootMutableRead(root: FiberRoot, updateLane: Lane) {
<ide> root.mutableReadLanes |= updateLane & root.pendingLanes;
<ide> }
<ide> export function markRootFinished(root: FiberRoot, remainingLanes: Lanes) {
<ide> root.suspendedLanes = 0;
<ide> root.pingedLanes = 0;
<ide>
<add> root.expiredLanes &= remainingLanes;
<ide> root.mutableReadLanes &= remainingLanes;
<ide>
<ide> root.entangledLanes &= remainingLanes;
<ide><path>packages/react-reconciler/src/ReactFiberLane.old.js
<ide> export function markStarvedLanesAsExpired(
<ide> // expiration time. If so, we'll assume the update is being starved and mark
<ide> // it as expired to force it to finish.
<ide> let lanes = pendingLanes;
<del> let expiredLanes = 0;
<ide> while (lanes > 0) {
<ide> const index = pickArbitraryLaneIndex(lanes);
<ide> const lane = 1 << index;
<ide> export function markStarvedLanesAsExpired(
<ide> }
<ide> } else if (expirationTime <= currentTime) {
<ide> // This lane expired
<del> expiredLanes |= lane;
<add> root.expiredLanes |= lane;
<ide> }
<ide>
<ide> lanes &= ~lane;
<ide> }
<del>
<del> if (expiredLanes !== 0) {
<del> markRootExpired(root, expiredLanes);
<del> }
<ide> }
<ide>
<ide> // This returns the highest priority pending lanes regardless of whether they
<ide> export function includesOnlyTransitions(lanes: Lanes) {
<ide> }
<ide>
<ide> export function shouldTimeSlice(root: FiberRoot, lanes: Lanes) {
<del> if (!enableSyncDefaultUpdates) {
<add> if ((lanes & root.expiredLanes) !== NoLanes) {
<add> // At least one of these lanes expired. To prevent additional starvation,
<add> // finish rendering without yielding execution.
<add> return false;
<add> }
<add> if (enableSyncDefaultUpdates) {
<add> const SyncDefaultLanes =
<add> InputContinuousHydrationLane |
<add> InputContinuousLane |
<add> DefaultHydrationLane |
<add> DefaultLane;
<add> // TODO: Check for root override, once that lands
<add> return (lanes & SyncDefaultLanes) === NoLanes;
<add> } else {
<ide> return true;
<ide> }
<del> const SyncDefaultLanes =
<del> InputContinuousHydrationLane |
<del> InputContinuousLane |
<del> DefaultHydrationLane |
<del> DefaultLane;
<del> // TODO: Check for root override, once that lands
<del> return (lanes & SyncDefaultLanes) === NoLanes;
<ide> }
<ide>
<ide> export function isTransitionLane(lane: Lane) {
<ide> export function markRootPinged(
<ide> root.pingedLanes |= root.suspendedLanes & pingedLanes;
<ide> }
<ide>
<del>export function markRootExpired(root: FiberRoot, expiredLanes: Lanes) {
<del> const entanglements = root.entanglements;
<del> const SyncLaneIndex = 0;
<del> entanglements[SyncLaneIndex] |= expiredLanes;
<del> root.entangledLanes |= SyncLane;
<del> root.pendingLanes |= SyncLane;
<del>}
<del>
<ide> export function markRootMutableRead(root: FiberRoot, updateLane: Lane) {
<ide> root.mutableReadLanes |= updateLane & root.pendingLanes;
<ide> }
<ide> export function markRootFinished(root: FiberRoot, remainingLanes: Lanes) {
<ide> root.suspendedLanes = 0;
<ide> root.pingedLanes = 0;
<ide>
<add> root.expiredLanes &= remainingLanes;
<ide> root.mutableReadLanes &= remainingLanes;
<ide>
<ide> root.entangledLanes &= remainingLanes;
<ide><path>packages/react-reconciler/src/ReactFiberRoot.new.js
<ide> function FiberRootNode(containerInfo, tag, hydrate) {
<ide> this.pendingLanes = NoLanes;
<ide> this.suspendedLanes = NoLanes;
<ide> this.pingedLanes = NoLanes;
<add> this.expiredLanes = NoLanes;
<ide> this.mutableReadLanes = NoLanes;
<ide> this.finishedLanes = NoLanes;
<ide>
<ide><path>packages/react-reconciler/src/ReactFiberRoot.old.js
<ide> function FiberRootNode(containerInfo, tag, hydrate) {
<ide> this.pendingLanes = NoLanes;
<ide> this.suspendedLanes = NoLanes;
<ide> this.pingedLanes = NoLanes;
<add> this.expiredLanes = NoLanes;
<ide> this.mutableReadLanes = NoLanes;
<ide> this.finishedLanes = NoLanes;
<ide>
<ide><path>packages/react-reconciler/src/ReactFiberWorkLoop.new.js
<ide> import {
<ide> markRootUpdated,
<ide> markRootSuspended as markRootSuspended_dontCallThisOneDirectly,
<ide> markRootPinged,
<del> markRootExpired,
<add> markRootEntangled,
<ide> markRootFinished,
<ide> getHighestPriorityLane,
<ide> addFiberToLanesMap,
<ide> function performConcurrentWorkOnRoot(root, didTimeout) {
<ide> return null;
<ide> }
<ide>
<add> // We disable time-slicing in some cases: if the work has been CPU-bound
<add> // for too long ("expired" work, to prevent starvation), or we're in
<add> // sync-updates-by-default mode.
<ide> // TODO: We only check `didTimeout` defensively, to account for a Scheduler
<ide> // bug we're still investigating. Once the bug in Scheduler is fixed,
<ide> // we can remove this, since we track expiration ourselves.
<del> if (!disableSchedulerTimeoutInWorkLoop && didTimeout) {
<del> // Something expired. Flush synchronously until there's no expired
<del> // work left.
<del> markRootExpired(root, lanes);
<del> // This will schedule a synchronous callback.
<del> ensureRootIsScheduled(root, now());
<del> return null;
<del> }
<del>
<del> let exitStatus = shouldTimeSlice(root, lanes)
<del> ? renderRootConcurrent(root, lanes)
<del> : // Time slicing is disabled for default updates in this root.
<del> renderRootSync(root, lanes);
<add> let exitStatus =
<add> shouldTimeSlice(root, lanes) &&
<add> (disableSchedulerTimeoutInWorkLoop || !didTimeout)
<add> ? renderRootConcurrent(root, lanes)
<add> : renderRootSync(root, lanes);
<ide> if (exitStatus !== RootIncomplete) {
<ide> if (exitStatus === RootErrored) {
<ide> executionContext |= RetryAfterError;
<ide> function performSyncWorkOnRoot(root) {
<ide> flushPassiveEffects();
<ide>
<ide> let lanes = getNextLanes(root, NoLanes);
<del> if (includesSomeLane(lanes, SyncLane)) {
<del> if (
<del> root === workInProgressRoot &&
<del> includesSomeLane(lanes, workInProgressRootRenderLanes)
<del> ) {
<del> // There's a partial tree, and at least one of its lanes has expired. Finish
<del> // rendering it before rendering the rest of the expired work.
<del> lanes = workInProgressRootRenderLanes;
<del> }
<del> } else {
<add> if (!includesSomeLane(lanes, SyncLane)) {
<ide> // There's no remaining sync work left.
<ide> ensureRootIsScheduled(root, now());
<ide> return null;
<ide> function performSyncWorkOnRoot(root) {
<ide> return null;
<ide> }
<ide>
<del>// TODO: Do we still need this API? I think we can delete it. Was only used
<del>// internally.
<ide> export function flushRoot(root: FiberRoot, lanes: Lanes) {
<ide> if (lanes !== NoLanes) {
<del> markRootExpired(root, lanes);
<add> markRootEntangled(root, mergeLanes(lanes, SyncLane));
<ide> ensureRootIsScheduled(root, now());
<ide> if ((executionContext & (RenderContext | CommitContext)) === NoContext) {
<ide> resetRenderTimer();
<ide><path>packages/react-reconciler/src/ReactFiberWorkLoop.old.js
<ide> import {
<ide> markRootUpdated,
<ide> markRootSuspended as markRootSuspended_dontCallThisOneDirectly,
<ide> markRootPinged,
<del> markRootExpired,
<add> markRootEntangled,
<ide> markRootFinished,
<ide> getHighestPriorityLane,
<ide> addFiberToLanesMap,
<ide> function performConcurrentWorkOnRoot(root, didTimeout) {
<ide> return null;
<ide> }
<ide>
<add> // We disable time-slicing in some cases: if the work has been CPU-bound
<add> // for too long ("expired" work, to prevent starvation), or we're in
<add> // sync-updates-by-default mode.
<ide> // TODO: We only check `didTimeout` defensively, to account for a Scheduler
<ide> // bug we're still investigating. Once the bug in Scheduler is fixed,
<ide> // we can remove this, since we track expiration ourselves.
<del> if (!disableSchedulerTimeoutInWorkLoop && didTimeout) {
<del> // Something expired. Flush synchronously until there's no expired
<del> // work left.
<del> markRootExpired(root, lanes);
<del> // This will schedule a synchronous callback.
<del> ensureRootIsScheduled(root, now());
<del> return null;
<del> }
<del>
<del> let exitStatus = shouldTimeSlice(root, lanes)
<del> ? renderRootConcurrent(root, lanes)
<del> : // Time slicing is disabled for default updates in this root.
<del> renderRootSync(root, lanes);
<add> let exitStatus =
<add> shouldTimeSlice(root, lanes) &&
<add> (disableSchedulerTimeoutInWorkLoop || !didTimeout)
<add> ? renderRootConcurrent(root, lanes)
<add> : renderRootSync(root, lanes);
<ide> if (exitStatus !== RootIncomplete) {
<ide> if (exitStatus === RootErrored) {
<ide> executionContext |= RetryAfterError;
<ide> function performSyncWorkOnRoot(root) {
<ide> flushPassiveEffects();
<ide>
<ide> let lanes = getNextLanes(root, NoLanes);
<del> if (includesSomeLane(lanes, SyncLane)) {
<del> if (
<del> root === workInProgressRoot &&
<del> includesSomeLane(lanes, workInProgressRootRenderLanes)
<del> ) {
<del> // There's a partial tree, and at least one of its lanes has expired. Finish
<del> // rendering it before rendering the rest of the expired work.
<del> lanes = workInProgressRootRenderLanes;
<del> }
<del> } else {
<add> if (!includesSomeLane(lanes, SyncLane)) {
<ide> // There's no remaining sync work left.
<ide> ensureRootIsScheduled(root, now());
<ide> return null;
<ide> function performSyncWorkOnRoot(root) {
<ide> return null;
<ide> }
<ide>
<del>// TODO: Do we still need this API? I think we can delete it. Was only used
<del>// internally.
<ide> export function flushRoot(root: FiberRoot, lanes: Lanes) {
<ide> if (lanes !== NoLanes) {
<del> markRootExpired(root, lanes);
<add> markRootEntangled(root, mergeLanes(lanes, SyncLane));
<ide> ensureRootIsScheduled(root, now());
<ide> if ((executionContext & (RenderContext | CommitContext)) === NoContext) {
<ide> resetRenderTimer();
<ide><path>packages/react-reconciler/src/ReactInternalTypes.js
<ide> type BaseFiberRootProperties = {|
<ide> pendingLanes: Lanes,
<ide> suspendedLanes: Lanes,
<ide> pingedLanes: Lanes,
<add> expiredLanes: Lanes,
<ide> mutableReadLanes: Lanes,
<ide>
<ide> finishedLanes: Lanes,
<ide><path>packages/react-reconciler/src/__tests__/ReactExpiration-test.js
<ide> let Scheduler;
<ide> let readText;
<ide> let resolveText;
<ide> let startTransition;
<add>let useState;
<add>let useEffect;
<ide>
<ide> describe('ReactExpiration', () => {
<ide> beforeEach(() => {
<ide> describe('ReactExpiration', () => {
<ide> ReactNoop = require('react-noop-renderer');
<ide> Scheduler = require('scheduler');
<ide> startTransition = React.unstable_startTransition;
<add> useState = React.useState;
<add> useEffect = React.useEffect;
<ide>
<ide> const textCache = new Map();
<ide>
<ide> describe('ReactExpiration', () => {
<ide> });
<ide>
<ide> // @gate experimental || !enableSyncDefaultUpdates
<del> it('prevents starvation by sync updates', async () => {
<del> const {useState} = React;
<del>
<add> it('prevents starvation by sync updates by disabling time slicing if too much time has elapsed', async () => {
<ide> let updateSyncPri;
<ide> let updateNormalPri;
<ide> function App() {
<ide> describe('ReactExpiration', () => {
<ide> }
<ide> expect(Scheduler).toFlushAndYieldThrough(['Sync pri: 0']);
<ide> updateSyncPri();
<add> expect(Scheduler).toHaveYielded(['Sync pri: 1', 'Normal pri: 0']);
<add>
<add> // The remaining work hasn't expired, so the render phase is time sliced.
<add> // In other words, we can flush just the first child without flushing
<add> // the rest.
<add> Scheduler.unstable_flushNumberOfYields(1);
<add> // Yield right after first child.
<add> expect(Scheduler).toHaveYielded(['Sync pri: 1']);
<add> // Now do the rest.
<add> expect(Scheduler).toFlushAndYield(['Normal pri: 1']);
<ide> });
<del> expect(Scheduler).toHaveYielded([
<del> // Interrupt high pri update to render sync update
<del> 'Sync pri: 1',
<del> 'Normal pri: 0',
<del> // Now render normal pri
<del> 'Sync pri: 1',
<del> 'Normal pri: 1',
<del> ]);
<ide> expect(root).toMatchRenderedOutput('Sync pri: 1, Normal pri: 1');
<ide>
<ide> // Do the same thing, but starve the first update
<ide> describe('ReactExpiration', () => {
<ide> // starvation of normal priority updates.)
<ide> Scheduler.unstable_advanceTime(10000);
<ide>
<del> // So when we get a high pri update, we shouldn't interrupt
<ide> updateSyncPri();
<add> expect(Scheduler).toHaveYielded(['Sync pri: 2', 'Normal pri: 1']);
<add>
<add> // The remaining work _has_ expired, so the render phase is _not_ time
<add> // sliced. Attempting to flush just the first child also flushes the rest.
<add> Scheduler.unstable_flushNumberOfYields(1);
<add> expect(Scheduler).toHaveYielded(['Sync pri: 2', 'Normal pri: 2']);
<ide> });
<del> expect(Scheduler).toHaveYielded([
<del> // Finish normal pri update
<del> 'Normal pri: 2',
<del> // Then do high pri update
<del> 'Sync pri: 2',
<del> 'Normal pri: 2',
<del> ]);
<ide> expect(root).toMatchRenderedOutput('Sync pri: 2, Normal pri: 2');
<ide> });
<ide>
<ide> it('idle work never expires', async () => {
<del> const {useState} = React;
<del>
<ide> let updateSyncPri;
<ide> let updateIdlePri;
<ide> function App() {
<ide> describe('ReactExpiration', () => {
<ide> });
<ide>
<ide> // @gate experimental
<del> it('a single update can expire without forcing all other updates to expire', async () => {
<del> const {useState} = React;
<del>
<del> let updateHighPri;
<del> let updateNormalPri;
<add> it('when multiple lanes expire, we can finish the in-progress one without including the others', async () => {
<add> let setA;
<add> let setB;
<ide> function App() {
<del> const [highPri, setHighPri] = useState(0);
<del> const [normalPri, setNormalPri] = useState(0);
<del> updateHighPri = () => ReactNoop.flushSync(() => setHighPri(n => n + 1));
<del> updateNormalPri = () => setNormalPri(n => n + 1);
<add> const [a, _setA] = useState(0);
<add> const [b, _setB] = useState(0);
<add> setA = _setA;
<add> setB = _setB;
<ide> return (
<ide> <>
<del> <Text text={'High pri: ' + highPri} />
<del> {', '}
<del> <Text text={'Normal pri: ' + normalPri} />
<del> {', '}
<del> <Text text="Sibling" />
<add> <Text text={'A' + a} />
<add> <Text text={'B' + b} />
<add> <Text text="C" />
<ide> </>
<ide> );
<ide> }
<ide> describe('ReactExpiration', () => {
<ide> await ReactNoop.act(async () => {
<ide> root.render(<App />);
<ide> });
<del> expect(Scheduler).toHaveYielded([
<del> 'High pri: 0',
<del> 'Normal pri: 0',
<del> 'Sibling',
<del> ]);
<del> expect(root).toMatchRenderedOutput('High pri: 0, Normal pri: 0, Sibling');
<add> expect(Scheduler).toHaveYielded(['A0', 'B0', 'C']);
<add> expect(root).toMatchRenderedOutput('A0B0C');
<ide>
<ide> await ReactNoop.act(async () => {
<del> // Partially render an update
<ide> startTransition(() => {
<del> updateNormalPri();
<add> setA(1);
<ide> });
<del> expect(Scheduler).toFlushAndYieldThrough(['High pri: 0']);
<del>
<del> // Some time goes by. Schedule another update.
<del> // This will be placed into a separate batch.
<del> Scheduler.unstable_advanceTime(4000);
<del>
<add> expect(Scheduler).toFlushAndYieldThrough(['A1']);
<ide> startTransition(() => {
<del> updateNormalPri();
<del> });
<del> // Keep rendering the first update
<del> expect(Scheduler).toFlushAndYieldThrough(['Normal pri: 1']);
<del> // More time goes by. Enough to expire the first batch, but not the
<del> // second one.
<del> Scheduler.unstable_advanceTime(1000);
<del> // Attempt to interrupt with a high pri update.
<del> await ReactNoop.act(async () => {
<del> updateHighPri();
<add> setB(1);
<ide> });
<del>
<del> expect(Scheduler).toHaveYielded([
<del> // The first update expired
<del> 'Sibling',
<del> // Then render the high pri update
<del> 'High pri: 1',
<del> 'Normal pri: 1',
<del> 'Sibling',
<del> // Then the second normal pri update
<del> 'High pri: 1',
<del> 'Normal pri: 2',
<del> 'Sibling',
<del> ]);
<add> // Expire both the transitions
<add> Scheduler.unstable_advanceTime(10000);
<add> // Both transitions have expired, but since they aren't related
<add> // (entangled), we should be able to finish the in-progress transition
<add> // without also including the next one.
<add> Scheduler.unstable_flushNumberOfYields(1);
<add> expect(Scheduler).toHaveYielded(['B0', 'C']);
<add> expect(root).toMatchRenderedOutput('A1B0C');
<add>
<add> // The next transition also finishes without yielding.
<add> Scheduler.unstable_flushNumberOfYields(1);
<add> expect(Scheduler).toHaveYielded(['A1', 'B1', 'C']);
<add> expect(root).toMatchRenderedOutput('A1B1C');
<ide> });
<ide> });
<ide>
<ide> // @gate experimental || !enableSyncDefaultUpdates
<del> it('detects starvation in multiple batches', async () => {
<del> const {useState} = React;
<add> it('updates do not expire while they are IO-bound', async () => {
<add> const {Suspense} = React;
<ide>
<del> let updateHighPri;
<del> let updateNormalPri;
<del> function App() {
<del> const [highPri, setHighPri] = useState(0);
<del> const [normalPri, setNormalPri] = useState(0);
<del> updateHighPri = () => {
<del> ReactNoop.flushSync(() => {
<del> setHighPri(n => n + 1);
<del> });
<del> };
<del> updateNormalPri = () => setNormalPri(n => n + 1);
<add> function App({step}) {
<ide> return (
<del> <>
<del> <Text text={'High pri: ' + highPri} />
<del> {', '}
<del> <Text text={'Normal pri: ' + normalPri} />
<del> {', '}
<del> <Text text="Sibling" />
<del> </>
<add> <Suspense fallback={<Text text="Loading..." />}>
<add> <AsyncText text={'A' + step} />
<add> <Text text="B" />
<add> <Text text="C" />
<add> </Suspense>
<ide> );
<ide> }
<ide>
<ide> const root = ReactNoop.createRoot();
<ide> await ReactNoop.act(async () => {
<del> root.render(<App />);
<add> await resolveText('A0');
<add> root.render(<App step={0} />);
<ide> });
<del> expect(Scheduler).toHaveYielded([
<del> 'High pri: 0',
<del> 'Normal pri: 0',
<del> 'Sibling',
<del> ]);
<del> expect(root).toMatchRenderedOutput('High pri: 0, Normal pri: 0, Sibling');
<add> expect(Scheduler).toHaveYielded(['A0', 'B', 'C']);
<add> expect(root).toMatchRenderedOutput('A0BC');
<ide>
<ide> await ReactNoop.act(async () => {
<del> // Partially render an update
<ide> if (gate(flags => flags.enableSyncDefaultUpdates)) {
<ide> React.unstable_startTransition(() => {
<del> updateNormalPri();
<add> root.render(<App step={1} />);
<ide> });
<ide> } else {
<del> updateNormalPri();
<add> root.render(<App step={1} />);
<ide> }
<del> expect(Scheduler).toFlushAndYieldThrough(['High pri: 0']);
<del> // Some time goes by. In an interleaved event, schedule another update.
<del> // This will be placed into a separate batch.
<del> Scheduler.unstable_advanceTime(4000);
<del> updateNormalPri();
<del> // Keep rendering the first update
<del> expect(Scheduler).toFlushAndYieldThrough(['Normal pri: 1']);
<del> // More time goes by. This expires both of the updates just scheduled.
<add> expect(Scheduler).toFlushAndYield([
<add> 'Suspend! [A1]',
<add> 'B',
<add> 'C',
<add> 'Loading...',
<add> ]);
<add>
<add> // Lots of time elapses before the promise resolves
<ide> Scheduler.unstable_advanceTime(10000);
<del> expect(Scheduler).toHaveYielded([]);
<add> await resolveText('A1');
<add> expect(Scheduler).toHaveYielded(['Promise resolved [A1]']);
<ide>
<del> // Attempt to interrupt with a high pri update.
<del> updateHighPri();
<del>
<del> // Both normal pri updates should have expired.
<del> // The sync update and the expired normal pri updates render in a
<del> // single batch.
<del> expect(Scheduler).toHaveYielded([
<del> 'Sibling',
<del> 'High pri: 1',
<del> 'Normal pri: 2',
<del> 'Sibling',
<del> ]);
<add>      // But the update doesn't expire, because it was IO bound. So we can
<add>      // partially render without finishing.
<add> expect(Scheduler).toFlushAndYieldThrough(['A1']);
<add> expect(root).toMatchRenderedOutput('A0BC');
<add>
<add> // Lots more time elapses. We're CPU-bound now, so we should treat this
<add> // as starvation.
<add> Scheduler.unstable_advanceTime(10000);
<add>
<add> // The rest of the update finishes without yielding.
<add> Scheduler.unstable_flushNumberOfYields(1);
<add> expect(Scheduler).toHaveYielded(['B', 'C']);
<ide> });
<ide> });
<ide>
<del> // @gate experimental || !enableSyncDefaultUpdates
<del> it('updates do not expire while they are IO-bound', async () => {
<del> const {Suspense} = React;
<del>
<del> function App({text}) {
<add> // @gate experimental
<add> it('flushSync should not affect expired work', async () => {
<add> let setA;
<add> let setB;
<add> function App() {
<add> const [a, _setA] = useState(0);
<add> const [b, _setB] = useState(0);
<add> setA = _setA;
<add> setB = _setB;
<ide> return (
<del> <Suspense fallback={<Text text="Loading..." />}>
<del> <AsyncText text={text} />
<del> {', '}
<del> <Text text="Sibling" />
<del> </Suspense>
<add> <>
<add> <Text text={'A' + a} />
<add> <Text text={'B' + b} />
<add> </>
<ide> );
<ide> }
<ide>
<ide> const root = ReactNoop.createRoot();
<ide> await ReactNoop.act(async () => {
<del> await resolveText('A');
<del> root.render(<App text="A" />);
<add> root.render(<App />);
<ide> });
<del> expect(Scheduler).toHaveYielded(['A', 'Sibling']);
<del> expect(root).toMatchRenderedOutput('A, Sibling');
<add> expect(Scheduler).toHaveYielded(['A0', 'B0']);
<ide>
<ide> await ReactNoop.act(async () => {
<del> if (gate(flags => flags.enableSyncDefaultUpdates)) {
<del> React.unstable_startTransition(() => {
<del> root.render(<App text="B" />);
<del> });
<del> } else {
<del> root.render(<App text="B" />);
<del> }
<del> expect(Scheduler).toFlushAndYield([
<del> 'Suspend! [B]',
<del> 'Sibling',
<del> 'Loading...',
<del> ]);
<add> startTransition(() => {
<add> setA(1);
<add> });
<add> expect(Scheduler).toFlushAndYieldThrough(['A1']);
<ide>
<del> // Lots of time elapses before the promise resolves
<add> // Expire the in-progress update
<ide> Scheduler.unstable_advanceTime(10000);
<del> await resolveText('B');
<del> expect(Scheduler).toHaveYielded(['Promise resolved [B]']);
<ide>
<del> // But the update doesn't expire, because it was IO bound. So we can
<del> // partially rendering without finishing.
<del> expect(Scheduler).toFlushAndYieldThrough(['B']);
<del> expect(root).toMatchRenderedOutput('A, Sibling');
<add> ReactNoop.flushSync(() => {
<add> setB(1);
<add> });
<add> expect(Scheduler).toHaveYielded(['A0', 'B1']);
<ide>
<del> // Lots more time elapses. We're CPU-bound now, so we should treat this
<del> // as starvation.
<add> // Now flush the original update. Because it expired, it should finish
<add> // without yielding.
<add> Scheduler.unstable_flushNumberOfYields(1);
<add> expect(Scheduler).toHaveYielded(['A1', 'B1']);
<add> });
<add> });
<add>
<add> // @gate experimental
<add> it('passive effects of expired update flush after paint', async () => {
<add> function App({step}) {
<add> useEffect(() => {
<add> Scheduler.unstable_yieldValue('Effect: ' + step);
<add> }, [step]);
<add> return (
<add> <>
<add> <Text text={'A' + step} />
<add> <Text text={'B' + step} />
<add> <Text text={'C' + step} />
<add> </>
<add> );
<add> }
<add>
<add> const root = ReactNoop.createRoot();
<add> await ReactNoop.act(async () => {
<add> root.render(<App step={0} />);
<add> });
<add> expect(Scheduler).toHaveYielded(['A0', 'B0', 'C0', 'Effect: 0']);
<add> expect(root).toMatchRenderedOutput('A0B0C0');
<add>
<add> await ReactNoop.act(async () => {
<add> startTransition(() => {
<add> root.render(<App step={1} />);
<add> });
<add> // Expire the update
<ide> Scheduler.unstable_advanceTime(10000);
<ide>
<del> // Attempt to interrupt with a sync update.
<del> ReactNoop.flushSync(() => root.render(<App text="A" />));
<del> expect(Scheduler).toHaveYielded([
<del> // Because the previous update had already expired, we don't interrupt
<del> // it. Finish rendering it first.
<del> 'Sibling',
<del> // Then do the sync update.
<del> 'A',
<del> 'Sibling',
<del> ]);
<add> // The update finishes without yielding. But it does not flush the effect.
<add> Scheduler.unstable_flushNumberOfYields(1);
<add> expect(Scheduler).toHaveYielded(['A1', 'B1', 'C1']);
<ide> });
<add> // The effect flushes after paint.
<add> expect(Scheduler).toHaveYielded(['Effect: 1']);
<ide> });
<ide> });
<ide><path>packages/react-reconciler/src/__tests__/ReactSchedulerIntegration-test.js
<ide> describe(
<ide> ReactNoop.render(<App />);
<ide> });
<ide>
<del> ReactNoop.flushSync();
<del>
<ide> // Because the render expired, React should finish the tree without
<ide> // consulting `shouldYield` again
<add> Scheduler.unstable_flushNumberOfYields(1);
<ide> expect(Scheduler).toHaveYielded(['B', 'C']);
<ide> });
<ide> });
<ide><path>packages/react-reconciler/src/__tests__/ReactSuspenseWithNoopRenderer-test.js
<ide> describe('ReactSuspenseWithNoopRenderer', () => {
<ide> await advanceTimers(5000);
<ide>
<ide> // Retry with the new content.
<del> if (gate(flags => flags.disableSchedulerTimeoutInWorkLoop)) {
<del> expect(Scheduler).toFlushAndYield([
<del> 'A',
<del> // B still suspends
<del> 'Suspend! [B]',
<del> 'Loading more...',
<del> ]);
<del> } else {
<del> // In this branch, right as we start rendering, we detect that the work
<del> // has expired (via Scheduler's didTimeout argument) and re-schedule the
<del> // work as synchronous. Since sync work does not flow through Scheduler,
<del> // we need to use `flushSync`.
<del> //
<del> // Usually we would use `act`, which fluses both sync work and Scheduler
<del> // work, but that would also force the fallback to display, and this test
<del> // is specifically about whether we delay or show the fallback.
<del> expect(Scheduler).toFlushAndYield([]);
<del> // This will flush the synchronous callback we just scheduled.
<del> ReactNoop.flushSync();
<del> expect(Scheduler).toHaveYielded([
<del> 'A',
<del> // B still suspends
<del> 'Suspend! [B]',
<del> 'Loading more...',
<del> ]);
<del> }
<add> expect(Scheduler).toFlushAndYield([
<add> 'A',
<add> // B still suspends
<add> 'Suspend! [B]',
<add> 'Loading more...',
<add> ]);
<add>
<ide> // Because we've already been waiting for so long we've exceeded
<ide> // our threshold and we show the next level immediately.
<ide> expect(ReactNoop.getChildren()).toEqual([ | 10 |
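A hedged aside on the mechanism above: lanes are bitmasks, so the expired-lanes early return added to `shouldTimeSlice` is a single bitwise AND. The constants below are illustrative stand-ins, not React's real lane values.

const NoLanes = 0b000;
const DefaultLane = 0b010;

// Mirrors the added early return: if any lane being rendered has already
// expired, finish synchronously instead of yielding between units of work.
function shouldTimeSlice(expiredLanes, renderLanes) {
  return (renderLanes & expiredLanes) === NoLanes;
}

console.log(shouldTimeSlice(NoLanes, DefaultLane));     // true  -> time slice
console.log(shouldTimeSlice(DefaultLane, DefaultLane)); // false -> render sync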
Javascript | Javascript | bring query params fix up to date | 19ae330fd582ca3a9b97b70d16aceceb40f94261 | <ide><path>packages/ember-routing/lib/system/route.js
<ide> let Route = EmberObject.extend(ActionHandler, Evented, {
<ide> } else {
<ide> if (presentKey) {
<ide> svalue = params[presentKey];
<del> value = route.deserializeQueryParam(svalue, qp.urlKey, qp.type);
<add>
<add> if (svalue !== undefined) {
<add> value = route.deserializeQueryParam(svalue, qp.urlKey, qp.type);
<add> }
<ide> } else {
<ide> // No QP provided; use default value.
<ide> svalue = qp.serializedDefaultValue;
<ide> let Route = EmberObject.extend(ActionHandler, Evented, {
<ide> }
<ide> });
<ide> ```
<del>
<add>
<ide> @method disconnectOutlet
<ide> @param {Object|String} options the options hash or outlet name
<ide> @since 1.0.0
<ide><path>packages/ember-routing/lib/system/router.js
<ide> const EmberRouter = EmberObject.extend(Evented, {
<ide> */
<ide> _serializeQueryParam(value, type) {
<ide> if (value === null || value === undefined) {
<del> return null;
<add> return value;
<ide> } else if (type === 'array') {
<ide> return JSON.stringify(value);
<ide> }
<ide> const EmberRouter = EmberObject.extend(Evented, {
<ide> @param {String} defaultType
<ide> */
<ide> _deserializeQueryParam(value, defaultType) {
<del> if (defaultType === 'boolean') {
<add> if (value === null || value === undefined) {
<add> return value;
<add> } else if (defaultType === 'boolean') {
<ide> return value === 'true';
<ide> } else if (defaultType === 'number') {
<ide> return (Number(value)).valueOf();
<ide><path>packages/ember/tests/routing/query_params_test.js
<ide> moduleFor('Query Params - main', class extends QueryParamTestCase {
<ide> });
<ide> }
<ide>
<del> test("Setting bound query pram property to null or undefined do not seralize to url", function() {
<del> Router.map(function() {
<del> this.route("home", { path: '/home' });
<del> });
<add> ['@test Setting bound query param property to null or undefined does not serialize to url'](assert) {
<add> assert.expect(9);
<ide>
<del> App.HomeController = Ember.Controller.extend({
<del> queryParams: ['foo'],
<del> foo: [1, 2]
<add> this.router.map(function() {
<add> this.route('home');
<ide> });
<ide>
<del> startingURL = '/home';
<del> bootApplication();
<add> this.setSingleQPController('home', 'foo', [1, 2]);
<ide>
<del> var controller = container.lookup('controller:home');
<add> return this.visitAndAssert('/home').then(() => {
<add> var controller = this.getController('home');
<ide>
<del> deepEqual(controller.get('foo'), [1,2]);
<del> equal(router.get('location.path'), "/home");
<add> assert.deepEqual(controller.get('foo'), [1,2]);
<add> this.assertCurrentPath('/home');
<ide>
<del> Ember.run(controller, 'set', 'foo', [1,3]);
<del> equal(router.get('location.path'), "/home?foo=%5B1%2C3%5D");
<add> this.setAndFlush(controller, 'foo', emberA([1,3]));
<add> this.assertCurrentPath('/home?foo=%5B1%2C3%5D');
<ide>
<del> Ember.run(router, 'transitionTo', '/home');
<del> deepEqual(controller.get('foo'), [1,2]);
<add> return this.transitionTo('/home').then(() => {
<add> assert.deepEqual(controller.get('foo'), [1,2]);
<add> this.assertCurrentPath('/home');
<ide>
<del> Ember.run(controller, 'set', 'foo', null);
<del> equal(router.get('location.path'), "/home", "Setting property to null");
<add> this.setAndFlush(controller, 'foo', null);
<add> this.assertCurrentPath('/home', 'Setting property to null');
<ide>
<del> Ember.run(controller, 'set', 'foo', [1,3]);
<del> equal(router.get('location.path'), "/home?foo=%5B1%2C3%5D");
<add> this.setAndFlush(controller, 'foo', emberA([1,3]));
<add> this.assertCurrentPath('/home?foo=%5B1%2C3%5D');
<ide>
<del> Ember.run(controller, 'set', 'foo', undefined);
<del> equal(router.get('location.path'), "/home", "Setting property to undefined");
<del> });
<add> this.setAndFlush(controller, 'foo', undefined);
<add> this.assertCurrentPath('/home', 'Setting property to undefined');
<add> });
<add> });
<add> }
<ide>
<del> test("{{link-to}} with null or undefined qps do not get serialized into url", function() {
<del> Ember.TEMPLATES.home = Ember.Handlebars.compile(
<del> "{{link-to 'Home' 'home' (query-params foo=nullValue) id='null-link'}}" +
<del> "{{link-to 'Home' 'home' (query-params foo=undefinedValue) id='undefined-link'}}");
<del> Router.map(function() {
<del> this.route("home", { path: '/home' });
<add> ['@test {{link-to}} with null or undefined QPs does not get serialized into url'](assert) {
<add> assert.expect(3);
<add>
<add> this.addTemplate('home', '{{link-to \'Home\' \'home\' (query-params foo=nullValue) id=\'null-link\'}}{{link-to \'Home\' \'home\' (query-params foo=undefinedValue) id=\'undefined-link\'}}');
<add>
<add> this.router.map(function() {
<add> this.route('home');
<ide> });
<ide>
<del> App.HomeController = Ember.Controller.extend({
<del> queryParams: ['foo'],
<del> foo: [],
<add> this.setSingleQPController('home', 'foo', [], {
<ide> nullValue: null,
<ide> undefinedValue: undefined
<ide> });
<ide>
<del> startingURL = '/home';
<del> bootApplication();
<del>
<del> var controller = container.lookup('controller:home');
<del> equal(Ember.$('#null-link').attr('href'), "/home");
<del> equal(Ember.$('#undefined-link').attr('href'), "/home");
<del> });
<add> return this.visitAndAssert('/home').then(() => {
<add> assert.equal(this.$('#null-link').attr('href'), '/home');
<add> assert.equal(this.$('#undefined-link').attr('href'), '/home');
<add> });
<add> }
<ide>
<ide> ['@test A child of a resource route still defaults to parent route\'s model even if the child route has a query param'](assert) {
<ide> assert.expect(2); | 3 |
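A minimal sketch (not Ember's verbatim code) of why the `svalue !== undefined` guard and the router's new early return matter: without them, a missing query param gets coerced during deserialization.

// Illustrative deserializer mirroring the patched _deserializeQueryParam.
function deserializeQueryParam(svalue, type) {
  if (svalue === null || svalue === undefined) return svalue;
  if (type === 'boolean') return svalue === 'true';
  if (type === 'number') return Number(svalue).valueOf();
  if (type === 'array') return JSON.parse(svalue);
  return svalue;
}

console.log(deserializeQueryParam(undefined, 'number')); // undefined (was NaN)
console.log(deserializeQueryParam('5', 'number'));       // 5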
Javascript | Javascript | fix select test to append to right place | f0b653e37b60e62cc16a69e28575af7387c386e3 | <ide><path>packages/ember-handlebars/tests/controls/select_test.js
<ide> function setAndFlush(view, key, value) {
<ide>
<ide> function append() {
<ide> Ember.run(function() {
<del> select.appendTo('body');
<add> select.appendTo('#qunit-fixture');
<ide> });
<ide> }
<ide> | 1 |
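The one-line change relies on a QUnit convention: the `#qunit-fixture` element is reset between tests, while nodes appended to `body` leak into later tests. A hypothetical test showing the pattern (assumes a QUnit environment):

QUnit.test('select is cleaned up automatically', function(assert) {
  var select = document.createElement('select');
  // Appended to the fixture, not document.body, so QUnit removes it
  // before the next test runs.
  document.getElementById('qunit-fixture').appendChild(select);
  assert.ok(select.parentNode, 'select rendered into the fixture');
});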
Go | Go | provide tmpdir for applylayer | 409f65bfd1bde5e45c68e2e1ecf68016ed91426b | <ide><path>pkg/chrootarchive/diff.go
<ide> package chrootarchive
<ide> import (
<ide> "flag"
<ide> "fmt"
<add> "io/ioutil"
<ide> "os"
<ide> "runtime"
<ide> "syscall"
<ide> func applyLayer() {
<ide> if err := syscall.Chdir("/"); err != nil {
<ide> fatal(err)
<ide> }
<add> tmpDir, err := ioutil.TempDir("/", "temp-docker-extract")
<add> if err != nil {
<add> fatal(err)
<add> }
<add> os.Setenv("TMPDIR", tmpDir)
<ide> if err := archive.ApplyLayer("/", os.Stdin); err != nil {
<add> os.RemoveAll(tmpDir)
<ide> fatal(err)
<ide> }
<add> os.RemoveAll(tmpDir)
<ide> os.Exit(0)
<ide> }
<ide> | 1 |
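The same create-a-private-scratch-directory-and-remove-it-on-every-exit-path idea, sketched in Node.js purely to illustrate the pattern:

const fs = require('fs');
const os = require('os');
const path = require('path');

function withTempDir(fn) {
  const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'temp-docker-extract-'));
  try {
    return fn(dir);   // stands in for running ApplyLayer with TMPDIR set
  } finally {
    fs.rmSync(dir, { recursive: true, force: true }); // success and failure
  }
}

withTempDir(dir => console.log('extracting into', dir));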
Javascript | Javascript | setimmediate v8 optimization fix | 7ced966a32dd18b4de2768f41796af76638341f9 | <ide><path>lib/timers.js
<ide> Immediate.prototype._idlePrev = undefined;
<ide>
<ide> exports.setImmediate = function(callback) {
<ide> var immediate = new Immediate();
<del> var args;
<add> var args, index;
<ide>
<ide> L.init(immediate);
<ide>
<ide> immediate._onImmediate = callback;
<ide>
<ide> if (arguments.length > 1) {
<del> args = Array.prototype.slice.call(arguments, 1);
<add> args = [];
<add> for (index = 1; index < arguments.length; index++)
<add> args.push(arguments[index]);
<ide>
<ide> immediate._onImmediate = function() {
<ide> callback.apply(immediate, args); | 1 |
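Context for the hand-rolled loop: the V8 of that era deoptimized any function that leaked its `arguments` object into another call such as `Array.prototype.slice.call(arguments, 1)`, so copying by index kept `setImmediate` on the optimized path. A standalone sketch of the pattern:

function captureArgs(callback) {
  var args, index;
  if (arguments.length > 1) {
    // Copy by index so the `arguments` object never escapes this frame.
    args = [];
    for (index = 1; index < arguments.length; index++)
      args.push(arguments[index]);
  }
  return function() { callback.apply(null, args || []); };
}

captureArgs(console.log, 'a', 'b')(); // prints: a b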
Javascript | Javascript | remove unused require from reactdom-test | bd6b10887a016f4f72f0acf89053a44c3241eafe | <ide><path>src/browser/__tests__/ReactDOM-test.js
<ide> "use strict";
<ide>
<ide> var React = require('React');
<del>var ReactDOM = require('ReactDOM');
<ide> var ReactTestUtils = require('ReactTestUtils');
<ide> var div = React.createFactory('div');
<ide> | 1 |
Javascript | Javascript | fix flaky interval test | 8d043238dee18c7356f28133e1d353048a14c7ee | <add><path>test/sequential/test-timers-set-interval-excludes-callback-duration.js
<del><path>test/sequential/test-timers-setInterval-excludes-callback-duration.js
<ide> const Timer = process.binding('timer_wrap').Timer;
<ide> const assert = require('assert');
<ide>
<ide> let cntr = 0;
<del>let first, second;
<add>let first;
<ide> const t = setInterval(() => {
<del> common.busyLoop(50);
<ide> cntr++;
<ide> if (cntr === 1) {
<ide> first = Timer.now();
<add> common.busyLoop(100);
<ide> } else if (cntr === 2) {
<del> second = Timer.now();
<del> assert(Math.abs(second - first - 100) < 10);
<add> assert(Timer.now() - first < 120);
<ide> clearInterval(t);
<ide> }
<ide> }, 100); | 1 |
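A reduced sketch of the de-flaked assertion using wall-clock time instead of the internal Timer binding: the first callback burns as much CPU as the interval itself, yet the next tick is still due 100ms after the previous one fired, so only a one-sided upper bound is asserted.

const assert = require('assert');

function busyLoop(ms) {
  const end = Date.now() + ms;
  while (Date.now() < end) {}  // burn CPU inside the callback
}

let first;
let cntr = 0;
const t = setInterval(() => {
  cntr++;
  if (cntr === 1) {
    first = Date.now();
    busyLoop(100);             // as long as the interval itself
  } else {
    // Interval scheduling excludes callback duration, so the gap stays
    // small; the old two-sided |gap - 100| < 10 check was the flaky part.
    assert(Date.now() - first < 120);
    clearInterval(t);
  }
}, 100);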
Javascript | Javascript | add test for fork() + shell | 688765a3c97378da473db7517e39d8c1dd2fea1c | <ide><path>test/parallel/test-child-process-fork-no-shell.js
<add>'use strict';
<add>// This test verifies that the shell option is not supported by fork().
<add>const common = require('../common');
<add>const assert = require('assert');
<add>const cp = require('child_process');
<add>const expected = common.isWindows ? '%foo%' : '$foo';
<add>
<add>if (process.argv[2] === undefined) {
<add> const child = cp.fork(__filename, [expected], {
<add> shell: true,
<add> env: { foo: 'bar' }
<add> });
<add>
<add> child.on('exit', common.mustCall((code, signal) => {
<add> assert.strictEqual(code, 0);
<add> assert.strictEqual(signal, null);
<add> }));
<add>} else {
<add> assert.strictEqual(process.argv[2], expected);
<add>} | 1 |
PHP | PHP | fix doc block | 752da212b1775aaac9ac986999ecce176a659c0e | <ide><path>src/Network/Session.php
<ide> class Session
<ide> /**
<ide> * The Session handler instance used as an engine for persisting the session data.
<ide> *
<del> * @var \SessionHandlerInterface
<add> * @var \SessionHandlerInterface|null
<ide> */
<ide> protected $_engine;
<ide> | 1 |
PHP | PHP | fix mysql unions with parens | ea1d3c89311e763d6c78d17325d21f2ad4614c1c | <ide><path>src/Illuminate/Database/Query/Grammars/Grammar.php
<ide> protected function compileUnions(Builder $query)
<ide>
<ide> foreach ($query->unions as $union)
<ide> {
<del> $joiner = $union['all'] ? ' union all ' : ' union ';
<del>
<del> $sql .= $joiner.$union['query']->toSql();
<add> $sql .= $this->compileUnion($union);
<ide> }
<ide>
<ide> return ltrim($sql);
<ide> }
<ide>
<add> /**
<add> * Compile a single union statement.
<add> *
<add> * @param array $union
<add> * @return string
<add> */
<add> protected function compileUnion(array $union)
<add> {
<add> $joiner = $union['all'] ? ' union all ' : ' union ';
<add>
<add> return $joiner.$union['query']->toSql();
<add> }
<add>
<ide> /**
<ide> * Compile an insert statement into SQL.
<ide> *
<ide><path>src/Illuminate/Database/Query/Grammars/MySqlGrammar.php
<ide> class MySqlGrammar extends Grammar {
<ide> */
<ide> protected $wrapper = '`%s`';
<ide>
<add> /**
<add> * The components that make up a select clause.
<add> *
<add> * @var array
<add> */
<add> protected $selectComponents = array(
<add> 'aggregate',
<add> 'columns',
<add> 'from',
<add> 'joins',
<add> 'wheres',
<add> 'groups',
<add> 'havings',
<add> 'orders',
<add> 'limit',
<add> 'offset',
<add> );
<add>
<add> /**
<add> * Compile a select query into SQL.
<add> *
<add> * @param \Illuminate\Database\Query\Builder
<add> * @return string
<add> */
<add> public function compileSelect(Builder $query)
<add> {
<add> $sql = parent::compileSelect($query);
<add>
<add> if ($query->unions)
<add> {
<add> $sql = '('.$sql.') '.$this->compileUnions($query);
<add> }
<add>
<add> return $sql;
<add> }
<add>
<add> /**
<add> * Compile a single union statement.
<add> *
<add> * @param array $union
<add> * @return string
<add> */
<add> protected function compileUnion(array $union)
<add> {
<add> $joiner = $union['all'] ? ' union all ' : ' union ';
<add>
<add> return $joiner.'('.$union['query']->toSql().')';
<add> }
<add>
<ide> /**
<ide> * Compile an update statement into SQL.
<ide> *
<ide><path>tests/Database/DatabaseQueryBuilderTest.php
<ide> public function testUnions()
<ide> $builder->union($this->getBuilder()->select('*')->from('users')->where('id', '=', 2));
<ide> $this->assertEquals('select * from "users" where "id" = ? union select * from "users" where "id" = ?', $builder->toSql());
<ide> $this->assertEquals(array(0 => 1, 1 => 2), $builder->getBindings());
<add>
<add> $builder = $this->getMySqlBuilder();
<add> $builder->select('*')->from('users')->where('id', '=', 1);
<add> $builder->union($this->getMySqlBuilder()->select('*')->from('users')->where('id', '=', 2));
<add> $this->assertEquals('(select * from `users` where `id` = ?) union (select * from `users` where `id` = ?)', $builder->toSql());
<add> $this->assertEquals(array(0 => 1, 1 => 2), $builder->getBindings());
<ide> }
<ide>
<ide> | 3 |
PHP | PHP | fix stupid mistakes and remove default precision | a4225bc9a21a84e8c3652ae077898a4d1b3cd533 | <ide><path>lib/Cake/Database/Schema/MysqlSchema.php
<ide> public function columnSql(Table $table, $name) {
<ide> in_array($data['type'], $hasPrecision, true) &&
<ide> (isset($data['length']) || isset($data['precision']))
<ide> ) {
<del> $data += ['length' => 11, 'precision' => 3];
<del> $out .= '(' . (int)$data['length'] . ', ' . (int)$data['precision'] . ')';
<add> $out .= '(' . (int)$data['length'] . ',' . (int)$data['precision'] . ')';
<ide> }
<ide> if (isset($data['null']) && $data['null'] === false) {
<ide> $out .= ' NOT NULL';
<ide><path>lib/Cake/Test/TestCase/Database/Schema/MysqlSchemaTest.php
<ide> public static function columnSqlProvider() {
<ide> [
<ide> 'value',
<ide> ['type' => 'decimal', 'length' => 11],
<del> '`value` FLOAT(11, 3)'
<add> '`value` DECIMAL(11,0)'
<ide> ],
<ide> [
<ide> 'value',
<ide> ['type' => 'decimal', 'length' => 12, 'precision' => 5],
<del> '`value` FLOAT(12, 5)'
<add> '`value` DECIMAL(12,5)'
<ide> ],
<ide> // Float
<ide> [
<ide> public static function columnSqlProvider() {
<ide> ],
<ide> [
<ide> 'value',
<del> ['type' => 'float', 'length' => 11],
<del> '`value` FLOAT(11, 3)'
<add> ['type' => 'float', 'length' => 11, 'precision' => 3],
<add> '`value` FLOAT(11,3)'
<ide> ],
<ide> // Boolean
<ide> [ | 2 |
Go | Go | remove dead code | 76ebd3dd179444b4a464db6e970030dd695cf5a1 | <ide><path>pkg/term/term_windows.go
<ide> package term
<ide>
<ide> import (
<del> "fmt"
<ide> "io"
<ide> "os"
<ide> "os/signal"
<ide> "syscall"
<ide>
<ide> "github.com/Azure/go-ansiterm/winterm"
<del> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/docker/pkg/system"
<ide> "github.com/docker/docker/pkg/term/windows"
<ide> )
<ide> func GetWinsize(fd uintptr) (*Winsize, error) {
<ide> return winsize, nil
<ide> }
<ide>
<del>// SetWinsize tries to set the specified window size for the specified file descriptor.
<del>func SetWinsize(fd uintptr, ws *Winsize) error {
<del>
<del> // Ensure the requested dimensions are no larger than the maximum window size
<del> info, err := winterm.GetConsoleScreenBufferInfo(fd)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) {
<del> return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allow: (%v,%v)",
<del> ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y)
<del> }
<del>
<del> // Narrow the sizes to that used by Windows
<del> width := winterm.SHORT(ws.Width)
<del> height := winterm.SHORT(ws.Height)
<del>
<del> // Set the dimensions while ensuring they remain within the bounds of the backing console buffer
<del> // -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs,
<del> // shift the upper left just enough to keep the new window within the buffer.
<del> rect := info.Window
<del> if width < rect.Right-rect.Left+1 {
<del> rect.Right = rect.Left + width - 1
<del> } else if width > rect.Right-rect.Left+1 {
<del> rect.Right = rect.Left + width - 1
<del> if rect.Right >= info.Size.X {
<del> rect.Left = info.Size.X - width
<del> rect.Right = info.Size.X - 1
<del> }
<del> }
<del>
<del> if height < rect.Bottom-rect.Top+1 {
<del> rect.Bottom = rect.Top + height - 1
<del> } else if height > rect.Bottom-rect.Top+1 {
<del> rect.Bottom = rect.Top + height - 1
<del> if rect.Bottom >= info.Size.Y {
<del> rect.Top = info.Size.Y - height
<del> rect.Bottom = info.Size.Y - 1
<del> }
<del> }
<del> logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect)
<del>
<del> return winterm.SetConsoleWindowInfo(fd, true, rect)
<del>}
<del>
<ide> // IsTerminal returns true if the given file descriptor is a terminal.
<ide> func IsTerminal(fd uintptr) bool {
<ide> return windows.IsConsole(fd) | 1 |
Javascript | Javascript | fix action targeting when there is no view | b325125a1f54c2e4a6e034da5ebfb55b15c8db2f | <ide><path>packages/ember-routing-htmlbars/lib/keywords/action.js
<ide> export default {
<ide> target = read(hash.target);
<ide> }
<ide> } else {
<del> target = get(env.view, 'controller');
<add> target = get(env.view, 'controller') || read(scope.self);
<ide> }
<ide>
<ide> return { actionName, actionArgs, target };
<ide><path>packages/ember-routing-htmlbars/tests/helpers/action_test.js
<ide> QUnit.test("should by default target the view's controller", function() {
<ide> equal(registeredTarget, controller, "The controller was registered as the target");
<ide> });
<ide>
<del>QUnit.skip("Inside a yield, the target points at the original target", function() {
<add>QUnit.test("Inside a yield, the target points at the original target", function() {
<ide> var watted = false;
<ide>
<ide> var component = EmberComponent.extend({
<ide><path>packages/ember/tests/component_registration_test.js
<ide> QUnit.skip("properties of a component without a template should not collide wit
<ide> equal(Ember.$('#wrapper').text(), "Some text inserted by jQuery", "The component is composed correctly");
<ide> });
<ide>
<del>QUnit.skip("Components trigger actions in the parents context when called from within a block", function() {
<add>QUnit.test("Components trigger actions in the parents context when called from within a block", function() {
<ide> Ember.TEMPLATES.application = compile("<div id='wrapper'>{{#my-component}}<a href='#' id='fizzbuzz' {{action 'fizzbuzz'}}>Fizzbuzz</a>{{/my-component}}</div>");
<ide>
<ide> boot(function() {
<ide> QUnit.skip("Components trigger actions in the parents context when called from w
<ide> });
<ide> });
<ide>
<del>QUnit.skip("Components trigger actions in the components context when called from within its template", function() {
<add>QUnit.test("Components trigger actions in the components context when called from within its template", function() {
<ide> Ember.TEMPLATES.application = compile("<div id='wrapper'>{{#my-component}}{{text}}{{/my-component}}</div>");
<ide> Ember.TEMPLATES['components/my-component'] = compile("<a href='#' id='fizzbuzz' {{action 'fizzbuzz'}}>Fizzbuzz</a>");
<ide>
<ide><path>packages/ember/tests/helpers/link_to_test.js
<ide> QUnit.test("The {{link-to}} helper supports multiple current-when routes", funct
<ide> equal(Ember.$('#link3.active', '#qunit-fixture').length, 0, "The link is not active since current-when does not contain the active route");
<ide> });
<ide>
<del>QUnit.skip("The {{link-to}} helper defaults to bubbling", function() {
<add>QUnit.test("The {{link-to}} helper defaults to bubbling", function() {
<ide> Ember.TEMPLATES.about = compile("<div {{action 'hide'}}>{{#link-to 'about.contact' id='about-contact'}}About{{/link-to}}</div>{{outlet}}");
<ide> Ember.TEMPLATES['about/contact'] = compile("<h1 id='contact'>Contact</h1>");
<ide>
<ide><path>packages/ember/tests/routing/basic_test.js
<ide> asyncTest("Nested callbacks are not exited when moving to siblings", function()
<ide> });
<ide> });
<ide>
<del>// Revert QUnit.skip to QUnit.asyncTest
<del>QUnit.skip("Events are triggered on the controller if a matching action name is implemented", function() {
<add>QUnit.asyncTest("Events are triggered on the controller if a matching action name is implemented", function() {
<ide> Router.map(function() {
<ide> this.route("home", { path: "/" });
<ide> });
<ide> QUnit.skip("Events are triggered on the controller if a matching action name is
<ide> action.handler(event);
<ide> });
<ide>
<del>// Revert QUnit.skip to QUnit.asyncTest
<del>QUnit.skip("Events are triggered on the current state when defined in `actions` object", function() {
<add>QUnit.asyncTest("Events are triggered on the current state when defined in `actions` object", function() {
<ide> Router.map(function() {
<ide> this.route("home", { path: "/" });
<ide> });
<ide> QUnit.skip("Events are triggered on the current state when defined in `actions`
<ide> action.handler(event);
<ide> });
<ide>
<del>// Revert QUnit.skip to QUnit.asyncTest
<del>QUnit.skip("Events defined in `actions` object are triggered on the current state when routes are nested", function() {
<add>QUnit.asyncTest("Events defined in `actions` object are triggered on the current state when routes are nested", function() {
<ide> Router.map(function() {
<ide> this.resource("root", { path: "/" }, function() {
<ide> this.route("index", { path: "/" });
<ide> QUnit.skip("Events defined in `actions` object are triggered on the current stat
<ide> action.handler(event);
<ide> });
<ide>
<del>// Revert QUnit.skip to QUnit.asyncTest
<del>QUnit.skip("Events are triggered on the current state when defined in `events` object (DEPRECATED)", function() {
<add>QUnit.asyncTest("Events are triggered on the current state when defined in `events` object (DEPRECATED)", function() {
<ide> Router.map(function() {
<ide> this.route("home", { path: "/" });
<ide> });
<ide> QUnit.skip("Events are triggered on the current state when defined in `events` o
<ide> action.handler(event);
<ide> });
<ide>
<del>// Revert QUnit.skip to QUnit.asyncTest
<del>QUnit.skip("Events defined in `events` object are triggered on the current state when routes are nested (DEPRECATED)", function() {
<add>QUnit.asyncTest("Events defined in `events` object are triggered on the current state when routes are nested (DEPRECATED)", function() {
<ide> Router.map(function() {
<ide> this.resource("root", { path: "/" }, function() {
<ide> this.route("index", { path: "/" });
<ide> QUnit.test("Events can be handled by inherited event handlers", function() {
<ide> router.send("baz");
<ide> });
<ide>
<del>// Revert QUnit.skip to QUnit.asyncTest
<del>QUnit.skip("Actions are not triggered on the controller if a matching action name is implemented as a method", function() {
<add>QUnit.asyncTest("Actions are not triggered on the controller if a matching action name is implemented as a method", function() {
<ide> Router.map(function() {
<ide> this.route("home", { path: "/" });
<ide> });
<ide> QUnit.skip("Actions are not triggered on the controller if a matching action nam
<ide> action.handler(event);
<ide> });
<ide>
<del>// Revert QUnit.skip to QUnit.asyncTest
<del>QUnit.skip("actions can be triggered with multiple arguments", function() {
<add>QUnit.asyncTest("actions can be triggered with multiple arguments", function() {
<ide> Router.map(function() {
<ide> this.resource("root", { path: "/" }, function() {
<ide> this.route("index", { path: "/" }); | 5 |
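The heart of the fix is a two-step target lookup; a hedged sketch with illustrative names, not Ember's actual objects:

// Inside a yield there is no wrapping controller, so actions fall back
// to the template's own `self` as their target.
function resolveTarget(view, scopeSelf) {
  return (view && view.controller) || scopeSelf;
}

console.log(resolveTarget({ controller: 'ctrl' }, 'component')); // 'ctrl'
console.log(resolveTarget({}, 'component'));                     // 'component'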
Javascript | Javascript | use strings for challengemap | cfb5f06cdff1fab70bcbad3329913ddca1047e2c | <ide><path>public/js/main_0.0.2.js
<ide> $(document).ready(function() {
<ide> });
<ide>
<ide> var challengeTypes = {
<del> 'HTML_CSS_JQ': 0,
<del> 'JAVASCRIPT': 1,
<del> 'VIDEO': 2,
<del> 'ZIPLINE': 3,
<del> 'BASEJUMP': 4,
<del> 'BONFIRE': 5
<add> 'HTML_CSS_JQ': '0',
<add> 'JAVASCRIPT': '1',
<add> 'VIDEO': '2',
<add> 'ZIPLINE': '3',
<add> 'BASEJUMP': '4',
<add> 'BONFIRE': '5'
<ide> };
<ide> $('#next-courseware-button').on('click', function() {
<ide> $('#next-courseware-button').unbind('click'); | 1 |
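Presumably the map values became strings because the ids they are compared against arrive as strings (for example, out of the DOM), and strict equality never coerces:

console.log(0 === '0');   // false -- a numeric map value never matches
console.log('0' === '0'); // true  -- hence '0', '1', ... in challengeTypes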
Javascript | Javascript | remove paypal credit button | 1519e7016546af065806f9a02ab20d17c13bcbec | <ide><path>client/src/components/Donation/PaypalButton.js
<ide> export class PaypalButton extends Component {
<ide> }
<ide> options={{
<ide> vault: true,
<del> disableFunding: 'card',
<add> disableFunding: 'credit,card',
<ide> clientId: paypalClientId
<ide> }}
<ide> style={{ | 1 |
Java | Java | defer charset.availablecharsets() call | 90477b40a4b92c357184cc182f546ce742b1ffa1 | <ide><path>spring-web/src/main/java/org/springframework/http/converter/StringHttpMessageConverter.java
<ide> public class StringHttpMessageConverter extends AbstractHttpMessageConverter<Str
<ide> public static final Charset DEFAULT_CHARSET = StandardCharsets.ISO_8859_1;
<ide>
<ide>
<del> private final List<Charset> availableCharsets;
<add> private volatile List<Charset> availableCharsets;
<ide>
<ide> private boolean writeAcceptCharset = true;
<ide>
<ide> public StringHttpMessageConverter() {
<ide> */
<ide> public StringHttpMessageConverter(Charset defaultCharset) {
<ide> super(defaultCharset, MediaType.TEXT_PLAIN, MediaType.ALL);
<del> this.availableCharsets = new ArrayList<>(Charset.availableCharsets().values());
<ide> }
<ide>
<ide>
<ide> protected void writeInternal(String str, HttpOutputMessage outputMessage) throws
<ide> * @return the list of accepted charsets
<ide> */
<ide> protected List<Charset> getAcceptedCharsets() {
<add> if (this.availableCharsets == null) {
<add> this.availableCharsets = new ArrayList<>(
<add> Charset.availableCharsets().values());
<add> }
<ide> return this.availableCharsets;
<ide> }
<ide> | 1 |
PHP | PHP | fix empty clauses causing errors in matching() | ffb14bcd5317e7610880795f37fd16696e068415 | <ide><path>src/Database/Query.php
<ide> protected function _decorateStatement($statement)
<ide> */
<ide> protected function _conjugate($part, $append, $conjunction, $types)
<ide> {
<add> $expression = $this->_parts[$part] ?: $this->newExpr();
<ide> if (empty($append)) {
<add> $this->_parts[$part] = $expression;
<ide> return;
<ide> }
<del> $expression = $this->_parts[$part] ?: $this->newExpr();
<ide>
<ide> if (!is_string($append) && is_callable($append)) {
<ide> $append = $append($this->newExpr(), $this);
<ide><path>tests/TestCase/Database/QueryTest.php
<ide> public function testWhereEmptyValues()
<ide> $query->from('comments')
<ide> ->where('');
<ide>
<del> $this->assertNull($query->clause('where'));
<add> $this->assertCount(0, $query->clause('where'));
<ide>
<ide> $query->where([]);
<del> $this->assertNull($query->clause('where'));
<add> $this->assertCount(0, $query->clause('where'));
<ide> }
<ide>
<ide> /**
<ide><path>tests/TestCase/ORM/QueryRegressionTest.php
<ide> public function testMatchingWithNoFields()
<ide> $this->assertEquals([2], $results);
<ide> }
<ide>
<add> /**
<add> * Test that empty conditions in a matching clause don't cause errors.
<add> *
<add> * @return void
<add> */
<add> public function testMatchingEmptyQuery()
<add> {
<add> $table = TableRegistry::get('Articles');
<add> $table->belongsToMany('Tags');
<add>
<add> $rows = $table->find()
<add> ->matching('Tags', function ($q) {
<add> return $q->where([]);
<add> })
<add> ->all();
<add> $this->assertNotEmpty($rows);
<add>
<add> $rows = $table->find()
<add> ->matching('Tags', function ($q) {
<add> return $q->where(null);
<add> })
<add> ->all();
<add> $this->assertNotEmpty($rows);
<add> }
<add>
<ide> /**
<ide> * Tests that using a subquery as part of an expression will not make invalid SQL
<ide> * | 3 |
Python | Python | add seed setting to image classification example | 88a0ce57bb6f2e76b45b07de3c8c1df832af9d10 | <ide><path>examples/pytorch/image-classification/run_image_classification.py
<ide> HfArgumentParser,
<ide> Trainer,
<ide> TrainingArguments,
<add> set_seed,
<ide> )
<ide> from transformers.trainer_utils import get_last_checkpoint
<ide> from transformers.utils import check_min_version, send_example_telemetry
<ide> def main():
<ide> "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
<ide> )
<ide>
<add> # Set seed before initializing model.
<add> set_seed(training_args.seed)
<add>
<ide> # Initialize our dataset and prepare it for the 'image-classification' task.
<ide> if data_args.dataset_name is not None:
<ide> dataset = load_dataset( | 1 |
PHP | PHP | fix tests that failed on merge | 6f83e8690f1df9e0482b3e4804347fd8675a5e54 | <ide><path>src/TestSuite/EmailTrait.php
<ide> public function assertMailContainsHtml(string $contents, string $message = ''):
<ide> /**
<ide> * Asserts an email contains an expected text content
<ide> *
<del> * @param string $contents Expected text.
<add> * @param string $expected Expected text.
<ide> * @param string $message Message to display if assertion fails.
<ide> * @return void
<ide> */
<del> public function assertMailContainsText(string $contents, string $message = ''): void
<add> public function assertMailContainsText(string $expected, string $message = ''): void
<ide> {
<del> $this->assertThat($expectedText, new MailContainsText(), $message);
<add> $this->assertThat($expected, new MailContainsText(), $message);
<ide> }
<ide>
<ide> /**
<ide><path>tests/TestCase/Shell/CompletionShellTest.php
<ide> public function testCommands()
<ide> $output = $this->out->output();
<ide>
<ide> $expected = 'TestPlugin.example TestPlugin.sample TestPluginTwo.example unique welcome ' .
<del> 'cache help plugin routes schema_cache server upgrade version ' .
<del> "abort auto_load_model demo i18n integration merge sample shell_test testing_dispatch";
<add> 'cache help i18n plugin routes schema_cache server upgrade version ' .
<add> "abort auto_load_model demo integration merge sample shell_test testing_dispatch";
<ide> $this->assertTextEquals($expected, $output);
<ide> }
<ide> | 2 |
Python | Python | add limit for jpype1 | 3699be49b24ef5a0a8d8de81a149af2c5a7dc206 | <ide><path>setup.py
<ide> def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version
<ide> ]
<ide> jdbc = [
<ide> 'jaydebeapi>=1.1.1',
<add> # JPype1 has been published without sdist in PyPI which caused failures when trying to build an
<add> # ARM image (JPype1 does not publish binary ARM packages)
<add> # The whole line below can be removed when https://github.com/jpype-project/jpype/issues/1069 is solved
<add> 'jpype1<1.4.0',
<ide> ]
<ide> jenkins = [
<ide> 'python-jenkins>=1.0.0', | 1 |
PHP | PHP | add index => viewany to resourceabilitymap | cda913232649edcf7dd1dc592dda81190bd4efe7 | <ide><path>src/Illuminate/Foundation/Auth/Access/AuthorizesRequests.php
<ide> public function authorizeResource($model, $parameter = null, array $options = []
<ide> protected function resourceAbilityMap()
<ide> {
<ide> return [
<add> 'index' => 'viewAny',
<ide> 'show' => 'view',
<ide> 'create' => 'create',
<ide> 'store' => 'create', | 1 |
Python | Python | add template fields to dynamodbtos3operator | a150ee0bc124f21b99fa94adbb16e6ccfe654ae4 | <ide><path>airflow/providers/amazon/aws/transfers/dynamodb_to_s3.py
<ide> from copy import copy
<ide> from os.path import getsize
<ide> from tempfile import NamedTemporaryFile
<del>from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional
<add>from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence
<ide> from uuid import uuid4
<ide>
<ide> from airflow.models import BaseOperator
<ide> class DynamoDBToS3Operator(BaseOperator):
<ide> maintained on each worker node).
<ide> """
<ide>
<add> template_fields: Sequence[str] = (
<add> 's3_bucket_name',
<add> 'dynamodb_table_name',
<add> )
<add> template_fields_renderers = {
<add> "dynamodb_scan_kwargs": "json",
<add> }
<add>
<ide> def __init__(
<ide> self,
<ide> *, | 1 |
Go | Go | synchronize datastore apis | a6c2dd75b5a93a473fb72c12b91c1edaadad2655 | <ide><path>libnetwork/datastore/datastore.go
<ide> import (
<ide> "log"
<ide> "reflect"
<ide> "strings"
<add> "sync"
<ide>
<ide> "github.com/docker/libkv"
<ide> "github.com/docker/libkv/store"
<ide> type datastore struct {
<ide> scope string
<ide> store store.Store
<ide> cache *cache
<add> sync.Mutex
<ide> }
<ide>
<ide> // KVObject is Key/Value interface used by objects to be part of the DataStore
<ide> func (ds *datastore) PutObjectAtomic(kvObject KVObject) error {
<ide> pair *store.KVPair
<ide> err error
<ide> )
<add> ds.Lock()
<add> defer ds.Unlock()
<ide>
<ide> if kvObject == nil {
<ide> return types.BadRequestErrorf("invalid KV Object : nil")
<ide> add_cache:
<ide>
<ide> // PutObject adds a new Record based on an object into the datastore
<ide> func (ds *datastore) PutObject(kvObject KVObject) error {
<add> ds.Lock()
<add> defer ds.Unlock()
<add>
<ide> if kvObject == nil {
<ide> return types.BadRequestErrorf("invalid KV Object : nil")
<ide> }
<ide> func (ds *datastore) putObjectWithKey(kvObject KVObject, key ...string) error {
<ide>
<ide> // GetObject returns a record matching the key
<ide> func (ds *datastore) GetObject(key string, o KVObject) error {
<add> ds.Lock()
<add> defer ds.Unlock()
<add>
<ide> if ds.cache != nil {
<ide> return ds.cache.get(key, o)
<ide> }
<ide> func (ds *datastore) ensureKey(key string) error {
<ide> }
<ide>
<ide> func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) {
<add> ds.Lock()
<add> defer ds.Unlock()
<add>
<ide> if ds.cache != nil {
<ide> return ds.cache.list(kvObject)
<ide> }
<ide> func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) {
<ide>
<ide> // DeleteObject unconditionally deletes a record from the store
<ide> func (ds *datastore) DeleteObject(kvObject KVObject) error {
<add> ds.Lock()
<add> defer ds.Unlock()
<add>
<ide> // clean up the cache first
<ide> if ds.cache != nil {
<ide> ds.cache.del(kvObject)
<ide> func (ds *datastore) DeleteObject(kvObject KVObject) error {
<ide>
<ide> // DeleteObjectAtomic performs atomic delete on a record
<ide> func (ds *datastore) DeleteObjectAtomic(kvObject KVObject) error {
<add> ds.Lock()
<add> defer ds.Unlock()
<add>
<ide> if kvObject == nil {
<ide> return types.BadRequestErrorf("invalid KV Object : nil")
<ide> }
<ide> del_cache:
<ide>
<ide> // DeleteTree unconditionally deletes a record from the store
<ide> func (ds *datastore) DeleteTree(kvObject KVObject) error {
<add> ds.Lock()
<add> defer ds.Unlock()
<add>
<ide> // clean up the cache first
<ide> if ds.cache != nil {
<ide> ds.cache.del(kvObject) | 1 |
Go | Go | add no prune to rmi | edd8d2d3511b0b632149d1c1f2cfd2bad2df4679 | <ide><path>api/client.go
<ide> func (cli *DockerCli) CmdPort(args ...string) error {
<ide> // 'docker rmi IMAGE' removes all images with the name IMAGE
<ide> func (cli *DockerCli) CmdRmi(args ...string) error {
<ide> var (
<del> cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
<del> force = cmd.Bool([]string{"f", "-force"}, false, "Force")
<add> cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
<add> force = cmd.Bool([]string{"f", "-force"}, false, "Force")
<add> noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
<ide> )
<ide> if err := cmd.Parse(args); err != nil {
<ide> return nil
<ide> func (cli *DockerCli) CmdRmi(args ...string) error {
<ide> if *force {
<ide> v.Set("force", "1")
<ide> }
<add> if *noprune {
<add> v.Set("noprune", "1")
<add> }
<ide>
<ide> var encounteredError error
<ide> for _, name := range cmd.Args() {
<ide><path>api/server.go
<ide> func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWr
<ide> var job = eng.Job("image_delete", vars["name"])
<ide> streamJSON(job, w, false)
<ide> job.Setenv("force", r.Form.Get("force"))
<add> job.Setenv("noprune", r.Form.Get("noprune"))
<ide>
<ide> return job.Run()
<ide> }
<ide><path>server/server.go
<ide> func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
<ide> return engine.StatusOK
<ide> }
<ide>
<del>func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error {
<add>func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, noprune bool) error {
<ide> var (
<ide> repoName, tag string
<ide> tags = []string{}
<ide> func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
<ide> out.Set("Deleted", img.ID)
<ide> imgs.Add(out)
<ide> srv.LogEvent("delete", img.ID, "")
<del> if img.Parent != "" {
<del> err := srv.DeleteImage(img.Parent, imgs, false, force)
<add> if img.Parent != "" && !noprune {
<add> err := srv.DeleteImage(img.Parent, imgs, false, force, noprune)
<ide> if first {
<ide> return err
<ide> }
<ide> func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
<ide> return job.Errorf("Usage: %s IMAGE", job.Name)
<ide> }
<ide> imgs := engine.NewTable("", 0)
<del> if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil {
<add> if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
<ide> return job.Error(err)
<ide> }
<ide> if len(imgs.Data) == 0 { | 3 |
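Server-side, the new `noprune` flag simply stops the recursive walk up the parent chain. A rough Python analogue of that control flow follows; the image-graph shape is made up for illustration.

```python
def delete_image(images, img_id, noprune=False):
    # Delete the image, then keep deleting untagged parents unless
    # noprune is set, mirroring DeleteImage's recursion in the patch.
    img = images.pop(img_id)
    parent = img.get("parent")
    if parent and not noprune:
        delete_image(images, parent, noprune)

images = {"c": {"parent": "b"}, "b": {"parent": "a"}, "a": {"parent": None}}
delete_image(images, "c", noprune=True)
print(images)  # {'b': {'parent': 'a'}, 'a': {'parent': None}}: only "c" removed
```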
Ruby | Ruby | remove unused user_controller from test | fa5b34ed227d81c105495658fe3b9741b2eff489 | <ide><path>actionpack/test/controller/action_pack_assertions_test.rb
<ide> def show
<ide> end
<ide> end
<ide>
<del>class UserController < ActionController::Base
<del>end
<del>
<ide> module Admin
<ide> class InnerModuleController < ActionController::Base
<ide> def index | 1 |
Python | Python | use integer disk size for vsphere nodesize | 76823a428f4ecdc43355195c400eae8def80971f | <ide><path>libcloud/compute/drivers/vsphere.py
<ide> def _to_node(self, vm):
<ide> path = vm.get('summary.config.vmPathName')
<ide> memory = vm.get('summary.config.memorySizeMB')
<ide> cpus = vm.get('summary.config.numCpu')
<del> disk = vm.get('summary.storage.committed', 0) / (1024 ** 3)
<add> disk = vm.get('summary.storage.committed', 0) // (1024 ** 3)
<ide> id_to_hash = str(memory) + str(cpus) + str(disk)
<ide> size_id = hashlib.md5(id_to_hash.encode("utf-8")).hexdigest()
<ide> size_name = name + "-size" | 1 |
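The one-character change matters because Python 3's `/` always produces a float while `//` floors to an int, and the size id is built by hashing the stringified values, so `32.0` and `32` would hash differently. For instance:

```python
committed = 34_359_738_368        # bytes reported by vSphere (32 GiB)

print(committed / (1024 ** 3))    # 32.0 -> str() gives "32.0"
print(committed // (1024 ** 3))   # 32   -> str() gives "32", a stable size id
```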
Ruby | Ruby | add notes for future selves | cf423f8f767895d608284acb3543f7c9ba15ccf4 | <ide><path>actioncable/lib/rails/generators/channel/channel_generator.rb
<ide> def file_name
<ide> @_file_name ||= super.gsub(/\_channel/i, '')
<ide> end
<ide>
<add> # FIXME: Change these files to symlinks once RubyGems 2.5.0 is required.
<ide> def generate_application_cable_files
<ide> return if self.behavior != :invoke
<ide>
<ide><path>activerecord/lib/rails/generators/active_record/model/model_generator.rb
<ide> def attributes_with_index
<ide> attributes.select { |a| !a.reference? && a.has_index? }
<ide> end
<ide>
<add> # FIXME: Change this file to a symlink once RubyGems 2.5.0 is required.
<ide> def generate_application_record
<ide> if self.behavior == :invoke && !File.exist?('app/models/application_record.rb')
<ide> template 'application_record.rb', 'app/models/application_record.rb' | 2 |
Go | Go | pass the vxlan port in network endian order | 960639fbb9e2d7d21cc4a0a9496e8ca9e998efc3 | <ide><path>libnetwork/drivers/overlay/ov_utils.go
<ide> import (
<ide> "github.com/docker/libnetwork/netutils"
<ide> "github.com/docker/libnetwork/types"
<ide> "github.com/vishvananda/netlink"
<add> "github.com/vishvananda/netlink/nl"
<ide> )
<ide>
<ide> func validateID(nid, eid types.UUID) error {
<ide> func createVxlan(vni uint32) (string, error) {
<ide> LinkAttrs: netlink.LinkAttrs{Name: name},
<ide> VxlanId: int(vni),
<ide> Learning: true,
<del> Port: vxlanPort,
<add> Port: int(nl.Swap16(vxlanPort)), //network endian order
<ide> Proxy: true,
<ide> L3miss: true,
<ide> L2miss: true, | 1 |
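`nl.Swap16` converts the 16-bit VXLAN port to network (big-endian) byte order before netlink passes it to the kernel. The equivalent conversion in Python, assuming `vxlanPort` is the IANA default 4789 (its value is not shown in this patch):

```python
import socket

vxlan_port = 4789                      # assumed value of vxlanPort
print(hex(vxlan_port))                 # 0x12b5 (host order)
print(hex(socket.htons(vxlan_port)))   # 0xb512 on little-endian hosts;
                                       # unchanged on big-endian ones
```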
Javascript | Javascript | add error for , add recommendations for errors | 8bc25f65e60001861a2c9f9962ddfbe3a5aa9025 | <ide><path>lib/RuleSet.js
<ide> RuleSet.normalizeRule = function(rule, refs) {
<ide> }
<ide>
<ide> if(rule.loader && rule.loaders)
<del> throw new Error("Provided loader and loaders for rule");
<add> throw new Error(RuleSet.buildErrorMessage(rule, new Error("Provided loader and loaders for rule (use only one of them)")));
<ide>
<ide> var loader = rule.loaders || rule.loader;
<ide> if(typeof loader === "string" && !rule.options && !rule.query) {
<ide> RuleSet.normalizeRule = function(rule, refs) {
<ide> query: rule.query
<ide> });
<ide> } else if(loader && (rule.options || rule.query)) {
<del> throw new Error("options/query cannot be used with loaders");
<add> throw new Error(RuleSet.buildErrorMessage(rule, new Error("options/query cannot be used with loaders (use options for each array item)")));
<ide> } else if(loader) {
<ide> checkUseSource("loaders");
<ide> newRule.use = RuleSet.normalizeUse(loader);
<add> } else if(rule.options || rule.query) {
<add> throw new Error(RuleSet.buildErrorMessage(rule, new Error("options/query provided without loader (use loader + options)")));
<ide> }
<ide>
<ide> if(rule.use) {
<ide> RuleSet.normalizeRule = function(rule, refs) {
<ide>
<ide> function checkUseSource(newSource) {
<ide> if(useSource && useSource !== newSource)
<del> throw new Error("Rule can only have one result source (provided " + newSource + " and " + useSource + ")");
<add> throw new Error(RuleSet.buildErrorMessage(rule, new Error("Rule can only have one result source (provided " + newSource + " and " + useSource + ")")));
<ide> useSource = newSource;
<ide> }
<ide>
<ide> function checkResourceSource(newSource) {
<ide> if(resourceSource && resourceSource !== newSource)
<del> throw new Error("Rule can only have one resource source (provided " + newSource + " and " + resourceSource + ")");
<add> throw new Error(RuleSet.buildErrorMessage(rule, new Error("Rule can only have one resource source (provided " + newSource + " and " + resourceSource + ")")));
<ide> resourceSource = newSource;
<ide> }
<ide> | 1 |
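The new errors reject ambiguous rule shapes: `loader` together with `loaders`, `options`/`query` alongside an array of loaders, or `options`/`query` with no loader at all. A simplified Python sketch of that validation logic (the rule is reduced to a plain dict; this is not webpack's actual code path):

```python
def normalize_rule(rule):
    if "loader" in rule and "loaders" in rule:
        raise ValueError("Provided loader and loaders for rule (use only one of them)")
    loader = rule.get("loader") or rule.get("loaders")
    has_opts = "options" in rule or "query" in rule
    if loader and has_opts and not isinstance(loader, str):
        raise ValueError("options/query cannot be used with loaders (use options for each array item)")
    if has_opts and not loader:
        raise ValueError("options/query provided without loader (use loader + options)")
    return rule

normalize_rule({"loader": "babel-loader", "options": {}})  # fine: string loader
# normalize_rule({"options": {}})  -> ValueError: provided without loader
```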
Text | Text | release notes for 1.0 temporal-domination release | 6dfe5be1556dc0c770cea8af78d1e0829ed24848 | <ide><path>CHANGELOG.md
<del><a name="v1.0.0rc12"></a>
<add><a name="1.0.0"></a>
<add># 1.0.0 temporal-domination (2012-06-13)
<add>
<add>
<add>## Bug Fixes
<add>
<add>- **$location:**
<add> - correctly parse link urls in hashbang mode with a prefix
<add> ([0f44964e](https://github.com/angular/angular.js/commit/0f44964e5e0f7e37d7fa3216bb10fd61fbf52ae2),
<add> [#1037](https://github.com/angular/angular.js/issues/1037))
<add> - fix link click interception in hash-bang mode
<add> ([6593a3e0](https://github.com/angular/angular.js/commit/6593a3e0823f3c08079f05010f9628fc4503cd43),
<add> [#1051](https://github.com/angular/angular.js/issues/1051))
<add>
<add>
<add><a name="1.0.0rc12"></a>
<ide> # 1.0.0rc12 regression-extermination (2012-06-12)
<ide>
<ide> ## Bug Fixes | 1 |
Ruby | Ruby | move xcode and clt modules to a new file | a784d2e045af20938b64a580be920362a74cd6ce | <ide><path>Library/Homebrew/macos.rb
<ide> def pkgutil_info id
<ide> end
<ide> end
<ide>
<del>module MacOS::Xcode extend self
<del>
<del> V4_BUNDLE_ID = "com.apple.dt.Xcode"
<del> V3_BUNDLE_ID = "com.apple.Xcode"
<del> V4_BUNDLE_PATH = Pathname.new("/Applications/Xcode.app")
<del> V3_BUNDLE_PATH = Pathname.new("/Developer/Applications/Xcode.app")
<del>
<del> # Locate the "current Xcode folder" via xcode-select. See:
<del> # man xcode-select
<del> def folder
<del> @folder ||= `xcode-select -print-path 2>/dev/null`.strip
<del> end
<del>
<del> # Xcode 4.3 tools hang if "/" is set
<del> def xctools_fucked?
<del> folder == "/"
<del> end
<del>
<del> def prefix
<del> @prefix ||= begin
<del> path = Pathname.new folder
<del> if path.absolute? and (path/'usr/bin/make').executable?
<del> path
<del> elsif File.executable? '/Developer/usr/bin/make'
<del> # we do this to support cowboys who insist on installing
<del> # only a subset of Xcode
<del> Pathname.new '/Developer'
<del> elsif (V4_BUNDLE_PATH/'Contents/Developer/usr/bin/make').executable?
<del> # fallback for broken Xcode 4.3 installs
<del> V4_BUNDLE_PATH/'Contents/Developer'
<del> else
<del> # Ask Spotlight where Xcode is. If the user didn't install the
<del> # helper tools and installed Xcode in a non-conventional place, this
<del> # is our only option. See: http://superuser.com/questions/390757
<del> path = MacOS.app_with_bundle_id(V4_BUNDLE_ID) ||
<del> MacOS.app_with_bundle_id(V3_BUNDLE_ID)
<del>
<del> unless path.nil?
<del> path += "Contents/Developer"
<del> path if (path/'usr/bin/make').executable?
<del> end
<del> end
<del> end
<del> end
<del>
<del> def installed?
<del> # Telling us whether the Xcode.app is installed or not.
<del> @installed ||= V4_BUNDLE_PATH.exist? ||
<del> V3_BUNDLE_PATH.exist? ||
<del> MacOS.app_with_bundle_id(V4_BUNDLE_ID) ||
<del> MacOS.app_with_bundle_id(V3_BUNDLE_ID) ||
<del> false
<del> end
<del>
<del> def version
<del> # may return a version string
<del> # that is guessed based on the compiler, so do not
<del> # use it in order to check if Xcode is installed.
<del> @version ||= begin
<del> return "0" unless MACOS
<del>
<del> # this shortcut makes version work for people who don't realise you
<del> # need to install the CLI tools
<del> xcode43build = "#{V4_BUNDLE_PATH}/Contents/Developer/usr/bin/xcodebuild"
<del> if File.file? xcode43build
<del> `#{xcode43build} -version 2>/dev/null` =~ /Xcode (\d(\.\d)*)/
<del> return $1 if $1
<del> end
<del>
<del> # Xcode 4.3 xc* tools hang indefinitely if xcode-select path is set thus
<del> raise if xctools_fucked?
<del>
<del> raise unless which "xcodebuild"
<del> `xcodebuild -version 2>/dev/null` =~ /Xcode (\d(\.\d)*)/
<del> raise if $1.nil? or not $?.success?
<del> $1
<del> rescue
<del> # For people whose xcode-select is unset, or who have installed
<del> # xcode-gcc-installer or whatever other combinations we can try and
<del> # support. See https://github.com/mxcl/homebrew/wiki/Xcode
<del> case llvm_build_version.to_i
<del> when 1..2063 then "3.1.0"
<del> when 2064..2065 then "3.1.4"
<del> when 2366..2325
<del> # we have no data for this range so we are guessing
<del> "3.2.0"
<del> when 2326
<del> # also applies to "3.2.3"
<del> "3.2.4"
<del> when 2327..2333 then "3.2.5"
<del> when 2335
<del> # this build number applies to 3.2.6, 4.0 and 4.1
<del> # https://github.com/mxcl/homebrew/wiki/Xcode
<del> "4.0"
<del> else
<del> case (clang_version.to_f * 10).to_i
<del> when 0
<del> "dunno"
<del> when 1..14
<del> "3.2.2"
<del> when 15
<del> "3.2.4"
<del> when 16
<del> "3.2.5"
<del> when 17..20
<del> "4.0"
<del> when 21
<del> "4.1"
<del> when 22..30
<del> "4.2"
<del> when 31
<del> "4.3"
<del> when 40
<del> "4.4"
<del> else
<del> "4.4"
<del> end
<del> end
<del> end
<del> end
<del>end
<del>
<del>module MacOS::CLT extend self
<del> STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
<del> FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
<del>
<del> # This is true if the standard UNIX tools are present under /usr. For
<del> # Xcode < 4.3, this is the standard location. Otherwise, it means that
<del> # the user has installed the "Command Line Tools for Xcode" package.
<del> def installed?
<del> MacOS.dev_tools_path == Pathname.new("/usr/bin")
<del> end
<del>
<del> def version
<del> # Version string (a pretty damn long one) of the CLT package.
<del> # Note, that different ways to install the CLTs lead to different
<del> # version numbers.
<del> @version ||= begin
<del> standalone = MacOS.pkgutil_info(CLT_STANDALONE_PKG_ID)
<del> from_xcode = MacOS.pkgutil_info(CLT_FROM_XCODE_PKG_ID)
<del>
<del> if not standalone.empty?
<del> standalone =~ /version: (.*)$/
<del> $1
<del> elsif not from_xcode.empty?
<del> from_xcode =~ /version: (.*)$/
<del> $1
<del> else
<del> nil
<del> end
<del> end
<del> end
<del>end
<add>require 'xcode'
<ide><path>Library/Homebrew/xcode.rb
<add>module MacOS::Xcode extend self
<add>
<add> V4_BUNDLE_ID = "com.apple.dt.Xcode"
<add> V3_BUNDLE_ID = "com.apple.Xcode"
<add> V4_BUNDLE_PATH = Pathname.new("/Applications/Xcode.app")
<add> V3_BUNDLE_PATH = Pathname.new("/Developer/Applications/Xcode.app")
<add>
<add> # Locate the "current Xcode folder" via xcode-select. See:
<add> # man xcode-select
<add> def folder
<add> @folder ||= `xcode-select -print-path 2>/dev/null`.strip
<add> end
<add>
<add> # Xcode 4.3 tools hang if "/" is set
<add> def xctools_fucked?
<add> folder == "/"
<add> end
<add>
<add> def prefix
<add> @prefix ||= begin
<add> path = Pathname.new folder
<add> if path.absolute? and (path/'usr/bin/make').executable?
<add> path
<add> elsif File.executable? '/Developer/usr/bin/make'
<add> # we do this to support cowboys who insist on installing
<add> # only a subset of Xcode
<add> Pathname.new '/Developer'
<add> elsif (V4_BUNDLE_PATH/'Contents/Developer/usr/bin/make').executable?
<add> # fallback for broken Xcode 4.3 installs
<add> V4_BUNDLE_PATH/'Contents/Developer'
<add> else
<add> # Ask Spotlight where Xcode is. If the user didn't install the
<add> # helper tools and installed Xcode in a non-conventional place, this
<add> # is our only option. See: http://superuser.com/questions/390757
<add> path = MacOS.app_with_bundle_id(V4_BUNDLE_ID) ||
<add> MacOS.app_with_bundle_id(V3_BUNDLE_ID)
<add>
<add> unless path.nil?
<add> path += "Contents/Developer"
<add> path if (path/'usr/bin/make').executable?
<add> end
<add> end
<add> end
<add> end
<add>
<add> def installed?
<add> # Telling us whether the Xcode.app is installed or not.
<add> @installed ||= V4_BUNDLE_PATH.exist? ||
<add> V3_BUNDLE_PATH.exist? ||
<add> MacOS.app_with_bundle_id(V4_BUNDLE_ID) ||
<add> MacOS.app_with_bundle_id(V3_BUNDLE_ID) ||
<add> false
<add> end
<add>
<add> def version
<add> # may return a version string
<add> # that is guessed based on the compiler, so do not
<add> # use it in order to check if Xcode is installed.
<add> @version ||= begin
<add> return "0" unless MACOS
<add>
<add> # this shortcut makes version work for people who don't realise you
<add> # need to install the CLI tools
<add> xcode43build = V4_BUNDLE_PATH/'Contents/Developer/usr/bin/xcodebuild'
<add> if xcode43build.file?
<add> `#{xcode43build} -version 2>/dev/null` =~ /Xcode (\d(\.\d)*)/
<add> return $1 if $1
<add> end
<add>
<add> # Xcode 4.3 xc* tools hang indefinitely if xcode-select path is set thus
<add> raise if xctools_fucked?
<add>
<add> raise unless which "xcodebuild"
<add> `xcodebuild -version 2>/dev/null` =~ /Xcode (\d(\.\d)*)/
<add> raise if $1.nil? or not $?.success?
<add> $1
<add> rescue
<add> # For people whose xcode-select is unset, or who have installed
<add> # xcode-gcc-installer or whatever other combinations we can try and
<add> # support. See https://github.com/mxcl/homebrew/wiki/Xcode
<add> case MacOS.llvm_build_version.to_i
<add> when 1..2063 then "3.1.0"
<add> when 2064..2065 then "3.1.4"
<add> when 2366..2325
<add> # we have no data for this range so we are guessing
<add> "3.2.0"
<add> when 2326
<add> # also applies to "3.2.3"
<add> "3.2.4"
<add> when 2327..2333 then "3.2.5"
<add> when 2335
<add> # this build number applies to 3.2.6, 4.0 and 4.1
<add> # https://github.com/mxcl/homebrew/wiki/Xcode
<add> "4.0"
<add> else
<add> case (MacOS.clang_version.to_f * 10).to_i
<add> when 0
<add> "dunno"
<add> when 1..14
<add> "3.2.2"
<add> when 15
<add> "3.2.4"
<add> when 16
<add> "3.2.5"
<add> when 17..20
<add> "4.0"
<add> when 21
<add> "4.1"
<add> when 22..30
<add> "4.2"
<add> when 31
<add> "4.3"
<add> when 40
<add> "4.4"
<add> else
<add> "4.4"
<add> end
<add> end
<add> end
<add> end
<add>end
<add>
<add>module MacOS::CLT extend self
<add> STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
<add> FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
<add>
<add> # This is true if the standard UNIX tools are present under /usr. For
<add> # Xcode < 4.3, this is the standard location. Otherwise, it means that
<add> # the user has installed the "Command Line Tools for Xcode" package.
<add> def installed?
<add> MacOS.dev_tools_path == Pathname.new("/usr/bin")
<add> end
<add>
<add> def version
<add> # Version string (a pretty damn long one) of the CLT package.
<add> # Note, that different ways to install the CLTs lead to different
<add> # version numbers.
<add> @version ||= begin
<add> standalone = MacOS.pkgutil_info(STANDALONE_PKG_ID)
<add> from_xcode = MacOS.pkgutil_info(FROM_XCODE_PKG_ID)
<add>
<add> if not standalone.empty?
<add> standalone =~ /version: (.*)$/
<add> $1
<add> elsif not from_xcode.empty?
<add> from_xcode =~ /version: (.*)$/
<add> $1
<add> else
<add> nil
<add> end
<add> end
<add> end
<add>end | 2 |
PHP | PHP | fix failing tests caused by incorrect mocks | d63d51e3dd8ab502e853b71d313b5607be36837a | <ide><path>lib/Cake/Test/Case/Model/Datasource/Database/MysqlTest.php
<ide> public function testGetEncoding() {
<ide> * @return void
<ide> */
<ide> public function testFieldDoubleEscaping() {
<add> $db = $this->Dbo->config['database'];
<ide> $test = $this->getMock('Mysql', array('connect', '_execute', 'execute'));
<add> $test->config['database'] = $db;
<add>
<ide> $this->Model = $this->getMock('Article2', array('getDataSource'));
<ide> $this->Model->alias = 'Article';
<ide> $this->Model->expects($this->any())
<ide> function &_prepareAssociationQuery($model, &$queryData, $binding) {
<ide> * @return void
<ide> */
<ide> public function testGenerateInnerJoinAssociationQuery() {
<add> $db = $this->Dbo->config['database'];
<ide> $test = $this->getMock('Mysql', array('connect', '_execute', 'execute'));
<add> $test->config['database'] = $db;
<add>
<ide> $this->Model = $this->getMock('TestModel9', array('getDataSource'));
<ide> $this->Model->expects($this->any())
<ide> ->method('getDataSource')
<ide> public function testBuildColumnBadType() {
<ide> * @return void
<ide> */
<ide> public function testHasAny() {
<add> $db = $this->Dbo->config['database'];
<ide> $this->Dbo = $this->getMock('Mysql', array('connect', '_execute', 'execute', 'value'));
<add> $this->Dbo->config['database'] = $db;
<add>
<ide> $this->Model = $this->getMock('TestModel', array('getDataSource'));
<ide> $this->Model->expects($this->any())
<ide> ->method('getDataSource') | 1 |
Text | Text | clarify subprocess.stdout/in/err property | a324ea03d94236c76a863bd4acf08fee1f7dc6b9 | <ide><path>doc/api/child_process.md
<ide> in which the child process is launched.
<ide> added: v0.1.90
<ide> -->
<ide>
<del>* {stream.Readable}
<add>* {stream.Readable|null|undefined}
<ide>
<ide> A `Readable Stream` that represents the child process's `stderr`.
<ide>
<ide> then this will be `null`.
<ide> `subprocess.stderr` is an alias for `subprocess.stdio[2]`. Both properties will
<ide> refer to the same value.
<ide>
<del>The `subprocess.stderr` property can be `null` if the child process could
<del>not be successfully spawned.
<add>The `subprocess.stderr` property can be `null` or `undefined`
<add>if the child process could not be successfully spawned.
<ide>
<ide> ### `subprocess.stdin`
<ide>
<ide> <!-- YAML
<ide> added: v0.1.90
<ide> -->
<ide>
<del>* {stream.Writable}
<add>* {stream.Writable|null|undefined}
<ide>
<ide> A `Writable Stream` that represents the child process's `stdin`.
<ide>
<ide> then this will be `null`.
<ide> `subprocess.stdin` is an alias for `subprocess.stdio[0]`. Both properties will
<ide> refer to the same value.
<ide>
<del>The `subprocess.stdin` property can be `undefined` if the child process could
<del>not be successfully spawned.
<add>The `subprocess.stdin` property can be `null` or `undefined`
<add>if the child process could not be successfully spawned.
<ide>
<ide> ### `subprocess.stdio`
<ide>
<ide> not be successfully spawned.
<ide> added: v0.1.90
<ide> -->
<ide>
<del>* {stream.Readable}
<add>* {stream.Readable|null|undefined}
<ide>
<ide> A `Readable Stream` that represents the child process's `stdout`.
<ide>
<ide> subprocess.stdout.on('data', (data) => {
<ide> });
<ide> ```
<ide>
<del>The `subprocess.stdout` property can be `null` if the child process could
<del>not be successfully spawned.
<add>The `subprocess.stdout` property can be `null` or `undefined`
<add>if the child process could not be successfully spawned.
<ide>
<ide> ### `subprocess.unref()`
<ide> | 1 |
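The clarified docs say these stream properties exist only when stdio pipes were requested at spawn time. Python's `subprocess` behaves analogously, which makes for a quick cross-language demonstration (POSIX `echo` assumed; this is not Node.js itself):

```python
import subprocess

piped = subprocess.Popen(["echo", "hi"], stdout=subprocess.PIPE)
inherited = subprocess.Popen(["echo", "hi"])  # no pipe requested

print(piped.stdout is None)      # False: a readable stream was created
print(inherited.stdout is None)  # True: no pipe -> the property is None
piped.wait()
inherited.wait()
```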
PHP | PHP | fix bug in url generation | 0012f28f88f19037fbea71a60c162d69903ceea7 | <ide><path>src/Illuminate/Routing/UrlGenerator.php
<ide> protected function replaceRouteParameters($path, array $parameters)
<ide> $path = $this->replaceRouteParameter($path, $key, $value, $parameters);
<ide> }
<ide>
<del> return $path.$this->getRouteQueryString($parameters);
<add> $path = preg_replace('/\{.*?\?\}/', '', $path);
<add>
<add> return trim($path, '/').$this->getRouteQueryString($parameters);
<ide> }
<ide>
<ide> /**
<ide><path>tests/Routing/RoutingUrlGeneratorTest.php
<ide> public function testRoutesWithDomains()
<ide>
<ide> public function testRoutesWithDomainsAndPorts()
<ide> {
<del> $url = new UrlGenerator(
<add> $url = new UrlGenerator(
<ide> $routes = new Illuminate\Routing\RouteCollection,
<ide> $request = Illuminate\Http\Request::create('http://www.foo.com:8080/')
<ide> );
<ide> public function testRoutesWithDomainsAndPorts()
<ide> $this->assertEquals('http://sub.taylor.com:8080/foo/bar/otwell', $url->route('bar', array('taylor', 'otwell')));
<ide> }
<ide>
<add>
<add> public function testUrlGenerationForControllers()
<add> {
<add> $url = new UrlGenerator(
<add> $routes = new Illuminate\Routing\RouteCollection,
<add> $request = Illuminate\Http\Request::create('http://www.foo.com:8080/')
<add> );
<add>
<add> $route = new Illuminate\Routing\Route(array('GET'), 'foo/{one}/{two?}/{three?}', array('as' => 'foo', function() {}));
<add> $routes->add($route);
<add>
<add> $this->assertEquals('http://www.foo.com:8080/foo', $url->route('foo'));
<add> }
<add>
<ide> }
<ide>\ No newline at end of file | 2 |
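The fix strips any optional placeholders that were left unfilled, such as `{two?}` and `{three?}`, with a non-greedy regex and then trims dangling slashes. The same two steps with Python's `re`, on an illustrative path:

```python
import re

path = "foo/{one}/{two?}/{three?}".replace("{one}", "bar")

path = re.sub(r"\{.*?\?\}", "", path)   # "foo/bar//"
print(path.strip("/"))                  # "foo/bar"
```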
Ruby | Ruby | use new pour_bottle dsl | f2faf49e3f189885c6787f7d4beb680c3a987d4f | <ide><path>Library/Homebrew/formula_installer.rb
<ide> def pour_bottle?(install_bottle_options = { :warn=>false })
<ide> return false unless options.empty?
<ide> return false if formula.bottle_disabled?
<ide> return true if formula.local_bottle_path
<del> return false unless formula.pour_bottle?
<add> unless formula.pour_bottle?
<add> if install_bottle_options[:warn] && formula.pour_bottle_check_unsatisfied_reason
<add> opoo <<-EOS.undent
<add> Building #{formula.full_name} from source:
<add> #{formula.pour_bottle_check_unsatisfied_reason}
<add> EOS
<add> end
<add> return false
<add> end
<ide>
<ide> unless bottle.compatible_cellar?
<ide> if install_bottle_options[:warn] | 1 |
PHP | PHP | add methods to caketestcase | bd0104d9727c8c15d01694ee52615b90103f2664 | <ide><path>lib/Cake/Test/Case/TestSuite/CakeTestCaseTest.php
<ide> App::uses('Controller', 'Controller');
<ide> App::uses('CakeHtmlReporter', 'TestSuite/Reporter');
<ide>
<del>if (!class_exists('AppController', false)) {
<del> require_once CAKE . 'Controller' . DS . 'AppController.php';
<del>} elseif (!defined('APP_CONTROLLER_EXISTS')) {
<del> define('APP_CONTROLLER_EXISTS', true);
<del>}
<del>
<ide> /**
<ide> * CakeTestCaseTest
<ide> *
<ide> public function testSetupBackUpValues() {
<ide> $this->assertArrayHasKey('debug', $this->_configure);
<ide> $this->assertArrayHasKey('Plugin', $this->_pathRestore);
<ide> }
<add>
<add>/**
<add> * test assertTextNotEquals()
<add> *
<add> * @return void
<add> */
<add> public function testAssertTextNotEquals() {
<add> $one = "\r\nOne\rTwooo";
<add> $two = "\nOne\nTwo";
<add> $this->assertTextNotEquals($one, $two);
<add> }
<add>
<add>/**
<add> * test assertTextEquals()
<add> *
<add> * @return void
<add> */
<add> public function testAssertTextEquals() {
<add> $one = "\r\nOne\rTwo";
<add> $two = "\nOne\nTwo";
<add> $this->assertTextEquals($one, $two);
<add> }
<ide> }
<ide><path>lib/Cake/TestSuite/CakeTestCase.php
<ide> public function loadFixtures() {
<ide> }
<ide> }
<ide>
<add>/**
<add> * Assert text equality, ignoring differences in newlines.
<add> * Helpful for doing cross platform tests of blocks of text.
<add> *
<add> * @param string $expected The expected value.
<add> * @param string $result The actual value.
<add> * @param string $message The message to use for failure.
<add> */
<add> public function assertTextNotEquals($expected, $result, $message = '') {
<add> $expected = str_replace(array("\r\n", "\r"), "\n", $expected);
<add> $result = str_replace(array("\r\n", "\r"), "\n", $result);
<add> return $this->assertNotEquals($expected, $result, $message);
<add> }
<add>
<add>/**
<add> * Assert text equality, ignoring differences in newlines.
<add> * Helpful for doing cross platform tests of blocks of text.
<add> *
<add> * @param string $expected The expected value.
<add> * @param string $result The actual value.
<add> * @param string $message The message to use for failure.
<add> */
<add> public function assertTextEquals($expected, $result, $message = '') {
<add> $expected = str_replace(array("\r\n", "\r"), "\n", $expected);
<add> $result = str_replace(array("\r\n", "\r"), "\n", $result);
<add> return $this->assertEquals($expected, $result, $message);
<add> }
<add>
<ide> /**
<ide> * Takes an array $expected and generates a regex from it to match the provided $string.
<ide> * Samples for $expected: | 2 |
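Both new assertions normalize Windows (`\r\n`) and old Mac (`\r`) line endings to `\n` before comparing, so text fixtures match across platforms. The normalization step, as a small Python equivalent:

```python
def normalize(text):
    # Replace \r\n first, then bare \r, exactly as the PHP str_replace
    # array does, so "\r\n" never turns into "\n\n".
    return text.replace("\r\n", "\n").replace("\r", "\n")

assert normalize("\r\nOne\rTwo") == normalize("\nOne\nTwo")
print("equal after normalization")
```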