content_type (stringclasses, 8 values) | main_lang (stringclasses, 7 values) | message (stringlengths 1-50) | sha (stringlengths 40) | patch (stringlengths 52-962k) | file_count (int64, 1-300)
---|---|---|---|---|---
PHP | PHP | simplify else cases | a90cb29c0f6f40b1605f97f0bba8bc74fff70955 | <ide><path>lib/Cake/Model/Datasource/Database/Mysql.php
<ide> public function listSources($data = null) {
<ide> if (!$result) {
<ide> $result->closeCursor();
<ide> return array();
<del> } else {
<del> $tables = array();
<del>
<del> while ($line = $result->fetch(PDO::FETCH_NUM)) {
<del> $tables[] = $line[0];
<del> }
<add> }
<add> $tables = array();
<ide>
<del> $result->closeCursor();
<del> parent::listSources($tables);
<del> return $tables;
<add> while ($line = $result->fetch(PDO::FETCH_NUM)) {
<add> $tables[] = $line[0];
<ide> }
<add>
<add> $result->closeCursor();
<add> parent::listSources($tables);
<add> return $tables;
<ide> }
<ide>
<ide> /**
<ide> public function listDetailedSources($name = null) {
<ide> if (!$result) {
<ide> $result->closeCursor();
<ide> return array();
<del> } else {
<del> $tables = array();
<del> foreach ($result as $row) {
<del> $tables[$row['Name']] = (array)$row;
<del> unset($tables[$row['Name']]['queryString']);
<del> if (!empty($row['Collation'])) {
<del> $charset = $this->getCharsetName($row['Collation']);
<del> if ($charset) {
<del> $tables[$row['Name']]['charset'] = $charset;
<del> }
<add> }
<add> $tables = array();
<add> foreach ($result as $row) {
<add> $tables[$row['Name']] = (array)$row;
<add> unset($tables[$row['Name']]['queryString']);
<add> if (!empty($row['Collation'])) {
<add> $charset = $this->getCharsetName($row['Collation']);
<add> if ($charset) {
<add> $tables[$row['Name']]['charset'] = $charset;
<ide> }
<ide> }
<del> $result->closeCursor();
<del> if (is_string($name) && isset($tables[$name])) {
<del> return $tables[$name];
<del> }
<del> return $tables;
<ide> }
<add> $result->closeCursor();
<add> if (is_string($name) && isset($tables[$name])) {
<add> return $tables[$name];
<add> }
<add> return $tables;
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/Model/Datasource/Database/Postgres.php
<ide> public function getSequence($table, $field = 'id') {
<ide> }
<ide> if (isset($this->_sequenceMap[$table][$field])) {
<ide> return $this->_sequenceMap[$table][$field];
<del> } else {
<del> return "{$table}_{$field}_seq";
<ide> }
<add> return "{$table}_{$field}_seq";
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/Model/Datasource/Database/Sqlite.php
<ide> public function fetchResult() {
<ide> }
<ide> }
<ide> return $resultRow;
<del> } else {
<del> $this->_result->closeCursor();
<del> return false;
<ide> }
<add> $this->_result->closeCursor();
<add> return false;
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/Model/Datasource/Database/Sqlserver.php
<ide> public function listSources($data = null) {
<ide> if (!$result) {
<ide> $result->closeCursor();
<ide> return array();
<del> } else {
<del> $tables = array();
<del>
<del> while ($line = $result->fetch(PDO::FETCH_NUM)) {
<del> $tables[] = $line[0];
<del> }
<add> }
<add> $tables = array();
<ide>
<del> $result->closeCursor();
<del> parent::listSources($tables);
<del> return $tables;
<add> while ($line = $result->fetch(PDO::FETCH_NUM)) {
<add> $tables[] = $line[0];
<ide> }
<add>
<add> $result->closeCursor();
<add> parent::listSources($tables);
<add> return $tables;
<ide> }
<ide>
<ide> /**
<ide> public function fields(Model $model, $alias = null, $fields = array(), $quote =
<ide> $result[] = $prepend . $fields[$i];
<ide> }
<ide> return $result;
<del> } else {
<del> return $fields;
<ide> }
<add> return $fields;
<ide> }
<ide>
<ide> /**
<ide> public function renderStatement($type, $data) {
<ide> ";
<ide> } elseif (strpos($limit, 'FETCH') !== false) {
<ide> return "SELECT {$fields} FROM {$table} {$alias} {$joins} {$conditions} {$group} {$order} {$limit}";
<del> } else {
<del> return "SELECT {$limit} {$fields} FROM {$table} {$alias} {$joins} {$conditions} {$group} {$order}";
<ide> }
<add> return "SELECT {$limit} {$fields} FROM {$table} {$alias} {$joins} {$conditions} {$group} {$order}";
<ide> case "schema":
<ide> extract($data);
<ide>
<ide><path>lib/Cake/Model/Datasource/DboSource.php
<ide> public function query() {
<ide> $recursive = $params[5 + $off];
<ide> }
<ide> return $args[2]->find('all', compact('conditions', 'fields', 'order', 'limit', 'page', 'recursive'));
<del> } else {
<del> if (isset($params[3 + $off])) {
<del> $recursive = $params[3 + $off];
<del> }
<del> return $args[2]->find('first', compact('conditions', 'fields', 'order', 'recursive'));
<ide> }
<del> } else {
<del> if (isset($args[1]) && $args[1] === true) {
<del> return $this->fetchAll($args[0], true);
<del> } elseif (isset($args[1]) && !is_array($args[1])) {
<del> return $this->fetchAll($args[0], false);
<del> } elseif (isset($args[1]) && is_array($args[1])) {
<del> if (isset($args[2])) {
<del> $cache = $args[2];
<del> } else {
<del> $cache = true;
<del> }
<del> return $this->fetchAll($args[0], $args[1], array('cache' => $cache));
<add> if (isset($params[3 + $off])) {
<add> $recursive = $params[3 + $off];
<ide> }
<add> return $args[2]->find('first', compact('conditions', 'fields', 'order', 'recursive'));
<add> }
<add> if (isset($args[1]) && $args[1] === true) {
<add> return $this->fetchAll($args[0], true);
<add> } elseif (isset($args[1]) && !is_array($args[1])) {
<add> return $this->fetchAll($args[0], false);
<add> } elseif (isset($args[1]) && is_array($args[1])) {
<add> if (isset($args[2])) {
<add> $cache = $args[2];
<add> } else {
<add> $cache = true;
<add> }
<add> return $this->fetchAll($args[0], $args[1], array('cache' => $cache));
<ide> }
<ide> }
<ide>
<ide> public function fetchRow($sql = null) {
<ide> $this->fetchVirtualField($resultRow);
<ide> }
<ide> return $resultRow;
<del> } else {
<del> return null;
<ide> }
<add> return null;
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/Model/Permission.php
<ide> public function check($aro, $aco, $action = "*") {
<ide>
<ide> if (empty($perms)) {
<ide> continue;
<del> } else {
<del> $perms = Hash::extract($perms, '{n}.' . $this->alias);
<del> foreach ($perms as $perm) {
<del> if ($action === '*') {
<add> }
<add> $perms = Hash::extract($perms, '{n}.' . $this->alias);
<add> foreach ($perms as $perm) {
<add> if ($action === '*') {
<ide>
<del> foreach ($permKeys as $key) {
<del> if (!empty($perm)) {
<del> if ($perm[$key] == -1) {
<del> return false;
<del> } elseif ($perm[$key] == 1) {
<del> $inherited[$key] = 1;
<del> }
<add> foreach ($permKeys as $key) {
<add> if (!empty($perm)) {
<add> if ($perm[$key] == -1) {
<add> return false;
<add> } elseif ($perm[$key] == 1) {
<add> $inherited[$key] = 1;
<ide> }
<ide> }
<add> }
<ide>
<del> if (count($inherited) === count($permKeys)) {
<add> if (count($inherited) === count($permKeys)) {
<add> return true;
<add> }
<add> } else {
<add> switch ($perm['_' . $action]) {
<add> case -1:
<add> return false;
<add> case 0:
<add> continue;
<add> case 1:
<ide> return true;
<del> }
<del> } else {
<del> switch ($perm['_' . $action]) {
<del> case -1:
<del> return false;
<del> case 0:
<del> continue;
<del> case 1:
<del> return true;
<del> }
<ide> }
<ide> }
<ide> }
<ide><path>lib/Cake/Model/Validator/CakeValidationRule.php
<ide> public function isRequired() {
<ide> if (in_array($this->required, array('create', 'update'), true)) {
<ide> if ($this->required === 'create' && !$this->isUpdate() || $this->required === 'update' && $this->isUpdate()) {
<ide> return true;
<del> } else {
<del> return false;
<ide> }
<add> return false;
<ide> }
<ide>
<ide> return $this->required;
<ide><path>lib/Cake/Network/CakeSocket.php
<ide> public function enableCrypto($type, $clientOrServer = 'client', $enable = true)
<ide> if ($enableCryptoResult === true) {
<ide> $this->encrypted = $enable;
<ide> return true;
<del> } else {
<del> $errorMessage = __d('cake_dev', 'Unable to perform enableCrypto operation on CakeSocket');
<del> $this->setLastError(null, $errorMessage);
<del> throw new SocketException($errorMessage);
<ide> }
<add> $errorMessage = __d('cake_dev', 'Unable to perform enableCrypto operation on CakeSocket');
<add> $this->setLastError(null, $errorMessage);
<add> throw new SocketException($errorMessage);
<ide> }
<ide>
<ide> }
<ide><path>lib/Cake/Network/Email/CakeEmail.php
<ide> protected function _getContentTypeCharset() {
<ide> $charset = strtoupper($this->charset);
<ide> if (array_key_exists($charset, $this->_contentTypeCharset)) {
<ide> return strtoupper($this->_contentTypeCharset[$charset]);
<del> } else {
<del> return strtoupper($this->charset);
<ide> }
<add> return strtoupper($this->charset);
<ide> }
<ide>
<ide> }
<ide><path>lib/Cake/Test/Case/Model/Behavior/AclBehaviorTest.php
<ide> public function parentNode() {
<ide> }
<ide> if (!$motherId) {
<ide> return null;
<del> } else {
<del> return array('AclPerson' => array('id' => $motherId));
<ide> }
<add> return array('AclPerson' => array('id' => $motherId));
<ide> }
<ide>
<ide> }
<ide><path>lib/Cake/Test/Case/Model/Behavior/ContainableBehaviorTest.php
<ide> protected function _containments($Model, $contain = array()) {
<ide> if (!is_array($Model)) {
<ide> $result = $Model->containments($contain);
<ide> return $this->_containments($result['models']);
<del> } else {
<del> $result = $Model;
<del> foreach ($result as $i => $containment) {
<del> $result[$i] = array_diff_key($containment, array('instance' => true));
<del> }
<add> }
<add> $result = $Model;
<add> foreach ($result as $i => $containment) {
<add> $result[$i] = array_diff_key($containment, array('instance' => true));
<ide> }
<ide> return $result;
<ide> }
<ide><path>lib/Cake/TestSuite/CakeTestSuiteCommand.php
<ide> public function run(array $argv, $exit = true) {
<ide> exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
<ide> } elseif (!isset($result) || $result->errorCount() > 0) {
<ide> exit(PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT);
<del> } else {
<del> exit(PHPUnit_TextUI_TestRunner::FAILURE_EXIT);
<ide> }
<add> exit(PHPUnit_TextUI_TestRunner::FAILURE_EXIT);
<ide> }
<ide> }
<ide>
<ide><path>lib/Cake/Utility/Folder.php
<ide> public function create($pathname, $mode = false) {
<ide> umask($old);
<ide> $this->_messages[] = __d('cake_dev', '%s created', $pathname);
<ide> return true;
<del> } else {
<del> umask($old);
<del> $this->_errors[] = __d('cake_dev', '%s NOT created', $pathname);
<del> return false;
<ide> }
<add> umask($old);
<add> $this->_errors[] = __d('cake_dev', '%s NOT created', $pathname);
<add> return false;
<ide> }
<ide> }
<ide> return false;
<ide> public function realpath($path) {
<ide> if (!empty($newparts)) {
<ide> array_pop($newparts);
<ide> continue;
<del> } else {
<del> return false;
<ide> }
<add> return false;
<ide> }
<ide> $newparts[] = $part;
<ide> }
<ide><path>lib/Cake/Utility/Validation.php
<ide> protected static function _pass($method, $check, $classPrefix) {
<ide> protected static function _check($check, $regex) {
<ide> if (is_string($regex) && preg_match($regex, $check)) {
<ide> return true;
<del> } else {
<del> return false;
<ide> }
<add> return false;
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/View/Helper/FormHelper.php
<ide> protected function _introspectModel($model, $key, $field = null) {
<ide> return $this->fieldset[$model]['fields'];
<ide> } elseif (isset($this->fieldset[$model]['fields'][$field])) {
<ide> return $this->fieldset[$model]['fields'][$field];
<del> } else {
<del> return isset($object->hasAndBelongsToMany[$field]) ? array('type' => 'multiple') : null;
<ide> }
<add> return isset($object->hasAndBelongsToMany[$field]) ? array('type' => 'multiple') : null;
<ide> }
<ide>
<ide> if ($key === 'errors' && !isset($this->validationErrors[$model])) {
<ide> protected function _introspectModel($model, $key, $field = null) {
<ide> if ($key === 'validates') {
<ide> if (empty($field)) {
<ide> return $this->fieldset[$model]['validates'];
<del> } else {
<del> return isset($this->fieldset[$model]['validates'][$field]) ?
<del> $this->fieldset[$model]['validates'] : null;
<ide> }
<add> return isset($this->fieldset[$model]['validates'][$field]) ?
<add> $this->fieldset[$model]['validates'] : null;
<ide> }
<ide> }
<ide>
<ide> protected function _name($options = array(), $field = null, $key = 'name') {
<ide> if (is_array($options)) {
<ide> $options[$key] = $name;
<ide> return $options;
<del> } else {
<del> return $name;
<ide> }
<add> return $name;
<ide> }
<ide> return parent::_name($options, $field, $key);
<ide> }
<ide><path>lib/Cake/View/Helper/PaginatorHelper.php
<ide> protected function _pagingLink($which, $title = null, $options = array(), $disab
<ide> }
<ide> $link = $this->link($title, $url, compact('escape', 'model') + $options);
<ide> return $this->Html->tag($tag, $link, compact('class'));
<del> } else {
<del> unset($options['rel']);
<del> if (!$tag) {
<del> if ($disabledTag) {
<del> $tag = $disabledTag;
<del> $disabledTag = null;
<del> } else {
<del> $tag = $_defaults['tag'];
<del> }
<del> }
<add> }
<add> unset($options['rel']);
<add> if (!$tag) {
<ide> if ($disabledTag) {
<del> $title = $this->Html->tag($disabledTag, $title, compact('escape') + $options);
<del> return $this->Html->tag($tag, $title, compact('class'));
<add> $tag = $disabledTag;
<add> $disabledTag = null;
<add> } else {
<add> $tag = $_defaults['tag'];
<ide> }
<del> return $this->Html->tag($tag, $title, compact('escape', 'class') + $options);
<ide> }
<add> if ($disabledTag) {
<add> $title = $this->Html->tag($disabledTag, $title, compact('escape') + $options);
<add> return $this->Html->tag($tag, $title, compact('class'));
<add> }
<add> return $this->Html->tag($tag, $title, compact('escape', 'class') + $options);
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/View/View.php
<ide> public function renderCache($filename, $timeStart) {
<ide> //@codingStandardsIgnoreEnd
<ide> unset($out);
<ide> return false;
<del> } else {
<del> return substr($out, strlen($match[0]));
<ide> }
<add> return substr($out, strlen($match[0]));
<ide> }
<ide> }
<ide> | 17 |
Javascript | Javascript | add test case | 5ff9a1486f321122595d94cf9159aebff994c721 | <ide><path>test/Defaults.unittest.js
<ide> describe("Defaults", () => {
<ide> process.chdir(cwd);
<ide> }
<ide> );
<add>
<add> test(
<add> "array defaults",
<add> {
<add> output: {
<add> enabledChunkLoadingTypes: ["require", "..."],
<add> enabledWasmLoadingTypes: ["...", "async-node"]
<add> }
<add> },
<add> e =>
<add> e.toMatchInlineSnapshot(`
<add> - Expected
<add> + Received
<add>
<add>
<add> + "require",
<add>
<add> + "async-node",
<add> `)
<add> );
<ide> }); | 1 |
Javascript | Javascript | apply split methods to their usages | 524043032189e95fac724da49d316e44cec76c68 | <ide><path>lib/APIPlugin.js
<ide> class APIPlugin {
<ide> parser.plugin(`evaluate typeof ${key}`, ParserHelpers.evaluateToString(REPLACEMENT_TYPES[key]));
<ide> });
<ide> IGNORES.forEach(key => {
<del> parser.plugin(key, ParserHelpers.returnTrue);
<add> parser.plugin(key, ParserHelpers.skipTraversal);
<ide> });
<ide> });
<ide> });
<ide><path>lib/DefinePlugin.js
<ide> class DefinePlugin {
<ide> const splittedKey = key.split(".");
<ide> splittedKey.slice(1).forEach((_, i) => {
<ide> const fullKey = prefix + splittedKey.slice(0, i + 1).join(".");
<del> parser.plugin("can-rename " + fullKey, ParserHelpers.returnTrue);
<add> parser.plugin("can-rename " + fullKey, ParserHelpers.approve);
<ide> });
<ide> }
<ide>
<ide> class DefinePlugin {
<ide> let recurseTypeof = false;
<ide> code = toCode(code);
<ide> if(!isTypeof) {
<del> parser.plugin("can-rename " + key, ParserHelpers.returnTrue);
<add> parser.plugin("can-rename " + key, ParserHelpers.approve);
<ide> parser.plugin("evaluate Identifier " + key, (expr) => {
<ide> if(recurse) return;
<ide> let res = parser.evaluate(code);
<ide> class DefinePlugin {
<ide>
<ide> function applyObjectDefine(key, obj) {
<ide> let code = stringifyObj(obj);
<del> parser.plugin("can-rename " + key, ParserHelpers.returnTrue);
<add> parser.plugin("can-rename " + key, ParserHelpers.approve);
<ide> parser.plugin("evaluate Identifier " + key, (expr) => new BasicEvaluatedExpression().setRange(expr.range));
<ide> parser.plugin("evaluate typeof " + key, ParserHelpers.evaluateToString("object"));
<ide> parser.plugin("expression " + key, ParserHelpers.toConstantDependency(code));
<ide><path>lib/HotModuleReplacementPlugin.js
<ide> HotModuleReplacementPlugin.prototype.apply = function(compiler) {
<ide> }.bind(this));
<ide> }
<ide> });
<del> parser.plugin("expression module.hot", ParserHelpers.returnTrue);
<add> parser.plugin("expression module.hot", ParserHelpers.skipTraversal);
<ide> });
<ide> });
<ide>
<ide><path>lib/ProvidePlugin.js
<ide> ProvidePlugin.prototype.apply = function(compiler) {
<ide> if(splittedName.length > 0) {
<ide> splittedName.slice(1).forEach(function(_, i) {
<ide> var name = splittedName.slice(0, i + 1).join(".");
<del> parser.plugin("can-rename " + name, ParserHelpers.returnTrue);
<add> parser.plugin("can-rename " + name, ParserHelpers.approve);
<ide> });
<ide> }
<ide> parser.plugin("expression " + name, function(expr) {
<ide><path>lib/dependencies/AMDPlugin.js
<ide> AMDPlugin.prototype.apply = function(compiler) {
<ide> parser.plugin("evaluate Identifier require.amd", ParserHelpers.evaluateToBoolean(true));
<ide> parser.plugin("typeof define", ParserHelpers.toConstantDependency(JSON.stringify("function")));
<ide> parser.plugin("evaluate typeof define", ParserHelpers.evaluateToString("function"));
<del> parser.plugin("can-rename define", ParserHelpers.returnTrue);
<add> parser.plugin("can-rename define", ParserHelpers.approve);
<ide> parser.plugin("rename define", function(expr) {
<ide> var dep = new AMDRequireItemDependency("!!webpack amd define", expr.range);
<ide> dep.userRequest = "define";
<ide><path>lib/dependencies/CommonJsPlugin.js
<ide> CommonJsPlugin.prototype.apply = function(compiler) {
<ide> this.scope.definitions.push("require");
<ide> return true;
<ide> });
<del> parser.plugin("can-rename require", ParserHelpers.returnTrue);
<add> parser.plugin("can-rename require", ParserHelpers.approve);
<ide> parser.plugin("rename require", function(expr) {
<ide> // define the require variable. It's still undefined, but not "not defined".
<ide> var dep = new ConstDependency("var require;", 0);
<ide> dep.loc = expr.loc;
<ide> this.state.current.addDependency(dep);
<ide> return false;
<ide> });
<del> parser.plugin("typeof module", ParserHelpers.returnTrue);
<add> parser.plugin("typeof module", ParserHelpers.skipTraversal);
<ide> parser.plugin("evaluate typeof exports", ParserHelpers.evaluateToString("object"));
<ide> parser.apply(
<ide> new CommonJsRequireDependencyParserPlugin(options), | 6 |
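
The webpack commit above replaces the generic `ParserHelpers.returnTrue` with two intention-revealing helpers at their call sites: `approve` for the `can-rename` hooks and `skipTraversal` for the `expression`/`typeof` hooks. Both presumably still just return `true`; the split exists so each registration documents what that `true` means. A self-contained sketch of the idea (the tiny `parser` stub below is an assumption for illustration, not webpack's parser):

```js
// Both helpers return true, but the name tells the reader *why*.
const approve = () => true;       // "yes, this identifier may be renamed"
const skipTraversal = () => true; // "handled here; don't walk into this expression"

// Minimal stand-in for the parser's hook registry, just to show call sites.
const hooks = new Map();
const parser = { plugin: (name, fn) => hooks.set(name, fn) };

parser.plugin("can-rename define", approve);        // reads as intent
parser.plugin("expression module.hot", skipTraversal);

console.log(hooks.get("can-rename define")());      // true
console.log(hooks.get("expression module.hot")());  // true
```
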
Go | Go | remove unused config check | c38d2d4601f8523a4f9c031fca5de9e5ab00ddf5 | <ide><path>libnetwork/drivers/bridge/bridge.go
<ide> func (d *driver) Config(option map[string]interface{}) error {
<ide> }
<ide>
<ide> if config.EnableIPForwarding {
<del> return setupIPForwarding(config)
<add> return setupIPForwarding()
<ide> }
<ide>
<ide> return nil
<ide><path>libnetwork/drivers/bridge/errors.go
<ide> func (eim ErrInvalidMtu) Error() string {
<ide> // BadRequest denotes the type of this error
<ide> func (eim ErrInvalidMtu) BadRequest() {}
<ide>
<del>// ErrIPFwdCfg is returned when ip forwarding setup is invoked when the configuration
<del>// not enabled.
<del>type ErrIPFwdCfg struct{}
<del>
<del>func (eipf *ErrIPFwdCfg) Error() string {
<del> return "unexpected request to enable IP Forwarding"
<del>}
<del>
<del>// BadRequest denotes the type of this error
<del>func (eipf *ErrIPFwdCfg) BadRequest() {}
<del>
<ide> // ErrInvalidPort is returned when the container or host port specified in the port binding is not valid.
<ide> type ErrInvalidPort string
<ide>
<ide><path>libnetwork/drivers/bridge/setup_ip_forwarding.go
<ide> const (
<ide> ipv4ForwardConfPerm = 0644
<ide> )
<ide>
<del>func setupIPForwarding(config *configuration) error {
<del> // Sanity Check
<del> if config.EnableIPForwarding == false {
<del> return &ErrIPFwdCfg{}
<del> }
<del>
<add>func setupIPForwarding() error {
<ide> // Enable IPv4 forwarding
<ide> if err := ioutil.WriteFile(ipv4ForwardConf, []byte{'1', '\n'}, ipv4ForwardConfPerm); err != nil {
<ide> return fmt.Errorf("Setup IP forwarding failed: %v", err)
<ide><path>libnetwork/drivers/bridge/setup_ip_forwarding_test.go
<ide> func TestSetupIPForwarding(t *testing.T) {
<ide> writeIPForwardingSetting(t, []byte{'0', '\n'})
<ide> }
<ide>
<del> // Create test interface with ip forwarding setting enabled
<del> config := &configuration{
<del> EnableIPForwarding: true}
<del>
<ide> // Set IP Forwarding
<del> if err := setupIPForwarding(config); err != nil {
<add> if err := setupIPForwarding(); err != nil {
<ide> t.Fatalf("Failed to setup IP forwarding: %v", err)
<ide> }
<ide>
<ide> func TestSetupIPForwarding(t *testing.T) {
<ide> }
<ide> }
<ide>
<del>func TestUnexpectedSetupIPForwarding(t *testing.T) {
<del> // Read current setting and ensure the original value gets restored
<del> procSetting := readCurrentIPForwardingSetting(t)
<del> defer reconcileIPForwardingSetting(t, procSetting)
<del>
<del> // Create test interface without ip forwarding setting enabled
<del> config := &configuration{
<del> EnableIPForwarding: false}
<del>
<del> // Attempt Set IP Forwarding
<del> err := setupIPForwarding(config)
<del> if err == nil {
<del> t.Fatal("Setup IP forwarding was expected to fail")
<del> }
<del>
<del> if _, ok := err.(*ErrIPFwdCfg); !ok {
<del> t.Fatalf("Setup IP forwarding failed with unexpected error: %v", err)
<del> }
<del>}
<del>
<ide> func readCurrentIPForwardingSetting(t *testing.T) []byte {
<ide> procSetting, err := ioutil.ReadFile(ipv4ForwardConf)
<ide> if err != nil { | 4 |
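
The Go change above removes a config parameter whose only purpose was a sanity check the caller already performs, together with the error type and test that existed solely for that check. The shape of the cleanup, sketched here in JavaScript rather than Go (names are illustrative):

```js
// Before: the helper re-checks a flag its caller has already gated on.
function setupIPForwardingOld(config) {
  if (!config.enableIPForwarding) {
    throw new Error("unexpected request to enable IP forwarding");
  }
  return writeForwardingSetting("1");
}

// After: the caller owns the decision; the helper just does the work.
function setupIPForwarding() {
  return writeForwardingSetting("1");
}

function writeForwardingSetting(value) {
  // stand-in for writing the ip_forward sysctl
  return `ip_forward=${value}`;
}

const config = { enableIPForwarding: true };
if (config.enableIPForwarding) {
  console.log(setupIPForwarding()); // "ip_forward=1"
}
```
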
Text | Text | document the behavior of console.assert() | 4cee226eb41f2c2e3c71800facc0cbaa2f06ecb5 | <ide><path>doc/api/console.md
<ide> changes:
<ide> * `...message` {any} All arguments besides `value` are used as error message.
<ide>
<ide> A simple assertion test that verifies whether `value` is truthy. If it is not,
<add>or `value` is not passed,
<ide> `Assertion failed` is logged. If provided, the error `message` is formatted
<ide> using [`util.format()`][] by passing along all message arguments. The output is
<ide> used as the error message.
<ide> console.assert(true, 'does nothing');
<ide> // OK
<ide> console.assert(false, 'Whoops %s work', 'didn\'t');
<ide> // Assertion failed: Whoops didn't work
<add>console.assert();
<add>// Assertion failed
<ide> ```
<ide>
<ide> Calling `console.assert()` with a falsy assertion will only cause the `message` | 1 |
PHP | PHP | allow multiple folders for migrations | 615ba9a6ead3608ef2cf6956c85a6fc71ac37e57 | <ide><path>src/Illuminate/Database/Console/Migrations/MigrateCommand.php
<ide> public function fire()
<ide> // we will use the path relative to the root of this installation folder
<ide> // so that migrations may be run for any path within the applications.
<ide> if (! is_null($path = $this->input->getOption('path'))) {
<del> $path = $this->laravel->basePath().'/'.$path;
<add> $paths[] = $this->laravel->basePath().'/'.$path;
<ide> } else {
<del> $path = $this->getMigrationPath();
<add> $paths[] = $this->getMigrationPath();
<add>
<add> $paths = array_merge($paths, $this->migrator->paths());
<ide> }
<ide>
<del> $this->migrator->run($path, [
<add> $this->migrator->run($paths, [
<ide> 'pretend' => $pretend,
<ide> 'step' => $this->input->getOption('step'),
<ide> ]);
<ide><path>src/Illuminate/Database/Console/Migrations/ResetCommand.php
<ide> use Illuminate\Database\Migrations\Migrator;
<ide> use Symfony\Component\Console\Input\InputOption;
<ide>
<del>class ResetCommand extends Command
<add>class ResetCommand extends BaseCommand
<ide> {
<ide> use ConfirmableTrait;
<ide>
<ide> public function fire()
<ide>
<ide> $pretend = $this->input->getOption('pretend');
<ide>
<del> $this->migrator->reset($pretend);
<add> $paths[] = $this->getMigrationPath();
<add>
<add> $paths = array_merge($paths, $this->migrator->paths());
<add>
<add> $this->migrator->reset($paths, $pretend);
<ide>
<ide> // Once the migrator has run we will grab the note output and send it out to
<ide> // the console screen, since the migrator itself functions without having
<ide><path>src/Illuminate/Database/Console/Migrations/RollbackCommand.php
<ide> use Illuminate\Database\Migrations\Migrator;
<ide> use Symfony\Component\Console\Input\InputOption;
<ide>
<del>class RollbackCommand extends Command
<add>class RollbackCommand extends BaseCommand
<ide> {
<ide> use ConfirmableTrait;
<ide>
<ide> public function fire()
<ide>
<ide> $pretend = $this->input->getOption('pretend');
<ide>
<del> $this->migrator->rollback($pretend);
<add> $paths[] = $this->getMigrationPath();
<add>
<add> $paths = array_merge($paths, $this->migrator->paths());
<add>
<add> $this->migrator->rollback($paths, $pretend);
<ide>
<ide> // Once the migrator has run we will grab the note output and send it out to
<ide> // the console screen, since the migrator itself functions without having
<ide><path>src/Illuminate/Database/Console/Migrations/StatusCommand.php
<ide> public function fire()
<ide> $this->migrator->setConnection($this->input->getOption('database'));
<ide>
<ide> if (! is_null($path = $this->input->getOption('path'))) {
<del> $path = $this->laravel->basePath().'/'.$path;
<add> $paths[] = $this->laravel->basePath().'/'.$path;
<ide> } else {
<del> $path = $this->getMigrationPath();
<add> $paths[] = $this->getMigrationPath();
<add>
<add> $paths = array_merge($paths, $this->migrator->paths());
<ide> }
<ide>
<ide> $ran = $this->migrator->getRepository()->getRan();
<ide>
<ide> $migrations = [];
<ide>
<del> foreach ($this->getAllMigrationFiles($path) as $migration) {
<del> $migrations[] = in_array($migration, $ran) ? ['<info>Y</info>', $migration] : ['<fg=red>N</fg=red>', $migration];
<add> foreach ($this->getAllMigrationFiles($paths) as $migration) {
<add> $migrations[] = in_array($this->migrator->getMigrationName($migration), $ran) ? ['<info>Y</info>', $this->migrator->getMigrationName($migration)] : ['<fg=red>N</fg=red>', $this->migrator->getMigrationName($migration)];
<ide> }
<ide>
<ide> if (count($migrations) > 0) {
<ide> public function fire()
<ide> /**
<ide> * Get all of the migration files.
<ide> *
<del> * @param string $path
<add> * @param array $paths
<ide> * @return array
<ide> */
<del> protected function getAllMigrationFiles($path)
<add> protected function getAllMigrationFiles(array $paths)
<ide> {
<del> return $this->migrator->getMigrationFiles($path);
<add> return $this->migrator->getMigrationFiles($paths);
<ide> }
<ide>
<ide> /**
<ide><path>src/Illuminate/Database/Migrations/Migrator.php
<ide> class Migrator
<ide> */
<ide> protected $notes = [];
<ide>
<add> /**
<add> * The paths for all migration files.
<add> *
<add> * @var array
<add> */
<add> protected $paths = [];
<add>
<ide> /**
<ide> * Create a new migrator instance.
<ide> *
<ide> public function __construct(MigrationRepositoryInterface $repository,
<ide> /**
<ide> * Run the outstanding migrations at a given path.
<ide> *
<del> * @param string $path
<add> * @param string|array $paths
<ide> * @param array $options
<ide> * @return void
<ide> */
<del> public function run($path, array $options = [])
<add> public function run($paths, array $options = [])
<ide> {
<ide> $this->notes = [];
<ide>
<del> $files = $this->getMigrationFiles($path);
<add> $files = $this->getMigrationFiles($paths);
<ide>
<ide> // Once we grab all of the migration files for the path, we will compare them
<ide> // against the migrations that have already been run for this package then
<ide> // run each of the outstanding migrations against a database connection.
<ide> $ran = $this->repository->getRan();
<ide>
<del> $migrations = array_diff($files, $ran);
<add> $migrations = [];
<add>
<add> foreach ($files as $file) {
<add> if (! in_array($this->getMigrationName($file), $ran)) {
<add> $migrations[] = $file;
<add> }
<add> }
<ide>
<del> $this->requireFiles($path, $migrations);
<add> $this->requireFiles($migrations);
<ide>
<ide> $this->runMigrationList($migrations, $options);
<ide> }
<ide> public function runMigrationList($migrations, array $options = [])
<ide> */
<ide> protected function runUp($file, $batch, $pretend)
<ide> {
<add> $file = $this->getMigrationName($file);
<add>
<ide> // First we will resolve a "real" instance of the migration class from this
<ide> // migration file name. Once we have the instances we can run the actual
<ide> // command such as "up" or "down", or we can just simulate the action.
<ide> protected function runUp($file, $batch, $pretend)
<ide> /**
<ide> * Rollback the last migration operation.
<ide> *
<add> * @param string|array $paths
<ide> * @param bool $pretend
<ide> * @return int
<ide> */
<del> public function rollback($pretend = false)
<add> public function rollback(array $paths, $pretend = false)
<ide> {
<ide> $this->notes = [];
<ide>
<ide> public function rollback($pretend = false)
<ide>
<ide> $count = count($migrations);
<ide>
<add> $files = $this->getMigrationFiles($paths);
<add>
<ide> if ($count === 0) {
<ide> $this->note('<info>Nothing to rollback.</info>');
<ide> } else {
<ide> // We need to reverse these migrations so that they are "downed" in reverse
<ide> // to what they run on "up". It lets us backtrack through the migrations
<ide> // and properly reverse the entire database schema operation that ran.
<add> $this->requireFiles($files);
<ide> foreach ($migrations as $migration) {
<del> $this->runDown((object) $migration, $pretend);
<add> foreach ($files as $file) {
<add> if ($this->getMigrationName($file) == $migration->migration) {
<add> $this->runDown($file, (object) $migration, $pretend);
<add> }
<add> }
<ide> }
<ide> }
<ide>
<ide> public function rollback($pretend = false)
<ide> /**
<ide> * Rolls all of the currently applied migrations back.
<ide> *
<add> * @param string|array $paths
<ide> * @param bool $pretend
<ide> * @return int
<ide> */
<del> public function reset($pretend = false)
<add> public function reset($paths, $pretend = false)
<ide> {
<ide> $this->notes = [];
<ide>
<add> $files = $this->getMigrationFiles($paths);
<add>
<ide> $migrations = array_reverse($this->repository->getRan());
<ide>
<ide> $count = count($migrations);
<ide>
<ide> if ($count === 0) {
<ide> $this->note('<info>Nothing to rollback.</info>');
<ide> } else {
<add> $this->requireFiles($files);
<ide> foreach ($migrations as $migration) {
<del> $this->runDown((object) ['migration' => $migration], $pretend);
<add> foreach ($files as $file) {
<add> if ($this->getMigrationName($file) == $migration) {
<add> $this->runDown($file, (object) ['migration' => $migration], $pretend);
<add> }
<add> }
<ide> }
<ide> }
<ide>
<ide> public function reset($pretend = false)
<ide> /**
<ide> * Run "down" a migration instance.
<ide> *
<add> * @param string $file
<ide> * @param object $migration
<ide> * @param bool $pretend
<ide> * @return void
<ide> */
<del> protected function runDown($migration, $pretend)
<add> protected function runDown($file, $migration, $pretend)
<ide> {
<del> $file = $migration->migration;
<add> $file = $this->getMigrationName($file);
<ide>
<ide> // First we will get the file name of the migration so we can resolve out an
<ide> // instance of the migration. Once we get an instance we can either run a
<ide> protected function runDown($migration, $pretend)
<ide> /**
<ide> * Get all of the migration files in a given path.
<ide> *
<del> * @param string $path
<add> * @param string|array $paths
<ide> * @return array
<ide> */
<del> public function getMigrationFiles($path)
<add> public function getMigrationFiles($paths)
<ide> {
<del> $files = $this->files->glob($path.'/*_*.php');
<add> $files = [];
<add>
<add> $paths = is_array($paths) ? $paths : [$paths];
<add>
<add> foreach ($paths as $path) {
<add> $files[] = $this->files->glob($path.'/*_*.php');
<add> }
<add>
<add> $files = array_flatten($files);
<add>
<add> $files = array_filter($files);
<ide>
<ide> // Once we have the array of files in the directory we will just remove the
<ide> // extension and take the basename of the file which is all we need when
<ide> // finding the migrations that haven't been run against the databases.
<del> if ($files === false) {
<add> if (empty($files)) {
<ide> return [];
<ide> }
<ide>
<del> $files = array_map(function ($file) {
<del> return str_replace('.php', '', basename($file));
<add> // Now we have a full list of file names we will sort them and because they
<add> // all start with a timestamp this should give us the migrations in the
<add> // order they were actually created in by the application developers.
<add> usort($files, function ($a, $b) {
<add> $a = $this->getMigrationName($a);
<add> $b = $this->getMigrationName($b);
<ide>
<del> }, $files);
<add> if ($a == $b) {
<add> return 0;
<add> }
<ide>
<del> // Once we have all of the formatted file names we will sort them and since
<del> // they all start with a timestamp this should give us the migrations in
<del> // the order they were actually created by the application developers.
<del> sort($files);
<add> return ($a < $b) ? -1 : 1;
<add> });
<ide>
<ide> return $files;
<ide> }
<ide>
<ide> /**
<ide> * Require in all the migration files in a given path.
<ide> *
<del> * @param string $path
<ide> * @param array $files
<ide> * @return void
<ide> */
<del> public function requireFiles($path, array $files)
<add> public function requireFiles(array $files)
<ide> {
<ide> foreach ($files as $file) {
<del> $this->files->requireOnce($path.'/'.$file.'.php');
<add> $this->files->requireOnce($file);
<ide> }
<ide> }
<ide>
<ide> public function getFilesystem()
<ide> {
<ide> return $this->files;
<ide> }
<add>
<add> /**
<add> * Set a path which contains migration files.
<add> *
<add> * @param string $path
<add> */
<add> public function path($path)
<add> {
<add> $this->paths[] = $path;
<add> }
<add>
<add> /**
<add> * Get all custom migration paths.
<add> *
<add> * @return array
<add> */
<add> public function paths()
<add> {
<add> return $this->paths;
<add> }
<add>
<add> public function getMigrationName($path)
<add> {
<add> return str_replace('.php', '', basename($path));
<add> }
<ide> }
<ide><path>tests/Database/DatabaseMigrationMigrateCommandTest.php
<ide> public function testBasicMigrationsCallMigratorWithProperArguments()
<ide> $app = new ApplicationDatabaseMigrationStub(['path.database' => __DIR__]);
<ide> $app->useDatabasePath(__DIR__);
<ide> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with(null);
<del> $migrator->shouldReceive('run')->once()->with(__DIR__.DIRECTORY_SEPARATOR.'migrations', ['pretend' => false, 'step' => false]);
<add> $migrator->shouldReceive('run')->once()->with([__DIR__.DIRECTORY_SEPARATOR.'migrations'], ['pretend' => false, 'step' => false]);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide> $migrator->shouldReceive('repositoryExists')->once()->andReturn(true);
<ide>
<ide> public function testMigrationRepositoryCreatedWhenNecessary()
<ide> $app = new ApplicationDatabaseMigrationStub(['path.database' => __DIR__]);
<ide> $app->useDatabasePath(__DIR__);
<ide> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with(null);
<del> $migrator->shouldReceive('run')->once()->with(__DIR__.DIRECTORY_SEPARATOR.'migrations', ['pretend' => false, 'step' => false]);
<add> $migrator->shouldReceive('run')->once()->with([__DIR__.DIRECTORY_SEPARATOR.'migrations'], ['pretend' => false, 'step' => false]);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide> $migrator->shouldReceive('repositoryExists')->once()->andReturn(false);
<ide> $command->expects($this->once())->method('call')->with($this->equalTo('migrate:install'), $this->equalTo(['--database' => null]));
<ide> public function testTheCommandMayBePretended()
<ide> $app = new ApplicationDatabaseMigrationStub(['path.database' => __DIR__]);
<ide> $app->useDatabasePath(__DIR__);
<ide> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with(null);
<del> $migrator->shouldReceive('run')->once()->with(__DIR__.DIRECTORY_SEPARATOR.'migrations', ['pretend' => true, 'step' => false]);
<add> $migrator->shouldReceive('run')->once()->with([__DIR__.DIRECTORY_SEPARATOR.'migrations'], ['pretend' => true, 'step' => false]);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide> $migrator->shouldReceive('repositoryExists')->once()->andReturn(true);
<ide>
<ide> public function testTheDatabaseMayBeSet()
<ide> $app = new ApplicationDatabaseMigrationStub(['path.database' => __DIR__]);
<ide> $app->useDatabasePath(__DIR__);
<ide> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with('foo');
<del> $migrator->shouldReceive('run')->once()->with(__DIR__.DIRECTORY_SEPARATOR.'migrations', ['pretend' => false, 'step' => false]);
<add> $migrator->shouldReceive('run')->once()->with([__DIR__.DIRECTORY_SEPARATOR.'migrations'], ['pretend' => false, 'step' => false]);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide> $migrator->shouldReceive('repositoryExists')->once()->andReturn(true);
<ide>
<ide> public function testStepMayBeSet()
<ide> $app = new ApplicationDatabaseMigrationStub(['path.database' => __DIR__]);
<ide> $app->useDatabasePath(__DIR__);
<ide> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with(null);
<del> $migrator->shouldReceive('run')->once()->with(__DIR__.DIRECTORY_SEPARATOR.'migrations', ['pretend' => false, 'step' => true]);
<add> $migrator->shouldReceive('run')->once()->with([__DIR__.DIRECTORY_SEPARATOR.'migrations'], ['pretend' => false, 'step' => true]);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide> $migrator->shouldReceive('repositoryExists')->once()->andReturn(true);
<ide>
<ide><path>tests/Database/DatabaseMigrationResetCommandTest.php
<ide> <?php
<ide>
<ide> use Mockery as m;
<add>use Illuminate\Foundation\Application;
<ide> use Illuminate\Database\Console\Migrations\ResetCommand;
<ide>
<ide> class DatabaseMigrationResetCommandTest extends PHPUnit_Framework_TestCase
<ide> public function tearDown()
<ide> public function testResetCommandCallsMigratorWithProperArguments()
<ide> {
<ide> $command = new ResetCommand($migrator = m::mock('Illuminate\Database\Migrations\Migrator'));
<del> $command->setLaravel(new AppDatabaseMigrationStub());
<add> $app = new ApplicationDatabaseResetStub(['path.database' => __DIR__]);
<add> $app->useDatabasePath(__DIR__);
<add> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with(null);
<ide> $migrator->shouldReceive('repositoryExists')->once()->andReturn(true);
<del> $migrator->shouldReceive('reset')->once()->with(false);
<add> $migrator->shouldReceive('reset')->once()->with([__DIR__.'/migrations'], false);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide>
<ide> $this->runCommand($command);
<ide> public function testResetCommandCallsMigratorWithProperArguments()
<ide> public function testResetCommandCanBePretended()
<ide> {
<ide> $command = new ResetCommand($migrator = m::mock('Illuminate\Database\Migrations\Migrator'));
<del> $command->setLaravel(new AppDatabaseMigrationStub());
<add> $app = new ApplicationDatabaseResetStub(['path.database' => __DIR__]);
<add> $app->useDatabasePath(__DIR__);
<add> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with('foo');
<ide> $migrator->shouldReceive('repositoryExists')->once()->andReturn(true);
<del> $migrator->shouldReceive('reset')->once()->with(true);
<add> $migrator->shouldReceive('reset')->once()->with([__DIR__.'/migrations'], true);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide>
<ide> $this->runCommand($command, ['--pretend' => true, '--database' => 'foo']);
<ide> protected function runCommand($command, $input = [])
<ide> }
<ide> }
<ide>
<del>class AppDatabaseMigrationStub extends Illuminate\Foundation\Application
<add>class ApplicationDatabaseResetStub extends Application
<ide> {
<add> public function __construct(array $data = [])
<add> {
<add> foreach ($data as $abstract => $instance) {
<add> $this->instance($abstract, $instance);
<add> }
<add> }
<add>
<ide> public function environment()
<ide> {
<ide> return 'development';
<ide><path>tests/Database/DatabaseMigrationRollbackCommandTest.php
<ide> public function tearDown()
<ide> public function testRollbackCommandCallsMigratorWithProperArguments()
<ide> {
<ide> $command = new RollbackCommand($migrator = m::mock('Illuminate\Database\Migrations\Migrator'));
<del> $command->setLaravel(new AppDatabaseMigrationRollbackStub());
<add> $app = new ApplicationDatabaseRollbackStub(['path.database' => __DIR__]);
<add> $app->useDatabasePath(__DIR__);
<add> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with(null);
<del> $migrator->shouldReceive('rollback')->once()->with(false);
<add> $migrator->shouldReceive('rollback')->once()->with([__DIR__.'/migrations'], false);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide>
<ide> $this->runCommand($command);
<ide> public function testRollbackCommandCallsMigratorWithProperArguments()
<ide> public function testRollbackCommandCanBePretended()
<ide> {
<ide> $command = new RollbackCommand($migrator = m::mock('Illuminate\Database\Migrations\Migrator'));
<del> $command->setLaravel(new AppDatabaseMigrationRollbackStub());
<add> $app = new ApplicationDatabaseRollbackStub(['path.database' => __DIR__]);
<add> $app->useDatabasePath(__DIR__);
<add> $command->setLaravel($app);
<add> $migrator->shouldReceive('paths')->once()->andReturn([]);
<ide> $migrator->shouldReceive('setConnection')->once()->with('foo');
<del> $migrator->shouldReceive('rollback')->once()->with(true);
<add> $migrator->shouldReceive('rollback')->once()->with([__DIR__.'/migrations'], true);
<ide> $migrator->shouldReceive('getNotes')->andReturn([]);
<ide>
<ide> $this->runCommand($command, ['--pretend' => true, '--database' => 'foo']);
<ide> protected function runCommand($command, $input = [])
<ide> }
<ide> }
<ide>
<del>class AppDatabaseMigrationRollbackStub extends Application
<add>class ApplicationDatabaseRollbackStub extends Application
<ide> {
<add> public function __construct(array $data = [])
<add> {
<add> foreach ($data as $abstract => $instance) {
<add> $this->instance($abstract, $instance);
<add> }
<add> }
<add>
<ide> public function environment()
<ide> {
<ide> return 'development'; | 8 |
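
The Laravel patch above lets the migrator collect migration files from several registered paths instead of a single folder: glob each path, flatten the results, and order the files by their timestamp-prefixed basename so migrations from packages interleave correctly with the application's own. A small Node.js sketch of that ordering step (illustrative only, not Laravel's code):

```js
const path = require("path");

// Flatten per-path file lists and sort by the migration "name",
// i.e. the basename without extension, whose timestamp prefix
// gives chronological order across all source folders.
function sortMigrationFiles(filesPerPath) {
  const name = (f) => path.basename(f, ".php");
  return filesPerPath.flat().sort((a, b) => name(a).localeCompare(name(b)));
}

console.log(
  sortMigrationFiles([
    ["/app/database/migrations/2016_01_01_000000_create_users.php"],
    ["/packages/blog/migrations/2015_12_31_120000_create_posts.php"],
  ])
);
// The 2015_12_31... posts migration sorts before the 2016_01_01... users one.
```
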
Text | Text | use https clone url for docker/docker in arm.md | cddd28817001bc07104bf7f3481b6e74fd43caa9 | <ide><path>project/ARM.md
<ide> The Makefile does include logic to determine on which OS and architecture the Do
<ide> Based on OS and architecture it chooses the correct Dockerfile.
<ide> For the ARM 32bit architecture it uses `Dockerfile.armhf`.
<ide>
<del>So for example in order to build a Docker binary one has to
<del>1. clone the Docker/Docker repository on an ARM device `git clone [email protected]:docker/docker.git`
<add>So for example in order to build a Docker binary one has to:
<add>1. clone the Docker/Docker repository on an ARM device `git clone https://github.com/docker/docker.git`
<ide> 2. change into the checked out repository with `cd docker`
<ide> 3. execute `make binary` to create a Docker Engine binary for ARM
<ide> | 1 |
Text | Text | update tf install link | 5ca1ef4900c75cbab20b1a6e43d55a99839eec8e | <ide><path>im2txt/README.md
<ide> Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan.
<ide> Full text available at: http://arxiv.org/abs/1609.06647
<ide>
<ide> ## Contact
<del>***Author:*** Chris Shallue ([email protected]).
<add>***Author:*** Chris Shallue
<ide>
<del>***Pull requests and issues:*** @cshallue.
<add>***Pull requests and issues:*** @cshallue
<ide>
<ide> ## Contents
<ide> * [Model Overview](#model-overview)
<ide> approximately 10 times slower.
<ide> ### Install Required Packages
<ide> First ensure that you have installed the following required packages:
<ide>
<del>* **Bazel** ([instructions](http://bazel.io/docs/install.html)).
<del>* **TensorFlow** r0.12 or greater ([instructions](https://www.tensorflow.org/versions/master/get_started/os_setup.html)).
<del>* **NumPy** ([instructions](http://www.scipy.org/install.html)).
<add>* **Bazel** ([instructions](http://bazel.io/docs/install.html))
<add>* **TensorFlow** 1.0 or greater ([instructions](https://www.tensorflow.org/install/))
<add>* **NumPy** ([instructions](http://www.scipy.org/install.html))
<ide> * **Natural Language Toolkit (NLTK)**:
<del> * First install NLTK ([instructions](http://www.nltk.org/install.html)).
<del> * Then install the NLTK data ([instructions](http://www.nltk.org/data.html)).
<add> * First install NLTK ([instructions](http://www.nltk.org/install.html))
<add> * Then install the NLTK data ([instructions](http://www.nltk.org/data.html))
<ide>
<ide> ### Prepare the Training Data
<ide> | 1 |
Javascript | Javascript | convert `src/core/jpg.js` to use standard classes | 69dea39a42463d4fac4235853ce781f82f2fe23b | <ide><path>src/core/jpg.js
<ide> class EOIMarkerError extends BaseException {}
<ide> * (partners.adobe.com/public/developer/en/ps/sdk/5116.DCT_Filter.pdf)
<ide> */
<ide>
<del>const JpegImage = (function JpegImageClosure() {
<del> // prettier-ignore
<del> const dctZigZag = new Uint8Array([
<add>// prettier-ignore
<add>const dctZigZag = new Uint8Array([
<ide> 0,
<ide> 1, 8,
<ide> 16, 9, 2,
<ide> const JpegImage = (function JpegImageClosure() {
<ide> 63
<ide> ]);
<ide>
<del> const dctCos1 = 4017; // cos(pi/16)
<del> const dctSin1 = 799; // sin(pi/16)
<del> const dctCos3 = 3406; // cos(3*pi/16)
<del> const dctSin3 = 2276; // sin(3*pi/16)
<del> const dctCos6 = 1567; // cos(6*pi/16)
<del> const dctSin6 = 3784; // sin(6*pi/16)
<del> const dctSqrt2 = 5793; // sqrt(2)
<del> const dctSqrt1d2 = 2896; // sqrt(2) / 2
<del>
<del> // eslint-disable-next-line no-shadow
<del> function JpegImage({ decodeTransform = null, colorTransform = -1 } = {}) {
<del> this._decodeTransform = decodeTransform;
<del> this._colorTransform = colorTransform;
<add>const dctCos1 = 4017; // cos(pi/16)
<add>const dctSin1 = 799; // sin(pi/16)
<add>const dctCos3 = 3406; // cos(3*pi/16)
<add>const dctSin3 = 2276; // sin(3*pi/16)
<add>const dctCos6 = 1567; // cos(6*pi/16)
<add>const dctSin6 = 3784; // sin(6*pi/16)
<add>const dctSqrt2 = 5793; // sqrt(2)
<add>const dctSqrt1d2 = 2896; // sqrt(2) / 2
<add>
<add>function buildHuffmanTable(codeLengths, values) {
<add> let k = 0,
<add> i,
<add> j,
<add> length = 16;
<add> while (length > 0 && !codeLengths[length - 1]) {
<add> length--;
<ide> }
<del>
<del> function buildHuffmanTable(codeLengths, values) {
<del> let k = 0,
<del> i,
<del> j,
<del> length = 16;
<del> while (length > 0 && !codeLengths[length - 1]) {
<del> length--;
<del> }
<del> const code = [{ children: [], index: 0 }];
<del> let p = code[0],
<del> q;
<del> for (i = 0; i < length; i++) {
<del> for (j = 0; j < codeLengths[i]; j++) {
<add> const code = [{ children: [], index: 0 }];
<add> let p = code[0],
<add> q;
<add> for (i = 0; i < length; i++) {
<add> for (j = 0; j < codeLengths[i]; j++) {
<add> p = code.pop();
<add> p.children[p.index] = values[k];
<add> while (p.index > 0) {
<ide> p = code.pop();
<del> p.children[p.index] = values[k];
<del> while (p.index > 0) {
<del> p = code.pop();
<del> }
<del> p.index++;
<del> code.push(p);
<del> while (code.length <= i) {
<del> code.push((q = { children: [], index: 0 }));
<del> p.children[p.index] = q.children;
<del> p = q;
<del> }
<del> k++;
<ide> }
<del> if (i + 1 < length) {
<del> // p here points to last code
<add> p.index++;
<add> code.push(p);
<add> while (code.length <= i) {
<ide> code.push((q = { children: [], index: 0 }));
<ide> p.children[p.index] = q.children;
<ide> p = q;
<ide> }
<add> k++;
<add> }
<add> if (i + 1 < length) {
<add> // p here points to last code
<add> code.push((q = { children: [], index: 0 }));
<add> p.children[p.index] = q.children;
<add> p = q;
<ide> }
<del> return code[0].children;
<ide> }
<add> return code[0].children;
<add>}
<ide>
<del> function getBlockBufferOffset(component, row, col) {
<del> return 64 * ((component.blocksPerLine + 1) * row + col);
<del> }
<add>function getBlockBufferOffset(component, row, col) {
<add> return 64 * ((component.blocksPerLine + 1) * row + col);
<add>}
<ide>
<del> function decodeScan(
<del> data,
<del> offset,
<del> frame,
<del> components,
<del> resetInterval,
<del> spectralStart,
<del> spectralEnd,
<del> successivePrev,
<del> successive,
<del> parseDNLMarker = false
<del> ) {
<del> const mcusPerLine = frame.mcusPerLine;
<del> const progressive = frame.progressive;
<del>
<del> const startOffset = offset;
<del> let bitsData = 0,
<del> bitsCount = 0;
<del>
<del> function readBit() {
<del> if (bitsCount > 0) {
<del> bitsCount--;
<del> return (bitsData >> bitsCount) & 1;
<del> }
<del> bitsData = data[offset++];
<del> if (bitsData === 0xff) {
<del> const nextByte = data[offset++];
<del> if (nextByte) {
<del> if (nextByte === /* DNL = */ 0xdc && parseDNLMarker) {
<del> offset += 2; // Skip marker length.
<del>
<del> const scanLines = readUint16(data, offset);
<del> offset += 2;
<del> if (scanLines > 0 && scanLines !== frame.scanLines) {
<add>function decodeScan(
<add> data,
<add> offset,
<add> frame,
<add> components,
<add> resetInterval,
<add> spectralStart,
<add> spectralEnd,
<add> successivePrev,
<add> successive,
<add> parseDNLMarker = false
<add>) {
<add> const mcusPerLine = frame.mcusPerLine;
<add> const progressive = frame.progressive;
<add>
<add> const startOffset = offset;
<add> let bitsData = 0,
<add> bitsCount = 0;
<add>
<add> function readBit() {
<add> if (bitsCount > 0) {
<add> bitsCount--;
<add> return (bitsData >> bitsCount) & 1;
<add> }
<add> bitsData = data[offset++];
<add> if (bitsData === 0xff) {
<add> const nextByte = data[offset++];
<add> if (nextByte) {
<add> if (nextByte === /* DNL = */ 0xdc && parseDNLMarker) {
<add> offset += 2; // Skip marker length.
<add>
<add> const scanLines = readUint16(data, offset);
<add> offset += 2;
<add> if (scanLines > 0 && scanLines !== frame.scanLines) {
<add> throw new DNLMarkerError(
<add> "Found DNL marker (0xFFDC) while parsing scan data",
<add> scanLines
<add> );
<add> }
<add> } else if (nextByte === /* EOI = */ 0xd9) {
<add> if (parseDNLMarker) {
<add> // NOTE: only 8-bit JPEG images are supported in this decoder.
<add> const maybeScanLines = blockRow * (frame.precision === 8 ? 8 : 0);
<add> // Heuristic to attempt to handle corrupt JPEG images with too
<add> // large `scanLines` parameter, by falling back to the currently
<add> // parsed number of scanLines when it's at least (approximately)
<add> // one order of magnitude smaller than expected (fixes
<add> // issue10880.pdf and issue10989.pdf).
<add> if (
<add> maybeScanLines > 0 &&
<add> Math.round(frame.scanLines / maybeScanLines) >= 10
<add> ) {
<ide> throw new DNLMarkerError(
<del> "Found DNL marker (0xFFDC) while parsing scan data",
<del> scanLines
<add> "Found EOI marker (0xFFD9) while parsing scan data, " +
<add> "possibly caused by incorrect `scanLines` parameter",
<add> maybeScanLines
<ide> );
<ide> }
<del> } else if (nextByte === /* EOI = */ 0xd9) {
<del> if (parseDNLMarker) {
<del> // NOTE: only 8-bit JPEG images are supported in this decoder.
<del> const maybeScanLines = blockRow * (frame.precision === 8 ? 8 : 0);
<del> // Heuristic to attempt to handle corrupt JPEG images with too
<del> // large `scanLines` parameter, by falling back to the currently
<del> // parsed number of scanLines when it's at least (approximately)
<del> // one order of magnitude smaller than expected (fixes
<del> // issue10880.pdf and issue10989.pdf).
<del> if (
<del> maybeScanLines > 0 &&
<del> Math.round(frame.scanLines / maybeScanLines) >= 10
<del> ) {
<del> throw new DNLMarkerError(
<del> "Found EOI marker (0xFFD9) while parsing scan data, " +
<del> "possibly caused by incorrect `scanLines` parameter",
<del> maybeScanLines
<del> );
<del> }
<del> }
<del> throw new EOIMarkerError(
<del> "Found EOI marker (0xFFD9) while parsing scan data"
<del> );
<ide> }
<del> throw new JpegError(
<del> `unexpected marker ${((bitsData << 8) | nextByte).toString(16)}`
<add> throw new EOIMarkerError(
<add> "Found EOI marker (0xFFD9) while parsing scan data"
<ide> );
<ide> }
<del> // unstuff 0
<add> throw new JpegError(
<add> `unexpected marker ${((bitsData << 8) | nextByte).toString(16)}`
<add> );
<ide> }
<del> bitsCount = 7;
<del> return bitsData >>> 7;
<add> // unstuff 0
<ide> }
<add> bitsCount = 7;
<add> return bitsData >>> 7;
<add> }
<ide>
<del> function decodeHuffman(tree) {
<del> let node = tree;
<del> while (true) {
<del> node = node[readBit()];
<del> switch (typeof node) {
<del> case "number":
<del> return node;
<del> case "object":
<del> continue;
<del> }
<del> throw new JpegError("invalid huffman sequence");
<add> function decodeHuffman(tree) {
<add> let node = tree;
<add> while (true) {
<add> node = node[readBit()];
<add> switch (typeof node) {
<add> case "number":
<add> return node;
<add> case "object":
<add> continue;
<ide> }
<add> throw new JpegError("invalid huffman sequence");
<ide> }
<add> }
<ide>
<del> function receive(length) {
<del> let n = 0;
<del> while (length > 0) {
<del> n = (n << 1) | readBit();
<del> length--;
<del> }
<del> return n;
<add> function receive(length) {
<add> let n = 0;
<add> while (length > 0) {
<add> n = (n << 1) | readBit();
<add> length--;
<ide> }
<add> return n;
<add> }
<ide>
<del> function receiveAndExtend(length) {
<del> if (length === 1) {
<del> return readBit() === 1 ? 1 : -1;
<del> }
<del> const n = receive(length);
<del> if (n >= 1 << (length - 1)) {
<del> return n;
<del> }
<del> return n + (-1 << length) + 1;
<add> function receiveAndExtend(length) {
<add> if (length === 1) {
<add> return readBit() === 1 ? 1 : -1;
<add> }
<add> const n = receive(length);
<add> if (n >= 1 << (length - 1)) {
<add> return n;
<ide> }
<add> return n + (-1 << length) + 1;
<add> }
<ide>
<del> function decodeBaseline(component, blockOffset) {
<del> const t = decodeHuffman(component.huffmanTableDC);
<del> const diff = t === 0 ? 0 : receiveAndExtend(t);
<del> component.blockData[blockOffset] = component.pred += diff;
<del> let k = 1;
<del> while (k < 64) {
<del> const rs = decodeHuffman(component.huffmanTableAC);
<del> const s = rs & 15,
<del> r = rs >> 4;
<del> if (s === 0) {
<del> if (r < 15) {
<del> break;
<del> }
<del> k += 16;
<del> continue;
<add> function decodeBaseline(component, blockOffset) {
<add> const t = decodeHuffman(component.huffmanTableDC);
<add> const diff = t === 0 ? 0 : receiveAndExtend(t);
<add> component.blockData[blockOffset] = component.pred += diff;
<add> let k = 1;
<add> while (k < 64) {
<add> const rs = decodeHuffman(component.huffmanTableAC);
<add> const s = rs & 15,
<add> r = rs >> 4;
<add> if (s === 0) {
<add> if (r < 15) {
<add> break;
<ide> }
<del> k += r;
<del> const z = dctZigZag[k];
<del> component.blockData[blockOffset + z] = receiveAndExtend(s);
<del> k++;
<add> k += 16;
<add> continue;
<ide> }
<add> k += r;
<add> const z = dctZigZag[k];
<add> component.blockData[blockOffset + z] = receiveAndExtend(s);
<add> k++;
<ide> }
<add> }
<ide>
<del> function decodeDCFirst(component, blockOffset) {
<del> const t = decodeHuffman(component.huffmanTableDC);
<del> const diff = t === 0 ? 0 : receiveAndExtend(t) << successive;
<del> component.blockData[blockOffset] = component.pred += diff;
<del> }
<add> function decodeDCFirst(component, blockOffset) {
<add> const t = decodeHuffman(component.huffmanTableDC);
<add> const diff = t === 0 ? 0 : receiveAndExtend(t) << successive;
<add> component.blockData[blockOffset] = component.pred += diff;
<add> }
<ide>
<del> function decodeDCSuccessive(component, blockOffset) {
<del> component.blockData[blockOffset] |= readBit() << successive;
<del> }
<add> function decodeDCSuccessive(component, blockOffset) {
<add> component.blockData[blockOffset] |= readBit() << successive;
<add> }
<ide>
<del> let eobrun = 0;
<del> function decodeACFirst(component, blockOffset) {
<del> if (eobrun > 0) {
<del> eobrun--;
<del> return;
<del> }
<del> let k = spectralStart;
<del> const e = spectralEnd;
<del> while (k <= e) {
<del> const rs = decodeHuffman(component.huffmanTableAC);
<del> const s = rs & 15,
<del> r = rs >> 4;
<del> if (s === 0) {
<del> if (r < 15) {
<del> eobrun = receive(r) + (1 << r) - 1;
<del> break;
<del> }
<del> k += 16;
<del> continue;
<add> let eobrun = 0;
<add> function decodeACFirst(component, blockOffset) {
<add> if (eobrun > 0) {
<add> eobrun--;
<add> return;
<add> }
<add> let k = spectralStart;
<add> const e = spectralEnd;
<add> while (k <= e) {
<add> const rs = decodeHuffman(component.huffmanTableAC);
<add> const s = rs & 15,
<add> r = rs >> 4;
<add> if (s === 0) {
<add> if (r < 15) {
<add> eobrun = receive(r) + (1 << r) - 1;
<add> break;
<ide> }
<del> k += r;
<del> const z = dctZigZag[k];
<del> component.blockData[blockOffset + z] =
<del> receiveAndExtend(s) * (1 << successive);
<del> k++;
<add> k += 16;
<add> continue;
<ide> }
<add> k += r;
<add> const z = dctZigZag[k];
<add> component.blockData[blockOffset + z] =
<add> receiveAndExtend(s) * (1 << successive);
<add> k++;
<ide> }
<add> }
<ide>
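In the progressive AC-first pass, an `s === 0, r < 15` code starts an end-of-band run: the current block ends here, and `receive(r) + 2^r - 1` further blocks carry no coefficients in this spectral band. A small sketch of that arithmetic:

// EOB run: receive(r) plus 2^r - 1 gives the number of *additional*
// all-zero blocks in this band after the current one.
function eobRunLength(r, receivedBits) {
  return receivedBits + (1 << r) - 1;
}

console.log(eobRunLength(0, 0)); // 0 -> only the current block ends here
console.log(eobRunLength(2, 3)); // 6 -> six more blocks skip this band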
<del> let successiveACState = 0,
<del> successiveACNextValue;
<del> function decodeACSuccessive(component, blockOffset) {
<del> let k = spectralStart;
<del> const e = spectralEnd;
<del> let r = 0;
<del> let s;
<del> let rs;
<del> while (k <= e) {
<del> const offsetZ = blockOffset + dctZigZag[k];
<del> const sign = component.blockData[offsetZ] < 0 ? -1 : 1;
<del> switch (successiveACState) {
<del> case 0: // initial state
<del> rs = decodeHuffman(component.huffmanTableAC);
<del> s = rs & 15;
<del> r = rs >> 4;
<del> if (s === 0) {
<del> if (r < 15) {
<del> eobrun = receive(r) + (1 << r);
<del> successiveACState = 4;
<del> } else {
<del> r = 16;
<del> successiveACState = 1;
<del> }
<del> } else {
<del> if (s !== 1) {
<del> throw new JpegError("invalid ACn encoding");
<del> }
<del> successiveACNextValue = receiveAndExtend(s);
<del> successiveACState = r ? 2 : 3;
<del> }
<del> continue;
<del> case 1: // skipping r zero items
<del> case 2:
<del> if (component.blockData[offsetZ]) {
<del> component.blockData[offsetZ] += sign * (readBit() << successive);
<add> let successiveACState = 0,
<add> successiveACNextValue;
<add> function decodeACSuccessive(component, blockOffset) {
<add> let k = spectralStart;
<add> const e = spectralEnd;
<add> let r = 0;
<add> let s;
<add> let rs;
<add> while (k <= e) {
<add> const offsetZ = blockOffset + dctZigZag[k];
<add> const sign = component.blockData[offsetZ] < 0 ? -1 : 1;
<add> switch (successiveACState) {
<add> case 0: // initial state
<add> rs = decodeHuffman(component.huffmanTableAC);
<add> s = rs & 15;
<add> r = rs >> 4;
<add> if (s === 0) {
<add> if (r < 15) {
<add> eobrun = receive(r) + (1 << r);
<add> successiveACState = 4;
<ide> } else {
<del> r--;
<del> if (r === 0) {
<del> successiveACState = successiveACState === 2 ? 3 : 0;
<del> }
<add> r = 16;
<add> successiveACState = 1;
<ide> }
<del> break;
<del> case 3: // set value for a zero item
<del> if (component.blockData[offsetZ]) {
<del> component.blockData[offsetZ] += sign * (readBit() << successive);
<del> } else {
<del> component.blockData[offsetZ] =
<del> successiveACNextValue << successive;
<del> successiveACState = 0;
<add> } else {
<add> if (s !== 1) {
<add> throw new JpegError("invalid ACn encoding");
<ide> }
<del> break;
<del> case 4: // eob
<del> if (component.blockData[offsetZ]) {
<del> component.blockData[offsetZ] += sign * (readBit() << successive);
<add> successiveACNextValue = receiveAndExtend(s);
<add> successiveACState = r ? 2 : 3;
<add> }
<add> continue;
<add> case 1: // skipping r zero items
<add> case 2:
<add> if (component.blockData[offsetZ]) {
<add> component.blockData[offsetZ] += sign * (readBit() << successive);
<add> } else {
<add> r--;
<add> if (r === 0) {
<add> successiveACState = successiveACState === 2 ? 3 : 0;
<ide> }
<del> break;
<del> }
<del> k++;
<add> }
<add> break;
<add> case 3: // set value for a zero item
<add> if (component.blockData[offsetZ]) {
<add> component.blockData[offsetZ] += sign * (readBit() << successive);
<add> } else {
<add> component.blockData[offsetZ] = successiveACNextValue << successive;
<add> successiveACState = 0;
<add> }
<add> break;
<add> case 4: // eob
<add> if (component.blockData[offsetZ]) {
<add> component.blockData[offsetZ] += sign * (readBit() << successive);
<add> }
<add> break;
<ide> }
<del> if (successiveACState === 4) {
<del> eobrun--;
<del> if (eobrun === 0) {
<del> successiveACState = 0;
<del> }
<add> k++;
<add> }
<add> if (successiveACState === 4) {
<add> eobrun--;
<add> if (eobrun === 0) {
<add> successiveACState = 0;
<ide> }
<ide> }
<add> }
<ide>
<del> let blockRow = 0;
<del> function decodeMcu(component, decode, mcu, row, col) {
<del> const mcuRow = (mcu / mcusPerLine) | 0;
<del> const mcuCol = mcu % mcusPerLine;
<del> blockRow = mcuRow * component.v + row;
<del> const blockCol = mcuCol * component.h + col;
<del> const blockOffset = getBlockBufferOffset(component, blockRow, blockCol);
<del> decode(component, blockOffset);
<del> }
<add> let blockRow = 0;
<add> function decodeMcu(component, decode, mcu, row, col) {
<add> const mcuRow = (mcu / mcusPerLine) | 0;
<add> const mcuCol = mcu % mcusPerLine;
<add> blockRow = mcuRow * component.v + row;
<add> const blockCol = mcuCol * component.h + col;
<add> const blockOffset = getBlockBufferOffset(component, blockRow, blockCol);
<add> decode(component, blockOffset);
<add> }
<ide>
<del> function decodeBlock(component, decode, mcu) {
<del> blockRow = (mcu / component.blocksPerLine) | 0;
<del> const blockCol = mcu % component.blocksPerLine;
<del> const blockOffset = getBlockBufferOffset(component, blockRow, blockCol);
<del> decode(component, blockOffset);
<del> }
<add> function decodeBlock(component, decode, mcu) {
<add> blockRow = (mcu / component.blocksPerLine) | 0;
<add> const blockCol = mcu % component.blocksPerLine;
<add> const blockOffset = getBlockBufferOffset(component, blockRow, blockCol);
<add> decode(component, blockOffset);
<add> }
<ide>
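decodeMcu converts a linear MCU index into grid coordinates with a truncating division and a modulo; the same mapping isolated for illustration:

// Map a linear MCU index to its (row, col) position in the MCU grid.
function mcuPosition(mcu, mcusPerLine) {
  return { row: (mcu / mcusPerLine) | 0, col: mcu % mcusPerLine };
}

console.log(mcuPosition(0, 10));  // { row: 0, col: 0 }
console.log(mcuPosition(23, 10)); // { row: 2, col: 3 }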
<del> const componentsLength = components.length;
<del> let component, i, j, k, n;
<del> let decodeFn;
<del> if (progressive) {
<del> if (spectralStart === 0) {
<del> decodeFn = successivePrev === 0 ? decodeDCFirst : decodeDCSuccessive;
<del> } else {
<del> decodeFn = successivePrev === 0 ? decodeACFirst : decodeACSuccessive;
<del> }
<add> const componentsLength = components.length;
<add> let component, i, j, k, n;
<add> let decodeFn;
<add> if (progressive) {
<add> if (spectralStart === 0) {
<add> decodeFn = successivePrev === 0 ? decodeDCFirst : decodeDCSuccessive;
<ide> } else {
<del> decodeFn = decodeBaseline;
<add> decodeFn = successivePrev === 0 ? decodeACFirst : decodeACSuccessive;
<ide> }
<add> } else {
<add> decodeFn = decodeBaseline;
<add> }
<ide>
<del> let mcu = 0,
<del> fileMarker;
<del> let mcuExpected;
<del> if (componentsLength === 1) {
<del> mcuExpected = components[0].blocksPerLine * components[0].blocksPerColumn;
<del> } else {
<del> mcuExpected = mcusPerLine * frame.mcusPerColumn;
<del> }
<add> let mcu = 0,
<add> fileMarker;
<add> let mcuExpected;
<add> if (componentsLength === 1) {
<add> mcuExpected = components[0].blocksPerLine * components[0].blocksPerColumn;
<add> } else {
<add> mcuExpected = mcusPerLine * frame.mcusPerColumn;
<add> }
<ide>
<del> let h, v;
<del> while (mcu <= mcuExpected) {
<del> // reset interval stuff
<del> const mcuToRead = resetInterval
<del> ? Math.min(mcuExpected - mcu, resetInterval)
<del> : mcuExpected;
<del>
<del> // The `mcuToRead === 0` case should only occur when all of the expected
<del>      // MCU data has already been parsed, i.e. when `mcu === mcuExpected`, but
<del> // some corrupt JPEG images contain more data than intended and we thus
<del> // want to skip over any extra RSTx markers below (fixes issue11794.pdf).
<del> if (mcuToRead > 0) {
<del> for (i = 0; i < componentsLength; i++) {
<del> components[i].pred = 0;
<del> }
<del> eobrun = 0;
<add> let h, v;
<add> while (mcu <= mcuExpected) {
<add> // reset interval stuff
<add> const mcuToRead = resetInterval
<add> ? Math.min(mcuExpected - mcu, resetInterval)
<add> : mcuExpected;
<add>
<add> // The `mcuToRead === 0` case should only occur when all of the expected
<add>    // MCU data has already been parsed, i.e. when `mcu === mcuExpected`, but
<add> // some corrupt JPEG images contain more data than intended and we thus
<add> // want to skip over any extra RSTx markers below (fixes issue11794.pdf).
<add> if (mcuToRead > 0) {
<add> for (i = 0; i < componentsLength; i++) {
<add> components[i].pred = 0;
<add> }
<add> eobrun = 0;
<ide>
<del> if (componentsLength === 1) {
<del> component = components[0];
<del> for (n = 0; n < mcuToRead; n++) {
<del> decodeBlock(component, decodeFn, mcu);
<del> mcu++;
<del> }
<del> } else {
<del> for (n = 0; n < mcuToRead; n++) {
<del> for (i = 0; i < componentsLength; i++) {
<del> component = components[i];
<del> h = component.h;
<del> v = component.v;
<del> for (j = 0; j < v; j++) {
<del> for (k = 0; k < h; k++) {
<del> decodeMcu(component, decodeFn, mcu, j, k);
<del> }
<add> if (componentsLength === 1) {
<add> component = components[0];
<add> for (n = 0; n < mcuToRead; n++) {
<add> decodeBlock(component, decodeFn, mcu);
<add> mcu++;
<add> }
<add> } else {
<add> for (n = 0; n < mcuToRead; n++) {
<add> for (i = 0; i < componentsLength; i++) {
<add> component = components[i];
<add> h = component.h;
<add> v = component.v;
<add> for (j = 0; j < v; j++) {
<add> for (k = 0; k < h; k++) {
<add> decodeMcu(component, decodeFn, mcu, j, k);
<ide> }
<ide> }
<del> mcu++;
<ide> }
<add> mcu++;
<ide> }
<ide> }
<del>
<del> // find marker
<del> bitsCount = 0;
<del> fileMarker = findNextFileMarker(data, offset);
<del> if (!fileMarker) {
<del> break; // Reached the end of the image data without finding any marker.
<del> }
<del> if (fileMarker.invalid) {
<del> // Some bad images seem to pad Scan blocks with e.g. zero bytes, skip
<del> // past those to attempt to find a valid marker (fixes issue4090.pdf).
<del> const partialMsg = mcuToRead > 0 ? "unexpected" : "excessive";
<del> warn(
<del> `decodeScan - ${partialMsg} MCU data, current marker is: ${fileMarker.invalid}`
<del> );
<del> offset = fileMarker.offset;
<del> }
<del> if (fileMarker.marker >= 0xffd0 && fileMarker.marker <= 0xffd7) {
<del> // RSTx
<del> offset += 2;
<del> } else {
<del> break;
<del> }
<ide> }
<ide>
<del> return offset - startOffset;
<add> // find marker
<add> bitsCount = 0;
<add> fileMarker = findNextFileMarker(data, offset);
<add> if (!fileMarker) {
<add> break; // Reached the end of the image data without finding any marker.
<add> }
<add> if (fileMarker.invalid) {
<add> // Some bad images seem to pad Scan blocks with e.g. zero bytes, skip
<add> // past those to attempt to find a valid marker (fixes issue4090.pdf).
<add> const partialMsg = mcuToRead > 0 ? "unexpected" : "excessive";
<add> warn(
<add> `decodeScan - ${partialMsg} MCU data, current marker is: ${fileMarker.invalid}`
<add> );
<add> offset = fileMarker.offset;
<add> }
<add> if (fileMarker.marker >= 0xffd0 && fileMarker.marker <= 0xffd7) {
<add> // RSTx
<add> offset += 2;
<add> } else {
<add> break;
<add> }
<ide> }
<ide>
<del> // A port of poppler's IDCT method which in turn is taken from:
<del> // Christoph Loeffler, Adriaan Ligtenberg, George S. Moschytz,
<del> // 'Practical Fast 1-D DCT Algorithms with 11 Multiplications',
<del> // IEEE Intl. Conf. on Acoustics, Speech & Signal Processing, 1989,
<del> // 988-991.
<del> function quantizeAndInverse(component, blockBufferOffset, p) {
<del> const qt = component.quantizationTable,
<del> blockData = component.blockData;
<del> let v0, v1, v2, v3, v4, v5, v6, v7;
<del> let p0, p1, p2, p3, p4, p5, p6, p7;
<del> let t;
<del>
<del> if (!qt) {
<del> throw new JpegError("missing required Quantization Table.");
<del> }
<add> return offset - startOffset;
<add>}
<ide>
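Between restart intervals the loop above resynchronizes on RSTn markers, which occupy the two-byte range 0xFFD0-0xFFD7. A small sketch of that check on raw scan bytes, assuming the same big-endian layout that readUint16 expects:

// Detect an RSTn restart marker at `pos` in a byte array.
function isRestartMarker(bytes, pos) {
  const marker = (bytes[pos] << 8) | bytes[pos + 1]; // big-endian, like readUint16
  return marker >= 0xffd0 && marker <= 0xffd7;
}

console.log(isRestartMarker(new Uint8Array([0xff, 0xd3]), 0)); // true  (RST3)
console.log(isRestartMarker(new Uint8Array([0xff, 0xd9]), 0)); // false (EOI)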
<del> // inverse DCT on rows
<del> for (let row = 0; row < 64; row += 8) {
<del> // gather block data
<del> p0 = blockData[blockBufferOffset + row];
<del> p1 = blockData[blockBufferOffset + row + 1];
<del> p2 = blockData[blockBufferOffset + row + 2];
<del> p3 = blockData[blockBufferOffset + row + 3];
<del> p4 = blockData[blockBufferOffset + row + 4];
<del> p5 = blockData[blockBufferOffset + row + 5];
<del> p6 = blockData[blockBufferOffset + row + 6];
<del> p7 = blockData[blockBufferOffset + row + 7];
<del>
<del> // dequant p0
<del> p0 *= qt[row];
<del>
<del> // check for all-zero AC coefficients
<del> if ((p1 | p2 | p3 | p4 | p5 | p6 | p7) === 0) {
<del> t = (dctSqrt2 * p0 + 512) >> 10;
<del> p[row] = t;
<del> p[row + 1] = t;
<del> p[row + 2] = t;
<del> p[row + 3] = t;
<del> p[row + 4] = t;
<del> p[row + 5] = t;
<del> p[row + 6] = t;
<del> p[row + 7] = t;
<del> continue;
<del> }
<del> // dequant p1 ... p7
<del> p1 *= qt[row + 1];
<del> p2 *= qt[row + 2];
<del> p3 *= qt[row + 3];
<del> p4 *= qt[row + 4];
<del> p5 *= qt[row + 5];
<del> p6 *= qt[row + 6];
<del> p7 *= qt[row + 7];
<del>
<del> // stage 4
<del> v0 = (dctSqrt2 * p0 + 128) >> 8;
<del> v1 = (dctSqrt2 * p4 + 128) >> 8;
<del> v2 = p2;
<del> v3 = p6;
<del> v4 = (dctSqrt1d2 * (p1 - p7) + 128) >> 8;
<del> v7 = (dctSqrt1d2 * (p1 + p7) + 128) >> 8;
<del> v5 = p3 << 4;
<del> v6 = p5 << 4;
<del>
<del> // stage 3
<del> v0 = (v0 + v1 + 1) >> 1;
<del> v1 = v0 - v1;
<del> t = (v2 * dctSin6 + v3 * dctCos6 + 128) >> 8;
<del> v2 = (v2 * dctCos6 - v3 * dctSin6 + 128) >> 8;
<del> v3 = t;
<del> v4 = (v4 + v6 + 1) >> 1;
<del> v6 = v4 - v6;
<del> v7 = (v7 + v5 + 1) >> 1;
<del> v5 = v7 - v5;
<del>
<del> // stage 2
<del> v0 = (v0 + v3 + 1) >> 1;
<del> v3 = v0 - v3;
<del> v1 = (v1 + v2 + 1) >> 1;
<del> v2 = v1 - v2;
<del> t = (v4 * dctSin3 + v7 * dctCos3 + 2048) >> 12;
<del> v4 = (v4 * dctCos3 - v7 * dctSin3 + 2048) >> 12;
<del> v7 = t;
<del> t = (v5 * dctSin1 + v6 * dctCos1 + 2048) >> 12;
<del> v5 = (v5 * dctCos1 - v6 * dctSin1 + 2048) >> 12;
<del> v6 = t;
<del>
<del> // stage 1
<del> p[row] = v0 + v7;
<del> p[row + 7] = v0 - v7;
<del> p[row + 1] = v1 + v6;
<del> p[row + 6] = v1 - v6;
<del> p[row + 2] = v2 + v5;
<del> p[row + 5] = v2 - v5;
<del> p[row + 3] = v3 + v4;
<del> p[row + 4] = v3 - v4;
<del> }
<add>// A port of poppler's IDCT method which in turn is taken from:
<add>// Christoph Loeffler, Adriaan Ligtenberg, George S. Moschytz,
<add>// 'Practical Fast 1-D DCT Algorithms with 11 Multiplications',
<add>// IEEE Intl. Conf. on Acoustics, Speech & Signal Processing, 1989,
<add>// 988-991.
<add>function quantizeAndInverse(component, blockBufferOffset, p) {
<add> const qt = component.quantizationTable,
<add> blockData = component.blockData;
<add> let v0, v1, v2, v3, v4, v5, v6, v7;
<add> let p0, p1, p2, p3, p4, p5, p6, p7;
<add> let t;
<add>
<add> if (!qt) {
<add> throw new JpegError("missing required Quantization Table.");
<add> }
<ide>
<del> // inverse DCT on columns
<del> for (let col = 0; col < 8; ++col) {
<del> p0 = p[col];
<del> p1 = p[col + 8];
<del> p2 = p[col + 16];
<del> p3 = p[col + 24];
<del> p4 = p[col + 32];
<del> p5 = p[col + 40];
<del> p6 = p[col + 48];
<del> p7 = p[col + 56];
<del>
<del> // check for all-zero AC coefficients
<del> if ((p1 | p2 | p3 | p4 | p5 | p6 | p7) === 0) {
<del> t = (dctSqrt2 * p0 + 8192) >> 14;
<del> // Convert to 8-bit.
<del> if (t < -2040) {
<del> t = 0;
<del> } else if (t >= 2024) {
<del> t = 255;
<del> } else {
<del> t = (t + 2056) >> 4;
<del> }
<del> blockData[blockBufferOffset + col] = t;
<del> blockData[blockBufferOffset + col + 8] = t;
<del> blockData[blockBufferOffset + col + 16] = t;
<del> blockData[blockBufferOffset + col + 24] = t;
<del> blockData[blockBufferOffset + col + 32] = t;
<del> blockData[blockBufferOffset + col + 40] = t;
<del> blockData[blockBufferOffset + col + 48] = t;
<del> blockData[blockBufferOffset + col + 56] = t;
<del> continue;
<del> }
<add> // inverse DCT on rows
<add> for (let row = 0; row < 64; row += 8) {
<add> // gather block data
<add> p0 = blockData[blockBufferOffset + row];
<add> p1 = blockData[blockBufferOffset + row + 1];
<add> p2 = blockData[blockBufferOffset + row + 2];
<add> p3 = blockData[blockBufferOffset + row + 3];
<add> p4 = blockData[blockBufferOffset + row + 4];
<add> p5 = blockData[blockBufferOffset + row + 5];
<add> p6 = blockData[blockBufferOffset + row + 6];
<add> p7 = blockData[blockBufferOffset + row + 7];
<add>
<add> // dequant p0
<add> p0 *= qt[row];
<add>
<add> // check for all-zero AC coefficients
<add> if ((p1 | p2 | p3 | p4 | p5 | p6 | p7) === 0) {
<add> t = (dctSqrt2 * p0 + 512) >> 10;
<add> p[row] = t;
<add> p[row + 1] = t;
<add> p[row + 2] = t;
<add> p[row + 3] = t;
<add> p[row + 4] = t;
<add> p[row + 5] = t;
<add> p[row + 6] = t;
<add> p[row + 7] = t;
<add> continue;
<add> }
<add> // dequant p1 ... p7
<add> p1 *= qt[row + 1];
<add> p2 *= qt[row + 2];
<add> p3 *= qt[row + 3];
<add> p4 *= qt[row + 4];
<add> p5 *= qt[row + 5];
<add> p6 *= qt[row + 6];
<add> p7 *= qt[row + 7];
<add>
<add> // stage 4
<add> v0 = (dctSqrt2 * p0 + 128) >> 8;
<add> v1 = (dctSqrt2 * p4 + 128) >> 8;
<add> v2 = p2;
<add> v3 = p6;
<add> v4 = (dctSqrt1d2 * (p1 - p7) + 128) >> 8;
<add> v7 = (dctSqrt1d2 * (p1 + p7) + 128) >> 8;
<add> v5 = p3 << 4;
<add> v6 = p5 << 4;
<add>
<add> // stage 3
<add> v0 = (v0 + v1 + 1) >> 1;
<add> v1 = v0 - v1;
<add> t = (v2 * dctSin6 + v3 * dctCos6 + 128) >> 8;
<add> v2 = (v2 * dctCos6 - v3 * dctSin6 + 128) >> 8;
<add> v3 = t;
<add> v4 = (v4 + v6 + 1) >> 1;
<add> v6 = v4 - v6;
<add> v7 = (v7 + v5 + 1) >> 1;
<add> v5 = v7 - v5;
<add>
<add> // stage 2
<add> v0 = (v0 + v3 + 1) >> 1;
<add> v3 = v0 - v3;
<add> v1 = (v1 + v2 + 1) >> 1;
<add> v2 = v1 - v2;
<add> t = (v4 * dctSin3 + v7 * dctCos3 + 2048) >> 12;
<add> v4 = (v4 * dctCos3 - v7 * dctSin3 + 2048) >> 12;
<add> v7 = t;
<add> t = (v5 * dctSin1 + v6 * dctCos1 + 2048) >> 12;
<add> v5 = (v5 * dctCos1 - v6 * dctSin1 + 2048) >> 12;
<add> v6 = t;
<add>
<add> // stage 1
<add> p[row] = v0 + v7;
<add> p[row + 7] = v0 - v7;
<add> p[row + 1] = v1 + v6;
<add> p[row + 6] = v1 - v6;
<add> p[row + 2] = v2 + v5;
<add> p[row + 5] = v2 - v5;
<add> p[row + 3] = v3 + v4;
<add> p[row + 4] = v3 - v4;
<add> }
<ide>
<del> // stage 4
<del> v0 = (dctSqrt2 * p0 + 2048) >> 12;
<del> v1 = (dctSqrt2 * p4 + 2048) >> 12;
<del> v2 = p2;
<del> v3 = p6;
<del> v4 = (dctSqrt1d2 * (p1 - p7) + 2048) >> 12;
<del> v7 = (dctSqrt1d2 * (p1 + p7) + 2048) >> 12;
<del> v5 = p3;
<del> v6 = p5;
<del>
<del> // stage 3
<del> // Shift v0 by 128.5 << 5 here, so we don't need to shift p0...p7 when
<del> // converting to UInt8 range later.
<del> v0 = ((v0 + v1 + 1) >> 1) + 4112;
<del> v1 = v0 - v1;
<del> t = (v2 * dctSin6 + v3 * dctCos6 + 2048) >> 12;
<del> v2 = (v2 * dctCos6 - v3 * dctSin6 + 2048) >> 12;
<del> v3 = t;
<del> v4 = (v4 + v6 + 1) >> 1;
<del> v6 = v4 - v6;
<del> v7 = (v7 + v5 + 1) >> 1;
<del> v5 = v7 - v5;
<del>
<del> // stage 2
<del> v0 = (v0 + v3 + 1) >> 1;
<del> v3 = v0 - v3;
<del> v1 = (v1 + v2 + 1) >> 1;
<del> v2 = v1 - v2;
<del> t = (v4 * dctSin3 + v7 * dctCos3 + 2048) >> 12;
<del> v4 = (v4 * dctCos3 - v7 * dctSin3 + 2048) >> 12;
<del> v7 = t;
<del> t = (v5 * dctSin1 + v6 * dctCos1 + 2048) >> 12;
<del> v5 = (v5 * dctCos1 - v6 * dctSin1 + 2048) >> 12;
<del> v6 = t;
<del>
<del> // stage 1
<del> p0 = v0 + v7;
<del> p7 = v0 - v7;
<del> p1 = v1 + v6;
<del> p6 = v1 - v6;
<del> p2 = v2 + v5;
<del> p5 = v2 - v5;
<del> p3 = v3 + v4;
<del> p4 = v3 - v4;
<del>
<del> // Convert to 8-bit integers.
<del> if (p0 < 16) {
<del> p0 = 0;
<del> } else if (p0 >= 4080) {
<del> p0 = 255;
<del> } else {
<del> p0 >>= 4;
<del> }
<del> if (p1 < 16) {
<del> p1 = 0;
<del> } else if (p1 >= 4080) {
<del> p1 = 255;
<del> } else {
<del> p1 >>= 4;
<del> }
<del> if (p2 < 16) {
<del> p2 = 0;
<del> } else if (p2 >= 4080) {
<del> p2 = 255;
<del> } else {
<del> p2 >>= 4;
<del> }
<del> if (p3 < 16) {
<del> p3 = 0;
<del> } else if (p3 >= 4080) {
<del> p3 = 255;
<del> } else {
<del> p3 >>= 4;
<del> }
<del> if (p4 < 16) {
<del> p4 = 0;
<del> } else if (p4 >= 4080) {
<del> p4 = 255;
<del> } else {
<del> p4 >>= 4;
<del> }
<del> if (p5 < 16) {
<del> p5 = 0;
<del> } else if (p5 >= 4080) {
<del> p5 = 255;
<del> } else {
<del> p5 >>= 4;
<del> }
<del> if (p6 < 16) {
<del> p6 = 0;
<del> } else if (p6 >= 4080) {
<del> p6 = 255;
<add> // inverse DCT on columns
<add> for (let col = 0; col < 8; ++col) {
<add> p0 = p[col];
<add> p1 = p[col + 8];
<add> p2 = p[col + 16];
<add> p3 = p[col + 24];
<add> p4 = p[col + 32];
<add> p5 = p[col + 40];
<add> p6 = p[col + 48];
<add> p7 = p[col + 56];
<add>
<add> // check for all-zero AC coefficients
<add> if ((p1 | p2 | p3 | p4 | p5 | p6 | p7) === 0) {
<add> t = (dctSqrt2 * p0 + 8192) >> 14;
<add> // Convert to 8-bit.
<add> if (t < -2040) {
<add> t = 0;
<add> } else if (t >= 2024) {
<add> t = 255;
<ide> } else {
<del> p6 >>= 4;
<del> }
<del> if (p7 < 16) {
<del> p7 = 0;
<del> } else if (p7 >= 4080) {
<del> p7 = 255;
<del> } else {
<del> p7 >>= 4;
<add> t = (t + 2056) >> 4;
<ide> }
<add> blockData[blockBufferOffset + col] = t;
<add> blockData[blockBufferOffset + col + 8] = t;
<add> blockData[blockBufferOffset + col + 16] = t;
<add> blockData[blockBufferOffset + col + 24] = t;
<add> blockData[blockBufferOffset + col + 32] = t;
<add> blockData[blockBufferOffset + col + 40] = t;
<add> blockData[blockBufferOffset + col + 48] = t;
<add> blockData[blockBufferOffset + col + 56] = t;
<add> continue;
<add> }
<ide>
<del> // store block data
<del> blockData[blockBufferOffset + col] = p0;
<del> blockData[blockBufferOffset + col + 8] = p1;
<del> blockData[blockBufferOffset + col + 16] = p2;
<del> blockData[blockBufferOffset + col + 24] = p3;
<del> blockData[blockBufferOffset + col + 32] = p4;
<del> blockData[blockBufferOffset + col + 40] = p5;
<del> blockData[blockBufferOffset + col + 48] = p6;
<del> blockData[blockBufferOffset + col + 56] = p7;
<add> // stage 4
<add> v0 = (dctSqrt2 * p0 + 2048) >> 12;
<add> v1 = (dctSqrt2 * p4 + 2048) >> 12;
<add> v2 = p2;
<add> v3 = p6;
<add> v4 = (dctSqrt1d2 * (p1 - p7) + 2048) >> 12;
<add> v7 = (dctSqrt1d2 * (p1 + p7) + 2048) >> 12;
<add> v5 = p3;
<add> v6 = p5;
<add>
<add> // stage 3
<add> // Shift v0 by 128.5 << 5 here, so we don't need to shift p0...p7 when
<add> // converting to UInt8 range later.
<add> v0 = ((v0 + v1 + 1) >> 1) + 4112;
<add> v1 = v0 - v1;
<add> t = (v2 * dctSin6 + v3 * dctCos6 + 2048) >> 12;
<add> v2 = (v2 * dctCos6 - v3 * dctSin6 + 2048) >> 12;
<add> v3 = t;
<add> v4 = (v4 + v6 + 1) >> 1;
<add> v6 = v4 - v6;
<add> v7 = (v7 + v5 + 1) >> 1;
<add> v5 = v7 - v5;
<add>
<add> // stage 2
<add> v0 = (v0 + v3 + 1) >> 1;
<add> v3 = v0 - v3;
<add> v1 = (v1 + v2 + 1) >> 1;
<add> v2 = v1 - v2;
<add> t = (v4 * dctSin3 + v7 * dctCos3 + 2048) >> 12;
<add> v4 = (v4 * dctCos3 - v7 * dctSin3 + 2048) >> 12;
<add> v7 = t;
<add> t = (v5 * dctSin1 + v6 * dctCos1 + 2048) >> 12;
<add> v5 = (v5 * dctCos1 - v6 * dctSin1 + 2048) >> 12;
<add> v6 = t;
<add>
<add> // stage 1
<add> p0 = v0 + v7;
<add> p7 = v0 - v7;
<add> p1 = v1 + v6;
<add> p6 = v1 - v6;
<add> p2 = v2 + v5;
<add> p5 = v2 - v5;
<add> p3 = v3 + v4;
<add> p4 = v3 - v4;
<add>
<add> // Convert to 8-bit integers.
<add> if (p0 < 16) {
<add> p0 = 0;
<add> } else if (p0 >= 4080) {
<add> p0 = 255;
<add> } else {
<add> p0 >>= 4;
<ide> }
<add> if (p1 < 16) {
<add> p1 = 0;
<add> } else if (p1 >= 4080) {
<add> p1 = 255;
<add> } else {
<add> p1 >>= 4;
<add> }
<add> if (p2 < 16) {
<add> p2 = 0;
<add> } else if (p2 >= 4080) {
<add> p2 = 255;
<add> } else {
<add> p2 >>= 4;
<add> }
<add> if (p3 < 16) {
<add> p3 = 0;
<add> } else if (p3 >= 4080) {
<add> p3 = 255;
<add> } else {
<add> p3 >>= 4;
<add> }
<add> if (p4 < 16) {
<add> p4 = 0;
<add> } else if (p4 >= 4080) {
<add> p4 = 255;
<add> } else {
<add> p4 >>= 4;
<add> }
<add> if (p5 < 16) {
<add> p5 = 0;
<add> } else if (p5 >= 4080) {
<add> p5 = 255;
<add> } else {
<add> p5 >>= 4;
<add> }
<add> if (p6 < 16) {
<add> p6 = 0;
<add> } else if (p6 >= 4080) {
<add> p6 = 255;
<add> } else {
<add> p6 >>= 4;
<add> }
<add> if (p7 < 16) {
<add> p7 = 0;
<add> } else if (p7 >= 4080) {
<add> p7 = 255;
<add> } else {
<add> p7 >>= 4;
<add> }
<add>
<add> // store block data
<add> blockData[blockBufferOffset + col] = p0;
<add> blockData[blockBufferOffset + col + 8] = p1;
<add> blockData[blockBufferOffset + col + 16] = p2;
<add> blockData[blockBufferOffset + col + 24] = p3;
<add> blockData[blockBufferOffset + col + 32] = p4;
<add> blockData[blockBufferOffset + col + 40] = p5;
<add> blockData[blockBufferOffset + col + 48] = p6;
<add> blockData[blockBufferOffset + col + 56] = p7;
<ide> }
<add>}
<ide>
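quantizeAndInverse is a fixed-point, row/column-separated form of the 8x8 inverse DCT (per the Loeffler/Ligtenberg/Moschytz reference cited above), fused with dequantization and a +128 level shift. For comparison, a straightforward floating-point sketch of the textbook definition it approximates, without the level shift or 8-bit clamping:

// Reference (slow) 8x8 inverse DCT; `coeffs` is a 64-element array of
// dequantized coefficients in row-major order. Illustrative only.
function idct8x8(coeffs) {
  const out = new Float64Array(64);
  const c = u => (u === 0 ? Math.SQRT1_2 : 1);
  for (let y = 0; y < 8; y++) {
    for (let x = 0; x < 8; x++) {
      let sum = 0;
      for (let v = 0; v < 8; v++) {
        for (let u = 0; u < 8; u++) {
          sum +=
            c(u) * c(v) * coeffs[v * 8 + u] *
            Math.cos(((2 * x + 1) * u * Math.PI) / 16) *
            Math.cos(((2 * y + 1) * v * Math.PI) / 16);
        }
      }
      out[y * 8 + x] = sum / 4;
    }
  }
  return out;
}

// A DC-only block decodes to a flat block: every sample is DC / 8.
const flat = idct8x8([64, ...new Array(63).fill(0)]);
console.log(flat[0], flat[63]); // both ~8 (up to floating-point rounding)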
<del> function buildComponentData(frame, component) {
<del> const blocksPerLine = component.blocksPerLine;
<del> const blocksPerColumn = component.blocksPerColumn;
<del> const computationBuffer = new Int16Array(64);
<add>function buildComponentData(frame, component) {
<add> const blocksPerLine = component.blocksPerLine;
<add> const blocksPerColumn = component.blocksPerColumn;
<add> const computationBuffer = new Int16Array(64);
<ide>
<del> for (let blockRow = 0; blockRow < blocksPerColumn; blockRow++) {
<del> for (let blockCol = 0; blockCol < blocksPerLine; blockCol++) {
<del> const offset = getBlockBufferOffset(component, blockRow, blockCol);
<del> quantizeAndInverse(component, offset, computationBuffer);
<del> }
<add> for (let blockRow = 0; blockRow < blocksPerColumn; blockRow++) {
<add> for (let blockCol = 0; blockCol < blocksPerLine; blockCol++) {
<add> const offset = getBlockBufferOffset(component, blockRow, blockCol);
<add> quantizeAndInverse(component, offset, computationBuffer);
<ide> }
<del> return component.blockData;
<ide> }
<add> return component.blockData;
<add>}
<ide>
<del> function findNextFileMarker(data, currentPos, startPos = currentPos) {
<del> const maxPos = data.length - 1;
<del> let newPos = startPos < currentPos ? startPos : currentPos;
<add>function findNextFileMarker(data, currentPos, startPos = currentPos) {
<add> const maxPos = data.length - 1;
<add> let newPos = startPos < currentPos ? startPos : currentPos;
<ide>
<del> if (currentPos >= maxPos) {
<del> return null; // Don't attempt to read non-existent data and just return.
<del> }
<del> const currentMarker = readUint16(data, currentPos);
<del> if (currentMarker >= 0xffc0 && currentMarker <= 0xfffe) {
<del> return {
<del> invalid: null,
<del> marker: currentMarker,
<del> offset: currentPos,
<del> };
<del> }
<del> let newMarker = readUint16(data, newPos);
<del> while (!(newMarker >= 0xffc0 && newMarker <= 0xfffe)) {
<del> if (++newPos >= maxPos) {
<del> return null; // Don't attempt to read non-existent data and just return.
<del> }
<del> newMarker = readUint16(data, newPos);
<del> }
<add> if (currentPos >= maxPos) {
<add> return null; // Don't attempt to read non-existent data and just return.
<add> }
<add> const currentMarker = readUint16(data, currentPos);
<add> if (currentMarker >= 0xffc0 && currentMarker <= 0xfffe) {
<ide> return {
<del> invalid: currentMarker.toString(16),
<del> marker: newMarker,
<del> offset: newPos,
<add> invalid: null,
<add> marker: currentMarker,
<add> offset: currentPos,
<ide> };
<ide> }
<add> let newMarker = readUint16(data, newPos);
<add> while (!(newMarker >= 0xffc0 && newMarker <= 0xfffe)) {
<add> if (++newPos >= maxPos) {
<add> return null; // Don't attempt to read non-existent data and just return.
<add> }
<add> newMarker = readUint16(data, newPos);
<add> }
<add> return {
<add> invalid: currentMarker.toString(16),
<add> marker: newMarker,
<add> offset: newPos,
<add> };
<add>}
<ide>
<del> JpegImage.prototype = {
<del> parse(data, { dnlScanLines = null } = {}) {
<del> function readDataBlock() {
<del> const length = readUint16(data, offset);
<del> offset += 2;
<del> let endOffset = offset + length - 2;
<del>
<del> const fileMarker = findNextFileMarker(data, endOffset, offset);
<del> if (fileMarker && fileMarker.invalid) {
<del> warn(
<del> "readDataBlock - incorrect length, current marker is: " +
<del> fileMarker.invalid
<del> );
<del> endOffset = fileMarker.offset;
<del> }
<add>class JpegImage {
<add> constructor({ decodeTransform = null, colorTransform = -1 } = {}) {
<add> this._decodeTransform = decodeTransform;
<add> this._colorTransform = colorTransform;
<add> }
<ide>
<del> const array = data.subarray(offset, endOffset);
<del> offset += array.length;
<del> return array;
<del> }
<add> parse(data, { dnlScanLines = null } = {}) {
<add> function readDataBlock() {
<add> const length = readUint16(data, offset);
<add> offset += 2;
<add> let endOffset = offset + length - 2;
<ide>
<del> function prepareComponents(frame) {
<del> const mcusPerLine = Math.ceil(frame.samplesPerLine / 8 / frame.maxH);
<del> const mcusPerColumn = Math.ceil(frame.scanLines / 8 / frame.maxV);
<del> for (let i = 0, ii = frame.components.length; i < ii; i++) {
<del> const component = frame.components[i];
<del> const blocksPerLine = Math.ceil(
<del> (Math.ceil(frame.samplesPerLine / 8) * component.h) / frame.maxH
<del> );
<del> const blocksPerColumn = Math.ceil(
<del> (Math.ceil(frame.scanLines / 8) * component.v) / frame.maxV
<del> );
<del> const blocksPerLineForMcu = mcusPerLine * component.h;
<del> const blocksPerColumnForMcu = mcusPerColumn * component.v;
<del>
<del> const blocksBufferSize =
<del> 64 * blocksPerColumnForMcu * (blocksPerLineForMcu + 1);
<del> component.blockData = new Int16Array(blocksBufferSize);
<del> component.blocksPerLine = blocksPerLine;
<del> component.blocksPerColumn = blocksPerColumn;
<del> }
<del> frame.mcusPerLine = mcusPerLine;
<del> frame.mcusPerColumn = mcusPerColumn;
<add> const fileMarker = findNextFileMarker(data, endOffset, offset);
<add> if (fileMarker && fileMarker.invalid) {
<add> warn(
<add> "readDataBlock - incorrect length, current marker is: " +
<add> fileMarker.invalid
<add> );
<add> endOffset = fileMarker.offset;
<ide> }
<ide>
<del> let offset = 0;
<del> let jfif = null;
<del> let adobe = null;
<del> let frame, resetInterval;
<del> let numSOSMarkers = 0;
<del> const quantizationTables = [];
<del> const huffmanTablesAC = [],
<del> huffmanTablesDC = [];
<del>
<del> let fileMarker = readUint16(data, offset);
<del> offset += 2;
<del> if (fileMarker !== /* SOI (Start of Image) = */ 0xffd8) {
<del> throw new JpegError("SOI not found");
<del> }
<del> fileMarker = readUint16(data, offset);
<del> offset += 2;
<add> const array = data.subarray(offset, endOffset);
<add> offset += array.length;
<add> return array;
<add> }
<ide>
<del> markerLoop: while (fileMarker !== /* EOI (End of Image) = */ 0xffd9) {
<del> let i, j, l;
<del> switch (fileMarker) {
<del> case 0xffe0: // APP0 (Application Specific)
<del> case 0xffe1: // APP1
<del> case 0xffe2: // APP2
<del> case 0xffe3: // APP3
<del> case 0xffe4: // APP4
<del> case 0xffe5: // APP5
<del> case 0xffe6: // APP6
<del> case 0xffe7: // APP7
<del> case 0xffe8: // APP8
<del> case 0xffe9: // APP9
<del> case 0xffea: // APP10
<del> case 0xffeb: // APP11
<del> case 0xffec: // APP12
<del> case 0xffed: // APP13
<del> case 0xffee: // APP14
<del> case 0xffef: // APP15
<del> case 0xfffe: // COM (Comment)
<del> const appData = readDataBlock();
<del>
<del> if (fileMarker === 0xffe0) {
<del> // 'JFIF\x00'
<del> if (
<del> appData[0] === 0x4a &&
<del> appData[1] === 0x46 &&
<del> appData[2] === 0x49 &&
<del> appData[3] === 0x46 &&
<del> appData[4] === 0
<del> ) {
<del> jfif = {
<del> version: { major: appData[5], minor: appData[6] },
<del> densityUnits: appData[7],
<del> xDensity: (appData[8] << 8) | appData[9],
<del> yDensity: (appData[10] << 8) | appData[11],
<del> thumbWidth: appData[12],
<del> thumbHeight: appData[13],
<del> thumbData: appData.subarray(
<del> 14,
<del> 14 + 3 * appData[12] * appData[13]
<del> ),
<del> };
<del> }
<del> }
<del> // TODO APP1 - Exif
<del> if (fileMarker === 0xffee) {
<del> // 'Adobe'
<del> if (
<del> appData[0] === 0x41 &&
<del> appData[1] === 0x64 &&
<del> appData[2] === 0x6f &&
<del> appData[3] === 0x62 &&
<del> appData[4] === 0x65
<del> ) {
<del> adobe = {
<del> version: (appData[5] << 8) | appData[6],
<del> flags0: (appData[7] << 8) | appData[8],
<del> flags1: (appData[9] << 8) | appData[10],
<del> transformCode: appData[11],
<del> };
<del> }
<del> }
<del> break;
<add> function prepareComponents(frame) {
<add> const mcusPerLine = Math.ceil(frame.samplesPerLine / 8 / frame.maxH);
<add> const mcusPerColumn = Math.ceil(frame.scanLines / 8 / frame.maxV);
<add> for (let i = 0, ii = frame.components.length; i < ii; i++) {
<add> const component = frame.components[i];
<add> const blocksPerLine = Math.ceil(
<add> (Math.ceil(frame.samplesPerLine / 8) * component.h) / frame.maxH
<add> );
<add> const blocksPerColumn = Math.ceil(
<add> (Math.ceil(frame.scanLines / 8) * component.v) / frame.maxV
<add> );
<add> const blocksPerLineForMcu = mcusPerLine * component.h;
<add> const blocksPerColumnForMcu = mcusPerColumn * component.v;
<add>
<add> const blocksBufferSize =
<add> 64 * blocksPerColumnForMcu * (blocksPerLineForMcu + 1);
<add> component.blockData = new Int16Array(blocksBufferSize);
<add> component.blocksPerLine = blocksPerLine;
<add> component.blocksPerColumn = blocksPerColumn;
<add> }
<add> frame.mcusPerLine = mcusPerLine;
<add> frame.mcusPerColumn = mcusPerColumn;
<add> }
<ide>
<del> case 0xffdb: // DQT (Define Quantization Tables)
<del> const quantizationTablesLength = readUint16(data, offset);
<del> offset += 2;
<del> const quantizationTablesEnd = quantizationTablesLength + offset - 2;
<del> let z;
<del> while (offset < quantizationTablesEnd) {
<del> const quantizationTableSpec = data[offset++];
<del> const tableData = new Uint16Array(64);
<del> if (quantizationTableSpec >> 4 === 0) {
<del> // 8 bit values
<del> for (j = 0; j < 64; j++) {
<del> z = dctZigZag[j];
<del> tableData[z] = data[offset++];
<del> }
<del> } else if (quantizationTableSpec >> 4 === 1) {
<del> // 16 bit values
<del> for (j = 0; j < 64; j++) {
<del> z = dctZigZag[j];
<del> tableData[z] = readUint16(data, offset);
<del> offset += 2;
<del> }
<del> } else {
<del> throw new JpegError("DQT - invalid table spec");
<del> }
<del> quantizationTables[quantizationTableSpec & 15] = tableData;
<add> let offset = 0;
<add> let jfif = null;
<add> let adobe = null;
<add> let frame, resetInterval;
<add> let numSOSMarkers = 0;
<add> const quantizationTables = [];
<add> const huffmanTablesAC = [],
<add> huffmanTablesDC = [];
<add>
<add> let fileMarker = readUint16(data, offset);
<add> offset += 2;
<add> if (fileMarker !== /* SOI (Start of Image) = */ 0xffd8) {
<add> throw new JpegError("SOI not found");
<add> }
<add> fileMarker = readUint16(data, offset);
<add> offset += 2;
<add>
<add> markerLoop: while (fileMarker !== /* EOI (End of Image) = */ 0xffd9) {
<add> let i, j, l;
<add> switch (fileMarker) {
<add> case 0xffe0: // APP0 (Application Specific)
<add> case 0xffe1: // APP1
<add> case 0xffe2: // APP2
<add> case 0xffe3: // APP3
<add> case 0xffe4: // APP4
<add> case 0xffe5: // APP5
<add> case 0xffe6: // APP6
<add> case 0xffe7: // APP7
<add> case 0xffe8: // APP8
<add> case 0xffe9: // APP9
<add> case 0xffea: // APP10
<add> case 0xffeb: // APP11
<add> case 0xffec: // APP12
<add> case 0xffed: // APP13
<add> case 0xffee: // APP14
<add> case 0xffef: // APP15
<add> case 0xfffe: // COM (Comment)
<add> const appData = readDataBlock();
<add>
<add> if (fileMarker === 0xffe0) {
<add> // 'JFIF\x00'
<add> if (
<add> appData[0] === 0x4a &&
<add> appData[1] === 0x46 &&
<add> appData[2] === 0x49 &&
<add> appData[3] === 0x46 &&
<add> appData[4] === 0
<add> ) {
<add> jfif = {
<add> version: { major: appData[5], minor: appData[6] },
<add> densityUnits: appData[7],
<add> xDensity: (appData[8] << 8) | appData[9],
<add> yDensity: (appData[10] << 8) | appData[11],
<add> thumbWidth: appData[12],
<add> thumbHeight: appData[13],
<add> thumbData: appData.subarray(
<add> 14,
<add> 14 + 3 * appData[12] * appData[13]
<add> ),
<add> };
<ide> }
<del> break;
<del>
<del> case 0xffc0: // SOF0 (Start of Frame, Baseline DCT)
<del> case 0xffc1: // SOF1 (Start of Frame, Extended DCT)
<del> case 0xffc2: // SOF2 (Start of Frame, Progressive DCT)
<del> if (frame) {
<del> throw new JpegError("Only single frame JPEGs supported");
<add> }
<add> // TODO APP1 - Exif
<add> if (fileMarker === 0xffee) {
<add> // 'Adobe'
<add> if (
<add> appData[0] === 0x41 &&
<add> appData[1] === 0x64 &&
<add> appData[2] === 0x6f &&
<add> appData[3] === 0x62 &&
<add> appData[4] === 0x65
<add> ) {
<add> adobe = {
<add> version: (appData[5] << 8) | appData[6],
<add> flags0: (appData[7] << 8) | appData[8],
<add> flags1: (appData[9] << 8) | appData[10],
<add> transformCode: appData[11],
<add> };
<ide> }
<del> offset += 2; // Skip marker length.
<del>
<del> frame = {};
<del> frame.extended = fileMarker === 0xffc1;
<del> frame.progressive = fileMarker === 0xffc2;
<del> frame.precision = data[offset++];
<del> const sofScanLines = readUint16(data, offset);
<del> offset += 2;
<del> frame.scanLines = dnlScanLines || sofScanLines;
<del> frame.samplesPerLine = readUint16(data, offset);
<del> offset += 2;
<del> frame.components = [];
<del> frame.componentIds = {};
<del> const componentsCount = data[offset++];
<del> let maxH = 0,
<del> maxV = 0;
<del> for (i = 0; i < componentsCount; i++) {
<del> const componentId = data[offset];
<del> const h = data[offset + 1] >> 4;
<del> const v = data[offset + 1] & 15;
<del> if (maxH < h) {
<del> maxH = h;
<add> }
<add> break;
<add>
<add> case 0xffdb: // DQT (Define Quantization Tables)
<add> const quantizationTablesLength = readUint16(data, offset);
<add> offset += 2;
<add> const quantizationTablesEnd = quantizationTablesLength + offset - 2;
<add> let z;
<add> while (offset < quantizationTablesEnd) {
<add> const quantizationTableSpec = data[offset++];
<add> const tableData = new Uint16Array(64);
<add> if (quantizationTableSpec >> 4 === 0) {
<add> // 8 bit values
<add> for (j = 0; j < 64; j++) {
<add> z = dctZigZag[j];
<add> tableData[z] = data[offset++];
<ide> }
<del> if (maxV < v) {
<del> maxV = v;
<add> } else if (quantizationTableSpec >> 4 === 1) {
<add> // 16 bit values
<add> for (j = 0; j < 64; j++) {
<add> z = dctZigZag[j];
<add> tableData[z] = readUint16(data, offset);
<add> offset += 2;
<ide> }
<del> const qId = data[offset + 2];
<del> l = frame.components.push({
<del> h,
<del> v,
<del> quantizationId: qId,
<del> quantizationTable: null, // See comment below.
<del> });
<del> frame.componentIds[componentId] = l - 1;
<del> offset += 3;
<add> } else {
<add> throw new JpegError("DQT - invalid table spec");
<ide> }
<del> frame.maxH = maxH;
<del> frame.maxV = maxV;
<del> prepareComponents(frame);
<del> break;
<del>
<del> case 0xffc4: // DHT (Define Huffman Tables)
<del> const huffmanLength = readUint16(data, offset);
<del> offset += 2;
<del> for (i = 2; i < huffmanLength; ) {
<del> const huffmanTableSpec = data[offset++];
<del> const codeLengths = new Uint8Array(16);
<del> let codeLengthSum = 0;
<del> for (j = 0; j < 16; j++, offset++) {
<del> codeLengthSum += codeLengths[j] = data[offset];
<del> }
<del> const huffmanValues = new Uint8Array(codeLengthSum);
<del> for (j = 0; j < codeLengthSum; j++, offset++) {
<del> huffmanValues[j] = data[offset];
<del> }
<del> i += 17 + codeLengthSum;
<add> quantizationTables[quantizationTableSpec & 15] = tableData;
<add> }
<add> break;
<ide>
<del> (huffmanTableSpec >> 4 === 0 ? huffmanTablesDC : huffmanTablesAC)[
<del> huffmanTableSpec & 15
<del> ] = buildHuffmanTable(codeLengths, huffmanValues);
<add> case 0xffc0: // SOF0 (Start of Frame, Baseline DCT)
<add> case 0xffc1: // SOF1 (Start of Frame, Extended DCT)
<add> case 0xffc2: // SOF2 (Start of Frame, Progressive DCT)
<add> if (frame) {
<add> throw new JpegError("Only single frame JPEGs supported");
<add> }
<add> offset += 2; // Skip marker length.
<add>
<add> frame = {};
<add> frame.extended = fileMarker === 0xffc1;
<add> frame.progressive = fileMarker === 0xffc2;
<add> frame.precision = data[offset++];
<add> const sofScanLines = readUint16(data, offset);
<add> offset += 2;
<add> frame.scanLines = dnlScanLines || sofScanLines;
<add> frame.samplesPerLine = readUint16(data, offset);
<add> offset += 2;
<add> frame.components = [];
<add> frame.componentIds = {};
<add> const componentsCount = data[offset++];
<add> let maxH = 0,
<add> maxV = 0;
<add> for (i = 0; i < componentsCount; i++) {
<add> const componentId = data[offset];
<add> const h = data[offset + 1] >> 4;
<add> const v = data[offset + 1] & 15;
<add> if (maxH < h) {
<add> maxH = h;
<ide> }
<del> break;
<del>
<del> case 0xffdd: // DRI (Define Restart Interval)
<del> offset += 2; // Skip marker length.
<del>
<del> resetInterval = readUint16(data, offset);
<del> offset += 2;
<del> break;
<del>
<del> case 0xffda: // SOS (Start of Scan)
<del> // A DNL marker (0xFFDC), if it exists, is only allowed at the end
<del> // of the first scan segment and may only occur once in an image.
<del> // Furthermore, to prevent an infinite loop, do *not* attempt to
<del> // parse DNL markers during re-parsing of the JPEG scan data.
<del> const parseDNLMarker = ++numSOSMarkers === 1 && !dnlScanLines;
<del>
<del> offset += 2; // Skip marker length.
<del>
<del> const selectorsCount = data[offset++],
<del> components = [];
<del> for (i = 0; i < selectorsCount; i++) {
<del> const index = data[offset++];
<del> const componentIndex = frame.componentIds[index];
<del> const component = frame.components[componentIndex];
<del> component.index = index;
<del> const tableSpec = data[offset++];
<del> component.huffmanTableDC = huffmanTablesDC[tableSpec >> 4];
<del> component.huffmanTableAC = huffmanTablesAC[tableSpec & 15];
<del> components.push(component);
<add> if (maxV < v) {
<add> maxV = v;
<ide> }
<del> const spectralStart = data[offset++],
<del> spectralEnd = data[offset++],
<del> successiveApproximation = data[offset++];
<del> try {
<del> const processed = decodeScan(
<del> data,
<del> offset,
<del> frame,
<del> components,
<del> resetInterval,
<del> spectralStart,
<del> spectralEnd,
<del> successiveApproximation >> 4,
<del> successiveApproximation & 15,
<del> parseDNLMarker
<del> );
<del> offset += processed;
<del> } catch (ex) {
<del> if (ex instanceof DNLMarkerError) {
<del> warn(`${ex.message} -- attempting to re-parse the JPEG image.`);
<del> return this.parse(data, { dnlScanLines: ex.scanLines });
<del> } else if (ex instanceof EOIMarkerError) {
<del> warn(`${ex.message} -- ignoring the rest of the image data.`);
<del> break markerLoop;
<del> }
<del> throw ex;
<add> const qId = data[offset + 2];
<add> l = frame.components.push({
<add> h,
<add> v,
<add> quantizationId: qId,
<add> quantizationTable: null, // See comment below.
<add> });
<add> frame.componentIds[componentId] = l - 1;
<add> offset += 3;
<add> }
<add> frame.maxH = maxH;
<add> frame.maxV = maxV;
<add> prepareComponents(frame);
<add> break;
<add>
<add> case 0xffc4: // DHT (Define Huffman Tables)
<add> const huffmanLength = readUint16(data, offset);
<add> offset += 2;
<add> for (i = 2; i < huffmanLength; ) {
<add> const huffmanTableSpec = data[offset++];
<add> const codeLengths = new Uint8Array(16);
<add> let codeLengthSum = 0;
<add> for (j = 0; j < 16; j++, offset++) {
<add> codeLengthSum += codeLengths[j] = data[offset];
<ide> }
<del> break;
<del>
<del> case 0xffdc: // DNL (Define Number of Lines)
<del> // Ignore the marker, since it's being handled in `decodeScan`.
<del> offset += 4;
<del> break;
<del>
<del> case 0xffff: // Fill bytes
<del> if (data[offset] !== 0xff) {
<del> // Avoid skipping a valid marker.
<del> offset--;
<add> const huffmanValues = new Uint8Array(codeLengthSum);
<add> for (j = 0; j < codeLengthSum; j++, offset++) {
<add> huffmanValues[j] = data[offset];
<ide> }
<del> break;
<add> i += 17 + codeLengthSum;
<ide>
<del> default:
<del> // Could be incorrect encoding -- the last 0xFF byte of the previous
<del>            // block could have been eaten by the encoder, hence we fall back to
<del> // `startPos = offset - 3` when looking for the next valid marker.
<del> const nextFileMarker = findNextFileMarker(
<add> (huffmanTableSpec >> 4 === 0 ? huffmanTablesDC : huffmanTablesAC)[
<add> huffmanTableSpec & 15
<add> ] = buildHuffmanTable(codeLengths, huffmanValues);
<add> }
<add> break;
<add>
<add> case 0xffdd: // DRI (Define Restart Interval)
<add> offset += 2; // Skip marker length.
<add>
<add> resetInterval = readUint16(data, offset);
<add> offset += 2;
<add> break;
<add>
<add> case 0xffda: // SOS (Start of Scan)
<add> // A DNL marker (0xFFDC), if it exists, is only allowed at the end
<add> // of the first scan segment and may only occur once in an image.
<add> // Furthermore, to prevent an infinite loop, do *not* attempt to
<add> // parse DNL markers during re-parsing of the JPEG scan data.
<add> const parseDNLMarker = ++numSOSMarkers === 1 && !dnlScanLines;
<add>
<add> offset += 2; // Skip marker length.
<add>
<add> const selectorsCount = data[offset++],
<add> components = [];
<add> for (i = 0; i < selectorsCount; i++) {
<add> const index = data[offset++];
<add> const componentIndex = frame.componentIds[index];
<add> const component = frame.components[componentIndex];
<add> component.index = index;
<add> const tableSpec = data[offset++];
<add> component.huffmanTableDC = huffmanTablesDC[tableSpec >> 4];
<add> component.huffmanTableAC = huffmanTablesAC[tableSpec & 15];
<add> components.push(component);
<add> }
<add> const spectralStart = data[offset++],
<add> spectralEnd = data[offset++],
<add> successiveApproximation = data[offset++];
<add> try {
<add> const processed = decodeScan(
<ide> data,
<del> /* currentPos = */ offset - 2,
<del> /* startPos = */ offset - 3
<add> offset,
<add> frame,
<add> components,
<add> resetInterval,
<add> spectralStart,
<add> spectralEnd,
<add> successiveApproximation >> 4,
<add> successiveApproximation & 15,
<add> parseDNLMarker
<ide> );
<del> if (nextFileMarker && nextFileMarker.invalid) {
<del> warn(
<del> "JpegImage.parse - unexpected data, current marker is: " +
<del> nextFileMarker.invalid
<del> );
<del> offset = nextFileMarker.offset;
<del> break;
<del> }
<del> if (!nextFileMarker || offset >= data.length - 1) {
<del> warn(
<del> "JpegImage.parse - reached the end of the image data " +
<del> "without finding an EOI marker (0xFFD9)."
<del> );
<add> offset += processed;
<add> } catch (ex) {
<add> if (ex instanceof DNLMarkerError) {
<add> warn(`${ex.message} -- attempting to re-parse the JPEG image.`);
<add> return this.parse(data, { dnlScanLines: ex.scanLines });
<add> } else if (ex instanceof EOIMarkerError) {
<add> warn(`${ex.message} -- ignoring the rest of the image data.`);
<ide> break markerLoop;
<ide> }
<del> throw new JpegError(
<del> "JpegImage.parse - unknown marker: " + fileMarker.toString(16)
<del> );
<del> }
<del> fileMarker = readUint16(data, offset);
<del> offset += 2;
<del> }
<add> throw ex;
<add> }
<add> break;
<ide>
<del> this.width = frame.samplesPerLine;
<del> this.height = frame.scanLines;
<del> this.jfif = jfif;
<del> this.adobe = adobe;
<del> this.components = [];
<del> for (let i = 0, ii = frame.components.length; i < ii; i++) {
<del> const component = frame.components[i];
<add> case 0xffdc: // DNL (Define Number of Lines)
<add> // Ignore the marker, since it's being handled in `decodeScan`.
<add> offset += 4;
<add> break;
<ide>
<del> // Prevent errors when DQT markers are placed after SOF{n} markers,
<del> // by assigning the `quantizationTable` entry after the entire image
<del> // has been parsed (fixes issue7406.pdf).
<del> const quantizationTable = quantizationTables[component.quantizationId];
<del> if (quantizationTable) {
<del> component.quantizationTable = quantizationTable;
<del> }
<add> case 0xffff: // Fill bytes
<add> if (data[offset] !== 0xff) {
<add> // Avoid skipping a valid marker.
<add> offset--;
<add> }
<add> break;
<add>
<add> default:
<add> // Could be incorrect encoding -- the last 0xFF byte of the previous
<add>          // block could have been eaten by the encoder, hence we fall back to
<add> // `startPos = offset - 3` when looking for the next valid marker.
<add> const nextFileMarker = findNextFileMarker(
<add> data,
<add> /* currentPos = */ offset - 2,
<add> /* startPos = */ offset - 3
<add> );
<add> if (nextFileMarker && nextFileMarker.invalid) {
<add> warn(
<add> "JpegImage.parse - unexpected data, current marker is: " +
<add> nextFileMarker.invalid
<add> );
<add> offset = nextFileMarker.offset;
<add> break;
<add> }
<add> if (!nextFileMarker || offset >= data.length - 1) {
<add> warn(
<add> "JpegImage.parse - reached the end of the image data " +
<add> "without finding an EOI marker (0xFFD9)."
<add> );
<add> break markerLoop;
<add> }
<add> throw new JpegError(
<add> "JpegImage.parse - unknown marker: " + fileMarker.toString(16)
<add> );
<add> }
<add> fileMarker = readUint16(data, offset);
<add> offset += 2;
<add> }
<ide>
<del> this.components.push({
<del> index: component.index,
<del> output: buildComponentData(frame, component),
<del> scaleX: component.h / frame.maxH,
<del> scaleY: component.v / frame.maxV,
<del> blocksPerLine: component.blocksPerLine,
<del> blocksPerColumn: component.blocksPerColumn,
<del> });
<add> this.width = frame.samplesPerLine;
<add> this.height = frame.scanLines;
<add> this.jfif = jfif;
<add> this.adobe = adobe;
<add> this.components = [];
<add> for (let i = 0, ii = frame.components.length; i < ii; i++) {
<add> const component = frame.components[i];
<add>
<add> // Prevent errors when DQT markers are placed after SOF{n} markers,
<add> // by assigning the `quantizationTable` entry after the entire image
<add> // has been parsed (fixes issue7406.pdf).
<add> const quantizationTable = quantizationTables[component.quantizationId];
<add> if (quantizationTable) {
<add> component.quantizationTable = quantizationTable;
<ide> }
<del> this.numComponents = this.components.length;
<del> return undefined;
<del> },
<ide>
<del> _getLinearizedBlockData(width, height, isSourcePDF = false) {
<del> const scaleX = this.width / width,
<del> scaleY = this.height / height;
<add> this.components.push({
<add> index: component.index,
<add> output: buildComponentData(frame, component),
<add> scaleX: component.h / frame.maxH,
<add> scaleY: component.v / frame.maxV,
<add> blocksPerLine: component.blocksPerLine,
<add> blocksPerColumn: component.blocksPerColumn,
<add> });
<add> }
<add> this.numComponents = this.components.length;
<add> return undefined;
<add> }
<ide>
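With the class form introduced above, decoding is driven by constructing a JpegImage and calling parse(); a usage sketch, assuming `jpegBytes` is a Uint8Array holding a complete JPEG file:

// Usage sketch; `jpegBytes` is assumed to be a Uint8Array of JPEG file data.
const image = new JpegImage({ colorTransform: -1 });
image.parse(jpegBytes);
console.log(image.width, image.height, image.numComponents);
// image.components[i].output holds the dequantized, IDCT'd samples for
// component i; scaleX/scaleY record its subsampling relative to the frame.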
<del> let component, componentScaleX, componentScaleY, blocksPerScanline;
<del> let x, y, i, j, k;
<del> let index;
<del> let offset = 0;
<del> let output;
<del> const numComponents = this.components.length;
<del> const dataLength = width * height * numComponents;
<del> const data = new Uint8ClampedArray(dataLength);
<del> const xScaleBlockOffset = new Uint32Array(width);
<del> const mask3LSB = 0xfffffff8; // used to clear the 3 LSBs
<del> let lastComponentScaleX;
<del>
<del> for (i = 0; i < numComponents; i++) {
<del> component = this.components[i];
<del> componentScaleX = component.scaleX * scaleX;
<del> componentScaleY = component.scaleY * scaleY;
<del> offset = i;
<del> output = component.output;
<del> blocksPerScanline = (component.blocksPerLine + 1) << 3;
<del> // Precalculate the `xScaleBlockOffset`. Since it doesn't depend on the
<del> // component data, that's only necessary when `componentScaleX` changes.
<del> if (componentScaleX !== lastComponentScaleX) {
<del> for (x = 0; x < width; x++) {
<del> j = 0 | (x * componentScaleX);
<del> xScaleBlockOffset[x] = ((j & mask3LSB) << 3) | (j & 7);
<del> }
<del> lastComponentScaleX = componentScaleX;
<add> _getLinearizedBlockData(width, height, isSourcePDF = false) {
<add> const scaleX = this.width / width,
<add> scaleY = this.height / height;
<add>
<add> let component, componentScaleX, componentScaleY, blocksPerScanline;
<add> let x, y, i, j, k;
<add> let index;
<add> let offset = 0;
<add> let output;
<add> const numComponents = this.components.length;
<add> const dataLength = width * height * numComponents;
<add> const data = new Uint8ClampedArray(dataLength);
<add> const xScaleBlockOffset = new Uint32Array(width);
<add> const mask3LSB = 0xfffffff8; // used to clear the 3 LSBs
<add> let lastComponentScaleX;
<add>
<add> for (i = 0; i < numComponents; i++) {
<add> component = this.components[i];
<add> componentScaleX = component.scaleX * scaleX;
<add> componentScaleY = component.scaleY * scaleY;
<add> offset = i;
<add> output = component.output;
<add> blocksPerScanline = (component.blocksPerLine + 1) << 3;
<add> // Precalculate the `xScaleBlockOffset`. Since it doesn't depend on the
<add> // component data, that's only necessary when `componentScaleX` changes.
<add> if (componentScaleX !== lastComponentScaleX) {
<add> for (x = 0; x < width; x++) {
<add> j = 0 | (x * componentScaleX);
<add> xScaleBlockOffset[x] = ((j & mask3LSB) << 3) | (j & 7);
<ide> }
<del> // linearize the blocks of the component
<del> for (y = 0; y < height; y++) {
<del> j = 0 | (y * componentScaleY);
<del> index = (blocksPerScanline * (j & mask3LSB)) | ((j & 7) << 3);
<del> for (x = 0; x < width; x++) {
<del> data[offset] = output[index + xScaleBlockOffset[x]];
<del> offset += numComponents;
<del> }
<add> lastComponentScaleX = componentScaleX;
<add> }
<add> // linearize the blocks of the component
<add> for (y = 0; y < height; y++) {
<add> j = 0 | (y * componentScaleY);
<add> index = (blocksPerScanline * (j & mask3LSB)) | ((j & 7) << 3);
<add> for (x = 0; x < width; x++) {
<add> data[offset] = output[index + xScaleBlockOffset[x]];
<add> offset += numComponents;
<ide> }
<ide> }
<add> }
<ide>
<del> // decodeTransform contains pairs of multiplier (-256..256) and additive
<del> let transform = this._decodeTransform;
<del>
<del> // In PDF files, JPEG images with CMYK colour spaces are usually inverted
<del> // (this can be observed by extracting the raw image data).
<del> // Since the conversion algorithms (see below) were written primarily for
<del> // the PDF use-cases, attempting to use `JpegImage` to parse standalone
<del> // JPEG (CMYK) images may thus result in inverted images (see issue 9513).
<del> //
<del> // Unfortunately it's not (always) possible to tell, from the image data
<del> // alone, if it needs to be inverted. Thus in an attempt to provide better
<del> // out-of-box behaviour when `JpegImage` is used standalone, default to
<del> // inverting JPEG (CMYK) images if and only if the image data does *not*
<del> // come from a PDF file and no `decodeTransform` was passed by the user.
<del> if (!isSourcePDF && numComponents === 4 && !transform) {
<del> // prettier-ignore
<del> transform = new Int32Array([
<add> // decodeTransform contains pairs of multiplier (-256..256) and additive
<add> let transform = this._decodeTransform;
<add>
<add> // In PDF files, JPEG images with CMYK colour spaces are usually inverted
<add> // (this can be observed by extracting the raw image data).
<add> // Since the conversion algorithms (see below) were written primarily for
<add> // the PDF use-cases, attempting to use `JpegImage` to parse standalone
<add> // JPEG (CMYK) images may thus result in inverted images (see issue 9513).
<add> //
<add> // Unfortunately it's not (always) possible to tell, from the image data
<add> // alone, if it needs to be inverted. Thus in an attempt to provide better
<add> // out-of-box behaviour when `JpegImage` is used standalone, default to
<add> // inverting JPEG (CMYK) images if and only if the image data does *not*
<add> // come from a PDF file and no `decodeTransform` was passed by the user.
<add> if (!isSourcePDF && numComponents === 4 && !transform) {
<add> // prettier-ignore
<add> transform = new Int32Array([
<ide> -256, 255, -256, 255, -256, 255, -256, 255]);
<del> }
<add> }
<ide>
<del> if (transform) {
<del> for (i = 0; i < dataLength; ) {
<del> for (j = 0, k = 0; j < numComponents; j++, i++, k += 2) {
<del> data[i] = ((data[i] * transform[k]) >> 8) + transform[k + 1];
<del> }
<add> if (transform) {
<add> for (i = 0; i < dataLength; ) {
<add> for (j = 0, k = 0; j < numComponents; j++, i++, k += 2) {
<add> data[i] = ((data[i] * transform[k]) >> 8) + transform[k + 1];
<ide> }
<ide> }
<del> return data;
<del> },
<add> }
<add> return data;
<add> }
<ide>
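The decode transform is applied per sample as `((value * multiplier) >> 8) + additive`; with the default CMYK pair of -256 and 255 this is exactly an 8-bit inversion. Isolated for illustration:

// One (multiplier, additive) pair applied to a single 0..255 sample.
function applyDecodePair(value, multiplier, additive) {
  return ((value * multiplier) >> 8) + additive;
}

console.log(applyDecodePair(0, -256, 255));   // 255
console.log(applyDecodePair(100, -256, 255)); // 155
console.log(applyDecodePair(255, -256, 255)); // 0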
<del> get _isColorConversionNeeded() {
<del> if (this.adobe) {
<del> // The adobe transform marker overrides any previous setting.
<del> return !!this.adobe.transformCode;
<del> }
<del> if (this.numComponents === 3) {
<del> if (this._colorTransform === 0) {
<del> // If the Adobe transform marker is not present and the image
<del> // dictionary has a 'ColorTransform' entry, explicitly set to `0`,
<del> // then the colours should *not* be transformed.
<del> return false;
<del> } else if (
<del> this.components[0].index === /* "R" = */ 0x52 &&
<del> this.components[1].index === /* "G" = */ 0x47 &&
<del> this.components[2].index === /* "B" = */ 0x42
<del> ) {
<del> // If the three components are indexed as RGB in ASCII
<del> // then the colours should *not* be transformed.
<del> return false;
<del> }
<del> return true;
<del> }
<del> // `this.numComponents !== 3`
<del> if (this._colorTransform === 1) {
<add> get _isColorConversionNeeded() {
<add> if (this.adobe) {
<add> // The adobe transform marker overrides any previous setting.
<add> return !!this.adobe.transformCode;
<add> }
<add> if (this.numComponents === 3) {
<add> if (this._colorTransform === 0) {
<ide> // If the Adobe transform marker is not present and the image
<del> // dictionary has a 'ColorTransform' entry, explicitly set to `1`,
<del> // then the colours should be transformed.
<del> return true;
<del> }
<del> return false;
<del> },
<del>
<del> _convertYccToRgb: function convertYccToRgb(data) {
<del> let Y, Cb, Cr;
<del> for (let i = 0, length = data.length; i < length; i += 3) {
<del> Y = data[i];
<del> Cb = data[i + 1];
<del> Cr = data[i + 2];
<del> data[i] = Y - 179.456 + 1.402 * Cr;
<del> data[i + 1] = Y + 135.459 - 0.344 * Cb - 0.714 * Cr;
<del> data[i + 2] = Y - 226.816 + 1.772 * Cb;
<add> // dictionary has a 'ColorTransform' entry, explicitly set to `0`,
<add> // then the colours should *not* be transformed.
<add> return false;
<add> } else if (
<add> this.components[0].index === /* "R" = */ 0x52 &&
<add> this.components[1].index === /* "G" = */ 0x47 &&
<add> this.components[2].index === /* "B" = */ 0x42
<add> ) {
<add> // If the three components are indexed as RGB in ASCII
<add> // then the colours should *not* be transformed.
<add> return false;
<ide> }
<del> return data;
<del> },
<add> return true;
<add> }
<add> // `this.numComponents !== 3`
<add> if (this._colorTransform === 1) {
<add> // If the Adobe transform marker is not present and the image
<add> // dictionary has a 'ColorTransform' entry, explicitly set to `1`,
<add> // then the colours should be transformed.
<add> return true;
<add> }
<add> return false;
<add> }
<ide>
<del> _convertYcckToRgb: function convertYcckToRgb(data) {
<del> let Y, Cb, Cr, k;
<del> let offset = 0;
<del> for (let i = 0, length = data.length; i < length; i += 4) {
<del> Y = data[i];
<del> Cb = data[i + 1];
<del> Cr = data[i + 2];
<del> k = data[i + 3];
<del>
<del> data[offset++] =
<del> -122.67195406894 +
<del> Cb *
<del> (-6.60635669420364e-5 * Cb +
<del> 0.000437130475926232 * Cr -
<del> 5.4080610064599e-5 * Y +
<del> 0.00048449797120281 * k -
<del> 0.154362151871126) +
<del> Cr *
<del> (-0.000957964378445773 * Cr +
<del> 0.000817076911346625 * Y -
<del> 0.00477271405408747 * k +
<del> 1.53380253221734) +
<del> Y *
<del> (0.000961250184130688 * Y -
<del> 0.00266257332283933 * k +
<del> 0.48357088451265) +
<del> k * (-0.000336197177618394 * k + 0.484791561490776);
<del>
<del> data[offset++] =
<del> 107.268039397724 +
<del> Cb *
<del> (2.19927104525741e-5 * Cb -
<del> 0.000640992018297945 * Cr +
<del> 0.000659397001245577 * Y +
<del> 0.000426105652938837 * k -
<del> 0.176491792462875) +
<del> Cr *
<del> (-0.000778269941513683 * Cr +
<del> 0.00130872261408275 * Y +
<del> 0.000770482631801132 * k -
<del> 0.151051492775562) +
<del> Y *
<del> (0.00126935368114843 * Y -
<del> 0.00265090189010898 * k +
<del> 0.25802910206845) +
<del> k * (-0.000318913117588328 * k - 0.213742400323665);
<del>
<del> data[offset++] =
<del> -20.810012546947 +
<del> Cb *
<del> (-0.000570115196973677 * Cb -
<del> 2.63409051004589e-5 * Cr +
<del> 0.0020741088115012 * Y -
<del> 0.00288260236853442 * k +
<del> 0.814272968359295) +
<del> Cr *
<del> (-1.53496057440975e-5 * Cr -
<del> 0.000132689043961446 * Y +
<del> 0.000560833691242812 * k -
<del> 0.195152027534049) +
<del> Y *
<del> (0.00174418132927582 * Y -
<del> 0.00255243321439347 * k +
<del> 0.116935020465145) +
<del> k * (-0.000343531996510555 * k + 0.24165260232407);
<del> }
<del> // Ensure that only the converted RGB data is returned.
<del> return data.subarray(0, offset);
<del> },
<del>
<del> _convertYcckToCmyk: function convertYcckToCmyk(data) {
<del> let Y, Cb, Cr;
<del> for (let i = 0, length = data.length; i < length; i += 4) {
<del> Y = data[i];
<del> Cb = data[i + 1];
<del> Cr = data[i + 2];
<del> data[i] = 434.456 - Y - 1.402 * Cr;
<del> data[i + 1] = 119.541 - Y + 0.344 * Cb + 0.714 * Cr;
<del> data[i + 2] = 481.816 - Y - 1.772 * Cb;
<del> // K in data[i + 3] is unchanged
<del> }
<del> return data;
<del> },
<add> _convertYccToRgb(data) {
<add> let Y, Cb, Cr;
<add> for (let i = 0, length = data.length; i < length; i += 3) {
<add> Y = data[i];
<add> Cb = data[i + 1];
<add> Cr = data[i + 2];
<add> data[i] = Y - 179.456 + 1.402 * Cr;
<add> data[i + 1] = Y + 135.459 - 0.344 * Cb - 0.714 * Cr;
<add> data[i + 2] = Y - 226.816 + 1.772 * Cb;
<add> }
<add> return data;
<add> }
<add>
<add> _convertYcckToRgb(data) {
<add> let Y, Cb, Cr, k;
<add> let offset = 0;
<add> for (let i = 0, length = data.length; i < length; i += 4) {
<add> Y = data[i];
<add> Cb = data[i + 1];
<add> Cr = data[i + 2];
<add> k = data[i + 3];
<add>
<add> data[offset++] =
<add> -122.67195406894 +
<add> Cb *
<add> (-6.60635669420364e-5 * Cb +
<add> 0.000437130475926232 * Cr -
<add> 5.4080610064599e-5 * Y +
<add> 0.00048449797120281 * k -
<add> 0.154362151871126) +
<add> Cr *
<add> (-0.000957964378445773 * Cr +
<add> 0.000817076911346625 * Y -
<add> 0.00477271405408747 * k +
<add> 1.53380253221734) +
<add> Y *
<add> (0.000961250184130688 * Y -
<add> 0.00266257332283933 * k +
<add> 0.48357088451265) +
<add> k * (-0.000336197177618394 * k + 0.484791561490776);
<add>
<add> data[offset++] =
<add> 107.268039397724 +
<add> Cb *
<add> (2.19927104525741e-5 * Cb -
<add> 0.000640992018297945 * Cr +
<add> 0.000659397001245577 * Y +
<add> 0.000426105652938837 * k -
<add> 0.176491792462875) +
<add> Cr *
<add> (-0.000778269941513683 * Cr +
<add> 0.00130872261408275 * Y +
<add> 0.000770482631801132 * k -
<add> 0.151051492775562) +
<add> Y *
<add> (0.00126935368114843 * Y -
<add> 0.00265090189010898 * k +
<add> 0.25802910206845) +
<add> k * (-0.000318913117588328 * k - 0.213742400323665);
<add>
<add> data[offset++] =
<add> -20.810012546947 +
<add> Cb *
<add> (-0.000570115196973677 * Cb -
<add> 2.63409051004589e-5 * Cr +
<add> 0.0020741088115012 * Y -
<add> 0.00288260236853442 * k +
<add> 0.814272968359295) +
<add> Cr *
<add> (-1.53496057440975e-5 * Cr -
<add> 0.000132689043961446 * Y +
<add> 0.000560833691242812 * k -
<add> 0.195152027534049) +
<add> Y *
<add> (0.00174418132927582 * Y -
<add> 0.00255243321439347 * k +
<add> 0.116935020465145) +
<add> k * (-0.000343531996510555 * k + 0.24165260232407);
<add> }
<add> // Ensure that only the converted RGB data is returned.
<add> return data.subarray(0, offset);
<add> }
<add>
<add> _convertYcckToCmyk(data) {
<add> let Y, Cb, Cr;
<add> for (let i = 0, length = data.length; i < length; i += 4) {
<add> Y = data[i];
<add> Cb = data[i + 1];
<add> Cr = data[i + 2];
<add> data[i] = 434.456 - Y - 1.402 * Cr;
<add> data[i + 1] = 119.541 - Y + 0.344 * Cb + 0.714 * Cr;
<add> data[i + 2] = 481.816 - Y - 1.772 * Cb;
<add> // K in data[i + 3] is unchanged
<add> }
<add> return data;
<add> }
<ide>
<del> _convertCmykToRgb: function convertCmykToRgb(data) {
<del> let c, m, y, k;
<add> _convertCmykToRgb(data) {
<add> let c, m, y, k;
<add> let offset = 0;
<add> for (let i = 0, length = data.length; i < length; i += 4) {
<add> c = data[i];
<add> m = data[i + 1];
<add> y = data[i + 2];
<add> k = data[i + 3];
<add>
<add> data[offset++] =
<add> 255 +
<add> c *
<add> (-0.00006747147073602441 * c +
<add> 0.0008379262121013727 * m +
<add> 0.0002894718188643294 * y +
<add> 0.003264231057537806 * k -
<add> 1.1185611867203937) +
<add> m *
<add> (0.000026374107616089405 * m -
<add> 0.00008626949158638572 * y -
<add> 0.0002748769067499491 * k -
<add> 0.02155688794978967) +
<add> y *
<add> (-0.00003878099212869363 * y -
<add> 0.0003267808279485286 * k +
<add> 0.0686742238595345) -
<add> k * (0.0003361971776183937 * k + 0.7430659151342254);
<add>
<add> data[offset++] =
<add> 255 +
<add> c *
<add> (0.00013596372813588848 * c +
<add> 0.000924537132573585 * m +
<add> 0.00010567359618683593 * y +
<add> 0.0004791864687436512 * k -
<add> 0.3109689587515875) +
<add> m *
<add> (-0.00023545346108370344 * m +
<add> 0.0002702845253534714 * y +
<add> 0.0020200308977307156 * k -
<add> 0.7488052167015494) +
<add> y *
<add> (0.00006834815998235662 * y +
<add> 0.00015168452363460973 * k -
<add> 0.09751927774728933) -
<add> k * (0.00031891311758832814 * k + 0.7364883807733168);
<add>
<add> data[offset++] =
<add> 255 +
<add> c *
<add> (0.000013598650411385307 * c +
<add> 0.00012423956175490851 * m +
<add> 0.0004751985097583589 * y -
<add> 0.0000036729317476630422 * k -
<add> 0.05562186980264034) +
<add> m *
<add> (0.00016141380598724676 * m +
<add> 0.0009692239130725186 * y +
<add> 0.0007782692450036253 * k -
<add> 0.44015232367526463) +
<add> y *
<add> (5.068882914068769e-7 * y +
<add> 0.0017778369011375071 * k -
<add> 0.7591454649749609) -
<add> k * (0.0003435319965105553 * k + 0.7063770186160144);
<add> }
<add> // Ensure that only the converted RGB data is returned.
<add> return data.subarray(0, offset);
<add> }
<add>
<add> getData({ width, height, forceRGB = false, isSourcePDF = false }) {
<add> if (
<add> typeof PDFJSDev === "undefined" ||
<add> PDFJSDev.test("!PRODUCTION || TESTING")
<add> ) {
<add> assert(
<add> isSourcePDF === true,
<add> 'JpegImage.getData: Unexpected "isSourcePDF" value for PDF files.'
<add> );
<add> }
<add> if (this.numComponents > 4) {
<add> throw new JpegError("Unsupported color mode");
<add> }
<add> // Type of data: Uint8ClampedArray(width * height * numComponents)
<add> const data = this._getLinearizedBlockData(width, height, isSourcePDF);
<add>
<add> if (this.numComponents === 1 && forceRGB) {
<add> const dataLength = data.length;
<add> const rgbData = new Uint8ClampedArray(dataLength * 3);
<ide> let offset = 0;
<del> for (let i = 0, length = data.length; i < length; i += 4) {
<del> c = data[i];
<del> m = data[i + 1];
<del> y = data[i + 2];
<del> k = data[i + 3];
<del>
<del> data[offset++] =
<del> 255 +
<del> c *
<del> (-0.00006747147073602441 * c +
<del> 0.0008379262121013727 * m +
<del> 0.0002894718188643294 * y +
<del> 0.003264231057537806 * k -
<del> 1.1185611867203937) +
<del> m *
<del> (0.000026374107616089405 * m -
<del> 0.00008626949158638572 * y -
<del> 0.0002748769067499491 * k -
<del> 0.02155688794978967) +
<del> y *
<del> (-0.00003878099212869363 * y -
<del> 0.0003267808279485286 * k +
<del> 0.0686742238595345) -
<del> k * (0.0003361971776183937 * k + 0.7430659151342254);
<del>
<del> data[offset++] =
<del> 255 +
<del> c *
<del> (0.00013596372813588848 * c +
<del> 0.000924537132573585 * m +
<del> 0.00010567359618683593 * y +
<del> 0.0004791864687436512 * k -
<del> 0.3109689587515875) +
<del> m *
<del> (-0.00023545346108370344 * m +
<del> 0.0002702845253534714 * y +
<del> 0.0020200308977307156 * k -
<del> 0.7488052167015494) +
<del> y *
<del> (0.00006834815998235662 * y +
<del> 0.00015168452363460973 * k -
<del> 0.09751927774728933) -
<del> k * (0.00031891311758832814 * k + 0.7364883807733168);
<del>
<del> data[offset++] =
<del> 255 +
<del> c *
<del> (0.000013598650411385307 * c +
<del> 0.00012423956175490851 * m +
<del> 0.0004751985097583589 * y -
<del> 0.0000036729317476630422 * k -
<del> 0.05562186980264034) +
<del> m *
<del> (0.00016141380598724676 * m +
<del> 0.0009692239130725186 * y +
<del> 0.0007782692450036253 * k -
<del> 0.44015232367526463) +
<del> y *
<del> (5.068882914068769e-7 * y +
<del> 0.0017778369011375071 * k -
<del> 0.7591454649749609) -
<del> k * (0.0003435319965105553 * k + 0.7063770186160144);
<del> }
<del> // Ensure that only the converted RGB data is returned.
<del> return data.subarray(0, offset);
<del> },
<del>
<del> getData({ width, height, forceRGB = false, isSourcePDF = false }) {
<del> if (
<del> typeof PDFJSDev === "undefined" ||
<del> PDFJSDev.test("!PRODUCTION || TESTING")
<del> ) {
<del> assert(
<del> isSourcePDF === true,
<del> 'JpegImage.getData: Unexpected "isSourcePDF" value for PDF files.'
<del> );
<add> for (let i = 0; i < dataLength; i++) {
<add> const grayColor = data[i];
<add> rgbData[offset++] = grayColor;
<add> rgbData[offset++] = grayColor;
<add> rgbData[offset++] = grayColor;
<ide> }
<del> if (this.numComponents > 4) {
<del> throw new JpegError("Unsupported color mode");
<del> }
<del> // Type of data: Uint8ClampedArray(width * height * numComponents)
<del> const data = this._getLinearizedBlockData(width, height, isSourcePDF);
<del>
<del> if (this.numComponents === 1 && forceRGB) {
<del> const dataLength = data.length;
<del> const rgbData = new Uint8ClampedArray(dataLength * 3);
<del> let offset = 0;
<del> for (let i = 0; i < dataLength; i++) {
<del> const grayColor = data[i];
<del> rgbData[offset++] = grayColor;
<del> rgbData[offset++] = grayColor;
<del> rgbData[offset++] = grayColor;
<del> }
<del> return rgbData;
<del> } else if (this.numComponents === 3 && this._isColorConversionNeeded) {
<del> return this._convertYccToRgb(data);
<del> } else if (this.numComponents === 4) {
<del> if (this._isColorConversionNeeded) {
<del> if (forceRGB) {
<del> return this._convertYcckToRgb(data);
<del> }
<del> return this._convertYcckToCmyk(data);
<del> } else if (forceRGB) {
<del> return this._convertCmykToRgb(data);
<add> return rgbData;
<add> } else if (this.numComponents === 3 && this._isColorConversionNeeded) {
<add> return this._convertYccToRgb(data);
<add> } else if (this.numComponents === 4) {
<add> if (this._isColorConversionNeeded) {
<add> if (forceRGB) {
<add> return this._convertYcckToRgb(data);
<ide> }
<add> return this._convertYcckToCmyk(data);
<add> } else if (forceRGB) {
<add> return this._convertCmykToRgb(data);
<ide> }
<del> return data;
<del> },
<del> };
<del>
<del> return JpegImage;
<del>})();
<add> }
<add> return data;
<add> }
<add>}
<ide>
<ide> export { JpegImage }; | 1 |
Go | Go | remove job from history api | 5c7c3fea6caf43d51344faaf190b869db1b44f46 | <ide><path>api/server/server.go
<ide> func getImagesHistory(eng *engine.Engine, version version.Version, w http.Respon
<ide> return fmt.Errorf("Missing parameter")
<ide> }
<ide>
<del> var job = eng.Job("history", vars["name"])
<del> streamJSON(job, w, false)
<del>
<del> if err := job.Run(); err != nil {
<add> name := vars["name"]
<add> history, err := getDaemon(eng).Repositories().History(name)
<add> if err != nil {
<ide> return err
<ide> }
<del> return nil
<add>
<add> return writeJSON(w, http.StatusOK, history)
<ide> }
<ide>
<ide> func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
<ide><path>api/server/server_unit_test.go
<ide> func TestLogsNoStreams(t *testing.T) {
<ide> }
<ide> }
<ide>
<del>func TestGetImagesHistory(t *testing.T) {
<del> eng := engine.New()
<del> imageName := "docker-test-image"
<del> var called bool
<del> eng.Register("history", func(job *engine.Job) error {
<del> called = true
<del> if len(job.Args) == 0 {
<del> t.Fatal("Job arguments is empty")
<del> }
<del> if job.Args[0] != imageName {
<del> t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
<del> }
<del> v := &engine.Env{}
<del> if _, err := v.WriteTo(job.Stdout); err != nil {
<del> return err
<del> }
<del> return nil
<del> })
<del> r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
<del> if !called {
<del> t.Fatalf("handler was not called")
<del> }
<del> if r.Code != http.StatusOK {
<del> t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
<del> }
<del> if r.HeaderMap.Get("Content-Type") != "application/json" {
<del> t.Fatalf("%#v\n", r)
<del> }
<del>}
<del>
<ide> func TestGetImagesByName(t *testing.T) {
<ide> eng := engine.New()
<ide> name := "image_name"
<ide><path>graph/history.go
<ide> package graph
<ide>
<ide> import (
<del> "encoding/json"
<del> "fmt"
<ide> "strings"
<ide>
<ide> "github.com/docker/docker/api/types"
<del> "github.com/docker/docker/engine"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/utils"
<ide> )
<ide>
<del>func (s *TagStore) CmdHistory(job *engine.Job) error {
<del> if n := len(job.Args); n != 1 {
<del> return fmt.Errorf("Usage: %s IMAGE", job.Name)
<del> }
<del> name := job.Args[0]
<add>func (s *TagStore) History(name string) ([]*types.ImageHistory, error) {
<ide> foundImage, err := s.LookupImage(name)
<ide> if err != nil {
<del> return err
<add> return nil, err
<ide> }
<ide>
<ide> lookupMap := make(map[string][]string)
<ide> func (s *TagStore) CmdHistory(job *engine.Job) error {
<ide> }
<ide> }
<ide>
<del> history := []types.ImageHistory{}
<add> history := []*types.ImageHistory{}
<ide>
<ide> err = foundImage.WalkHistory(func(img *image.Image) error {
<del> history = append(history, types.ImageHistory{
<add> history = append(history, &types.ImageHistory{
<ide> ID: img.ID,
<ide> Created: img.Created.Unix(),
<ide> CreatedBy: strings.Join(img.ContainerConfig.Cmd, " "),
<ide> func (s *TagStore) CmdHistory(job *engine.Job) error {
<ide> return nil
<ide> })
<ide>
<del> if err = json.NewEncoder(job.Stdout).Encode(history); err != nil {
<del> return err
<del> }
<del>
<del> return nil
<add> return history, err
<ide> }
<ide><path>graph/service.go
<ide> func (s *TagStore) Install(eng *engine.Engine) error {
<ide> "image_inspect": s.CmdLookup,
<ide> "image_tarlayer": s.CmdTarLayer,
<ide> "image_export": s.CmdImageExport,
<del> "history": s.CmdHistory,
<ide> "viz": s.CmdViz,
<ide> "load": s.CmdLoad,
<ide> "import": s.CmdImport, | 4 |
Javascript | Javascript | fix instagram email | a5e1830d5fa9d2292c1624b5d999f60309ba1063 | <ide><path>config/passport.js
<ide> passport.use(new InstagramStrategy(secrets.instagram,function(req, accessToken,
<ide> // Similar to Twitter API, assigns a temporary e-mail address
<ide> // to get on with the registration process. It can be changed later
<ide> // to a valid e-mail address in Profile Management.
<del> profile.username + "@instagram.com";
<add> user.email = profile.username + "@instagram.com";
<ide> user.profile.website = profile._json.data.website;
<ide> user.profile.picture = profile._json.data.profile_picture;
<ide> user.save(function(err) {
<ide><path>generator.js
<ide> inquirer.prompt({
<ide> // Similar to Twitter API, assigns a temporary e-mail address
<ide> // to get on with the registration process. It can be changed later
<ide> // to a valid e-mail address in Profile Management.
<del> profile.username + "@instagram.com";
<add> user.email = profile.username + "@instagram.com";
<ide> user.profile.website = profile._json.data.website;
<ide> user.profile.picture = profile._json.data.profile_picture;
<ide> user.save(function(err) { | 2 |
Text | Text | add bridgear to collaborators | e9c026369628834a806e36b42780db44a5017613 | <ide><path>README.md
<ide> more information about the governance of the Node.js project, see
<ide> **Ben Noordhuis** <[email protected]>
<ide> * [brendanashworth](https://github.com/brendanashworth) -
<ide> **Brendan Ashworth** <[email protected]>
<add>* [BridgeAR](https://github.com/BridgeAR) -
<add>**Ruben Bridgewater** <[email protected]>
<ide> * [bzoz](https://github.com/bzoz) -
<ide> **Bartosz Sosnowski** <[email protected]>
<ide> * [calvinmetcalf](https://github.com/calvinmetcalf) - | 1 |
Text | Text | kick travis into action | c07cdbdaca16e226c2e042443994a7579964faf3 | <ide><path>README.md
<ide> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<ide> [markdown]: http://pypi.python.org/pypi/Markdown/
<ide> [pyyaml]: http://pypi.python.org/pypi/PyYAML
<ide> [django-filter]: http://pypi.python.org/pypi/django-filter
<del> | 1 |
Javascript | Javascript | add fallback shim for abortcontroller | 8dcedba15a4a0f5545e3b386355000208884b24f | <ide><path>packages/react-dom/src/__tests__/ReactDOMFizzServerBrowser-test.js
<ide> // Polyfills for test environment
<ide> global.ReadableStream = require('web-streams-polyfill/ponyfill/es6').ReadableStream;
<ide> global.TextEncoder = require('util').TextEncoder;
<del>global.AbortController = require('abort-controller');
<ide>
<ide> let React;
<ide> let ReactDOMFizzServer;
<ide><path>packages/react-reconciler/src/ReactFiberCacheComponent.new.js
<ide> import {REACT_CONTEXT_TYPE} from 'shared/ReactSymbols';
<ide> import {pushProvider, popProvider} from './ReactFiberNewContext.new';
<ide> import * as Scheduler from 'scheduler';
<ide>
<add>// In environments without AbortController (e.g. tests)
<add>// replace it with a lightweight shim that only has the features we use.
<add>const AbortControllerLocal = enableCache
<add> ? typeof AbortController !== 'undefined'
<add> ? AbortController
<add> : (function AbortControllerShim() {
<add> const listeners = [];
<add> const signal = (this.signal = {
<add> aborted: false,
<add> addEventListener: (type, listener) => {
<add> listeners.push(listener);
<add> },
<add> });
<add>
<add> this.abort = () => {
<add> signal.aborted = true;
<add> listeners.forEach(listener => listener());
<add> };
<add> }: AbortController)
<add> : (null: any);
<add>
<ide> export type Cache = {|
<del> controller: AbortController,
<add> controller: AbortControllerLocal,
<ide> data: Map<() => mixed, mixed>,
<ide> refCount: number,
<ide> |};
<ide> export function createCache(): Cache {
<ide> return (null: any);
<ide> }
<ide> const cache: Cache = {
<del> controller: new AbortController(),
<add> controller: new AbortControllerLocal(),
<ide> data: new Map(),
<ide> refCount: 0,
<ide> };
<ide><path>packages/react-reconciler/src/ReactFiberCacheComponent.old.js
<ide> import {REACT_CONTEXT_TYPE} from 'shared/ReactSymbols';
<ide> import {pushProvider, popProvider} from './ReactFiberNewContext.old';
<ide> import * as Scheduler from 'scheduler';
<ide>
<add>// In environments without AbortController (e.g. tests)
<add>// replace it with a lightweight shim that only has the features we use.
<add>const AbortControllerLocal = enableCache
<add> ? typeof AbortController !== 'undefined'
<add> ? AbortController
<add> : (function AbortControllerShim() {
<add> const listeners = [];
<add> const signal = (this.signal = {
<add> aborted: false,
<add> addEventListener: (type, listener) => {
<add> listeners.push(listener);
<add> },
<add> });
<add>
<add> this.abort = () => {
<add> signal.aborted = true;
<add> listeners.forEach(listener => listener());
<add> };
<add> }: AbortController)
<add> : (null: any);
<add>
<ide> export type Cache = {|
<del> controller: AbortController,
<add> controller: AbortControllerLocal,
<ide> data: Map<() => mixed, mixed>,
<ide> refCount: number,
<ide> |};
<ide> export function createCache(): Cache {
<ide> return (null: any);
<ide> }
<ide> const cache: Cache = {
<del> controller: new AbortController(),
<add> controller: new AbortControllerLocal(),
<ide> data: new Map(),
<ide> refCount: 0,
<ide> };
<ide><path>scripts/jest/setupEnvironment.js
<ide> /* eslint-disable */
<ide>
<del>const AbortController = require('abort-controller');
<del>
<ide> const NODE_ENV = process.env.NODE_ENV;
<ide> if (NODE_ENV !== 'development' && NODE_ENV !== 'production') {
<ide> throw new Error('NODE_ENV must either be set to development or production.');
<ide> global.__EXPERIMENTAL__ =
<ide>
<ide> global.__VARIANT__ = !!process.env.VARIANT;
<ide>
<del>global.AbortController = AbortController;
<del>
<ide> if (typeof window !== 'undefined') {
<ide> global.requestIdleCallback = function(callback) {
<ide> return setTimeout(() => { | 4 |
PHP | PHP | fix failing tests | fa8298dd83e4cc3c41c97933563676b98ac12b06 | <ide><path>Cake/Test/TestCase/ORM/Association/HasManyTest.php
<ide> public function testEagerLoaderWithDefaults() {
<ide> ->will($this->returnValue($query));
<ide>
<ide> $query->expects($this->once())->method('andWhere')
<del> ->with(['Article.author_id in' => $keys])
<add> ->with(['Article.author_id IN' => $keys])
<ide> ->will($this->returnValue($query));
<ide>
<ide> $query->expects($this->once())->method('order')
<ide> public function testEagerLoaderWithOverrides() {
<ide> ->will($this->returnValue($query));
<ide>
<ide> $query->expects($this->once())->method('andWhere')
<del> ->with(['Article.author_id in' => $keys])
<add> ->with(['Article.author_id IN' => $keys])
<ide> ->will($this->returnValue($query));
<ide>
<ide> $query->expects($this->once())->method('order')
<ide> public function testEagerLoaderSubquery() {
<ide> ->select('Article.author_id', true)
<ide> ->join($joins, [], true);
<ide> $query->expects($this->once())->method('andWhere')
<del> ->with(['Article.author_id in' => $expected])
<add> ->with(['Article.author_id IN' => $expected])
<ide> ->will($this->returnValue($query));
<ide>
<ide> $callable = $association->eagerLoader([ | 1 |
Text | Text | add draft of "consolidate core atom packages" rfc | 6479e7f6c01117fb0d8e76abf26b48c06a560800 | <ide><path>docs/rfcs/003-consolidate-core-packages.md
<add># Consolidate Core Atom Packages
<add>
<add>## Status
<add>
<add>Proposed
<add>
<add>## Summary
<add>
<add>Atom's official distribution is comprised of 91 core packages which provide its built-in functionality. These packages currently live in their own independent repositories in the Atom organization, all with their own separate issues, PRs, releases, and CI configurations. This RFC proposes that by consolidating most, if not all, of these core packages back into the `atom/atom` repo, we will see the following benefits:
<add>
<add>- Less confusion for new contributors
<add>- Simpler core package contribution experience
<add>- Greatly reduced burden for maintainers
<add>
<add>## Motivation
<add>
<add>Let's cover each of the bullet points mentioned above:
<add>
<add>### Less confusion for contributors
<add>
<add>Imagine that a new contributor wants to add a small new feature to the `tree-view` package. The first place they are likely to look is the `atom/atom` repository. Scanning through the folders will lead to a dead end, nothing that looks like `tree-view` code can be found. They might take one of the following steps next:
<add>
<add>- By reading README.md, maybe they will decide to click the link to the Atom Flight Manual and _maybe_ find the [Contributing to Official Atom Packages](https://flight-manual.atom.io/hacking-atom/sections/contributing-to-official-atom-packages/) page there.
<add>- They could read the CONTRIBUTING.md file which [has a section](https://github.com/atom/atom/blob/master/CONTRIBUTING.md#atom-and-packages) that explains where to find the repos for core packages and how to contribute, but we don't really have a clear pointer to that in our README.md
<add>- If they don't happen to find that page, they might use Google to search for "atom tree view" and find the atom/tree-view repo and _maybe_ read the CONTRIBUTING.md file which sends them to Atom's overall contribution documentation
<add>- They might go to the Atom Forum or Slack community to ask how to contribute
<add>
<add>Having all of the core Atom packages represented in a top-level `packages` folder, even if they don't actually live in the repo, will go a long way to making the core package code more discoverable.
<add>
<add>### Simpler core package contribution experience
<add>
<add>Separating core Atom features out into separate repositories and delivering them via `apm` is a great idea in theory because it validates the Atom package ecosystem and gives developers many examples of how to develop an Atom package. It also gives Atom developers real-world experience working with Atom's APIs so that we ensure community package authors have the same hackability that the Atom developers enjoy.
<add>
<add>On the other hand, having these packages live in separate repositories and released "independently" introduces a great deal of overhead when adding new features. Here is a comparison of the current package development workflow contrasted to what we could achieve with consolidated packages:
<add>
<add>#### Current Package Development Workflow
<add>
<add>For example, to add a single feature to the `tree-view` package, one must:
<add>
<add>1. Fork and clone the `tree-view` repository to their computer (making sure to pull the commit relevant to the version of Atom they are working with)
<add>1. Run `apm install` and `apm link` inside of the repo folder
<add>1. Make their desired changes to the code
<add>1. Open a PR to the `tree-view` repo and wait for CI to pass and a maintainer to review it
<add>1. Work with maintainers to get the PR approved and merged
<add>
<add>After this is finished, an Atom maintainer must take the following steps
<add>
<add>1. Clone the `tree-view` repo
<add>2. Run `apm publish` to publish a new release of the package
<add>3. Edit `package.json` in the Atom repo to reflect the new version of `tree-view`
<add>4. Commit and push the changes to the relevant branch where the change belongs (`master` or `1.nn-releases`)
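<add>
<add>To make the cost of steps 2 and 3 above concrete, the following is a minimal sketch of the `package.json` bump a maintainer performs after `apm publish`; it is a hypothetical helper rather than an existing Atom script, and it assumes the `packageDependencies` field that Atom's `package.json` uses to pin core package versions (the version number in the usage comment is made up).
<add>
<add>```js
<add>// Hypothetical helper (not an existing Atom script): bump the pinned version
<add>// of one core package in Atom's package.json after `apm publish` has run.
<add>const fs = require('fs');
<add>
<add>function bumpCorePackage(packageJsonPath, packageName, newVersion) {
<add>  const manifest = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
<add>  // `packageDependencies` maps core package names to their pinned versions.
<add>  manifest.packageDependencies[packageName] = newVersion;
<add>  fs.writeFileSync(packageJsonPath, JSON.stringify(manifest, null, 2) + '\n');
<add>}
<add>
<add>// Usage (hypothetical version number):
<add>// bumpCorePackage('package.json', 'tree-view', '0.221.0');
<add>```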
<add>
<add>If `tree-view` were moved into the `atom/atom` repository, most of this overhead would disappear, as described below.
<add>
<add>#### Simplified Package Development
<add>
<add>If we were to move `tree-view` (or any other core Atom package) back into `atom/atom`, the development workflow would look more like this:
<add>
<add>1. Fork and clone `atom/atom` and switch to a release branch if necessary
<add>1. Build Atom and launch it in dev mode
<add>1. Make desired changes to the code in `packages/tree-view`
<add>1. Open a PR on `atom/atom` and wait for CI to pass and a maintainer to review it
<add>1. Work with maintainers to get the PR approved and merged
<add>
<add>At this point, the change is merged into Atom and ready for inclusion in the next release.
<add>
<add>### Greatly reduced burden for maintainers
<add>
<add>Since packages all have their own repositories, this means that we have to watch 91 different repos for issues and pull requests. This also means that we have to redirect issues filed on `atom/atom` to the appropriate repository when a user doesn't know where it belongs. Even more importantly, there's not an easy way to prioritize and track issues across the Atom organization without using GitHub projects.
<add>
<add>Also, as mentioned above, there's the added duty of doing the package "version dance" when we merge any new PRs to a package repository: publish the package update, update `package.json` in Atom. It's very easy to forget to do this and not have community contributions included in the next Atom release!
<add>
<add>The more core packages live in `atom/atom`, the less work Atom maintainers have to do overall.
<add>
<add>## Explanation
<add>
<add>Many of Atom's core packages now live in the core `atom/atom` repository. To the Atom user, this change will be imperceptible as these packages still show up in the list of Core Packages in the Settings View. For maintainers and contributors, there will be less juggling of repositories and no more publishing of updates to these packages with `apm`.
<add>
<add>Contributors now clone and build `atom/atom` to work on improvements to core packages. They will no longer have to use `apm link` in dev mode to test changes they make to packages in the repo's `packages` folder.
<add>
<add>When a contributor sends a PR to `atom/atom` that only affects files in a folder under `packages`, only the specs for the relevant package folders will be executed using Atom's CI scripts. This means that a full Atom build will not be required when no Atom Core code is changed in a PR. Package specs are also now run against all 3 OSes on Atom `master` and release builds.
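<add>
<add>This RFC does not prescribe exactly how the CI scripts decide which specs to run; as a rough sketch only (not Atom's actual CI implementation), detecting the touched package folders could look something like this:
<add>
<add>```js
<add>// Rough sketch (not Atom's actual CI script): determine which core packages a
<add>// PR touches so that only those packages' specs need to run.
<add>const {execSync} = require('child_process');
<add>
<add>function changedCorePackages(baseRef = 'origin/master') {
<add>  const output = execSync(`git diff --name-only ${baseRef}...HEAD`, {
<add>    encoding: 'utf8',
<add>  });
<add>  const packages = new Set();
<add>  let touchesCore = false;
<add>  for (const file of output.split('\n').filter(Boolean)) {
<add>    const match = file.match(/^packages\/([^/]+)\//);
<add>    if (match) {
<add>      packages.add(match[1]); // run only this package's specs
<add>    } else {
<add>      touchesCore = true; // anything outside packages/ needs a full build
<add>    }
<add>  }
<add>  return {packages: [...packages], touchesCore};
<add>}
<add>```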
<add>
<add>Core packages that aren't consolidated still have folders under `packages` with README.md files that point to the home repository for that package.
<add>
<add>## Drawbacks
<add>
<add>One possible drawback of this approach is that there might be some initial confusion where core Atom packages live, especially if some are consolidated into `atom/atom` and others still live in their own repositories. We will manage this confusion by doing the following:
<add>
<add>- Include folders for _all_ core packages in the `packages` folder of the Atom repo and add README.md files to folders of those packages that still live in separate repos. This will allow us to direct users to the proper home for packages that are not yet consolidated.
<add>
<add>- Archive the repositories for consolidated core packages, but only after migrating existing issues, merging or closing existing PRs, and updating the README.md to point to the new home of the package code.
<add>
<add>Also, contributors will now have to fork, clone, and build `atom/atom` to contribute to core packages where they would previously just need to clone the package repository. This might put added burden on them such as installing necessary build dependencies on their machine that they wouldn't otherwise need. It is very likely we could simplify this process for them, though.
<add>
<add>One final drawback is that it will now be harder to have single-package maintainers. We currently have 7 core packages where there is a maintainer who isn't a part of the core Atom maintainers team. These maintainers generally are able to merge community PRs and make commits to those packages with their own judgement. If we get rid of individual package repositories, do we now make those maintainers full Atom maintainers?
<add>
<add>## Rationale and alternatives
<add>
<add>The Motivation section explains most of the rationale, so this section will focus on the process of consolidating packages back into `atom/atom`. The set of packages we've chosen to consolidate were evaluated based on a few factors:
<add>
<add>- Number of open issues and PRs (exclude any with > 10 open PRs)
<add>- Time since last update (longer duration since last update is prioritized)
<add>- Number of package-only maintainers on the repo (exclude any with package maintainers for now)
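<add>
<add>As a rough illustration only (the categorization in the tables below was done by hand, not by a script), these criteria could be expressed over a hypothetical list of package metadata like this:
<add>
<add>```js
<add>// Illustration of the selection criteria above, applied to hypothetical
<add>// package metadata objects: {name, openPrs, outsideMaintainers, lastUpdated}.
<add>function isConsolidationCandidate(pkg) {
<add>  const tooManyOpenPrs = pkg.openPrs > 10;
<add>  const hasPackageMaintainer = pkg.outsideMaintainers > 0;
<add>  return !tooManyOpenPrs && !hasPackageMaintainer;
<add>}
<add>
<add>function sortByStaleness(packages) {
<add>  // Packages that have not been updated for the longest time sort first.
<add>  return [...packages].sort(
<add>    (a, b) => new Date(a.lastUpdated) - new Date(b.lastUpdated)
<add>  );
<add>}
<add>```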
<add>
<add>Using these criteria, all 91 packages have been evaluated and categorized to determine whether they are good candidates for consolidation:
<add>
<add>#### Initial Consolidation Candidates
<add>
<add>| Package | Open Issues | Open PRs | Outside Maintainers | Last Updated |
<add>|---------|-------------|----------|---------------------| -------------|
<add>| **[about]** | 2 | 0 | 0 | 7/11/18 |
<add>| **[archive-view]** | 10 | 0 | 0 | 6/3/18 |
<add>| **[atom-dark-syntax]** | 5 | 0 | 0 | 12/6/17 |
<add>| **[atom-dark-ui]** | 1 | 2 | 0 | 2/13/18 |
<add>| **[atom-light-syntax]** | 1 | 0 | 0 | 10/17/16 |
<add>| **[atom-light-ui]** | 1 | 0 | 0 | 2/13/18 |
<add>| **[autoflow]** | 17 | 4 | 0 | 4/17/18 |
<add>| **[autosave]** | 13 | 0 | 0 | 9/16/17 |
<add>| **[background-tips]** | 3 | 2 | 0 | 2/17/18 |
<add>| **[base16-tomorrow-dark-theme]** | 5 | 0 | 0 | 1/10/17 |
<add>| **[base16-tomorrow-light-theme]** | 1 | 0 | 0 | 1/10/17 |
<add>| **[bookmarks]** | 19 | 4 | 0 | 12/10/17 |
<add>| **[bracket-matcher]** | 74 | 8 | 0 | 3/20/18 |
<add>| **[command-palette]** | 18 | 6 | 0 | 2/27/18 |
<add>| **[dalek]** | 2 | 0 | 0 | 2/28/18 |
<add>| **[deprecation-cop]** | 5 | 0 | 0 | 9/7/17 |
<add>| **[dev-live-reload]** | 4 | 0 | 0 | 11/14/17 |
<add>| **[encoding-selector]** | 11 | 2 | 0 | 4/19/18 |
<add>| **[exception-reporting]** | 5 | 0 | 0 | 2/6/18 |
<add>| **[git-diff]** | 38 | 1 | 0 | 1/18/18 |
<add>| **[go-to-line]** | 5 | 2 | 0 | 1/25/18 |
<add>| **[grammar-selector]** | 3 | 1 | 0 | 4/12/18 |
<add>| **[image-view]** | 4 | 4 | 0 | 7/9/18 |
<add>| **[incompatible-packages]** | 1 | 0 | 0 | 4/25/17 |
<add>| **[keybinding-resolver]** | 11 | 3 | 0 | 7/6/18 |
<add>| **[language-clojure]** | 13 | 3 | 0 | 1/26/18 |
<add>| **[language-coffee-script]** | 9 | 2 | 0 | 11/1/17 |
<add>| **[language-csharp]** | 1 | 1 | 0 | 4/27/18 |
<add>| **[language-css]** | 6 | 7 | 0 | 6/11/18 |
<add>| **[language-gfm]** | 52 | 9 | 0 | 6/15/18 |
<add>| **[language-git]** | 4 | 2 | 0 | 4/18/17 |
<add>| **[language-html]** | 11 | 4 | 0 | 7/5/18 |
<add>| **[language-hyperlink]** | 2 | 3 | 0 | 10/25/17 |
<add>| **[language-json]** | 1 | 0 | 0 | 5/11/18 |
<add>| **[language-less]** | 5 | 1 | 0 | 6/11/18 |
<add>| **[language-make]** | 7 | 3 | 0 | 11/26/16 |
<add>| **[language-mustache]** | 0 | 0 | 0 | 2/5/18 |
<add>| **[language-objective-c]** | 2 | 0 | 0 | 12/1/15 |
<add>| **[language-php]** | 25 | 7 | 0 | 6/11/18 |
<add>| **[language-property-list]** | 1 | 0 | 0 | 3/11/17 |
<add>| **[language-python]** | 33 | 4 | 0 | 6/18/18 |
<add>| **[language-ruby]** | 38 | 10 | 0 | 10/25/17 |
<add>| **[language-ruby-on-rails]** | 9 | 6 | 0 | 12/7/17 |
<add>| **[language-sass]** | 12 | 5 | 0 | 5/2/18 |
<add>| **[language-shellscript]** | 12 | 3 | 0 | 6/18/18 |
<add>| **[language-source]** | 0 | 0 | 0 | 1/6/15 |
<add>| **[language-sql]** | 6 | 4 | 0 | 1/26/18 |
<add>| **[language-text]** | 1 | 0 | 0 | 3/9/18 |
<add>| **[language-todo]** | 10 | 6 | 0 | 1/26/18 |
<add>| **[language-toml]** | 1 | 0 | 0 | 1/6/18 |
<add>| **[language-typescript]** | 6 | 0 | 0 | 6/18/18 |
<add>| **[language-xml]** | 2 | 1 | 0 | 6/12/17 |
<add>| **[language-yaml]** | 8 | 2 | 0 | 3/9/18 |
<add>| **[line-ending-selector]** | 10 | 0 | 0 | 5/18/18 |
<add>| **[link]** | 0 | 1 | 0 | 11/14/17 |
<add>| **[metrics]** | 1 | 2 | 0 | 7/5/18 |
<add>| **[notifications]** | 29 | 8 | 0 | 3/22/18 |
<add>| **[one-dark-syntax]** | 4 | 0 | 0 | 5/27/18 |
<add>| **[one-dark-ui]** | 13 | 1 | 0 | 5/1/18 |
<add>| **[one-light-syntax]** | 2 | 1 | 0 | 5/27/18 |
<add>| **[one-light-ui]** | 2 | 0 | 0 | 5/1/18 |
<add>| **[open-on-github]** | 8 | 3 | 0 | 11/21/17 |
<add>| **[package-generator]** | 10 | 2 | 0 | 11/16/17 |
<add>| **[status-bar]** | 25 | 3 | 0 | 11/6/17 |
<add>| **[styleguide]** | 12 | 2 | 0 | 4/12/18 |
<add>| **[tabs]** | 66 | 7 | 0 | 5/13/18 |
<add>| **[timecop]** | 5 | 0 | 0 | 11/4/17 |
<add>| **[update-package-dependencies]** | 0 | 0 | 0 | 12/10/17 |
<add>| **[welcome]** | 0 | 0 | 0 | 11/21/17 |
<add>| **[whitespace]** | 31 | 6 | 0 | 5/30/18 |
<add>| **[wrap-guide]** | 3 | 4 | 0 | 11/27/17 |
<add>
<add>#### Packages Consolidated Later
<add>
<add>The following packages will not be consolidated until the stated reasons can be resolved or we decide on a consolidation strategy for them:
<add>
<add>| Package | Open Issues | Open PRs | Outside Maintainers | Last Updated | Reason |
<add>|---------|-------------|----------|---------------------|--------------|-------|
<add>| **[find-and-replace]** | 219 | 17 | 0 | 6/4/18 | Too many open PRs |
<add>| **[fuzzy-finder]** | 89 | 22 | 0 | 5/17/18 | Too many open PRs |
<add>| **[language-c]** | 53 | 15 | 0 | 7/10/18 | Too many open PRs |
<add>| **[language-go]** | 12 | 2 | **1** | 6/18/18 | Package maintainer, possibly inactive? |
<add>| **[language-java]** | 8 | 2 | **1** | 6/11/18 | Package maintainer |
<add>| **[language-javascript]** | 66 | 12 | 0 | 7/6/18 | Too many open PRs |
<add>| **[language-perl]** | 17 | 1 | **1** | 10/30/17 | Package maintainer, possibly inactive? |
<add>| **[markdown-preview]** | 139 | 12 | 0 | 1/8/18 | Too many open PRs |
<add>| **[settings-view]** | 137 | 18 | 0 | 5/17/18 | Too many open PRs |
<add>| **[snippets]** | 57 | 4 | **1** | 4/17/18 | Package maintainer |
<add>| **[solarized-dark-syntax]** | 8 | 3 | **1** | 5/27/18 | Package maintainer |
<add>| **[solarized-light-syntax]** | 2 | 3 | **1** | 5/27/18 | Package maintainer |
<add>| **[spell-check]** | 68 | 14 | **1** | 5/25/18 | Too many open PRs, package maintainer |
<add>| **[symbols-view]** | 86 | 13 | 0 | 12/10/17 | Too many open PRs |
<add>| **[tree-view]** | 210 | 36 | 0 | 3/21/18 | Too many open PRs |
<add>
<add>#### Packages to Never Consolidate
<add>
<add>These packages will not be consolidated for the following reasons:
<add>
<add>| Package | Open Issues | Open PRs | Outside Maintainers | Last Updated | Reason |
<add>|---------|-------------|----------|---------------------|--------------|-------|
<add>| **[autocomplete-atom-api]** | | | | | Blocks contribution from Facebook |
<add>| **[autocomplete-css]** | | | | | Same as above |
<add>| **[autocomplete-html]** | | | | | Same as above |
<add>| **[autocomplete-plus]** | | | | | Same as above |
<add>| **[autocomplete-snippets]** | | | | | Same as above |
<add>| **[github]** | | | | | Independent project |
<add>
<add>### Consolidation Process
<add>
<add>To consolidate a single core package repository back into `atom/atom`, the following steps will be taken:
<add>
<add>1. All open pull requests on the package's repository must either be closed or merged before consolidation can proceed
<add>1. The package repository's code in `master` will be copied over to a subfolder in Atom's `packages` folder with a subfolder bearing that package's name.
<add>1. A test CI build will be run to ensure that the package loads and works correctly at first glance
<add>1. The package's original repository will have all of its existing issues moved over to `atom/atom` using a bulk issue mover tool
<add>1. The package's original repository will have its README.md updated to point contributors to the code's new home in `atom/atom`
<add>1. The package's original repository will now be archived
<add>
<add>### Alternative Approaches
<add>
<add>We haven't yet identified another approach which allows us to achieve the goals set forth in this RFC without consolidating these packages into `atom/atom`.
<add>
<add>## Unresolved questions
<add>
<add>- What are the criteria we might use to eventually decide to move larger packages like `tree-view`, `settings-view`, and `find-and-replace` back into `atom/atom`?
<add>
<add>- Is there a good reason to not move the `language-*` packages into `atom/atom`?
<add>
<add>- Will we be losing any useful data about these packages if we don't have standalone repositories anymore?
<add>
<add>[about]: https://github.com/atom/about
<add>[archive-view]: https://github.com/atom/archive-view
<add>[atom-dark-syntax]: https://github.com/atom/atom-dark-syntax
<add>[atom-dark-ui]: https://github.com/atom/atom-dark-ui
<add>[atom-light-syntax]: https://github.com/atom/atom-light-syntax
<add>[atom-light-ui]: https://github.com/atom/atom-light-ui
<add>[autocomplete-atom-api]: https://github.com/atom/autocomplete-atom-api
<add>[autocomplete-css]: https://github.com/atom/autocomplete-css
<add>[autocomplete-html]: https://github.com/atom/autocomplete-html
<add>[autocomplete-plus]: https://github.com/atom/autocomplete-plus
<add>[autocomplete-snippets]: https://github.com/atom/autocomplete-snippets
<add>[autoflow]: https://github.com/atom/autoflow
<add>[autosave]: https://github.com/atom/autosave
<add>[background-tips]: https://github.com/atom/background-tips
<add>[base16-tomorrow-dark-theme]: https://github.com/atom/base16-tomorrow-dark-theme
<add>[base16-tomorrow-light-theme]: https://github.com/atom/base16-tomorrow-light-theme
<add>[bookmarks]: https://github.com/atom/bookmarks
<add>[bracket-matcher]: https://github.com/atom/bracket-matcher
<add>[command-palette]: https://github.com/atom/command-palette
<add>[dalek]: https://github.com/atom/dalek
<add>[deprecation-cop]: https://github.com/atom/deprecation-cop
<add>[dev-live-reload]: https://github.com/atom/dev-live-reload
<add>[encoding-selector]: https://github.com/atom/encoding-selector
<add>[exception-reporting]: https://github.com/atom/exception-reporting
<add>[find-and-replace]: https://github.com/atom/find-and-replace
<add>[fuzzy-finder]: https://github.com/atom/fuzzy-finder
<add>[git-diff]: https://github.com/atom/git-diff
<add>[github]: https://github.com/atom/github
<add>[go-to-line]: https://github.com/atom/go-to-line
<add>[grammar-selector]: https://github.com/atom/grammar-selector
<add>[image-view]: https://github.com/atom/image-view
<add>[incompatible-packages]: https://github.com/atom/incompatible-packages
<add>[keybinding-resolver]: https://github.com/atom/keybinding-resolver
<add>[language-c]: https://github.com/atom/language-c
<add>[language-clojure]: https://github.com/atom/language-clojure
<add>[language-coffee-script]: https://github.com/atom/language-coffee-script
<add>[language-csharp]: https://github.com/atom/language-csharp
<add>[language-css]: https://github.com/atom/language-css
<add>[language-gfm]: https://github.com/atom/language-gfm
<add>[language-git]: https://github.com/atom/language-git
<add>[language-go]: https://github.com/atom/language-go
<add>[language-html]: https://github.com/atom/language-html
<add>[language-hyperlink]: https://github.com/atom/language-hyperlink
<add>[language-java]: https://github.com/atom/language-java
<add>[language-javascript]: https://github.com/atom/language-javascript
<add>[language-json]: https://github.com/atom/language-json
<add>[language-less]: https://github.com/atom/language-less
<add>[language-make]: https://github.com/atom/language-make
<add>[language-mustache]: https://github.com/atom/language-mustache
<add>[language-objective-c]: https://github.com/atom/language-objective-c
<add>[language-perl]: https://github.com/atom/language-perl
<add>[language-php]: https://github.com/atom/language-php
<add>[language-property-list]: https://github.com/atom/language-property-list
<add>[language-python]: https://github.com/atom/language-python
<add>[language-ruby]: https://github.com/atom/language-ruby
<add>[language-ruby-on-rails]: https://github.com/atom/language-ruby-on-rails
<add>[language-sass]: https://github.com/atom/language-sass
<add>[language-shellscript]: https://github.com/atom/language-shellscript
<add>[language-source]: https://github.com/atom/language-source
<add>[language-sql]: https://github.com/atom/language-sql
<add>[language-text]: https://github.com/atom/language-text
<add>[language-todo]: https://github.com/atom/language-todo
<add>[language-toml]: https://github.com/atom/language-toml
<add>[language-typescript]: https://github.com/atom/language-typescript
<add>[language-xml]: https://github.com/atom/language-xml
<add>[language-yaml]: https://github.com/atom/language-yaml
<add>[line-ending-selector]: https://github.com/atom/line-ending-selector
<add>[link]: https://github.com/atom/link
<add>[markdown-preview]: https://github.com/atom/markdown-preview
<add>[metrics]: https://github.com/atom/metrics
<add>[notifications]: https://github.com/atom/notifications
<add>[one-dark-syntax]: https://github.com/atom/one-dark-syntax
<add>[one-dark-ui]: https://github.com/atom/one-dark-ui
<add>[one-light-syntax]: https://github.com/atom/one-light-syntax
<add>[one-light-ui]: https://github.com/atom/one-light-ui
<add>[open-on-github]: https://github.com/atom/open-on-github
<add>[package-generator]: https://github.com/atom/package-generator
<add>[settings-view]: https://github.com/atom/settings-view
<add>[snippets]: https://github.com/atom/snippets
<add>[solarized-dark-syntax]: https://github.com/atom/solarized-dark-syntax
<add>[solarized-light-syntax]: https://github.com/atom/solarized-light-syntax
<add>[spell-check]: https://github.com/atom/spell-check
<add>[status-bar]: https://github.com/atom/status-bar
<add>[styleguide]: https://github.com/atom/styleguide
<add>[symbols-view]: https://github.com/atom/symbols-view
<add>[tabs]: https://github.com/atom/tabs
<add>[timecop]: https://github.com/atom/timecop
<add>[tree-view]: https://github.com/atom/tree-view
<add>[update-package-dependencies]: https://github.com/atom/update-package-dependencies
<add>[welcome]: https://github.com/atom/welcome
<add>[whitespace]: https://github.com/atom/whitespace
<add>[wrap-guide]: https://github.com/atom/wrap-guide | 1 |
Text | Text | fix typo in actionpack changelog [ci skip] | 4824971f36a92ca152d618aac5a4e8a5dd54422d | <ide><path>actionpack/CHANGELOG.md
<ide> *Edouard Chin*
<ide>
<ide> * Allow `ActionController::Parameters` instances as an argument to URL
<del> helper methods. An `ArguemntError` will be raised if the passed parameters
<add> helper methods. An `ArgumentError` will be raised if the passed parameters
<ide> are not secure.
<ide>
<ide> Fixes #22832 | 1 |
Javascript | Javascript | convert var to let/const | 2decfe97dc74b580f2b6e4b0af38c43708773cc6 | <ide><path>packages/events/EventPluginHub.js
<ide> import type {AnyNativeEvent} from './PluginModuleType';
<ide> * Internal queue of events that have accumulated their dispatches and are
<ide> * waiting to have their dispatches executed.
<ide> */
<del>var eventQueue: ?(Array<ReactSyntheticEvent> | ReactSyntheticEvent) = null;
<add>let eventQueue: ?(Array<ReactSyntheticEvent> | ReactSyntheticEvent) = null;
<ide>
<ide> /**
<ide> * Dispatches an event and releases it back into the pool, unless persistent.
<ide> var eventQueue: ?(Array<ReactSyntheticEvent> | ReactSyntheticEvent) = null;
<ide> * @param {boolean} simulated If the event is simulated (changes exn behavior)
<ide> * @private
<ide> */
<del>var executeDispatchesAndRelease = function(
<add>const executeDispatchesAndRelease = function(
<ide> event: ReactSyntheticEvent,
<ide> simulated: boolean,
<ide> ) {
<ide> var executeDispatchesAndRelease = function(
<ide> }
<ide> }
<ide> };
<del>var executeDispatchesAndReleaseSimulated = function(e) {
<add>const executeDispatchesAndReleaseSimulated = function(e) {
<ide> return executeDispatchesAndRelease(e, true);
<ide> };
<del>var executeDispatchesAndReleaseTopLevel = function(e) {
<add>const executeDispatchesAndReleaseTopLevel = function(e) {
<ide> return executeDispatchesAndRelease(e, false);
<ide> };
<ide>
<ide> export const injection = {
<ide> * @return {?function} The stored callback.
<ide> */
<ide> export function getListener(inst: Fiber, registrationName: string) {
<del> var listener;
<add> let listener;
<ide>
<ide> // TODO: shouldPreventMouseEvent is DOM-specific and definitely should not
<ide> // live here; needs to be moved to a better place soon
<ide> export function extractEvents(
<ide> nativeEvent: AnyNativeEvent,
<ide> nativeEventTarget: EventTarget,
<ide> ) {
<del> var events;
<del> for (var i = 0; i < plugins.length; i++) {
<add> let events;
<add> for (let i = 0; i < plugins.length; i++) {
<ide> // Not every plugin in the ordering may be loaded at runtime.
<del> var possiblePlugin: PluginModule<AnyNativeEvent> = plugins[i];
<add> const possiblePlugin: PluginModule<AnyNativeEvent> = plugins[i];
<ide> if (possiblePlugin) {
<del> var extractedEvents = possiblePlugin.extractEvents(
<add> const extractedEvents = possiblePlugin.extractEvents(
<ide> topLevelType,
<ide> targetInst,
<ide> nativeEvent,
<ide> export function enqueueEvents(
<ide> export function processEventQueue(simulated: boolean) {
<ide> // Set `eventQueue` to null before processing it so that we can tell if more
<ide> // events get enqueued while processing.
<del> var processingEventQueue = eventQueue;
<add> const processingEventQueue = eventQueue;
<ide> eventQueue = null;
<ide>
<ide> if (!processingEventQueue) {
<ide><path>packages/events/EventPluginRegistry.js
<ide> import lowPriorityWarning from 'shared/lowPriorityWarning';
<ide> type NamesToPlugins = {[key: PluginName]: PluginModule<AnyNativeEvent>};
<ide> type EventPluginOrder = null | Array<PluginName>;
<ide>
<del>var shouldWarnOnInjection = false;
<add>let shouldWarnOnInjection = false;
<ide>
<ide> /**
<ide> * Injectable ordering of event plugins.
<ide> */
<del>var eventPluginOrder: EventPluginOrder = null;
<add>let eventPluginOrder: EventPluginOrder = null;
<ide>
<ide> /**
<ide> * Injectable mapping from names to event plugin modules.
<ide> */
<del>var namesToPlugins: NamesToPlugins = {};
<add>const namesToPlugins: NamesToPlugins = {};
<ide>
<ide> export function enableWarningOnInjection() {
<ide> shouldWarnOnInjection = true;
<ide> function recomputePluginOrdering(): void {
<ide> // Wait until an `eventPluginOrder` is injected.
<ide> return;
<ide> }
<del> for (var pluginName in namesToPlugins) {
<del> var pluginModule = namesToPlugins[pluginName];
<del> var pluginIndex = eventPluginOrder.indexOf(pluginName);
<add> for (const pluginName in namesToPlugins) {
<add> const pluginModule = namesToPlugins[pluginName];
<add> const pluginIndex = eventPluginOrder.indexOf(pluginName);
<ide> invariant(
<ide> pluginIndex > -1,
<ide> 'EventPluginRegistry: Cannot inject event plugins that do not exist in ' +
<ide> function recomputePluginOrdering(): void {
<ide> pluginName,
<ide> );
<ide> plugins[pluginIndex] = pluginModule;
<del> var publishedEvents = pluginModule.eventTypes;
<del> for (var eventName in publishedEvents) {
<add> const publishedEvents = pluginModule.eventTypes;
<add> for (const eventName in publishedEvents) {
<ide> invariant(
<ide> publishEventForPlugin(
<ide> publishedEvents[eventName],
<ide> function publishEventForPlugin(
<ide> );
<ide> eventNameDispatchConfigs[eventName] = dispatchConfig;
<ide>
<del> var phasedRegistrationNames = dispatchConfig.phasedRegistrationNames;
<add> const phasedRegistrationNames = dispatchConfig.phasedRegistrationNames;
<ide> if (phasedRegistrationNames) {
<del> for (var phaseName in phasedRegistrationNames) {
<add> for (const phaseName in phasedRegistrationNames) {
<ide> if (phasedRegistrationNames.hasOwnProperty(phaseName)) {
<del> var phasedRegistrationName = phasedRegistrationNames[phaseName];
<add> const phasedRegistrationName = phasedRegistrationNames[phaseName];
<ide> publishRegistrationName(
<ide> phasedRegistrationName,
<ide> pluginModule,
<ide> function publishRegistrationName(
<ide> pluginModule.eventTypes[eventName].dependencies;
<ide>
<ide> if (__DEV__) {
<del> var lowerCasedName = registrationName.toLowerCase();
<add> const lowerCasedName = registrationName.toLowerCase();
<ide> possibleRegistrationNames[lowerCasedName] = registrationName;
<ide>
<ide> if (registrationName === 'onDoubleClick') {
<ide> export function injectEventPluginsByName(
<ide> }
<ide> }
<ide>
<del> var isOrderingDirty = false;
<del> for (var pluginName in injectedNamesToPlugins) {
<add> let isOrderingDirty = false;
<add> for (const pluginName in injectedNamesToPlugins) {
<ide> if (!injectedNamesToPlugins.hasOwnProperty(pluginName)) {
<ide> continue;
<ide> }
<del> var pluginModule = injectedNamesToPlugins[pluginName];
<add> const pluginModule = injectedNamesToPlugins[pluginName];
<ide> if (
<ide> !namesToPlugins.hasOwnProperty(pluginName) ||
<ide> namesToPlugins[pluginName] !== pluginModule
<ide><path>packages/events/EventPluginUtils.js
<ide> export function isStartish(topLevelType) {
<ide> return topLevelType === 'topMouseDown' || topLevelType === 'topTouchStart';
<ide> }
<ide>
<del>var validateEventDispatches;
<add>let validateEventDispatches;
<ide> if (__DEV__) {
<ide> validateEventDispatches = function(event) {
<del> var dispatchListeners = event._dispatchListeners;
<del> var dispatchInstances = event._dispatchInstances;
<add> const dispatchListeners = event._dispatchListeners;
<add> const dispatchInstances = event._dispatchInstances;
<ide>
<del> var listenersIsArr = Array.isArray(dispatchListeners);
<del> var listenersLen = listenersIsArr
<add> const listenersIsArr = Array.isArray(dispatchListeners);
<add> const listenersLen = listenersIsArr
<ide> ? dispatchListeners.length
<ide> : dispatchListeners ? 1 : 0;
<ide>
<del> var instancesIsArr = Array.isArray(dispatchInstances);
<del> var instancesLen = instancesIsArr
<add> const instancesIsArr = Array.isArray(dispatchInstances);
<add> const instancesLen = instancesIsArr
<ide> ? dispatchInstances.length
<ide> : dispatchInstances ? 1 : 0;
<ide>
<ide> if (__DEV__) {
<ide> * @param {*} inst Internal component instance
<ide> */
<ide> function executeDispatch(event, simulated, listener, inst) {
<del> var type = event.type || 'unknown-event';
<add> const type = event.type || 'unknown-event';
<ide> event.currentTarget = getNodeFromInstance(inst);
<ide> ReactErrorUtils.invokeGuardedCallbackAndCatchFirstError(
<ide> type,
<ide> function executeDispatch(event, simulated, listener, inst) {
<ide> * Standard/simple iteration through an event's collected dispatches.
<ide> */
<ide> export function executeDispatchesInOrder(event, simulated) {
<del> var dispatchListeners = event._dispatchListeners;
<del> var dispatchInstances = event._dispatchInstances;
<add> const dispatchListeners = event._dispatchListeners;
<add> const dispatchInstances = event._dispatchInstances;
<ide> if (__DEV__) {
<ide> validateEventDispatches(event);
<ide> }
<ide> if (Array.isArray(dispatchListeners)) {
<del> for (var i = 0; i < dispatchListeners.length; i++) {
<add> for (let i = 0; i < dispatchListeners.length; i++) {
<ide> if (event.isPropagationStopped()) {
<ide> break;
<ide> }
<ide> export function executeDispatchesInOrder(event, simulated) {
<ide> * true, or null if no listener returned true.
<ide> */
<ide> function executeDispatchesInOrderStopAtTrueImpl(event) {
<del> var dispatchListeners = event._dispatchListeners;
<del> var dispatchInstances = event._dispatchInstances;
<add> const dispatchListeners = event._dispatchListeners;
<add> const dispatchInstances = event._dispatchInstances;
<ide> if (__DEV__) {
<ide> validateEventDispatches(event);
<ide> }
<ide> if (Array.isArray(dispatchListeners)) {
<del> for (var i = 0; i < dispatchListeners.length; i++) {
<add> for (let i = 0; i < dispatchListeners.length; i++) {
<ide> if (event.isPropagationStopped()) {
<ide> break;
<ide> }
<ide> function executeDispatchesInOrderStopAtTrueImpl(event) {
<ide> * @see executeDispatchesInOrderStopAtTrueImpl
<ide> */
<ide> export function executeDispatchesInOrderStopAtTrue(event) {
<del> var ret = executeDispatchesInOrderStopAtTrueImpl(event);
<add> const ret = executeDispatchesInOrderStopAtTrueImpl(event);
<ide> event._dispatchInstances = null;
<ide> event._dispatchListeners = null;
<ide> return ret;
<ide> export function executeDirectDispatch(event) {
<ide> if (__DEV__) {
<ide> validateEventDispatches(event);
<ide> }
<del> var dispatchListener = event._dispatchListeners;
<del> var dispatchInstance = event._dispatchInstances;
<add> const dispatchListener = event._dispatchListeners;
<add> const dispatchInstance = event._dispatchInstances;
<ide> invariant(
<ide> !Array.isArray(dispatchListener),
<ide> 'executeDirectDispatch(...): Invalid `event`.',
<ide> );
<ide> event.currentTarget = dispatchListener
<ide> ? getNodeFromInstance(dispatchInstance)
<ide> : null;
<del> var res = dispatchListener ? dispatchListener(event) : null;
<add> const res = dispatchListener ? dispatchListener(event) : null;
<ide> event.currentTarget = null;
<ide> event._dispatchListeners = null;
<ide> event._dispatchInstances = null;
<ide><path>packages/events/EventPropagators.js
<ide> type PropagationPhases = 'bubbled' | 'captured';
<ide> * "phases" of propagation. This finds listeners by a given phase.
<ide> */
<ide> function listenerAtPhase(inst, event, propagationPhase: PropagationPhases) {
<del> var registrationName =
<add> const registrationName =
<ide> event.dispatchConfig.phasedRegistrationNames[propagationPhase];
<ide> return getListener(inst, registrationName);
<ide> }
<ide> function accumulateDirectionalDispatches(inst, phase, event) {
<ide> if (__DEV__) {
<ide> warning(inst, 'Dispatching inst must not be null');
<ide> }
<del> var listener = listenerAtPhase(inst, event, phase);
<add> const listener = listenerAtPhase(inst, event, phase);
<ide> if (listener) {
<ide> event._dispatchListeners = accumulateInto(
<ide> event._dispatchListeners,
<ide> function accumulateTwoPhaseDispatchesSingle(event) {
<ide> */
<ide> function accumulateTwoPhaseDispatchesSingleSkipTarget(event) {
<ide> if (event && event.dispatchConfig.phasedRegistrationNames) {
<del> var targetInst = event._targetInst;
<del> var parentInst = targetInst ? getParentInstance(targetInst) : null;
<add> const targetInst = event._targetInst;
<add> const parentInst = targetInst ? getParentInstance(targetInst) : null;
<ide> traverseTwoPhase(parentInst, accumulateDirectionalDispatches, event);
<ide> }
<ide> }
<ide> function accumulateTwoPhaseDispatchesSingleSkipTarget(event) {
<ide> */
<ide> function accumulateDispatches(inst, ignoredDirection, event) {
<ide> if (inst && event && event.dispatchConfig.registrationName) {
<del> var registrationName = event.dispatchConfig.registrationName;
<del> var listener = getListener(inst, registrationName);
<add> const registrationName = event.dispatchConfig.registrationName;
<add> const listener = getListener(inst, registrationName);
<ide> if (listener) {
<ide> event._dispatchListeners = accumulateInto(
<ide> event._dispatchListeners,
<ide><path>packages/events/ReactControlledComponent.js
<ide> import {
<ide>
<ide> // Use to restore controlled state after a change event has fired.
<ide>
<del>var fiberHostComponent = null;
<add>let fiberHostComponent = null;
<ide>
<del>var ReactControlledComponentInjection = {
<add>const ReactControlledComponentInjection = {
<ide> injectFiberControlledHostComponent: function(hostComponentImpl) {
<ide> // The fiber implementation doesn't use dynamic dispatch so we need to
<ide> // inject the implementation.
<ide> fiberHostComponent = hostComponentImpl;
<ide> },
<ide> };
<ide>
<del>var restoreTarget = null;
<del>var restoreQueue = null;
<add>let restoreTarget = null;
<add>let restoreQueue = null;
<ide>
<ide> function restoreStateOfTarget(target) {
<ide> // We perform this translation at the end of the event loop so that we
<ide> // always receive the correct fiber here
<del> var internalInstance = getInstanceFromNode(target);
<add> const internalInstance = getInstanceFromNode(target);
<ide> if (!internalInstance) {
<ide> // Unmounted
<ide> return;
<ide> export function restoreStateIfNeeded() {
<ide> if (!restoreTarget) {
<ide> return;
<ide> }
<del> var target = restoreTarget;
<del> var queuedTargets = restoreQueue;
<add> const target = restoreTarget;
<add> const queuedTargets = restoreQueue;
<ide> restoreTarget = null;
<ide> restoreQueue = null;
<ide>
<ide> restoreStateOfTarget(target);
<ide> if (queuedTargets) {
<del> for (var i = 0; i < queuedTargets.length; i++) {
<add> for (let i = 0; i < queuedTargets.length; i++) {
<ide> restoreStateOfTarget(queuedTargets[i]);
<ide> }
<ide> }
<ide><path>packages/events/ReactEventEmitterMixin.js
<ide> export function handleTopLevel(
<ide> nativeEvent,
<ide> nativeEventTarget,
<ide> ) {
<del> var events = extractEvents(
<add> const events = extractEvents(
<ide> topLevelType,
<ide> targetInst,
<ide> nativeEvent,
<ide><path>packages/events/ReactGenericBatching.js
<ide> import {restoreStateIfNeeded} from './ReactControlledComponent';
<ide> // scheduled work and instead do synchronous work.
<ide>
<ide> // Defaults
<del>var fiberBatchedUpdates = function(fn, bookkeeping) {
<add>let fiberBatchedUpdates = function(fn, bookkeeping) {
<ide> return fn(bookkeeping);
<ide> };
<ide>
<del>var isNestingBatched = false;
<add>let isNestingBatched = false;
<ide> export function batchedUpdates(fn, bookkeeping) {
<ide> if (isNestingBatched) {
<ide> // If we are currently inside another batch, we need to wait until it
<ide> export function batchedUpdates(fn, bookkeeping) {
<ide> }
<ide> }
<ide>
<del>var ReactGenericBatchingInjection = {
<add>const ReactGenericBatchingInjection = {
<ide> injectFiberBatchedUpdates: function(_batchedUpdates) {
<ide> fiberBatchedUpdates = _batchedUpdates;
<ide> },
<ide><path>packages/events/ResponderEventPlugin.js
<ide> import accumulate from './accumulate';
<ide> * Instance of element that should respond to touch/move types of interactions,
<ide> * as indicated explicitly by relevant callbacks.
<ide> */
<del>var responderInst = null;
<add>let responderInst = null;
<ide>
<ide> /**
<ide> * Count of current touches. A textInput should become responder iff the
<ide> * selection changes while there is a touch on the screen.
<ide> */
<del>var trackedTouchCount = 0;
<add>let trackedTouchCount = 0;
<ide>
<ide> /**
<ide> * Last reported number of active touches.
<ide> */
<del>var previousActiveTouches = 0;
<add>let previousActiveTouches = 0;
<ide>
<del>var changeResponder = function(nextResponderInst, blockHostResponder) {
<del> var oldResponderInst = responderInst;
<add>const changeResponder = function(nextResponderInst, blockHostResponder) {
<add> const oldResponderInst = responderInst;
<ide> responderInst = nextResponderInst;
<ide> if (ResponderEventPlugin.GlobalResponderHandler !== null) {
<ide> ResponderEventPlugin.GlobalResponderHandler.onChange(
<ide> var changeResponder = function(nextResponderInst, blockHostResponder) {
<ide> }
<ide> };
<ide>
<del>var eventTypes = {
<add>const eventTypes = {
<ide> /**
<ide> * On a `touchStart`/`mouseDown`, is it desired that this element become the
<ide> * responder?
<ide> function setResponderAndExtractTransfer(
<ide> nativeEvent,
<ide> nativeEventTarget,
<ide> ) {
<del> var shouldSetEventType = isStartish(topLevelType)
<add> const shouldSetEventType = isStartish(topLevelType)
<ide> ? eventTypes.startShouldSetResponder
<ide> : isMoveish(topLevelType)
<ide> ? eventTypes.moveShouldSetResponder
<ide> function setResponderAndExtractTransfer(
<ide> : eventTypes.scrollShouldSetResponder;
<ide>
<ide> // TODO: stop one short of the current responder.
<del> var bubbleShouldSetFrom = !responderInst
<add> const bubbleShouldSetFrom = !responderInst
<ide> ? targetInst
<ide> : getLowestCommonAncestor(responderInst, targetInst);
<ide>
<ide> // When capturing/bubbling the "shouldSet" event, we want to skip the target
<ide> // (deepest ID) if it happens to be the current responder. The reasoning:
<ide> // It's strange to get an `onMoveShouldSetResponder` when you're *already*
<ide> // the responder.
<del> var skipOverBubbleShouldSetFrom = bubbleShouldSetFrom === responderInst;
<del> var shouldSetEvent = ResponderSyntheticEvent.getPooled(
<add> const skipOverBubbleShouldSetFrom = bubbleShouldSetFrom === responderInst;
<add> const shouldSetEvent = ResponderSyntheticEvent.getPooled(
<ide> shouldSetEventType,
<ide> bubbleShouldSetFrom,
<ide> nativeEvent,
<ide> function setResponderAndExtractTransfer(
<ide> } else {
<ide> accumulateTwoPhaseDispatches(shouldSetEvent);
<ide> }
<del> var wantsResponderInst = executeDispatchesInOrderStopAtTrue(shouldSetEvent);
<add> const wantsResponderInst = executeDispatchesInOrderStopAtTrue(shouldSetEvent);
<ide> if (!shouldSetEvent.isPersistent()) {
<ide> shouldSetEvent.constructor.release(shouldSetEvent);
<ide> }
<ide>
<ide> if (!wantsResponderInst || wantsResponderInst === responderInst) {
<ide> return null;
<ide> }
<del> var extracted;
<del> var grantEvent = ResponderSyntheticEvent.getPooled(
<add> let extracted;
<add> const grantEvent = ResponderSyntheticEvent.getPooled(
<ide> eventTypes.responderGrant,
<ide> wantsResponderInst,
<ide> nativeEvent,
<ide> function setResponderAndExtractTransfer(
<ide> grantEvent.touchHistory = ResponderTouchHistoryStore.touchHistory;
<ide>
<ide> accumulateDirectDispatches(grantEvent);
<del> var blockHostResponder = executeDirectDispatch(grantEvent) === true;
<add> const blockHostResponder = executeDirectDispatch(grantEvent) === true;
<ide> if (responderInst) {
<del> var terminationRequestEvent = ResponderSyntheticEvent.getPooled(
<add> const terminationRequestEvent = ResponderSyntheticEvent.getPooled(
<ide> eventTypes.responderTerminationRequest,
<ide> responderInst,
<ide> nativeEvent,
<ide> function setResponderAndExtractTransfer(
<ide> terminationRequestEvent.touchHistory =
<ide> ResponderTouchHistoryStore.touchHistory;
<ide> accumulateDirectDispatches(terminationRequestEvent);
<del> var shouldSwitch =
<add> const shouldSwitch =
<ide> !hasDispatches(terminationRequestEvent) ||
<ide> executeDirectDispatch(terminationRequestEvent);
<ide> if (!terminationRequestEvent.isPersistent()) {
<ide> terminationRequestEvent.constructor.release(terminationRequestEvent);
<ide> }
<ide>
<ide> if (shouldSwitch) {
<del> var terminateEvent = ResponderSyntheticEvent.getPooled(
<add> const terminateEvent = ResponderSyntheticEvent.getPooled(
<ide> eventTypes.responderTerminate,
<ide> responderInst,
<ide> nativeEvent,
<ide> function setResponderAndExtractTransfer(
<ide> extracted = accumulate(extracted, [grantEvent, terminateEvent]);
<ide> changeResponder(wantsResponderInst, blockHostResponder);
<ide> } else {
<del> var rejectEvent = ResponderSyntheticEvent.getPooled(
<add> const rejectEvent = ResponderSyntheticEvent.getPooled(
<ide> eventTypes.responderReject,
<ide> wantsResponderInst,
<ide> nativeEvent,
<ide> function canTriggerTransfer(topLevelType, topLevelInst, nativeEvent) {
<ide> * @return {boolean} Whether or not this touch end event ends the responder.
<ide> */
<ide> function noResponderTouches(nativeEvent) {
<del> var touches = nativeEvent.touches;
<add> const touches = nativeEvent.touches;
<ide> if (!touches || touches.length === 0) {
<ide> return true;
<ide> }
<del> for (var i = 0; i < touches.length; i++) {
<del> var activeTouch = touches[i];
<del> var target = activeTouch.target;
<add> for (let i = 0; i < touches.length; i++) {
<add> const activeTouch = touches[i];
<add> const target = activeTouch.target;
<ide> if (target !== null && target !== undefined && target !== 0) {
<ide> // Is the original touch location inside of the current responder?
<del> var targetInst = getInstanceFromNode(target);
<add> const targetInst = getInstanceFromNode(target);
<ide> if (isAncestor(responderInst, targetInst)) {
<ide> return false;
<ide> }
<ide> function noResponderTouches(nativeEvent) {
<ide> return true;
<ide> }
<ide>
<del>var ResponderEventPlugin = {
<add>const ResponderEventPlugin = {
<ide> /* For unit testing only */
<ide> _getResponder: function() {
<ide> return responderInst;
<ide> var ResponderEventPlugin = {
<ide>
<ide> ResponderTouchHistoryStore.recordTouchTrack(topLevelType, nativeEvent);
<ide>
<del> var extracted = canTriggerTransfer(topLevelType, targetInst, nativeEvent)
<add> let extracted = canTriggerTransfer(topLevelType, targetInst, nativeEvent)
<ide> ? setResponderAndExtractTransfer(
<ide> topLevelType,
<ide> targetInst,
<ide> var ResponderEventPlugin = {
<ide> // These multiple individual change touch events are always bookended
<ide> // by `onResponderGrant`, and one of
<ide> // (`onResponderRelease/onResponderTerminate`).
<del> var isResponderTouchStart = responderInst && isStartish(topLevelType);
<del> var isResponderTouchMove = responderInst && isMoveish(topLevelType);
<del> var isResponderTouchEnd = responderInst && isEndish(topLevelType);
<del> var incrementalTouch = isResponderTouchStart
<add> const isResponderTouchStart = responderInst && isStartish(topLevelType);
<add> const isResponderTouchMove = responderInst && isMoveish(topLevelType);
<add> const isResponderTouchEnd = responderInst && isEndish(topLevelType);
<add> const incrementalTouch = isResponderTouchStart
<ide> ? eventTypes.responderStart
<ide> : isResponderTouchMove
<ide> ? eventTypes.responderMove
<ide> : isResponderTouchEnd ? eventTypes.responderEnd : null;
<ide>
<ide> if (incrementalTouch) {
<del> var gesture = ResponderSyntheticEvent.getPooled(
<add> const gesture = ResponderSyntheticEvent.getPooled(
<ide> incrementalTouch,
<ide> responderInst,
<ide> nativeEvent,
<ide> var ResponderEventPlugin = {
<ide> extracted = accumulate(extracted, gesture);
<ide> }
<ide>
<del> var isResponderTerminate =
<add> const isResponderTerminate =
<ide> responderInst && topLevelType === 'topTouchCancel';
<del> var isResponderRelease =
<add> const isResponderRelease =
<ide> responderInst &&
<ide> !isResponderTerminate &&
<ide> isEndish(topLevelType) &&
<ide> noResponderTouches(nativeEvent);
<del> var finalTouch = isResponderTerminate
<add> const finalTouch = isResponderTerminate
<ide> ? eventTypes.responderTerminate
<ide> : isResponderRelease ? eventTypes.responderRelease : null;
<ide> if (finalTouch) {
<del> var finalEvent = ResponderSyntheticEvent.getPooled(
<add> const finalEvent = ResponderSyntheticEvent.getPooled(
<ide> finalTouch,
<ide> responderInst,
<ide> nativeEvent,
<ide> var ResponderEventPlugin = {
<ide> changeResponder(null);
<ide> }
<ide>
<del> var numberActiveTouches =
<add> const numberActiveTouches =
<ide> ResponderTouchHistoryStore.touchHistory.numberActiveTouches;
<ide> if (
<ide> ResponderEventPlugin.GlobalInteractionHandler &&
<ide><path>packages/events/ResponderSyntheticEvent.js
<ide> import SyntheticEvent from './SyntheticEvent';
<ide> * interface will ensure that it is cleaned up when pooled/destroyed. The
<ide> * `ResponderEventPlugin` will populate it appropriately.
<ide> */
<del>var ResponderEventInterface = {
<add>const ResponderEventInterface = {
<ide> touchHistory: function(nativeEvent) {
<ide> return null; // Actually doesn't even look at the native event.
<ide> },
<ide><path>packages/events/SyntheticEvent.js
<ide> import emptyFunction from 'fbjs/lib/emptyFunction';
<ide> import invariant from 'fbjs/lib/invariant';
<ide> import warning from 'fbjs/lib/warning';
<ide>
<del>var didWarnForAddedNewProperty = false;
<del>var isProxySupported = typeof Proxy === 'function';
<del>var EVENT_POOL_SIZE = 10;
<add>let didWarnForAddedNewProperty = false;
<add>const isProxySupported = typeof Proxy === 'function';
<add>const EVENT_POOL_SIZE = 10;
<ide>
<del>var shouldBeReleasedProperties = [
<add>const shouldBeReleasedProperties = [
<ide> 'dispatchConfig',
<ide> '_targetInst',
<ide> 'nativeEvent',
<ide> var shouldBeReleasedProperties = [
<ide> * @interface Event
<ide> * @see http://www.w3.org/TR/DOM-Level-3-Events/
<ide> */
<del>var EventInterface = {
<add>const EventInterface = {
<ide> type: null,
<ide> target: null,
<ide> // currentTarget is set when dispatching; no use in copying it here
<ide> function SyntheticEvent(
<ide> this._targetInst = targetInst;
<ide> this.nativeEvent = nativeEvent;
<ide>
<del> var Interface = this.constructor.Interface;
<del> for (var propName in Interface) {
<add> const Interface = this.constructor.Interface;
<add> for (const propName in Interface) {
<ide> if (!Interface.hasOwnProperty(propName)) {
<ide> continue;
<ide> }
<ide> if (__DEV__) {
<ide> delete this[propName]; // this has a getter/setter for warnings
<ide> }
<del> var normalize = Interface[propName];
<add> const normalize = Interface[propName];
<ide> if (normalize) {
<ide> this[propName] = normalize(nativeEvent);
<ide> } else {
<ide> function SyntheticEvent(
<ide> }
<ide> }
<ide>
<del> var defaultPrevented =
<add> const defaultPrevented =
<ide> nativeEvent.defaultPrevented != null
<ide> ? nativeEvent.defaultPrevented
<ide> : nativeEvent.returnValue === false;
<ide> function SyntheticEvent(
<ide> Object.assign(SyntheticEvent.prototype, {
<ide> preventDefault: function() {
<ide> this.defaultPrevented = true;
<del> var event = this.nativeEvent;
<add> const event = this.nativeEvent;
<ide> if (!event) {
<ide> return;
<ide> }
<ide> Object.assign(SyntheticEvent.prototype, {
<ide> },
<ide>
<ide> stopPropagation: function() {
<del> var event = this.nativeEvent;
<add> const event = this.nativeEvent;
<ide> if (!event) {
<ide> return;
<ide> }
<ide> Object.assign(SyntheticEvent.prototype, {
<ide> * `PooledClass` looks for `destructor` on each instance it releases.
<ide> */
<ide> destructor: function() {
<del> var Interface = this.constructor.Interface;
<del> for (var propName in Interface) {
<add> const Interface = this.constructor.Interface;
<add> for (const propName in Interface) {
<ide> if (__DEV__) {
<ide> Object.defineProperty(
<ide> this,
<ide> Object.assign(SyntheticEvent.prototype, {
<ide> this[propName] = null;
<ide> }
<ide> }
<del> for (var i = 0; i < shouldBeReleasedProperties.length; i++) {
<add> for (let i = 0; i < shouldBeReleasedProperties.length; i++) {
<ide> this[shouldBeReleasedProperties[i]] = null;
<ide> }
<ide> if (__DEV__) {
<ide> SyntheticEvent.Interface = EventInterface;
<ide> * @param {?object} Interface
<ide> */
<ide> SyntheticEvent.augmentClass = function(Class, Interface) {
<del> var Super = this;
<add> const Super = this;
<ide>
<del> var E = function() {};
<add> const E = function() {};
<ide> E.prototype = Super.prototype;
<del> var prototype = new E();
<add> const prototype = new E();
<ide>
<ide> Object.assign(prototype, Class.prototype);
<ide> Class.prototype = prototype;
<ide> addEventPoolingTo(SyntheticEvent);
<ide> * @return {object} defineProperty object
<ide> */
<ide> function getPooledWarningPropertyDefinition(propName, getVal) {
<del> var isFunction = typeof getVal === 'function';
<add> const isFunction = typeof getVal === 'function';
<ide> return {
<ide> configurable: true,
<ide> set: set,
<ide> get: get,
<ide> };
<ide>
<ide> function set(val) {
<del> var action = isFunction ? 'setting the method' : 'setting the property';
<add> const action = isFunction ? 'setting the method' : 'setting the property';
<ide> warn(action, 'This is effectively a no-op');
<ide> return val;
<ide> }
<ide>
<ide> function get() {
<del> var action = isFunction ? 'accessing the method' : 'accessing the property';
<del> var result = isFunction
<add> const action = isFunction
<add> ? 'accessing the method'
<add> : 'accessing the property';
<add> const result = isFunction
<ide> ? 'This is a no-op function'
<ide> : 'This is set to null';
<ide> warn(action, result);
<ide> return getVal;
<ide> }
<ide>
<ide> function warn(action, result) {
<del> var warningCondition = false;
<add> const warningCondition = false;
<ide> warning(
<ide> warningCondition,
<ide> "This synthetic event is reused for performance reasons. If you're seeing this, " +
<ide> function getPooledEvent(dispatchConfig, targetInst, nativeEvent, nativeInst) {
<ide> }
<ide>
<ide> function releasePooledEvent(event) {
<del> var EventConstructor = this;
<add> const EventConstructor = this;
<ide> invariant(
<ide> event instanceof EventConstructor,
<ide> 'Trying to release an event instance into a pool of a different type.',
<ide><path>packages/events/TouchHistoryMath.js
<ide> * LICENSE file in the root directory of this source tree.
<ide> */
<ide>
<del>var TouchHistoryMath = {
<add>const TouchHistoryMath = {
<ide> /**
<ide> * This code is optimized and not intended to look beautiful. This allows
<ide> * computing of touch centroids that have moved after `touchesChangedAfter`
<ide> var TouchHistoryMath = {
<ide> isXAxis,
<ide> ofCurrent,
<ide> ) {
<del> var touchBank = touchHistory.touchBank;
<del> var total = 0;
<del> var count = 0;
<add> const touchBank = touchHistory.touchBank;
<add> let total = 0;
<add> let count = 0;
<ide>
<del> var oneTouchData =
<add> const oneTouchData =
<ide> touchHistory.numberActiveTouches === 1
<ide> ? touchHistory.touchBank[touchHistory.indexOfSingleActiveTouch]
<ide> : null;
<ide> var TouchHistoryMath = {
<ide> count = 1;
<ide> }
<ide> } else {
<del> for (var i = 0; i < touchBank.length; i++) {
<del> var touchTrack = touchBank[i];
<add> for (let i = 0; i < touchBank.length; i++) {
<add> const touchTrack = touchBank[i];
<ide> if (
<ide> touchTrack !== null &&
<ide> touchTrack !== undefined &&
<ide> touchTrack.touchActive &&
<ide> touchTrack.currentTimeStamp >= touchesChangedAfter
<ide> ) {
<del> var toAdd; // Yuck, program temporarily in invalid state.
<add> let toAdd; // Yuck, program temporarily in invalid state.
<ide> if (ofCurrent && isXAxis) {
<ide> toAdd = touchTrack.currentPageX;
<ide> } else if (ofCurrent && !isXAxis) {
<ide><path>packages/events/__tests__/EventPluginRegistry-test.internal.js
<ide> 'use strict';
<ide>
<ide> describe('EventPluginRegistry', () => {
<del> var EventPluginRegistry;
<del> var createPlugin;
<add> let EventPluginRegistry;
<add> let createPlugin;
<ide>
<ide> beforeEach(() => {
<ide> jest.resetModuleRegistry();
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should be able to inject ordering before plugins', () => {
<del> var OnePlugin = createPlugin();
<del> var TwoPlugin = createPlugin();
<del> var ThreePlugin = createPlugin();
<add> const OnePlugin = createPlugin();
<add> const TwoPlugin = createPlugin();
<add> const ThreePlugin = createPlugin();
<ide>
<ide> EventPluginRegistry.injectEventPluginOrder(['one', 'two', 'three']);
<ide> EventPluginRegistry.injectEventPluginsByName({
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should be able to inject plugins before and after ordering', () => {
<del> var OnePlugin = createPlugin();
<del> var TwoPlugin = createPlugin();
<del> var ThreePlugin = createPlugin();
<add> const OnePlugin = createPlugin();
<add> const TwoPlugin = createPlugin();
<add> const ThreePlugin = createPlugin();
<ide>
<ide> EventPluginRegistry.injectEventPluginsByName({
<ide> one: OnePlugin,
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should be able to inject repeated plugins and out-of-order', () => {
<del> var OnePlugin = createPlugin();
<del> var TwoPlugin = createPlugin();
<del> var ThreePlugin = createPlugin();
<add> const OnePlugin = createPlugin();
<add> const TwoPlugin = createPlugin();
<add> const ThreePlugin = createPlugin();
<ide>
<ide> EventPluginRegistry.injectEventPluginsByName({
<ide> one: OnePlugin,
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should throw if plugin does not implement `extractEvents`', () => {
<del> var BadPlugin = {};
<add> const BadPlugin = {};
<ide>
<ide> EventPluginRegistry.injectEventPluginOrder(['bad']);
<ide>
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should throw if plugin does not exist in ordering', () => {
<del> var OnePlugin = createPlugin();
<del> var RandomPlugin = createPlugin();
<add> const OnePlugin = createPlugin();
<add> const RandomPlugin = createPlugin();
<ide>
<ide> EventPluginRegistry.injectEventPluginOrder(['one']);
<ide>
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should throw if ordering is injected more than once', () => {
<del> var pluginOrdering = [];
<add> const pluginOrdering = [];
<ide>
<ide> EventPluginRegistry.injectEventPluginOrder(pluginOrdering);
<ide>
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should throw if different plugins injected using same name', () => {
<del> var OnePlugin = createPlugin();
<del> var TwoPlugin = createPlugin();
<add> const OnePlugin = createPlugin();
<add> const TwoPlugin = createPlugin();
<ide>
<ide> EventPluginRegistry.injectEventPluginsByName({same: OnePlugin});
<ide>
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should publish registration names of injected plugins', () => {
<del> var OnePlugin = createPlugin({
<add> const OnePlugin = createPlugin({
<ide> eventTypes: {
<ide> click: {registrationName: 'onClick'},
<ide> focus: {registrationName: 'onFocus'},
<ide> },
<ide> });
<del> var TwoPlugin = createPlugin({
<add> const TwoPlugin = createPlugin({
<ide> eventTypes: {
<ide> magic: {
<ide> phasedRegistrationNames: {
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should throw if multiple registration names collide', () => {
<del> var OnePlugin = createPlugin({
<add> const OnePlugin = createPlugin({
<ide> eventTypes: {
<ide> photoCapture: {registrationName: 'onPhotoCapture'},
<ide> },
<ide> });
<del> var TwoPlugin = createPlugin({
<add> const TwoPlugin = createPlugin({
<ide> eventTypes: {
<ide> photo: {
<ide> phasedRegistrationNames: {
<ide> describe('EventPluginRegistry', () => {
<ide> });
<ide>
<ide> it('should throw if an invalid event is published', () => {
<del> var OnePlugin = createPlugin({
<add> const OnePlugin = createPlugin({
<ide> eventTypes: {
<ide> badEvent: {
<ide> /* missing configuration */
<ide><path>packages/events/__tests__/ResponderEventPlugin-test.internal.js
<ide>
<ide> 'use strict';
<ide>
<del>var {HostComponent} = require('shared/ReactTypeOfWork');
<add>const {HostComponent} = require('shared/ReactTypeOfWork');
<ide>
<del>var EventPluginHub;
<del>var ResponderEventPlugin;
<add>let EventPluginHub;
<add>let ResponderEventPlugin;
<ide>
<del>var touch = function(nodeHandle, i) {
<add>const touch = function(nodeHandle, i) {
<ide> return {target: nodeHandle, identifier: i};
<ide> };
<ide>
<ide> var touch = function(nodeHandle, i) {
<ide> * @return {TouchEvent} Model of a touch event that is compliant with responder
<ide> * system plugin.
<ide> */
<del>var touchEvent = function(nodeHandle, touches, changedTouches) {
<add>const touchEvent = function(nodeHandle, touches, changedTouches) {
<ide> return {
<ide> target: nodeHandle,
<ide> changedTouches: changedTouches,
<ide> touches: touches,
<ide> };
<ide> };
<ide>
<del>var subsequence = function(arr, indices) {
<del> var ret = [];
<del> for (var i = 0; i < indices.length; i++) {
<del> var index = indices[i];
<add>const subsequence = function(arr, indices) {
<add> const ret = [];
<add> for (let i = 0; i < indices.length; i++) {
<add> const index = indices[i];
<ide> ret.push(arr[index]);
<ide> }
<ide> return ret;
<ide> };
<ide>
<del>var antiSubsequence = function(arr, indices) {
<del> var ret = [];
<del> for (var i = 0; i < arr.length; i++) {
<add>const antiSubsequence = function(arr, indices) {
<add> const ret = [];
<add> for (let i = 0; i < arr.length; i++) {
<ide> if (indices.indexOf(i) === -1) {
<ide> ret.push(arr[i]);
<ide> }
<ide> var antiSubsequence = function(arr, indices) {
<ide> * Helper for creating touch test config data.
<ide> * @param allTouchHandles
<ide> */
<del>var _touchConfig = function(
<add>const _touchConfig = function(
<ide> topType,
<ide> targetNodeHandle,
<ide> allTouchHandles,
<ide> changedIndices,
<ide> eventTarget,
<ide> ) {
<del> var allTouchObjects = allTouchHandles.map(touch);
<del> var changedTouchObjects = subsequence(allTouchObjects, changedIndices);
<del> var activeTouchObjects = topType === 'topTouchStart'
<add> const allTouchObjects = allTouchHandles.map(touch);
<add> const changedTouchObjects = subsequence(allTouchObjects, changedIndices);
<add> const activeTouchObjects = topType === 'topTouchStart'
<ide> ? allTouchObjects
<ide> : topType === 'topTouchMove'
<ide> ? allTouchObjects
<ide> var _touchConfig = function(
<ide> * @return {object} Config data used by test cases for extracting responder
<ide> * events.
<ide> */
<del>var startConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<add>const startConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<ide> return _touchConfig(
<ide> 'topTouchStart',
<ide> nodeHandle,
<ide> var startConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<ide> /**
<ide> * @see `startConfig`
<ide> */
<del>var moveConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<add>const moveConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<ide> return _touchConfig(
<ide> 'topTouchMove',
<ide> nodeHandle,
<ide> var moveConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<ide> /**
<ide> * @see `startConfig`
<ide> */
<del>var endConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<add>const endConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<ide> return _touchConfig(
<ide> 'topTouchEnd',
<ide> nodeHandle,
<ide> var endConfig = function(nodeHandle, allTouchHandles, changedIndices) {
<ide> * ever invoked).
<ide> *
<ide> */
<del>var NA = -1;
<del>var oneEventLoopTestConfig = function(readableIDToID) {
<del> var ret = {
<add>const NA = -1;
<add>const oneEventLoopTestConfig = function(readableIDToID) {
<add> const ret = {
<ide> // Negotiation
<ide> scrollShouldSetResponder: {bubbled: {}, captured: {}},
<ide> startShouldSetResponder: {bubbled: {}, captured: {}},
<ide> var oneEventLoopTestConfig = function(readableIDToID) {
<ide> responderEnd: {},
<ide> responderRelease: {},
<ide> };
<del> for (var eventName in ret) {
<del> for (var readableNodeName in readableIDToID) {
<add> for (const eventName in ret) {
<add> for (const readableNodeName in readableIDToID) {
<ide> if (ret[eventName].bubbled) {
<ide> // Two phase
<ide> ret[eventName].bubbled[readableNodeName] = {
<ide> var oneEventLoopTestConfig = function(readableIDToID) {
<ide> * @param {object} eventTestConfig
<ide> * @param {object} readableIDToID
<ide> */
<del>var registerTestHandlers = function(eventTestConfig, readableIDToID) {
<del> var runs = {dispatchCount: 0};
<del> var neverFire = function(readableID, registrationName) {
<add>const registerTestHandlers = function(eventTestConfig, readableIDToID) {
<add> const runs = {dispatchCount: 0};
<add> const neverFire = function(readableID, registrationName) {
<ide> runs.dispatchCount++;
<ide> expect('').toBe(
<ide> 'Event type: ' +
<ide> var registerTestHandlers = function(eventTestConfig, readableIDToID) {
<ide> );
<ide> };
<ide>
<del> var registerOneEventType = function(registrationName, eventTypeTestConfig) {
<del> for (var readableID in eventTypeTestConfig) {
<del> var nodeConfig = eventTypeTestConfig[readableID];
<del> var id = readableIDToID[readableID];
<del> var handler = nodeConfig.order === NA
<add> const registerOneEventType = function(registrationName, eventTypeTestConfig) {
<add> for (const readableID in eventTypeTestConfig) {
<add> const nodeConfig = eventTypeTestConfig[readableID];
<add> const id = readableIDToID[readableID];
<add> const handler = nodeConfig.order === NA
<ide> ? neverFire.bind(null, readableID, registrationName)
<ide> : // We partially apply readableID and nodeConfig, as they change in the
<ide> // parent closure across iterations.
<ide> var registerTestHandlers = function(eventTestConfig, readableIDToID) {
<ide> putListener(getInstanceFromNode(id), registrationName, handler);
<ide> }
<ide> };
<del> for (var eventName in eventTestConfig) {
<del> var oneEventTypeTestConfig = eventTestConfig[eventName];
<del> var hasTwoPhase = !!oneEventTypeTestConfig.bubbled;
<add> for (const eventName in eventTestConfig) {
<add> const oneEventTypeTestConfig = eventTestConfig[eventName];
<add> const hasTwoPhase = !!oneEventTypeTestConfig.bubbled;
<ide> if (hasTwoPhase) {
<ide> registerOneEventType(
<ide> ResponderEventPlugin.eventTypes[eventName].phasedRegistrationNames
<ide> var registerTestHandlers = function(eventTestConfig, readableIDToID) {
<ide> return runs;
<ide> };
<ide>
<del>var run = function(config, hierarchyConfig, nativeEventConfig) {
<del> var max = NA;
<del> var searchForMax = function(nodeConfig) {
<del> for (var readableID in nodeConfig) {
<del> var order = nodeConfig[readableID].order;
<add>const run = function(config, hierarchyConfig, nativeEventConfig) {
<add> let max = NA;
<add> const searchForMax = function(nodeConfig) {
<add> for (const readableID in nodeConfig) {
<add> const order = nodeConfig[readableID].order;
<ide> max = order > max ? order : max;
<ide> }
<ide> };
<del> for (var eventName in config) {
<del> var eventConfig = config[eventName];
<add> for (const eventName in config) {
<add> const eventConfig = config[eventName];
<ide> if (eventConfig.bubbled) {
<ide> searchForMax(eventConfig.bubbled);
<ide> searchForMax(eventConfig.captured);
<ide> var run = function(config, hierarchyConfig, nativeEventConfig) {
<ide> }
<ide>
<ide> // Register the handlers
<del> var runData = registerTestHandlers(config, hierarchyConfig);
<add> const runData = registerTestHandlers(config, hierarchyConfig);
<ide>
<ide> // Trigger the event
<del> var extractedEvents = ResponderEventPlugin.extractEvents(
<add> const extractedEvents = ResponderEventPlugin.extractEvents(
<ide> nativeEventConfig.topLevelType,
<ide> nativeEventConfig.targetInst,
<ide> nativeEventConfig.nativeEvent,
<ide> var run = function(config, hierarchyConfig, nativeEventConfig) {
<ide> ); // +1 for extra ++
<ide> };
<ide>
<del>var GRANDPARENT_HOST_NODE = {};
<del>var PARENT_HOST_NODE = {};
<del>var CHILD_HOST_NODE = {};
<del>var CHILD_HOST_NODE2 = {};
<add>const GRANDPARENT_HOST_NODE = {};
<add>const PARENT_HOST_NODE = {};
<add>const CHILD_HOST_NODE = {};
<add>const CHILD_HOST_NODE2 = {};
<ide>
<ide> // These intentionally look like Fibers. ReactTreeTraversal depends on their field names.
<ide> // TODO: we could test this with regular DOM nodes (and real fibers) instead.
<del>var GRANDPARENT_INST = {
<add>const GRANDPARENT_INST = {
<ide> return: null,
<ide> tag: HostComponent,
<ide> stateNode: GRANDPARENT_HOST_NODE,
<ide> memoizedProps: {},
<ide> };
<del>var PARENT_INST = {
<add>const PARENT_INST = {
<ide> return: GRANDPARENT_INST,
<ide> tag: HostComponent,
<ide> stateNode: PARENT_HOST_NODE,
<ide> memoizedProps: {},
<ide> };
<del>var CHILD_INST = {
<add>const CHILD_INST = {
<ide> return: PARENT_INST,
<ide> tag: HostComponent,
<ide> stateNode: CHILD_HOST_NODE,
<ide> memoizedProps: {},
<ide> };
<del>var CHILD_INST2 = {
<add>const CHILD_INST2 = {
<ide> return: PARENT_INST,
<ide> tag: HostComponent,
<ide> stateNode: CHILD_HOST_NODE2,
<ide> PARENT_HOST_NODE.testInstance = PARENT_INST;
<ide> CHILD_HOST_NODE.testInstance = CHILD_INST;
<ide> CHILD_HOST_NODE2.testInstance = CHILD_INST2;
<ide>
<del>var three = {
<add>const three = {
<ide> grandParent: GRANDPARENT_HOST_NODE,
<ide> parent: PARENT_HOST_NODE,
<ide> child: CHILD_HOST_NODE,
<ide> };
<ide>
<del>var siblings = {
<add>const siblings = {
<ide> parent: PARENT_HOST_NODE,
<ide> childOne: CHILD_HOST_NODE,
<ide> childTwo: CHILD_HOST_NODE2,
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should do nothing when no one wants to respond', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> */
<ide>
<ide> it('should grant responder grandParent while capturing', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: true,
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder parent while capturing', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder child while capturing', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder child while bubbling', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder parent while bubbling', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder grandParent while bubbling', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> */
<ide>
<ide> it('should grant responder grandParent while capturing move', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide>
<ide> config.startShouldSetResponder.captured.grandParent = {order: 0};
<ide> config.startShouldSetResponder.captured.parent = {order: 1};
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder parent while capturing move', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide>
<ide> config.startShouldSetResponder.captured.grandParent = {order: 0};
<ide> config.startShouldSetResponder.captured.parent = {order: 1};
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder child while capturing move', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide>
<ide> config.startShouldSetResponder.captured.grandParent = {order: 0};
<ide> config.startShouldSetResponder.captured.parent = {order: 1};
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder child while bubbling move', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide>
<ide> config.startShouldSetResponder.captured.grandParent = {order: 0};
<ide> config.startShouldSetResponder.captured.parent = {order: 1};
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder parent while bubbling move', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide>
<ide> config.startShouldSetResponder.captured.grandParent = {order: 0};
<ide> config.startShouldSetResponder.captured.parent = {order: 1};
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should grant responder grandParent while bubbling move', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide>
<ide> config.startShouldSetResponder.captured.grandParent = {order: 0};
<ide> config.startShouldSetResponder.captured.parent = {order: 1};
<ide> describe('ResponderEventPlugin', () => {
<ide> */
<ide>
<ide> it('should bubble negotiation to first common ancestor of responder', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> });
<ide>
<ide> it('should bubble negotiation to first common ancestor of responder then transfer', () => {
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> */
<ide> it('should negotiate with deepest target on second touch if nothing is responder', () => {
<ide> // Initially nothing wants to become the responder
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> */
<ide> it('should negotiate until first common ancestor when there are siblings', () => {
<ide> // Initially nothing wants to become the responder
<del> var config = oneEventLoopTestConfig(siblings);
<add> let config = oneEventLoopTestConfig(siblings);
<ide> config.startShouldSetResponder.captured.parent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> };
<ide> config.responderStart.childOne = {order: 2};
<ide>
<del> var touchConfig = startConfig(
<add> const touchConfig = startConfig(
<ide> siblings.childTwo,
<ide> [siblings.childOne, siblings.childTwo],
<ide> [1],
<ide> describe('ResponderEventPlugin', () => {
<ide>
<ide> it('should notify of being rejected. responderStart/Move happens on current responder', () => {
<ide> // Initially nothing wants to become the responder
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> // The start/move should occur on the original responder if new one is rejected
<ide> config.responderMove.child = {order: 6};
<ide>
<del> var touchConfig = moveConfig(three.child, [three.child], [0]);
<add> let touchConfig = moveConfig(three.child, [three.child], [0]);
<ide> run(config, three, touchConfig);
<ide> expect(ResponderEventPlugin._getResponder()).toBe(
<ide> getInstanceFromNode(three.child),
<ide> describe('ResponderEventPlugin', () => {
<ide>
<ide> it('should negotiate scroll', () => {
<ide> // Initially nothing wants to become the responder
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide>
<ide> it('should cancel correctly', () => {
<ide> // Initially our child becomes responder
<del> var config = oneEventLoopTestConfig(three);
<add> let config = oneEventLoopTestConfig(three);
<ide> config.startShouldSetResponder.captured.grandParent = {
<ide> order: 0,
<ide> returnVal: false,
<ide> describe('ResponderEventPlugin', () => {
<ide> config.responderEnd.child = {order: 0};
<ide> config.responderTerminate.child = {order: 1};
<ide>
<del> var nativeEvent = _touchConfig(
<add> const nativeEvent = _touchConfig(
<ide> 'topTouchCancel',
<ide> three.child,
<ide> [three.child],
<ide> describe('ResponderEventPlugin', () => {
<ide> it('should determine the first common ancestor correctly', () => {
<ide> // This test was moved here from the ReactTreeTraversal test since only the
<ide> // ResponderEventPlugin uses `getLowestCommonAncestor`
<del> var React = require('react');
<del> var ReactTestUtils = require('react-dom/test-utils');
<del> var ReactTreeTraversal = require('shared/ReactTreeTraversal');
<del> var ReactDOMComponentTree = require('../../react-dom/src/client/ReactDOMComponentTree');
<add> const React = require('react');
<add> const ReactTestUtils = require('react-dom/test-utils');
<add> const ReactTreeTraversal = require('shared/ReactTreeTraversal');
<add> const ReactDOMComponentTree = require('../../react-dom/src/client/ReactDOMComponentTree');
<ide>
<ide> class ChildComponent extends React.Component {
<ide> render() {
<ide> describe('ResponderEventPlugin', () => {
<ide> }
<ide> }
<ide>
<del> var parent = ReactTestUtils.renderIntoDocument(<ParentComponent />);
<add> const parent = ReactTestUtils.renderIntoDocument(<ParentComponent />);
<ide>
<del> var ancestors = [
<add> const ancestors = [
<ide> // Common ancestor with self is self.
<ide> {
<ide> one: parent.refs.P_P1_C1.refs.DIV_1,
<ide> describe('ResponderEventPlugin', () => {
<ide> com: parent.refs.P,
<ide> },
<ide> ];
<del> var i;
<add> let i;
<ide> for (i = 0; i < ancestors.length; i++) {
<del> var plan = ancestors[i];
<del> var firstCommon = ReactTreeTraversal.getLowestCommonAncestor(
<add> const plan = ancestors[i];
<add> const firstCommon = ReactTreeTraversal.getLowestCommonAncestor(
<ide> ReactDOMComponentTree.getInstanceFromNode(plan.one),
<ide> ReactDOMComponentTree.getInstanceFromNode(plan.two),
<ide> );
<ide><path>packages/events/__tests__/accumulateInto-test.internal.js
<ide>
<ide> 'use strict';
<ide>
<del>var accumulateInto;
<add>let accumulateInto;
<ide>
<ide> describe('accumulateInto', () => {
<ide> beforeEach(() => {
<ide> describe('accumulateInto', () => {
<ide> });
<ide>
<ide> it('returns the second item if first is null', () => {
<del> var a = [];
<add> const a = [];
<ide> expect(accumulateInto(null, a)).toBe(a);
<ide> });
<ide>
<ide> it('merges the second into the first if first item is an array', () => {
<del> var a = [1, 2];
<del> var b = [3, 4];
<add> const a = [1, 2];
<add> const b = [3, 4];
<ide> accumulateInto(a, b);
<ide> expect(a).toEqual([1, 2, 3, 4]);
<ide> expect(b).toEqual([3, 4]);
<del> var c = [1];
<add> const c = [1];
<ide> accumulateInto(c, 2);
<ide> expect(c).toEqual([1, 2]);
<ide> });
<ide>
<ide> it('returns a new array if first or both items are scalar', () => {
<del> var a = [2];
<add> const a = [2];
<ide> expect(accumulateInto(1, a)).toEqual([1, 2]);
<ide> expect(a).toEqual([2]);
<ide> expect(accumulateInto(1, 2)).toEqual([1, 2]); | 14 |
Text | Text | remove info for deprecated database switching | dfb4c1114baa2a8b196b33a1cddb303bb49c7a83 | <ide><path>guides/source/active_record_multiple_databases.md
<ide> The "role" in the `connected_to` call looks up the connections that are connecte
<ide> connection handler (or role). The `reading` connection handler will hold all the connections
<ide> that were connected via `connects_to` with the role name of `reading`.
<ide>
<del>There also may be a case where you have a database that you don't always want to connect to
<del>on application boot but may need for a slow query or analytics. After defining that database
<del>in the `database.yml` you can connect by passing a database argument to `connected_to`
<del>
<del>```ruby
<del>ActiveRecord::Base.connected_to(database: { reading_slow: :animals_slow_replica }) do
<del> # do something while connected to the slow replica
<del>end
<del>```
<del>
<del>The `database` argument for `connected_to` will take a symbol or a config hash.
<del>
<ide> Note that `connected_to` with a role will look up an existing connection and switch
<ide> using the connection specification name. This means that if you pass an unknown role
<ide> like `connected_to(role: :nonexistent)` you will get an error that says | 1 |
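The guide text kept by this patch explains that `connected_to` switches by looking up the connections registered for a role. A minimal sketch of that role-based usage, assuming an abstract base class that has already called `connects_to` with `writing`/`reading` roles (the database keys and the `Conversation` model are illustrative, not taken from the commit):

```ruby
class ApplicationRecord < ActiveRecord::Base
  self.abstract_class = true

  # Assumed configuration: a primary database plus a replica defined in database.yml.
  connects_to database: { writing: :primary, reading: :primary_replica }
end

class Conversation < ApplicationRecord; end

# Queries inside the block use the connections registered for the :reading role.
ActiveRecord::Base.connected_to(role: :reading) do
  Conversation.first # runs against the replica connection
end
```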
Javascript | Javascript | allow deeper trees | fdaefc1103f1d1d8c7fb7c2d428a20990383ff91 | <ide><path>src/renderers/shared/reconciler/ReactInstanceHandles.js
<ide> var SEPARATOR_LENGTH = SEPARATOR.length;
<ide> /**
<ide> * Maximum depth of traversals before we consider the possibility of a bad ID.
<ide> */
<del>var MAX_TREE_DEPTH = 100;
<add>var MAX_TREE_DEPTH = 10000;
<ide>
<ide> /**
<ide> * Creates a DOM ID prefix to use when mounting React components. | 1 |
Text | Text | add uv_threadpool_size link definition | 0ff0c70e44581ebff6327a9f95e3d49a6ec1609d | <ide><path>doc/api/zlib.md
<ide> Decompress a chunk of data with [Unzip][].
<ide> [Inflate]: #zlib_class_zlib_inflate
<ide> [Memory Usage Tuning]: #zlib_memory_usage_tuning
<ide> [Unzip]: #zlib_class_zlib_unzip
<add>[`UV_THREADPOOL_SIZE`]: cli.html#cli_uv_threadpool_size_size
<ide> [options]: #zlib_class_options
<ide> [zlib documentation]: http://zlib.net/manual.html#Constants | 1 |
Javascript | Javascript | relax chunk count expectations | cc26957cc30d89619d3b9be85f5301111e17615a | <ide><path>test/parallel/test-fs-read-stream-concurrent-reads.js
<ide> const fs = require('fs');
<ide> const filename = fixtures.path('loop.js'); // Some small non-homogeneous file.
<ide> const content = fs.readFileSync(filename);
<ide>
<del>const N = 1000;
<add>const N = 2000;
<ide> let started = 0;
<ide> let done = 0;
<ide>
<ide> function startRead() {
<ide> .on('data', (chunk) => {
<ide> chunks.push(chunk);
<ide> arrayBuffers.add(chunk.buffer);
<del> if (started < N)
<del> startRead();
<ide> })
<ide> .on('end', common.mustCall(() => {
<add> if (started < N)
<add> startRead();
<ide> assert.deepStrictEqual(Buffer.concat(chunks), content);
<ide> if (++done === N) {
<ide> const retainedMemory =
<ide> function startRead() {
<ide>
<ide> // Don’t start the reads all at once – that way we would have to allocate
<ide> // a large amount of memory upfront.
<del>for (let i = 0; i < 4; ++i)
<add>for (let i = 0; i < 6; ++i)
<ide> startRead(); | 1 |
Javascript | Javascript | remove unnecessary xss check introduced by | c7488c7dd5ea697facc96202417cd1c4363a4ee7 | <ide><path>lib/helpers/isURLSameOrigin.js
<ide> 'use strict';
<ide>
<ide> var utils = require('./../utils');
<del>var isValidXss = require('./isValidXss');
<ide>
<ide> module.exports = (
<ide> utils.isStandardBrowserEnv() ?
<ide> module.exports = (
<ide> function resolveURL(url) {
<ide> var href = url;
<ide>
<del> if (isValidXss(url)) {
<del> throw new Error('URL contains XSS injection attempt');
<del> }
<del>
<ide> if (msie) {
<ide> // IE needs attribute set twice to normalize properties
<ide> urlParsingNode.setAttribute('href', href);
<ide><path>lib/helpers/isValidXss.js
<del>'use strict';
<del>
<del>module.exports = function isValidXss(requestURL) {
<del> var xssRegex = /(\b)(on\w+)=|javascript|(<\s*)(\/*)script/gi;
<del> return xssRegex.test(requestURL);
<del>};
<del>
<ide><path>test/specs/helpers/isURLSameOrigin.spec.js
<ide> describe('helpers::isURLSameOrigin', function () {
<ide> it('should detect different origin', function () {
<ide> expect(isURLSameOrigin('https://github.com/axios/axios')).toEqual(false);
<ide> });
<del>
<del> it('should detect XSS scripts on a same origin request', function () {
<del> expect(function() {
<del> isURLSameOrigin('https://github.com/axios/axios?<script>alert("hello")</script>');
<del> }).toThrowError(Error, 'URL contains XSS injection attempt')
<del> });
<ide> });
<ide><path>test/specs/helpers/isValidXss.spec.js
<del>var isValidXss = require('../../../lib/helpers/isValidXss');
<del>
<del>describe('helpers::isValidXss', function () {
<del> it('should detect script tags', function () {
<del> expect(isValidXss("<script/xss>alert('blah')</script/xss>")).toBe(true);
<del> expect(isValidXss("<SCRIPT>alert('getting your password')</SCRIPT>")).toBe(true);
<del> expect(isValidXss("<script src='http://xssinjections.com/inject.js'>xss</script>")).toBe(true);
<del> expect(isValidXss("<img src='/' onerror='javascript:alert('xss')'>xss</script>")).toBe(true);
<del> expect(isValidXss("<script>console.log('XSS')</script>")).toBe(true);
<del> expect(isValidXss("onerror=alert('XSS')")).toBe(true);
<del> expect(isValidXss("<a onclick='alert('XSS')'>Click Me</a>")).toBe(true);
<del> });
<del>
<del> it('should not detect non script tags', function() {
<del> expect(isValidXss("/one/?foo=bar")).toBe(false);
<del> expect(isValidXss("<safe> tags")).toBe(false);
<del> expect(isValidXss("<safetag>")).toBe(false);
<del> expect(isValidXss(">>> safe <<<")).toBe(false);
<del> expect(isValidXss("<<< safe >>>")).toBe(false);
<del> expect(isValidXss("my script rules")).toBe(false);
<del> expect(isValidXss("<a notonlistener='nomatch'>")).toBe(false);
<del> expect(isValidXss("<h2>MyTitle</h2>")).toBe(false);
<del> expect(isValidXss("<img src='#'/>")).toBe(false);
<del> })
<del>}); | 4 |
Javascript | Javascript | add api to get a view's client rects | 89da7c29a1a46c9fd31b1c68f44ad77310139d20 | <ide><path>packages/ember-views/lib/main.js
<ide> Ember Views
<ide> import Ember from "ember-runtime";
<ide> import jQuery from "ember-views/system/jquery";
<ide> import {
<del> isSimpleClick
<add> isSimpleClick,
<add> getViewClientRects,
<add> getViewBoundingClientRect
<ide> } from "ember-views/system/utils";
<ide> import RenderBuffer from "ember-views/system/render_buffer";
<ide> import "ember-views/system/ext"; // for the side effect of extending Ember.run.queues
<ide> import {
<ide> states
<ide> } from "ember-views/views/states";
<ide>
<del>import CoreView from "ember-views/views/core_view";
<del>import View from "ember-views/views/view";
<add>import CoreView from "ember-views/views/core_view";
<add>import View from "ember-views/views/view";
<ide> import ContainerView from "ember-views/views/container_view";
<ide> import CollectionView from "ember-views/views/collection_view";
<ide> import Component from "ember-views/views/component";
<ide> Ember.RenderBuffer = RenderBuffer;
<ide>
<ide> var ViewUtils = Ember.ViewUtils = {};
<ide> ViewUtils.isSimpleClick = isSimpleClick;
<add>ViewUtils.getViewClientRects = getViewClientRects;
<add>ViewUtils.getViewBoundingClientRect = getViewBoundingClientRect;
<ide>
<ide> Ember.CoreView = CoreView;
<ide> Ember.View = View;
<ide><path>packages/ember-views/lib/system/utils.js
<ide> export function isSimpleClick(event) {
<ide>
<ide> return !modifier && !secondaryClick;
<ide> }
<add>
<add>/**
<add> @private
<add> @method getViewRange
<add> @param {Ember.View} view
<add>*/
<add>function getViewRange(view) {
<add> var range = document.createRange();
<add> range.setStartAfter(view._morph.start);
<add> range.setEndBefore(view._morph.end);
<add> return range;
<add>}
<add>
<add>/**
<add> `getViewClientRects` provides information about the position of the border
<add> box edges of a view relative to the viewport.
<add>
<add> It is only intended to be used by development tools like the Ember Inspector
<add> and may not work on older browsers.
<add>
<add> @private
<add> @method getViewClientRects
<add> @param {Ember.View} view
<add>*/
<add>export function getViewClientRects(view) {
<add> var range = getViewRange(view);
<add> return range.getClientRects();
<add>}
<add>
<add>/**
<add> `getViewBoundingClientRect` provides information about the position of the
<add> bounding border box edges of a view relative to the viewport.
<add>
<add> It is only intended to be used by development tools like the Ember Inspector
<add> and may not work on older browsers.
<add>
<add> @private
<add> @method getViewBoundingClientRect
<add> @param {Ember.View} view
<add>*/
<add>export function getViewBoundingClientRect(view) {
<add> var range = getViewRange(view);
<add> return range.getBoundingClientRect();
<add>}
<ide><path>packages/ember-views/tests/system/view_utils_test.js
<add>import run from "ember-metal/run_loop";
<add>import View from "ember-views/views/view";
<add>
<add>var view;
<add>
<add>QUnit.module("ViewUtils", {
<add> teardown: function() {
<add> run(function() {
<add> if (view) { view.destroy(); }
<add> });
<add> }
<add>});
<add>
<add>test("getViewClientRects", function() {
<add> if (!(window.Range && window.Range.prototype.getClientRects)) {
<add> ok(true, "The test environment does not support the DOM API required for getViewClientRects.");
<add> return;
<add> }
<add>
<add> view = View.create({
<add> render: function(buffer) {
<add> buffer.push("Hello, world!");
<add> }
<add> });
<add>
<add> run(function() { view.appendTo('#qunit-fixture'); });
<add>
<add> ok(Ember.ViewUtils.getViewClientRects(view) instanceof window.ClientRectList);
<add>});
<add>
<add>test("getViewBoundingClientRect", function() {
<add> if (!(window.Range && window.Range.prototype.getBoundingClientRect)) {
<add> ok(true, "The test environment does not support the DOM API required for getViewBoundingClientRect.");
<add> return;
<add> }
<add>
<add> view = View.create({
<add> render: function(buffer) {
<add> buffer.push("Hello, world!");
<add> }
<add> });
<add>
<add> run(function() { view.appendTo('#qunit-fixture'); });
<add>
<add> ok(Ember.ViewUtils.getViewBoundingClientRect(view) instanceof window.ClientRect);
<add>}); | 3 |
PHP | PHP | add shell in the option list | 9e3da12b073398db5a30d5920a4abc264fc69169 | <ide><path>src/Shell/Task/TestTask.php
<ide> public function getOptionParser() {
<ide> 'Helper', 'helper',
<ide> 'Component', 'component',
<ide> 'Behavior', 'behavior',
<add> 'Shell', 'shell',
<ide> 'Cell', 'cell',
<ide> ]
<ide> ])->addArgument('name', [ | 1 |
Go | Go | add check for empty list not producing an error | 1a7ffe4fe45642c7482e206e15ae50eeba85e02a | <ide><path>integration/secret/secret_test.go
<ide> func TestSecretList(t *testing.T) {
<ide> defer c.Close()
<ide> ctx := context.Background()
<ide>
<add> configs, err := c.SecretList(ctx, types.SecretListOptions{})
<add> assert.NilError(t, err)
<add> assert.Check(t, is.Equal(len(configs), 0))
<add>
<ide> testName0 := "test0_" + t.Name()
<ide> testName1 := "test1_" + t.Name()
<ide> testNames := []string{testName0, testName1} | 1 |
Javascript | Javascript | remove unreachable execsync() code | dd13d71eb3ee34d84ca9059d40616bd0b44ca0ed | <ide><path>lib/child_process.js
<ide> exports.execFileSync = execFileSync;
<ide>
<ide> function execSync(command /*, options*/) {
<ide> var opts = normalizeExecArgs.apply(null, arguments);
<del> var inheritStderr = opts.options ? !opts.options.stdio : true;
<add> var inheritStderr = !opts.options.stdio;
<ide>
<ide> var ret = spawnSync(opts.file, opts.options);
<ide> ret.cmd = command; | 1 |
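For context, the removed ternary could never take its `true` branch because `normalizeExecArgs()` always hands back an options object. A paraphrased sketch of that invariant (not the actual Node source):

```js
// Paraphrased: normalizeExecArgs() copies whatever the caller passed into a
// fresh object, so `opts.options` is always truthy inside execSync().
function normalizeExecArgs(command, options, callback) {
  if (typeof options === 'function') {
    callback = options;
    options = undefined;
  }
  options = Object.assign({}, options); // {} even when no options were given
  return { file: command, options: options, callback: callback };
}

const opts = normalizeExecArgs('ls -l');
console.log(typeof opts.options); // 'object' -> the old `opts.options ? ... : true` guard was dead code
```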
Ruby | Ruby | add missing information about attributes method | ae8070e21be405331a1ad97f59c4e43d45f10eb3 | <ide><path>activemodel/lib/active_model/attribute_methods.rb
<ide> class MissingAttributeError < NoMethodError
<ide> # A minimal implementation could be:
<ide> #
<ide> # class Person
<del> #
<ide> # include ActiveModel::AttributeMethods
<ide> #
<ide> # attribute_method_affix :prefix => 'reset_', :suffix => '_to_default!'
<ide> class MissingAttributeError < NoMethodError
<ide> # def reset_attribute_to_default!(attr)
<ide> # send("#{attr}=", "Default Name")
<ide> # end
<del> #
<ide> # end
<del> #
<add> #
<add> # Please notice that whenever you include ActiveModel::AttributeMethods in your class,
<add> # it requires you to implement an <tt>attributes</tt> method which returns a hash with
<add> # each attribute name in your model as hash key and the attribute value as hash value.
<add> # Hash keys must be strings.
<add> #
<ide> module AttributeMethods
<ide> extend ActiveSupport::Concern
<ide> | 1 |
PHP | PHP | specify behavior of configrequest more precisely | f4a423e880f66ed3610fdaf20b31fe42d1cae4c8 | <ide><path>src/TestSuite/IntegrationTestTrait.php
<ide> public function enableRetainFlashMessages(): void
<ide> *
<ide> * You can call this method multiple times to append into
<ide> * the current state.
<add> * Sub-keys like 'headers' will be reset, though.
<ide> *
<ide> * @param array $data The request data to use.
<ide> * @return void | 1 |
Javascript | Javascript | force the latest version of selenium | ed4a1fddce0c7e2f756b07e8be348320ae29de6b | <ide><path>karma-shared.conf.js
<ide> module.exports = function(config, specificOptions) {
<ide> // SauceLabs config for local development.
<ide> sauceLabs: {
<ide> testName: specificOptions.testName || 'AngularJS',
<del> startConnect: true
<add> startConnect: true,
<add> options: {
<add> 'selenium-version': '2.37.0'
<add> }
<ide> },
<ide>
<ide> // BrowserStack config for local development. | 1 |
Ruby | Ruby | move documentation around a bit | fe9773414a29317ac8fa6c0c911bb88eb2819f40 | <ide><path>activerecord/lib/active_record/enum.rb
<ide> module ActiveRecord
<ide> # Declare an enum attribute where the values map to integers in the database, but can be queried by name. Example:
<ide> #
<ide> # class Conversation < ActiveRecord::Base
<del> # enum status: [:active, :archived]
<del> #
<del> # # same but with explicit mapping
<del> # enum status: {active: 0, archived: 1}
<add> # enum status: [ :active, :archived ]
<ide> # end
<ide> #
<ide> # Conversation::STATUS # => { active: 0, archived: 1 }
<ide> module ActiveRecord
<ide> # end
<ide> #
<ide> # Good practice is to let the first declared status be the default.
<add> #
<add> # Finally, it's also possible to explicitly map the relation between attribute and database integer:
<add> #
<add> # class Conversation < ActiveRecord::Base
<add> # enum status: { active: 0, archived: 1 }
<add> # end
<ide> module Enum
<ide> def enum(definitions)
<ide> definitions.each do |name, values| | 1 |
Ruby | Ruby | fix typo in implicit_render | 9cb7a22811a72ee69d236d05bf17bc30de863e50 | <ide><path>actionpack/lib/action_controller/metal/implicit_render.rb
<ide> def default_render(*args)
<ide> "action but none of them were suitable for this request.\n\n" \
<ide> "This usually happens when the client requested an unsupported format " \
<ide> "(e.g. requesting HTML content from a JSON endpoint or vice versa), but " \
<del> "it might also be failing due to other constraints, such as locales or" \
<add> "it might also be failing due to other constraints, such as locales or " \
<ide> "variants.\n"
<ide>
<ide> if request.formats.any? | 1 |
Javascript | Javascript | add "location" param to `open()` | 52606171bfa837071c2c18ceeae9bc316b110475 | <ide><path>spec/workspace-spec.js
<ide> describe('Workspace', () => {
<ide> })
<ide>
<ide> it('constructs the view with the same panes', () => {
<del> const getRightDockActivePane = () => atom.workspace.getRightDock().getActivePane()
<ide> const pane1 = atom.workspace.getRightDock().getActivePane()
<ide> const pane2 = pane1.splitRight({copyActiveItem: true})
<ide> const pane3 = pane2.splitRight({copyActiveItem: true})
<ide> let pane4 = null
<ide>
<ide> waitsForPromise(() =>
<del> atom.workspace.open(null, {pane: getRightDockActivePane()}).then(editor => editor.setText('An untitled editor.'))
<add> atom.workspace.open(null, {location: 'right'}).then(editor => editor.setText('An untitled editor.'))
<ide> )
<ide>
<ide> waitsForPromise(() =>
<del> atom.workspace.open('b', {pane: getRightDockActivePane()}).then(editor => pane2.activateItem(editor.copy()))
<add> atom.workspace.open('b', {location: 'right'}).then(editor => pane2.activateItem(editor.copy()))
<ide> )
<ide>
<ide> waitsForPromise(() =>
<del> atom.workspace.open('../sample.js', {pane: getRightDockActivePane()}).then(editor => pane3.activateItem(editor))
<add> atom.workspace.open('../sample.js', {location: 'right'}).then(editor => pane3.activateItem(editor))
<ide> )
<ide>
<ide> runs(() => {
<ide> describe('Workspace', () => {
<ide> })
<ide>
<ide> waitsForPromise(() =>
<del> atom.workspace.open('../sample.txt', {pane: getRightDockActivePane()}).then(editor => pane4.activateItem(editor))
<add> atom.workspace.open('../sample.txt', {location: 'right'}).then(editor => pane4.activateItem(editor))
<ide> )
<ide>
<ide> runs(() => {
<ide><path>src/workspace.js
<ide> module.exports = class Workspace extends Model {
<ide> // activate an existing item for the given URI on any pane.
<ide> // If `false`, only the active pane will be searched for
<ide> // an existing item for the same URI. Defaults to `false`.
<add> // * `location` (optional) A {String} containing the name of the location
<add> // in which this item should be opened (one of "left", "right", "bottom",
<add> // or "center"). If omitted, Atom will fall back to the last location in
<add> // which a user has placed an item with the same URI or, if this is a new
<add> // URI, the default location specified by the item. NOTE: This option
<add> // should almost always be omitted to honor user preference.
<ide> //
<ide> // Returns a {Promise} that resolves to the {TextEditor} for the file URI.
<ide> open (uri_, options = {}) {
<ide> module.exports = class Workspace extends Model {
<ide> }
<ide>
<ide> async openItem (item, options = {}) {
<del> let {pane, split} = options
<add> let {pane, split, location} = options
<ide>
<ide> if (item == null) return undefined
<ide> if (pane != null && pane.isDestroyed()) return item
<ide> module.exports = class Workspace extends Model {
<ide> paneContainer = this.getPaneContainers().find(container => container.getPanes().includes(pane))
<ide> }
<ide>
<del> // Determine which location to use, unless a split was provided. In that case, make sure it goes
<del> // in the center location (legacy behavior)
<del> let location
<del> if (paneContainer == null && pane == null && split == null && uri != null) {
<del> location = await this.itemLocationStore.load(uri)
<del> }
<del>
<ide> if (paneContainer == null) {
<add> // Determine which location to use, unless a split was provided. In that case, make sure it goes
<add> // in the center location (legacy behavior)
<add> if (location == null && pane == null && split == null && uri != null) {
<add> location = await this.itemLocationStore.load(uri)
<add> }
<ide> if (location == null && typeof item.getDefaultLocation === 'function') {
<ide> location = item.getDefaultLocation()
<ide> } | 2 |
Java | Java | add constructors to messagehandlingexception | 0e7b94f9e906b6944ef17c33eb4e4f0e04d403e6 | <ide><path>spring-messaging/src/main/java/org/springframework/messaging/MessageHandlingException.java
<ide> /*
<del> * Copyright 2002-2013 the original author or authors.
<add> * Copyright 2002-2014 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> @SuppressWarnings("serial")
<ide> public class MessageHandlingException extends MessagingException {
<ide>
<add> public MessageHandlingException(Message<?> failedMessage) {
<add> super(failedMessage);
<add> }
<add>
<ide> public MessageHandlingException(Message<?> message, String description) {
<ide> super(message, description);
<ide> }
<ide>
<add> public MessageHandlingException(Message<?> failedMessage, Throwable cause) {
<add> super(failedMessage, cause);
<add> }
<add>
<ide> public MessageHandlingException(Message<?> message, String description, Throwable cause) {
<ide> super(message, description, cause);
<ide> } | 1 |
Mixed | Javascript | use samevaluezero in deepstrictequal | ea2e6363f221c1c6ca5b04b295bd6d17ecca355b | <ide><path>doc/api/assert.md
<ide> parameter is undefined, a default error message is assigned.
<ide> <!-- YAML
<ide> added: v1.2.0
<ide> changes:
<add> - version: REPLACEME
<add> pr-url: https://github.com/nodejs/node/pull/15036
<add> description: NaN is now compared using the [SameValueZero][] comparison.
<ide> - version: REPLACEME
<ide> pr-url: https://github.com/nodejs/node/pull/15001
<ide> description: Error names and messages are now properly compared
<ide> changes:
<ide>
<ide> Generally identical to `assert.deepEqual()` with three exceptions:
<ide>
<del>1. Primitive values are compared using the [Strict Equality Comparison][]
<del> ( `===` ). Set values and Map keys are compared using the [SameValueZero][]
<del> comparison. (Which means they are free of the [caveats][]).
<add>1. Primitive values besides `NaN` are compared using the [Strict Equality
<add> Comparison][] ( `===` ). Set and Map values, Map keys and `NaN` are compared
<add> using the [SameValueZero][] comparison (which means they are free of the
<add> [caveats][]).
<ide> 2. [`[[Prototype]]`][prototype-spec] of objects are compared using
<ide> the [Strict Equality Comparison][] too.
<ide> 3. [Type tags][Object.prototype.toString()] of objects should be the same.
<ide> assert.deepEqual(date, fakeDate);
<ide> assert.deepStrictEqual(date, fakeDate);
<ide> // AssertionError: 2017-03-11T14:25:31.849Z deepStrictEqual Date {}
<ide> // Different type tags
<add>assert.deepStrictEqual(NaN, NaN);
<add>// OK, because of the SameValueZero comparison
<ide> ```
<ide>
<ide> If the values are not equal, an `AssertionError` is thrown with a `message`
<ide> parameter is undefined, a default error message is assigned.
<ide> <!-- YAML
<ide> added: v1.2.0
<ide> changes:
<add> - version: REPLACEME
<add> pr-url: https://github.com/nodejs/node/pull/15036
<add> description: NaN is now compared using the [SameValueZero][] comparison.
<ide> - version: REPLACEME
<ide> pr-url: https://github.com/nodejs/node/pull/15001
<ide> description: Error names and messages are now properly compared
<ide><path>lib/assert.js
<ide> function isObjectOrArrayTag(tag) {
<ide> // a) The same built-in type tags
<ide> // b) The same prototypes.
<ide> function strictDeepEqual(actual, expected) {
<del> if (actual === null || expected === null ||
<del> typeof actual !== 'object' || typeof expected !== 'object') {
<add> if (typeof actual !== 'object') {
<add> return typeof actual === 'number' && Number.isNaN(actual) &&
<add> Number.isNaN(expected);
<add> }
<add> if (typeof expected !== 'object' || actual === null || expected === null) {
<ide> return false;
<ide> }
<ide> const actualTag = objectToString(actual);
<ide><path>test/parallel/test-assert-deep.js
<ide> assertOnlyDeepEqual(
<ide> assertDeepAndStrictEqual(m3, m4);
<ide> }
<ide>
<add>// Handle sparse arrays
<ide> assertDeepAndStrictEqual([1, , , 3], [1, , , 3]);
<ide> assertOnlyDeepEqual([1, , , 3], [1, , , 3, , , ]);
<ide>
<ide> assertOnlyDeepEqual([1, , , 3], [1, , , 3, , , ]);
<ide> assertOnlyDeepEqual(err1, {}, assert.AssertionError);
<ide> }
<ide>
<add>// Handle NaN
<add>assert.throws(() => { assert.deepEqual(NaN, NaN); }, assert.AssertionError);
<add>assert.doesNotThrow(() => { assert.deepStrictEqual(NaN, NaN); });
<add>assert.doesNotThrow(() => { assert.deepStrictEqual({ a: NaN }, { a: NaN }); });
<add>assert.doesNotThrow(
<add> () => { assert.deepStrictEqual([ 1, 2, NaN, 4 ], [ 1, 2, NaN, 4 ]); });
<add>
<ide> /* eslint-enable */ | 3 |
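A standalone sketch of the behaviour the new tests pin down (runnable against a Node build that includes this change):

```js
const assert = require('assert');

// Loose deepEqual keeps ==-style semantics, so NaN still never equals NaN:
assert.throws(() => assert.deepEqual(NaN, NaN), assert.AssertionError);

// deepStrictEqual now compares NaN with SameValueZero, so these all pass,
// including when NaN is nested inside objects and arrays:
assert.doesNotThrow(() => assert.deepStrictEqual(NaN, NaN));
assert.doesNotThrow(() => assert.deepStrictEqual({ a: NaN }, { a: NaN }));
assert.doesNotThrow(() => assert.deepStrictEqual([1, 2, NaN, 4], [1, 2, NaN, 4]));
```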
Text | Text | update the challenge helpers | c23043ffd2ea77cdace1018221ab96c70cfbc87d | <ide><path>docs/how-to-work-on-practice-projects.md
<ide> If you want to create new steps, the following tools simplify that process.
<ide>
<ide> ## create-next-step
<ide>
<del>A one-off script that will automatically add the next step based on the last step in the project. The challenge seed code will use the previous step's challenge seed code with the editable region markers (ERMs) removed.
<add>A one-off script that will automatically add the next step based on the last step in the project. The challenge seed code will use the previous step's challenge seed code.
<ide>
<ide> ### How to run script:
<ide>
<ide> npm run create-next-step
<ide>
<ide> A one-off script that automatically adds a specified number of steps. The challenge seed code for all steps created will be empty.
<ide>
<add>**Note:** This script also runs [update-step-titles](#update-step-titles).
<add>
<ide> ### How to run script:
<ide>
<ide> 1. Change to the directory of the project. | 1 |
Text | Text | fix punctuation and wrapping in buffer.md | 08155554bce6351d399c1a5e88f5007bf3255ac1 | <ide><path>doc/api/buffer.md
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 8`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 8`.
<ide> * Returns: {number}
<ide>
<ide> Reads a 64-bit double from `buf` at the specified `offset` with specified
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 4`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 4`.
<ide> * Returns: {number}
<ide>
<ide> Reads a 32-bit float from `buf` at the specified `offset` with specified
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 1`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 1`.
<ide> * Returns: {integer}
<ide>
<ide> Reads a signed 8-bit integer from `buf` at the specified `offset`.
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 2`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 2`.
<ide> * Returns: {integer}
<ide>
<ide> Reads a signed 16-bit integer from `buf` at the specified `offset` with
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 4`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 4`.
<ide> * Returns: {integer}
<ide>
<ide> Reads a signed 32-bit integer from `buf` at the specified `offset` with
<ide> changes:
<ide> byteLength to uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - byteLength`.
<del>* `byteLength` {integer} Number of bytes to read. Must satisfy: `0 < byteLength <= 6`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - byteLength`.
<add>* `byteLength` {integer} Number of bytes to read. Must satisfy
<add> `0 < byteLength <= 6`.
<ide> * Returns: {integer}
<ide>
<ide> Reads `byteLength` number of bytes from `buf` at the specified `offset`
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 1`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 1`.
<ide> * Returns: {integer}
<ide>
<ide> Reads an unsigned 8-bit integer from `buf` at the specified `offset`.
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 2`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 2`.
<ide> * Returns: {integer}
<ide>
<ide> Reads an unsigned 16-bit integer from `buf` at the specified `offset` with
<ide> changes:
<ide> uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - 4`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - 4`.
<ide> * Returns: {integer}
<ide>
<ide> Reads an unsigned 32-bit integer from `buf` at the specified `offset` with
<ide> changes:
<ide> byteLength to uint32 anymore.
<ide> -->
<ide>
<del>* `offset` {integer} Number of bytes to skip before starting to read. Must satisfy: `0 <= offset <= buf.length - byteLength`.
<del>* `byteLength` {integer} Number of bytes to read. Must satisfy: `0 < byteLength <= 6`.
<add>* `offset` {integer} Number of bytes to skip before starting to read. Must
<add> satisfy `0 <= offset <= buf.length - byteLength`.
<add>* `byteLength` {integer} Number of bytes to read. Must satisfy
<add> `0 < byteLength <= 6`.
<ide> * Returns: {integer}
<ide>
<ide> Reads `byteLength` number of bytes from `buf` at the specified `offset`
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {number} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 8`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 8`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset` with specified endian
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {number} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 4`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 4`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset` with specified endian
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 1`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 1`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset`. `value` *should* be a valid
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 2`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 2`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset` with specified endian
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 4`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 4`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset` with specified endian
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - byteLength`.
<del>* `byteLength` {integer} Number of bytes to write. Must satisfy: `0 < byteLength <= 6`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - byteLength`.
<add>* `byteLength` {integer} Number of bytes to write. Must satisfy
<add> `0 < byteLength <= 6`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `byteLength` bytes of `value` to `buf` at the specified `offset`.
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 1`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 1`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset`. `value` *should* be a
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 2`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 2`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset` with specified endian
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write. Must satisfy: `0 <= offset <= buf.length - 4`.
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - 4`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide>
<ide> Writes `value` to `buf` at the specified `offset` with specified endian
<ide> changes:
<ide> -->
<ide>
<ide> * `value` {integer} Number to be written to `buf`.
<del>* `offset` {integer} Number of bytes to skip before starting to write.
<del> Must satisfy: `0 <= offset <= buf.length - byteLength`.
<del>* `byteLength` {integer} Number of bytes to write. Must satisfy:
<add>* `offset` {integer} Number of bytes to skip before starting to write. Must
<add> satisfy `0 <= offset <= buf.length - byteLength`.
<add>* `byteLength` {integer} Number of bytes to write. Must satisfy
<ide> `0 < byteLength <= 6`.
<ide> * Returns: {integer} `offset` plus the number of bytes written.
<ide> | 1 |
Text | Text | remove doc proposal | 9f67978513aef3dfab7e9d2ca96a55481123323a | <ide><path>docs/proposals/atom-docs.md
<del>## Atom Documentation Format
<del>
<del>This document describes our documentation format, which is markdown with
<del>a few rules.
<del>
<del>### Philosophy
<del>
<del>1. Method and argument names **should** clearly communicate its use.
<del>1. Use documentation to enhance and not correct method/argument names.
<del>
<del>#### Basic
<del>
<del>In some cases all that's required is a single line. **Do not** feel
<del>obligated to write more because we have a format.
<del>
<del>```markdown
<del># Private: Returns the number of pixels from the top of the screen.
<del>```
<del>
<del>* **Each method should declare whether it's public or private by using `Public:`
<del>or `Private:`** prefix.
<del>* Following the colon, there should be a short description (that isn't redundant with the
<del>method name).
<del>* Documentation should be hard wrapped to 80 columns.
<del>
<del>### Public vs Private
<del>
<del>If a method is public it can be used by other classes (and possibly by
<del>the public API). The appropriate steps should be taken to minimize the impact
<del>when changing public methods. In some cases that might mean adding an
<del>appropriate release note. In other cases it might mean doing the legwork to
<del>ensure all affected packages are updated.
<del>
<del>#### Complex
<del>
<del>For complex methods it's necessary to explain exactly what arguments
<del>are required and how different inputs effect the operation of the
<del>function.
<del>
<del>The idea is to communicate things that the API user might not know about,
<del>so repeating information that can be gleaned from the method or argument names
<del>is not useful.
<del>
<del>```markdown
<del># Private: Determine the accelerator for a given command.
<del>#
<del># * command:
<del># The name of the command.
<del># * keystrokesByCommand:
<del># An {Object} whose keys are commands and the values are Arrays containing
<del># the keystrokes.
<del># * options:
<del># + accelerators:
<del># Boolean to determine whether accelerators should be shown.
<del>#
<del># Returns a String containing the keystroke in a format that can be interpreted
<del># by atom shell to provide nice icons where available.
<del>#
<del># Raises an Exception if no window is available.
<del>```
<del>
<del>* Use curly brackets `{}` to provide links to other classes.
<del>* Use `+` for the options list. | 1 |
Python | Python | fix py3k compat with functools.reduce | fd4a66cfc7888775d20b18665d63156cf3dae13a | <ide><path>rest_framework/filters.py
<ide> returned by list views.
<ide> """
<ide> from __future__ import unicode_literals
<del>
<ide> from django.db import models
<ide> from rest_framework.compat import django_filters
<add>from functools import reduce
<ide> import operator
<ide>
<ide> FilterSet = django_filters and django_filters.FilterSet or None | 1 |
PHP | PHP | run schedule after booting app | 6a9aa29278e13274549d8205f2b21a2d2cb70e98 | <ide><path>src/Illuminate/Foundation/Console/Kernel.php
<ide> public function __construct(Application $app, Dispatcher $events)
<ide> {
<ide> $this->app = $app;
<ide> $this->events = $events;
<del> $this->defineConsoleSchedule();
<add>
<add> $this->app->booted(function()
<add> {
<add> $this->defineConsoleSchedule();
<add> });
<ide> }
<ide>
<ide> /** | 1 |
Text | Text | add fspromises.readfile() example | 624dadb00706a9fc08f919ac72941cdaba7e3ec9 | <ide><path>doc/api/fs.md
<ide> platform-specific. On macOS, Linux, and Windows, the promise will be rejected
<ide> with an error. On FreeBSD, a representation of the directory's contents will be
<ide> returned.
<ide>
<add>An example of reading a `package.json` file located in the same directory as the
<add>running code:
<add>
<add>```mjs
<add>import { readFile } from 'node:fs/promises';
<add>try {
<add> const filePath = new URL('./package.json', import.meta.url);
<add> const contents = await readFile(filePath, { encoding: 'utf8' });
<add> console.log(contents);
<add>} catch (err) {
<add> console.error(err.message);
<add>}
<add>```
<add>
<add>```cjs
<add>const { readFile } = require('node:fs/promises');
<add>const { resolve } = require('node:path');
<add>async function logFile() {
<add> try {
<add> const filePath = resolve('./package.json');
<add> const contents = await readFile(filePath, { encoding: 'utf8' });
<add> console.log(contents);
<add> } catch (err) {
<add> console.error(err.message);
<add> }
<add>}
<add>logFile();
<add>```
<add>
<ide> It is possible to abort an ongoing `readFile` using an {AbortSignal}. If a
<ide> request is aborted the promise returned is rejected with an `AbortError`:
<ide> | 1 |
Javascript | Javascript | use standard for instead for of | 2c32a6f18961904cfb8366e48ed56db984571928 | <ide><path>examples/js/loaders/GLTFLoader.js
<ide> THREE.GLTFLoader = ( function () {
<ide>
<ide> var keys = Object.keys( attributes ).sort();
<ide>
<del> for ( var key of keys ) {
<add> for ( var i = 0, il = keys.length; i < il; i ++ ) {
<ide>
<del> attributesKey += key + attributes[ key ];
<add> attributesKey += keys[ i ] + attributes[ keys[ i ] ];
<ide>
<ide> }
<ide> | 1 |
Javascript | Javascript | add missing jsdocs for pdfjs globals | 7ebec6c5a447a66e3b0cc1cf814139dc7207a47b | <ide><path>src/display/api.js
<ide> PDFJS.maxImageSize = PDFJS.maxImageSize === undefined ? -1 : PDFJS.maxImageSize;
<ide> * @var {Boolean}
<ide> */
<ide> PDFJS.disableFontFace = PDFJS.disableFontFace === undefined ?
<del> false :
<del> PDFJS.disableFontFace;
<add> false : PDFJS.disableFontFace;
<add>
<add>/**
<add> * Path for image resources, mainly for annotation icons. Include trailing
<add> * slash.
<add> * @var {String}
<add> */
<add>PDFJS.imageResourcesPath = PDFJS.imageResourcesPath === undefined ?
<add> '' : PDFJS.imageResourcesPath;
<add>
<add>/**
<add> * Disable the web worker and run all code on the main thread. This will happen
<add> * automatically if the browser doesn't support workers or sending typed arrays
<add> * to workers.
<add> * @var {Boolean}
<add> */
<add>PDFJS.disableWorker = PDFJS.disableWorker === undefined ?
<add> false : PDFJS.disableWorker;
<add>
<add>/**
<add> * Path and filename of the worker file. Required when the worker is enabled.
<add> * @var {String}
<add> */
<add>PDFJS.workerSrc = PDFJS.workerSrc === undefined ? null : PDFJS.workerSrc;
<add>
<add>/**
<add> * Disable range request loading of PDF files. When enabled and if the server
<add> * supports partial content requests then the PDF will be fetched in chunks.
<add> * Enabled(false) by default.
<add> * @var {Boolean}
<add> */
<add>PDFJS.disableRange = PDFJS.disableRange === undefined ?
<add> false : PDFJS.disableRange;
<add>
<add>/**
<add> * Disable pre-fetching of PDF file data. When range requests are enabled PDF.js
<add> * will automatically keep fetching more data even if it isn't needed to display
<add> * the current page. This default behavior can be disabled.
<add> * @var {Boolean}
<add> */
<add>PDFJS.disableAutoFetch = PDFJS.disableAutoFetch === undefined ?
<add> false : PDFJS.disableAutoFetch;
<add>
<add>/**
<add> * Enables special hooks for debugging PDF.js.
<add> * @var {Boolean}
<add> */
<add>PDFJS.pdfBug = PDFJS.pdfBug === undefined ? false : PDFJS.pdfBug;
<ide>
<ide> /**
<ide> * This is the main entry point for loading a PDF and interacting with it.
<ide> var WorkerTransport = (function WorkerTransportClosure() {
<ide> // as it arrives on the worker. Chrome added this with version 15.
<ide> if (!globalScope.PDFJS.disableWorker && typeof Worker !== 'undefined') {
<ide> var workerSrc = PDFJS.workerSrc;
<del> if (typeof workerSrc === 'undefined') {
<add> if (!workerSrc) {
<ide> error('No PDFJS.workerSrc specified');
<ide> }
<ide> | 1 |
Python | Python | remove changes introduced as part of libcloud-278 | 0f75f5d76a9f3beb42ed92f03fa6bc208125e8a5 | <ide><path>libcloud/compute/ssh.py
<ide> def delete(self, path):
<ide> sftp.close()
<ide>
<ide> def run(self, cmd):
<del> if cmd[0] != '/':
<del> # If 'cmd' based on relative path,
<del> # set the absoute path joining the HOME path
<del> sftp = self.client.open_sftp()
<del> # Chdir to its own directory is mandatory because otherwise
<del> # the 'getcwd()' method returns None
<del> sftp.chdir('.')
<del> cwd = sftp.getcwd()
<del> sftp.close()
<del>
<del> # Join the command to the current path
<del> cmd = pjoin(cwd, cmd)
<del>
<ide> # based on exec_command()
<ide> bufsize = -1
<ide> t = self.client.get_transport()
<ide><path>libcloud/test/compute/test_ssh_client.py
<ide> def test_basic_usage_absolute_path(self):
<ide>
<ide> mock.close()
<ide>
<del> def test_run_script_with_relative_path(self):
<add> def _disabled_test_run_script_with_relative_path(self):
<ide> """
<ide> Execute script with relative path.
<ide> """ | 2 |
Ruby | Ruby | add python27 dependency | 31ed0d6505f26d70350d348980f477e3a66440d9 | <ide><path>Library/Homebrew/requirements.rb
<ide> class GitDependency < Requirement
<ide> default_formula 'git'
<ide> satisfy { which('git') }
<ide> end
<add>
<add>class Python27Dependency < Requirement
<add> fatal true
<add> default_formula 'python'
<add> satisfy do
<add> # Note that python -V outputs to stderr
<add> `python -V 2>&1` =~ /^Python 2.7/
<add> end
<add>end | 1 |
Javascript | Javascript | fix common.pipe path bug | 8e268c70dd5b5d98658544a5d46cd8d8d4b17481 | <ide><path>test/common/index.js
<ide> Object.defineProperty(exports, 'hasFipsCrypto', {
<ide> const localRelative = path.relative(process.cwd(), `${exports.tmpDir}/`);
<ide> const pipePrefix = exports.isWindows ? '\\\\.\\pipe\\' : localRelative;
<ide> const pipeName = `node-test.${process.pid}.sock`;
<del> exports.PIPE = pipePrefix + pipeName;
<add> exports.PIPE = path.join(pipePrefix, pipeName);
<ide> }
<ide>
<ide> { | 1 |
Javascript | Javascript | increase test-crypto.js strictness | e21126d20f10b319593b755312dea13cd39f048a | <ide><path>test/parallel/test-crypto.js
<ide> assert.throws(function() {
<ide> }, /^TypeError: Data must be a string or a buffer$/);
<ide>
<ide>
<del>function assertSorted(list) {
<add>function validateList(list) {
<add> // The list must not be empty
<add> assert(list.length > 0);
<add>
<add> // The list should be sorted.
<ide> // Array#sort() modifies the list in place so make a copy.
<del> const sorted = list.slice().sort();
<add> const sorted = [...list].sort();
<ide> assert.deepStrictEqual(list, sorted);
<add>
<add> // Each element should be unique.
<add> assert.strictEqual([...new Set(list)].length, list.length);
<add>
<add> // Each element should be a string.
<add> assert(list.every((value) => typeof value === 'string'));
<ide> }
<ide>
<ide> // Assume that we have at least AES-128-CBC.
<del>assert.notStrictEqual(0, crypto.getCiphers().length);
<add>const cryptoCiphers = crypto.getCiphers();
<ide> assert(crypto.getCiphers().includes('aes-128-cbc'));
<del>assert(!crypto.getCiphers().includes('AES-128-CBC'));
<del>assertSorted(crypto.getCiphers());
<add>validateList(cryptoCiphers);
<ide>
<ide> // Assume that we have at least AES256-SHA.
<del>assert.notStrictEqual(0, tls.getCiphers().length);
<add>const tlsCiphers = tls.getCiphers();
<ide> assert(tls.getCiphers().includes('aes256-sha'));
<del>assert(!tls.getCiphers().includes('AES256-SHA'));
<del>assertSorted(tls.getCiphers());
<add>// There should be no capital letters in any element.
<add>assert(tlsCiphers.every((value) => /^[^A-Z]+$/.test(value)));
<add>validateList(tlsCiphers);
<ide>
<ide> // Assert that we have sha and sha1 but not SHA and SHA1.
<ide> assert.notStrictEqual(0, crypto.getHashes().length);
<ide> assert(!crypto.getHashes().includes('SHA1'));
<ide> assert(!crypto.getHashes().includes('SHA'));
<ide> assert(crypto.getHashes().includes('RSA-SHA1'));
<ide> assert(!crypto.getHashes().includes('rsa-sha1'));
<del>assertSorted(crypto.getHashes());
<add>validateList(crypto.getHashes());
<ide>
<ide> // Assume that we have at least secp384r1.
<ide> assert.notStrictEqual(0, crypto.getCurves().length);
<ide> assert(crypto.getCurves().includes('secp384r1'));
<ide> assert(!crypto.getCurves().includes('SECP384R1'));
<del>assertSorted(crypto.getCurves());
<add>validateList(crypto.getCurves());
<ide>
<ide> // Regression tests for #5725: hex input that's not a power of two should
<ide> // throw, not assert in C++ land. | 1 |
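The invariants checked by `validateList` can also be spot-checked from a plain script; a small illustrative example (assumes a typical OpenSSL build of Node):

```js
const crypto = require('crypto');
const tls = require('tls');

function looksValid(list) {
  return list.length > 0 &&                                     // non-empty
    list.every((value) => typeof value === 'string') &&         // all strings
    new Set(list).size === list.length &&                       // no duplicates
    JSON.stringify(list) === JSON.stringify([...list].sort());  // already sorted
}

console.log(looksValid(crypto.getCiphers())); // expected: true
console.log(looksValid(crypto.getHashes()));  // expected: true
console.log(looksValid(tls.getCiphers()));    // expected: true
```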
Javascript | Javascript | add missing var | e283a95ab5c629ed9cc4c15d9ffda92006d4bfd5 | <ide><path>packages/sproutcore-views/lib/views/view.js
<ide> SC.View = SC.Object.extend(
<ide> // Normalize property path to be suitable for use
<ide> // as a class name. For exaple, content.foo.barBaz
<ide> // becomes bar-baz.
<del> parts = property.split('.');
<add> var parts = property.split('.');
<ide> return SC.String.dasherize(parts[parts.length-1]);
<ide>
<ide> // If the value is not NO, undefined, or null, return the current | 1 |
Javascript | Javascript | improve the code in test-fs-read-stream | 7a46b992d292654c093639df3605e818827026b6 | <ide><path>test/parallel/test-fs-read-stream-fd.js
<ide> const common = require('../common');
<ide> const fs = require('fs');
<ide> const assert = require('assert');
<ide> const path = require('path');
<del>var file = path.join(common.tmpDir, '/read_stream_fd_test.txt');
<del>var input = 'hello world';
<del>var output = '';
<del>var fd, stream;
<add>const file = path.join(common.tmpDir, '/read_stream_fd_test.txt');
<add>const input = 'hello world';
<ide>
<add>let output = '';
<ide> common.refreshTmpDir();
<ide> fs.writeFileSync(file, input);
<del>fd = fs.openSync(file, 'r');
<ide>
<del>stream = fs.createReadStream(null, { fd: fd, encoding: 'utf8' });
<del>stream.on('data', function(data) {
<add>const fd = fs.openSync(file, 'r');
<add>const stream = fs.createReadStream(null, { fd: fd, encoding: 'utf8' });
<add>
<add>stream.on('data', (data) => {
<ide> output += data;
<ide> });
<ide>
<del>process.on('exit', function() {
<del> fs.unlinkSync(file);
<del> assert.equal(output, input);
<add>process.on('exit', () => {
<add> assert.strictEqual(output, input);
<ide> }); | 1 |
Go | Go | add regression test | e592f1b298c778d0b9adfd6751f5fe1843a7001d | <ide><path>server.go
<ide> func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
<ide> parsedRepo := strings.Split(repoAndTag, ":")[0]
<ide> if strings.Contains(img.ID, repoName) {
<ide> repoName = parsedRepo
<del> if len(strings.Split(repoAndTag, ":")) > 1 {
<add> if len(srv.runtime.repositories.ByID()[img.ID]) == 1 && len(strings.Split(repoAndTag, ":")) > 1 {
<ide> tag = strings.Split(repoAndTag, ":")[1]
<ide> }
<ide> } else if repoName != parsedRepo {
<ide><path>server_test.go
<ide> package docker
<ide>
<ide> import (
<ide> "github.com/dotcloud/docker/utils"
<add> "strings"
<ide> "testing"
<ide> "time"
<ide> )
<ide> func TestLogEvent(t *testing.T) {
<ide> })
<ide>
<ide> }
<add>
<add>func TestRmi(t *testing.T) {
<add> runtime := mkRuntime(t)
<add> defer nuke(runtime)
<add> srv := &Server{runtime: runtime}
<add>
<add> initialImages, err := srv.Images(false, "")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> containerID, err := srv.ContainerCreate(config)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> //To remove
<add> err = srv.ContainerStart(containerID, hostConfig)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> imageID, err := srv.ContainerCommit(containerID, "test", "", "", "", nil)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> err = srv.ContainerTag(imageID, "test", "0.1", false)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> containerID, err = srv.ContainerCreate(config)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> //To remove
<add> err = srv.ContainerStart(containerID, hostConfig)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> _, err = srv.ContainerCommit(containerID, "test", "", "", "", nil)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> images, err := srv.Images(false, "")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> if len(images)-len(initialImages) != 2 {
<add> t.Fatalf("Expected 2 new images, found %d.", len(images)-len(initialImages))
<add> }
<add>
<add> _, err = srv.ImageDelete(imageID, true)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> images, err = srv.Images(false, "")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> if len(images)-len(initialImages) != 1 {
<add> t.Fatalf("Expected 1 new image, found %d.", len(images)-len(initialImages))
<add> }
<add>
<add> for _, image := range images {
<add> if strings.Contains(unitTestImageID, image.ID) {
<add> continue
<add> }
<add> if image.Repository == "" {
<add> t.Fatalf("Expected tagged image, got untagged one.")
<add> }
<add> }
<add>} | 2 |
Java | Java | fix custom config with older httpclient | 7a6ec6952357d23b3f17694236a253f385b14e10 | <ide><path>spring-web/src/main/java/org/springframework/http/client/HttpComponentsClientHttpRequest.java
<ide> public URI getURI() {
<ide> return this.httpRequest.getURI();
<ide> }
<ide>
<add> HttpContext getHttpContext() {
<add> return httpContext;
<add> }
<ide>
<ide> @Override
<ide> protected ClientHttpResponse executeInternal(HttpHeaders headers, byte[] bufferedOutput) throws IOException {
<ide><path>spring-web/src/main/java/org/springframework/http/client/HttpComponentsClientHttpRequestFactory.java
<ide> public HttpClient getHttpClient() {
<ide> public void setConnectTimeout(int timeout) {
<ide> Assert.isTrue(timeout >= 0, "Timeout must be a non-negative value");
<ide> this.connectTimeout = timeout;
<add> setLegacyConnectionTimeout(getHttpClient(), connectTimeout);
<ide> }
<ide>
<ide> /**
<ide> public void setConnectTimeout(int timeout) {
<ide> public void setReadTimeout(int timeout) {
<ide> Assert.isTrue(timeout >= 0, "Timeout must be a non-negative value");
<ide> this.socketTimeout= timeout;
<add> setLegacyReadTimeout(getHttpClient(), socketTimeout);
<ide> }
<ide>
<ide> /**
<ide> protected HttpContext createHttpContext(HttpMethod httpMethod, URI uri) {
<ide> return null;
<ide> }
<ide>
<add> /**
<add> * Apply the specified custom connection timeout for deprecated {@link HttpClient}
<add> * instances.
<add> * <p>As from HttpClient 4.3, default parameters have to be set in a
<add> * {@link RequestConfig} instance instead of setting the parameters
<add> * on the client.
<add> * <p>Unfortunately, this behaviour is not backward compatible and older
<add> * {@link HttpClient} implementations will ignore the {@link RequestConfig}
<add> * object set in the context.
<add> * <p>If the specified client is an older implementation, we set the
<add> * custom connection timeout through the deprecated API. Otherwise, we just
<add> * return as it is set per request with newer clients
<add> * @param client the client to handle
<add> * @param connectionTimeout the custom connection timeout
<add> */
<add> @SuppressWarnings("deprecation")
<add> private void setLegacyConnectionTimeout(HttpClient client, int connectionTimeout) {
<add> if (org.apache.http.impl.client.AbstractHttpClient.class.isInstance(client)) {
<add> client.getParams().setIntParameter(
<add> org.apache.http.params.CoreConnectionPNames.CONNECTION_TIMEOUT, connectionTimeout);
<add> }
<add> }
<add>
<add> /**
<add> * Apply the specified read timeout for deprecated {@link HttpClient}
<add> * instances.
<add> * @param client the client to handle
<add> * @param readTimeout the custom read timeout
<add> * @see #setLegacyConnectionTimeout(org.apache.http.client.HttpClient, int)
<add> */
<add> @SuppressWarnings("deprecation")
<add> private void setLegacyReadTimeout(HttpClient client, int readTimeout) {
<add> if (org.apache.http.impl.client.AbstractHttpClient.class.isInstance(client)) {
<add> client.getParams().setIntParameter(
<add> org.apache.http.params.CoreConnectionPNames.SO_TIMEOUT, readTimeout);
<add> }
<add> }
<add>
<ide> /**
<ide> * Shutdown hook that closes the underlying
<ide> * {@link org.apache.http.conn.HttpClientConnectionManager ClientConnectionManager}'s
<ide><path>spring-web/src/test/java/org/springframework/http/client/HttpComponentsClientHttpRequestFactoryTests.java
<ide> /*
<del> * Copyright 2002-2012 the original author or authors.
<add> * Copyright 2002-2014 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide>
<ide> package org.springframework.http.client;
<ide>
<add>import static org.junit.Assert.*;
<add>
<add>import java.net.URI;
<add>
<add>import org.apache.http.client.HttpClient;
<add>import org.apache.http.client.config.RequestConfig;
<add>import org.apache.http.client.protocol.HttpClientContext;
<add>import org.apache.http.impl.client.DefaultHttpClient;
<add>import org.apache.http.impl.client.HttpClientBuilder;
<add>import org.apache.http.params.CoreConnectionPNames;
<ide> import org.junit.Test;
<ide> import org.springframework.http.HttpMethod;
<ide>
<ide> public void httpMethods() throws Exception {
<ide> assertHttpMethod("patch", HttpMethod.PATCH);
<ide> }
<ide>
<add> @SuppressWarnings("deprecation")
<add> @Test
<add> public void assertLegacyCustomConfig() {
<add> HttpClient httpClient = new DefaultHttpClient(); // Does not support RequestConfig
<add> HttpComponentsClientHttpRequestFactory hrf = new HttpComponentsClientHttpRequestFactory(httpClient);
<add> hrf.setConnectTimeout(1234);
<add> assertEquals(1234, httpClient.getParams().getIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 0));
<add>
<add> hrf.setReadTimeout(4567);
<add> assertEquals(4567, httpClient.getParams().getIntParameter(CoreConnectionPNames.SO_TIMEOUT, 0));
<add> }
<add>
<add> @Test
<add> public void assertCustomConfig() throws Exception {
<add> HttpClient httpClient = HttpClientBuilder.create().build();
<add> HttpComponentsClientHttpRequestFactory hrf = new HttpComponentsClientHttpRequestFactory(httpClient);
<add> hrf.setConnectTimeout(1234);
<add> hrf.setReadTimeout(4567);
<add>
<add> URI uri = new URI(baseUrl + "/status/ok");
<add> HttpComponentsClientHttpRequest request = (HttpComponentsClientHttpRequest)
<add> hrf.createRequest(uri, HttpMethod.GET);
<add>
<add> Object config = request.getHttpContext().getAttribute(HttpClientContext.REQUEST_CONFIG);
<add> assertNotNull("Request config should be set", config);
<add> assertTrue("Wrong request config type" + config.getClass().getName(),
<add> RequestConfig.class.isInstance(config));
<add> RequestConfig requestConfig = (RequestConfig) config;
<add> assertEquals("Wrong custom connection timeout", 1234, requestConfig.getConnectTimeout());
<add> assertEquals("Wrong custom socket timeout", 4567, requestConfig.getSocketTimeout());
<add>
<add> }
<ide> } | 3 |
Text | Text | change my site address | 2b34e67af7a9481595dd3fd78ed39469236e9d03 | <ide><path>docs/Kickstarter-Supporters.md
<ide> These wonderful people supported our Kickstarter by giving us £10 or more:
<ide> * [Andrew Brown](http://pvalu.es)
<ide> * [Bethany Sumner](http://www.bethanysumner.com/)
<ide> * [Orta](http://orta.io)
<del>* [Michał Gołębiowski](https://mgol.eu)
<add>* [Michał Gołębiowski](https://github.com/mgol)
<ide> * [Adam C. Foltzer](http://www.acfoltzer.net/)
<ide> * [Steve Hiemstra](https://www.speg.com)
<ide> * [Anton Sipos](http://www.softwarefuturism.com) | 1 |
Text | Text | update required compiler level for aix | 94596560c2556fae614ef237ed0ff2f749b14651 | <ide><path>BUILDING.md
<ide> Depending on host platform, the selection of toolchains may vary.
<ide> * GCC 4.9.4 or newer
<ide> * Clang 3.4.2 or newer
<ide>
<add>#### AIX
<add>* GCC 6.3 or newer
<add>
<ide> #### Windows
<ide>
<ide> * Visual Studio 2017 or the Build Tools thereof | 1 |
Text | Text | add note about rewrites query updating | 7cd9ffc519c7b81f198dc6171c4c59418c5f15f3 | <ide><path>docs/api-reference/next.config.js/rewrites.md
<ide> module.exports = {
<ide> }
<ide> ```
<ide>
<add>Note: for static pages from the [Automatic Static Optimization](/docs/advanced-features/automatic-static-optimization.md) or [prerendering](/docs/basic-features/data-fetching.md#getstaticprops-static-generation) params from rewrites will be parsed on the client after hydration and provided in the query.
<add>
<ide> ## Path Matching
<ide>
<ide> Path matches are allowed, for example `/blog/:slug` will match `/blog/hello-world` (no nested paths): | 1 |
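A hedged sketch of what the added note means for page code — the file name and component below are illustrative, not taken from the docs:

```js
// pages/blog/[slug].js (illustrative)
import { useRouter } from 'next/router'

export default function BlogPost() {
  const router = useRouter()
  // On a statically optimized page `router.query` starts out empty; params
  // coming from a rewrite are filled in on the client after hydration.
  const { slug } = router.query
  return <p>Post: {slug || 'loading…'}</p>
}
```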
Go | Go | check content type along with content length | 8266c381d62a0790c489e27cc93ed8f4618d03c7 | <ide><path>api/server/server.go
<ide> func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
<ide> )
<ide>
<ide> // allow a nil body for backwards compatibility
<del> if r.Body != nil {
<add> if r.Body != nil && r.ContentLength > 0 {
<ide> if !api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
<del> return fmt.Errorf("Content-Type is not supported: %s", r.Header.Get("Content-Type"))
<add> return fmt.Errorf("Content-Type of application/json is required")
<ide> }
<ide>
<ide> if err := job.DecodeEnv(r.Body); err != nil {
<ide> return err
<ide> }
<ide> }
<add>
<ide> if err := job.Run(); err != nil {
<ide> if err.Error() == "Container already started" {
<ide> w.WriteHeader(http.StatusNotModified) | 1 |
Ruby | Ruby | remove marshal support from schemacache | 65f2eeaaf5774f0891fff700f4defb0b90a05789 | <ide><path>activerecord/lib/active_record/connection_adapters/schema_cache.rb
<ide> def clear_data_source_cache!(name)
<ide> @data_sources.delete name
<ide> end
<ide>
<del> def marshal_dump
<del> # if we get current version during initialization, it happens stack over flow.
<del> @version = connection.migration_context.current_version
<del> [@version, @columns, @columns_hash, @primary_keys, @data_sources]
<del> end
<del>
<del> def marshal_load(array)
<del> @version, @columns, @columns_hash, @primary_keys, @data_sources = array
<del> end
<del>
<ide> private
<ide>
<ide> def prepare_data_sources
<ide><path>activerecord/test/cases/connection_adapters/schema_cache_test.rb
<ide> def test_clearing
<ide> assert_equal 0, @cache.size
<ide> end
<ide>
<del> def test_dump_and_load
<del> @cache.columns("posts")
<del> @cache.columns_hash("posts")
<del> @cache.data_sources("posts")
<del> @cache.primary_keys("posts")
<del>
<del> @cache = Marshal.load(Marshal.dump(@cache))
<del>
<del> assert_no_queries do
<del> assert_equal 12, @cache.columns("posts").size
<del> assert_equal 12, @cache.columns_hash("posts").size
<del> assert @cache.data_sources("posts")
<del> assert_equal "id", @cache.primary_keys("posts")
<del> end
<del> end
<del>
<ide> def test_data_source_exist
<ide> assert @cache.data_source_exists?("posts")
<ide> assert_not @cache.data_source_exists?("foo") | 2 |
Ruby | Ruby | add the class name to the assertion message | f8964bd546b925fda49aa4dc76f14acf7bf84022 | <ide><path>activesupport/test/core_ext/duplicable_test.rb
<ide> def test_duplicable
<ide> end
<ide>
<ide> RAISE_DUP.each do |v|
<del> assert_raises(TypeError) do
<add> assert_raises(TypeError, v.class.name) do
<ide> v.dup
<ide> end
<ide> end | 1 |
Ruby | Ruby | raise a useful exception for incomplete formulae | a05bb488073cc8974d263576ca61706c76e110d0 | <ide><path>Library/Homebrew/formula.rb
<ide> def determine_active_spec
<ide> when @head && ARGV.build_head? then @head # --HEAD
<ide> when @devel && ARGV.build_devel? then @devel # --devel
<ide> when @bottle && install_bottle?(self) then @bottle # bottle available
<add> when @stable then @stable
<ide> when @devel && @stable.nil? then @devel # devel-only
<ide> when @head && @stable.nil? then @head # head-only
<del> else @stable
<add> else
<add> raise "Formulae require at least a URL"
<ide> end
<ide> end
<ide>
<ide><path>Library/Homebrew/test/test_formula_validation.rb
<ide> def test_head_only_valid
<ide> end
<ide> end
<ide> end
<add>
<add> def test_empty_formula_invalid
<add> e = assert_raises(RuntimeError) { formula {} }
<add> assert_equal "Formulae require at least a URL", e.message
<add> end
<ide> end | 2 |
Go | Go | fix deadlock on plugin shutdown | 4d009084de8cad94a180130eb57efa2a98df6d98 | <ide><path>integration-cli/docker_cli_daemon_experimental_test.go
<ide> func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) {
<ide> c.Assert(out, checker.Contains, "true")
<ide> }
<ide>
<del>// TestDaemonRestartWithPluginEnabled tests state restore for a disabled plugin
<add>// TestDaemonRestartWithPluginDisabled tests state restore for a disabled plugin
<ide> func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) {
<ide> if err := s.d.Start(); err != nil {
<ide> c.Fatalf("Could not start daemon: %v", err)
<ide><path>plugin/manager_linux.go
<ide> func (pm *Manager) Shutdown() {
<ide> }
<ide> }
<ide> close(p.exitChan)
<del> pm.Lock()
<del> p.PluginObj.Active = false
<del> pm.save()
<del> pm.Unlock()
<ide> }
<ide> if err := os.RemoveAll(p.runtimeSourcePath); err != nil {
<ide> logrus.Errorf("Remove plugin runtime failed with error: %v", err) | 2 |
Python | Python | fix dict_keys equality test for python 3 | 613df5c6501f715c0775229f34fcba9f4291c05d | <ide><path>rest_framework/utils/mediatypes.py
<ide> def precedence(self):
<ide> return 0
<ide> elif self.sub_type == '*':
<ide> return 1
<del> elif not self.params or self.params.keys() == ['q']:
<add> elif not self.params or list(self.params.keys()) == ['q']:
<ide> return 2
<ide> return 3
<ide> | 1 |
Java | Java | update copyright header | 991eb4858e48535c04fe2cc42ed7778fdb2ed965 | <ide><path>spring-beans/src/main/java/org/springframework/beans/factory/support/PropertiesBeanDefinitionReader.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-context/src/main/java/org/springframework/cache/config/CacheAdviceParser.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-context/src/main/java/org/springframework/jmx/support/JmxUtils.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-context/src/main/java/org/springframework/validation/DataBinder.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-core/src/main/java/org/springframework/util/comparator/Comparators.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-jdbc/src/main/java/org/springframework/jdbc/config/DatabasePopulatorConfigUtils.java
<ide> /*
<del> * Copyright 2002-2016 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-jdbc/src/main/java/org/springframework/jdbc/support/GeneratedKeyHolder.java
<ide> /*
<del> * Copyright 2002-2016 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/simp/stomp/ReactorNettyTcpStompClient.java
<ide> /*
<del> * Copyright 2002-2016 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-test/src/main/java/org/springframework/test/context/BootstrapUtils.java
<ide> /*
<del> * Copyright 2002-2016 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-test/src/main/java/org/springframework/test/web/ModelAndViewAssert.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-web/src/main/java/org/springframework/http/MediaTypeFactory.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-web/src/main/java/org/springframework/http/codec/ServerSentEvent.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-web/src/main/java/org/springframework/remoting/caucho/HessianExporter.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-web/src/main/java/org/springframework/web/util/pattern/RegexPathElement.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/result/method/annotation/InitBinderBindingContext.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistry.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/tags/form/TagWriter.java
<ide> /*
<del> * Copyright 2002-2016 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/view/groovy/GroovyMarkupConfigurer.java
<ide> /*
<del> * Copyright 2002-2017 the original author or authors.
<add> * Copyright 2002-2018 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License. | 18 |
Python | Python | remove redundant function in backtracking sudoku | c824b90ead698da4f10ac38e431844d96af109b6
<ide> def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
<ide> return True
<ide>
<ide>
<del>def is_completed(grid: Matrix) -> bool:
<del> """
<del> This function checks if the puzzle is completed or not.
<del> it is completed when all the cells are assigned with a non-zero number.
<del>
<del> >>> is_completed([[0]])
<del> False
<del> >>> is_completed([[1]])
<del> True
<del> >>> is_completed([[1, 2], [0, 4]])
<del> False
<del> >>> is_completed([[1, 2], [3, 4]])
<del> True
<del> >>> is_completed(initial_grid)
<del> False
<del> >>> is_completed(no_solution)
<del> False
<del> """
<del> return all(all(cell != 0 for cell in row) for row in grid)
<del>
<del>
<ide> def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]:
<ide> """
<ide> This function finds an empty location so that we can assign a number
<ide> def sudoku(grid: Matrix) -> Optional[Matrix]:
<ide> >>> sudoku(no_solution) is None
<ide> True
<ide> """
<del>
<del> if is_completed(grid):
<del> return grid
<del>
<del> location = find_empty_location(grid)
<del> if location is not None:
<add> if location := find_empty_location(grid):
<ide> row, column = location
<ide> else:
<ide> # If the location is ``None``, then the grid is solved. | 1 |
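The deleted is_completed() pass was redundant because find_empty_location() already answers the same question: it returns None once no cell holds 0, and the new assignment expression fetches and tests the location in one step. A small sketch of that pattern on toy grids (Python 3.8+; the helper below is a simplified stand-in for the repository's function and the grids are illustrative):

    def find_empty_location(grid):
        for row, values in enumerate(grid):
            for column, value in enumerate(values):
                if value == 0:
                    return row, column
        return None  # every cell is filled, so a separate is_completed() check adds nothing

    for grid in ([[1, 2], [0, 4]], [[1, 2], [3, 4]]):
        if location := find_empty_location(grid):
            print("next empty cell:", location)
        else:
            print("grid is complete")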
Javascript | Javascript | add key to footer wrapper in windowedlistview | 58db9f399633a18a5ef0119a52dbbadb15b3dff4 | <ide><path>Libraries/Experimental/WindowedListView.js
<ide> class WindowedListView extends React.Component {
<ide> lastRow === this.props.data.length - 1;
<ide> if (this.props.renderFooter) {
<ide> rows.push(
<del> <View style={showFooter ? styles.include : styles.remove}>
<add> <View
<add> key="ind-footer"
<add> style={showFooter ? styles.include : styles.remove}>
<ide> {this.props.renderFooter()}
<ide> </View>
<ide> ); | 1 |
Python | Python | implement ldap authentication | 6ea9e9fa98c0fbd391a8b33e79e1267687570223 | <ide><path>airflow/__init__.py
<ide> import os
<ide> import sys
<ide>
<del>from airflow.configuration import conf, AirflowConfigException
<add>import airflow.configuration
<add>
<ide> from airflow.models import DAG
<ide> from flask.ext.admin import BaseView
<ide> from importlib import import_module
<ide> from airflow.utils import AirflowException
<ide>
<del>DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
<add>DAGS_FOLDER = os.path.expanduser(configuration.conf.get('core', 'DAGS_FOLDER'))
<ide> if DAGS_FOLDER not in sys.path:
<ide> sys.path.append(DAGS_FOLDER)
<ide>
<ide> auth_backend = 'airflow.default_login'
<ide> try:
<del> auth_backend = conf.get('webserver', 'auth_backend')
<del>except AirflowConfigException:
<del> if conf.getboolean('webserver', 'AUTHENTICATE'):
<add> auth_backend = configuration.conf.get('webserver', 'auth_backend')
<add>except configuration.AirflowConfigException:
<add> if configuration.conf.getboolean('webserver', 'AUTHENTICATE'):
<ide> logging.warning("auth_backend not found in webserver config reverting to *deprecated*"
<ide> " behavior of importing airflow_login")
<ide> auth_backend = "airflow_login"
<ide> "Please correct your authentication backend or disable authentication",
<ide> auth_backend
<ide> )
<del> if conf.getboolean('webserver', 'AUTHENTICATE'):
<add> if configuration.conf.getboolean('webserver', 'AUTHENTICATE'):
<ide> raise AirflowException("Failed to import authentication backend")
<ide>
<ide>
<ide><path>airflow/bin/cli.py
<ide>
<ide> import airflow
<ide> from airflow import jobs, settings, utils
<del>from airflow.configuration import conf
<add>from airflow import configuration
<ide> from airflow.executors import DEFAULT_EXECUTOR
<ide> from airflow.models import DagBag, TaskInstance, DagPickle
<ide> from airflow.utils import AirflowException
<ide>
<del>DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
<add>DAGS_FOLDER = os.path.expanduser(configuration.conf.get('core', 'DAGS_FOLDER'))
<ide>
<ide> # Common help text across subcommands
<ide> mark_success_help = "Mark jobs as succeeded without running them"
<ide> subdir_help = "File location or directory from which to look for the dag"
<ide>
<ide>
<ide> def process_subdir(subdir):
<del> dags_folder = conf.get("core", "DAGS_FOLDER")
<add> dags_folder = configuration.conf.get("core", "DAGS_FOLDER")
<ide> dags_folder = os.path.expanduser(dags_folder)
<ide> if subdir:
<ide> subdir = os.path.expanduser(subdir)
<ide> def backfill(args):
<ide> mark_success=args.mark_success,
<ide> include_adhoc=args.include_adhoc,
<ide> local=args.local,
<del> donot_pickle=(args.donot_pickle or conf.getboolean('core', 'donot_pickle')),
<add> donot_pickle=(args.donot_pickle or configuration.conf.getboolean('core', 'donot_pickle')),
<ide> ignore_dependencies=args.ignore_dependencies,
<ide> pool=args.pool)
<ide>
<ide> def run(args):
<ide>
<ide> utils.pessimistic_connection_handling()
<ide> # Setting up logging
<del> log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
<add> log = os.path.expanduser(configuration.conf.get('core', 'BASE_LOG_FOLDER'))
<ide> directory = log + "/{args.dag_id}/{args.task_id}".format(args=args)
<ide> if not os.path.exists(directory):
<ide> os.makedirs(directory)
<ide> def run(args):
<ide> executor.heartbeat()
<ide> executor.end()
<ide>
<del> if conf.get('core', 'S3_LOG_FOLDER').startswith('s3:'):
<add> if configuration.conf.get('core', 'S3_LOG_FOLDER').startswith('s3:'):
<ide> import boto
<del> s3_log = filename.replace(log, conf.get('core', 'S3_LOG_FOLDER'))
<add> s3_log = filename.replace(log, configuration.conf.get('core', 'S3_LOG_FOLDER'))
<ide> bucket, key = s3_log.lstrip('s3:/').split('/', 1)
<ide> if os.path.exists(filename):
<ide>
<ide> def webserver(args):
<ide> print(settings.HEADER)
<ide> log_to_stdout()
<ide> from airflow.www.app import cached_app
<del> app = cached_app(conf)
<del> threads = args.threads or conf.get('webserver', 'threads')
<add> app = cached_app(configuration.conf)
<add> threads = args.threads or configuration.conf.get('webserver', 'threads')
<ide> if args.debug:
<ide> print(
<ide> "Starting the web server on port {0} and host {1}.".format(
<ide> def serve_logs(args):
<ide>
<ide> @flask_app.route('/log/<path:filename>')
<ide> def serve_logs(filename):
<del> log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
<add> log = os.path.expanduser(configuration.conf.get('core', 'BASE_LOG_FOLDER'))
<ide> return flask.send_from_directory(
<ide> log,
<ide> filename,
<ide> mimetype="application/json",
<ide> as_attachment=False)
<ide> WORKER_LOG_SERVER_PORT = \
<del> int(conf.get('celery', 'WORKER_LOG_SERVER_PORT'))
<add> int(configuration.conf.get('celery', 'WORKER_LOG_SERVER_PORT'))
<ide> flask_app.run(
<ide> host='0.0.0.0', port=WORKER_LOG_SERVER_PORT)
<ide>
<ide> def worker(args):
<ide>
<ide>
<ide> def initdb(args):
<del> print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
<add> print("DB: " + configuration.conf.get('core', 'SQL_ALCHEMY_CONN'))
<ide> utils.initdb()
<ide> print("Done.")
<ide>
<ide>
<ide> def resetdb(args):
<del> print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
<add> print("DB: " + configuration.conf.get('core', 'SQL_ALCHEMY_CONN'))
<ide> if input(
<ide> "This will drop existing tables if they exist. "
<ide> "Proceed? (y/n)").upper() == "Y":
<ide> def resetdb(args):
<ide>
<ide>
<ide> def upgradedb(args):
<del> print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
<add> print("DB: " + configuration.conf.get('core', 'SQL_ALCHEMY_CONN'))
<ide> utils.upgradedb()
<ide>
<ide>
<ide> def version(args):
<ide>
<ide>
<ide> def flower(args):
<del> broka = conf.get('celery', 'BROKER_URL')
<del> args.port = args.port or conf.get('celery', 'FLOWER_PORT')
<add> broka = configuration.conf.get('celery', 'BROKER_URL')
<add> args.port = args.port or configuration.conf.get('celery', 'FLOWER_PORT')
<ide> port = '--port=' + args.port
<ide> api = ''
<ide> if args.broker_api:
<ide> def get_parser():
<ide> parser_webserver = subparsers.add_parser('webserver', help=ht)
<ide> parser_webserver.add_argument(
<ide> "-p", "--port",
<del> default=conf.get('webserver', 'WEB_SERVER_PORT'),
<add> default=configuration.conf.get('webserver', 'WEB_SERVER_PORT'),
<ide> type=int,
<ide> help="Set the port on which to run the web server")
<ide> parser_webserver.add_argument(
<ide> "-w", "--threads",
<del> default=conf.get('webserver', 'THREADS'),
<add> default=configuration.conf.get('webserver', 'THREADS'),
<ide> type=int,
<ide> help="Number of threads to run the webserver on")
<ide> parser_webserver.add_argument(
<ide> "-hn", "--hostname",
<del> default=conf.get('webserver', 'WEB_SERVER_HOST'),
<add> default=configuration.conf.get('webserver', 'WEB_SERVER_HOST'),
<ide> help="Set the hostname on which to run the web server")
<ide> ht = "Use the server that ships with Flask in debug mode"
<ide> parser_webserver.add_argument(
<ide> def get_parser():
<ide> parser_worker.add_argument(
<ide> "-q", "--queues",
<ide> help="Comma delimited list of queues to serve",
<del> default=conf.get('celery', 'DEFAULT_QUEUE'))
<add> default=configuration.conf.get('celery', 'DEFAULT_QUEUE'))
<ide> parser_worker.add_argument(
<ide> "-c", "--concurrency",
<ide> type=int,
<ide> help="The number of worker processes",
<del> default=conf.get('celery', 'celeryd_concurrency'))
<add> default=configuration.conf.get('celery', 'celeryd_concurrency'))
<ide> parser_worker.set_defaults(func=worker)
<ide>
<ide> ht = "Serve logs generate by worker"
<ide> def get_parser():
<ide> parser_kerberos = subparsers.add_parser('kerberos', help=ht)
<ide> parser_kerberos.add_argument(
<ide> "-kt", "--keytab", help="keytab",
<del> nargs='?', default=conf.get('kerberos', 'keytab'))
<add> nargs='?', default=configuration.conf.get('kerberos', 'keytab'))
<ide> parser_kerberos.add_argument(
<ide> "principal", help="kerberos principal",
<del> nargs='?', default=conf.get('kerberos', 'principal'))
<add> nargs='?', default=configuration.conf.get('kerberos', 'principal'))
<ide> parser_kerberos.set_defaults(func=kerberos)
<ide>
<ide> return parser
<ide><path>airflow/jobs.py
<ide> from sqlalchemy.orm.session import make_transient
<ide>
<ide> from airflow import executors, models, settings, utils
<del>from airflow.configuration import conf
<add>from airflow import configuration
<ide> from airflow.utils import AirflowException, State
<ide>
<ide>
<ide>
<ide> # Setting up a statsd client if needed
<ide> statsd = None
<del>if conf.getboolean('scheduler', 'statsd_on'):
<add>if configuration.conf.getboolean('scheduler', 'statsd_on'):
<ide> from statsd import StatsClient
<ide> statsd = StatsClient(
<del> host=conf.get('scheduler', 'statsd_host'),
<del> port=conf.getint('scheduler', 'statsd_port'),
<del> prefix=conf.get('scheduler', 'statsd_prefix'))
<add> host=configuration.conf.get('scheduler', 'statsd_host'),
<add> port=configuration.conf.getint('scheduler', 'statsd_port'),
<add> prefix=configuration.conf.get('scheduler', 'statsd_prefix'))
<ide>
<ide>
<ide> class BaseJob(Base):
<ide> class BaseJob(Base):
<ide> def __init__(
<ide> self,
<ide> executor=executors.DEFAULT_EXECUTOR,
<del> heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
<add> heartrate=configuration.conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
<ide> *args, **kwargs):
<ide> self.hostname = socket.gethostname()
<ide> self.executor = executor
<ide> def __init__(
<ide> def is_alive(self):
<ide> return (
<ide> (datetime.now() - self.latest_heartbeat).seconds <
<del> (conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
<add> (configuration.conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
<ide> )
<ide>
<ide> def kill(self):
<ide> def __init__(
<ide> self.do_pickle = do_pickle
<ide> super(SchedulerJob, self).__init__(*args, **kwargs)
<ide>
<del> self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
<add> self.heartrate = configuration.conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
<ide>
<ide> @utils.provide_session
<ide> def manage_slas(self, dag, session=None):
<ide><path>airflow/models.py
<ide>
<ide> from airflow import settings, utils
<ide> from airflow.executors import DEFAULT_EXECUTOR, LocalExecutor
<del>from airflow.configuration import conf
<add>from airflow import configuration
<ide> from airflow.utils import (
<ide> AirflowException, State, apply_defaults, provide_session,
<ide> is_container, as_tuple, TriggerRule)
<ide>
<ide> Base = declarative_base()
<ide> ID_LEN = 250
<del>SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
<del>DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
<add>SQL_ALCHEMY_CONN = configuration.conf.get('core', 'SQL_ALCHEMY_CONN')
<add>DAGS_FOLDER = os.path.expanduser(configuration.conf.get('core', 'DAGS_FOLDER'))
<ide> XCOM_RETURN_KEY = 'return_value'
<ide>
<ide> ENCRYPTION_ON = False
<ide> try:
<ide> from cryptography.fernet import Fernet
<del> FERNET = Fernet(conf.get('core', 'FERNET_KEY').encode('utf-8'))
<add> FERNET = Fernet(configuration.conf.get('core', 'FERNET_KEY').encode('utf-8'))
<ide> ENCRYPTION_ON = True
<ide> except:
<ide> pass
<ide> def __init__(
<ide> self,
<ide> dag_folder=None,
<ide> executor=DEFAULT_EXECUTOR,
<del> include_examples=conf.getboolean('core', 'LOAD_EXAMPLES'),
<add> include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES'),
<ide> sync_to_db=False):
<ide>
<ide> dag_folder = dag_folder or DAGS_FOLDER
<ide> def kill_zombies(self, session):
<ide> """
<ide> from airflow.jobs import LocalTaskJob as LJ
<ide> logging.info("Finding 'running' jobs without a recent heartbeat")
<del> secs = (conf.getint('scheduler', 'job_heartbeat_sec') * 3) + 120
<add> secs = (configuration.conf.getint('scheduler', 'job_heartbeat_sec') * 3) + 120
<ide> limit_dttm = datetime.now() - timedelta(seconds=secs)
<ide> print("Failing jobs without heartbeat after {}".format(limit_dttm))
<ide> jobs = (
<ide> def command(
<ide> @property
<ide> def log_filepath(self):
<ide> iso = self.execution_date.isoformat()
<del> log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
<add> log = os.path.expanduser(configuration.conf.get('core', 'BASE_LOG_FOLDER'))
<ide> return (
<ide> "{log}/{self.dag_id}/{self.task_id}/{iso}.log".format(**locals()))
<ide>
<ide> @property
<ide> def log_url(self):
<ide> iso = self.execution_date.isoformat()
<del> BASE_URL = conf.get('webserver', 'BASE_URL')
<add> BASE_URL = configuration.conf.get('webserver', 'BASE_URL')
<ide> return BASE_URL + (
<ide> "/admin/airflow/log"
<ide> "?dag_id={self.dag_id}"
<ide> def log_url(self):
<ide> @property
<ide> def mark_success_url(self):
<ide> iso = self.execution_date.isoformat()
<del> BASE_URL = conf.get('webserver', 'BASE_URL')
<add> BASE_URL = configuration.conf.get('webserver', 'BASE_URL')
<ide> return BASE_URL + (
<ide> "/admin/airflow/action"
<ide> "?action=success"
<ide> def get_template_context(self):
<ide> 'task_instance': self,
<ide> 'ti': self,
<ide> 'task_instance_key_str': ti_key_str,
<del> 'conf': conf,
<add> 'conf': configuration.conf,
<ide> }
<ide>
<ide> def render_templates(self):
<ide> def __init__(
<ide> default_args=None,
<ide> adhoc=False,
<ide> priority_weight=1,
<del> queue=conf.get('celery', 'default_queue'),
<add> queue=configuration.conf.get('celery', 'default_queue'),
<ide> pool=None,
<ide> sla=None,
<ide> execution_timeout=None,
<ide> def db_merge(self):
<ide> def run(
<ide> self, start_date=None, end_date=None, mark_success=False,
<ide> include_adhoc=False, local=False, executor=None,
<del> donot_pickle=conf.getboolean('core', 'donot_pickle'),
<add> donot_pickle=configuration.conf.getboolean('core', 'donot_pickle'),
<ide> ignore_dependencies=False,
<ide> pool=None):
<ide> from airflow.jobs import BackfillJob
<ide><path>airflow/settings.py
<ide> from sqlalchemy.orm import scoped_session, sessionmaker
<ide> from sqlalchemy import create_engine
<ide>
<del>from airflow.configuration import conf
<add>from airflow import configuration
<ide>
<ide> HEADER = """\
<ide> ____________ _____________
<ide> """
<ide>
<ide> BASE_LOG_URL = '/admin/airflow/log'
<del>AIRFLOW_HOME = os.path.expanduser(conf.get('core', 'AIRFLOW_HOME'))
<del>SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
<add>AIRFLOW_HOME = os.path.expanduser(configuration.conf.get('core', 'AIRFLOW_HOME'))
<add>SQL_ALCHEMY_CONN = configuration.conf.get('core', 'SQL_ALCHEMY_CONN')
<ide> LOGGING_LEVEL = logging.INFO
<del>DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
<add>DAGS_FOLDER = os.path.expanduser(configuration.conf.get('core', 'DAGS_FOLDER'))
<ide>
<ide> engine_args = {}
<ide> if 'sqlite' not in SQL_ALCHEMY_CONN:
<ide><path>airflow/www/app.py
<ide> from airflow.www.blueprints import ck, routes
<ide> from airflow import jobs
<ide> from airflow import settings
<del>from airflow.configuration import conf
<add>from airflow import configuration
<ide>
<ide>
<ide> def create_app(config=None):
<ide> app = Flask(__name__)
<del> app.secret_key = conf.get('webserver', 'SECRET_KEY')
<add> app.secret_key = configuration.conf.get('webserver', 'SECRET_KEY')
<ide> #app.config = config
<ide> login.login_manager.init_app(app)
<ide>
<ide><path>airflow/www/utils.py
<ide> import wtforms
<ide> from wtforms.compat import text_type
<ide>
<del>from airflow.configuration import conf
<add>from airflow import configuration
<ide> from airflow import login, models, settings
<del>AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
<add>AUTHENTICATE = configuration.conf.getboolean('webserver', 'AUTHENTICATE')
<ide>
<ide>
<ide> class LoginMixin(object):
<ide><path>airflow/www/views.py
<ide> from airflow import models
<ide> from airflow.settings import Session
<ide> from airflow import login
<del>from airflow.configuration import conf, AirflowConfigException
<add>from airflow import configuration
<ide> from airflow import utils
<ide> from airflow.utils import AirflowException
<ide> from airflow.www import utils as wwwutils
<ide> QUERY_LIMIT = 100000
<ide> CHART_LIMIT = 200000
<ide>
<del>dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
<add>dagbag = models.DagBag(os.path.expanduser(configuration.conf.get('core', 'DAGS_FOLDER')))
<ide>
<ide> login_required = login.login_required
<ide> current_user = login.current_user
<ide> logout_user = login.logout_user
<ide>
<del>AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
<add>AUTHENTICATE = configuration.conf.getboolean('webserver', 'AUTHENTICATE')
<ide> if AUTHENTICATE is False:
<ide> login_required = lambda x: x
<ide>
<ide> FILTER_BY_OWNER = False
<del>if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
<add>if configuration.conf.getboolean('webserver', 'FILTER_BY_OWNER'):
<ide> # filter_by_owner if authentication is enabled and filter_by_owner is true
<ide> FILTER_BY_OWNER = AUTHENTICATE
<ide>
<ide> def code(self):
<ide> return self.render(
<ide> 'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
<ide> root=request.args.get('root'),
<del> demo_mode=conf.getboolean('webserver', 'demo_mode'))
<add> demo_mode=configuration.conf.getboolean('webserver', 'demo_mode'))
<ide>
<ide> @current_app.errorhandler(404)
<ide> def circles(self):
<ide> def rendered(self):
<ide> @wwwutils.action_logging
<ide> def log(self):
<ide> BASE_LOG_FOLDER = os.path.expanduser(
<del> conf.get('core', 'BASE_LOG_FOLDER'))
<add> configuration.conf.get('core', 'BASE_LOG_FOLDER'))
<ide> dag_id = request.args.get('dag_id')
<ide> task_id = request.args.get('task_id')
<ide> execution_date = request.args.get('execution_date')
<ide> def log(self):
<ide> log = "*** Log file isn't where expected.\n".format(loc)
<ide> else:
<ide> WORKER_LOG_SERVER_PORT = \
<del> conf.get('celery', 'WORKER_LOG_SERVER_PORT')
<add> configuration.conf.get('celery', 'WORKER_LOG_SERVER_PORT')
<ide> url = os.path.join(
<ide> "http://{host}:{WORKER_LOG_SERVER_PORT}/log", log_relative
<ide> ).format(**locals())
<ide> def log(self):
<ide> **locals())
<ide>
<ide> # try to load log backup from S3
<del> s3_log_folder = conf.get('core', 'S3_LOG_FOLDER')
<add> s3_log_folder = configuration.conf.get('core', 'S3_LOG_FOLDER')
<ide> if not log_loaded and s3_log_folder.startswith('s3:'):
<ide> import boto
<ide> s3 = boto.connect_s3()
<ide> s3_log_loc = os.path.join(
<del> conf.get('core', 'S3_LOG_FOLDER'), log_relative)
<add> configuration.conf.get('core', 'S3_LOG_FOLDER'), log_relative)
<ide> log += '*** Fetching log from S3: {}\n'.format(s3_log_loc)
<ide> log += ('*** Note: S3 logs are only available once '
<ide> 'tasks have completed.\n')
<ide> def success(self):
<ide> @wwwutils.action_logging
<ide> def tree(self):
<ide> dag_id = request.args.get('dag_id')
<del> blur = conf.getboolean('webserver', 'demo_mode')
<add> blur = configuration.conf.getboolean('webserver', 'demo_mode')
<ide> dag = dagbag.get_dag(dag_id)
<ide> root = request.args.get('root')
<ide> if root:
<ide> def recurse_nodes(task, visited):
<ide> def graph(self):
<ide> session = settings.Session()
<ide> dag_id = request.args.get('dag_id')
<del> blur = conf.getboolean('webserver', 'demo_mode')
<add> blur = configuration.conf.getboolean('webserver', 'demo_mode')
<ide> arrange = request.args.get('arrange', "LR")
<ide> dag = dagbag.get_dag(dag_id)
<ide> if dag_id not in dagbag.dags:
<ide> def duration(self):
<ide> data=all_data,
<ide> chart_options={'yAxis': {'title': {'text': 'hours'}}},
<ide> height="700px",
<del> demo_mode=conf.getboolean('webserver', 'demo_mode'),
<add> demo_mode=configuration.conf.getboolean('webserver', 'demo_mode'),
<ide> root=root,
<ide> )
<ide>
<ide> def landing_times(self):
<ide> data=all_data,
<ide> height="700px",
<ide> chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
<del> demo_mode=conf.getboolean('webserver', 'demo_mode'),
<add> demo_mode=configuration.conf.getboolean('webserver', 'demo_mode'),
<ide> root=root,
<ide> )
<ide>
<ide> def gantt(self):
<ide> session = settings.Session()
<ide> dag_id = request.args.get('dag_id')
<ide> dag = dagbag.get_dag(dag_id)
<del> demo_mode = conf.getboolean('webserver', 'demo_mode')
<add> demo_mode = configuration.conf.getboolean('webserver', 'demo_mode')
<ide>
<ide> root = request.args.get('root')
<ide> if root:
<ide> def on_model_change(self, form, model, is_created):
<ide>
<ide> @classmethod
<ide> def alert_fernet_key(cls):
<del> return not conf.has_option('core', 'fernet_key')
<add> return not configuration.conf.has_option('core', 'fernet_key')
<ide>
<ide> @classmethod
<ide> def is_secure(self):
<ide> def is_secure(self):
<ide> is_secure = False
<ide> try:
<ide> import cryptography
<del> conf.get('core', 'fernet_key')
<add> configuration.conf.get('core', 'fernet_key')
<ide> is_secure = True
<ide> except:
<ide> pass
<ide> def conf(self):
<ide> raw = request.args.get('raw') == "true"
<ide> title = "Airflow Configuration"
<ide> subtitle = configuration.AIRFLOW_CONFIG
<del> if conf.getboolean("webserver", "expose_config"):
<add> if configuration.conf.getboolean("webserver", "expose_config"):
<ide> with open(configuration.AIRFLOW_CONFIG, 'r') as f:
<ide> config = f.read()
<ide> else:
<ide><path>tests/core.py
<ide> configuration.test_mode()
<ide> from airflow import jobs, models, DAG, utils, operators, hooks, macros
<ide> from airflow.bin import cli
<del>from airflow.configuration import conf
<del>from airflow.www.app import create_app
<add>from airflow.www import app as application
<ide> from airflow.settings import Session
<ide>
<ide> NUM_EXAMPLE_DAGS = 7
<ide> class CliTests(unittest.TestCase):
<ide>
<ide> def setUp(self):
<ide> configuration.test_mode()
<del> app = create_app()
<add> app = application.create_app()
<ide> app.config['TESTING'] = True
<ide> self.parser = cli.get_parser()
<ide> self.dagbag = models.DagBag(
<ide> class WebUiTests(unittest.TestCase):
<ide>
<ide> def setUp(self):
<ide> configuration.test_mode()
<del> app = create_app()
<add> app = application.create_app()
<ide> app.config['TESTING'] = True
<ide> self.app = app.test_client()
<ide>
<ide> def test_charts(self):
<ide> def tearDown(self):
<ide> pass
<ide>
<add>class WebLdapAuthTest(unittest.TestCase):
<add>
<add> def setUp(self):
<add> configuration.conf.set("webserver", "authenticate", "True")
<add> configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
<add> try:
<add> configuration.conf.add_section("ldap")
<add> except:
<add> pass
<add> configuration.conf.set("ldap", "uri", "ldap://localhost")
<add> configuration.conf.set("ldap", "user_filter", "objectClass=*")
<add> configuration.conf.set("ldap", "user_name_attr", "True")
<add> configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
<add> configuration.conf.set("ldap", "bind_password", "insecure")
<add> configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
<add> configuration.conf.set("ldap", "cacert", "")
<add>
<add> app = application.create_app()
<add> app.config['TESTING'] = True
<add> self.app = app.test_client()
<add>
<add> def login(self, username, password):
<add> return self.app.post('/admin/airflow/login', data=dict(
<add> username=username,
<add> password=password
<add> ), follow_redirects=True)
<add>
<add> def logout(self):
<add> return self.app.get('/admin/airflow/logout', follow_redirects=True)
<add>
<add> def test_login_logout_ldap(self):
<add> assert configuration.conf.getboolean('webserver', 'authenticate') is True
<add>
<add> #response = self.login('user1', 'userx')
<add> #print(response.data)
<add> #assert 'Incorrect login details' in response.data
<add>
<add> #response = self.login('userz', 'user1')
<add> #assert 'Incorrect login details' in response.data
<add>
<add> #response = self.login('user1', 'user1')
<add> #assert 'Data Profiling' in response.data
<add>
<add> #response = self.logout()
<add> #assert 'form-signin' in response.data
<add>
<add> def test_unauthorized(self):
<add> response = self.app.get("/admin/connection/")
<add> assert '403 Forbidden' in response.data
<add>
<add> def tearDown(self):
<add> pass
<add>
<add>
<ide> if 'MySqlOperator' in dir(operators):
<ide> # Only testing if the operator is installed
<ide> class MySqlTest(unittest.TestCase): | 9 |
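The new WebLdapAuthTest points webserver.auth_backend at airflow.contrib.auth.backends.ldap_auth, and airflow/__init__.py resolves whatever dotted path is configured via import_module, falling back to airflow.default_login. A minimal sketch of that resolution step; load_auth_backend is an illustrative helper rather than an Airflow API, and the demo imports a stdlib module so it runs anywhere:

    from importlib import import_module

    def load_auth_backend(path):
        # Resolve a dotted module path taken from configuration, the way
        # airflow/__init__.py resolves the webserver.auth_backend setting.
        try:
            return import_module(path)
        except ImportError as err:
            raise RuntimeError("Failed to import authentication backend %r: %s" % (path, err))

    backend = load_auth_backend("json")  # in Airflow this would be e.g. "airflow.contrib.auth.backends.ldap_auth"
    print(backend.__name__)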
Go | Go | move top job | 812798a7d6659178300d00c470a19c7fe5e68d1c | <ide><path>api.go
<ide> func getContainersTop(srv *Server, version float64, w http.ResponseWriter, r *ht
<ide> if err := parseForm(r); err != nil {
<ide> return err
<ide> }
<del> procsStr, err := srv.ContainerTop(vars["name"], r.Form.Get("ps_args"))
<del> if err != nil {
<del> return err
<del> }
<del> return writeJSON(w, http.StatusOK, procsStr)
<add>
<add> job := srv.Eng.Job("top", vars["name"], r.Form.Get("ps_args"))
<add> job.Stdout.Add(w)
<add> return job.Run()
<ide> }
<ide>
<ide> func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
<ide><path>server.go
<ide> func jobInitApi(job *engine.Job) engine.Status {
<ide> job.Error(err)
<ide> return engine.StatusErr
<ide> }
<add> if err := job.Eng.Register("top", srv.ContainerTop); err != nil {
<add> job.Error(err)
<add> return engine.StatusErr
<add> }
<ide> return engine.StatusOK
<ide> }
<ide>
<ide> func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
<ide> return engine.StatusOK
<ide> }
<ide>
<del>func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) {
<add>func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
<add> if len(job.Args) != 1 && len(job.Args) != 2 {
<add> job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
<add> return engine.StatusErr
<add> }
<add> var (
<add> name = job.Args[0]
<add> psArgs = "-ef"
<add> )
<add>
<add> if len(job.Args) == 2 && job.Args[1] != "" {
<add> psArgs = job.Args[1]
<add> }
<add>
<ide> if container := srv.runtime.Get(name); container != nil {
<ide> if !container.State.IsRunning() {
<del> return nil, fmt.Errorf("Container %s is not running", name)
<add> job.Errorf("Container %s is not running", name)
<add> return engine.StatusErr
<ide> }
<ide> pids, err := cgroups.GetPidsForContainer(container.ID)
<ide> if err != nil {
<del> return nil, err
<del> }
<del> if len(psArgs) == 0 {
<del> psArgs = "-ef"
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide> output, err := exec.Command("ps", psArgs).Output()
<ide> if err != nil {
<del> return nil, fmt.Errorf("Error running ps: %s", err)
<add> job.Errorf("Error running ps: %s", err)
<add> return engine.StatusErr
<ide> }
<ide>
<ide> lines := strings.Split(string(output), "\n")
<ide> header := strings.Fields(lines[0])
<del> procs := APITop{
<del> Titles: header,
<del> }
<add> out := &engine.Env{}
<add> out.SetList("Titles", header)
<ide>
<ide> pidIndex := -1
<ide> for i, name := range header {
<ide> func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) {
<ide> }
<ide> }
<ide> if pidIndex == -1 {
<del> return nil, errors.New("Couldn't find PID field in ps output")
<add> job.Errorf("Couldn't find PID field in ps output")
<add> return engine.StatusErr
<ide> }
<ide>
<add> processes := [][]string{}
<ide> for _, line := range lines[1:] {
<ide> if len(line) == 0 {
<ide> continue
<ide> }
<ide> fields := strings.Fields(line)
<ide> p, err := strconv.Atoi(fields[pidIndex])
<ide> if err != nil {
<del> return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
<add> job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
<add> return engine.StatusErr
<ide> }
<ide>
<ide> for _, pid := range pids {
<ide> if pid == p {
<ide> // Make sure number of fields equals number of header titles
<ide> // merging "overhanging" fields
<del> processes := fields[:len(procs.Titles)-1]
<del> processes = append(processes, strings.Join(fields[len(procs.Titles)-1:], " "))
<del>
<del> procs.Processes = append(procs.Processes, processes)
<add> process := fields[:len(header)-1]
<add> process = append(process, strings.Join(fields[len(header)-1:], " "))
<add> processes = append(processes, process)
<ide> }
<ide> }
<ide> }
<del> return &procs, nil
<add> out.SetJson("Processes", processes)
<add> out.WriteTo(job.Stdout)
<add> return engine.StatusOK
<ide>
<ide> }
<del> return nil, fmt.Errorf("No such container: %s", name)
<add> job.Errorf("No such container: %s", name)
<add> return engine.StatusErr
<ide> }
<ide>
<ide> func (srv *Server) ContainerChanges(job *engine.Job) engine.Status { | 2 |
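The core of the relocated top job is how it trims ps output: read the header to find the PID column, keep only rows whose pid belongs to the container's cgroup, and fold any overhanging fields back into the last column so each row matches the number of titles. The same idea in a standalone Python sketch (the sample output and pid set are invented; the production logic is the Go code above):

    def filter_ps_output(output, container_pids):
        lines = output.strip().splitlines()
        titles = lines[0].split()
        pid_index = titles.index("PID")  # same header lookup the Go job performs
        processes = []
        for line in lines[1:]:
            fields = line.split()
            if int(fields[pid_index]) not in container_pids:
                continue
            # merge "overhanging" fields into the final column, as the Go code does
            row = fields[:len(titles) - 1] + [" ".join(fields[len(titles) - 1:])]
            processes.append(row)
        return {"Titles": titles, "Processes": processes}

    sample = "UID  PID  CMD\nroot 42   sleep 1000\nroot 43   ps -ef\n"
    print(filter_ps_output(sample, {42}))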
Text | Text | use serial comma in report docs | 3d0a0b6825972dd690aff1896f219c0bcfdcdb14 | <ide><path>doc/api/report.md
<ide> Node.js report completed
<ide>
<ide> When a report is written, start and end messages are issued to stderr
<ide> and the filename of the report is returned to the caller. The default filename
<del>includes the date, time, PID and a sequence number. The sequence number helps
<add>includes the date, time, PID, and a sequence number. The sequence number helps
<ide> in associating the report dump with the runtime state if generated multiple
<ide> times for the same Node.js process.
<ide>
<ide> Special meaning is attached to `stdout` and `stderr`. Usage of these
<ide> will result in report being written to the associated standard streams.
<ide> In cases where standard streams are used, the value in `directory` is ignored.
<ide> URLs are not supported. Defaults to a composite filename that contains
<del>timestamp, PID and sequence number.
<add>timestamp, PID, and sequence number.
<ide>
<ide> `directory` specifies the filesystem directory where the report will be written.
<ide> URLs are not supported. Defaults to the current working directory of the | 1 |
Java | Java | add getqueryparams to serverhttprequest | a7f32cd5280f78848017538cd824fc57122f6ce0 | <ide><path>spring-web-reactive/src/main/java/org/springframework/http/server/reactive/AbstractServerHttpRequest.java
<ide>
<ide> import java.net.URI;
<ide> import java.net.URISyntaxException;
<del>import java.util.List;
<del>import java.util.Map;
<add>import java.util.regex.Matcher;
<add>import java.util.regex.Pattern;
<ide>
<ide> import org.springframework.http.HttpCookie;
<ide> import org.springframework.http.HttpHeaders;
<del>import org.springframework.util.LinkedCaseInsensitiveMap;
<add>import org.springframework.util.CollectionUtils;
<ide> import org.springframework.util.LinkedMultiValueMap;
<ide> import org.springframework.util.MultiValueMap;
<add>import org.springframework.util.StringUtils;
<ide>
<ide> /**
<ide> * Common base class for {@link ServerHttpRequest} implementations.
<ide> */
<ide> public abstract class AbstractServerHttpRequest implements ServerHttpRequest {
<ide>
<add> private static final Pattern QUERY_PATTERN = Pattern.compile("([^&=]+)(=?)([^&]+)?");
<add>
<add>
<ide> private URI uri;
<ide>
<add> private MultiValueMap<String, String> queryParams;
<add>
<ide> private HttpHeaders headers;
<ide>
<ide> private MultiValueMap<String, HttpCookie> cookies;
<ide> public URI getURI() {
<ide> */
<ide> protected abstract URI initUri() throws URISyntaxException;
<ide>
<add> @Override
<add> public MultiValueMap<String, String> getQueryParams() {
<add> if (this.queryParams == null) {
<add> this.queryParams = CollectionUtils.unmodifiableMultiValueMap(initQueryParams());
<add> }
<add> return this.queryParams;
<add> }
<add>
<add> protected MultiValueMap<String, String> initQueryParams() {
<add> MultiValueMap<String, String> queryParams = new LinkedMultiValueMap<>();
<add> String query = getURI().getRawQuery();
<add> if (query != null) {
<add> Matcher matcher = QUERY_PATTERN.matcher(query);
<add> while (matcher.find()) {
<add> String name = matcher.group(1);
<add> String eq = matcher.group(2);
<add> String value = matcher.group(3);
<add> value = (value != null ? value : (StringUtils.hasLength(eq) ? "" : null));
<add> queryParams.add(name, value);
<add> }
<add> }
<add> return queryParams;
<add> }
<add>
<ide> @Override
<ide> public HttpHeaders getHeaders() {
<ide> if (this.headers == null) {
<ide> public HttpHeaders getHeaders() {
<ide> @Override
<ide> public MultiValueMap<String, HttpCookie> getCookies() {
<ide> if (this.cookies == null) {
<del> this.cookies = new LinkedMultiValueMap<String, HttpCookie>();
<add> this.cookies = new LinkedMultiValueMap<>();
<ide> initCookies(this.cookies);
<ide> }
<ide> return this.cookies;
<ide><path>spring-web-reactive/src/main/java/org/springframework/http/server/reactive/ServerHttpRequest.java
<ide>
<ide> package org.springframework.http.server.reactive;
<ide>
<del>import java.util.List;
<del>import java.util.Map;
<del>
<ide> import org.springframework.http.HttpCookie;
<ide> import org.springframework.http.HttpRequest;
<ide> import org.springframework.http.ReactiveHttpInputMessage;
<ide> */
<ide> public interface ServerHttpRequest extends HttpRequest, ReactiveHttpInputMessage {
<ide>
<add> /**
<add> * Return a read-only map with parsed and decoded query parameter values.
<add> */
<add> MultiValueMap<String, String> getQueryParams();
<add>
<ide> /**
<ide> * Return a read-only map of cookies sent by the client.
<ide> */
<ide><path>spring-web-reactive/src/test/java/org/springframework/http/server/reactive/MockServerHttpRequest.java
<ide> public class MockServerHttpRequest implements ServerHttpRequest {
<ide>
<ide> private URI uri;
<ide>
<add> private MultiValueMap<String, String> queryParams = new LinkedMultiValueMap<>();
<add>
<ide> private HttpHeaders headers = new HttpHeaders();
<ide>
<ide> private MultiValueMap<String, HttpCookie> cookies = new LinkedMultiValueMap<>();
<ide> public HttpHeaders getHeaders() {
<ide> return this.headers;
<ide> }
<ide>
<add> @Override
<add> public MultiValueMap<String, String> getQueryParams() {
<add> return this.queryParams;
<add> }
<add>
<ide> @Override
<ide> public MultiValueMap<String, HttpCookie> getCookies() {
<ide> return this.cookies;
<ide><path>spring-web-reactive/src/test/java/org/springframework/http/server/reactive/ServerHttpRequestTests.java
<add>/*
<add> * Copyright 2002-2016 the original author or authors.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License");
<add> * you may not use this file except in compliance with the License.
<add> * You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software
<add> * distributed under the License is distributed on an "AS IS" BASIS,
<add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add> * See the License for the specific language governing permissions and
<add> * limitations under the License.
<add> */
<add>package org.springframework.http.server.reactive;
<add>
<add>import java.util.Arrays;
<add>import java.util.Collections;
<add>import javax.servlet.http.HttpServletRequest;
<add>
<add>import org.junit.Test;
<add>import reactor.core.publisher.Flux;
<add>
<add>import org.springframework.mock.web.MockHttpServletRequest;
<add>import org.springframework.util.MultiValueMap;
<add>
<add>import static org.junit.Assert.assertEquals;
<add>
<add>/**
<add> * Unit tests for {@link AbstractServerHttpRequest}.
<add> *
<add> * @author Rossen Stoyanchev
<add> */
<add>public class ServerHttpRequestTests {
<add>
<add>
<add> @Test
<add> public void queryParamsNone() throws Exception {
<add> MultiValueMap<String, String> params = createHttpRequest("/path").getQueryParams();
<add> assertEquals(0, params.size());
<add> }
<add>
<add> @Test
<add> public void queryParams() throws Exception {
<add> MultiValueMap<String, String> params = createHttpRequest("/path?a=A&b=B").getQueryParams();
<add> assertEquals(2, params.size());
<add> assertEquals(Collections.singletonList("A"), params.get("a"));
<add> assertEquals(Collections.singletonList("B"), params.get("b"));
<add> }
<add>
<add> @Test
<add> public void queryParamsWithMulitpleValues() throws Exception {
<add> MultiValueMap<String, String> params = createHttpRequest("/path?a=1&a=2").getQueryParams();
<add> assertEquals(1, params.size());
<add> assertEquals(Arrays.asList("1", "2"), params.get("a"));
<add> }
<add>
<add> @Test
<add> public void queryParamsWithEmptyValue() throws Exception {
<add> MultiValueMap<String, String> params = createHttpRequest("/path?a=").getQueryParams();
<add> assertEquals(1, params.size());
<add> assertEquals(Collections.singletonList(""), params.get("a"));
<add> }
<add>
<add> @Test
<add> public void queryParamsWithNoValue() throws Exception {
<add> MultiValueMap<String, String> params = createHttpRequest("/path?a").getQueryParams();
<add> assertEquals(1, params.size());
<add> assertEquals(Collections.singletonList(null), params.get("a"));
<add> }
<add>
<add>
<add> private ServerHttpRequest createHttpRequest(String path) {
<add> HttpServletRequest servletRequest = new MockHttpServletRequest("GET", path);
<add> return new ServletServerHttpRequest(servletRequest, Flux.empty());
<add> }
<add>
<add>} | 4 |
Go | Go | add experimental docker stack commands | 71104bb592dc98467d3828394eabcbe50ca22ae4 | <ide><path>api/client/bundlefile/bundlefile.go
<add>// +build experimental
<add>
<add>package bundlefile
<add>
<add>import (
<add> "encoding/json"
<add> "io"
<add> "os"
<add>)
<add>
<add>// Bundlefile stores the contents of a bundlefile
<add>type Bundlefile struct {
<add> Version string
<add> Services map[string]Service
<add>}
<add>
<add>// Service is a service from a bundlefile
<add>type Service struct {
<add> Image string
<add> Command []string `json:",omitempty"`
<add> Args []string `json:",omitempty"`
<add> Env []string `json:",omitempty"`
<add> Labels map[string]string `json:",omitempty"`
<add> Ports []Port `json:",omitempty"`
<add> WorkingDir *string `json:",omitempty"`
<add> User *string `json:",omitempty"`
<add> Networks []string `json:",omitempty"`
<add>}
<add>
<add>// Port is a port as defined in a bundlefile
<add>type Port struct {
<add> Protocol string
<add> Port uint32
<add>}
<add>
<add>// LoadFile loads a bundlefile from a path to the file
<add>func LoadFile(path string) (*Bundlefile, error) {
<add> reader, err := os.Open(path)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> bundlefile := &Bundlefile{}
<add>
<add> if err := json.NewDecoder(reader).Decode(bundlefile); err != nil {
<add> return nil, err
<add> }
<add>
<add> return bundlefile, err
<add>}
<add>
<add>// Print writes the contents of the bundlefile to the output writer
<add>// as human readable json
<add>func Print(out io.Writer, bundle *Bundlefile) error {
<add> bytes, err := json.MarshalIndent(*bundle, "", " ")
<add> if err != nil {
<add> return err
<add> }
<add>
<add> _, err = out.Write(bytes)
<add> return err
<add>}
<ide><path>api/client/node/cmd.go
<ide> import (
<ide> func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command {
<ide> cmd := &cobra.Command{
<ide> Use: "node",
<del> Short: "Manage docker swarm nodes",
<add> Short: "Manage Docker Swarm nodes",
<ide> Args: cli.NoArgs,
<ide> Run: func(cmd *cobra.Command, args []string) {
<ide> fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
<ide><path>api/client/service/cmd.go
<ide> import (
<ide> func NewServiceCommand(dockerCli *client.DockerCli) *cobra.Command {
<ide> cmd := &cobra.Command{
<ide> Use: "service",
<del> Short: "Manage docker services",
<add> Short: "Manage Docker services",
<ide> Args: cli.NoArgs,
<ide> Run: func(cmd *cobra.Command, args []string) {
<ide> fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
<ide><path>api/client/stack/cmd.go
<add>// +build experimental
<add>
<add>package stack
<add>
<add>import (
<add> "fmt"
<add>
<add> "github.com/docker/docker/api/client"
<add> "github.com/docker/docker/cli"
<add> "github.com/spf13/cobra"
<add>)
<add>
<add>// NewStackCommand returns a cobra command for `stack` subcommands
<add>func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> cmd := &cobra.Command{
<add> Use: "stack",
<add> Short: "Manage Docker stacks",
<add> Args: cli.NoArgs,
<add> Run: func(cmd *cobra.Command, args []string) {
<add> fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
<add> },
<add> }
<add> cmd.AddCommand(
<add> newConfigCommand(dockerCli),
<add> newDeployCommand(dockerCli),
<add> newRemoveCommand(dockerCli),
<add> newTasksCommand(dockerCli),
<add> )
<add> return cmd
<add>}
<add>
<add>// NewTopLevelDeployCommand return a command for `docker deploy`
<add>func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> cmd := newDeployCommand(dockerCli)
<add> // Remove the aliases at the top level
<add> cmd.Aliases = []string{}
<add> return cmd
<add>}
<ide><path>api/client/stack/cmd_stub.go
<add>// +build !experimental
<add>
<add>package stack
<add>
<add>import (
<add> "github.com/docker/docker/api/client"
<add> "github.com/spf13/cobra"
<add>)
<add>
<add>// NewStackCommand returns nocommand
<add>func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> return &cobra.Command{}
<add>}
<add>
<add>// NewTopLevelDeployCommand return no command
<add>func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> return &cobra.Command{}
<add>}
<ide><path>api/client/stack/common.go
<add>// +build experimental
<add>
<add>package stack
<add>
<add>import (
<add> "golang.org/x/net/context"
<add>
<add> "github.com/docker/engine-api/client"
<add> "github.com/docker/engine-api/types"
<add> "github.com/docker/engine-api/types/filters"
<add> "github.com/docker/engine-api/types/swarm"
<add>)
<add>
<add>const (
<add> labelNamespace = "com.docker.stack.namespace"
<add>)
<add>
<add>func getStackLabels(namespace string, labels map[string]string) map[string]string {
<add> if labels == nil {
<add> labels = make(map[string]string)
<add> }
<add> labels[labelNamespace] = namespace
<add> return labels
<add>}
<add>
<add>func getStackFilter(namespace string) filters.Args {
<add> filter := filters.NewArgs()
<add> filter.Add("label", labelNamespace+"="+namespace)
<add> return filter
<add>}
<add>
<add>func getServices(
<add> ctx context.Context,
<add> apiclient client.APIClient,
<add> namespace string,
<add>) ([]swarm.Service, error) {
<add> return apiclient.ServiceList(
<add> ctx,
<add> types.ServiceListOptions{Filter: getStackFilter(namespace)})
<add>}
<add>
<add>func getNetworks(
<add> ctx context.Context,
<add> apiclient client.APIClient,
<add> namespace string,
<add>) ([]types.NetworkResource, error) {
<add> return apiclient.NetworkList(
<add> ctx,
<add> types.NetworkListOptions{Filters: getStackFilter(namespace)})
<add>}
<ide><path>api/client/stack/config.go
<add>// +build experimental
<add>
<add>package stack
<add>
<add>import (
<add> "github.com/docker/docker/api/client"
<add> "github.com/docker/docker/api/client/bundlefile"
<add> "github.com/docker/docker/cli"
<add> "github.com/spf13/cobra"
<add>)
<add>
<add>type configOptions struct {
<add> bundlefile string
<add> namespace string
<add>}
<add>
<add>func newConfigCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> var opts configOptions
<add>
<add> cmd := &cobra.Command{
<add> Use: "config [OPTIONS] STACK",
<add> Short: "Print the stack configuration",
<add> Args: cli.ExactArgs(1),
<add> RunE: func(cmd *cobra.Command, args []string) error {
<add> opts.namespace = args[0]
<add> return runConfig(dockerCli, opts)
<add> },
<add> }
<add>
<add> flags := cmd.Flags()
<add> addBundlefileFlag(&opts.bundlefile, flags)
<add> return cmd
<add>}
<add>
<add>func runConfig(dockerCli *client.DockerCli, opts configOptions) error {
<add> bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
<add> if err != nil {
<add> return err
<add> }
<add> return bundlefile.Print(dockerCli.Out(), bundle)
<add>}
<ide><path>api/client/stack/deploy.go
<add>// +build experimental
<add>
<add>package stack
<add>
<add>import (
<add> "fmt"
<add>
<add> "github.com/spf13/cobra"
<add> "golang.org/x/net/context"
<add>
<add> "github.com/docker/docker/api/client"
<add> "github.com/docker/docker/api/client/bundlefile"
<add> "github.com/docker/docker/cli"
<add> "github.com/docker/engine-api/types"
<add> "github.com/docker/engine-api/types/network"
<add> "github.com/docker/engine-api/types/swarm"
<add>)
<add>
<add>const (
<add> defaultNetworkDriver = "overlay"
<add>)
<add>
<add>type deployOptions struct {
<add> bundlefile string
<add> namespace string
<add>}
<add>
<add>func newDeployCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> var opts deployOptions
<add>
<add> cmd := &cobra.Command{
<add> Use: "deploy [OPTIONS] STACK",
<add> Aliases: []string{"up"},
<add> Short: "Create and update a stack",
<add> Args: cli.ExactArgs(1),
<add> RunE: func(cmd *cobra.Command, args []string) error {
<add> opts.namespace = args[0]
<add> return runDeploy(dockerCli, opts)
<add> },
<add> }
<add>
<add> flags := cmd.Flags()
<add> addBundlefileFlag(&opts.bundlefile, flags)
<add> return cmd
<add>}
<add>
<add>func runDeploy(dockerCli *client.DockerCli, opts deployOptions) error {
<add> bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> networks := getUniqueNetworkNames(bundle.Services)
<add> ctx := context.Background()
<add>
<add> if err := updateNetworks(ctx, dockerCli, networks, opts.namespace); err != nil {
<add> return err
<add> }
<add> return deployServices(ctx, dockerCli, bundle.Services, opts.namespace)
<add>}
<add>
<add>func getUniqueNetworkNames(services map[string]bundlefile.Service) []string {
<add> networkSet := make(map[string]bool)
<add> for _, service := range services {
<add> for _, network := range service.Networks {
<add> networkSet[network] = true
<add> }
<add> }
<add>
<add> networks := []string{}
<add> for network := range networkSet {
<add> networks = append(networks, network)
<add> }
<add> return networks
<add>}
<add>
<add>func updateNetworks(
<add> ctx context.Context,
<add> dockerCli *client.DockerCli,
<add> networks []string,
<add> namespace string,
<add>) error {
<add> client := dockerCli.Client()
<add>
<add> existingNetworks, err := getNetworks(ctx, client, namespace)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> existingNetworkMap := make(map[string]types.NetworkResource)
<add> for _, network := range existingNetworks {
<add> existingNetworkMap[network.Name] = network
<add> }
<add>
<add> createOpts := types.NetworkCreate{
<add> Labels: getStackLabels(namespace, nil),
<add> Driver: defaultNetworkDriver,
<add> // TODO: remove when engine-api uses omitempty for IPAM
<add> IPAM: network.IPAM{Driver: "default"},
<add> }
<add>
<add> for _, internalName := range networks {
<add> name := fmt.Sprintf("%s_%s", namespace, internalName)
<add>
<add> if _, exists := existingNetworkMap[name]; exists {
<add> continue
<add> }
<add> fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name)
<add> if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil {
<add> return err
<add> }
<add> }
<add> return nil
<add>}
<add>
<add>func convertNetworks(networks []string, namespace string, name string) []swarm.NetworkAttachmentConfig {
<add> nets := []swarm.NetworkAttachmentConfig{}
<add> for _, network := range networks {
<add> nets = append(nets, swarm.NetworkAttachmentConfig{
<add> Target: namespace + "_" + network,
<add> Aliases: []string{name},
<add> })
<add> }
<add> return nets
<add>}
<add>
<add>func deployServices(
<add> ctx context.Context,
<add> dockerCli *client.DockerCli,
<add> services map[string]bundlefile.Service,
<add> namespace string,
<add>) error {
<add> apiClient := dockerCli.Client()
<add> out := dockerCli.Out()
<add>
<add> existingServices, err := getServices(ctx, apiClient, namespace)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> existingServiceMap := make(map[string]swarm.Service)
<add> for _, service := range existingServices {
<add> existingServiceMap[service.Spec.Name] = service
<add> }
<add>
<add> for internalName, service := range services {
<add> name := fmt.Sprintf("%s_%s", namespace, internalName)
<add>
<add> var ports []swarm.PortConfig
<add> for _, portSpec := range service.Ports {
<add> ports = append(ports, swarm.PortConfig{
<add> Protocol: swarm.PortConfigProtocol(portSpec.Protocol),
<add> TargetPort: portSpec.Port,
<add> })
<add> }
<add>
<add> serviceSpec := swarm.ServiceSpec{
<add> Annotations: swarm.Annotations{
<add> Name: name,
<add> Labels: getStackLabels(namespace, service.Labels),
<add> },
<add> TaskTemplate: swarm.TaskSpec{
<add> ContainerSpec: swarm.ContainerSpec{
<add> Image: service.Image,
<add> Command: service.Command,
<add> Args: service.Args,
<add> Env: service.Env,
<add> },
<add> },
<add> EndpointSpec: &swarm.EndpointSpec{
<add> Ports: ports,
<add> },
<add> Networks: convertNetworks(service.Networks, namespace, internalName),
<add> }
<add>
<add> cspec := &serviceSpec.TaskTemplate.ContainerSpec
<add> if service.WorkingDir != nil {
<add> cspec.Dir = *service.WorkingDir
<add> }
<add> if service.User != nil {
<add> cspec.User = *service.User
<add> }
<add>
<add> if service, exists := existingServiceMap[name]; exists {
<add> fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID)
<add>
<add> if err := apiClient.ServiceUpdate(
<add> ctx,
<add> service.ID,
<add> service.Version,
<add> serviceSpec,
<add> ); err != nil {
<add> return err
<add> }
<add> } else {
<add> fmt.Fprintf(out, "Creating service %s\n", name)
<add>
<add> if _, err := apiClient.ServiceCreate(ctx, serviceSpec); err != nil {
<add> return err
<add> }
<add> }
<add> }
<add>
<add> return nil
<add>}
<ide><path>api/client/stack/opts.go
<add>// +build experimental
<add>
<add>package stack
<add>
<add>import (
<add> "fmt"
<add> "io"
<add> "os"
<add>
<add> "github.com/docker/docker/api/client/bundlefile"
<add> "github.com/spf13/pflag"
<add>)
<add>
<add>func addBundlefileFlag(opt *string, flags *pflag.FlagSet) {
<add> flags.StringVarP(
<add> opt,
<add> "bundle", "f", "",
<add> "Path to a bundle (Default: STACK.dsb)")
<add>}
<add>
<add>func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) {
<add> defaultPath := fmt.Sprintf("%s.dsb", namespace)
<add>
<add> if path == "" {
<add> path = defaultPath
<add> }
<add> if _, err := os.Stat(path); err != nil {
<add> return nil, fmt.Errorf(
<add> "Bundle %s not found. Specify the path with -f or --bundle",
<add> path)
<add> }
<add>
<add> fmt.Fprintf(stderr, "Loading bundle from %s\n", path)
<add> bundle, err := bundlefile.LoadFile(path)
<add> if err != nil {
<add> return nil, fmt.Errorf("Error reading %s: %v\n", path, err)
<add> }
<add> return bundle, err
<add>}
<ide><path>api/client/stack/remove.go
<add>// +build experimental
<add>
<add>package stack
<add>
<add>import (
<add> "fmt"
<add>
<add> "golang.org/x/net/context"
<add>
<add> "github.com/docker/docker/api/client"
<add> "github.com/docker/docker/cli"
<add> "github.com/spf13/cobra"
<add>)
<add>
<add>type removeOptions struct {
<add> namespace string
<add>}
<add>
<add>func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> var opts removeOptions
<add>
<add> cmd := &cobra.Command{
<add> Use: "rm STACK",
<add> Aliases: []string{"remove", "down"},
<add> Short: "Remove the stack",
<add> Args: cli.ExactArgs(1),
<add> RunE: func(cmd *cobra.Command, args []string) error {
<add> opts.namespace = args[0]
<add> return runRemove(dockerCli, opts)
<add> },
<add> }
<add> return cmd
<add>}
<add>
<add>func runRemove(dockerCli *client.DockerCli, opts removeOptions) error {
<add> namespace := opts.namespace
<add> client := dockerCli.Client()
<add> stderr := dockerCli.Err()
<add> ctx := context.Background()
<add> hasError := false
<add>
<add> services, err := getServices(ctx, client, namespace)
<add> if err != nil {
<add> return err
<add> }
<add> for _, service := range services {
<add> fmt.Fprintf(stderr, "Removing service %s\n", service.Spec.Name)
<add> if err := client.ServiceRemove(ctx, service.ID); err != nil {
<add> hasError = true
<add> fmt.Fprintf(stderr, "Failed to remove service %s: %s", service.ID, err)
<add> }
<add> }
<add>
<add> networks, err := getNetworks(ctx, client, namespace)
<add> if err != nil {
<add> return err
<add> }
<add> for _, network := range networks {
<add> fmt.Fprintf(stderr, "Removing network %s\n", network.Name)
<add> if err := client.NetworkRemove(ctx, network.ID); err != nil {
<add> hasError = true
<add> fmt.Fprintf(stderr, "Failed to remove network %s: %s", network.ID, err)
<add> }
<add> }
<add>
<add> if hasError {
<add> return fmt.Errorf("Failed to remove some resources")
<add> }
<add> return nil
<add>}
<ide><path>api/client/stack/tasks.go
<add>// +build experimental
<add>
<add>package stack
<add>
<add>import (
<add> "golang.org/x/net/context"
<add>
<add> "github.com/docker/docker/api/client"
<add> "github.com/docker/docker/api/client/idresolver"
<add> "github.com/docker/docker/api/client/task"
<add> "github.com/docker/docker/cli"
<add> "github.com/docker/docker/opts"
<add> "github.com/docker/engine-api/types"
<add> "github.com/docker/engine-api/types/swarm"
<add> "github.com/spf13/cobra"
<add>)
<add>
<add>type tasksOptions struct {
<add> all bool
<add> filter opts.FilterOpt
<add> namespace string
<add> noResolve bool
<add>}
<add>
<add>func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command {
<add> opts := tasksOptions{filter: opts.NewFilterOpt()}
<add>
<add> cmd := &cobra.Command{
<add> Use: "tasks [OPTIONS] STACK",
<add> Short: "List the tasks in the stack",
<add> Args: cli.ExactArgs(1),
<add> RunE: func(cmd *cobra.Command, args []string) error {
<add> opts.namespace = args[0]
<add> return runTasks(dockerCli, opts)
<add> },
<add> }
<add> flags := cmd.Flags()
<add> flags.BoolVarP(&opts.all, "all", "a", false, "Display all tasks")
<add> flags.BoolVarP(&opts.noResolve, "no-resolve", "n", false, "Do not map IDs to Names")
<add> flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")
<add>
<add> return cmd
<add>}
<add>
<add>func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
<add> client := dockerCli.Client()
<add> ctx := context.Background()
<add>
<add> filter := opts.filter.Value()
<add> filter.Add("label", labelNamespace+"="+opts.namespace)
<add> if !opts.all && !filter.Include("desired_state") {
<add> filter.Add("desired_state", string(swarm.TaskStateRunning))
<add> filter.Add("desired_state", string(swarm.TaskStateAccepted))
<add> }
<add>
<add> tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter})
<add> if err != nil {
<add> return err
<add> }
<add>
<add> return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve))
<add>}
<ide><path>api/client/swarm/cmd.go
<ide> import (
<ide> func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command {
<ide> cmd := &cobra.Command{
<ide> Use: "swarm",
<del> Short: "Manage docker swarm",
<add> Short: "Manage Docker Swarm",
<ide> Args: cli.NoArgs,
<ide> Run: func(cmd *cobra.Command, args []string) {
<ide> fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
<ide><path>cli/cobraadaptor/adaptor.go
<ide> import (
<ide> "github.com/docker/docker/api/client/node"
<ide> "github.com/docker/docker/api/client/registry"
<ide> "github.com/docker/docker/api/client/service"
<add> "github.com/docker/docker/api/client/stack"
<ide> "github.com/docker/docker/api/client/swarm"
<ide> "github.com/docker/docker/api/client/system"
<ide> "github.com/docker/docker/api/client/volume"
<ide> func NewCobraAdaptor(clientFlags *cliflags.ClientFlags) CobraAdaptor {
<ide> rootCmd.AddCommand(
<ide> node.NewNodeCommand(dockerCli),
<ide> service.NewServiceCommand(dockerCli),
<add> stack.NewStackCommand(dockerCli),
<add> stack.NewTopLevelDeployCommand(dockerCli),
<ide> swarm.NewSwarmCommand(dockerCli),
<ide> container.NewAttachCommand(dockerCli),
<ide> container.NewCommitCommand(dockerCli),
<ide> func NewCobraAdaptor(clientFlags *cliflags.ClientFlags) CobraAdaptor {
<ide> func (c CobraAdaptor) Usage() []cli.Command {
<ide> cmds := []cli.Command{}
<ide> for _, cmd := range c.rootCmd.Commands() {
<del> cmds = append(cmds, cli.Command{Name: cmd.Name(), Description: cmd.Short})
<add> if cmd.Name() != "" {
<add> cmds = append(cmds, cli.Command{Name: cmd.Name(), Description: cmd.Short})
<add> }
<ide> }
<ide> return cmds
<ide> } | 13 |
Javascript | Javascript | change formatmessage parameter to getstack | 9993fdd957248beb8950cb3a6986ddcff7437ea0 | <ide><path>src/isomorphic/classic/types/checkPropTypes.js
<ide> var loggedTypeFailures = {};
<ide> * @param {object} values Runtime values that need to be type-checked
<ide> * @param {string} location e.g. "prop", "context", "child context"
<ide> * @param {string} componentName Name of the component for error messages.
<del> * @param {?Function} formatMessage Function that transforms the error message, to add additional info.
<add> * @param {?Function} getStack Returns the component stack.
<ide> * @private
<ide> */
<del>function checkPropTypes(typeSpecs, values, location, componentName, formatMessage) {
<add>function checkPropTypes(typeSpecs, values, location, componentName, getStack) {
<ide> for (var typeSpecName in typeSpecs) {
<ide> if (typeSpecs.hasOwnProperty(typeSpecName)) {
<ide> var error;
<ide> function checkPropTypes(typeSpecs, values, location, componentName, formatMessag
<ide> // same error.
<ide> loggedTypeFailures[error.message] = true;
<ide>
<del> var message = formatMessage ? formatMessage(error.message) : error.message;
<add> var stack = getStack ? getStack() : '';
<ide>
<ide> warning(
<ide> false,
<del> 'Failed %s type: %s',
<add> 'Failed %s type: %s%s',
<ide> location,
<del> message,
<add> error.message,
<add> stack,
<ide> );
<ide> }
<ide> }
<ide><path>src/shared/types/checkReactTypeSpec.js
<ide> function checkReactTypeSpec(
<ide> // only during reconciliation (begin and complete phase).
<ide> workInProgressOrDebugID
<ide> ) {
<del> function formatMessage(message) {
<add> function getStack() {
<add> let stack = '';
<ide> if (__DEV__) {
<del> let componentStackInfo = '';
<ide> if (!ReactComponentTreeHook) {
<ide> ReactComponentTreeHook = require('ReactComponentTreeHook');
<ide> }
<ide> if (workInProgressOrDebugID != null) {
<ide> if (typeof workInProgressOrDebugID === 'number') {
<ide> // DebugID from Stack.
<ide> const debugID = workInProgressOrDebugID;
<del> componentStackInfo = ReactComponentTreeHook.getStackAddendumByID(debugID);
<add> stack = ReactComponentTreeHook.getStackAddendumByID(debugID);
<ide> } else if (typeof workInProgressOrDebugID.tag === 'number') {
<ide> // This is a Fiber.
<ide> // The stack will only be correct if this is a work in progress
<ide> // version and we're calling it during reconciliation.
<ide> const workInProgress = workInProgressOrDebugID;
<del> componentStackInfo = ReactComponentTreeHook.getStackAddendumByWorkInProgressFiber(workInProgress);
<add> stack = ReactComponentTreeHook.getStackAddendumByWorkInProgressFiber(workInProgress);
<ide> }
<ide> } else if (element !== null) {
<del> componentStackInfo = ReactComponentTreeHook.getCurrentStackAddendum(element);
<add> stack = ReactComponentTreeHook.getCurrentStackAddendum(element);
<ide> }
<del> message += componentStackInfo;
<ide> }
<del> return message;
<add> return stack;
<ide> }
<ide>
<del> checkPropTypes(typeSpecs, values, location, componentName, formatMessage);
<add> checkPropTypes(typeSpecs, values, location, componentName, getStack);
<ide> }
<ide>
<ide> module.exports = checkReactTypeSpec; | 2 |
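For illustration, a minimal sketch of how the reworked helper is called once the patch above lands; the hand-written validator and the stubbed stack string are hypothetical stand-ins for React's real propTypes validators and ReactComponentTreeHook output:

var typeSpecs = {
  count: function(props, propName, componentName) {
    return typeof props[propName] === 'number' ?
      null :
      new Error('`' + propName + '` on `' + componentName + '` must be a number.');
  },
};

checkPropTypes(typeSpecs, {count: 'oops'}, 'prop', 'Counter', function getStack() {
  // React passes a closure over ReactComponentTreeHook here; a fixed string
  // stands in for the real component stack addendum.
  return '\n    in Counter (at example.js:1)';
});
// Warns once per unique message: "Failed prop type: ..." followed by the stack.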
Javascript | Javascript | add additional test case for css order in dev | 906da4c9ec76c0f75b3d7301cf23b3ab6131cb5e | <ide><path>test/integration/css/test/index.test.js
<ide> describe('CSS Support', () => {
<ide> )
<ide> expect(currentColor).toMatchInlineSnapshot(`"rgb(0, 0, 255)"`)
<ide> })
<add>
<add> it('should have the correct color (css ordering) during hot reloads', async () => {
<add> let browser
<add> try {
<add> browser = await webdriver(appPort, '/')
<add>
<add> const currentColor = await browser.eval(
<add> `window.getComputedStyle(document.querySelector('#blueText')).color`
<add> )
<add> expect(currentColor).toMatchInlineSnapshot(`"rgb(0, 0, 255)"`)
<add>
<add> const cssFile = new File(join(appDir, 'pages/index.module.css'))
<add> try {
<add> cssFile.replace('color: blue;', 'color: blue; ')
<add> await waitFor(2000) // wait for HMR
<add>
<add> const refreshedColor = await browser.eval(
<add> `window.getComputedStyle(document.querySelector('#blueText')).color`
<add> )
<add> expect(refreshedColor).toMatchInlineSnapshot(`"rgb(0, 0, 255)"`)
<add> } finally {
<add> cssFile.restore()
<add> }
<add> } finally {
<add> if (browser) {
<add> await browser.close()
<add> }
<add> }
<add> })
<ide> })
<ide>
<ide> describe('Ordering with Global CSS and Modules (prod)', () => { | 1 |
Java | Java | use fixed gmt time-zone for websession clock | ba3a12e4ccadb561603869dd208629296d6a6473 | <ide><path>spring-web/src/main/java/org/springframework/web/server/session/DefaultWebSessionManager.java
<ide>
<ide> import java.time.Clock;
<ide> import java.time.Instant;
<add>import java.time.ZoneId;
<ide> import java.util.List;
<ide> import java.util.UUID;
<ide>
<ide> public class DefaultWebSessionManager implements WebSessionManager {
<ide>
<ide> private WebSessionStore sessionStore = new InMemoryWebSessionStore();
<ide>
<del> private Clock clock = Clock.systemDefaultZone();
<add> private Clock clock = Clock.system(ZoneId.of("GMT"));
<ide>
<ide>
<ide> /**
<ide> public WebSessionStore getSessionStore() {
<ide> * Configure the {@link Clock} for access to current time. During tests you
<ide> * may use {code Clock.offset(clock, Duration.ofMinutes(-31))} to set the
<ide> * clock back for example to test changes after sessions expire.
<del> * <p>By default {@link Clock#systemDefaultZone()} is used.
<add> * <p>By default {@code Clock.system(ZoneId.of("GMT"))} is used.
<ide> * @param clock the clock to use
<ide> */
<ide> public void setClock(Clock clock) { | 1 |
Text | Text | add changelog entry for 3110cae [ci skip] | f1e625f85385f820ea69b092fe1d5d241c4f4c41 | <ide><path>activejob/CHANGELOG.md
<add>* Allow passing multiple exceptions to `retry_on`, and `discard_on`.
<add>
<add> *George Claghorn*
<add>
<ide> * Pass the error instance as the second parameter of block executed by `discard_on`.
<ide>
<ide> Fixes #32853. | 1 |
Text | Text | update docs with minimum node version 14.x | df35cf32d4ec454ad8cbe62ebc7c6ad164479a1b | <ide><path>docs/getting-started.md
<ide> If you have questions about anything related to Next.js, you're always welcome t
<ide>
<ide> #### System Requirements
<ide>
<del>- [Node.js 12.22.0](https://nodejs.org/) or later
<add>- [Node.js 14.0.0](https://nodejs.org/) or newer
<ide> - MacOS, Windows (including WSL), and Linux are supported
<ide>
<ide> ## Automatic Setup
<ide><path>docs/upgrading.md
<ide> description: Learn how to upgrade Next.js.
<ide>
<ide> ## Upgrading from 12 to 13
<ide>
<add>The minimum Node.js version has been bumped from 12.22.0 to 14.0.0, since 12.x has reached end-of-life.
<add>
<ide> The `next/image` import was renamed to `next/legacy/image`. The `next/future/image` import was renamed to `next/image`.
<ide> A [codemod is available](/docs/advanced-features/codemods.md#next-image-to-legacy-image) to safely and automatically rename your imports.
<ide> | 2 |
Ruby | Ruby | push arg checking up | 960398cb261ba1a1ee331d79976be486f957cd78 | <ide><path>actionpack/lib/action_dispatch/http/url.rb
<ide> def extract_subdomain(host, tld_length = @@tld_length)
<ide> end
<ide>
<ide> def url_for(options)
<add> unless options[:host] || options[:only_path]
<add> raise ArgumentError, 'Missing host to link to! Please provide the :host parameter, set default_url_options[:host], or set :only_path to true'
<add> end
<add>
<ide> path = options[:script_name].to_s.chomp("/")
<ide> path << options[:path].to_s
<ide>
<ide> def url_for(options)
<ide> private
<ide>
<ide> def build_host_url(options)
<del> unless options[:host] || options[:only_path]
<del> raise ArgumentError, 'Missing host to link to! Please provide the :host parameter, set default_url_options[:host], or set :only_path to true'
<del> end
<del>
<ide> result = ""
<ide>
<ide> unless options[:only_path] | 1 |
Javascript | Javascript | save window options when opening a window | ab1bc2e173b762018e0b50e7f38983f2edaf9eff | <ide><path>src/main-process/atom-application.js
<ide> class AtomApplication extends EventEmitter {
<ide> window.browserWindow.removeListener('blur', blurHandler)
<ide> })
<ide> window.browserWindow.webContents.once('did-finish-load', blurHandler)
<add> this.saveCurrentWindowOptions(false)
<ide> }
<ide> }
<ide> | 1 |
Javascript | Javascript | add continuous build | 8ef676c18f0805b6868c6b44623f35bcd1811870 | <ide><path>Gruntfile.js
<ide> module.exports = function(grunt) {
<ide> return !s_ignoreRE.test(filename);
<ide> }
<ide>
<add> const s_isMdRE = /\.md$/i;
<add> function mdsOnly(filename) {
<add> return s_isMdRE.test(filename);
<add> }
<add>
<ide> function notFolder(filename) {
<ide> return !fs.statSync(filename).isDirectory();
<ide> }
<ide> module.exports = function(grunt) {
<ide> clean: [
<ide> 'out/**/*',
<ide> ],
<add> buildlesson: {
<add> main: {
<add> files: [],
<add> },
<add> },
<ide> watch: {
<ide> main: {
<ide> files: [
<ide> module.exports = function(grunt) {
<ide> spawn: false,
<ide> },
<ide> },
<add> lessons: {
<add> files: [
<add> 'threejs/lessons/**/threejs*.md',
<add> ],
<add> tasks: ['buildlesson'],
<add> options: {
<add> spawn: false,
<add> },
<add> },
<ide> },
<ide> });
<ide>
<ide> module.exports = function(grunt) {
<ide> dest: 'out/',
<ide> };
<ide> }));
<add> grunt.config('buildlesson.main.files', Object.keys(changedFiles).filter(mdsOnly).map((file) => {
<add> return {
<add> src: file,
<add> };
<add> }));
<ide> changedFiles = {};
<ide> }, 200);
<ide> grunt.event.on('watch', function(action, filepath) {
<ide> changedFiles[filepath] = action;
<ide> onChange();
<ide> });
<ide>
<add> const buildSettings = {
<add> outDir: 'out',
<add> baseUrl: 'http://threejsfundamentals.org',
<add> rootFolder: 'threejs',
<add> lessonGrep: 'threejs*.md',
<add> siteName: 'ThreeJSFundamentals',
<add> siteThumbnail: 'threejsfundamentals.jpg', // in rootFolder/lessons/resources
<add> templatePath: 'build/templates',
<add> };
<add>
<add> // just the hackiest way to get this working.
<add> grunt.registerMultiTask('buildlesson', 'build a lesson', function() {
<add> const filenames = new Set();
<add> this.files.forEach((files) => {
<add> files.src.forEach((filename) => {
<add> filenames.add(filename);
<add> });
<add> });
<add> const buildStuff = require('./build/js/build');
<add> const settings = Object.assign({}, buildSettings, {
<add> filenames,
<add> });
<add> const finish = this.async();
<add> buildStuff(settings).then(function() {
<add> finish();
<add> }).done();
<add> });
<add>
<ide> grunt.registerTask('buildlessons', function() {
<ide> const buildStuff = require('./build/js/build');
<ide> const finish = this.async();
<del> buildStuff({
<del> outDir: 'out',
<del> baseUrl: 'http://threejsfundamentals.org',
<del> rootFolder: 'threejs',
<del> lessonGrep: 'threejs*.md',
<del> siteName: 'ThreeJSFundamentals',
<del> siteThumbnail: 'threejsfundamentals.jpg', // in rootFolder/lessons/resources
<del> }).then(function() {
<del> finish();
<add> buildStuff(buildSettings).then(function() {
<add> finish();
<ide> }).done();
<ide> });
<ide>
<ide><path>build/js/build.js
<ide> if (parseInt((/^v(\d+)\./).exec(process.version)[1]) < requiredNodeVersion) {
<ide>
<ide> module.exports = function(settings) { // wrapper in case we're in module_context mode
<ide>
<add>const hackyProcessSelectFiles = settings.filenames !== undefined;
<add>
<ide> const cache = new (require('inmemfilecache'))();
<ide> const Feed = require('feed').Feed;
<ide> const fs = require('fs');
<ide> function slashify(s) {
<ide> }
<ide>
<ide> function articleFilter(f) {
<add> if (hackyProcessSelectFiles) {
<add> if (!settings.filenames.has(f)) {
<add> return false;
<add> }
<add> }
<ide> return !process.env['ARTICLE_FILTER'] || f.indexOf(process.env['ARTICLE_FILTER']) >= 0;
<ide> }
<ide>
<ide> const Builder = function(outBaseDir, options) {
<ide> });
<ide> }
<ide>
<add> if (hackyProcessSelectFiles) {
<add> return Promise.resolve();
<add> }
<add>
<ide> // generate place holders for non-translated files
<ide> const missing = g_origArticles.filter(name => articlesFilenames.indexOf(name) < 0);
<ide> missing.forEach(name => {
<ide> const Builder = function(outBaseDir, options) {
<ide> table_of_contents: '',
<ide> templateOptions: '',
<ide> });
<add>
<add> {
<add> const filename = path.join(settings.outDir, 'link-check.html');
<add> const html = `
<add> <html>
<add> <body>
<add> ${langs.map(lang => `<a href="${lang.home}">${lang.lang}</a>`).join('\n')}
<add> </body>
<add> </html>
<add> `;
<add> writeFileIfChanged(filename, html);
<add> }
<ide> };
<ide>
<ide>
<ide> langs = langs.concat(readdirs(`${settings.rootFolder}/lessons`)
<ide>
<ide> b.preProcess(langs);
<ide>
<del>{
<del> const filename = path.join(settings.outDir, 'link-check.html');
<del> const html = `
<del> <html>
<del> <body>
<del> ${langs.map(lang => `<a href="${lang.home}">${lang.lang}</a>`).join('\n')}
<del> </body>
<del> </html>
<del> `;
<del> writeFileIfChanged(filename, html);
<add>if (hackyProcessSelectFiles) {
<add> const langsInFilenames = new Set();
<add> [...settings.filenames].forEach((filename) => {
<add> const m = /lessons\/(\w{2}|\w{5})\//.exec(filename);
<add> const lang = m ? m[1] : 'en';
<add> langsInFilenames.add(lang);
<add> });
<add> langs = langs.filter(lang => langsInFilenames.has(lang.lang));
<ide> }
<ide>
<ide> const tasks = langs.map(function(lang) {
<ide> const tasks = langs.map(function(lang) {
<ide> return tasks.reduce(function(cur, next) {
<ide> return cur.then(next);
<ide> }, Promise.resolve()).then(function() {
<del> b.writeGlobalFiles();
<del> cache.clear();
<add> if (!hackyProcessSelectFiles) {
<add> b.writeGlobalFiles(langs);
<add> }
<ide> return numErrors ? Promise.reject(new Error(`${numErrors} errors`)) : Promise.resolve();
<add>}).finally(() => {
<add> cache.clear();
<ide> });
<ide>
<ide> }; | 2 |
PHP | PHP | add webp to image validation rule | 0ecc2589023f8d0b51353c2cdd04f683342e1a61 | <ide><path>src/Illuminate/Validation/Concerns/ValidatesAttributes.php
<ide> public function validateLte($attribute, $value, $parameters)
<ide> */
<ide> public function validateImage($attribute, $value)
<ide> {
<del> return $this->validateMimes($attribute, $value, ['jpeg', 'png', 'gif', 'bmp', 'svg']);
<add> return $this->validateMimes($attribute, $value, ['jpeg', 'png', 'gif', 'bmp', 'svg', 'webp']);
<ide> }
<ide>
<ide> /**
<ide><path>tests/Validation/ValidationValidatorTest.php
<ide> public function testValidateImage()
<ide> $file6->expects($this->any())->method('getClientOriginalExtension')->will($this->returnValue('svg'));
<ide> $v = new Validator($trans, ['x' => $file6], ['x' => 'Image']);
<ide> $this->assertTrue($v->passes());
<add>
<add> $file7 = $this->getMockBuilder(UploadedFile::class)->setMethods(['guessExtension', 'getClientOriginalExtension'])->setConstructorArgs($uploadedFile)->getMock();
<add> $file7->expects($this->any())->method('guessExtension')->will($this->returnValue('webp'));
<add> $file7->expects($this->any())->method('getClientOriginalExtension')->will($this->returnValue('webp'));
<add> $v = new Validator($trans, ['x' => $file7], ['x' => 'Image']);
<add> $this->assertTrue($v->passes());
<ide> }
<ide>
<ide> public function testValidateImageDoesNotAllowPhpExtensionsOnImageMime() | 2 |
Javascript | Javascript | update styles on build indicator container | ca85cc9973e4e90419bc5062e0d1803deca064f3 | <ide><path>packages/next/client/dev-build-watcher.js
<ide> export default function initializeBuildWatcher () {
<ide> let shadowRoot
<ide> let prefix = ''
<ide>
<add> // Make sure container is fixed and on a high zIndex so it shows
<add> shadowHost.style.position = 'fixed'
<add> shadowHost.style.bottom = '10px'
<add> shadowHost.style.right = '10px'
<add> shadowHost.style.zIndex = 99999
<add>
<ide> if (shadowHost.attachShadow) {
<ide> shadowRoot = shadowHost.attachShadow({ mode: 'open' })
<ide> } else { | 1 |
Javascript | Javascript | remove `flushdiscreteupdates` from end of event | 78120032d4ca7ee8611d630d7c67e7808885dfe9 | <ide><path>packages/react-dom/src/__tests__/ReactDOMNestedEvents-test.js
<add>/**
<add> * Copyright (c) Facebook, Inc. and its affiliates.
<add> *
<add> * This source code is licensed under the MIT license found in the
<add> * LICENSE file in the root directory of this source tree.
<add> *
<add> * @emails react-core
<add> */
<add>
<add>'use strict';
<add>
<add>describe('ReactDOMNestedEvents', () => {
<add> let React;
<add> let ReactDOM;
<add> let Scheduler;
<add> let TestUtils;
<add> let act;
<add> let useState;
<add>
<add> beforeEach(() => {
<add> jest.resetModules();
<add> React = require('react');
<add> ReactDOM = require('react-dom');
<add> Scheduler = require('scheduler');
<add> TestUtils = require('react-dom/test-utils');
<add> act = TestUtils.unstable_concurrentAct;
<add> useState = React.useState;
<add> });
<add>
<add> // @gate experimental
<add> test('nested event dispatches should not cause updates to flush', async () => {
<add> const buttonRef = React.createRef(null);
<add> function App() {
<add> const [isClicked, setIsClicked] = useState(false);
<add> const [isFocused, setIsFocused] = useState(false);
<add> const onClick = () => {
<add> setIsClicked(true);
<add> const el = buttonRef.current;
<add> el.focus();
<add> // The update triggered by the focus event should not have flushed yet.
<add> // Nor the click update. They would have if we had wrapped the focus
<add> // call in `flushSync`, though.
<add> Scheduler.unstable_yieldValue(
<add> 'Value right after focus call: ' + el.innerHTML,
<add> );
<add> };
<add> const onFocus = () => {
<add> setIsFocused(true);
<add> };
<add> return (
<add> <>
<add> <button ref={buttonRef} onFocus={onFocus} onClick={onClick}>
<add> {`Clicked: ${isClicked}, Focused: ${isFocused}`}
<add> </button>
<add> </>
<add> );
<add> }
<add>
<add> const container = document.createElement('div');
<add> document.body.appendChild(container);
<add> const root = ReactDOM.unstable_createRoot(container);
<add>
<add> await act(async () => {
<add> root.render(<App />);
<add> });
<add> expect(buttonRef.current.innerHTML).toEqual(
<add> 'Clicked: false, Focused: false',
<add> );
<add>
<add> await act(async () => {
<add> buttonRef.current.click();
<add> });
<add> expect(Scheduler).toHaveYielded([
<add> 'Value right after focus call: Clicked: false, Focused: false',
<add> ]);
<add> expect(buttonRef.current.innerHTML).toEqual('Clicked: true, Focused: true');
<add> });
<add>});
<ide><path>packages/react-dom/src/events/ReactDOMEventListener.js
<ide> import {
<ide> getSuspenseInstanceFromFiber,
<ide> } from 'react-reconciler/src/ReactFiberTreeReflection';
<ide> import {HostRoot, SuspenseComponent} from 'react-reconciler/src/ReactWorkTags';
<del>import {
<del> type EventSystemFlags,
<del> IS_CAPTURE_PHASE,
<del> IS_LEGACY_FB_SUPPORT_MODE,
<del>} from './EventSystemFlags';
<add>import {type EventSystemFlags, IS_CAPTURE_PHASE} from './EventSystemFlags';
<ide>
<ide> import getEventTarget from './getEventTarget';
<ide> import {getClosestInstanceFromNode} from '../client/ReactDOMComponentTree';
<ide>
<del>import {enableLegacyFBSupport} from 'shared/ReactFeatureFlags';
<ide> import {dispatchEventForPluginEventSystem} from './DOMPluginEventSystem';
<del>import {
<del> flushDiscreteUpdatesIfNeeded,
<del> discreteUpdates,
<del>} from './ReactDOMUpdateBatching';
<add>import {discreteUpdates} from './ReactDOMUpdateBatching';
<ide>
<ide> import {
<ide> getCurrentPriorityLevel as getCurrentSchedulerPriorityLevel,
<ide> function dispatchDiscreteEvent(
<ide> container,
<ide> nativeEvent,
<ide> ) {
<del> if (
<del> !enableLegacyFBSupport ||
<del> // If we are in Legacy FB support mode, it means we've already
<del> // flushed for this event and we don't need to do it again.
<del> (eventSystemFlags & IS_LEGACY_FB_SUPPORT_MODE) === 0
<del> ) {
<del> flushDiscreteUpdatesIfNeeded(nativeEvent.timeStamp);
<del> }
<ide> discreteUpdates(
<ide> dispatchEvent,
<ide> domEventName,
<ide><path>packages/react-dom/src/events/ReactDOMUpdateBatching.js
<ide> export function discreteUpdates(fn, a, b, c, d) {
<ide> }
<ide> }
<ide>
<del>// TODO: Replace with flushSync
<del>export function flushDiscreteUpdatesIfNeeded(timeStamp: number) {
<del> if (!isInsideEventHandler) {
<del> flushDiscreteUpdatesImpl();
<del> }
<del>}
<del>
<ide> export function setBatchingImplementation(
<ide> _batchedUpdatesImpl,
<ide> _discreteUpdatesImpl,
<ide><path>packages/react-dom/src/events/__tests__/DOMPluginEventSystem-test.internal.js
<ide> describe('DOMPluginEventSystem', () => {
<ide> document.body.removeChild(parentContainer);
<ide> });
<ide>
<del> it('handle click events on dynamic portals', () => {
<add> it('handle click events on dynamic portals', async () => {
<ide> const log = [];
<ide>
<ide> function Parent() {
<ide> describe('DOMPluginEventSystem', () => {
<ide> ref.current,
<ide> ),
<ide> );
<del> });
<add> }, []);
<ide>
<ide> return (
<ide> <div ref={ref} onClick={() => log.push('parent')} id="parent">
<ide> describe('DOMPluginEventSystem', () => {
<ide> );
<ide> }
<ide>
<del> ReactDOM.render(<Parent />, container);
<add> await act(async () => {
<add> ReactDOM.render(<Parent />, container);
<add> });
<ide>
<ide> const parent = container.lastChild;
<ide> expect(parent.id).toEqual('parent');
<del> dispatchClickEvent(parent);
<add>
<add> await act(async () => {
<add> dispatchClickEvent(parent);
<add> });
<ide>
<ide> expect(log).toEqual(['parent']);
<ide>
<ide> const child = parent.lastChild;
<ide> expect(child.id).toEqual('child');
<del> dispatchClickEvent(child);
<add>
<add> await act(async () => {
<add> dispatchClickEvent(child);
<add> });
<ide>
<ide> // we add both 'child' and 'parent' due to bubbling
<ide> expect(log).toEqual(['parent', 'child', 'parent']);
<ide> });
<ide>
<ide> // Slight alteration to the last test, to catch
<ide> // a subtle difference in traversal.
<del> it('handle click events on dynamic portals #2', () => {
<add> it('handle click events on dynamic portals #2', async () => {
<ide> const log = [];
<ide>
<ide> function Parent() {
<ide> describe('DOMPluginEventSystem', () => {
<ide> ref.current,
<ide> ),
<ide> );
<del> });
<add> }, []);
<ide>
<ide> return (
<ide> <div ref={ref} onClick={() => log.push('parent')} id="parent">
<ide> describe('DOMPluginEventSystem', () => {
<ide> );
<ide> }
<ide>
<del> ReactDOM.render(<Parent />, container);
<add> await act(async () => {
<add> ReactDOM.render(<Parent />, container);
<add> });
<ide>
<ide> const parent = container.lastChild;
<ide> expect(parent.id).toEqual('parent');
<del> dispatchClickEvent(parent);
<add>
<add> await act(async () => {
<add> dispatchClickEvent(parent);
<add> });
<ide>
<ide> expect(log).toEqual(['parent']);
<ide>
<ide> const child = parent.lastChild;
<ide> expect(child.id).toEqual('child');
<del> dispatchClickEvent(child);
<add>
<add> await act(async () => {
<add> dispatchClickEvent(child);
<add> });
<ide>
<ide> // we add both 'child' and 'parent' due to bubbling
<ide> expect(log).toEqual(['parent', 'child', 'parent']);
<ide><path>packages/react-native-renderer/src/ReactFabric.js
<ide> import {
<ide> batchedEventUpdates,
<ide> batchedUpdates as batchedUpdatesImpl,
<ide> discreteUpdates,
<del> flushDiscreteUpdates,
<ide> createContainer,
<ide> updateContainer,
<ide> injectIntoDevTools,
<ide> function createPortal(
<ide> setBatchingImplementation(
<ide> batchedUpdatesImpl,
<ide> discreteUpdates,
<del> flushDiscreteUpdates,
<ide> batchedEventUpdates,
<ide> );
<ide>
<ide><path>packages/react-native-renderer/src/ReactNativeRenderer.js
<ide> import {
<ide> batchedUpdates as batchedUpdatesImpl,
<ide> batchedEventUpdates,
<ide> discreteUpdates,
<del> flushDiscreteUpdates,
<ide> createContainer,
<ide> updateContainer,
<ide> injectIntoDevTools,
<ide> function createPortal(
<ide> setBatchingImplementation(
<ide> batchedUpdatesImpl,
<ide> discreteUpdates,
<del> flushDiscreteUpdates,
<ide> batchedEventUpdates,
<ide> );
<ide>
<ide><path>packages/react-native-renderer/src/legacy-events/ReactGenericBatching.js
<ide> let batchedUpdatesImpl = function(fn, bookkeeping) {
<ide> let discreteUpdatesImpl = function(fn, a, b, c, d) {
<ide> return fn(a, b, c, d);
<ide> };
<del>let flushDiscreteUpdatesImpl = function() {};
<ide> let batchedEventUpdatesImpl = batchedUpdatesImpl;
<ide>
<ide> let isInsideEventHandler = false;
<ide> export function discreteUpdates(fn, a, b, c, d) {
<ide> return discreteUpdatesImpl(fn, a, b, c, d);
<ide> } finally {
<ide> isInsideEventHandler = prevIsInsideEventHandler;
<del> if (!isInsideEventHandler) {
<del> }
<del> }
<del>}
<del>
<del>export function flushDiscreteUpdatesIfNeeded() {
<del> if (!isInsideEventHandler) {
<del> flushDiscreteUpdatesImpl();
<ide> }
<ide> }
<ide>
<ide> export function setBatchingImplementation(
<ide> _batchedUpdatesImpl,
<ide> _discreteUpdatesImpl,
<del> _flushDiscreteUpdatesImpl,
<ide> _batchedEventUpdatesImpl,
<ide> ) {
<ide> batchedUpdatesImpl = _batchedUpdatesImpl;
<ide> discreteUpdatesImpl = _discreteUpdatesImpl;
<del> flushDiscreteUpdatesImpl = _flushDiscreteUpdatesImpl;
<ide> batchedEventUpdatesImpl = _batchedEventUpdatesImpl;
<ide> } | 7 |
Javascript | Javascript | combine register and cache methods | b5f7c9e2d526b17b9962976bb704dce8779d7362 | <ide><path>src/data/Data.js
<ide> Data.uid = 1;
<ide>
<ide> Data.prototype = {
<ide>
<del> register: function( owner ) {
<del> var value = {};
<del>
<del> // If it is a node unlikely to be stringify-ed or looped over
<del> // use plain assignment
<del> if ( owner.nodeType ) {
<del> owner[ this.expando ] = value;
<del>
<del> // Otherwise secure it in a non-enumerable property
<del> // configurable must be true to allow the property to be
<del> // deleted when data is removed
<del> } else {
<del> Object.defineProperty( owner, this.expando, {
<del> value: value,
<del> configurable: true
<del> } );
<del> }
<del> return owner[ this.expando ];
<del> },
<ide> cache: function( owner ) {
<ide>
<del> // We can accept data for non-element nodes in modern browsers,
<del> // but we should not, see #8335.
<del> // Always return an empty object.
<del> if ( !acceptData( owner ) ) {
<del> return {};
<del> }
<del>
<ide> // Check if the owner object already has a cache
<del> var cache = owner[ this.expando ];
<del>
<del> // If so, return it
<del> if ( cache ) {
<del> return cache;
<add> var value = owner[ this.expando ];
<add>
<add> // If not, create one
<add> if ( !value ) {
<add> value = {};
<add>
<add> // We can accept data for non-element nodes in modern browsers,
<add> // but we should not, see #8335.
<add> // Always return an empty object.
<add> if ( acceptData( owner ) ) {
<add>
<add> // If it is a node unlikely to be stringify-ed or looped over
<add> // use plain assignment
<add> if ( owner.nodeType ) {
<add> owner[ this.expando ] = value;
<add>
<add> // Otherwise secure it in a non-enumerable property
<add> // configurable must be true to allow the property to be
<add> // deleted when data is removed
<add> } else {
<add> Object.defineProperty( owner, this.expando, {
<add> value: value,
<add> configurable: true
<add> } );
<add> }
<add> }
<ide> }
<ide>
<del> // If not, register one
<del> return this.register( owner );
<add> return value;
<ide> },
<ide> set: function( owner, data, value ) {
<ide> var prop, | 1 |
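A brief usage sketch of the merged method, assuming jQuery's internal Data constructor and acceptData helper are in scope as they are for the module patched above:

var data = new Data();
var elem = document.createElement( "div" );

// The first call lazily creates the cache object and, because elem is a node,
// attaches it with plain assignment at elem[ data.expando ].
var cache = data.cache( elem );
cache.greeting = "hi";

// Later calls return that same object, so the separate register() step is gone.
console.log( data.cache( elem ).greeting ); // "hi"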
Text | Text | remove extra 10 | 48a00493502b41fa1b5a12cfc6f7a6918650fb03 | <ide><path>docs/guides/setup.md
<ide> You can download the Video.js source and host it on your own servers, or use the
<ide>
<ide> ### CDN Version ###
<ide> ```html
<del><link href="//vjs.zencdn.net/4.10.10.0/video-js.css" rel="stylesheet">
<del><script src="//vjs.zencdn.net/4.10.10.0/video.js"></script>
<add><link href="//vjs.zencdn.net/4.10/video-js.css" rel="stylesheet">
<add><script src="//vjs.zencdn.net/4.10/video.js"></script>
<ide> ```
<ide>
<ide> ### Self Hosted. ### | 1 |
Java | Java | report errors from onerror to plugin when done | 7791076a21d8b3d4462b320188f8263da59f2c0e | <ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableAny.java
<ide> */
<ide> package io.reactivex.internal.operators.flowable;
<ide>
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide> import org.reactivestreams.*;
<ide>
<ide> import io.reactivex.exceptions.Exceptions;
<ide> public void onNext(T t) {
<ide>
<ide> @Override
<ide> public void onError(Throwable t) {
<del> if (!done) {
<del> done = true;
<del> actual.onError(t);
<add> if (done) {
<add> RxJavaPlugins.onError(t);
<add> return;
<ide> }
<add>
<add> done = true;
<add> actual.onError(t);
<ide> }
<ide>
<ide> @Override
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableAnySingle.java
<ide> public void onNext(T t) {
<ide>
<ide> @Override
<ide> public void onError(Throwable t) {
<del> if (!done) {
<del> done = true;
<del> s = SubscriptionHelper.CANCELLED;
<del> actual.onError(t);
<add> if (done) {
<add> RxJavaPlugins.onError(t);
<add> return;
<ide> }
<add>
<add> done = true;
<add> s = SubscriptionHelper.CANCELLED;
<add> actual.onError(t);
<ide> }
<ide>
<ide> @Override
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableFlatMap.java
<ide>
<ide> package io.reactivex.internal.operators.flowable;
<ide>
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide> import java.util.concurrent.Callable;
<ide> import java.util.concurrent.atomic.*;
<ide>
<ide> void tryEmit(U value, InnerSubscriber<T, U> inner) {
<ide> public void onError(Throwable t) {
<ide> // safeguard against misbehaving sources
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> getErrorQueue().offer(t);
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableOnBackpressureDrop.java
<ide>
<ide> package io.reactivex.internal.operators.flowable;
<ide>
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide> import java.util.concurrent.atomic.AtomicLong;
<ide>
<ide> import org.reactivestreams.*;
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableSingle.java
<ide>
<ide> package io.reactivex.internal.operators.flowable;
<ide>
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide> import org.reactivestreams.*;
<ide>
<ide> import io.reactivex.internal.subscriptions.*;
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableSingleMaybe.java
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableSingleSingle.java
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableTakeWhile.java
<ide>
<ide> package io.reactivex.internal.operators.flowable;
<ide>
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide> import org.reactivestreams.*;
<ide>
<ide> import io.reactivex.exceptions.Exceptions;
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableAny.java
<ide> import io.reactivex.exceptions.Exceptions;
<ide> import io.reactivex.functions.Predicate;
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableAny<T> extends AbstractObservableWithUpstream<T, Boolean> {
<ide> final Predicate<? super T> predicate;
<ide> public void onNext(T t) {
<ide>
<ide> @Override
<ide> public void onError(Throwable t) {
<del> if (!done) {
<del> done = true;
<del> actual.onError(t);
<add> if (done) {
<add> RxJavaPlugins.onError(t);
<add> return;
<ide> }
<add>
<add> done = true;
<add> actual.onError(t);
<ide> }
<ide>
<ide> @Override
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableAnySingle.java
<ide> public void onNext(T t) {
<ide>
<ide> @Override
<ide> public void onError(Throwable t) {
<del> if (!done) {
<del> done = true;
<del> actual.onError(t);
<add> if (done) {
<add> RxJavaPlugins.onError(t);
<add> return;
<ide> }
<add>
<add> done = true;
<add> actual.onError(t);
<ide> }
<ide>
<ide> @Override
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableElementAt.java
<ide> import io.reactivex.*;
<ide> import io.reactivex.disposables.Disposable;
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableElementAt<T> extends AbstractObservableWithUpstream<T, T> {
<ide> final long index;
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableElementAtMaybe.java
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableElementAtSingle.java
<ide> import io.reactivex.*;
<ide> import io.reactivex.disposables.Disposable;
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableElementAtSingle<T> extends Single<T> {
<ide> final ObservableSource<T> source;
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableFlatMap.java
<ide>
<ide> package io.reactivex.internal.operators.observable;
<ide>
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide> import java.util.*;
<ide> import java.util.concurrent.Callable;
<ide> import java.util.concurrent.atomic.*;
<ide> void tryEmit(U value, InnerObserver<T, U> inner) {
<ide>
<ide> @Override
<ide> public void onError(Throwable t) {
<del> // safeguard against misbehaving sources
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> getErrorQueue().offer(t);
<ide> public void onError(Throwable t) {
<ide>
<ide> @Override
<ide> public void onComplete() {
<del> // safeguard against misbehaving sources
<ide> if (done) {
<ide> return;
<ide> }
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableSingle.java
<ide> import io.reactivex.*;
<ide> import io.reactivex.disposables.Disposable;
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableSingle<T> extends AbstractObservableWithUpstream<T, T> {
<ide>
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableSingleMaybe.java
<ide> import io.reactivex.*;
<ide> import io.reactivex.disposables.Disposable;
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableSingleMaybe<T> extends Maybe<T> {
<ide>
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableSingleSingle.java
<ide> import io.reactivex.*;
<ide> import io.reactivex.disposables.Disposable;
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableSingleSingle<T> extends Single<T> {
<ide>
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableTake.java
<ide> import io.reactivex.*;
<ide> import io.reactivex.disposables.Disposable;
<ide> import io.reactivex.internal.disposables.*;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableTake<T> extends AbstractObservableWithUpstream<T, T> {
<ide> final long limit;
<ide> public void onNext(T t) {
<ide> }
<ide> @Override
<ide> public void onError(Throwable t) {
<del> if (!done) {
<del> done = true;
<del> subscription.dispose();
<del> actual.onError(t);
<add> if (done) {
<add> RxJavaPlugins.onError(t);
<add> return;
<ide> }
<add>
<add> done = true;
<add> subscription.dispose();
<add> actual.onError(t);
<ide> }
<ide> @Override
<ide> public void onComplete() {
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableTakeWhile.java
<ide> import io.reactivex.exceptions.Exceptions;
<ide> import io.reactivex.functions.Predicate;
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public final class ObservableTakeWhile<T> extends AbstractObservableWithUpstream<T, T> {
<ide> final Predicate<? super T> predicate;
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/observers/SafeObserver.java
<ide> void onNextNoSubscription() {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/main/java/io/reactivex/subjects/UnicastSubject.java
<ide>
<ide> package io.reactivex.subjects;
<ide>
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide> import java.util.concurrent.atomic.*;
<ide>
<ide> import io.reactivex.Observer;
<ide> public void onNext(T t) {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done || disposed) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> if (t == null) {
<ide><path>src/main/java/io/reactivex/subscribers/SafeSubscriber.java
<ide> void onNextNoSubscription() {
<ide> @Override
<ide> public void onError(Throwable t) {
<ide> if (done) {
<add> RxJavaPlugins.onError(t);
<ide> return;
<ide> }
<ide> done = true;
<ide><path>src/test/java/io/reactivex/internal/operators/observable/ObservableSingleTest.java
<ide>
<ide> package io.reactivex.internal.operators.observable;
<ide>
<add>import static org.junit.Assert.assertSame;
<ide> import static org.junit.Assert.assertEquals;
<ide> import static org.mockito.Matchers.isA;
<ide> import static org.mockito.Mockito.*;
<ide>
<ide> import org.junit.Test;
<ide> import org.mockito.InOrder;
<ide>
<add>import java.util.concurrent.atomic.AtomicReference;
<add>
<ide> import io.reactivex.*;
<ide> import io.reactivex.functions.*;
<add>import io.reactivex.plugins.RxJavaPlugins;
<ide>
<ide> public class ObservableSingleTest {
<ide>
<ide> public Integer apply(Integer i1, Integer i2) {
<ide> Integer r = reduced.blockingGet();
<ide> assertEquals(21, r.intValue());
<ide> }
<del>}
<ide>\ No newline at end of file
<add>
<add> @Test
<add> public void singleElementOperatorDoNotSwallowExceptionWhenDone() {
<add> final Throwable exception = new RuntimeException("some error");
<add> final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
<add>
<add> try {
<add> RxJavaPlugins.setErrorHandler(new Consumer<Throwable>() {
<add> @Override public void accept(final Throwable throwable) throws Exception {
<add> error.set(throwable);
<add> }
<add> });
<add>
<add> Observable.unsafeCreate(new ObservableSource<Integer>() {
<add> @Override public void subscribe(final Observer<? super Integer> observer) {
<add> observer.onComplete();
<add> observer.onError(exception);
<add> }
<add> }).singleElement().test().assertComplete();
<add>
<add> assertSame(exception, error.get());
<add> } finally {
<add> RxJavaPlugins.reset();
<add> }
<add> }
<add>} | 23 |
Javascript | Javascript | clear the meta cache without using observers | d6b061a3fd303e665ab506e836691421ea1a3ead | <ide><path>packages/ember-views/lib/views/view.js
<ide> var get = Ember.get, set = Ember.set;
<ide> var guidFor = Ember.guidFor;
<ide> var a_forEach = Ember.EnumerableUtils.forEach;
<ide> var a_addObject = Ember.EnumerableUtils.addObject;
<add>var meta = Ember.meta;
<ide>
<ide> var childViewsProperty = Ember.computed(function() {
<ide> var childViews = this._childViews, ret = Ember.A(), view = this;
<ide> Ember.View = Ember.CoreView.extend(
<ide> return viewCollection;
<ide> },
<ide>
<del> _elementWillChange: Ember.beforeObserver(function() {
<del> this.forEachChildView(function(view) {
<del> Ember.propertyWillChange(view, 'element');
<del> });
<del> }, 'element'),
<del>
<ide> /**
<ide> @private
<ide>
<ide> Ember.View = Ember.CoreView.extend(
<ide> */
<ide> _elementDidChange: Ember.observer(function() {
<ide> this.forEachChildView(function(view) {
<del> Ember.propertyDidChange(view, 'element');
<add> delete meta(view).cache.element;
<ide> });
<ide> }, 'element'),
<ide>
<ide> Ember.View = Ember.CoreView.extend(
<ide>
<ide> if (priorState && priorState.exit) { priorState.exit(this); }
<ide> if (currentState.enter) { currentState.enter(this); }
<add> if (state === 'inDOM') { delete Ember.meta(this).cache.element; }
<ide>
<ide> if (children !== false) {
<ide> this.forEachChildView(function(view) { | 1 |
PHP | PHP | move shared code down to driver | f13f32fa4a3b341d804e394e556a61e7476b67b2 | <ide><path>lib/Cake/Database/Driver.php
<ide> public function supportsQuoting() {
<ide> return true;
<ide> }
<ide>
<add>/**
<add> * Escapes values for use in schema definitions.
<add> *
<add> * @param mixed $value The value to escape.
<add> * @return string String for use in schema definitions.
<add> */
<add> public function schemaValue($value) {
<add> if (is_null($value)) {
<add> return 'NULL';
<add> }
<add> if ($value === false) {
<add> return 'FALSE';
<add> }
<add> if ($value === true) {
<add> return 'TRUE';
<add> }
<add> if (is_float($value)) {
<add> return str_replace(',', '.', strval($value));
<add> }
<add> if ((is_int($value) || $value === '0') || (
<add> is_numeric($value) && strpos($value, ',') === false &&
<add> $value[0] != '0' && strpos($value, 'e') === false)
<add> ) {
<add> return $value;
<add> }
<add> return $this->_connection->quote($value, \PDO::PARAM_STR);
<add> }
<add>
<ide> /**
<ide> * Returns last id generated for a table or sequence in database
<ide> *
<ide><path>lib/Cake/Database/Schema/MysqlSchema.php
<ide> public function columnSql(Table $table, $name) {
<ide> unset($data['default']);
<ide> }
<ide> if (isset($data['default']) && $data['type'] !== 'timestamp') {
<del> $out .= ' DEFAULT ' . $this->_value($data['default']);
<add> $out .= ' DEFAULT ' . $this->_driver->schemaValue($data['default']);
<ide> }
<ide> if (
<ide> isset($data['default']) &&
<ide> public function columnSql(Table $table, $name) {
<ide> $out .= ' DEFAULT CURRENT_TIMESTAMP';
<ide> }
<ide> if (isset($data['comment'])) {
<del> $out .= ' COMMENT ' . $this->_value($data['comment']);
<add> $out .= ' COMMENT ' . $this->_driver->schemaValue($data['comment']);
<ide> }
<ide> return $out;
<ide> }
<ide>
<del>/**
<del> * Escapes values for use in schema definitions.
<del> *
<del> * @param mixed $value The value to escape.
<del> * @return string String for use in schema definitions.
<del> */
<del> protected function _value($value) {
<del> if (is_null($value)) {
<del> return 'NULL';
<del> }
<del> if ($value === false) {
<del> return 'FALSE';
<del> }
<del> if ($value === true) {
<del> return 'TRUE';
<del> }
<del> if (is_float($value)) {
<del> return str_replace(',', '.', strval($value));
<del> }
<del> if ((is_int($value) || $value === '0') || (
<del> is_numeric($value) && strpos($value, ',') === false &&
<del> $value[0] != '0' && strpos($value, 'e') === false)
<del> ) {
<del> return $value;
<del> }
<del> return $this->_driver->quote($value, \PDO::PARAM_STR);
<del> }
<del>
<ide> /**
<ide> * Generate the SQL fragment for a single index in MySQL
<ide> *
<ide><path>lib/Cake/Database/Schema/SqliteSchema.php
<ide> public function columnSql(Table $table, $name) {
<ide> unset($data['default']);
<ide> }
<ide> if (isset($data['default'])) {
<del> $out .= ' DEFAULT ' . $this->_value($data['default']);
<add> $out .= ' DEFAULT ' . $this->_driver->schemaValue($data['default']);
<ide> }
<ide> return $out;
<ide> }
<ide>
<del>/**
<del> * Escapes values for use in schema definitions.
<del> *
<del> * @param mixed $value The value to escape.
<del> * @return string String for use in schema definitions.
<del> */
<del> protected function _value($value) {
<del> if (is_null($value)) {
<del> return 'NULL';
<del> }
<del> if ($value === false) {
<del> return 'FALSE';
<del> }
<del> if ($value === true) {
<del> return 'TRUE';
<del> }
<del> if (is_float($value)) {
<del> return str_replace(',', '.', strval($value));
<del> }
<del> if ((is_int($value) || $value === '0') || (
<del> is_numeric($value) && strpos($value, ',') === false &&
<del> $value[0] != '0' && strpos($value, 'e') === false)
<del> ) {
<del> return $value;
<del> }
<del> return $this->_driver->quote($value, \PDO::PARAM_STR);
<del> }
<del>
<ide> /**
<ide> * Generate the SQL fragment for a single index in MySQL
<ide> * | 3 |
Python | Python | add tests for the new `generic` constructors | 4d931c5a158f8c98eef87d2d35b4e91aa93d9a69 | <ide><path>numpy/tests/typing/pass/scalars.py
<add>import datetime as dt
<ide> import numpy as np
<ide>
<ide>
<ide> # Construction
<add>class D:
<add> def __index__(self) -> int:
<add> return 0
<add>
<add>
<ide> class C:
<del> def __complex__(self):
<add> def __complex__(self) -> complex:
<ide> return 3j
<ide>
<ide>
<ide> class B:
<del> def __int__(self):
<add> def __int__(self) -> int:
<ide> return 4
<ide>
<ide>
<ide> class A:
<del> def __float__(self):
<add> def __float__(self) -> float:
<ide> return 4.0
<ide>
<ide>
<ide> np.complex64(3j)
<add>np.complex64(A())
<ide> np.complex64(C())
<ide> np.complex128(3j)
<ide> np.complex128(C())
<ide> np.complex128(None)
<add>np.complex64(D())
<add>np.complex64("1.2")
<add>np.complex128(b"2j")
<ide>
<ide> np.int8(4)
<ide> np.int16(3.4)
<ide> np.int32(4)
<ide> np.int64(-1)
<ide> np.uint8(B())
<ide> np.uint32()
<add>np.uint64(D())
<add>np.int32("1")
<add>np.int64(b"2")
<ide>
<ide> np.float16(A())
<ide> np.float32(16)
<ide> np.float64(3.0)
<ide> np.float64(None)
<add>np.float32("1")
<add>np.float16(b"2.5")
<add>np.float32(D())
<ide>
<ide> np.bytes_(b"hello")
<ide> np.bytes_("hello", 'utf-8')
<ide> def __float__(self):
<ide> # Time structures
<ide> np.datetime64()
<ide> np.datetime64(0, "D")
<add>np.datetime64(0, b"D")
<ide> np.datetime64("2019")
<add>np.datetime64(b"2019")
<ide> np.datetime64("2019", "D")
<add>np.datetime64(np.datetime64())
<add>np.datetime64(dt.datetime(2000, 5, 3))
<ide> np.datetime64(None)
<ide> np.datetime64(None, "D")
<ide>
<ide> np.timedelta64()
<ide> np.timedelta64(0)
<ide> np.timedelta64(0, "D")
<add>np.timedelta64(0, b"D")
<add>np.timedelta64("3")
<add>np.timedelta64(b"5")
<add>np.timedelta64(np.timedelta64(2))
<add>np.timedelta64(dt.timedelta(2))
<ide> np.timedelta64(None)
<ide> np.timedelta64(None, "D")
<ide> | 1 |
Javascript | Javascript | test privateencrypt/publicdecrypt + padding | 2fed83dee884c3bddafa67bb53abf507db1a8ba3 | <ide><path>test/parallel/test-crypto-rsa-dsa.js
<ide> const decryptError = {
<ide> }, encryptedBuffer);
<ide> assert.strictEqual(decryptedBufferWithPassword.toString(), input);
<ide>
<add> // Now with explicit RSA_PKCS1_PADDING.
<add> encryptedBuffer = crypto.privateEncrypt({
<add> padding: crypto.constants.RSA_PKCS1_PADDING,
<add> key: rsaKeyPemEncrypted,
<add> passphrase: Buffer.from('password')
<add> }, bufferToEncrypt);
<add>
<add> decryptedBufferWithPassword = crypto.publicDecrypt({
<add> padding: crypto.constants.RSA_PKCS1_PADDING,
<add> key: rsaKeyPemEncrypted,
<add> passphrase: Buffer.from('password')
<add> }, encryptedBuffer);
<add> assert.strictEqual(decryptedBufferWithPassword.toString(), input);
<add>
<add> // Omitting padding should be okay because RSA_PKCS1_PADDING is the default.
<add> decryptedBufferWithPassword = crypto.publicDecrypt({
<add> key: rsaKeyPemEncrypted,
<add> passphrase: Buffer.from('password')
<add> }, encryptedBuffer);
<add> assert.strictEqual(decryptedBufferWithPassword.toString(), input);
<add>
<add> // Now with RSA_NO_PADDING. Plaintext needs to match key size.
<add> const plaintext = 'x'.repeat(128);
<add> encryptedBuffer = crypto.privateEncrypt({
<add> padding: crypto.constants.RSA_NO_PADDING,
<add> key: rsaKeyPemEncrypted,
<add> passphrase: Buffer.from('password')
<add> }, Buffer.from(plaintext));
<add>
<add> decryptedBufferWithPassword = crypto.publicDecrypt({
<add> padding: crypto.constants.RSA_NO_PADDING,
<add> key: rsaKeyPemEncrypted,
<add> passphrase: Buffer.from('password')
<add> }, encryptedBuffer);
<add> assert.strictEqual(decryptedBufferWithPassword.toString(), plaintext);
<add>
<ide> encryptedBuffer = crypto.publicEncrypt(certPem, bufferToEncrypt);
<ide>
<ide> decryptedBuffer = crypto.privateDecrypt(keyPem, encryptedBuffer); | 1 |
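A standalone sketch of the same round-trip with explicit padding, assuming a Node.js release new enough to ship crypto.generateKeyPairSync (the test above uses fixture keys and passphrases instead):

'use strict';
const assert = require('assert');
const crypto = require('crypto');

const { publicKey, privateKey } = crypto.generateKeyPairSync('rsa', {
  modulusLength: 2048,
});

// Encrypt with the private key using RSA_PKCS1_PADDING, which is also the
// default when the padding option is omitted.
const encrypted = crypto.privateEncrypt({
  key: privateKey,
  padding: crypto.constants.RSA_PKCS1_PADDING,
}, Buffer.from('hello'));

// Anyone holding the public key can recover the plaintext.
const decrypted = crypto.publicDecrypt({
  key: publicKey,
  padding: crypto.constants.RSA_PKCS1_PADDING,
}, encrypted);
assert.strictEqual(decrypted.toString(), 'hello');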
Ruby | Ruby | add therubyracer gem commented in default gemfile | 94bcdd60eaecd81e243731ac62af56b68f91b9fe | <ide><path>railties/lib/rails/generators/app_base.rb
<ide> def assets_gemfile_entry
<ide> group :assets do
<ide> gem 'sass-rails', :git => 'https://github.com/rails/sass-rails.git'
<ide> gem 'coffee-rails', :git => 'https://github.com/rails/coffee-rails.git'
<del> #{"gem 'therubyrhino'\n" if defined?(JRUBY_VERSION)}
<add>
<add> # See https://github.com/sstephenson/execjs#readme for more supported runtimes
<add> #{javascript_runtime_gemfile_entry}
<ide> gem 'uglifier', '>= 1.0.3'
<ide> end
<ide> GEMFILE
<ide> def assets_gemfile_entry
<ide> group :assets do
<ide> gem 'sass-rails', '~> 4.0.0.beta'
<ide> gem 'coffee-rails', '~> 4.0.0.beta'
<del> #{"gem 'therubyrhino'\n" if defined?(JRUBY_VERSION)}
<add>
<add> # See https://github.com/sstephenson/execjs#readme for more supported runtimes
<add> #{javascript_runtime_gemfile_entry}
<ide> gem 'uglifier', '>= 1.0.3'
<ide> end
<ide> GEMFILE
<ide> def javascript_gemfile_entry
<ide> "gem '#{options[:javascript]}-rails'" unless options[:skip_javascript]
<ide> end
<ide>
<add> def javascript_runtime_gemfile_entry
<add> if defined?(JRUBY_VERSION)
<add> "gem 'therubyrhino'\n"
<add> else
<add> "# gem 'therubyracer'\n"
<add> end
<add> end
<add>
<ide> def bundle_command(command)
<ide> say_status :run, "bundle #{command}"
<ide>
<ide><path>railties/test/generators/app_generator_test.rb
<ide> def test_generator_if_skip_sprockets_is_given
<ide> assert_file "test/performance/browsing_test.rb"
<ide> end
<ide>
<del> def test_inclusion_of_therubyrhino_under_jruby
<add> def test_inclusion_of_javascript_runtime
<ide> run_generator([destination_root])
<ide> if defined?(JRUBY_VERSION)
<ide> assert_file "Gemfile", /gem\s+["']therubyrhino["']$/
<ide> else
<del> assert_file "Gemfile" do |content|
<del> assert_no_match(/gem\s+["']therubyrhino["']$/, content)
<del> end
<add> assert_file "Gemfile", /# gem\s+["']therubyracer["']$/
<ide> end
<ide> end
<ide> | 2 |
PHP | PHP | fix record removal from session storage | fa2be1e46ce1af220dd45e40d9ef745540414135 | <ide><path>src/Auth/Storage/SessionStorage.php
<ide> public function set(array $user)
<ide> */
<ide> public function remove()
<ide> {
<del> unset($this->_user);
<add> $this->_user = null;
<ide>
<ide> $this->_session->delete($this->_config['key']);
<ide> $this->_session->renew();
<ide><path>tests/TestCase/Controller/Component/AuthComponentTest.php
<ide> public function testAuthorizeFalse()
<ide> $event = new Event('Controller.startup', $this->Controller);
<ide> $Users = TableRegistry::get('Users');
<ide> $user = $Users->find('all')->hydrate(false)->first();
<del> $this->Auth->session->write('Auth.User', $user);
<add> $this->Controller->Auth->storage()->set($user);
<ide> $this->Controller->Auth->config('userModel', 'Users');
<ide> $this->Controller->Auth->config('authorize', false);
<ide> $this->Controller->request->addParams(Router::parse('auth_test/add'));
<ide> $result = $this->Controller->Auth->startup($event);
<ide> $this->assertNull($result);
<ide>
<del> $this->Auth->session->delete('Auth');
<add> $this->Controller->Auth->storage()->remove();
<ide> $result = $this->Controller->Auth->startup($event);
<ide> $this->assertTrue($event->isStopped());
<ide> $this->assertInstanceOf('Cake\Network\Response', $result); | 2 |
Javascript | Javascript | represent cid chars using integers, not strings | adf58ed6870302fd87a8a0fa25f24c0e4e0716e9 | <ide><path>src/core/cmap.js
<ide> var CMap = (function CMapClosure() {
<ide> // where nBytePairs are ranges e.g. [low1, high1, low2, high2, ...]
<ide> this.codespaceRanges = [[], [], [], []];
<ide> this.numCodespaceRanges = 0;
<add> // Map entries have one of two forms.
<add> // - cid chars are 16-bit unsigned integers, stored as integers.
<add> // - bf chars are variable-length byte sequences, stored as strings, with
<add> // one byte per character.
<ide> this._map = [];
<ide> this.vertical = false;
<ide> this.useCMap = null;
<ide> var CMap = (function CMapClosure() {
<ide> this.numCodespaceRanges++;
<ide> },
<ide>
<del> mapRange: function(low, high, dstLow) {
<add> mapCidRange: function(low, high, dstLow) {
<add> while (low <= high) {
<add> this._map[low++] = dstLow++;
<add> }
<add> },
<add>
<add> mapBfRange: function(low, high, dstLow) {
<ide> var lastByte = dstLow.length - 1;
<ide> while (low <= high) {
<del> this._map[low] = dstLow;
<add> this._map[low++] = dstLow;
<ide> // Only the last byte has to be incremented.
<ide> dstLow = dstLow.substr(0, lastByte) +
<ide> String.fromCharCode(dstLow.charCodeAt(lastByte) + 1);
<del> ++low;
<ide> }
<ide> },
<ide>
<del> mapRangeToArray: function(low, high, array) {
<add> mapBfRangeToArray: function(low, high, array) {
<ide> var i = 0, ii = array.length;
<ide> while (low <= high && i < ii) {
<ide> this._map[low] = array[i++];
<ide> ++low;
<ide> }
<ide> },
<ide>
<add> // This is used for both bf and cid chars.
<ide> mapOne: function(src, dst) {
<ide> this._map[src] = dst;
<ide> },
<ide> var IdentityCMap = (function IdentityCMapClosure() {
<ide> CMap.call(this);
<ide> this.vertical = vertical;
<ide> this.addCodespaceRange(n, 0, 0xffff);
<del> this.mapRange(0, 0xffff, '\u0000');
<add> this.mapCidRange(0, 0xffff, 0);
<ide> }
<ide> Util.inherit(IdentityCMap, CMap, {});
<ide>
<ide> var BinaryCMapReader = (function BinaryCMapReaderClosure() {
<ide> case 2: // cidchar
<ide> stream.readHex(char, dataSize);
<ide> code = stream.readNumber();
<del> cMap.mapOne(hexToInt(char, dataSize), String.fromCharCode(code));
<add> cMap.mapOne(hexToInt(char, dataSize), code);
<ide> for (i = 1; i < subitemsCount; i++) {
<ide> incHex(char, dataSize);
<ide> if (!sequence) {
<ide> stream.readHexNumber(tmp, dataSize);
<ide> addHex(char, tmp, dataSize);
<ide> }
<ide> code = stream.readSigned() + (code + 1);
<del> cMap.mapOne(hexToInt(char, dataSize), String.fromCharCode(code));
<add> cMap.mapOne(hexToInt(char, dataSize), code);
<ide> }
<ide> break;
<ide> case 3: // cidrange
<ide> stream.readHex(start, dataSize);
<ide> stream.readHexNumber(end, dataSize);
<ide> addHex(end, start, dataSize);
<ide> code = stream.readNumber();
<del> cMap.mapRange(hexToInt(start, dataSize), hexToInt(end, dataSize),
<del> String.fromCharCode(code));
<add> cMap.mapCidRange(hexToInt(start, dataSize), hexToInt(end, dataSize),
<add> code);
<ide> for (i = 1; i < subitemsCount; i++) {
<ide> incHex(end, dataSize);
<ide> if (!sequence) {
<ide> var BinaryCMapReader = (function BinaryCMapReaderClosure() {
<ide> stream.readHexNumber(end, dataSize);
<ide> addHex(end, start, dataSize);
<ide> code = stream.readNumber();
<del> cMap.mapRange(hexToInt(start, dataSize), hexToInt(end, dataSize),
<del> String.fromCharCode(code));
<add> cMap.mapCidRange(hexToInt(start, dataSize), hexToInt(end, dataSize),
<add> code);
<ide> }
<ide> break;
<ide> case 4: // bfchar
<ide> var BinaryCMapReader = (function BinaryCMapReaderClosure() {
<ide> stream.readHexNumber(end, ucs2DataSize);
<ide> addHex(end, start, ucs2DataSize);
<ide> stream.readHex(charCode, dataSize);
<del> cMap.mapRange(hexToInt(start, ucs2DataSize),
<del> hexToInt(end, ucs2DataSize),
<del> hexToStr(charCode, dataSize));
<add> cMap.mapBfRange(hexToInt(start, ucs2DataSize),
<add> hexToInt(end, ucs2DataSize),
<add> hexToStr(charCode, dataSize));
<ide> for (i = 1; i < subitemsCount; i++) {
<ide> incHex(end, ucs2DataSize);
<ide> if (!sequence) {
<ide> var BinaryCMapReader = (function BinaryCMapReaderClosure() {
<ide> stream.readHexNumber(end, ucs2DataSize);
<ide> addHex(end, start, ucs2DataSize);
<ide> stream.readHex(charCode, dataSize);
<del> cMap.mapRange(hexToInt(start, ucs2DataSize),
<del> hexToInt(end, ucs2DataSize),
<del> hexToStr(charCode, dataSize));
<add> cMap.mapBfRange(hexToInt(start, ucs2DataSize),
<add> hexToInt(end, ucs2DataSize),
<add> hexToStr(charCode, dataSize));
<ide> }
<ide> break;
<ide> default:
<ide> var CMapFactory = (function CMapFactoryClosure() {
<ide> obj = lexer.getObj();
<ide> if (isInt(obj) || isString(obj)) {
<ide> var dstLow = isInt(obj) ? String.fromCharCode(obj) : obj;
<del> cMap.mapRange(low, high, dstLow);
<add> cMap.mapBfRange(low, high, dstLow);
<ide> } else if (isCmd(obj, '[')) {
<ide> obj = lexer.getObj();
<ide> var array = [];
<ide> while (!isCmd(obj, ']') && !isEOF(obj)) {
<ide> array.push(obj);
<ide> obj = lexer.getObj();
<ide> }
<del> cMap.mapRangeToArray(low, high, array);
<add> cMap.mapBfRangeToArray(low, high, array);
<ide> } else {
<ide> break;
<ide> }
<ide> var CMapFactory = (function CMapFactoryClosure() {
<ide> var src = strToInt(obj);
<ide> obj = lexer.getObj();
<ide> expectInt(obj);
<del> var dst = String.fromCharCode(obj);
<add> var dst = obj;
<ide> cMap.mapOne(src, dst);
<ide> }
<ide> }
<ide> var CMapFactory = (function CMapFactoryClosure() {
<ide> var high = strToInt(obj);
<ide> obj = lexer.getObj();
<ide> expectInt(obj);
<del> var dstLow = String.fromCharCode(obj);
<del> cMap.mapRange(low, high, dstLow);
<add> var dstLow = obj;
<add> cMap.mapCidRange(low, high, dstLow);
<ide> }
<ide> }
<ide>
<ide><path>src/core/fonts.js
<ide> var Font = (function FontClosure() {
<ide> var cidToGidMap = properties.cidToGidMap || [];
<ide> var cidToGidMapLength = cidToGidMap.length;
<ide> properties.cMap.forEach(function(charCode, cid) {
<del> assert(cid.length === 1, 'Max size of CID is 65,535');
<del> cid = cid.charCodeAt(0);
<add> assert(cid <= 0xffff, 'Max size of CID is 65,535');
<ide> var glyphId = -1;
<ide> if (cidToGidMapLength === 0) {
<ide> glyphId = charCode;
<ide> var Font = (function FontClosure() {
<ide> var cMap = properties.cMap;
<ide> toUnicode = [];
<ide> cMap.forEach(function(charcode, cid) {
<del> assert(cid.length === 1, 'Max size of CID is 65,535');
<add> assert(cid <= 0xffff, 'Max size of CID is 65,535');
<ide> // e) Map the CID obtained in step (a) according to the CMap obtained
<ide> // in step (d), producing a Unicode value.
<del> var ucs2 = ucs2CMap.lookup(cid.charCodeAt(0));
<add> var ucs2 = ucs2CMap.lookup(cid);
<ide> if (ucs2) {
<ide> toUnicode[charcode] =
<ide> String.fromCharCode((ucs2.charCodeAt(0) << 8) +
<ide> var Font = (function FontClosure() {
<ide> var charcode = 0;
<ide> if (this.composite) {
<ide> if (this.cMap.contains(glyphUnicode)) {
<del> charcode = this.cMap.lookup(glyphUnicode).charCodeAt(0);
<add> charcode = this.cMap.lookup(glyphUnicode);
<ide> }
<ide> }
<ide> // ... via toUnicode map
<ide> var Font = (function FontClosure() {
<ide>
<ide> var widthCode = charcode;
<ide> if (this.cMap && this.cMap.contains(charcode)) {
<del> widthCode = this.cMap.lookup(charcode).charCodeAt(0);
<add> widthCode = this.cMap.lookup(charcode);
<ide> }
<ide> width = this.widths[widthCode];
<ide> width = isNum(width) ? width : this.defaultWidth;
<ide> var CFFFont = (function CFFFontClosure() {
<ide> // If the font is actually a CID font then we should use the charset
<ide> // to map CIDs to GIDs.
<ide> for (glyphId = 0; glyphId < charsets.length; glyphId++) {
<del> var cidString = String.fromCharCode(charsets[glyphId]);
<del> var charCode = properties.cMap.charCodeOf(cidString);
<add> var cid = charsets[glyphId];
<add> var charCode = properties.cMap.charCodeOf(cid);
<ide> charCodeToGlyphId[charCode] = glyphId;
<ide> }
<ide> } else {
<ide><path>test/unit/cmap_spec.js
<ide> describe('cmap', function() {
<ide> 'endcidchar\n';
<ide> var stream = new StringStream(str);
<ide> var cmap = CMapFactory.create(stream);
<del> expect(cmap.lookup(0x14)).toEqual(String.fromCharCode(0x00));
<add> expect(cmap.lookup(0x14)).toEqual(0x00);
<ide> expect(cmap.lookup(0x15)).toBeUndefined();
<ide> });
<ide> it('parses begincidrange', function() {
<ide> describe('cmap', function() {
<ide> var stream = new StringStream(str);
<ide> var cmap = CMapFactory.create(stream);
<ide> expect(cmap.lookup(0x15)).toBeUndefined();
<del> expect(cmap.lookup(0x16)).toEqual(String.fromCharCode(0x00));
<del> expect(cmap.lookup(0x1B)).toEqual(String.fromCharCode(0x05));
<add> expect(cmap.lookup(0x16)).toEqual(0x00);
<add> expect(cmap.lookup(0x1B)).toEqual(0x05);
<ide> expect(cmap.lookup(0x1C)).toBeUndefined();
<ide> });
<ide> it('decodes codespace ranges', function() { | 3 |
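The distinction this patch draws — cid entries stored as 16-bit integers, bf entries stored as variable-length byte strings — can be illustrated with a small standalone sketch. This is not the PDF.js class itself, just a toy map with the same two entry forms and the same increment logic as the patch.

```js
// Toy illustration of the two entry forms described in the patch:
// cid ranges map char codes to integers, bf ranges map them to strings.
function makeToyCMap() {
  const map = [];
  return {
    mapCidRange(low, high, dstLow) {
      while (low <= high) {
        map[low++] = dstLow++;          // plain integers, no String.fromCharCode
      }
    },
    mapBfRange(low, high, dstLow) {
      const lastByte = dstLow.length - 1;
      while (low <= high) {
        map[low++] = dstLow;            // variable-length byte string
        // Only the last byte is incremented, as in the patched code.
        dstLow = dstLow.substr(0, lastByte) +
                 String.fromCharCode(dstLow.charCodeAt(lastByte) + 1);
      }
    },
    lookup(code) {
      return map[code];
    }
  };
}

const cmap = makeToyCMap();
cmap.mapCidRange(0x16, 0x1b, 0x00);
console.log(cmap.lookup(0x16)); // 0   (an integer CID; previously '\u0000')
cmap.mapBfRange(0x20, 0x22, 'A');
console.log(cmap.lookup(0x21)); // 'B' (a byte string, unchanged behaviour)
```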
Ruby | Ruby | synchronize keg#link and #unlink counts | 3822267d2c9981192b909a1fb74d997f536cb579 | <ide><path>Library/Homebrew/extend/pathname.rb
<ide> def rmdir
<ide> $d+=1
<ide> end
<ide> def mkpath
<add> return if exist?
<ide> super
<ide> puts "mkpath #{to_s}" if ARGV.verbose?
<ide> $d+=1 | 1 |
Ruby | Ruby | remove the todo note for arel#lock | aefa975fdde01b1beaacbe065fe4b2bad69295d3 | <ide><path>activerecord/lib/active_record/base.rb
<ide> def default_select(qualified)
<ide> end
<ide>
<ide> def construct_finder_arel(options = {}, scope = scope(:find))
<del> # TODO add lock to Arel
<ide> validate_find_options(options)
<ide>
<ide> relation = arel_table. | 1 |
Javascript | Javascript | add blacklist to usernames | c2f408fb56937b85e653114e4502dbe0cdcf99c2 | <ide><path>common/models/user.js
<del>var Rx = require('rx');
<ide> var debug = require('debug')('freecc:user:remote');
<del>
<del>function destroyById(id, Model) {
<del> return Rx.Observable.create(function(observer) {
<del> Model.destroyById(id, function(err) {
<del> if (err) { return observer.onError(err); }
<del> observer.onCompleted();
<del> });
<del> return Rx.Disposable(Rx.helpers.noop);
<del> });
<del>}
<add>var blacklistedUsernames =
<add> require('../../server/utils/constants').blacklistedUsernames;
<ide>
<ide> module.exports = function(User) {
<ide> // NOTE(berks): user email validation currently not needed but build in. This
<ide> // work around should let us sneak by
<ide> // see:
<ide> // https://github.com/strongloop/loopback/issues/1137#issuecomment-109200135
<ide> delete User.validations.email;
<del> var app = User.app;
<del> var UserIdentity = app.models.UserIdentity;
<del> var UserCredential = app.models.UserCredential;
<ide> debug('setting up user hooks');
<ide> // send verification email to new camper
<ide> User.afterRemote('create', function(ctx, user, next) {
<ide> module.exports = function(User) {
<ide> });
<ide> }
<ide> debug('checking existence');
<add>
<add> // check to see if username is on blacklist
<add> if (username && blacklistedUsernames.indexOf(username) !== -1) {
<add> return cb(null, true);
<add> }
<add>
<ide> var where = {};
<ide> if (username) {
<ide> where.username = username.toLowerCase();
<ide> module.exports = function(User) {
<ide> }
<ide> }
<ide> );
<del>
<del> User.observe('after delete', function(ctx, next) {
<del> debug('removing user', ctx.where);
<del> var id = ctx.where && ctx.where.id ? ctx.where.id : null;
<del> if (!id) {
<del> return next();
<del> }
<del> Rx.Observable.combineLatest(
<del> destroyById(id, UserIdentity),
<del> destroyById(id, UserCredential),
<del> Rx.helpers.noop
<del> ).subscribe(
<del> Rx.helpers.noop,
<del> function(err) {
<del> debug('error deleting user %s stuff', id, err);
<del> next(err);
<del> },
<del> function() {
<del> debug('user stuff deleted for user %s', id);
<del> next();
<del> }
<del> );
<del> });
<ide> };
<ide><path>server/boot/a-extendUser.js
<add>var Rx = require('rx');
<add>var debug = require('debug')('freecc:user:remote');
<add>
<add>function destroyById(id, Model) {
<add> return Rx.Observable.create(function(observer) {
<add> Model.destroyById(id, function(err) {
<add> if (err) { return observer.onError(err); }
<add> observer.onCompleted();
<add> });
<add> return Rx.Disposable(Rx.helpers.noop);
<add> });
<add>}
<add>
<add>module.exports = function(app) {
<add> var User = app.models.User;
<add> var UserIdentity = app.models.UserIdentity;
<add> var UserCredential = app.models.UserCredential;
<add> User.observe('after delete', function(ctx, next) {
<add> debug('removing user', ctx.where);
<add> var id = ctx.where && ctx.where.id ? ctx.where.id : null;
<add> if (!id) {
<add> return next();
<add> }
<add> Rx.Observable.combineLatest(
<add> destroyById(id, UserIdentity),
<add> destroyById(id, UserCredential),
<add> Rx.helpers.noop
<add> ).subscribe(
<add> Rx.helpers.noop,
<add> function(err) {
<add> debug('error deleting user %s stuff', id, err);
<add> next(err);
<add> },
<add> function() {
<add> debug('user stuff deleted for user %s', id);
<add> next();
<add> }
<add> );
<add> });
<add>};
<ide><path>server/utils/constants.js
<add>exports.blacklistedUsernames = [
<add> 'bonfire',
<add> 'account',
<add> 'user',
<add> 'challenge',
<add> 'challenges',
<add> 'completed-challenge',
<add> 'completed-zipline-or-basejump',
<add> 'completed-bonfire',
<add> 'map',
<add> 'learn-to-code',
<add> 'about',
<add> 'api',
<add> 'explorer',
<add> 'field-guide',
<add> 'completed-field-guide',
<add> 'jobs',
<add> 'nonprofits',
<add> 'api',
<add> 'sitemap.xml',
<add> 'get-help',
<add> 'chat',
<add> 'twitch',
<add> 'get-pai',
<add> 'get-help',
<add> 'nonprofits',
<add> 'nonproifts-form',
<add> 'jobs-form',
<add> 'unsubscribe',
<add> 'unsubscribed',
<add> 'cats.json',
<add> 'agile',
<add> 'privacy',
<add> 'stories',
<add> 'signin',
<add> 'signout',
<add> 'forgot',
<add> 'reset'
<add>]; | 3 |
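The check added in this commit treats a blacklisted name the same way as an already-taken one. A minimal standalone sketch of that pattern follows; the list contents, function names, and the stubbed database lookup are illustrative, not the project's own module.

```js
// Reject reserved route names before ever hitting the database.
const blacklistedUsernames = ['account', 'signin', 'signout', 'map', 'api'];

function isUsernameAvailable(username, existsInDb) {
  if (!username) {
    return false;
  }
  const normalized = username.toLowerCase();
  // Reserved names are reported as unavailable so callers need no special case.
  if (blacklistedUsernames.indexOf(normalized) !== -1) {
    return false;
  }
  return !existsInDb(normalized);
}

// Usage with a stubbed lookup:
const taken = new Set(['alice']);
console.log(isUsernameAvailable('API', (u) => taken.has(u)));   // false (reserved)
console.log(isUsernameAvailable('alice', (u) => taken.has(u))); // false (exists)
console.log(isUsernameAvailable('bob', (u) => taken.has(u)));   // true
```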
Javascript | Javascript | fix typo in comment | 6ea749170b6d59b8dfb8bbbb8d05d986099e7830 | <ide><path>packages/react-reconciler/src/ReactFiberCompleteWork.new.js
<ide> function completeWork(
<ide> // Don't count time spent in a timed out Suspense subtree as part of the base duration.
<ide> const primaryChildFragment = workInProgress.child;
<ide> if (primaryChildFragment !== null) {
<del> // $FlowFixMe Flow doens't support type casting in combiation with the -= operator
<add> // $FlowFixMe Flow doesn't support type casting in combination with the -= operator
<ide> workInProgress.treeBaseDuration -= ((primaryChildFragment.treeBaseDuration: any): number);
<ide> }
<ide> }
<ide> function completeWork(
<ide> // Don't count time spent in a timed out Suspense subtree as part of the base duration.
<ide> const primaryChildFragment = workInProgress.child;
<ide> if (primaryChildFragment !== null) {
<del> // $FlowFixMe Flow doens't support type casting in combiation with the -= operator
<add> // $FlowFixMe Flow doesn't support type casting in combination with the -= operator
<ide> workInProgress.treeBaseDuration -= ((primaryChildFragment.treeBaseDuration: any): number);
<ide> }
<ide> }
<ide> function completeWork(
<ide> // Don't count time spent in a timed out Suspense subtree as part of the base duration.
<ide> const primaryChildFragment = workInProgress.child;
<ide> if (primaryChildFragment !== null) {
<del> // $FlowFixMe Flow doens't support type casting in combiation with the -= operator
<add> // $FlowFixMe Flow doesn't support type casting in combination with the -= operator
<ide> workInProgress.treeBaseDuration -= ((primaryChildFragment.treeBaseDuration: any): number);
<ide> }
<ide> }
<ide><path>packages/react-reconciler/src/ReactFiberCompleteWork.old.js
<ide> function completeWork(
<ide> // Don't count time spent in a timed out Suspense subtree as part of the base duration.
<ide> const primaryChildFragment = workInProgress.child;
<ide> if (primaryChildFragment !== null) {
<del> // $FlowFixMe Flow doens't support type casting in combiation with the -= operator
<add> // $FlowFixMe Flow doesn't support type casting in combination with the -= operator
<ide> workInProgress.treeBaseDuration -= ((primaryChildFragment.treeBaseDuration: any): number);
<ide> }
<ide> }
<ide> function completeWork(
<ide> // Don't count time spent in a timed out Suspense subtree as part of the base duration.
<ide> const primaryChildFragment = workInProgress.child;
<ide> if (primaryChildFragment !== null) {
<del> // $FlowFixMe Flow doens't support type casting in combiation with the -= operator
<add> // $FlowFixMe Flow doesn't support type casting in combination with the -= operator
<ide> workInProgress.treeBaseDuration -= ((primaryChildFragment.treeBaseDuration: any): number);
<ide> }
<ide> }
<ide> function completeWork(
<ide> // Don't count time spent in a timed out Suspense subtree as part of the base duration.
<ide> const primaryChildFragment = workInProgress.child;
<ide> if (primaryChildFragment !== null) {
<del> // $FlowFixMe Flow doens't support type casting in combiation with the -= operator
<add> // $FlowFixMe Flow doesn't support type casting in combination with the -= operator
<ide> workInProgress.treeBaseDuration -= ((primaryChildFragment.treeBaseDuration: any): number);
<ide> }
<ide> } | 2 |
Text | Text | convert inline code tags to markdown | c89c163a0e7df7b29ba33608742eaba09a058090 | <ide><path>guides/source/action_controller_overview.md
<ide> To send a hash you include the key name inside the brackets:
<ide> </form>
<ide> ```
<ide>
<del>When this form is submitted, the value of +params[:client]+ will be <tt>{"name" => "Acme", "phone" => "12345", "address" => {"postcode" => "12345", "city" => "Carrot City"}}</tt>. Note the nested hash in +params[:client][:address]+.
<add>When this form is submitted, the value of +params[:client]+ will be `{"name" => "Acme", "phone" => "12345", "address" => {"postcode" => "12345", "city" => "Carrot City"}}`. Note the nested hash in +params[:client][:address]+.
<ide>
<ide> Note that the +params+ hash is actually an instance of +HashWithIndifferentAccess+ from Active Support, which acts like a hash that lets you use symbols and strings interchangeably as keys.
<ide>
<ide> So for example, if you are sending this JSON parameter:
<ide> { "company": { "name": "acme", "address": "123 Carrot Street" } }
<ide> ```
<ide>
<del>You'll get <tt>params[:company]</tt> as <tt>{ :name => "acme", "address" => "123 Carrot Street" }</tt>.
<add>You'll get `params[:company]` as `{ :name => "acme", "address" => "123 Carrot Street" }`.
<ide>
<ide> Also, if you've turned on +config.wrap_parameters+ in your initializer or calling +wrap_parameters+ in your controller, you can safely omit the root element in the JSON/XML parameter. The parameters will be cloned and wrapped in the key according to your controller's name by default. So the above parameter can be written as:
<ide>
<ide> class UsersController < ApplicationController
<ide> end
<ide> ```
<ide>
<del>Notice that in the above case code is <tt>render :xml => @users</tt> and not <tt>render :xml => @users.to_xml</tt>. That is because if the input is not string then rails automatically invokes +to_xml+ .
<add>Notice that in the above case code is `render :xml => @users` and not `render :xml => @users.to_xml`. That is because if the input is not string then rails automatically invokes +to_xml+ .
<ide>
<ide>
<ide> Filters
<ide> end
<ide>
<ide> This will read and stream the file 4kB at the time, avoiding loading the entire file into memory at once. You can turn off streaming with the +:stream+ option or adjust the block size with the +:buffer_size+ option.
<ide>
<del>If +:type+ is not specified, it will be guessed from the file extension specified in +:filename+. If the content type is not registered for the extension, <tt>application/octet-stream</tt> will be used.
<add>If +:type+ is not specified, it will be guessed from the file extension specified in +:filename+. If the content type is not registered for the extension, `application/octet-stream` will be used.
<ide>
<ide> WARNING: Be careful when using data coming from the client (params, cookies, etc.) to locate the file on disk, as this is a security risk that might allow someone to gain access to files they are not meant to see.
<ide>
<ide> GET /clients/1.pdf
<ide> Parameter Filtering
<ide> -------------------
<ide>
<del>Rails keeps a log file for each environment in the +log+ folder. These are extremely useful when debugging what's actually going on in your application, but in a live application you may not want every bit of information to be stored in the log file. You can filter certain request parameters from your log files by appending them to <tt>config.filter_parameters</tt> in the application configuration. These parameters will be marked [FILTERED] in the log.
<add>Rails keeps a log file for each environment in the +log+ folder. These are extremely useful when debugging what's actually going on in your application, but in a live application you may not want every bit of information to be stored in the log file. You can filter certain request parameters from your log files by appending them to `config.filter_parameters` in the application configuration. These parameters will be marked [FILTERED] in the log.
<ide>
<ide> ```ruby
<ide> config.filter_parameters << :password
<ide><path>guides/source/action_mailer_basics.md
<ide> WARNING. This guide is based on Rails 3.2. Some of the code shown here will not
<ide> Introduction
<ide> ------------
<ide>
<del>Action Mailer allows you to send emails from your application using a mailer model and views. So, in Rails, emails are used by creating mailers that inherit from +ActionMailer::Base+ and live in +app/mailers+. Those mailers have associated views that appear alongside controller views in +app/views+.
<add>Action Mailer allows you to send emails from your application using a mailer model and views. So, in Rails, emails are used by creating mailers that inherit from `ActionMailer::Base` and live in `app/mailers`. Those mailers have associated views that appear alongside controller views in `app/views`.
<ide>
<ide> Sending Emails
<ide> --------------
<ide> So we got the mailer, the views, and the tests.
<ide>
<ide> #### Edit the Mailer
<ide>
<del>+app/mailers/user_mailer.rb+ contains an empty mailer:
<add>`app/mailers/user_mailer.rb` contains an empty mailer:
<ide>
<ide> ```ruby
<ide> class UserMailer < ActionMailer::Base
<ide> default :from => "[email protected]"
<ide> end
<ide> ```
<ide>
<del>Let's add a method called +welcome_email+, that will send an email to the user's registered email address:
<add>Let's add a method called `welcome_email`, that will send an email to the user's registered email address:
<ide>
<ide> ```ruby
<ide> class UserMailer < ActionMailer::Base
<ide> end
<ide>
<ide> Here is a quick explanation of the items presented in the preceding method. For a full list of all available options, please have a look further down at the Complete List of Action Mailer user-settable attributes section.
<ide>
<del>* <tt>default Hash</tt> - This is a hash of default values for any email you send, in this case we are setting the <tt>:from</tt> header to a value for all messages in this class, this can be overridden on a per email basis
<del>* +mail+ - The actual email message, we are passing the <tt>:to</tt> and <tt>:subject</tt> headers in.
<add>* `default Hash` - This is a hash of default values for any email you send, in this case we are setting the `:from` header to a value for all messages in this class, this can be overridden on a per email basis
<add>* `mail` - The actual email message, we are passing the `:to` and `:subject` headers in.
<ide>
<ide> Just like controllers, any instance variables we define in the method become available for use in the views.
<ide>
<ide> #### Create a Mailer View
<ide>
<del>Create a file called +welcome_email.html.erb+ in +app/views/user_mailer/+. This will be the template used for the email, formatted in HTML:
<add>Create a file called `welcome_email.html.erb` in `app/views/user_mailer/`. This will be the template used for the email, formatted in HTML:
<ide>
<ide> ```erb
<ide> <!DOCTYPE html>
<ide> Create a file called +welcome_email.html.erb+ in +app/views/user_mailer/+. This
<ide> </html>
<ide> ```
<ide>
<del>It is also a good idea to make a text part for this email. To do this, create a file called +welcome_email.text.erb+ in +app/views/user_mailer/+:
<add>It is also a good idea to make a text part for this email. To do this, create a file called `welcome_email.text.erb` in `app/views/user_mailer/`:
<ide>
<ide> ```erb
<ide> Welcome to example.com, <%= @user.name %>
<ide> To login to the site, just follow this link: <%= @url %>.
<ide> Thanks for joining and have a great day!
<ide> ```
<ide>
<del>When you call the +mail+ method now, Action Mailer will detect the two templates (text and HTML) and automatically generate a <tt>multipart/alternative</tt> email.
<add>When you call the `mail` method now, Action Mailer will detect the two templates (text and HTML) and automatically generate a `multipart/alternative` email.
<ide>
<ide> #### Wire It Up So That the System Sends the Email When a User Signs Up
<ide>
<ide> There are several ways to do this, some people create Rails Observers to fire off emails, others do it inside of the User Model. However, in Rails 3, mailers are really just another way to render a view. Instead of rendering a view and sending out the HTTP protocol, they are just sending it out through the Email protocols instead. Due to this, it makes sense to just have your controller tell the mailer to send an email when a user is successfully created.
<ide>
<ide> Setting this up is painfully simple.
<ide>
<del>First off, we need to create a simple +User+ scaffold:
<add>First off, we need to create a simple `User` scaffold:
<ide>
<ide> ```shell
<ide> $ rails generate scaffold user name:string email:string login:string
<ide> $ rake db:migrate
<ide> ```
<ide>
<del>Now that we have a user model to play with, we will just edit the +app/controllers/users_controller.rb+ make it instruct the UserMailer to deliver an email to the newly created user by editing the create action and inserting a call to <tt>UserMailer.welcome_email</tt> right after the user is successfully saved:
<add>Now that we have a user model to play with, we will just edit the `app/controllers/users_controller.rb` make it instruct the UserMailer to deliver an email to the newly created user by editing the create action and inserting a call to `UserMailer.welcome_email` right after the user is successfully saved:
<ide>
<ide> ```ruby
<ide> class UsersController < ApplicationController
<ide> end
<ide>
<ide> This provides a much simpler implementation that does not require the registering of observers and the like.
<ide>
<del>The method +welcome_email+ returns a <tt>Mail::Message</tt> object which can then just be told +deliver+ to send itself out.
<add>The method `welcome_email` returns a `Mail::Message` object which can then just be told `deliver` to send itself out.
<ide>
<del>NOTE: In previous versions of Rails, you would call +deliver_welcome_email+ or +create_welcome_email+. This has been deprecated in Rails 3.0 in favour of just calling the method name itself.
<add>NOTE: In previous versions of Rails, you would call `deliver_welcome_email` or `create_welcome_email`. This has been deprecated in Rails 3.0 in favour of just calling the method name itself.
<ide>
<ide> WARNING: Sending out an email should only take a fraction of a second. If you are planning on sending out many emails, or you have a slow domain resolution service, you might want to investigate using a background process like Delayed Job.
<ide>
<ide> For more complex examples such as defining alternate character sets or self-enco
<ide>
<ide> There are just three methods that you need to send pretty much any email message:
<ide>
<del>* <tt>headers</tt> - Specifies any header on the email you want. You can pass a hash of header field names and value pairs, or you can call <tt>headers[:field_name] = 'value'</tt>.
<del>* <tt>attachments</tt> - Allows you to add attachments to your email. For example, <tt>attachments['file-name.jpg'] = File.read('file-name.jpg')</tt>.
<del>* <tt>mail</tt> - Sends the actual email itself. You can pass in headers as a hash to the mail method as a parameter, mail will then create an email, either plain text, or multipart, depending on what email templates you have defined.
<add>* `headers` - Specifies any header on the email you want. You can pass a hash of header field names and value pairs, or you can call `headers[:field_name] = 'value'`.
<add>* `attachments` - Allows you to add attachments to your email. For example, `attachments['file-name.jpg'] = File.read('file-name.jpg')`.
<add>* `mail` - Sends the actual email itself. You can pass in headers as a hash to the mail method as a parameter, mail will then create an email, either plain text, or multipart, depending on what email templates you have defined.
<ide>
<ide> #### Custom Headers
<ide>
<ide> Defining custom headers are simple, you can do it one of three ways:
<ide>
<del>* Defining a header field as a parameter to the +mail+ method:
<add>* Defining a header field as a parameter to the `mail` method:
<ide>
<ide> ```ruby
<ide> mail("X-Spam" => value)
<ide> ```
<ide>
<del>* Passing in a key value assignment to the +headers+ method:
<add>* Passing in a key value assignment to the `headers` method:
<ide>
<ide> ```ruby
<ide> headers["X-Spam"] = value
<ide> ```
<ide>
<del>* Passing a hash of key value pairs to the +headers+ method:
<add>* Passing a hash of key value pairs to the `headers` method:
<ide>
<ide> ```ruby
<ide> headers {"X-Spam" => value, "X-Special" => another_value}
<ide> ```
<ide>
<del>TIP: All <tt>X-Value</tt> headers per the RFC2822 can appear more than once. If you want to delete an <tt>X-Value</tt> header, you need to assign it a value of <tt>nil</tt>.
<add>TIP: All `X-Value` headers per the RFC2822 can appear more than once. If you want to delete an `X-Value` header, you need to assign it a value of `nil`.
<ide>
<ide> #### Adding Attachments
<ide>
<ide> Adding attachments has been simplified in Action Mailer 3.0.
<ide> attachments['filename.jpg'] = File.read('/path/to/filename.jpg')
<ide> ```
<ide>
<del>NOTE: Mail will automatically Base64 encode an attachment. If you want something different, pre-encode your content and pass in the encoded content and encoding in a +Hash+ to the +attachments+ method.
<add>NOTE: Mail will automatically Base64 encode an attachment. If you want something different, pre-encode your content and pass in the encoded content and encoding in a `Hash` to the `attachments` method.
<ide>
<ide> * Pass the file name and specify headers and content and Action Mailer and Mail will use the settings you pass in.
<ide>
<ide> NOTE: If you specify an encoding, Mail will assume that your content is already
<ide>
<ide> Action Mailer 3.0 makes inline attachments, which involved a lot of hacking in pre 3.0 versions, much simpler and trivial as they should be.
<ide>
<del>* Firstly, to tell Mail to turn an attachment into an inline attachment, you just call <tt>#inline</tt> on the attachments method within your Mailer:
<add>* Firstly, to tell Mail to turn an attachment into an inline attachment, you just call `#inline` on the attachments method within your Mailer:
<ide>
<ide> ```ruby
<ide> def welcome
<ide> attachments.inline['image.jpg'] = File.read('/path/to/image.jpg')
<ide> end
<ide> ```
<ide>
<del>* Then in your view, you can just reference <tt>attachments[]</tt> as a hash and specify which attachment you want to show, calling +url+ on it and then passing the result into the <tt>image_tag</tt> method:
<add>* Then in your view, you can just reference `attachments[]` as a hash and specify which attachment you want to show, calling `url` on it and then passing the result into the `image_tag` method:
<ide>
<ide> ```erb
<ide> <p>Hello there, this is our image</p>
<ide>
<ide> <%= image_tag attachments['image.jpg'].url %>
<ide> ```
<ide>
<del>* As this is a standard call to +image_tag+ you can pass in an options hash after the attachment URL as you could for any other image:
<add>* As this is a standard call to `image_tag` you can pass in an options hash after the attachment URL as you could for any other image:
<ide>
<ide> ```erb
<ide> <p>Hello there, this is our image</p>
<ide> end
<ide>
<ide> #### Sending Email To Multiple Recipients
<ide>
<del>It is possible to send email to one or more recipients in one email (e.g., informing all admins of a new signup) by setting the list of emails to the <tt>:to</tt> key. The list of emails can be an array of email addresses or a single string with the addresses separated by commas.
<add>It is possible to send email to one or more recipients in one email (e.g., informing all admins of a new signup) by setting the list of emails to the `:to` key. The list of emails can be an array of email addresses or a single string with the addresses separated by commas.
<ide>
<ide> ```ruby
<ide> class AdminMailer < ActionMailer::Base
<ide> class AdminMailer < ActionMailer::Base
<ide> end
<ide> ```
<ide>
<del>The same format can be used to set carbon copy (Cc:) and blind carbon copy (Bcc:) recipients, by using the <tt>:cc</tt> and <tt>:bcc</tt> keys respectively.
<add>The same format can be used to set carbon copy (Cc:) and blind carbon copy (Bcc:) recipients, by using the `:cc` and `:bcc` keys respectively.
<ide>
<ide> #### Sending Email With Name
<ide>
<ide> Sometimes you wish to show the name of the person instead of just their email address when they receive the email. The trick to doing that is
<del>to format the email address in the format <tt>"Name <email>"</tt>.
<add>to format the email address in the format `"Name <email>"`.
<ide>
<ide> ```ruby
<ide> def welcome_email(user)
<ide> end
<ide>
<ide> ### Mailer Views
<ide>
<del>Mailer views are located in the +app/views/name_of_mailer_class+ directory. The specific mailer view is known to the class because its name is the same as the mailer method. In our example from above, our mailer view for the +welcome_email+ method will be in +app/views/user_mailer/welcome_email.html.erb+ for the HTML version and +welcome_email.text.erb+ for the plain text version.
<add>Mailer views are located in the `app/views/name_of_mailer_class` directory. The specific mailer view is known to the class because its name is the same as the mailer method. In our example from above, our mailer view for the `welcome_email` method will be in `app/views/user_mailer/welcome_email.html.erb` for the HTML version and `welcome_email.text.erb` for the plain text version.
<ide>
<ide> To change the default mailer view for your action you do something like:
<ide>
<ide> class UserMailer < ActionMailer::Base
<ide> end
<ide> ```
<ide>
<del>In this case it will look for templates at +app/views/notifications+ with name +another+.
<add>In this case it will look for templates at `app/views/notifications` with name `another`.
<ide>
<ide> If you want more flexibility you can also pass a block and render specific templates or even render inline or text without using a template file:
<ide>
<ide> class UserMailer < ActionMailer::Base
<ide> end
<ide> ```
<ide>
<del>This will render the template 'another_template.html.erb' for the HTML part and use the rendered text for the text part. The render command is the same one used inside of Action Controller, so you can use all the same options, such as <tt>:text</tt>, <tt>:inline</tt> etc.
<add>This will render the template 'another_template.html.erb' for the HTML part and use the rendered text for the text part. The render command is the same one used inside of Action Controller, so you can use all the same options, such as `:text`, `:inline` etc.
<ide>
<ide> ### Action Mailer Layouts
<ide>
<del>Just like controller views, you can also have mailer layouts. The layout name needs to be the same as your mailer, such as +user_mailer.html.erb+ and +user_mailer.text.erb+ to be automatically recognized by your mailer as a layout.
<add>Just like controller views, you can also have mailer layouts. The layout name needs to be the same as your mailer, such as `user_mailer.html.erb` and `user_mailer.text.erb` to be automatically recognized by your mailer as a layout.
<ide>
<ide> In order to use a different file just use:
<ide>
<ide> class UserMailer < ActionMailer::Base
<ide> end
<ide> ```
<ide>
<del>Just like with controller views, use +yield+ to render the view inside the layout.
<add>Just like with controller views, use `yield` to render the view inside the layout.
<ide>
<del>You can also pass in a <tt>:layout => 'layout_name'</tt> option to the render call inside the format block to specify different layouts for different actions:
<add>You can also pass in a `:layout => 'layout_name'` option to the render call inside the format block to specify different layouts for different actions:
<ide>
<ide> ```ruby
<ide> class UserMailer < ActionMailer::Base
<ide> class UserMailer < ActionMailer::Base
<ide> end
<ide> ```
<ide>
<del>Will render the HTML part using the <tt>my_layout.html.erb</tt> file and the text part with the usual <tt>user_mailer.text.erb</tt> file if it exists.
<add>Will render the HTML part using the `my_layout.html.erb` file and the text part with the usual `user_mailer.text.erb` file if it exists.
<ide>
<ide> ### Generating URLs in Action Mailer Views
<ide>
<del>URLs can be generated in mailer views using +url_for+ or named routes.
<add>URLs can be generated in mailer views using `url_for` or named routes.
<ide>
<del>Unlike controllers, the mailer instance doesn't have any context about the incoming request so you'll need to provide the +:host+, +:controller+, and +:action+:
<add>Unlike controllers, the mailer instance doesn't have any context about the incoming request so you'll need to provide the `:host`, `:controller`, and `:action`:
<ide>
<ide> ```erb
<ide> <%= url_for(:host => "example.com",
<ide> :controller => "welcome",
<ide> :action => "greeting") %>
<ide> ```
<ide>
<del>When using named routes you only need to supply the +:host+:
<add>When using named routes you only need to supply the `:host`:
<ide>
<ide> ```erb
<ide> <%= user_url(@user, :host => "example.com") %>
<ide> ```
<ide>
<ide> Email clients have no web context and so paths have no base URL to form complete web addresses. Thus, when using named routes only the "_url" variant makes sense.
<ide>
<del>It is also possible to set a default host that will be used in all mailers by setting the <tt>:host</tt> option as a configuration option in <tt>config/application.rb</tt>:
<add>It is also possible to set a default host that will be used in all mailers by setting the `:host` option as a configuration option in `config/application.rb`:
<ide>
<ide> ```ruby
<ide> config.action_mailer.default_url_options = { :host => "example.com" }
<ide> ```
<ide>
<del>If you use this setting, you should pass the <tt>:only_path => false</tt> option when using +url_for+. This will ensure that absolute URLs are generated because the +url_for+ view helper will, by default, generate relative URLs when a <tt>:host</tt> option isn't explicitly provided.
<add>If you use this setting, you should pass the `:only_path => false` option when using `url_for`. This will ensure that absolute URLs are generated because the `url_for` view helper will, by default, generate relative URLs when a `:host` option isn't explicitly provided.
<ide>
<ide> ### Sending Multipart Emails
<ide>
<del>Action Mailer will automatically send multipart emails if you have different templates for the same action. So, for our UserMailer example, if you have +welcome_email.text.erb+ and +welcome_email.html.erb+ in +app/views/user_mailer+, Action Mailer will automatically send a multipart email with the HTML and text versions setup as different parts.
<add>Action Mailer will automatically send multipart emails if you have different templates for the same action. So, for our UserMailer example, if you have `welcome_email.text.erb` and `welcome_email.html.erb` in `app/views/user_mailer`, Action Mailer will automatically send a multipart email with the HTML and text versions setup as different parts.
<ide>
<del>The order of the parts getting inserted is determined by the <tt>:parts_order</tt> inside of the <tt>ActionMailer::Base.default</tt> method. If you want to explicitly alter the order, you can either change the <tt>:parts_order</tt> or explicitly render the parts in a different order:
<add>The order of the parts getting inserted is determined by the `:parts_order` inside of the `ActionMailer::Base.default` method. If you want to explicitly alter the order, you can either change the `:parts_order` or explicitly render the parts in a different order:
<ide>
<ide> ```ruby
<ide> class UserMailer < ActionMailer::Base
<ide> Will put the HTML part first, and the plain text part second.
<ide>
<ide> ### Sending Emails with Attachments
<ide>
<del>Attachments can be added by using the +attachments+ method:
<add>Attachments can be added by using the `attachments` method:
<ide>
<ide> ```ruby
<ide> class UserMailer < ActionMailer::Base
<ide> class UserMailer < ActionMailer::Base
<ide> end
<ide> ```
<ide>
<del>The above will send a multipart email with an attachment, properly nested with the top level being <tt>multipart/mixed</tt> and the first part being a <tt>multipart/alternative</tt> containing the plain text and HTML email messages.
<add>The above will send a multipart email with an attachment, properly nested with the top level being `multipart/mixed` and the first part being a `multipart/alternative` containing the plain text and HTML email messages.
<ide>
<del><<<<<<< HEAD
<del>h5. Sending Emails with Dynamic Delivery Options
<add>#### Sending Emails with Dynamic Delivery Options
<ide>
<del>If you wish to override the default delivery options (e.g. SMTP credentials) while delivering emails, you can do this using +delivery_method_options+ in the mailer action.
<add>If you wish to override the default delivery options (e.g. SMTP credentials) while delivering emails, you can do this using `delivery_method_options` in the mailer action.
<ide>
<del><ruby>
<add>```ruby
<ide> class UserMailer < ActionMailer::Base
<ide> def welcome_email(user,company)
<ide> @user = user
<ide> class UserMailer < ActionMailer::Base
<ide> mail(to: user.email, subject: "Please see the Terms and Conditions attached", delivery_method_options: delivery_options)
<ide> end
<ide> end
<del></ruby>
<add>```
<ide>
<del>h3. Receiving Emails
<del>=======
<ide> Receiving Emails
<ide> ----------------
<del>>>>>>>> Convert heading tags and heading section
<ide>
<ide> Receiving and parsing emails with Action Mailer can be a rather complex endeavor. Before your email reaches your Rails app, you would have had to configure your system to somehow forward emails to your app, which needs to be listening for that. So, to receive emails in your Rails app you'll need to:
<ide>
<del>* Implement a +receive+ method in your mailer.
<add>* Implement a `receive` method in your mailer.
<ide>
<del>* Configure your email server to forward emails from the address(es) you would like your app to receive to +/path/to/app/script/rails runner 'UserMailer.receive(STDIN.read)'+.
<add>* Configure your email server to forward emails from the address(es) you would like your app to receive to `/path/to/app/script/rails runner 'UserMailer.receive(STDIN.read)'`.
<ide>
<del>Once a method called +receive+ is defined in any mailer, Action Mailer will parse the raw incoming email into an email object, decode it, instantiate a new mailer, and pass the email object to the mailer +receive+ instance method. Here's an example:
<add>Once a method called `receive` is defined in any mailer, Action Mailer will parse the raw incoming email into an email object, decode it, instantiate a new mailer, and pass the email object to the mailer `receive` instance method. Here's an example:
<ide>
<ide> ```ruby
<ide> class UserMailer < ActionMailer::Base
<ide> Action Mailer Configuration
<ide>
<ide> The following configuration options are best made in one of the environment files (environment.rb, production.rb, etc...)
<ide>
<del>|+template_root+|Determines the base from which template references will be made.|
<del>|+logger+|Generates information on the mailing run if available. Can be set to +nil+ for no logging. Compatible with both Ruby's own +Logger+ and +Log4r+ loggers.|
<del>|+smtp_settings+|Allows detailed configuration for <tt>:smtp</tt> delivery method:<ul><li><tt>:address</tt> - Allows you to use a remote mail server. Just change it from its default "localhost" setting.</li><li><tt>:port</tt> - On the off chance that your mail server doesn't run on port 25, you can change it.</li><li><tt>:domain</tt> - If you need to specify a HELO domain, you can do it here.</li><li><tt>:user_name</tt> - If your mail server requires authentication, set the username in this setting.</li><li><tt>:password</tt> - If your mail server requires authentication, set the password in this setting.</li><li><tt>:authentication</tt> - If your mail server requires authentication, you need to specify the authentication type here. This is a symbol and one of <tt>:plain</tt>, <tt>:login</tt>, <tt>:cram_md5</tt>.</li><li><tt>:enable_starttls_auto</tt> - Set this to <tt>false</tt> if there is a problem with your server certificate that you cannot resolve.</li></ul>|
<del>|+sendmail_settings+|Allows you to override options for the <tt>:sendmail</tt> delivery method.<ul><li><tt>:location</tt> - The location of the sendmail executable. Defaults to <tt>/usr/sbin/sendmail</tt>.</li><li><tt>:arguments</tt> - The command line arguments to be passed to sendmail. Defaults to <tt>-i -t</tt>.</li></ul>|
<del>|+raise_delivery_errors+|Whether or not errors should be raised if the email fails to be delivered.|
<del>|+delivery_method+|Defines a delivery method. Possible values are <tt>:smtp</tt> (default), <tt>:sendmail</tt>, <tt>:file</tt> and <tt>:test</tt>.|
<del>|+perform_deliveries+|Determines whether deliveries are actually carried out when the +deliver+ method is invoked on the Mail message. By default they are, but this can be turned off to help functional testing.|
<del>|+deliveries+|Keeps an array of all the emails sent out through the Action Mailer with delivery_method :test. Most useful for unit and functional testing.|
<del>|+default_options+|Allows you to set default values for the <tt>mail</tt> method options (<tt>:from</tt>, <tt>:reply_to</tt>, etc.).|
<add>|`template_root`|Determines the base from which template references will be made.|
<add>|`logger`|Generates information on the mailing run if available. Can be set to `nil` for no logging. Compatible with both Ruby's own `Logger` and `Log4r` loggers.|
<add>|`smtp_settings`|Allows detailed configuration for `:smtp` delivery method:<ul><li>`:address` - Allows you to use a remote mail server. Just change it from its default "localhost" setting.</li><li>`:port` - On the off chance that your mail server doesn't run on port 25, you can change it.</li><li>`:domain` - If you need to specify a HELO domain, you can do it here.</li><li>`:user_name` - If your mail server requires authentication, set the username in this setting.</li><li>`:password` - If your mail server requires authentication, set the password in this setting.</li><li>`:authentication` - If your mail server requires authentication, you need to specify the authentication type here. This is a symbol and one of `:plain`, `:login`, `:cram_md5`.</li><li>`:enable_starttls_auto` - Set this to `false` if there is a problem with your server certificate that you cannot resolve.</li></ul>|
<add>|`sendmail_settings`|Allows you to override options for the `:sendmail` delivery method.<ul><li>`:location` - The location of the sendmail executable. Defaults to `/usr/sbin/sendmail`.</li><li>`:arguments` - The command line arguments to be passed to sendmail. Defaults to `-i -t`.</li></ul>|
<add>|`raise_delivery_errors`|Whether or not errors should be raised if the email fails to be delivered.|
<add>|`delivery_method`|Defines a delivery method. Possible values are `:smtp` (default), `:sendmail`, `:file` and `:test`.|
<add>|`perform_deliveries`|Determines whether deliveries are actually carried out when the `deliver` method is invoked on the Mail message. By default they are, but this can be turned off to help functional testing.|
<add>|`deliveries`|Keeps an array of all the emails sent out through the Action Mailer with delivery_method :test. Most useful for unit and functional testing.|
<add>|`default_options`|Allows you to set default values for the `mail` method options (`:from`, `:reply_to`, etc.).|
<add>|`async`|Setting this flag will turn on asynchronous message sending, message rendering and delivery will be pushed to `Rails.queue` for processing.|
<ide>
<ide> ### Example Action Mailer Configuration
<ide>
<del>An example would be adding the following to your appropriate <tt>config/environments/$RAILS_ENV.rb</tt> file:
<add>An example would be adding the following to your appropriate `config/environments/$RAILS_ENV.rb` file:
<ide>
<ide> ```ruby
<ide> config.action_mailer.delivery_method = :sendmail
<ide> config.action_mailer.default_options = {from: "[email protected]"}
<ide>
<ide> ### Action Mailer Configuration for GMail
<ide>
<del>As Action Mailer now uses the Mail gem, this becomes as simple as adding to your <tt>config/environments/$RAILS_ENV.rb</tt> file:
<add>As Action Mailer now uses the Mail gem, this becomes as simple as adding to your `config/environments/$RAILS_ENV.rb` file:
<ide>
<ide> ```ruby
<ide> config.action_mailer.delivery_method = :smtp
<ide> config.action_mailer.smtp_settings = {
<ide> Mailer Testing
<ide> --------------
<ide>
<del>By default Action Mailer does not send emails in the test environment. They are just added to the +ActionMailer::Base.deliveries+ array.
<add>By default Action Mailer does not send emails in the test environment. They are just added to the `ActionMailer::Base.deliveries` array.
<ide>
<ide> Testing mailers normally involves two things: One is that the mail was queued, and the other one that the email is correct. With that in mind, we could test our example mailer from above like so:
<ide>
<ide> class UserMailerTest < ActionMailer::TestCase
<ide> end
<ide> ```
<ide>
<del>In the test we send the email and store the returned object in the +email+ variable. We then ensure that it was sent (the first assert), then, in the second batch of assertions, we ensure that the email does indeed contain what we expect.
<add>In the test we send the email and store the returned object in the `email` variable. We then ensure that it was sent (the first assert), then, in the second batch of assertions, we ensure that the email does indeed contain what we expect.
<ide>
<ide> Asynchronous
<ide> ------------
<ide> Rails provides a Synchronous Queue by default. If you want to use an Asynchronou
<ide>
<ide> ### Custom Queues
<ide>
<del>If you need a different queue than <tt>Rails.queue</tt> for your mailer you can use <tt>ActionMailer::Base.queue=</tt>:
<add>If you need a different queue than `Rails.queue` for your mailer you can use `ActionMailer::Base.queue=`:
<ide>
<ide> ```ruby
<ide> class WelcomeMailer < ActionMailer::Base
<ide> self.queue = MyQueue.new
<ide> end
<ide> ```
<ide>
<del>or adding to your <tt>config/environments/$RAILS_ENV.rb</tt>:
<add>or adding to your `config/environments/$RAILS_ENV.rb`:
<ide>
<ide> ```ruby
<ide> config.action_mailer.queue = MyQueue.new
<ide> ```
<ide>
<del>Your custom queue should expect a job that responds to <tt>#run</tt>.
<add>Your custom queue should expect a job that responds to `#run`.
<ide><path>guides/source/action_view_overview.md
<ide> NOTE. Some features of Action View are tied to Active Record, but that doesn't m
<ide> Using Action View with Rails
<ide> ----------------------------
<ide>
<del>For each controller there is an associated directory in the <tt>app/views</tt> directory which holds the template files that make up the views associated with that controller. These files are used to display the view that results from each controller action.
<add>For each controller there is an associated directory in the `app/views` directory which holds the template files that make up the views associated with that controller. These files are used to display the view that results from each controller action.
<ide>
<ide> Let's take a look at what Rails does by default when creating a new resource using the scaffold generator:
<ide>
<ide> $ rails generate scaffold post
<ide> ```
<ide>
<ide> There is a naming convention for views in Rails. Typically, the views share their name with the associated controller action, as you can see above.
<del>For example, the index controller action of the <tt>posts_controller.rb</tt> will use the <tt>index.html.erb</tt> view file in the <tt>app/views/posts</tt> directory.
<add>For example, the index controller action of the `posts_controller.rb` will use the `index.html.erb` view file in the `app/views/posts` directory.
<ide> The complete HTML returned to the client is composed of a combination of this ERB file, a layout template that wraps it, and all the partials that the view may reference. Later on this guide you can find a more detailed documentation of each one of this three components.
<ide>
<ide> Using Action View outside of Rails
<ide> Find below a brief overview of each one of them.
<ide>
<ide> ### Templates
<ide>
<del>Action View templates can be written in several ways. If the template file has a <tt>.erb</tt> extension then it uses a mixture of ERB (included in Ruby) and HTML. If the template file has a <tt>.builder</tt> extension then a fresh instance of <tt>Builder::XmlMarkup</tt> library is used.
<add>Action View templates can be written in several ways. If the template file has a `.erb` extension then it uses a mixture of ERB (included in Ruby) and HTML. If the template file has a `.builder` extension then a fresh instance of `Builder::XmlMarkup` library is used.
<ide>
<del>Rails supports multiple template systems and uses a file extension to distinguish amongst them. For example, an HTML file using the ERB template system will have <tt>.html.erb</tt> as a file extension.
<add>Rails supports multiple template systems and uses a file extension to distinguish amongst them. For example, an HTML file using the ERB template system will have `.html.erb` as a file extension.
<ide>
<ide> #### ERB
<ide>
<ide> To suppress leading and trailing whitespaces, you can use +<%-+ +-%>+ interchang
<ide>
<ide> #### Builder
<ide>
<del>Builder templates are a more programmatic alternative to ERB. They are especially useful for generating XML content. An XmlMarkup object named +xml+ is automatically made available to templates with a <tt>.builder</tt> extension.
<add>Builder templates are a more programmatic alternative to ERB. They are especially useful for generating XML content. An XmlMarkup object named +xml+ is automatically made available to templates with a `.builder` extension.
<ide>
<ide> Here are some basic examples:
<ide>
<ide> Here, the +_ad_banner.html.erb+ and +_footer.html.erb+ partials could contain co
<ide>
<ide> #### The :as and :object options
<ide>
<del>By default <tt>ActionView::Partials::PartialRenderer</tt> has its object in a local variable with the same name as the template. So, given
<add>By default `ActionView::Partials::PartialRenderer` has its object in a local variable with the same name as the template. So, given
<ide>
<ide> ```erb
<ide> <%= render :partial => "product" %>
<ide> ```
<ide>
<del>within product we'll get <tt>@product</tt> in the local variable +product+, as if we had written:
<add>within product we'll get `@product` in the local variable +product+, as if we had written:
<ide>
<ide> ```erb
<ide> <%= render :partial => "product", :locals => { :product => @product } %>
<ide> ```
<ide>
<del>With the <tt>:as</tt> option we can specify a different name for said local variable. For example, if we wanted it to be +item+ instead of product+ we'd do:
<add>With the `:as` option we can specify a different name for said local variable. For example, if we wanted it to be +item+ instead of +product+ we'd do:
<ide>
<ide> ```erb
<ide> <%= render :partial => "product", :as => 'item' %>
<ide> ```
<ide>
<del>The <tt>:object</tt> option can be used to directly specify which object is rendered into the partial; useful when the template's object is elsewhere, in a different ivar or in a local variable for instance.
<add>The `:object` option can be used to directly specify which object is rendered into the partial; useful when the template's object is elsewhere, in a different ivar or in a local variable for instance.
<ide>
<ide> For example, instead of:
<ide>
<ide> you'd do:
<ide> <%= render :partial => "product", :object => @item %>
<ide> ```
<ide>
<del>The <tt>:object</tt> and <tt>:as</tt> options can be used together.
<add>The `:object` and `:as` options can be used together.
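<add>
<add>For instance (a sketch combining the two options; `@item` stands in for wherever the object actually lives):
<add>
<add>```erb
<add><%= render :partial => "product", :object => @item, :as => 'item' %>
<add>```
<add>
<add>This renders the `_product` partial with `@item` exposed to it as the local variable `item`.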
<ide>
<ide> #### Rendering Collections
<ide>
<ide> Sample usage (selecting the associated Author for an instance of Post, +@post+):
<ide> collection_select(:post, :author_id, Author.all, :id, :name_with_initial, {:prompt => true})
<ide> ```
<ide>
<del>If <tt>@post.author_id</tt> is 1, this would return:
<add>If `@post.author_id` is 1, this would return:
<ide>
<ide> ```html
<ide> <select name="post[author_id]">
<ide> Sample usage (selecting the associated Author for an instance of Post, +@post+):
<ide> collection_radio_buttons(:post, :author_id, Author.all, :id, :name_with_initial)
<ide> ```
<ide>
<del>If <tt>@post.author_id</tt> is 1, this would return:
<add>If `@post.author_id` is 1, this would return:
<ide>
<ide> ```html
<ide> <input id="post_author_id_1" name="post[author_id]" type="radio" value="1" checked="checked" />
<ide> Sample usage (selecting the associated Authors for an instance of Post, +@post+)
<ide> collection_check_boxes(:post, :author_ids, Author.all, :id, :name_with_initial)
<ide> ```
<ide>
<del>If <tt>@post.author_ids</tt> is <tt><notextile>[1]</notextile></tt>, this would return:
<add>If `@post.author_ids` is `[1]`, this would return:
<ide>
<ide> ```html
<ide> <input id="post_author_ids_1" name="post[author_ids][]" type="checkbox" value="1" checked="checked" />
<ide> Example:
<ide> select("post", "person_id", Person.all.collect {|p| [ p.name, p.id ] }, { :include_blank => true })
<ide> ```
<ide>
<del>If <tt>@post.person_id</tt> is 1, this would become:
<add>If `@post.person_id` is 1, this would become:
<ide>
<ide> ```html
<ide> <select name="post[person_id]">
<ide><path>guides/source/active_record_querying.md
<ide> The methods are:
<ide> * +uniq+
<ide> * +where+
<ide>
<del>All of the above methods return an instance of <tt>ActiveRecord::Relation</tt>.
<add>All of the above methods return an instance of `ActiveRecord::Relation`.
<ide>
<del>The primary operation of <tt>Model.find(options)</tt> can be summarized as:
<add>The primary operation of `Model.find(options)` can be summarized as:
<ide>
<ide> * Convert the supplied options to an equivalent SQL query.
<ide> * Fire the SQL query and retrieve the corresponding results from the database.
<ide> Active Record provides five different ways of retrieving a single object.
<ide>
<ide> #### Using a Primary Key
<ide>
<del>Using <tt>Model.find(primary_key)</tt>, you can retrieve the object corresponding to the specified _primary key_ that matches any supplied options. For example:
<add>Using `Model.find(primary_key)`, you can retrieve the object corresponding to the specified _primary key_ that matches any supplied options. For example:
<ide>
<ide> ```ruby
<ide> # Find the client with primary key (id) 10.
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients WHERE (clients.id = 10) LIMIT 1
<ide> ```
<ide>
<del><tt>Model.find(primary_key)</tt> will raise an +ActiveRecord::RecordNotFound+ exception if no matching record is found.
<add>`Model.find(primary_key)` will raise an +ActiveRecord::RecordNotFound+ exception if no matching record is found.
<ide>
<ide> #### +take+
<ide>
<del><tt>Model.take</tt> retrieves a record without any implicit ordering. For example:
<add>`Model.take` retrieves a record without any implicit ordering. For example:
<ide>
<ide> ```ruby
<ide> client = Client.take
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients LIMIT 1
<ide> ```
<ide>
<del><tt>Model.take</tt> returns +nil+ if no record is found and no exception will be raised.
<add>`Model.take` returns +nil+ if no record is found and no exception will be raised.
<ide>
<ide> TIP: The retrieved record may vary depending on the database engine.
<ide>
<ide> #### +first+
<ide>
<del><tt>Model.first</tt> finds the first record ordered by the primary key. For example:
<add>`Model.first` finds the first record ordered by the primary key. For example:
<ide>
<ide> ```ruby
<ide> client = Client.first
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients ORDER BY clients.id ASC LIMIT 1
<ide> ```
<ide>
<del><tt>Model.first</tt> returns +nil+ if no matching record is found and no exception will be raised.
<add>`Model.first` returns +nil+ if no matching record is found and no exception will be raised.
<ide>
<ide> #### +last+
<ide>
<del><tt>Model.last</tt> finds the last record ordered by the primary key. For example:
<add>`Model.last` finds the last record ordered by the primary key. For example:
<ide>
<ide> ```ruby
<ide> client = Client.last
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients ORDER BY clients.id DESC LIMIT 1
<ide> ```
<ide>
<del><tt>Model.last</tt> returns +nil+ if no matching record is found and no exception will be raised.
<add>`Model.last` returns +nil+ if no matching record is found and no exception will be raised.
<ide>
<ide> #### +find_by+
<ide>
<del><tt>Model.find_by</tt> finds the first record matching some conditions. For example:
<add>`Model.find_by` finds the first record matching some conditions. For example:
<ide>
<ide> ```ruby
<ide> Client.find_by first_name: 'Lifo'
<ide> Client.where(first_name: 'Lifo').take
<ide>
<ide> #### +take!+
<ide>
<del><tt>Model.take!</tt> retrieves a record without any implicit ordering. For example:
<add>`Model.take!` retrieves a record without any implicit ordering. For example:
<ide>
<ide> ```ruby
<ide> client = Client.take!
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients LIMIT 1
<ide> ```
<ide>
<del><tt>Model.take!</tt> raises +ActiveRecord::RecordNotFound+ if no matching record is found.
<add>`Model.take!` raises +ActiveRecord::RecordNotFound+ if no matching record is found.
<ide>
<ide> #### +first!+
<ide>
<del><tt>Model.first!</tt> finds the first record ordered by the primary key. For example:
<add>`Model.first!` finds the first record ordered by the primary key. For example:
<ide>
<ide> ```ruby
<ide> client = Client.first!
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients ORDER BY clients.id ASC LIMIT 1
<ide> ```
<ide>
<del><tt>Model.first!</tt> raises +ActiveRecord::RecordNotFound+ if no matching record is found.
<add>`Model.first!` raises +ActiveRecord::RecordNotFound+ if no matching record is found.
<ide>
<ide> #### +last!+
<ide>
<del><tt>Model.last!</tt> finds the last record ordered by the primary key. For example:
<add>`Model.last!` finds the last record ordered by the primary key. For example:
<ide>
<ide> ```ruby
<ide> client = Client.last!
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients ORDER BY clients.id DESC LIMIT 1
<ide> ```
<ide>
<del><tt>Model.last!</tt> raises +ActiveRecord::RecordNotFound+ if no matching record is found.
<add>`Model.last!` raises +ActiveRecord::RecordNotFound+ if no matching record is found.
<ide>
<ide> #### +find_by!+
<ide>
<del><tt>Model.find_by!</tt> finds the first record matching some conditions. It raises +ActiveRecord::RecordNotFound+ if no matching record is found. For example:
<add>`Model.find_by!` finds the first record matching some conditions. It raises +ActiveRecord::RecordNotFound+ if no matching record is found. For example:
<ide>
<ide> ```ruby
<ide> Client.find_by! first_name: 'Lifo'
<ide> Client.where(first_name: 'Lifo').take!
<ide>
<ide> #### Using Multiple Primary Keys
<ide>
<del><tt>Model.find(array_of_primary_key)</tt> accepts an array of _primary keys_, returning an array containing all of the matching records for the supplied _primary keys_. For example:
<add>`Model.find(array_of_primary_key)` accepts an array of _primary keys_, returning an array containing all of the matching records for the supplied _primary keys_. For example:
<ide>
<ide> ```ruby
<ide> # Find the clients with primary keys 1 and 10.
<ide> The SQL equivalent of the above is:
<ide> SELECT * FROM clients WHERE (clients.id IN (1,10))
<ide> ```
<ide>
<del>WARNING: <tt>Model.find(array_of_primary_key)</tt> will raise an +ActiveRecord::RecordNotFound+ exception unless a matching record is found for <strong>all</strong> of the supplied primary keys.
<add>WARNING: `Model.find(array_of_primary_key)` will raise an +ActiveRecord::RecordNotFound+ exception unless a matching record is found for <strong>all</strong> of the supplied primary keys.
<ide>
<ide> #### take
<ide>
<del><tt>Model.take(limit)</tt> retrieves the first number of records specified by +limit+ without any explicit ordering:
<add>`Model.take(limit)` retrieves the first number of records specified by +limit+ without any explicit ordering:
<ide>
<ide> ```ruby
<ide> Client.take(2)
<ide> SELECT * FROM clients LIMIT 2
<ide>
<ide> #### first
<ide>
<del><tt>Model.first(limit)</tt> finds the first number of records specified by +limit+ ordered by primary key:
<add>`Model.first(limit)` finds the first number of records specified by +limit+ ordered by primary key:
<ide>
<ide> ```ruby
<ide> Client.first(2)
<ide> SELECT * FROM clients LIMIT 2
<ide>
<ide> #### last
<ide>
<del><tt>Model.last(limit)</tt> finds the number of records specified by +limit+ ordered by primary key in descending order:
<add>`Model.last(limit)` finds the number of records specified by +limit+ ordered by primary key in descending order:
<ide>
<ide> ```ruby
<ide> Client.last(2)
<ide> User.find_each(:start => 2000, :batch_size => 5000) do |user|
<ide> end
<ide> ```
<ide>
<del>Another example would be if you wanted multiple workers handling the same processing queue. You could have each worker handle 10000 records by setting the appropriate <tt>:start</tt> option on each worker.
<add>Another example would be if you wanted multiple workers handling the same processing queue. You could have each worker handle 10000 records by setting the appropriate `:start` option on each worker.
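<add>
<add>A minimal sketch of that setup (the `worker_number` variable is assumed to come from your job system and ids are assumed to be roughly contiguous; `find_each` only takes a starting id here, so capping each worker at exactly 10000 records is left to the surrounding code):
<add>
<add>```ruby
<add># worker_number is 0, 1, 2, ... as assigned by whatever runs the workers.
<add>start_id = worker_number * 10_000
<add>
<add>User.find_each(:start => start_id, :batch_size => 1000) do |user|
<add>  # process user
<add>end
<add>```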
<ide>
<ide> NOTE: The +:include+ option allows you to name associations that should be loaded alongside the models.
<ide>
<ide> Client.order("orders_count ASC").order("created_at DESC")
<ide> Selecting Specific Fields
<ide> -------------------------
<ide>
<del>By default, <tt>Model.find</tt> selects all the fields from the result set using +select *+.
<add>By default, `Model.find` selects all the fields from the result set using +select *+.
<ide>
<ide> To select only a subset of fields from the result set, you can specify the subset via the +select+ method.
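<add>
<add>For example (the column names here are illustrative):
<add>
<add>```ruby
<add>Client.select("viewable_by, locked")
<add>```
<add>
<add>This issues SQL along the lines of `SELECT viewable_by, locked FROM clients`.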
<ide>
<ide> c2.save # Raises an ActiveRecord::StaleObjectError
<ide>
<ide> You're then responsible for dealing with the conflict by rescuing the exception and either rolling back, merging, or otherwise applying the business logic needed to resolve the conflict.
<ide>
<del>This behavior can be turned off by setting <tt>ActiveRecord::Base.lock_optimistically = false</tt>.
<add>This behavior can be turned off by setting `ActiveRecord::Base.lock_optimistically = false`.
<ide>
<ide> To override the name of the +lock_version+ column, +ActiveRecord::Base+ provides a class attribute called +locking_column+:
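<add>
<add>A minimal sketch (the `Client` model and the `lock_client_column` name are just illustrative):
<add>
<add>```ruby
<add>class Client < ActiveRecord::Base
<add>  self.locking_column = :lock_client_column
<add>end
<add>```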
<ide>
<ide> For every field (also known as an attribute) you define in your table, Active Re
<ide>
<ide> You can also use +find_last_by_*+ methods which will find the last record matching your argument.
<ide>
<del>You can specify an exclamation point (<tt>!</tt>) on the end of the dynamic finders to get them to raise an +ActiveRecord::RecordNotFound+ error if they do not return any records, like +Client.find_by_name!("Ryan")+
<add>You can specify an exclamation point (`!`) on the end of the dynamic finders to get them to raise an +ActiveRecord::RecordNotFound+ error if they do not return any records, like +Client.find_by_name!("Ryan")+
<ide>
<ide> If you want to find both by name and locked, you can chain these finders together by simply typing "+and+" between the fields. For example, +Client.find_by_first_name_and_locked("Ryan", true)+.
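<add>
<add>Putting those together, a short sketch (this assumes `Client` has `first_name` and `locked` columns, as in the other examples):
<add>
<add>```ruby
<add>Client.find_by_first_name("Ryan")                   # first match, or nil
<add>Client.find_last_by_first_name("Ryan")              # last match, or nil
<add>Client.find_by_first_name!("Ryan")                  # raises ActiveRecord::RecordNotFound if none
<add>Client.find_by_first_name_and_locked("Ryan", true)  # chains two fields with "_and_"
<add>```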
<ide>
<del>WARNING: Up to and including Rails 3.1, when the number of arguments passed to a dynamic finder method is lesser than the number of fields, say <tt>Client.find_by_name_and_locked("Ryan")</tt>, the behavior is to pass +nil+ as the missing argument. This is *unintentional* and this behavior will be changed in Rails 3.2 to throw an +ArgumentError+.
<add>WARNING: Up to and including Rails 3.1, when the number of arguments passed to a dynamic finder method is less than the number of fields, say `Client.find_by_name_and_locked("Ryan")`, the behavior is to pass +nil+ as the missing argument. This is *unintentional* and this behavior will be changed in Rails 3.2 to throw an +ArgumentError+.
<ide>
<ide> Find or build a new object
<ide> --------------------------
<ide> Client.find_by_sql("SELECT * FROM clients
<ide> +select_all+
<ide> ------------
<ide>
<del><tt>find_by_sql</tt> has a close relative called +connection#select_all+. +select_all+ will retrieve objects from the database using custom SQL just like +find_by_sql+ but will not instantiate them. Instead, you will get an array of hashes where each hash indicates a record.
<add>`find_by_sql` has a close relative called +connection#select_all+. +select_all+ will retrieve objects from the database using custom SQL just like +find_by_sql+ but will not instantiate them. Instead, you will get an array of hashes where each hash indicates a record.
<ide>
<ide> ```ruby
<ide> Client.connection.select_all("SELECT * FROM clients WHERE id = '1'")
<ide> Client.connection.select_all("SELECT * FROM clients WHERE id = '1'")
<ide> +pluck+
<ide> -------
<ide>
<del><tt>pluck</tt> can be used to query a single or multiple columns from the underlying table of a model. It accepts a list of column names as argument and returns an array of values of the specified columns with the corresponding data type.
<add>`pluck` can be used to query a single or multiple columns from the underlying table of a model. It accepts a list of column names as argument and returns an array of values of the specified columns with the corresponding data type.
<ide>
<ide> ```ruby
<ide> Client.where(:active => true).pluck(:id)
<ide><path>guides/source/active_record_validations_callbacks.md
<ide> The possible length constraint options are:
<ide> * +:in+ (or +:within+) - The attribute length must be included in a given interval. The value for this option must be a range.
<ide> * +:is+ - The attribute length must be equal to the given value.
<ide>
<del>The default error messages depend on the type of length validation being performed. You can personalize these messages using the +:wrong_length+, +:too_long+, and +:too_short+ options and <tt>%{count}</tt> as a placeholder for the number corresponding to the length constraint being used. You can still use the +:message+ option to specify an error message.
<add>The default error messages depend on the type of length validation being performed. You can personalize these messages using the +:wrong_length+, +:too_long+, and +:too_short+ options and `%{count}` as a placeholder for the number corresponding to the length constraint being used. You can still use the +:message+ option to specify an error message.
<ide>
<ide> ```ruby
<ide> class Person < ActiveRecord::Base
<ide> end
<ide>
<ide> If you validate the presence of an object associated via a +has_one+ or +has_many+ relationship, it will check that the object is neither +blank?+ nor +marked_for_destruction?+.
<ide>
<del>Since +false.blank?+ is true, if you want to validate the presence of a boolean field you should use <tt>validates :field_name, :inclusion => { :in => [true, false] }</tt>.
<add>Since +false.blank?+ is true, if you want to validate the presence of a boolean field you should use `validates :field_name, :inclusion => { :in => [true, false] }`.
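<add>
<add>As a concrete sketch (the `Order` model and its `paid` flag are made up for illustration):
<add>
<add>```ruby
<add>class Order < ActiveRecord::Base
<add>  # Accepts true or false, but rejects nil.
<add>  validates :paid, :inclusion => { :in => [true, false] }
<add>end
<add>```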
<ide>
<ide> The default error message is "_can't be empty_".
<ide>
<ide> When the built-in validation helpers are not enough for your needs, you can writ
<ide>
<ide> ### Custom Validators
<ide>
<del>Custom validators are classes that extend <tt>ActiveModel::Validator</tt>. These classes must implement a +validate+ method which takes a record as an argument and performs the validation on it. The custom validator is called using the +validates_with+ method.
<add>Custom validators are classes that extend `ActiveModel::Validator`. These classes must implement a +validate+ method which takes a record as an argument and performs the validation on it. The custom validator is called using the +validates_with+ method.
<ide>
<ide> ```ruby
<ide> class MyValidator < ActiveModel::Validator
<ide> class Person
<ide> end
<ide> ```
<ide>
<del>The easiest way to add custom validators for validating individual attributes is with the convenient <tt>ActiveModel::EachValidator</tt>. In this case, the custom validator class must implement a +validate_each+ method which takes three arguments: record, attribute and value which correspond to the instance, the attribute to be validated and the value of the attribute in the passed instance.
<add>The easiest way to add custom validators for validating individual attributes is with the convenient `ActiveModel::EachValidator`. In this case, the custom validator class must implement a +validate_each+ method which takes three arguments: record, attribute and value which correspond to the instance, the attribute to be validated and the value of the attribute in the passed instance.
<ide>
<ide> ```ruby
<ide> class EmailValidator < ActiveModel::EachValidator
<ide><path>guides/source/active_support_core_extensions.md
<ide> The following values are considered to be blank in a Rails application:
<ide>
<ide> * any other object that responds to +empty?+ and it is empty.
<ide>
<del>INFO: The predicate for strings uses the Unicode-aware character class <tt>[:space:]</tt>, so for example U+2029 (paragraph separator) is considered to be whitespace.
<add>INFO: The predicate for strings uses the Unicode-aware character class `[:space:]`, so for example U+2029 (paragraph separator) is considered to be whitespace.
<ide>
<ide> WARNING: Note that numbers are not mentioned, in particular 0 and 0.0 are *not* blank.
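<add>
<add>A few quick examples of how these rules play out:
<add>
<add>```ruby
<add>"".blank?     # => true
<add>"   ".blank?  # => true, whitespace-only strings are blank
<add>nil.blank?    # => true
<add>[].blank?     # => true
<add>{}.blank?     # => true
<add>0.blank?      # => false, numbers are never blank
<add>"foo".blank?  # => false
<add>```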
<ide>
<ide> account.to_query('company[name]')
<ide>
<ide> so its output is ready to be used in a query string.
<ide>
<del>Arrays return the result of applying +to_query+ to each element with <tt>_key_[]</tt> as key, and join the result with "&":
<add>Arrays return the result of applying +to_query+ to each element with `_key_[]` as key, and join the result with "&":
<ide>
<ide> ```ruby
<ide> [3.4, -45.6].to_query('sample')
<ide> NOTE: Defined in +active_support/core_ext/class/subclasses.rb+.
<ide>
<ide> #### +descendants+
<ide>
<del>The +descendants+ method returns all classes that are <tt><</tt> than its receiver:
<add>The +descendants+ method returns all classes that are `<` than its receiver:
<ide>
<ide> ```ruby
<ide> class C; end
<ide> s # => "<script>...</script>"
<ide>
<ide> It is your responsibility to ensure calling +html_safe+ on a particular string is fine.
<ide>
<del>If you append onto a safe string, either in-place with +concat+/<tt><<</tt>, or with <tt>+</tt>, the result is a safe string. Unsafe arguments are escaped:
<add>If you append onto a safe string, either in-place with +concat+/`<<`, or with `+`, the result is a safe string. Unsafe arguments are escaped:
<ide>
<ide> ```ruby
<ide> "".html_safe + "<" # => "<"
<ide> To insert something verbatim use the +raw+ helper rather than calling +html_safe
<ide> <%= raw @cms.current_template %> <%# inserts @cms.current_template as is %>
<ide> ```
<ide>
<del>or, equivalently, use <tt><%==</tt>:
<add>or, equivalently, use `<%==`:
<ide>
<ide> ```erb
<ide> <%== @cms.current_template %> <%# inserts @cms.current_template as is %>
<ide> NOTE: Defined in +active_support/core_ext/string/filters.rb+.
<ide>
<ide> ### +inquiry+
<ide>
<del>The <tt>inquiry</tt> method converts a string into a +StringInquirer+ object making equality checks prettier.
<add>The `inquiry` method converts a string into a +StringInquirer+ object making equality checks prettier.
<ide>
<ide> ```ruby
<ide> "production".inquiry.production? # => true
<ide> The method +pluralize+ returns the plural of its receiver:
<ide>
<ide> As the previous example shows, Active Support knows some irregular plurals and uncountable nouns. Built-in rules can be extended in +config/initializers/inflections.rb+. That file is generated by the +rails+ command and has instructions in comments.
<ide>
<del>+pluralize+ can also take an optional +count+ parameter. If <tt>count == 1</tt> the singular form will be returned. For any other value of +count+ the plural form will be returned:
<add>+pluralize+ can also take an optional +count+ parameter. If `count == 1` the singular form will be returned. For any other value of +count+ the plural form will be returned:
<ide>
<ide> ```ruby
<ide> "dude".pluralize(0) # => "dudes"
<ide> end
<ide>
<ide> That may be handy to compute method names in a language that follows that convention, for example JavaScript.
<ide>
<del>INFO: As a rule of thumb you can think of +camelize+ as the inverse of +underscore+, though there are cases where that does not hold: <tt>"SSLError".underscore.camelize</tt> gives back <tt>"SslError"</tt>. To support cases such as this, Active Support allows you to specify acronyms in +config/initializers/inflections.rb+:
<add>INFO: As a rule of thumb you can think of +camelize+ as the inverse of +underscore+, though there are cases where that does not hold: `"SSLError".underscore.camelize` gives back `"SslError"`. To support cases such as this, Active Support allows you to specify acronyms in +config/initializers/inflections.rb+:
<ide>
<ide> ```ruby
<ide> ActiveSupport::Inflector.inflections do |inflect|
<ide> def load_missing_constant(from_mod, const_name)
<ide> end
<ide> ```
<ide>
<del>INFO: As a rule of thumb you can think of +underscore+ as the inverse of +camelize+, though there are cases where that does not hold. For example, <tt>"SSLError".underscore.camelize</tt> gives back <tt>"SslError"</tt>.
<add>INFO: As a rule of thumb you can think of +underscore+ as the inverse of +camelize+, though there are cases where that does not hold. For example, `"SSLError".underscore.camelize` gives back `"SslError"`.
<ide>
<ide> NOTE: Defined in +active_support/core_ext/string/inflections.rb+.
<ide>
<ide> The method +sum+ adds the elements of an enumerable:
<ide> (1..100).sum # => 5050
<ide> ```
<ide>
<del>Addition only assumes the elements respond to <tt>+</tt>:
<add>Addition only assumes the elements respond to `+`:
<ide>
<ide> ```ruby
<ide> [[1, 2], [2, 3], [3, 4]].sum # => [1, 2, 2, 3, 3, 4]
<ide> NOTE: Defined in +active_support/core_ext/array/access.rb+.
<ide>
<ide> #### +prepend+
<ide>
<del>This method is an alias of <tt>Array#unshift</tt>.
<add>This method is an alias of `Array#unshift`.
<ide>
<ide> ```ruby
<ide> %w(a b c d).prepend('e') # => %w(e a b c d)
<ide> NOTE: Defined in +active_support/core_ext/array/prepend_and_append.rb+.
<ide>
<ide> #### +append+
<ide>
<del>This method is an alias of <tt>Array#<<</tt>.
<add>This method is an alias of `Array#<<`.
<ide>
<ide> ```ruby
<ide> %w(a b c d).append('e') # => %w(a b c d e)
<ide> User.exists?(:email => params[:email])
<ide>
<ide> That syntactic sugar is used a lot in Rails to avoid positional arguments where there would be too many, offering instead interfaces that emulate named parameters. In particular it is very idiomatic to use a trailing hash for options.
<ide>
<del>If a method expects a variable number of arguments and uses <tt>*</tt> in its declaration, however, such an options hash ends up being an item of the array of arguments, where it loses its role.
<add>If a method expects a variable number of arguments and uses `*` in its declaration, however, such an options hash ends up being an item of the array of arguments, where it loses its role.
<ide>
<ide> In those cases, you may give an options hash a distinguished treatment with +extract_options!+. This method checks the type of the last item of an array. If it is a hash it pops it and returns it, otherwise it returns an empty hash.
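<add>
<add>A small sketch of the idiom (the method name `options` is arbitrary):
<add>
<add>```ruby
<add>def options(*args)
<add>  args.extract_options!
<add>end
<add>
<add>options(1, 2)            # => {}
<add>options(1, 2, :a => :b)  # => {:a=>:b}
<add>```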
<ide>
<ide> The method +to_sentence+ turns an array into a string containing a sentence that
<ide>
<ide> This method accepts three options:
<ide>
<del>* <tt>:two_words_connector</tt>: What is used for arrays of length 2. Default is " and ".
<del>* <tt>:words_connector</tt>: What is used to join the elements of arrays with 3 or more elements, except for the last two. Default is ", ".
<del>* <tt>:last_word_connector</tt>: What is used to join the last items of an array with 3 or more elements. Default is ", and ".
<add>* `:two_words_connector`: What is used for arrays of length 2. Default is " and ".
<add>* `:words_connector`: What is used to join the elements of arrays with 3 or more elements, except for the last two. Default is ", ".
<add>* `:last_word_connector`: What is used to join the last items of an array with 3 or more elements. Default is ", and ".
<ide>
<ide> The defaults for these options can be localised, their keys are:
<ide>
<del>|_. Option |_. I18n key |
<del> | <tt>:two_words_connector</tt> | <tt>support.array.two_words_connector</tt> |
<del> | <tt>:words_connector</tt> | <tt>support.array.words_connector</tt> |
<del> | <tt>:last_word_connector</tt> | <tt>support.array.last_word_connector</tt> |
<add>| Option | I18n key |
<add>| ---------------------- | ----------------------------------- |
<add>| `:two_words_connector` | `support.array.two_words_connector` |
<add>| `:words_connector` | `support.array.words_connector` |
<add>| `:last_word_connector` | `support.array.last_word_connector` |
<ide>
<del>Options <tt>:connector</tt> and <tt>:skip_last_comma</tt> are deprecated.
<add>Options `:connector` and `:skip_last_comma` are deprecated.
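<add>
<add>For example (default connectors first, then an illustrative override):
<add>
<add>```ruby
<add>%w(Earth Wind).to_sentence       # => "Earth and Wind"
<add>%w(Earth Wind Fire).to_sentence  # => "Earth, Wind, and Fire"
<add>
<add>%w(Earth Wind Fire).to_sentence(:last_word_connector => ' and finally ')
<add># => "Earth, Wind and finally Fire"
<add>```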
<ide>
<ide> NOTE: Defined in +active_support/core_ext/array/conversions.rb+.
<ide>
<ide> #### +to_formatted_s+
<ide>
<ide> The method +to_formatted_s+ acts like +to_s+ by default.
<ide>
<del>If the array contains items that respond to +id+, however, it may be passed the symbol <tt>:db</tt> as argument. That's typically used with collections of ARs. Returned strings are:
<add>If the array contains items that respond to +id+, however, it may be passed the symbol `:db` as argument. That's typically used with collections of ARs. Returned strings are:
<ide>
<ide> ```ruby
<ide> [].to_formatted_s(:db) # => "null"
<ide> Contributor.limit(2).order(:rank).to_xml
<ide>
<ide> To do so it sends +to_xml+ to every item in turn, and collects the results under a root node. All items must respond to +to_xml+, an exception is raised otherwise.
<ide>
<del>By default, the name of the root element is the underscorized and dasherized plural of the name of the class of the first item, provided the rest of elements belong to that type (checked with <tt>is_a?</tt>) and they are not hashes. In the example above that's "contributors".
<add>By default, the name of the root element is the underscorized and dasherized plural of the name of the class of the first item, provided the rest of elements belong to that type (checked with `is_a?`) and they are not hashes. In the example above that's "contributors".
<ide>
<ide> If there's any element that does not belong to the type of the first one the root node becomes "objects":
<ide>
<ide> If the receiver is an array of hashes the root element is by default also "objec
<ide> # </objects>
<ide> ```
<ide>
<del>WARNING. If the collection is empty the root element is by default "nil-classes". That's a gotcha, for example the root element of the list of contributors above would not be "contributors" if the collection was empty, but "nil-classes". You may use the <tt>:root</tt> option to ensure a consistent root element.
<add>WARNING. If the collection is empty the root element is by default "nil-classes". That's a gotcha, for example the root element of the list of contributors above would not be "contributors" if the collection was empty, but "nil-classes". You may use the `:root` option to ensure a consistent root element.
<ide>
<del>The name of children nodes is by default the name of the root node singularized. In the examples above we've seen "contributor" and "object". The option <tt>:children</tt> allows you to set these node names.
<add>The name of children nodes is by default the name of the root node singularized. In the examples above we've seen "contributor" and "object". The option `:children` allows you to set these node names.
<ide>
<del>The default XML builder is a fresh instance of <tt>Builder::XmlMarkup</tt>. You can configure your own builder via the <tt>:builder</tt> option. The method also accepts options like <tt>:dasherize</tt> and friends, they are forwarded to the builder:
<add>The default XML builder is a fresh instance of `Builder::XmlMarkup`. You can configure your own builder via the `:builder` option. The method also accepts options like `:dasherize` and friends, they are forwarded to the builder:
<ide>
<ide> ```ruby
<ide> Contributor.limit(2).order(:rank).to_xml(:skip_types => true)
<ide> Array.wrap([1, 2, 3]) # => [1, 2, 3]
<ide> Array.wrap(0) # => [0]
<ide> ```
<ide>
<del>This method is similar in purpose to <tt>Kernel#Array</tt>, but there are some differences:
<add>This method is similar in purpose to `Kernel#Array`, but there are some differences:
<ide>
<del>* If the argument responds to +to_ary+ the method is invoked. <tt>Kernel#Array</tt> moves on to try +to_a+ if the returned value is +nil+, but <tt>Array.wrap</tt> returns +nil+ right away.
<del>* If the returned value from +to_ary+ is neither +nil+ nor an +Array+ object, <tt>Kernel#Array</tt> raises an exception, while <tt>Array.wrap</tt> does not, it just returns the value.
<add>* If the argument responds to +to_ary+ the method is invoked. `Kernel#Array` moves on to try +to_a+ if the returned value is +nil+, but `Array.wrap` returns +nil+ right away.
<add>* If the returned value from +to_ary+ is neither +nil+ nor an +Array+ object, `Kernel#Array` raises an exception, while `Array.wrap` does not, it just returns the value.
<ide> * It does not call +to_a+ on the argument, though special-cases +nil+ to return an empty array.
<ide>
<ide> The last point is particularly worth comparing for some enumerables:
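<add>
<add>For instance (a quick sketch; `Array()` below is `Kernel#Array`):
<add>
<add>```ruby
<add>Array.wrap(:foo => :bar)  # => [{:foo=>:bar}]
<add>Array(:foo => :bar)       # => [[:foo, :bar]]
<add>```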
<ide> There's also a related idiom that uses the splat operator:
<ide> [*object]
<ide> ```
<ide>
<del>which in Ruby 1.8 returns +[nil]+ for +nil+, and calls to <tt>Array(object)</tt> otherwise. (Please if you know the exact behavior in 1.9 contact fxn.)
<add>which in Ruby 1.8 returns +[nil]+ for +nil+, and otherwise is equivalent to calling `Array(object)`. (Please contact fxn if you know the exact behavior in 1.9.)
<ide>
<del>Thus, in this case the behavior is different for +nil+, and the differences with <tt>Kernel#Array</tt> explained above apply to the rest of +object+s.
<add>Thus, in this case the behavior is different for +nil+, and the differences with `Kernel#Array` explained above apply to the rest of +object+s.
<ide>
<ide> NOTE: Defined in +active_support/core_ext/array/wrap.rb+.
<ide>
<ide> The method +to_xml+ returns a string containing an XML representation of its rec
<ide>
<ide> To do so, the method loops over the pairs and builds nodes that depend on the _values_. Given a pair +key+, +value+:
<ide>
<del>* If +value+ is a hash there's a recursive call with +key+ as <tt>:root</tt>.
<add>* If +value+ is a hash there's a recursive call with +key+ as `:root`.
<ide>
<del>* If +value+ is an array there's a recursive call with +key+ as <tt>:root</tt>, and +key+ singularized as <tt>:children</tt>.
<add>* If +value+ is an array there's a recursive call with +key+ as `:root`, and +key+ singularized as `:children`.
<ide>
<del>* If +value+ is a callable object it must expect one or two arguments. Depending on the arity, the callable is invoked with the +options+ hash as first argument with +key+ as <tt>:root</tt>, and +key+ singularized as second argument. Its return value becomes a new node.
<add>* If +value+ is a callable object it must expect one or two arguments. Depending on the arity, the callable is invoked with the +options+ hash as first argument with +key+ as `:root`, and +key+ singularized as second argument. Its return value becomes a new node.
<ide>
<del>* If +value+ responds to +to_xml+ the method is invoked with +key+ as <tt>:root</tt>.
<add>* If +value+ responds to +to_xml+ the method is invoked with +key+ as `:root`.
<ide>
<del>* Otherwise, a node with +key+ as tag is created with a string representation of +value+ as text node. If +value+ is +nil+ an attribute "nil" set to "true" is added. Unless the option <tt>:skip_types</tt> exists and is true, an attribute "type" is added as well according to the following mapping:
<add>* Otherwise, a node with +key+ as tag is created with a string representation of +value+ as text node. If +value+ is +nil+ an attribute "nil" set to "true" is added. Unless the option `:skip_types` exists and is true, an attribute "type" is added as well according to the following mapping:
<ide>
<ide> ```ruby
<ide> XML_TYPE_NAMES = {
<ide> XML_TYPE_NAMES = {
<ide> }
<ide> ```
<ide>
<del>By default the root node is "hash", but that's configurable via the <tt>:root</tt> option.
<add>By default the root node is "hash", but that's configurable via the `:root` option.
<ide>
<del>The default XML builder is a fresh instance of <tt>Builder::XmlMarkup</tt>. You can configure your own builder with the <tt>:builder</tt> option. The method also accepts options like <tt>:dasherize</tt> and friends, they are forwarded to the builder.
<add>The default XML builder is a fresh instance of `Builder::XmlMarkup`. You can configure your own builder with the `:builder` option. The method also accepts options like `:dasherize` and friends, they are forwarded to the builder.
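<add>
<add>As a quick illustration of the default output (whitespace in the generated XML may differ):
<add>
<add>```ruby
<add>{"foo" => 1, "bar" => 2}.to_xml
<add># => <?xml version="1.0" encoding="UTF-8"?>
<add>#    <hash>
<add>#      <foo type="integer">1</foo>
<add>#      <bar type="integer">2</bar>
<add>#    </hash>
<add>```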
<ide>
<ide> NOTE: Defined in +active_support/core_ext/hash/conversions.rb+.
<ide>
<ide><path>guides/source/ajax_on_rails.md
<ide> You are ready to add some AJAX love to your Rails app!
<ide>
<ide> ### Examples
<ide>
<del>To make them working with AJAX, simply pass the <tt>remote: true</tt> option to
<add>To make them work with AJAX, simply pass the `remote: true` option to
<ide> the original non-remote method.
<ide>
<ide> ```ruby
<ide><path>guides/source/api_documentation_guidelines.md
<ide> Short docs do not need an explicit "Examples" label to introduce snippets; they
<ide>
<ide> ```ruby
<ide> # Converts a collection of elements into a formatted string by calling
<del># <tt>to_s</tt> on all elements and joining them.
<add># `to_s` on all elements and joining them.
<ide> #
<ide> # Blog.all.to_formatted_s # => "First PostSecond PostThird Post"
<ide> ```
<ide> Use fixed-width fonts for:
<ide>
<ide> ```ruby
<ide> class Array
<del> # Calls <tt>to_param</tt> on all its elements and joins the result with
<del> # slashes. This is used by <tt>url_for</tt> in Action Pack.
<add> # Calls `to_param` on all its elements and joins the result with
<add> # slashes. This is used by `url_for` in Action Pack.
<ide> def to_param
<ide> collect { |e| e.to_param }.join '/'
<ide> end
<ide> end
<ide> ```
<ide>
<del>WARNING: Using a pair of ++...++ for fixed-width font only works with *words*; that is: anything matching <tt>\A\w+\z</tt>. For anything else use +<tt>...</tt>+, notably symbols, setters, inline snippets, etc.
<add>WARNING: Using a pair of ++...++ for fixed-width font only works with *words*; that is: anything matching `\A\w+\z`. For anything else use +<tt>...</tt>+, notably symbols, setters, inline snippets, etc.
<ide>
<ide> ### Regular Font
<ide>
<ide> When "true" and "false" are English words rather than Ruby keywords use a regula
<ide> # Runs all the validations within the specified context. Returns true if no errors are found,
<ide> # false otherwise.
<ide> #
<del># If the argument is false (default is +nil+), the context is set to <tt>:create</tt> if
<del># <tt>new_record?</tt> is true, and to <tt>:update</tt> if it is not.
<add># If the argument is false (default is +nil+), the context is set to `:create` if
<add># `new_record?` is true, and to `:update` if it is not.
<ide> #
<del># Validations with no <tt>:on</tt> option will run no matter the context. Validations with
<del># some <tt>:on</tt> option will only run in the specified context.
<add># Validations with no `:on` option will run no matter the context. Validations with
<add># some `:on` option will only run in the specified context.
<ide> def valid?(context = nil)
<ide> ...
<ide> end
<ide> Description Lists
<ide> In lists of options, parameters, etc. use a hyphen between the item and its description (reads better than a colon because normally options are symbols):
<ide>
<ide> ```ruby
<del># * <tt>:allow_nil</tt> - Skip validation if attribute is <tt>nil</tt>.
<add># * `:allow_nil` - Skip validation if attribute is `nil`.
<ide> ```
<ide>
<ide> The description starts in upper case and ends with a full stop—it's standard English.
<ide><path>guides/source/asset_pipeline.md
<ide> In Rails 3.1, the asset pipeline is enabled by default. It can be disabled in +c
<ide> config.assets.enabled = false
<ide> ```
<ide>
<del>You can also disable the asset pipeline while creating a new application by passing the <tt>--skip-sprockets</tt> option.
<add>You can also disable the asset pipeline while creating a new application by passing the `--skip-sprockets` option.
<ide>
<ide> ```
<ide> rails new appname --skip-sprockets
<ide> It is important to note that files you want to reference outside a manifest must
<ide>
<ide> Sprockets uses files named +index+ (with the relevant extensions) for a special purpose.
<ide>
<del>For example, if you have a jQuery library with many modules, which is stored in +lib/assets/library_name+, the file +lib/assets/library_name/index.js+ serves as the manifest for all files in this library. This file could include a list of all the required files in order, or a simple <tt>require_tree</tt> directive.
<add>For example, if you have a jQuery library with many modules, which is stored in +lib/assets/library_name+, the file +lib/assets/library_name/index.js+ serves as the manifest for all files in this library. This file could include a list of all the required files in order, or a simple `require_tree` directive.
<ide>
<ide> The library as a whole can be accessed in the site's application manifest like so:
<ide>
<ide> Images can also be organized into subdirectories if required, and they can be ac
<ide> <%= image_tag "icons/rails.png" %>
<ide> ```
<ide>
<del>WARNING: If you're precompiling your assets (see "In Production":#in-production below), linking to an asset that does not exist will raise an exception in the calling page. This includes linking to a blank string. As such, be careful using <tt>image_tag</tt> and the other helpers with user-supplied data.
<add>WARNING: If you're precompiling your assets (see "In Production":#in-production below), linking to an asset that does not exist will raise an exception in the calling page. This includes linking to a blank string. As such, be careful using `image_tag` and the other helpers with user-supplied data.
<ide>
<ide> #### CSS and ERB
<ide>
<ide> This can be changed with the +config.assets.manifest+ option. A fully specified
<ide> config.assets.manifest = '/path/to/some/other/location'
<ide> ```
<ide>
<del>NOTE: If there are missing precompiled files in production you will get an <tt>Sprockets::Helpers::RailsHelper::AssetPaths::AssetNotPrecompiledError</tt> exception indicating the name of the missing file(s).
<add>NOTE: If there are missing precompiled files in production you will get a `Sprockets::Helpers::RailsHelper::AssetPaths::AssetNotPrecompiledError` exception indicating the name of the missing file(s).
<ide>
<ide> #### Far-future Expires header
<ide>
<ide> There are two caveats:
<ide> * You must not run the Capistrano deployment task that precompiles assets.
<ide> * You must change the following two application configuration settings.
<ide>
<del>In <tt>config/environments/development.rb</tt>, place the following line:
<add>In `config/environments/development.rb`, place the following line:
<ide>
<ide> ```erb
<ide> config.assets.prefix = "/dev-assets"
<ide> This is a handy option if you are updating an existing project (pre Rails 3.1) t
<ide>
<ide> The X-Sendfile header is a directive to the web server to ignore the response from the application, and instead serve a specified file from disk. This option is off by default, but can be enabled if your server supports it. When enabled, this passes responsibility for serving the file to the web server, which is faster.
<ide>
<del>Apache and nginx support this option, which can be enabled in <tt>config/environments/production.rb</tt>.
<add>Apache and nginx support this option, which can be enabled in `config/environments/production.rb`.
<ide>
<ide> ```erb
<ide> # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
<ide><path>guides/source/association_basics.md
<ide> c.first_name == o.customer.first_name # => true
<ide>
<ide> There are a few limitations to +inverse_of+ support:
<ide>
<del>* They do not work with <tt>:through</tt> associations.
<del>* They do not work with <tt>:polymorphic</tt> associations.
<del>* They do not work with <tt>:as</tt> associations.
<add>* They do not work with `:through` associations.
<add>* They do not work with `:polymorphic` associations.
<add>* They do not work with `:as` associations.
<ide> * For +belongs_to+ associations, +has_many+ inverse associations are ignored.
<ide>
<ide> Detailed Association Reference
<ide> The +belongs_to+ association creates a one-to-one match with another model. In d
<ide>
<ide> When you declare a +belongs_to+ association, the declaring class automatically gains four methods related to the association:
<ide>
<del>* <tt><em>association</em>(force_reload = false)</tt>
<del>* <tt><em>association</em>=(associate)</tt>
<del>* <tt>build_<em>association</em>(attributes = {})</tt>
<del>* <tt>create_<em>association</em>(attributes = {})</tt>
<add>* `<em>association</em>(force_reload = false)`
<add>* `<em>association</em>=(associate)`
<add>* `build_<em>association</em>(attributes = {})`
<add>* `create_<em>association</em>(attributes = {})`
<ide>
<del>In all of these methods, <tt><em>association</em></tt> is replaced with the symbol passed as the first argument to +belongs_to+. For example, given the declaration:
<add>In all of these methods, `<em>association</em>` is replaced with the symbol passed as the first argument to +belongs_to+. For example, given the declaration:
<ide>
<ide> ```ruby
<ide> class Order < ActiveRecord::Base
<ide> create_customer
<ide>
<ide> NOTE: When initializing a new +has_one+ or +belongs_to+ association you must use the +build_+ prefix to build the association, rather than the +association.build+ method that would be used for +has_many+ or +has_and_belongs_to_many+ associations. To create one, use the +create_+ prefix.
<ide>
<del>##### <tt><em>association</em>(force_reload = false)</tt>
<add>##### `<em>association</em>(force_reload = false)`
<ide>
<del>The <tt><em>association</em></tt> method returns the associated object, if any. If no associated object is found, it returns +nil+.
<add>The `<em>association</em>` method returns the associated object, if any. If no associated object is found, it returns +nil+.
<ide>
<ide> ```ruby
<ide> @customer = @order.customer
<ide> ```
<ide>
<ide> If the associated object has already been retrieved from the database for this object, the cached version will be returned. To override this behavior (and force a database read), pass +true+ as the +force_reload+ argument.
<ide>
<del>##### <tt>_association_=(associate)</tt>
<add>##### `_association_=(associate)`
<ide>
<del>The <tt><em>association</em>=</tt> method assigns an associated object to this object. Behind the scenes, this means extracting the primary key from the associate object and setting this object's foreign key to the same value.
<add>The `<em>association</em>=` method assigns an associated object to this object. Behind the scenes, this means extracting the primary key from the associate object and setting this object's foreign key to the same value.
<ide>
<ide> ```ruby
<ide> @order.customer = @customer
<ide> ```
<ide>
<del>##### <tt>build_<em>association</em>(attributes = {})</tt>
<add>##### `build_<em>association</em>(attributes = {})`
<ide>
<del>The <tt>build_<em>association</em></tt> method returns a new object of the associated type. This object will be instantiated from the passed attributes, and the link through this object's foreign key will be set, but the associated object will _not_ yet be saved.
<add>The `build_<em>association</em>` method returns a new object of the associated type. This object will be instantiated from the passed attributes, and the link through this object's foreign key will be set, but the associated object will _not_ yet be saved.
<ide>
<ide> ```ruby
<ide> @customer = @order.build_customer(:customer_number => 123,
<ide> :customer_name => "John Doe")
<ide> ```
<ide>
<del>##### <tt>create_<em>association</em>(attributes = {})</tt>
<add>##### `create_<em>association</em>(attributes = {})`
<ide>
<del>The <tt>create_<em>association</em></tt> method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through this object's foreign key will be set, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<add>The `create_<em>association</em>` method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through this object's foreign key will be set, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<ide>
<ide> ```ruby
<ide> @customer = @order.create_customer(:customer_number => 123,
<ide> TIP: If you use the +select+ method on a +belongs_to+ association, you should al
<ide>
<ide> #### Do Any Associated Objects Exist?
<ide>
<del>You can see if any associated objects exist by using the <tt><em>association</em>.nil?</tt> method:
<add>You can see if any associated objects exist by using the `<em>association</em>.nil?` method:
<ide>
<ide> ```ruby
<ide> if @order.customer.nil?
<ide> The +has_one+ association creates a one-to-one match with another model. In data
<ide>
<ide> When you declare a +has_one+ association, the declaring class automatically gains four methods related to the association:
<ide>
<del>* <tt><em>association</em>(force_reload = false)</tt>
<del>* <tt><em>association</em>=(associate)</tt>
<del>* <tt>build_<em>association</em>(attributes = {})</tt>
<del>* <tt>create_<em>association</em>(attributes = {})</tt>
<add>* `<em>association</em>(force_reload = false)`
<add>* `<em>association</em>=(associate)`
<add>* `build_<em>association</em>(attributes = {})`
<add>* `create_<em>association</em>(attributes = {})`
<ide>
<del>In all of these methods, <tt><em>association</em></tt> is replaced with the symbol passed as the first argument to +has_one+. For example, given the declaration:
<add>In all of these methods, `<em>association</em>` is replaced with the symbol passed as the first argument to +has_one+. For example, given the declaration:
<ide>
<ide> ```ruby
<ide> class Supplier < ActiveRecord::Base
<ide> create_account
<ide>
<ide> NOTE: When initializing a new +has_one+ or +belongs_to+ association you must use the +build_+ prefix to build the association, rather than the +association.build+ method that would be used for +has_many+ or +has_and_belongs_to_many+ associations. To create one, use the +create_+ prefix.
<ide>
<del>##### <tt><em>association</em>(force_reload = false)</tt>
<add>##### `<em>association</em>(force_reload = false)`
<ide>
<del>The <tt><em>association</em></tt> method returns the associated object, if any. If no associated object is found, it returns +nil+.
<add>The `<em>association</em>` method returns the associated object, if any. If no associated object is found, it returns +nil+.
<ide>
<ide> ```ruby
<ide> @account = @supplier.account
<ide> ```
<ide>
<ide> If the associated object has already been retrieved from the database for this object, the cached version will be returned. To override this behavior (and force a database read), pass +true+ as the +force_reload+ argument.
<ide>
<del>##### <tt><em>association</em>=(associate)</tt>
<add>##### `<em>association</em>=(associate)`
<ide>
<del>The <tt><em>association</em>=</tt> method assigns an associated object to this object. Behind the scenes, this means extracting the primary key from this object and setting the associate object's foreign key to the same value.
<add>The `<em>association</em>=` method assigns an associated object to this object. Behind the scenes, this means extracting the primary key from this object and setting the associate object's foreign key to the same value.
<ide>
<ide> ```ruby
<ide> @supplier.account = @account
<ide> ```
<ide>
<del>##### <tt>build_<em>association</em>(attributes = {})</tt>
<add>##### `build_<em>association</em>(attributes = {})`
<ide>
<del>The <tt>build_<em>association</em></tt> method returns a new object of the associated type. This object will be instantiated from the passed attributes, and the link through its foreign key will be set, but the associated object will _not_ yet be saved.
<add>The `build_<em>association</em>` method returns a new object of the associated type. This object will be instantiated from the passed attributes, and the link through its foreign key will be set, but the associated object will _not_ yet be saved.
<ide>
<ide> ```ruby
<ide> @account = @supplier.build_account(:terms => "Net 30")
<ide> ```
<ide>
<del>##### <tt>create_<em>association</em>(attributes = {})</tt>
<add>##### `create_<em>association</em>(attributes = {})`
<ide>
<del>The <tt>create_<em>association</em></tt> method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through its foreign key will be set, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<add>The `create_<em>association</em>` method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through its foreign key will be set, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<ide>
<ide> ```ruby
<ide> @account = @supplier.create_account(:terms => "Net 30")
<ide> The +select+ method lets you override the SQL +SELECT+ clause that is used to re
<ide>
<ide> #### Do Any Associated Objects Exist?
<ide>
<del>You can see if any associated objects exist by using the <tt><em>association</em>.nil?</tt> method:
<add>You can see if any associated objects exist by using the `<em>association</em>.nil?` method:
<ide>
<ide> ```ruby
<ide> if @supplier.account.nil?
<ide> If either of these saves fails due to validation errors, then the assignment sta
<ide>
<ide> If the parent object (the one declaring the +has_one+ association) is unsaved (that is, +new_record?+ returns +true+) then the child objects are not saved. They will automatically be saved when the parent object is saved.
<ide>
<del>If you want to assign an object to a +has_one+ association without saving the object, use the <tt><em>association</em>.build</tt> method.
<add>If you want to assign an object to a +has_one+ association without saving the object, use the `<em>association</em>.build` method.
<ide>
<ide> ### +has_many+ Association Reference
<ide>
<ide> The +has_many+ association creates a one-to-many relationship with another model
<ide>
<ide> When you declare a +has_many+ association, the declaring class automatically gains 13 methods related to the association:
<ide>
<del>* <tt><em>collection</em>(force_reload = false)</tt>
<del>* <tt><em>collection</em><<(object, ...)</tt>
<del>* <tt><em>collection</em>.delete(object, ...)</tt>
<del>* <tt><em>collection</em>=objects</tt>
<del>* <tt><em>collection_singular</em>_ids</tt>
<del>* <tt><em>collection_singular</em>_ids=ids</tt>
<del>* <tt><em>collection</em>.clear</tt>
<del>* <tt><em>collection</em>.empty?</tt>
<del>* <tt><em>collection</em>.size</tt>
<del>* <tt><em>collection</em>.find(...)</tt>
<del>* <tt><em>collection</em>.where(...)</tt>
<del>* <tt><em>collection</em>.exists?(...)</tt>
<del>* <tt><em>collection</em>.build(attributes = {}, ...)</tt>
<del>* <tt><em>collection</em>.create(attributes = {})</tt>
<add>* `<em>collection</em>(force_reload = false)`
<add>* `<em>collection</em><<(object, ...)`
<add>* `<em>collection</em>.delete(object, ...)`
<add>* `<em>collection</em>=objects`
<add>* `<em>collection_singular</em>_ids`
<add>* `<em>collection_singular</em>_ids=ids`
<add>* `<em>collection</em>.clear`
<add>* `<em>collection</em>.empty?`
<add>* `<em>collection</em>.size`
<add>* `<em>collection</em>.find(...)`
<add>* `<em>collection</em>.where(...)`
<add>* `<em>collection</em>.exists?(...)`
<add>* `<em>collection</em>.build(attributes = {}, ...)`
<add>* `<em>collection</em>.create(attributes = {})`
<ide>
<del>In all of these methods, <tt><em>collection</em></tt> is replaced with the symbol passed as the first argument to +has_many+, and <tt><em>collection_singular</em></tt> is replaced with the singularized version of that symbol.. For example, given the declaration:
<add>In all of these methods, `<em>collection</em>` is replaced with the symbol passed as the first argument to +has_many+, and `<em>collection_singular</em>` is replaced with the singularized version of that symbol. For example, given the declaration:
<ide>
<ide> ```ruby
<ide> class Customer < ActiveRecord::Base
<ide> orders.build(attributes = {}, ...)
<ide> orders.create(attributes = {})
<ide> ```
<ide>
<del>##### <tt><em>collection</em>(force_reload = false)</tt>
<add>##### `<em>collection</em>(force_reload = false)`
<ide>
<del>The <tt><em>collection</em></tt> method returns an array of all of the associated objects. If there are no associated objects, it returns an empty array.
<add>The `<em>collection</em>` method returns an array of all of the associated objects. If there are no associated objects, it returns an empty array.
<ide>
<ide> ```ruby
<ide> @orders = @customer.orders
<ide> ```
<ide>
<del>##### <tt><em>collection</em><<(object, ...)</tt>
<add>##### `<em>collection</em><<(object, ...)`
<ide>
<del>The <tt><em>collection</em><<</tt> method adds one or more objects to the collection by setting their foreign keys to the primary key of the calling model.
<add>The `<em>collection</em><<` method adds one or more objects to the collection by setting their foreign keys to the primary key of the calling model.
<ide>
<ide> ```ruby
<ide> @customer.orders << @order1
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.delete(object, ...)</tt>
<add>##### `<em>collection</em>.delete(object, ...)`
<ide>
<del>The <tt><em>collection</em>.delete</tt> method removes one or more objects from the collection by setting their foreign keys to +NULL+.
<add>The `<em>collection</em>.delete` method removes one or more objects from the collection by setting their foreign keys to +NULL+.
<ide>
<ide> ```ruby
<ide> @customer.orders.delete(@order1)
<ide> The <tt><em>collection</em>.delete</tt> method removes one or more objects from
<ide> WARNING: Additionally, objects will be destroyed if they're associated with +:dependent => :destroy+, and deleted if they're associated with +:dependent => :delete_all+.
<ide>
<ide>
<del>##### <tt><em>collection</em>=objects</tt>
<add>##### `<em>collection</em>=objects`
<ide>
<del>The <tt><em>collection</em>=</tt> method makes the collection contain only the supplied objects, by adding and deleting as appropriate.
<add>The `<em>collection</em>=` method makes the collection contain only the supplied objects, by adding and deleting as appropriate.
<ide>
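<add>For example, reusing the +@order1+ and +@order2+ objects from the earlier examples, replacing a customer's orders might look like:
<add>
<add>```ruby
<add>@customer.orders = [@order1, @order2]
<add>```
<add>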
<del>##### <tt><em>collection_singular</em>_ids</tt>
<add>##### `<em>collection_singular</em>_ids`
<ide>
<del>The <tt><em>collection_singular</em>_ids</tt> method returns an array of the ids of the objects in the collection.
<add>The `<em>collection_singular</em>_ids` method returns an array of the ids of the objects in the collection.
<ide>
<ide> ```ruby
<ide> @order_ids = @customer.order_ids
<ide> ```
<ide>
<del>##### <tt><em>collection_singular</em>_ids=ids</tt>
<add>##### `<em>collection_singular</em>_ids=ids`
<ide>
<del>The <tt><em>collection_singular</em>_ids=</tt> method makes the collection contain only the objects identified by the supplied primary key values, by adding and deleting as appropriate.
<add>The `<em>collection_singular</em>_ids=` method makes the collection contain only the objects identified by the supplied primary key values, by adding and deleting as appropriate.
<ide>
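<add>For example, assuming orders with these primary keys exist (the id values here are purely illustrative):
<add>
<add>```ruby
<add>@customer.order_ids = [1, 2, 3]
<add>```
<add>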
<del>##### <tt><em>collection</em>.clear</tt>
<add>##### `<em>collection</em>.clear`
<ide>
<del>The <tt><em>collection</em>.clear</tt> method removes every object from the collection. This destroys the associated objects if they are associated with +:dependent => :destroy+, deletes them directly from the database if +:dependent => :delete_all+, and otherwise sets their foreign keys to +NULL+.
<add>The `<em>collection</em>.clear` method removes every object from the collection. This destroys the associated objects if they are associated with +:dependent => :destroy+, deletes them directly from the database if +:dependent => :delete_all+, and otherwise sets their foreign keys to +NULL+.
<ide>
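<add>For example, clearing a customer's orders might look like:
<add>
<add>```ruby
<add>@customer.orders.clear
<add>```
<add>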
<del>##### <tt><em>collection</em>.empty?</tt>
<add>##### `<em>collection</em>.empty?`
<ide>
<del>The <tt><em>collection</em>.empty?</tt> method returns +true+ if the collection does not contain any associated objects.
<add>The `<em>collection</em>.empty?` method returns +true+ if the collection does not contain any associated objects.
<ide>
<ide> ```ruby
<ide> <% if @customer.orders.empty? %>
<ide> No Orders Found
<ide> <% end %>
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.size</tt>
<add>##### `<em>collection</em>.size`
<ide>
<del>The <tt><em>collection</em>.size</tt> method returns the number of objects in the collection.
<add>The `<em>collection</em>.size` method returns the number of objects in the collection.
<ide>
<ide> ```ruby
<ide> @order_count = @customer.orders.size
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.find(...)</tt>
<add>##### `<em>collection</em>.find(...)`
<ide>
<del>The <tt><em>collection</em>.find</tt> method finds objects within the collection. It uses the same syntax and options as +ActiveRecord::Base.find+.
<add>The `<em>collection</em>.find` method finds objects within the collection. It uses the same syntax and options as +ActiveRecord::Base.find+.
<ide>
<ide> ```ruby
<ide> @open_orders = @customer.orders.find(1)
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.where(...)</tt>
<add>##### `<em>collection</em>.where(...)`
<ide>
<del>The <tt><em>collection</em>.where</tt> method finds objects within the collection based on the conditions supplied but the objects are loaded lazily meaning that the database is queried only when the object(s) are accessed.
<add>The `<em>collection</em>.where` method finds objects within the collection based on the conditions supplied, but the objects are loaded lazily, meaning that the database is queried only when the object(s) are accessed.
<ide>
<ide> ```ruby
<ide> @open_orders = @customer.orders.where(:open => true) # No query yet
<ide> @open_order = @open_orders.first # Now the database will be queried
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.exists?(...)</tt>
<add>##### `<em>collection</em>.exists?(...)`
<ide>
<del>The <tt><em>collection</em>.exists?</tt> method checks whether an object meeting the supplied conditions exists in the collection. It uses the same syntax and options as +ActiveRecord::Base.exists?+.
<add>The `<em>collection</em>.exists?` method checks whether an object meeting the supplied conditions exists in the collection. It uses the same syntax and options as +ActiveRecord::Base.exists?+.
<ide>
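<add>For example, a check against the +:order_number+ attribute (used in the +build+ example below) might look like:
<add>
<add>```ruby
<add>@customer.orders.exists?(:order_number => "A12345")
<add>```
<add>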
<del>##### <tt><em>collection</em>.build(attributes = {}, ...)</tt>
<add>##### `<em>collection</em>.build(attributes = {}, ...)`
<ide>
<del>The <tt><em>collection</em>.build</tt> method returns one or more new objects of the associated type. These objects will be instantiated from the passed attributes, and the link through their foreign key will be created, but the associated objects will _not_ yet be saved.
<add>The `<em>collection</em>.build` method returns one or more new objects of the associated type. These objects will be instantiated from the passed attributes, and the link through their foreign key will be created, but the associated objects will _not_ yet be saved.
<ide>
<ide> ```ruby
<ide> @order = @customer.orders.build(:order_date => Time.now,
<ide> :order_number => "A12345")
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.create(attributes = {})</tt>
<add>##### `<em>collection</em>.create(attributes = {})`
<ide>
<del>The <tt><em>collection</em>.create</tt> method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through its foreign key will be created, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<add>The `<em>collection</em>.create` method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through its foreign key will be created, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<ide>
<ide> ```ruby
<ide> @order = @customer.orders.create(:order_date => Time.now,
<ide> If any of these saves fails due to validation errors, then the assignment statem
<ide>
<ide> If the parent object (the one declaring the +has_many+ association) is unsaved (that is, +new_record?+ returns +true+) then the child objects are not saved when they are added. All unsaved members of the association will automatically be saved when the parent is saved.
<ide>
<del>If you want to assign an object to a +has_many+ association without saving the object, use the <tt><em>collection</em>.build</tt> method.
<add>If you want to assign an object to a +has_many+ association without saving the object, use the `<em>collection</em>.build` method.
<ide>
<ide> ### +has_and_belongs_to_many+ Association Reference
<ide>
<ide> The +has_and_belongs_to_many+ association creates a many-to-many relationship wi
<ide>
<ide> When you declare a +has_and_belongs_to_many+ association, the declaring class automatically gains 13 methods related to the association:
<ide>
<del>* <tt><em>collection</em>(force_reload = false)</tt>
<del>* <tt><em>collection</em><<(object, ...)</tt>
<del>* <tt><em>collection</em>.delete(object, ...)</tt>
<del>* <tt><em>collection</em>=objects</tt>
<del>* <tt><em>collection_singular</em>_ids</tt>
<del>* <tt><em>collection_singular</em>_ids=ids</tt>
<del>* <tt><em>collection</em>.clear</tt>
<del>* <tt><em>collection</em>.empty?</tt>
<del>* <tt><em>collection</em>.size</tt>
<del>* <tt><em>collection</em>.find(...)</tt>
<del>* <tt><em>collection</em>.where(...)</tt>
<del>* <tt><em>collection</em>.exists?(...)</tt>
<del>* <tt><em>collection</em>.build(attributes = {})</tt>
<del>* <tt><em>collection</em>.create(attributes = {})</tt>
<add>* `<em>collection</em>(force_reload = false)`
<add>* `<em>collection</em><<(object, ...)`
<add>* `<em>collection</em>.delete(object, ...)`
<add>* `<em>collection</em>=objects`
<add>* `<em>collection_singular</em>_ids`
<add>* `<em>collection_singular</em>_ids=ids`
<add>* `<em>collection</em>.clear`
<add>* `<em>collection</em>.empty?`
<add>* `<em>collection</em>.size`
<add>* `<em>collection</em>.find(...)`
<add>* `<em>collection</em>.where(...)`
<add>* `<em>collection</em>.exists?(...)`
<add>* `<em>collection</em>.build(attributes = {})`
<add>* `<em>collection</em>.create(attributes = {})`
<ide>
<del>In all of these methods, <tt><em>collection</em></tt> is replaced with the symbol passed as the first argument to +has_and_belongs_to_many+, and <tt><em>collection_singular</em></tt> is replaced with the singularized version of that symbol. For example, given the declaration:
<add>In all of these methods, `<em>collection</em>` is replaced with the symbol passed as the first argument to +has_and_belongs_to_many+, and `<em>collection_singular</em>` is replaced with the singularized version of that symbol. For example, given the declaration:
<ide>
<ide> ```ruby
<ide> class Part < ActiveRecord::Base
<ide> If the join table for a +has_and_belongs_to_many+ association has additional col
<ide> WARNING: The use of extra attributes on the join table in a +has_and_belongs_to_many+ association is deprecated. If you require this sort of complex behavior on the table that joins two models in a many-to-many relationship, you should use a +has_many :through+ association instead of +has_and_belongs_to_many+.
<ide>
<ide>
<del>##### <tt><em>collection</em>(force_reload = false)</tt>
<add>##### `<em>collection</em>(force_reload = false)`
<ide>
<del>The <tt><em>collection</em></tt> method returns an array of all of the associated objects. If there are no associated objects, it returns an empty array.
<add>The `<em>collection</em>` method returns an array of all of the associated objects. If there are no associated objects, it returns an empty array.
<ide>
<ide> ```ruby
<ide> @assemblies = @part.assemblies
<ide> ```
<ide>
<del>##### <tt><em>collection</em><<(object, ...)</tt>
<add>##### `<em>collection</em><<(object, ...)`
<ide>
<del>The <tt><em>collection</em><<</tt> method adds one or more objects to the collection by creating records in the join table.
<add>The `<em>collection</em><<` method adds one or more objects to the collection by creating records in the join table.
<ide>
<ide> ```ruby
<ide> @part.assemblies << @assembly1
<ide> ```
<ide>
<del>NOTE: This method is aliased as <tt><em>collection</em>.concat</tt> and <tt><em>collection</em>.push</tt>.
<add>NOTE: This method is aliased as `<em>collection</em>.concat` and `<em>collection</em>.push`.
<ide>
<del>##### <tt><em>collection</em>.delete(object, ...)</tt>
<add>##### `<em>collection</em>.delete(object, ...)`
<ide>
<del>The <tt><em>collection</em>.delete</tt> method removes one or more objects from the collection by deleting records in the join table. This does not destroy the objects.
<add>The `<em>collection</em>.delete` method removes one or more objects from the collection by deleting records in the join table. This does not destroy the objects.
<ide>
<ide> ```ruby
<ide> @part.assemblies.delete(@assembly1)
<ide> ```
<ide>
<del>##### <tt><em>collection</em>=objects</tt>
<add>##### `<em>collection</em>=objects`
<ide>
<del>The <tt><em>collection</em>=</tt> method makes the collection contain only the supplied objects, by adding and deleting as appropriate.
<add>The `<em>collection</em>=` method makes the collection contain only the supplied objects, by adding and deleting as appropriate.
<ide>
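<add>For example, given a couple of assembly objects (the second one here is illustrative), replacing a part's assemblies might look like:
<add>
<add>```ruby
<add>@part.assemblies = [@assembly1, @assembly2]
<add>```
<add>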
<del>##### <tt><em>collection_singular</em>_ids</tt>
<add>##### `<em>collection_singular</em>_ids`
<ide>
<del>The <tt><em>collection_singular</em>_ids</tt> method returns an array of the ids of the objects in the collection.
<add>The `<em>collection_singular</em>_ids` method returns an array of the ids of the objects in the collection.
<ide>
<ide> ```ruby
<ide> @assembly_ids = @part.assembly_ids
<ide> ```
<ide>
<del>##### <tt><em>collection_singular</em>_ids=ids</tt>
<add>##### `<em>collection_singular</em>_ids=ids`
<ide>
<del>The <tt><em>collection_singular</em>_ids=</tt> method makes the collection contain only the objects identified by the supplied primary key values, by adding and deleting as appropriate.
<add>The `<em>collection_singular</em>_ids=` method makes the collection contain only the objects identified by the supplied primary key values, by adding and deleting as appropriate.
<ide>
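<add>For example, assuming assemblies with these primary keys exist (the id values here are purely illustrative):
<add>
<add>```ruby
<add>@part.assembly_ids = [1, 2, 3]
<add>```
<add>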
<del>##### <tt><em>collection</em>.clear</tt>
<add>##### `<em>collection</em>.clear`
<ide>
<del>The <tt><em>collection</em>.clear</tt> method removes every object from the collection by deleting the rows from the joining table. This does not destroy the associated objects.
<add>The `<em>collection</em>.clear` method removes every object from the collection by deleting the rows from the joining table. This does not destroy the associated objects.
<ide>
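<add>For example, clearing a part's assemblies might look like:
<add>
<add>```ruby
<add>@part.assemblies.clear
<add>```
<add>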
<del>##### <tt><em>collection</em>.empty?</tt>
<add>##### `<em>collection</em>.empty?`
<ide>
<del>The <tt><em>collection</em>.empty?</tt> method returns +true+ if the collection does not contain any associated objects.
<add>The `<em>collection</em>.empty?` method returns +true+ if the collection does not contain any associated objects.
<ide>
<ide> ```ruby
<ide> <% if @part.assemblies.empty? %>
<ide> This part is not used in any assemblies
<ide> <% end %>
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.size</tt>
<add>##### `<em>collection</em>.size`
<ide>
<del>The <tt><em>collection</em>.size</tt> method returns the number of objects in the collection.
<add>The `<em>collection</em>.size` method returns the number of objects in the collection.
<ide>
<ide> ```ruby
<ide> @assembly_count = @part.assemblies.size
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.find(...)</tt>
<add>##### `<em>collection</em>.find(...)`
<ide>
<del>The <tt><em>collection</em>.find</tt> method finds objects within the collection. It uses the same syntax and options as +ActiveRecord::Base.find+. It also adds the additional condition that the object must be in the collection.
<add>The `<em>collection</em>.find` method finds objects within the collection. It uses the same syntax and options as +ActiveRecord::Base.find+. It also adds the additional condition that the object must be in the collection.
<ide>
<ide> ```ruby
<ide> @assembly = @part.assemblies.find(1)
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.where(...)</tt>
<add>##### `<em>collection</em>.where(...)`
<ide>
<del>The <tt><em>collection</em>.where</tt> method finds objects within the collection based on the conditions supplied but the objects are loaded lazily meaning that the database is queried only when the object(s) are accessed. It also adds the additional condition that the object must be in the collection.
<add>The `<em>collection</em>.where` method finds objects within the collection based on the conditions supplied, but the objects are loaded lazily, meaning that the database is queried only when the object(s) are accessed. It also adds the additional condition that the object must be in the collection.
<ide>
<ide> ```ruby
<ide> @new_assemblies = @part.assemblies.where("created_at > ?", 2.days.ago)
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.exists?(...)</tt>
<add>##### `<em>collection</em>.exists?(...)`
<ide>
<del>The <tt><em>collection</em>.exists?</tt> method checks whether an object meeting the supplied conditions exists in the collection. It uses the same syntax and options as +ActiveRecord::Base.exists?+.
<add>The `<em>collection</em>.exists?` method checks whether an object meeting the supplied conditions exists in the collection. It uses the same syntax and options as +ActiveRecord::Base.exists?+.
<ide>
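<add>For example, a check against the +:assembly_name+ attribute (used in the +build+ example below) might look like:
<add>
<add>```ruby
<add>@part.assemblies.exists?(:assembly_name => "Transmission housing")
<add>```
<add>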
<del>##### <tt><em>collection</em>.build(attributes = {})</tt>
<add>##### `<em>collection</em>.build(attributes = {})`
<ide>
<del>The <tt><em>collection</em>.build</tt> method returns a new object of the associated type. This object will be instantiated from the passed attributes, and the link through the join table will be created, but the associated object will _not_ yet be saved.
<add>The `<em>collection</em>.build` method returns a new object of the associated type. This object will be instantiated from the passed attributes, and the link through the join table will be created, but the associated object will _not_ yet be saved.
<ide>
<ide> ```ruby
<ide> @assembly = @part.assemblies.build(
<ide> {:assembly_name => "Transmission housing"})
<ide> ```
<ide>
<del>##### <tt><em>collection</em>.create(attributes = {})</tt>
<add>##### `<em>collection</em>.create(attributes = {})`
<ide>
<del>The <tt><em>collection</em>.create</tt> method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through the join table will be created, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<add>The `<em>collection</em>.create` method returns a new object of the associated type. This object will be instantiated from the passed attributes, the link through the join table will be created, and, once it passes all of the validations specified on the associated model, the associated object _will_ be saved.
<ide>
<ide> ```ruby
<ide> @assembly = @part.assemblies.create(
<ide> If any of these saves fails due to validation errors, then the assignment statem
<ide>
<ide> If the parent object (the one declaring the +has_and_belongs_to_many+ association) is unsaved (that is, +new_record?+ returns +true+) then the child objects are not saved when they are added. All unsaved members of the association will automatically be saved when the parent is saved.
<ide>
<del>If you want to assign an object to a +has_and_belongs_to_many+ association without saving the object, use the <tt><em>collection</em>.build</tt> method.
<add>If you want to assign an object to a +has_and_belongs_to_many+ association without saving the object, use the `<em>collection</em>.build` method.
<ide>
<ide> ### Association Callbacks
<ide>
<ide><path>guides/source/command_line.md
<ide> Command Line Basics
<ide>
<ide> There are a few commands that are absolutely critical to your everyday usage of Rails. In the order of how much you'll probably use them, they are:
<ide>
<del>* <tt>rails console</tt>
<del>* <tt>rails server</tt>
<del>* <tt>rake</tt>
<del>* <tt>rails generate</tt>
<del>* <tt>rails dbconsole</tt>
<del>* <tt>rails new app_name</tt>
<add>* `rails console`
<add>* `rails server`
<add>* `rake`
<add>* `rails generate`
<add>* `rails dbconsole`
<add>* `rails new app_name`
<ide>
<ide> Let's create a simple Rails application to step through each of these commands in context.
<ide>
<ide> $ rails server
<ide>
<ide> With just three commands we whipped up a Rails server listening on port 3000. Go to your browser and open "http://localhost:3000":http://localhost:3000, you will see a basic Rails app running.
<ide>
<del>INFO: You can also use the alias "s" to start the server: <tt>rails s</tt>.
<add>INFO: You can also use the alias "s" to start the server: `rails s`.
<ide>
<ide> The server can be run on a different port using the +-p+ option. The default development environment can be changed using +-e+.
<ide>
<ide> The +-b+ option binds Rails to the specified ip, by default it is 0.0.0.0. You c
<ide>
<ide> The +rails generate+ command uses templates to create a whole lot of things. Running +rails generate+ by itself gives a list of available generators:
<ide>
<del>INFO: You can also use the alias "g" to invoke the generator command: <tt>rails g</tt>.
<add>INFO: You can also use the alias "g" to invoke the generator command: `rails g`.
<ide>
<ide> ```shell
<ide> $ rails generate
<ide> Go to your browser and open "http://localhost:3000/high_scores":http://localhost
<ide>
<ide> The +console+ command lets you interact with your Rails application from the command line. On the underside, +rails console+ uses IRB, so if you've ever used it, you'll be right at home. This is useful for testing out quick ideas with code and changing data server-side without touching the website.
<ide>
<del>INFO: You can also use the alias "c" to invoke the console: <tt>rails c</tt>.
<add>INFO: You can also use the alias "c" to invoke the console: `rails c`.
<ide>
<ide> You can specify the environment in which the +console+ command should operate.
<ide>
<ide> irb(main):001:0>
<ide>
<ide> +rails dbconsole+ figures out which database you're using and drops you into whichever command line interface you would use with it (and figures out the command line parameters to give to it, too!). It supports MySQL, PostgreSQL, SQLite and SQLite3.
<ide>
<del>INFO: You can also use the alias "db" to invoke the dbconsole: <tt>rails db</tt>.
<add>INFO: You can also use the alias "db" to invoke the dbconsole: `rails db`.
<ide>
<ide> ### +rails runner+
<ide>
<del><tt>runner</tt> runs Ruby code in the context of Rails non-interactively. For instance:
<add>`runner` runs Ruby code in the context of Rails non-interactively. For instance:
<ide>
<ide> ```shell
<ide> $ rails runner "Model.long_running_method"
<ide> ```
<ide>
<del>INFO: You can also use the alias "r" to invoke the runner: <tt>rails r</tt>.
<add>INFO: You can also use the alias "r" to invoke the runner: `rails r`.
<ide>
<ide> You can specify the environment in which the +runner+ command should operate using the +-e+ switch.
<ide>
<ide> $ rails runner -e staging "Model.long_running_method"
<ide>
<ide> Think of +destroy+ as the opposite of +generate+. It'll figure out what generate did, and undo it.
<ide>
<del>INFO: You can also use the alias "d" to invoke the destroy command: <tt>rails d</tt>.
<add>INFO: You can also use the alias "d" to invoke the destroy command: `rails d`.
<ide>
<ide> ```shell
<ide> $ rails generate model Oops
<ide> rake tmp:create # Creates tmp directories for sessions, cache, sockets,
<ide>
<ide> ### +about+
<ide>
<del><tt>rake about</tt> gives information about version numbers for Ruby, RubyGems, Rails, the Rails subcomponents, your application's folder, the current Rails environment name, your app's database adapter, and schema version. It is useful when you need to ask for help, check if a security patch might affect you, or when you need some stats for an existing Rails installation.
<add>`rake about` gives information about version numbers for Ruby, RubyGems, Rails, the Rails subcomponents, your application's folder, the current Rails environment name, your app's database adapter, and schema version. It is useful when you need to ask for help, check if a security patch might affect you, or when you need some stats for an existing Rails installation.
<ide>
<ide> ```shell
<ide> $ rake about
<ide> Database schema version 20110805173523
<ide>
<ide> ### +assets+
<ide>
<del>You can precompile the assets in <tt>app/assets</tt> using <tt>rake assets:precompile</tt> and remove those compiled assets using <tt>rake assets:clean</tt>.
<add>You can precompile the assets in `app/assets` using `rake assets:precompile` and remove those compiled assets using `rake assets:clean`.
<ide>
<ide> ### +db+
<ide>
<ide> rspec/model/user_spec.rb:
<ide>
<ide> INFO: A good description of unit testing in Rails is given in "A Guide to Testing Rails Applications":testing.html
<ide>
<del>Rails comes with a test suite called <tt>Test::Unit</tt>. Rails owes its stability to the use of tests. The tasks available in the +test:+ namespace helps in running the different tests you will hopefully write.
<add>Rails comes with a test suite called `Test::Unit`. Rails owes its stability to the use of tests. The tasks available in the +test:+ namespace help in running the different tests you will hopefully write.
<ide>
<ide> ### +tmp+
<ide>
<del>The <tt>Rails.root/tmp</tt> directory is, like the *nix /tmp directory, the holding place for temporary files like sessions (if you're using a file store for files), process id files, and cached actions.
<add>The `Rails.root/tmp` directory is, like the *nix /tmp directory, the holding place for temporary files like sessions (if you're using a file store for files), process id files, and cached actions.
<ide>
<del>The +tmp:+ namespaced tasks will help you clear the <tt>Rails.root/tmp</tt> directory:
<add>The +tmp:+ namespaced tasks will help you clear the `Rails.root/tmp` directory:
<ide>
<del>* +rake tmp:cache:clear+ clears <tt>tmp/cache</tt>.
<del>* +rake tmp:sessions:clear+ clears <tt>tmp/sessions</tt>.
<del>* +rake tmp:sockets:clear+ clears <tt>tmp/sockets</tt>.
<add>* +rake tmp:cache:clear+ clears `tmp/cache`.
<add>* +rake tmp:sessions:clear+ clears `tmp/sessions`.
<add>* +rake tmp:sockets:clear+ clears `tmp/sockets`.
<ide> * +rake tmp:clear+ clears all the three: cache, sessions and sockets.
<ide>
<ide> ### Miscellaneous
<ide>
<ide> * +rake stats+ is great for looking at statistics on your code, displaying things like KLOCs (thousands of lines of code) and your code to test ratio.
<ide> * +rake secret+ will give you a pseudo-random key to use for your session secret.
<del>* <tt>rake time:zones:all</tt> lists all the timezones Rails knows about.
<add>* `rake time:zones:all` lists all the timezones Rails knows about.
<ide>
<ide> ### Writing Rake Tasks
<ide>
<ide> If you have (or want to write) any automation scripts outside your app (data import, checks, etc), you can make them as rake tasks. It's easy.
<ide>
<ide> INFO: "Complete guide about how to write tasks":http://rake.rubyforge.org/files/doc/rakefile_rdoc.html is available in the official documentation.
<ide>
<del>Tasks should be placed in <tt>Rails.root/lib/tasks</tt> and should have a +.rake+ extension.
<add>Tasks should be placed in `Rails.root/lib/tasks` and should have a +.rake+ extension.
<ide>
<ide> Each task should be defined in the following format (dependencies are optional):
<ide>
<ide> namespace :do
<ide> end
<ide> ```
<ide>
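<add>For instance, a minimal task in this format might look like the sketch below; the namespace, task name and description are only illustrative:
<add>
<add>```ruby
<add># lib/tasks/greet.rake
<add>namespace :app do
<add>  desc "Print a friendly greeting"
<add>  task :greet => :environment do
<add>    # the :environment dependency loads the Rails application before the task runs
<add>    puts "Hello from a custom rake task!"
<add>  end
<add>end
<add>```
<add>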
<del>You can see your tasks to be listed by <tt>rake -T</tt> command. And, according to the examples above, you can invoke them as follows:
<add>You can see your tasks listed with the `rake -T` command. According to the examples above, you can invoke them as follows:
<ide>
<ide> ```shell
<ide> rake task_name
<ide><path>guides/source/configuring.md
<ide> config.middleware.delete ActionDispatch::BestStandardsSupport
<ide>
<ide> ### Configuring Active Record
<ide>
<del><tt>config.active_record</tt> includes a variety of configuration options:
<add>`config.active_record` includes a variety of configuration options:
<ide>
<ide> * +config.active_record.logger+ accepts a logger conforming to the interface of Log4r or the default Ruby Logger class, which is then passed on to any new database connections made. You can retrieve this logger by calling +logger+ on either an Active Record model class or an Active Record model instance. Set to +nil+ to disable logging.
<ide>
<ide> The schema dumper adds one additional configuration option:
<ide>
<ide> ### Configuring Action Controller
<ide>
<del><tt>config.action_controller</tt> includes a number of configuration settings:
<add>`config.action_controller` includes a number of configuration settings:
<ide>
<ide> * +config.action_controller.asset_host+ sets the host for the assets. Useful when CDNs are used for hosting assets rather than the application server itself.
<ide>
<ide> * +config.action_controller.asset_path+ takes a block which configures where assets can be found. Shorter version of +config.action_controller.asset_path+.
<ide>
<del>* +config.action_controller.page_cache_directory+ should be the document root for the web server and is set using <tt>Base.page_cache_directory = "/document/root"</tt>. For Rails, this directory has already been set to +Rails.public_path+ (which is usually set to <tt>Rails.root + "/public"</tt>). Changing this setting can be useful to avoid naming conflicts with files in <tt>public/</tt>, but doing so will likely require configuring your web server to look in the new location for cached files.
<add>* +config.action_controller.page_cache_directory+ should be the document root for the web server and is set using `Base.page_cache_directory = "/document/root"`. For Rails, this directory has already been set to +Rails.public_path+ (which is usually set to `Rails.root + "/public"`). Changing this setting can be useful to avoid naming conflicts with files in `public/`, but doing so will likely require configuring your web server to look in the new location for cached files.
<ide>
<ide> * +config.action_controller.page_cache_extension+ configures the extension used for cached pages saved to +page_cache_directory+. Defaults to +.html+.
<ide>
<ide> The schema dumper adds one additional configuration option:
<ide>
<ide> The caching code adds two additional settings:
<ide>
<del>* +ActionController::Base.page_cache_directory+ sets the directory where Rails will create cached pages for your web server. The default is +Rails.public_path+ (which is usually set to <tt>Rails.root + "/public"</tt>).
<add>* +ActionController::Base.page_cache_directory+ sets the directory where Rails will create cached pages for your web server. The default is +Rails.public_path+ (which is usually set to `Rails.root + "/public"`).
<ide>
<ide> * +ActionController::Base.page_cache_extension+ sets the extension to be used when generating pages for the cache (this is ignored if the incoming request already has an extension). The default is +.html+.
<ide>
<ide> config.action_dispatch.default_headers = { 'X-Frame-Options' => 'SAMEORIGIN', 'X
<ide>
<ide> ### Configuring Action View
<ide>
<del><tt>config.action_view</tt> includes a small number of configuration settings:
<add>`config.action_view` includes a small number of configuration settings:
<ide>
<ide> * +config.action_view.field_error_proc+ provides an HTML generator for displaying errors that come from Active Record. The default is
<ide>
<ide> Just about every Rails application will interact with a database. The database t
<ide> * The +test+ environment is used when running automated tests.
<ide> * The +production+ environment is used when you deploy your application for the world to use.
<ide>
<del>TIP: You don't have to update the database configurations manually. If you look at the options of the application generator, you will see that one of the options is named <tt>--database</tt>. This option allows you to choose an adapter from a list of the most used relational databases. You can even run the generator repeatedly: <tt>cd .. && rails new blog --database=mysql</tt>. When you confirm the overwriting of the +config/database.yml+ file, your application will be configured for MySQL instead of SQLite. Detailed examples of the common database connections are below.
<add>TIP: You don't have to update the database configurations manually. If you look at the options of the application generator, you will see that one of the options is named `--database`. This option allows you to choose an adapter from a list of the most used relational databases. You can even run the generator repeatedly: `cd .. && rails new blog --database=mysql`. When you confirm the overwriting of the +config/database.yml+ file, your application will be configured for MySQL instead of SQLite. Detailed examples of the common database connections are below.
<ide>
<ide> #### Configuring an SQLite3 Database
<ide>
<ide> Rails comes with built-in support for "SQLite3":http://www.sqlite.org, which is a lightweight serverless database application. While a busy production environment may overload SQLite, it works well for development and testing. Rails defaults to using an SQLite database when creating a new project, but you can always change it later.
<ide>
<del>Here's the section of the default configuration file (<tt>config/database.yml</tt>) with connection information for the development environment:
<add>Here's the section of the default configuration file (`config/database.yml`) with connection information for the development environment:
<ide>
<ide> ```yaml
<ide> development:
<ide><path>guides/source/debugging_rails_applications.md
<ide> Now you should know where you are in the running trace and be able to print the
<ide>
<ide> Use +step+ (abbreviated +s+) to continue running your program until the next logical stopping point and return control to the debugger.
<ide>
<del>TIP: You can also use <tt>step<plus> n</tt> and <tt>step- n</tt> to move forward or backward +n+ steps respectively.
<add>TIP: You can also use `step+ n` and `step- n` to move forward or backward +n+ steps respectively.
<ide>
<ide> You may also use +next+ which is similar to step, but function or method calls that appear within the line of code are executed without stopping. As with step, you may use plus sign to move _n_ steps.
<ide>
<ide><path>guides/source/engines.md
<ide> Now instead of the ugly Ruby object output the author's name will be displayed.
<ide>
<ide> #### Using a controller provided by the application
<ide>
<del>Because Rails controllers generally share code for things like authentication and accessing session variables, by default they inherit from <tt>ApplicationController</tt>. Rails engines, however are scoped to run independently from the main application, so each engine gets a scoped +ApplicationController+. This namespace prevents code collisions, but often engine controllers should access methods in the main application's +ApplicationController+. An easy way to provide this access is to change the engine's scoped +ApplicationController+ to inherit from the main application's +ApplicationController+. For our Blorgh engine this would be done by changing +app/controllers/blorgh/application_controller.rb+ to look like:
<add>Because Rails controllers generally share code for things like authentication and accessing session variables, by default they inherit from `ApplicationController`. Rails engines, however, are scoped to run independently from the main application, so each engine gets a scoped +ApplicationController+. This namespace prevents code collisions, but often engine controllers should access methods in the main application's +ApplicationController+. An easy way to provide this access is to change the engine's scoped +ApplicationController+ to inherit from the main application's +ApplicationController+. For our Blorgh engine this would be done by changing +app/controllers/blorgh/application_controller.rb+ to look like:
<ide>
<ide> ```ruby
<ide> class Blorgh::ApplicationController < ApplicationController
<ide> end
<ide> ```
<ide>
<del>By default, the engine's controllers inherit from <tt>Blorgh::ApplicationController</tt>. So, after making this change they will have access to the main applications +ApplicationController+ as though they were part of the main application.
<add>By default, the engine's controllers inherit from `Blorgh::ApplicationController`. So, after making this change they will have access to the main application's +ApplicationController+ as though they were part of the main application.
<ide>
<ide> This change does require that the engine is run from a Rails application that has an +ApplicationController+.
<ide>
<ide><path>guides/source/form_helpers.md
<ide> As with other helpers, if you were to use the +select+ helper on a form builder
<ide> <%= f.select(:city_id, ...) %>
<ide> ```
<ide>
<del>WARNING: If you are using +select+ (or similar helpers such as +collection_select+, +select_tag+) to set a +belongs_to+ association you must pass the name of the foreign key (in the example above +city_id+), not the name of association itself. If you specify +city+ instead of +city_id+ Active Record will raise an error along the lines of <tt> ActiveRecord::AssociationTypeMismatch: City(#17815740) expected, got String(#1138750) </tt> when you pass the +params+ hash to +Person.new+ or +update_attributes+. Another way of looking at this is that form helpers only edit attributes. You should also be aware of the potential security ramifications of allowing users to edit foreign keys directly. You may wish to consider the use of +attr_protected+ and +attr_accessible+. For further details on this, see the "Ruby On Rails Security Guide":security.html#mass-assignment.
<add>WARNING: If you are using +select+ (or similar helpers such as +collection_select+, +select_tag+) to set a +belongs_to+ association, you must pass the name of the foreign key (in the example above +city_id+), not the name of the association itself. If you specify +city+ instead of +city_id+, Active Record will raise an error along the lines of `ActiveRecord::AssociationTypeMismatch: City(#17815740) expected, got String(#1138750)` when you pass the +params+ hash to +Person.new+ or +update_attributes+. Another way of looking at this is that form helpers only edit attributes. You should also be aware of the potential security ramifications of allowing users to edit foreign keys directly. You may wish to consider the use of +attr_protected+ and +attr_accessible+. For further details on this, see the "Ruby On Rails Security Guide":security.html#mass-assignment.
<ide>
<ide> ### Option Tags from a Collection of Arbitrary Objects
<ide>
<ide> The following two forms both upload a file.
<ide> <% end %>
<ide> ```
<ide>
<del>NOTE: Since Rails 3.1, forms rendered using +form_for+ have their encoding set to <tt>multipart/form-data</tt> automatically once a +file_field+ is used inside the block. Previous versions required you to set this explicitly.
<add>NOTE: Since Rails 3.1, forms rendered using +form_for+ have their encoding set to `multipart/form-data` automatically once a +file_field+ is used inside the block. Previous versions required you to set this explicitly.
<ide>
<ide> Rails provides the usual pair of helpers: the barebones +file_field_tag+ and the model oriented +file_field+. The only difference with other helpers is that you cannot set a default value for file inputs as this would have no meaning. As you would expect in the first case the uploaded file is in +params[:picture]+ and in the second case in +params[:person][:picture]+.
<ide>
<ide> NOTE: If the user has not selected a file the corresponding parameter will be an
<ide>
<ide> ### Dealing with Ajax
<ide>
<del>Unlike other forms making an asynchronous file upload form is not as simple as providing +form_for+ with <tt>:remote => true</tt>. With an Ajax form the serialization is done by JavaScript running inside the browser and since JavaScript cannot read files from your hard drive the file cannot be uploaded. The most common workaround is to use an invisible iframe that serves as the target for the form submission.
<add>Unlike other forms, making an asynchronous file upload form is not as simple as providing +form_for+ with `:remote => true`. With an Ajax form the serialization is done by JavaScript running inside the browser, and since JavaScript cannot read files from your hard drive, the file cannot be uploaded. The most common workaround is to use an invisible iframe that serves as the target for the form submission.
<ide>
<ide> Customizing Form Builders
<ide> -------------------------
<ide><path>guides/source/getting_started.md
<ide> Now that we have made the controller and view, we need to tell Rails when we wan
<ide>
<ide> To fix this, delete the +index.html+ file located inside the +public+ directory of the application.
<ide>
<del>You need to do this because Rails will serve any static file in the +public+ directory that matches a route in preference to any dynamic content you generate from the controllers. The +index.html+ file is special: it will be served if a request comes in at the root route, e.g. http://localhost:3000. If another request such as http://localhost:3000/welcome happened, a static file at <tt>public/welcome.html</tt> would be served first, but only if it existed.
<add>You need to do this because Rails will serve any static file in the +public+ directory that matches a route in preference to any dynamic content you generate from the controllers. The +index.html+ file is special: it will be served if a request comes in at the root route, e.g. http://localhost:3000. If another request such as http://localhost:3000/welcome happened, a static file at `public/welcome.html` would be served first, but only if it existed.
<ide>
<ide> Next, you have to tell Rails where your actual home page is located.
<ide>
<ide> following:
<ide>
<ide> This will now render the partial in +app/views/comments/_comment.html.erb+ once
<ide> for each comment that is in the [email protected]+ collection. As the +render+
<del>method iterates over the <tt>@post.comments</tt> collection, it assigns each
<add>method iterates over the `@post.comments` collection, it assigns each
<ide> comment to a local variable named the same as the partial, in this case
<ide> +comment+ which is then available in the partial for us to show.
<ide>
<ide> Then you make the +app/views/posts/show.html.erb+ look like the following:
<ide> ```
<ide>
<ide> The second render just defines the partial template we want to render,
<del><tt>comments/form</tt>. Rails is smart enough to spot the forward slash in that
<del>string and realize that you want to render the <tt>_form.html.erb</tt> file in
<del>the <tt>app/views/comments</tt> directory.
<add>`comments/form`. Rails is smart enough to spot the forward slash in that
<add>string and realize that you want to render the `_form.html.erb` file in
<add>the `app/views/comments` directory.
<ide>
<ide> The +@post+ object is available to any partials rendered in the view because we
<ide> defined it as an instance variable.
<ide> So first, let's add the delete link in the
<ide> </p>
<ide> ```
<ide>
<del>Clicking this new "Destroy Comment" link will fire off a <tt>DELETE
<del>/posts/:id/comments/:id</tt> to our +CommentsController+, which can then use
<add>Clicking this new "Destroy Comment" link will fire off a `DELETE
<add>/posts/:id/comments/:id` to our +CommentsController+, which can then use
<ide> this to find the comment we want to delete, so let's add a destroy action to our
<ide> controller:
<ide>
<ide> end
<ide> ```
<ide>
<ide> The +destroy+ action will find the post we are looking at, locate the comment
<del>within the <tt>@post.comments</tt> collection, and then remove it from the
<add>within the `@post.comments` collection, and then remove it from the
<ide> database and send us back to the show action for the post.
<ide>
<ide>
<ide> this situation.
<ide>
<ide> In the +PostsController+ we need to have a way to block access to the various
<ide> actions if the person is not authenticated, here we can use the Rails
<del><tt>http_basic_authenticate_with</tt> method, allowing access to the requested
<add>`http_basic_authenticate_with` method, allowing access to the requested
<ide> action if that method allows it.
<ide>
<ide> To use the authentication system, we specify it at the top of our
<ide><path>guides/source/initialization.md
<ide> command = aliases[command] || command
<ide> TIP: As you can see, an empty ARGV list will make Rails show the help
<ide> snippet.
<ide>
<del>If we used <tt>s</tt> rather than +server+, Rails will use the +aliases+ defined in the file and match them to their respective commands. With the +server+ command, Rails will run this code:
<add>If we use `s` rather than +server+, Rails will use the +aliases+ defined in the file and match them to their respective commands. With the +server+ command, Rails will run this code:
<ide>
<ide> ```ruby
<ide> when 'server'
<ide><path>guides/source/layouts_and_rendering.md
<ide> These two files for jQuery, +jquery.js+ and +jquery_ujs.js+ must be placed insid
<ide>
<ide> WARNING: If you are using the asset pipeline, this tag will render a +script+ tag for an asset called +defaults.js+, which would not exist in your application unless you've explicitly created it.
<ide>
<del>And you can in any case override the +:defaults+ expansion in <tt>config/application.rb</tt>:
<add>And you can in any case override the +:defaults+ expansion in `config/application.rb`:
<ide>
<ide> ```ruby
<ide> config.action_view.javascript_expansions[:defaults] = %w(foo.js bar.js)
<ide> And use them by referencing them exactly like +:defaults+:
<ide> <%= javascript_include_tag :projects %>
<ide> ```
<ide>
<del>When using <tt>:defaults</tt>, if an <tt>application.js</tt> file exists in <tt>public/javascripts</tt> it will be included as well at the end.
<add>When using `:defaults`, if an `application.js` file exists in `public/javascripts`, it will also be included at the end.
<ide>
<ide> Also, if the asset pipeline is disabled, the +:all+ expansion loads every JavaScript file in +public/javascripts+:
<ide>
<ide><path>guides/source/migrations.md
<ide> existing users.
<ide>
<ide> ### Using the change method
<ide>
<del>Rails 3.1 makes migrations smarter by providing a new <tt>change</tt> method.
<add>Rails 3.1 makes migrations smarter by providing a new `change` method.
<ide> This method is preferred for writing constructive migrations (adding columns or
<ide> tables). The migration knows how to migrate your database and reverse it when
<ide> the migration is rolled back without the need to write a separate +down+ method.
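<add>
<add>A minimal migration written with the +change+ method might look like the sketch below (the table and column names are illustrative):
<add>
<add>```ruby
<add>class AddPartNumberToProducts < ActiveRecord::Migration
<add>  def change
<add>    add_column :products, :part_number, :string
<add>  end
<add>end
<add>```
<add>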
<ide> end
<ide>
<ide> ### Migrations are Classes
<ide>
<del>A migration is a subclass of <tt>ActiveRecord::Migration</tt> that implements
<add>A migration is a subclass of `ActiveRecord::Migration` that implements
<ide> two methods: +up+ (perform the required transformations) and +down+ (revert
<ide> them).
<ide>
<ide> method to execute arbitrary SQL.
<ide>
<ide> For more details and examples of individual methods, check the API documentation.
<ide> In particular the documentation for
<del>"<tt>ActiveRecord::ConnectionAdapters::SchemaStatements</tt>":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/SchemaStatements.html
<add>"`ActiveRecord::ConnectionAdapters::SchemaStatements`":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/SchemaStatements.html
<ide> (which provides the methods available in the +up+ and +down+ methods),
<del>"<tt>ActiveRecord::ConnectionAdapters::TableDefinition</tt>":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/TableDefinition.html
<add>"`ActiveRecord::ConnectionAdapters::TableDefinition`":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/TableDefinition.html
<ide> (which provides the methods available on the object yielded by +create_table+)
<ide> and
<del>"<tt>ActiveRecord::ConnectionAdapters::Table</tt>":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/Table.html
<add>"`ActiveRecord::ConnectionAdapters::Table`":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/Table.html
<ide> (which provides the methods available on the object yielded by +change_table+).
<ide>
<ide> ### When to Use the +change+ Method
<ide><path>guides/source/plugins.md
<ide> Run +rake+ one final time and you should see:
<ide> 7 tests, 7 assertions, 0 failures, 0 errors, 0 skips
<ide> ```
<ide>
<del>NOTE: The use of +write_attribute+ to write to the field in model is just one example of how a plugin can interact with the model, and will not always be the right method to use. For example, you could also use <tt>send("#{self.class.yaffle_text_field}=", string.to_squawk)</tt>.
<add>NOTE: The use of +write_attribute+ to write to the field in model is just one example of how a plugin can interact with the model, and will not always be the right method to use. For example, you could also use `send("#{self.class.yaffle_text_field}=", string.to_squawk)`.
<ide>
<ide> Generators
<ide> ----------
<ide><path>guides/source/rails_on_rack.md
<ide> Rails on Rack
<ide>
<ide> ### Rails Application's Rack Object
<ide>
<del><tt>ApplicationName::Application</tt> is the primary Rack application object of a Rails application. Any Rack compliant web server should be using +ApplicationName::Application+ object to serve a Rails application.
<add>`ApplicationName::Application` is the primary Rack application object of a Rails application. Any Rack compliant web server should be using the +ApplicationName::Application+ object to serve a Rails application.
<ide>
<ide> ### +rails server+
<ide>
<del><tt>rails server</tt> does the basic job of creating a +Rack::Server+ object and starting the webserver.
<add>`rails server` does the basic job of creating a +Rack::Server+ object and starting the webserver.
<ide>
<ide> Here's how +rails server+ creates an instance of +Rack::Server+
<ide>
<ide> Purpose of each of this middlewares is explained in the "Internal Middlewares":#
<ide>
<ide> ### Configuring Middleware Stack
<ide>
<del>Rails provides a simple configuration interface +config.middleware+ for adding, removing and modifying the middlewares in the middleware stack via +application.rb+ or the environment specific configuration file <tt>environments/<environment>.rb</tt>.
<add>Rails provides a simple configuration interface +config.middleware+ for adding, removing and modifying the middlewares in the middleware stack via +application.rb+ or the environment specific configuration file `environments/<environment>.rb`.
<ide>
<ide> #### Adding a Middleware
<ide>
<ide> You can add a new middleware to the middleware stack using any of the following methods:
<ide>
<del>* <tt>config.middleware.use(new_middleware, args)</tt> - Adds the new middleware at the bottom of the middleware stack.
<add>* `config.middleware.use(new_middleware, args)` - Adds the new middleware at the bottom of the middleware stack.
<ide>
<del>* <tt>config.middleware.insert_before(existing_middleware, new_middleware, args)</tt> - Adds the new middleware before the specified existing middleware in the middleware stack.
<add>* `config.middleware.insert_before(existing_middleware, new_middleware, args)` - Adds the new middleware before the specified existing middleware in the middleware stack.
<ide>
<del>* <tt>config.middleware.insert_after(existing_middleware, new_middleware, args)</tt> - Adds the new middleware after the specified existing middleware in the middleware stack.
<add>* `config.middleware.insert_after(existing_middleware, new_middleware, args)` - Adds the new middleware after the specified existing middleware in the middleware stack.
<ide>
<ide> ```ruby
<ide> # config/application.rb
<ide> config.middleware.swap ActionDispatch::ShowExceptions, Lifo::ShowExceptions
<ide>
<ide> #### Middleware Stack is an Enumerable
<ide>
<del>The middleware stack behaves just like a normal +Enumerable+. You can use any +Enumerable+ methods to manipulate or interrogate the stack. The middleware stack also implements some +Array+ methods including <tt>[]</tt>, +unshift+ and +delete+. Methods described in the section above are just convenience methods.
<add>The middleware stack behaves just like a normal +Enumerable+. You can use any +Enumerable+ methods to manipulate or interrogate the stack. The middleware stack also implements some +Array+ methods including `[]`, +unshift+ and +delete+. Methods described in the section above are just convenience methods.
<ide>
<ide> Append the following lines to your application configuration:
<ide>
<ide> config.middleware.delete "Rack::MethodOverride"
<ide> Much of Action Controller's functionality is implemented as Middlewares. The following list explains the purpose of each of them:
<ide>
<ide> *+ActionDispatch::Static+*
<del>* Used to serve static assets. Disabled if <tt>config.serve_static_assets</tt> is true.
<add>* Used to serve static assets. Disabled if `config.serve_static_assets` is false.
<ide>
<ide> *+Rack::Lock+*
<del>* Sets <tt>env["rack.multithread"]</tt> flag to +true+ and wraps the application within a Mutex.
<add>* Sets `env["rack.multithread"]` flag to +true+ and wraps the application within a Mutex.
<ide>
<ide> *+ActiveSupport::Cache::Strategy::LocalCache::Middleware+*
<ide> * Used for memory caching. This cache is not thread safe.
<ide> Much of Action Controller's functionality is implemented as Middlewares. The fol
<ide> * Sets an X-Runtime header, containing the time (in seconds) taken to execute the request.
<ide>
<ide> *+Rack::MethodOverride+*
<del>* Allows the method to be overridden if <tt>params[:_method]</tt> is set. This is the middleware which supports the PUT and DELETE HTTP method types.
<add>* Allows the method to be overridden if `params[:_method]` is set. This is the middleware which supports the PUT and DELETE HTTP method types.
<ide>
<ide> *+ActionDispatch::RequestId+*
<del>* Makes a unique +X-Request-Id+ header available to the response and enables the <tt>ActionDispatch::Request#uuid</tt> method.
<add>* Makes a unique +X-Request-Id+ header available to the response and enables the `ActionDispatch::Request#uuid` method.
<ide>
<ide> *+Rails::Rack::Logger+*
<ide> * Notifies the logs that the request has begun. After the request is complete, flushes all the logs.
<ide> Much of Action Controller's functionality is implemented as Middlewares. The fol
<ide> * Runs the prepare callbacks before serving the request.
<ide>
<ide> *+ActiveRecord::ConnectionAdapters::ConnectionManagement+*
<del>* Cleans active connections after each request, unless the <tt>rack.test</tt> key in the request environment is set to +true+.
<add>* Cleans active connections after each request, unless the `rack.test` key in the request environment is set to +true+.
<ide>
<ide> *+ActiveRecord::QueryCache+*
<ide> * Enables the Active Record query cache.
<ide> Much of Action Controller's functionality is implemented as Middlewares. The fol
<ide> * Responsible for storing the session in cookies.
<ide>
<ide> *+ActionDispatch::Flash+*
<del>* Sets up the flash keys. Only available if <tt>config.action_controller.session_store</tt> is set to a value.
<add>* Sets up the flash keys. Only available if `config.action_controller.session_store` is set to a value.
<ide>
<ide> *+ActionDispatch::ParamsParser+*
<del>* Parses out parameters from the request into <tt>params</tt>.
<add>* Parses out parameters from the request into `params`.
<ide>
<ide> *+ActionDispatch::Head+*
<ide> * Converts HEAD requests to +GET+ requests and serves them as so.
<ide><path>guides/source/routing.md
<ide> Rails Routing from the Outside In
<ide> This guide covers the user-facing features of Rails routing. By referring to this guide, you will be able to:
<ide>
<ide> * Understand the code in +routes.rb+
<del>* Construct your own routes, using either the preferred resourceful style or the <tt>match</tt> method
<add>* Construct your own routes, using either the preferred resourceful style or the `match` method
<ide> * Identify what parameters to expect an action to receive
<ide> * Automatically create paths and URLs using route helpers
<ide> * Use advanced techniques such as constraints and Rack endpoints
<ide> it asks the router to match it to a controller action. If the first matching rou
<ide> get "/patients/:id" => "patients#show"
<ide> ```
<ide>
<del>the request is dispatched to the +patients+ controller's +show+ action with <tt>{ :id => "17" }</tt> in +params+.
<add>the request is dispatched to the +patients+ controller's +show+ action with `{ :id => "17" }` in +params+.
<ide>
<ide> ### Generating Paths and URLs from Code
<ide>
<ide> it asks the router to map it to a controller action. If the first matching route
<ide> resources :photos
<ide> ```
<ide>
<del>Rails would dispatch that request to the +destroy+ method on the +photos+ controller with <tt>{ :id => "17" }</tt> in +params+.
<add>Rails would dispatch that request to the +destroy+ method on the +photos+ controller with `{ :id => "17" }` in +params+.
<ide>
<ide> ### CRUD, Verbs, and Actions
<ide>
<ide> You can specify static segments when creating a route:
<ide> get ':controller/:action/:id/with_user/:user_id'
<ide> ```
<ide>
<del>This route would respond to paths such as +/photos/show/1/with_user/2+. In this case, +params+ would be <tt>{ :controller => "photos", :action => "show", :id => "1", :user_id => "2" }</tt>.
<add>This route would respond to paths such as +/photos/show/1/with_user/2+. In this case, +params+ would be `{ :controller => "photos", :action => "show", :id => "1", :user_id => "2" }`.
<ide>
<ide> ### The Query String
<ide>
<ide> The +params+ will also include any parameters from the query string. For example
<ide> get ':controller/:action/:id'
<ide> ```
<ide>
<del>An incoming path of +/photos/show/1?user_id=2+ will be dispatched to the +show+ action of the +Photos+ controller. +params+ will be <tt>{ :controller => "photos", :action => "show", :id => "1", :user_id => "2" }</tt>.
<add>An incoming path of +/photos/show/1?user_id=2+ will be dispatched to the +show+ action of the +Photos+ controller. +params+ will be `{ :controller => "photos", :action => "show", :id => "1", :user_id => "2" }`.
<ide>
<ide> ### Defining Defaults
<ide>
<ide> Instead of a String, like +"posts#index"+, which corresponds to the +index+ acti
<ide> match "/application.js" => Sprockets, :via => :all
<ide> ```
<ide>
<del>As long as +Sprockets+ responds to +call+ and returns a <tt>[status, headers, body]</tt>, the router won't know the difference between the Rack application and an action. This is an appropriate use of +:via => :all+, as you will want to allow your Rack application to handle all verbs as it considers appropriate.
<add>As long as +Sprockets+ responds to +call+ and returns a `[status, headers, body]`, the router won't know the difference between the Rack application and an action. This is an appropriate use of +:via => :all+, as you will want to allow your Rack application to handle all verbs as it considers appropriate.
<ide>
<ide> NOTE: For the curious, +"posts#index"+ actually expands out to +PostsController.action(:index)+, which returns a valid Rack application.
<ide>
<ide><path>guides/source/ruby_on_rails_guides_guidelines.md
<ide> Capitalize all words except for internal articles, prepositions, conjunctions, a
<ide> Use the same typography as in regular text:
<ide>
<ide> ```
<del>##### The <tt>:content_type</tt> Option
<add>##### The `:content_type` Option
<ide> ```
<ide>
<ide> API Documentation Guidelines
<ide> To force processing all the guides, pass +ALL=1+.
<ide>
<ide> It is also recommended that you work with +WARNINGS=1+. This detects duplicate IDs and warns about broken internal links.
<ide>
<del>If you want to generate guides in a language other than English, you can keep them in a separate directory under +source+ (eg. <tt>source/es</tt>) and use the +GUIDES_LANGUAGE+ environment variable:
<add>If you want to generate guides in a language other than English, you can keep them in a separate directory under +source+ (eg. `source/es`) and use the +GUIDES_LANGUAGE+ environment variable:
<ide>
<ide> ```
<ide> bundle exec rake guides:generate GUIDES_LANGUAGE=es
<ide><path>guides/source/security.md
<ide> There are many other possibilities, including Ajax to attack the victim in the b
<ide> protect_from_forgery :secret => "123456789012345678901234567890..."
<ide> ```
<ide>
<del>This will automatically include a security token, calculated from the current session and the server-side secret, in all forms and Ajax requests generated by Rails. You won't need the secret, if you use CookieStorage as session storage. If the security token doesn't match what was expected, the session will be reset. *Note:* In Rails versions prior to 3.0.4, this raised an <tt>ActionController::InvalidAuthenticityToken</tt> error.
<add>This will automatically include a security token, calculated from the current session and the server-side secret, in all forms and Ajax requests generated by Rails. You won't need the secret, if you use CookieStorage as session storage. If the security token doesn't match what was expected, the session will be reset. *Note:* In Rails versions prior to 3.0.4, this raised an `ActionController::InvalidAuthenticityToken` error.
<ide>
<ide> It is common to use persistent cookies to store user information, with +cookies.permanent+ for example. In this case, the cookies will not be cleared and the out of the box CSRF protection will not be effective. If you are using a different cookie store than the session for this information, you must handle what to do with it yourself:
<ide>
<ide> params[:user] # => {:name => “ow3ned”, :admin => true}
<ide>
<ide> So if you create a new user using mass-assignment, it may be too easy to become an administrator.
<ide>
<del>Note that this vulnerability is not restricted to database columns. Any setter method, unless explicitly protected, is accessible via the <tt>attributes=</tt> method. In fact, this vulnerability is extended even further with the introduction of nested mass assignment (and nested object forms) in Rails 2.3. The +accepts_nested_attributes_for+ declaration provides us the ability to extend mass assignment to model associations (+has_many+, +has_one+, +has_and_belongs_to_many+). For example:
<add>Note that this vulnerability is not restricted to database columns. Any setter method, unless explicitly protected, is accessible via the `attributes=` method. In fact, this vulnerability is extended even further with the introduction of nested mass assignment (and nested object forms) in Rails 2.3. The +accepts_nested_attributes_for+ declaration provides us the ability to extend mass assignment to model associations (+has_many+, +has_one+, +has_and_belongs_to_many+). For example:
<ide>
<ide> ```ruby
<ide> class Person < ActiveRecord::Base
<ide> When assigning attributes in Active Record using +attributes=+ the :default role
<ide> @user.is_admin # => true
<ide> ```
<ide>
<del>In a similar way, +new+, +create+, <tt>create!</tt>, +update_attributes+, and +update_attributes!+ methods all respect mass-assignment security and accept either +:as+ or +:without_protection+ options. For example:
<add>In a similar way, +new+, +create+, `create!`, +update_attributes+, and +update_attributes!+ methods all respect mass-assignment security and accept either +:as+ or +:without_protection+ options. For example:
<ide>
<ide> ```ruby
<ide> @user = User.new({ :name => 'Sebastian', :is_admin => true }, :as => :admin)
<ide> Note that this protects you only from automatic bots, targeted tailor-made bots
<ide>
<ide> WARNING: _Tell Rails not to put passwords in the log files._
<ide>
<del>By default, Rails logs all requests being made to the web application. But log files can be a huge security issue, as they may contain login credentials, credit card numbers et cetera. When designing a web application security concept, you should also think about what will happen if an attacker got (full) access to the web server. Encrypting secrets and passwords in the database will be quite useless, if the log files list them in clear text. You can _(highlight)filter certain request parameters from your log files_ by appending them to <tt>config.filter_parameters</tt> in the application configuration. These parameters will be marked [FILTERED] in the log.
<add>By default, Rails logs all requests being made to the web application. But log files can be a huge security issue, as they may contain login credentials, credit card numbers et cetera. When designing a web application security concept, you should also think about what will happen if an attacker got (full) access to the web server. Encrypting secrets and passwords in the database will be quite useless, if the log files list them in clear text. You can _(highlight)filter certain request parameters from your log files_ by appending them to `config.filter_parameters` in the application configuration. These parameters will be marked [FILTERED] in the log.
<ide>
<ide> ```ruby
<ide> config.filter_parameters << :password
<ide><path>guides/source/upgrading_ruby_on_rails.md
<ide> The following changes are meant for upgrading your application to Rails 4.0.
<ide>
<ide> ### vendor/plugins
<ide>
<del>Rails 4.0 no longer supports loading plugins from <tt>vendor/plugins</tt>. You must replace any plugins by extracting them to gems and adding them to your Gemfile. If you choose not to make them gems, you can move them into, say, <tt>lib/my_plugin/*</tt> and add an appropriate initializer in <tt>config/initializers/my_plugin.rb</tt>.
<add>Rails 4.0 no longer supports loading plugins from `vendor/plugins`. You must replace any plugins by extracting them to gems and adding them to your Gemfile. If you choose not to make them gems, you can move them into, say, `lib/my_plugin/*` and add an appropriate initializer in `config/initializers/my_plugin.rb`.
<ide>
<ide> ### Identity Map
<ide>
<del>Rails 4.0 has removed the identity map from Active Record, due to "some inconsistencies with associations":https://github.com/rails/rails/commit/302c912bf6bcd0fa200d964ec2dc4a44abe328a6. If you have manually enabled it in your application, you will have to remove the following config that has no effect anymore: <tt>config.active_record.identity_map</tt>.
<add>Rails 4.0 has removed the identity map from Active Record, due to "some inconsistencies with associations":https://github.com/rails/rails/commit/302c912bf6bcd0fa200d964ec2dc4a44abe328a6. If you have manually enabled it in your application, you will have to remove the following config that has no effect anymore: `config.active_record.identity_map`.
<ide>
<ide> ### Active Record
<ide>
<del>The <tt>delete</tt> method in collection associations can now receive <tt>Fixnum</tt> or <tt>String</tt> arguments as record ids, besides records, pretty much like the <tt>destroy</tt> method does. Previously it raised <tt>ActiveRecord::AssociationTypeMismatch</tt> for such arguments. From Rails 4.0 on <tt>delete</tt> automatically tries to find the records matching the given ids before deleting them.
<add>The `delete` method in collection associations can now receive `Fixnum` or `String` arguments as record ids, besides records, pretty much like the `destroy` method does. Previously it raised `ActiveRecord::AssociationTypeMismatch` for such arguments. From Rails 4.0 on `delete` automatically tries to find the records matching the given ids before deleting them.
<ide>
<del>Rails 4.0 has changed how orders get stacked in +ActiveRecord::Relation+. In previous versions of rails new order was applied after previous defined order. But this is no long true. Check "ActiveRecord Query guide":active_record_querying.html#ordering for more information.
<add>Rails 4.0 has changed how orders get stacked in `ActiveRecord::Relation`. In previous versions of rails new order was applied after previous defined order. But this is no long true. Check "ActiveRecord Query guide":active_record_querying.html#ordering for more information.
<ide>
<del>Rails 4.0 has changed <tt>serialized_attributes</tt> and <tt>_attr_readonly</tt> to class methods only. Now you shouldn't use instance methods, it's deprecated. You must change them, e.g. <tt>self.serialized_attributes</tt> to <tt>self.class.serialized_attributes</tt>.
<add>Rails 4.0 has changed `serialized_attributes` and `attr_readonly` to class methods only. Now you shouldn't use instance methods, it's deprecated. You must change them, e.g. `self.serialized_attributes` to `self.class.serialized_attributes`.
<ide>
<ide> ### Active Model
<ide>
<del>Rails 4.0 has changed how errors attach with the <tt>ActiveModel::Validations::ConfirmationValidator</tt>. Now when confirmation validations fail the error will be attached to <tt>:#{attribute}_confirmation</tt> instead of <tt>attribute</tt>.
<add>Rails 4.0 has changed how errors attach with the `ActiveModel::Validations::ConfirmationValidator`. Now when confirmation validations fail the error will be attached to `:#{attribute}_confirmation` instead of `attribute`.
<ide>
<ide> ### Action Pack
<ide>
<del>Rails 4.0 changed how <tt>assert_generates</tt>, <tt>assert_recognizes</tt>, and <tt>assert_routing</tt> work. Now all these assertions raise <tt>Assertion</tt> instead of <tt>ActionController::RoutingError</tt>.
<add>Rails 4.0 changed how `assert_generates`, `assert_recognizes`, and `assert_routing` work. Now all these assertions raise `Assertion` instead of `ActionController::RoutingError`.
<ide>
<add><<<<<<< HEAD
<ide> Rails 4.0 also changed the way unicode character routes are drawn. Now you can draw unicode character routes directly. If you already draw such routes, you must change them, for example:
<ide>
<ide> <ruby>
<ide> becomes
<ide> <ruby>
<ide> get 'こんにちは', :controller => 'welcome', :action => 'index'
<ide> </ruby>
<add>=======
<add>Rails 4.0 also changed the way unicode character routes are drawn. Now you can draw unicode character routes directly. If you already draw such routes, you must change them, e.g. `get Rack::Utils.escape('こんにちは'), :controller => 'welcome', :action => 'index'` to `get 'こんにちは', :controller => 'welcome', :action => 'index'`.
<add>>>>>>>> Convert inline code tags to Markdown
<ide>
<ide> ### Active Support
<ide>
<del>Rails 4.0 Removed the <tt>j</tt> alias for <tt>ERB::Util#json_escape</tt> since <tt>j</tt> is already used for <tt>ActionView::Helpers::JavaScriptHelper#escape_javascript</tt>.
<add>Rails 4.0 Removed the `j` alias for `ERB::Util#json_escape` since `j` is already used for `ActionView::Helpers::JavaScriptHelper#escape_javascript`.
<ide>
<ide> ### Helpers Loading Order
<ide>
<del>The loading order of helpers from more than one directory has changed in Rails 4.0. Previously, helpers from all directories were gathered and then sorted alphabetically. After upgrade to Rails 4.0 helpers will preserve the order of loaded directories and will be sorted alphabetically only within each directory. Unless you explicitly use <tt>helpers_path</tt> parameter, this change will only impact the way of loading helpers from engines. If you rely on the fact that particular helper from engine loads before or after another helper from application or another engine, you should check if correct methods are available after upgrade. If you would like to change order in which engines are loaded, you can use <tt>config.railties_order=</tt> method.
<add>The loading order of helpers from more than one directory has changed in Rails 4.0. Previously, helpers from all directories were gathered and then sorted alphabetically. After upgrade to Rails 4.0 helpers will preserve the order of loaded directories and will be sorted alphabetically only within each directory. Unless you explicitly use `helpers_path` parameter, this change will only impact the way of loading helpers from engines. If you rely on the fact that particular helper from engine loads before or after another helper from application or another engine, you should check if correct methods are available after upgrade. If you would like to change order in which engines are loaded, you can use `config.railties_order=` method.
<ide>
<ide> Upgrading from Rails 3.1 to Rails 3.2
<ide> -------------------------------------
<ide> The following changes are meant for upgrading your application to Rails 3.2.2, t
<ide>
<ide> ### Gemfile
<ide>
<del>Make the following changes to your +Gemfile+.
<add>Make the following changes to your `Gemfile`.
<ide>
<ide> ```ruby
<ide> gem 'rails', '= 3.2.2'
<ide> config.active_record.auto_explain_threshold_in_seconds = 0.5
<ide>
<ide> ### config/environments/test.rb
<ide>
<del>The <tt>mass_assignment_sanitizer</tt> configuration setting should also be be added to <tt>config/environments/test.rb</tt>:
<add>The `mass_assignment_sanitizer` configuration setting should also be be added to `config/environments/test.rb`:
<ide>
<ide> ```ruby
<ide> # Raise exception on mass assignment protection for Active Record models
<ide> config.active_record.mass_assignment_sanitizer = :strict
<ide>
<ide> ### vendor/plugins
<ide>
<del>Rails 3.2 deprecates <tt>vendor/plugins</tt> and Rails 4.0 will remove them completely. While it's not strictly necessary as part of a Rails 3.2 upgrade, you can start replacing any plugins by extracting them to gems and adding them to your Gemfile. If you choose not to make them gems, you can move them into, say, <tt>lib/my_plugin/*</tt> and add an appropriate initializer in <tt>config/initializers/my_plugin.rb</tt>.
<add>Rails 3.2 deprecates `vendor/plugins` and Rails 4.0 will remove them completely. While it's not strictly necessary as part of a Rails 3.2 upgrade, you can start replacing any plugins by extracting them to gems and adding them to your Gemfile. If you choose not to make them gems, you can move them into, say, `lib/my_plugin/*` and add an appropriate initializer in `config/initializers/my_plugin.rb`.
<ide>
<ide> Upgrading from Rails 3.0 to Rails 3.1
<ide> -------------------------------------
<ide> The following changes are meant for upgrading your application to Rails 3.1.3, t
<ide>
<ide> ### Gemfile
<ide>
<del>Make the following changes to your +Gemfile+.
<add>Make the following changes to your `Gemfile`.
<ide>
<ide> ```ruby
<ide> gem 'rails', '= 3.1.3'
<ide> config.assets.prefix = '/asset-files'
<ide>
<ide> ### config/environments/development.rb
<ide>
<del>Remove the RJS setting <tt>config.action_view.debug_rjs = true</tt>.
<add>Remove the RJS setting `config.action_view.debug_rjs = true`.
<ide>
<ide> Add these settings if you enable the asset pipeline:
<ide> | 25 |
Javascript | Javascript | fix cache errors | f421d2b0565513e947ceef46e44980fbc9ff86dc | <ide><path>packager/react-packager/src/DependencyResolver/Cache/index.js
<ide> class Cache {
<ide> .then(values => {
<ide> var json = Object.create(null);
<ide> Object.keys(data).forEach((key, i) => {
<add> if (!values[i]) {
<add> return;
<add> }
<add>
<ide> json[key] = Object.create(null);
<ide> json[key].metadata = data[key].metadata;
<ide> json[key].data = values[i].data; | 1 |
PHP | PHP | attach all disk attachments and not only first one | d618cf3326468086ae9ab5ba1ee556be6182a481 | <ide><path>src/Illuminate/Mail/Mailable.php
<ide> protected function buildDiskAttachments($message)
<ide> FilesystemFactory::class
<ide> )->disk($attachment['disk']);
<ide>
<del> return $message->attachData(
<add> $message->attachData(
<ide> $storage->get($attachment['path']),
<ide> $attachment['name'] ?? basename($attachment['path']),
<ide> array_merge(['mime' => $storage->mimeType($attachment['path'])], $attachment['options']) | 1 |
Python | Python | show error if running with -b option on windows | bd4de0302e6c90d634c41f69007caa2a74dc1c7f | <ide><path>celery/apps/worker.py
<ide> def __init__(self, concurrency=None, loglevel=None, logfile=None,
<ide> self.hostname = hostname or socket.gethostname()
<ide> self.discard = discard
<ide> self.run_clockservice = run_clockservice
<add> if self.app.IS_WINDOWS and self.run_clockservice:
<add> self.die("-B option does not work on Windows. "
<add> "Please run celerybeat as a separate service.")
<ide> self.schedule = schedule or app.conf.CELERYBEAT_SCHEDULE_FILENAME
<ide> self.scheduler_cls = scheduler_cls or app.conf.CELERYBEAT_SCHEDULER
<ide> self.events = events | 1 |
Java | Java | fix javadoc formatting issues for headings | 508d2c7a77e004afc7eb2f028a2345244c62939b | <ide><path>spring-core/src/main/java/org/springframework/util/AntPathMatcher.java
<ide> public Map<String, String> extractUriTemplateVariables(String pattern, String pa
<ide> * the first pattern contains a file extension match (e.g., {@code *.html}).
<ide> * In that case, the second pattern will be merged into the first. Otherwise,
<ide> * an {@code IllegalArgumentException} will be thrown.
<del> * <h3>Examples</h3>
<add> * <h4>Examples</h4>
<ide> * <table border="1">
<ide> * <tr><th>Pattern 1</th><th>Pattern 2</th><th>Result</th></tr>
<ide> * <tr><td>{@code null}</td><td>{@code null}</td><td> </td></tr>
<ide><path>spring-test/src/main/java/org/springframework/test/context/TestPropertySource.java
<ide> * {@code Environment}'s set of {@code PropertySources}. Each location
<ide> * will be added to the enclosing {@code Environment} as its own property
<ide> * source, in the order declared.
<del> * <h3>Supported File Formats</h3>
<add> * <h4>Supported File Formats</h4>
<ide> * <p>Both traditional and XML-based properties file formats are supported
<ide> * — for example, {@code "classpath:/com/example/test.properties"}
<ide> * or {@code "file:/path/to/file.xml"}.
<del> * <h3>Path Resource Semantics</h3>
<add> * <h4>Path Resource Semantics</h4>
<ide> * <p>Each path will be interpreted as a Spring
<ide> * {@link org.springframework.core.io.Resource Resource}. A plain path
<ide> * — for example, {@code "test.properties"} — will be treated as a
<ide> * in paths (i.e., <code>${...}</code>) will be
<ide> * {@linkplain org.springframework.core.env.Environment#resolveRequiredPlaceholders(String) resolved}
<ide> * against the {@code Environment}.
<del> * <h3>Default Properties File Detection</h3>
<add> * <h4>Default Properties File Detection</h4>
<ide> * <p>See the class-level Javadoc for a discussion on detection of defaults.
<del> * <h3>Precedence</h3>
<add> * <h4>Precedence</h4>
<ide> * <p>Properties loaded from resource locations have lower precedence than
<ide> * inlined {@link #properties}.
<ide> * <p>This attribute may <strong>not</strong> be used in conjunction with
<ide> * {@code ApplicationContext} is loaded for the test. All key-value pairs
<ide> * will be added to the enclosing {@code Environment} as a single test
<ide> * {@code PropertySource} with the highest precedence.
<del> * <h3>Supported Syntax</h3>
<add> * <h4>Supported Syntax</h4>
<ide> * <p>The supported syntax for key-value pairs is the same as the
<ide> * syntax defined for entries in a Java
<ide> * {@linkplain java.util.Properties#load(java.io.Reader) properties file}:
<ide> * <li>{@code "key:value"}</li>
<ide> * <li>{@code "key value"}</li>
<ide> * </ul>
<del> * <h3>Precedence</h3>
<add> * <h4>Precedence</h4>
<ide> * <p>Properties declared via this attribute have higher precedence than
<ide> * properties loaded from resource {@link #locations}.
<ide> * <p>This attribute may be used in conjunction with {@link #value}
<ide><path>spring-test/src/main/java/org/springframework/test/context/jdbc/Sql.java
<ide> * {@link #value}, but it may be used instead of {@link #value}. Similarly,
<ide> * this attribute may be used in conjunction with or instead of
<ide> * {@link #statements}.
<del> * <h3>Path Resource Semantics</h3>
<add> * <h4>Path Resource Semantics</h4>
<ide> * <p>Each path will be interpreted as a Spring
<ide> * {@link org.springframework.core.io.Resource Resource}. A plain path
<ide> * — for example, {@code "schema.sql"} — will be treated as a
<ide> * {@link org.springframework.util.ResourceUtils#CLASSPATH_URL_PREFIX classpath:},
<ide> * {@link org.springframework.util.ResourceUtils#FILE_URL_PREFIX file:},
<ide> * {@code http:}, etc.) will be loaded using the specified resource protocol.
<del> * <h3>Default Script Detection</h3>
<add> * <h4>Default Script Detection</h4>
<ide> * <p>If no SQL scripts or {@link #statements} are specified, an attempt will
<ide> * be made to detect a <em>default</em> script depending on where this
<ide> * annotation is declared. If a default cannot be detected, an
<ide> * <em>Inlined SQL statements</em> to execute.
<ide> * <p>This attribute may be used in conjunction with or instead of
<ide> * {@link #scripts}.
<del> * <h3>Ordering</h3>
<add> * <h4>Ordering</h4>
<ide> * <p>Statements declared via this attribute will be executed after
<ide> * statements loaded from resource {@link #scripts}. If you wish to have
<ide> * inlined statements executed before scripts, simply declare multiple
<ide><path>spring-tx/src/main/java/org/springframework/transaction/annotation/TransactionManagementConfigurer.java
<ide> /*
<del> * Copyright 2002-2020 the original author or authors.
<add> * Copyright 2002-2022 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> public interface TransactionManagementConfigurer {
<ide> * Return the default transaction manager bean to use for annotation-driven database
<ide> * transaction management, i.e. when processing {@code @Transactional} methods.
<ide> * <p>There are two basic approaches to implementing this method:
<del> * <h3>1. Implement the method and annotate it with {@code @Bean}</h3>
<add> * <h4>1. Implement the method and annotate it with {@code @Bean}</h4>
<ide> * In this case, the implementing {@code @Configuration} class implements this method,
<ide> * marks it with {@code @Bean}, and configures and returns the transaction manager
<ide> * directly within the method body:
<ide> public interface TransactionManagementConfigurer {
<ide> * public PlatformTransactionManager annotationDrivenTransactionManager() {
<ide> * return new DataSourceTransactionManager(dataSource());
<ide> * }</pre>
<del> * <h3>2. Implement the method without {@code @Bean} and delegate to another existing
<del> * {@code @Bean} method</h3>
<add> * <h4>2. Implement the method without {@code @Bean} and delegate to another existing
<add> * {@code @Bean} method</h4>
<ide> * <pre class="code">
<ide> * @Bean
<ide> * public PlatformTransactionManager txManager() { | 4 |
PHP | PHP | fix passedparams with non string scalar values | 94de0857f33b20026791e190bb2725a882c88ea4 | <ide><path>src/Controller/ControllerFactory.php
<ide> protected function getActionArgs(Closure $action, array $passedParams): array
<ide> // Use any passed params as positional arguments
<ide> if ($passedParams) {
<ide> $argument = array_shift($passedParams);
<del> if ($type instanceof ReflectionNamedType) {
<add> if (is_string($argument) && $type instanceof ReflectionNamedType) {
<ide> $typedArgument = $this->coerceStringToType($argument, $type);
<ide>
<ide> if ($typedArgument === null) {
<ide><path>tests/TestCase/Controller/ControllerFactoryTest.php
<ide> public function testCreateWithContainerDependenciesWithObjectRouteParam(): void
<ide> $this->assertEquals($data->dep->id, $inject->id);
<ide> }
<ide>
<add> public function testCreateWithNonStringScalarRouteParam(): void
<add> {
<add> $request = new ServerRequest([
<add> 'url' => 'test_plugin_three/dependencies/required_typed',
<add> 'params' => [
<add> 'plugin' => null,
<add> 'controller' => 'Dependencies',
<add> 'action' => 'requiredTyped',
<add> 'pass' => [1.1, 2, true, ['foo' => 'bar']],
<add> ],
<add> ]);
<add> $controller = $this->factory->create($request);
<add> $response = $this->factory->invoke($controller);
<add>
<add> $expected = ['one' => 1.1, 'two' => 2, 'three' => true, 'four' => ['foo' => 'bar']];
<add> $data = json_decode((string)$response->getBody(), true);
<add> $this->assertSame($expected, $data);
<add> }
<add>
<ide> /**
<ide> * Ensure that a controllers startup process can emit a response
<ide> */ | 2 |
PHP | PHP | remove unused methods | 9a4f743ad67073229d8fa1cec645c62d97265331 | <ide><path>src/Illuminate/Database/Query/Builder.php
<ide> public function orWhereNotIn($column, $values)
<ide> return $this->whereNotIn($column, $values, 'or');
<ide> }
<ide>
<del> /**
<del> * Add a where in with a sub-select to the query.
<del> *
<del> * @param string $column
<del> * @param \Closure $callback
<del> * @param string $boolean
<del> * @param bool $not
<del> * @return $this
<del> */
<del> protected function whereInSub($column, Closure $callback, $boolean, $not)
<del> {
<del> $type = $not ? 'NotInSub' : 'InSub';
<del>
<del> // To create the exists sub-select, we will actually create a query and call the
<del> // provided callback with the query so the developer may set any of the query
<del> // conditions they want for the in clause, then we'll put it in this array.
<del> call_user_func($callback, $query = $this->forSubQuery());
<del>
<del> $this->wheres[] = compact('type', 'column', 'query', 'boolean');
<del>
<del> $this->addBinding($query->getBindings(), 'where');
<del>
<del> return $this;
<del> }
<del>
<del> /**
<del> * Add an external sub-select to the query.
<del> *
<del> * @param string $column
<del> * @param \Illuminate\Database\Query\Builder|static $query
<del> * @param string $boolean
<del> * @param bool $not
<del> * @return $this
<del> */
<del> protected function whereInExistingQuery($column, $query, $boolean, $not)
<del> {
<del> $type = $not ? 'NotInSub' : 'InSub';
<del>
<del> $this->wheres[] = compact('type', 'column', 'query', 'boolean');
<del>
<del> $this->addBinding($query->getBindings(), 'where');
<del>
<del> return $this;
<del> }
<del>
<ide> /**
<ide> * Add a "where in raw" clause for integer values to the query.
<ide> *
<ide><path>src/Illuminate/Database/Query/Grammars/Grammar.php
<ide> protected function whereNotInRaw(Builder $query, $where)
<ide> return '1 = 1';
<ide> }
<ide>
<del> /**
<del> * Compile a where in sub-select clause.
<del> *
<del> * @param \Illuminate\Database\Query\Builder $query
<del> * @param array $where
<del> * @return string
<del> */
<del> protected function whereInSub(Builder $query, $where)
<del> {
<del> return $this->wrap($where['column']).' in ('.$this->compileSelect($where['query']).')';
<del> }
<del>
<del> /**
<del> * Compile a where not in sub-select clause.
<del> *
<del> * @param \Illuminate\Database\Query\Builder $query
<del> * @param array $where
<del> * @return string
<del> */
<del> protected function whereNotInSub(Builder $query, $where)
<del> {
<del> return $this->wrap($where['column']).' not in ('.$this->compileSelect($where['query']).')';
<del> }
<del>
<ide> /**
<ide> * Compile a "where in raw" clause.
<ide> * | 2 |