content_type
stringclasses 8
values | main_lang
stringclasses 7
values | message
stringlengths 1
50
| sha
stringlengths 40
40
| patch
stringlengths 52
962k
| file_count
int64 1
300
|
---|---|---|---|---|---|
Java | Java | fix slider height in fabric | 07fe994e262723b94263c848ca1733071dcd92cc | <ide><path>ReactAndroid/src/main/java/com/facebook/react/views/slider/ReactSliderManager.java
<ide> import com.facebook.react.bridge.ReadableMap;
<ide> import com.facebook.react.common.MapBuilder;
<ide> import com.facebook.react.uimanager.LayoutShadowNode;
<add>import com.facebook.react.uimanager.PixelUtil;
<ide> import com.facebook.react.uimanager.SimpleViewManager;
<ide> import com.facebook.react.uimanager.ThemedReactContext;
<ide> import com.facebook.react.uimanager.UIManagerModule;
<ide> public long measure(
<ide> ViewGroup.LayoutParams.WRAP_CONTENT, View.MeasureSpec.UNSPECIFIED);
<ide> reactSlider.measure(spec, spec);
<ide>
<del> return YogaMeasureOutput.make(reactSlider.getMeasuredWidth(), reactSlider.getMeasuredHeight());
<add> return YogaMeasureOutput.make(
<add> PixelUtil.toDIPFromPixel(reactSlider.getMeasuredWidth()),
<add> PixelUtil.toDIPFromPixel(reactSlider.getMeasuredHeight()));
<ide> }
<ide>
<ide> @Override | 1 |
Python | Python | add text preprocessing tests | 18d52e634d75074983cba62f51c3fc6737828d97 | <ide><path>keras/preprocessing/text.py
<ide> def one_hot(text, n, filters=base_filter(), lower=True, split=" "):
<ide>
<ide>
<ide> class Tokenizer(object):
<del> def __init__(self, nb_words=None, filters=base_filter(), lower=True, split=" "):
<add> def __init__(self, nb_words=None, filters=base_filter(),
<add> lower=True, split=' '):
<add> '''The class allows to vectorize a text corpus, by turning each
<add> text into either a sequence of integers (each integer being the index
<add> of a token in a dictionary) or into a vector where the coefficient
<add> for each token could be binary, based on word count, based on tf-idf...
<add>
<add> # Arguments
<add> nb_words: the maximum number of words to keep, based
<add> on word frequency. Only the most common `nb_words` words will
<add> be kept.
<add> filters: a string where each element is a character that will be
<add> filtered from the texts. The default is all punctuation, plus
<add> tabs and line breaks, minus the `'` character.
<add> lower: boolean. Whether to convert the texts to lowercase.
<add> split: character or string to use for token splitting.
<add>
<add> By default, all punctuation is removed, turning the texts into
<add> space-separated sequences of words
<add> (words maybe include the `'` character). These sequences are then
<add> splits into lists of tokens. They will then be indexed or vectorized.
<add>
<add> `0` is a reserved index that won't be assigned to any word.
<add> '''
<ide> self.word_counts = {}
<ide> self.word_docs = {}
<ide> self.filters = filters
<ide> def __init__(self, nb_words=None, filters=base_filter(), lower=True, split=" "):
<ide> def fit_on_texts(self, texts):
<ide> '''
<ide> required before using texts_to_sequences or texts_to_matrix
<del> @param texts: can be a list or a generator (for memory-efficiency)
<add>
<add> # Arguments
<add> texts: can be a list of strings,
<add> or a generator of strings (for memory-efficiency)
<ide> '''
<ide> self.document_count = 0
<ide> for text in texts:
<ide><path>tests/keras/preprocessing/test_sequence.py
<ide>
<ide> import pytest
<ide>
<del>from keras.preprocessing.sequence import (pad_sequences, make_sampling_table,
<del> skipgrams)
<add>from keras.preprocessing.sequence import pad_sequences
<add>from keras.preprocessing.sequence import make_sampling_table
<add>from keras.preprocessing.sequence import skipgrams
<ide>
<ide>
<ide> def test_pad_sequences(): | 2 |
Python | Python | use list(elem) instead of elem.getchildren() | b3fec669b16442b18cf9e894157904aa169b71d3 | <ide><path>libcloud/common/durabledns.py
<ide> def parse_body_and_error(self):
<ide> xml_obj = self.parse_body()
<ide>
<ide> # pylint: disable=no-member
<del> envelop_body = xml_obj.getchildren()[0]
<del> method_resp = envelop_body.getchildren()[0]
<add> envelop_body = list(xml_obj)[0]
<add> method_resp = list(envelop_body)[0]
<ide> # parse the xml_obj
<ide> # handle errors
<ide> if 'Fault' in method_resp.tag:
<del> fault = [fault for fault in method_resp.getchildren()
<add> fault = [fault for fault in list(method_resp)
<ide> if fault.tag == 'faultstring'][0]
<ide> error_dict['ERRORMESSAGE'] = fault.text.strip()
<ide> error_dict['ERRORCODE'] = self.status
<ide> errors.append(error_dict)
<ide>
<ide> # parsing response from listZonesResponse
<ide> if 'listZonesResponse' in method_resp.tag:
<del> answer = method_resp.getchildren()[0]
<add> answer = list(method_resp)[0]
<ide> for element in answer:
<del> zone_dict['id'] = element.getchildren()[0].text
<add> zone_dict['id'] = list(element)[0].text
<ide> objects.append(zone_dict)
<ide> # reset the zone_dict
<ide> zone_dict = {}
<ide> # parse response from listRecordsResponse
<ide> if 'listRecordsResponse' in method_resp.tag:
<del> answer = method_resp.getchildren()[0]
<add> answer = list(method_resp)[0]
<ide> for element in answer:
<del> for child in element.getchildren():
<add> for child in list(element):
<ide> if child.tag == 'id':
<ide> record_dict['id'] = child.text.strip()
<ide> objects.append(record_dict)
<ide> # reset the record_dict for later usage
<ide> record_dict = {}
<ide> # parse response from getZoneResponse
<ide> if 'getZoneResponse' in method_resp.tag:
<del> for child in method_resp.getchildren():
<add> for child in list(method_resp):
<ide> if child.tag == 'origin':
<ide> zone_dict['id'] = child.text.strip()
<ide> zone_dict['domain'] = child.text.strip()
<ide> def parse_body_and_error(self):
<ide> objects.append(zone_dict)
<ide> # parse response from getRecordResponse
<ide> if 'getRecordResponse' in method_resp.tag:
<del> answer = method_resp.getchildren()[0]
<del> for child in method_resp.getchildren():
<add> answer = list(method_resp)[0]
<add> for child in list(method_resp):
<ide> if child.tag == 'id' and child.text:
<ide> record_dict['id'] = child.text.strip()
<ide> elif child.tag == 'name' and child.text:
<ide> def parse_body_and_error(self):
<ide> objects.append(record_dict)
<ide> record_dict = {}
<ide> if 'createZoneResponse' in method_resp.tag:
<del> answer = method_resp.getchildren()[0]
<add> answer = list(method_resp)[0]
<ide> if answer.tag == 'return' and answer.text:
<ide> record_dict['id'] = answer.text.strip()
<ide> objects.append(record_dict)
<ide> # catch Record does not exists error when deleting record
<ide> if 'deleteRecordResponse' in method_resp.tag:
<del> answer = method_resp.getchildren()[0]
<add> answer = list(method_resp)[0]
<ide> if 'Record does not exists' in answer.text.strip():
<ide> errors.append({'ERRORMESSAGE': answer.text.strip(),
<ide> 'ERRORCODE': self.status})
<ide> # parse response in createRecordResponse
<ide> if 'createRecordResponse' in method_resp.tag:
<del> answer = method_resp.getchildren()[0]
<add> answer = list(method_resp)[0]
<ide> record_dict['id'] = answer.text.strip()
<ide> objects.append(record_dict)
<ide> record_dict = {}
<ide><path>libcloud/common/zonomi.py
<ide> def parse_body_and_errors(self):
<ide> errors.append(error_dict)
<ide>
<ide> # Data handling
<del> childrens = xml_body.getchildren()
<add> childrens = list(xml_body)
<ide> if len(childrens) == 3:
<ide> result_counts = childrens[1]
<ide> actions = childrens[2]
<ide>
<ide> if actions is not None:
<del> actions_childrens = actions.getchildren()
<add> actions_childrens = list(actions)
<ide> action = actions_childrens[0]
<del> action_childrens = action.getchildren()
<add> action_childrens = list(action)
<ide>
<ide> if action_childrens is not None:
<ide> for child in action_childrens:
<ide><path>libcloud/compute/drivers/ec2.py
<ide> def parse_error(self):
<ide> body=self.body, driver=EC2NodeDriver)
<ide>
<ide> for err in body.findall('Errors/Error'):
<del> code, message = err.getchildren()
<add> code, message = list(err)
<ide> err_list.append('%s: %s' % (code.text, message.text))
<ide> if code.text == 'InvalidClientTokenId':
<ide> raise InvalidCredsError(err_list[-1])
<ide><path>libcloud/dns/drivers/durabledns.py
<ide> def list_zones(self):
<ide> schema_params.get('method'),
<ide> attributes)
<ide> params = {'apiuser': self.key, 'apikey': self.secret}
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def list_records(self, zone):
<ide> attributes)
<ide> params = {'apiuser': self.key, 'apikey': self.secret,
<ide> 'zonename': zone.id}
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def get_zone(self, zone_id):
<ide> attributes)
<ide> params = {'apiuser': self.key, 'apikey': self.secret,
<ide> 'zonename': zone_id}
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def get_record(self, zone_id, record_id):
<ide> attributes)
<ide> params = {'apiuser': self.key, 'apikey': self.secret,
<ide> 'zonename': zone_id, 'recordid': record_id}
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def create_zone(self, domain, type='master', ttl=None, extra=None):
<ide> params = {'apiuser': self.key, 'apikey': self.secret,
<ide> 'zonename': domain, 'ttl': ttl or DEFAULT_TTL}
<ide> params.update(extra)
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def create_record(self, name, zone, type, data, extra=None):
<ide> 'zonename': zone.id, 'name': name, 'type': type,
<ide> 'data': data}
<ide> params.update(extra)
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
<ide> params = {'apiuser': self.key, 'apikey': self.secret,
<ide> 'zonename': domain, 'ttl': ttl}
<ide> params.update(extra)
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def update_record(self, record, name, type, data, extra=None):
<ide> 'zonename': zone.id, 'id': record.id, 'name': name,
<ide> 'data': data}
<ide> params.update(extra)
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def delete_zone(self, zone):
<ide> attributes)
<ide> params = {'apiuser': self.key, 'apikey': self.secret,
<ide> 'zonename': zone.id}
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide> def delete_record(self, record):
<ide> attributes)
<ide> params = {'apiuser': self.key, 'apikey': self.secret,
<ide> 'zonename': record.zone.id, 'id': record.id}
<del> urn = schema.getchildren()[0]
<add> urn = list(schema)[0]
<ide> for child in urn:
<ide> key = child.tag.split(':')[2]
<ide> if key in attributes:
<ide><path>libcloud/storage/drivers/azure_blobs.py
<ide> def _xml_to_container(self, node):
<ide> 'meta_data': {}
<ide> }
<ide>
<del> for meta in metadata.getchildren():
<add> for meta in list(metadata):
<ide> extra['meta_data'][meta.tag] = meta.text
<ide>
<ide> return Container(name=name, extra=extra, driver=self)
<ide> def _xml_to_object(self, container, blob):
<ide> extra['md5_hash'] = value
<ide>
<ide> meta_data = {}
<del> for meta in metadata.getchildren():
<add> for meta in list(metadata):
<ide> meta_data[meta.tag] = meta.text
<ide>
<ide> return Object(name=name, size=size, hash=etag, meta_data=meta_data, | 5 |
Javascript | Javascript | use more es6 syntaxes in the shopping cart example | c4e6c3228e9cb8bd37d12756ec9049acc65007e1 | <ide><path>examples/shopping-cart/src/actions/index.js
<ide> import shop from '../api/shop'
<ide> import * as types from '../constants/ActionTypes'
<ide>
<del>function receiveProducts(products) {
<del> return {
<del> type: types.RECEIVE_PRODUCTS,
<del> products: products
<del> }
<del>}
<add>const receiveProducts = products => ({
<add> type: types.RECEIVE_PRODUCTS,
<add> products: products
<add>})
<ide>
<del>export function getAllProducts() {
<del> return dispatch => {
<del> shop.getProducts(products => {
<del> dispatch(receiveProducts(products))
<del> })
<del> }
<del>}
<add>export const getAllProducts = () => dispatch => shop.getProducts(products => {
<add> dispatch(receiveProducts(products))
<add>})
<ide>
<del>function addToCartUnsafe(productId) {
<del> return {
<del> type: types.ADD_TO_CART,
<del> productId
<del> }
<del>}
<add>const addToCartUnsafe = productId => ({
<add> type: types.ADD_TO_CART,
<add> productId
<add>})
<ide>
<del>export function addToCart(productId) {
<del> return (dispatch, getState) => {
<del> if (getState().products.byId[productId].inventory > 0) {
<del> dispatch(addToCartUnsafe(productId))
<del> }
<add>export const addToCart = productId => (dispatch, getState) => {
<add> if (getState().products.byId[productId].inventory > 0) {
<add> dispatch(addToCartUnsafe(productId))
<ide> }
<ide> }
<ide>
<del>export function checkout(products) {
<del> return (dispatch, getState) => {
<del> const cart = getState().cart
<add>export const checkout = (products) => (dispatch, getState) => {
<add> const cart = getState().cart
<ide>
<add> dispatch({
<add> type: types.CHECKOUT_REQUEST
<add> })
<add> shop.buyProducts(products, () => {
<ide> dispatch({
<del> type: types.CHECKOUT_REQUEST
<add> type: types.CHECKOUT_SUCCESS,
<add> cart
<ide> })
<del> shop.buyProducts(products, () => {
<del> dispatch({
<del> type: types.CHECKOUT_SUCCESS,
<del> cart
<del> })
<del> // Replace the line above with line below to rollback on failure:
<del> // dispatch({ type: types.CHECKOUT_FAILURE, cart })
<del> })
<del> }
<add> // Replace the line above with line below to rollback on failure:
<add> // dispatch({ type: types.CHECKOUT_FAILURE, cart })
<add> })
<ide> }
<ide><path>examples/shopping-cart/src/api/shop.js
<ide> import _products from './products.json'
<ide> const TIMEOUT = 100
<ide>
<ide> export default {
<del> getProducts(cb, timeout) {
<del> setTimeout(() => cb(_products), timeout || TIMEOUT)
<del> },
<del>
<del> buyProducts(payload, cb, timeout) {
<del> setTimeout(() => cb(), timeout || TIMEOUT)
<del> }
<add> getProducts: (cb, timeout) => setTimeout(() => cb(_products), timeout || TIMEOUT),
<add> buyProducts: (payload, cb, timeout) => setTimeout(() => cb(), timeout || TIMEOUT)
<ide> }
<ide><path>examples/shopping-cart/src/components/Cart.js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { PropTypes } from 'react'
<ide> import Product from './Product'
<ide>
<del>export default class Cart extends Component {
<del> render() {
<del> const { products, total, onCheckoutClicked } = this.props
<add>const Cart = ({ products, total, onCheckoutClicked }) => {
<add> const hasProducts = products.length > 0
<add> const nodes = !hasProducts ?
<add> <em>Please add some products to cart.</em> :
<add> products.map(product =>
<add> <Product
<add> title={product.title}
<add> price={product.price}
<add> quantity={product.quantity}
<add> key={product.id}/>
<add> )
<ide>
<del> const hasProducts = products.length > 0
<del> const nodes = !hasProducts ?
<del> <em>Please add some products to cart.</em> :
<del> products.map(product =>
<del> <Product
<del> title={product.title}
<del> price={product.price}
<del> quantity={product.quantity}
<del> key={product.id}/>
<del> )
<del>
<del> return (
<del> <div>
<del> <h3>Your Cart</h3>
<del> <div>{nodes}</div>
<del> <p>Total: ${total}</p>
<del> <button onClick={onCheckoutClicked}
<del> disabled={hasProducts ? '' : 'disabled'}>
<del> Checkout
<del> </button>
<del> </div>
<del> )
<del> }
<add> return <div>
<add> <h3>Your Cart</h3>
<add> <div>{nodes}</div>
<add> <p>Total: ${total}</p>
<add> <button onClick={onCheckoutClicked}
<add> disabled={hasProducts ? '' : 'disabled'}>
<add> Checkout
<add> </button>
<add> </div>
<ide> }
<ide>
<ide> Cart.propTypes = {
<ide> products: PropTypes.array,
<ide> total: PropTypes.string,
<ide> onCheckoutClicked: PropTypes.func
<ide> }
<add>
<add>export default Cart
<ide><path>examples/shopping-cart/src/components/Cart.spec.js
<ide> import { shallow } from 'enzyme'
<ide> import Cart from './Cart'
<ide> import Product from './Product'
<ide>
<del>function setup(total, products = []) {
<add>const setup = (total, products = []) => {
<ide> const actions = {
<ide> onCheckoutClicked: jest.fn()
<ide> }
<ide><path>examples/shopping-cart/src/components/Product.js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { PropTypes } from 'react'
<ide>
<del>export default class Product extends Component {
<del> render() {
<del> const { price, quantity, title } = this.props
<del> return <div> {title} - ${price} {quantity ? `x ${quantity}` : null} </div>
<del> }
<del>}
<add>const Product = ({ price, quantity, title }) => <div>
<add> {title} - ${price} {quantity ? `x ${quantity}` : null}
<add></div>
<ide>
<ide> Product.propTypes = {
<ide> price: PropTypes.number,
<ide> quantity: PropTypes.number,
<ide> title: PropTypes.string
<ide> }
<add>
<add>export default Product
<ide><path>examples/shopping-cart/src/components/Product.spec.js
<ide> import React from 'react'
<ide> import { shallow } from 'enzyme'
<ide> import Product from './Product'
<ide>
<del>function setup(props) {
<add>const setup = props => {
<ide> const component = shallow(
<ide> <Product {...props} />
<ide> )
<ide><path>examples/shopping-cart/src/components/ProductItem.js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { PropTypes } from 'react'
<ide> import Product from './Product'
<ide>
<del>export default class ProductItem extends Component {
<del> render() {
<del> const { product } = this.props
<del>
<del> return (
<del> <div
<del> style={{ marginBottom: 20 }}>
<del> <Product
<del> title={product.title}
<del> price={product.price} />
<del> <button
<del> onClick={this.props.onAddToCartClicked}
<del> disabled={product.inventory > 0 ? '' : 'disabled'}>
<del> {product.inventory > 0 ? 'Add to cart' : 'Sold Out'}
<del> </button>
<del> </div>
<del> )
<del> }
<del>}
<add>const ProductItem = ({ product, onAddToCartClicked }) => <div
<add> style={{ marginBottom: 20 }}>
<add> <Product
<add> title={product.title}
<add> price={product.price} />
<add> <button
<add> onClick={onAddToCartClicked}
<add> disabled={product.inventory > 0 ? '' : 'disabled'}>
<add> {product.inventory > 0 ? 'Add to cart' : 'Sold Out'}
<add> </button>
<add></div>
<ide>
<ide> ProductItem.propTypes = {
<ide> product: PropTypes.shape({
<ide> ProductItem.propTypes = {
<ide> }).isRequired,
<ide> onAddToCartClicked: PropTypes.func.isRequired
<ide> }
<add>
<add>export default ProductItem
<ide><path>examples/shopping-cart/src/components/ProductItem.spec.js
<ide> import { shallow } from 'enzyme'
<ide> import Product from './Product'
<ide> import ProductItem from './ProductItem'
<ide>
<del>function setup(product) {
<add>const setup = product => {
<ide> const actions = {
<ide> onAddToCartClicked: jest.fn()
<ide> }
<ide><path>examples/shopping-cart/src/components/ProductsList.js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { PropTypes } from 'react'
<ide>
<del>export default class ProductsList extends Component {
<del> render() {
<del> return (
<del> <div>
<del> <h3>{this.props.title}</h3>
<del> <div>{this.props.children}</div>
<del> </div>
<del> )
<del> }
<del>}
<add>const ProductsList = ({title, children}) => <div>
<add> <h3>{title}</h3>
<add> <div>{children}</div>
<add></div>
<ide>
<ide> ProductsList.propTypes = {
<ide> children: PropTypes.node,
<ide> title: PropTypes.string.isRequired
<ide> }
<add>
<add>export default ProductsList
<ide><path>examples/shopping-cart/src/components/ProductsList.spec.js
<ide> import React from 'react'
<ide> import { shallow } from 'enzyme'
<ide> import ProductsList from './ProductsList'
<ide>
<del>function setup(props) {
<add>const setup = props => {
<ide> const component = shallow(
<ide> <ProductsList title={props.title}>{props.children}</ProductsList>
<ide> )
<ide><path>examples/shopping-cart/src/containers/App.js
<del>import React, { Component } from 'react'
<add>import React from 'react'
<ide> import ProductsContainer from './ProductsContainer'
<ide> import CartContainer from './CartContainer'
<ide>
<del>export default class App extends Component {
<del> render() {
<del> return (
<del> <div>
<del> <h2>Shopping Cart Example</h2>
<del> <hr/>
<del> <ProductsContainer />
<del> <hr/>
<del> <CartContainer />
<del> </div>
<del> )
<del> }
<del>}
<add>const App = () => <div>
<add> <h2>Shopping Cart Example</h2>
<add> <hr/>
<add> <ProductsContainer />
<add> <hr/>
<add> <CartContainer />
<add></div>
<add>
<add>export default App
<ide><path>examples/shopping-cart/src/containers/CartContainer.js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { PropTypes } from 'react'
<ide> import { connect } from 'react-redux'
<ide> import { checkout } from '../actions'
<ide> import { getTotal, getCartProducts } from '../reducers'
<ide> import Cart from '../components/Cart'
<ide>
<del>class CartContainer extends Component {
<del> render() {
<del> const { products, total } = this.props
<del>
<del> return (
<del> <Cart
<del> products={products}
<del> total={total}
<del> onCheckoutClicked={() => this.props.checkout()} />
<del> )
<del> }
<del>}
<add>const CartContainer = ({ products, total, checkout }) => <Cart
<add> products={products}
<add> total={total}
<add> onCheckoutClicked={() => checkout(products)} />
<ide>
<ide> CartContainer.propTypes = {
<ide> products: PropTypes.arrayOf(PropTypes.shape({
<ide> CartContainer.propTypes = {
<ide> checkout: PropTypes.func.isRequired
<ide> }
<ide>
<del>const mapStateToProps = (state) => {
<del> return {
<del> products: getCartProducts(state),
<del> total: getTotal(state)
<del> }
<del>}
<add>const mapStateToProps = (state) => ({
<add> products: getCartProducts(state),
<add> total: getTotal(state)
<add>})
<ide>
<ide> export default connect(
<ide> mapStateToProps,
<ide><path>examples/shopping-cart/src/containers/ProductsContainer.js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { PropTypes } from 'react'
<ide> import { connect } from 'react-redux'
<ide> import { addToCart } from '../actions'
<ide> import { getVisibleProducts } from '../reducers/products'
<ide> import ProductItem from '../components/ProductItem'
<ide> import ProductsList from '../components/ProductsList'
<ide>
<del>class ProductsContainer extends Component {
<del> render() {
<del> const { products } = this.props
<del> return (
<del> <ProductsList title="Products">
<del> {products.map(product =>
<del> <ProductItem
<del> key={product.id}
<del> product={product}
<del> onAddToCartClicked={() => this.props.addToCart(product.id)} />
<del> )}
<del> </ProductsList>
<del> )
<del> }
<del>}
<add>const ProductsContainer = ({ products, addToCart }) => <ProductsList title="Products">
<add> {products.map(product =>
<add> <ProductItem
<add> key={product.id}
<add> product={product}
<add> onAddToCartClicked={() => addToCart(product.id)} />
<add> )}
<add></ProductsList>
<ide>
<ide> ProductsContainer.propTypes = {
<ide> products: PropTypes.arrayOf(PropTypes.shape({
<ide> ProductsContainer.propTypes = {
<ide> addToCart: PropTypes.func.isRequired
<ide> }
<ide>
<del>function mapStateToProps(state) {
<del> return {
<del> products: getVisibleProducts(state.products)
<del> }
<del>}
<add>const mapStateToProps = state => ({
<add> products: getVisibleProducts(state.products)
<add>})
<ide>
<ide> export default connect(
<ide> mapStateToProps,
<ide><path>examples/shopping-cart/src/reducers/cart.js
<ide> const initialState = {
<ide> quantityById: {}
<ide> }
<ide>
<del>function addedIds(state = initialState.addedIds, action) {
<add>const addedIds = (state = initialState.addedIds, action) => {
<ide> switch (action.type) {
<ide> case ADD_TO_CART:
<ide> if (state.indexOf(action.productId) !== -1) {
<ide> function addedIds(state = initialState.addedIds, action) {
<ide> }
<ide> }
<ide>
<del>function quantityById(state = initialState.quantityById, action) {
<add>const quantityById = (state = initialState.quantityById, action) => {
<ide> switch (action.type) {
<ide> case ADD_TO_CART:
<ide> const { productId } = action
<del> return Object.assign({}, state, {
<add> return { ...state,
<ide> [productId]: (state[productId] || 0) + 1
<del> })
<add> }
<ide> default:
<ide> return state
<ide> }
<ide> }
<ide>
<del>export default function cart(state = initialState, action) {
<add>export const getQuantity = (state, productId) =>
<add> state.quantityById[productId] || 0
<add>
<add>export const getAddedIds = state => state.addedIds
<add>
<add>const cart = (state = initialState, action) => {
<ide> switch (action.type) {
<ide> case CHECKOUT_REQUEST:
<ide> return initialState
<ide> export default function cart(state = initialState, action) {
<ide> }
<ide> }
<ide>
<del>export function getQuantity(state, productId) {
<del> return state.quantityById[productId] || 0
<del>}
<del>
<del>export function getAddedIds(state) {
<del> return state.addedIds
<del>}
<add>export default cart
<ide><path>examples/shopping-cart/src/reducers/index.js
<ide> export default combineReducers({
<ide> products
<ide> })
<ide>
<del>function getAddedIds(state) {
<del> return fromCart.getAddedIds(state.cart)
<del>}
<add>const getAddedIds = state => fromCart.getAddedIds(state.cart)
<ide>
<del>function getQuantity(state, id) {
<del> return fromCart.getQuantity(state.cart, id)
<del>}
<add>const getQuantity = (state, id) => fromCart.getQuantity(state.cart, id)
<ide>
<del>function getProduct(state, id) {
<del> return fromProducts.getProduct(state.products, id)
<del>}
<add>const getProduct = (state, id) => fromProducts.getProduct(state.products, id)
<ide>
<del>export function getTotal(state) {
<del> return getAddedIds(state).reduce((total, id) =>
<add>export const getTotal = state =>
<add> getAddedIds(state).reduce((total, id) =>
<ide> total + getProduct(state, id).price * getQuantity(state, id),
<ide> 0
<ide> ).toFixed(2)
<del>}
<ide>
<del>export function getCartProducts(state) {
<del> return getAddedIds(state).map(id => ({
<add>export const getCartProducts = state =>
<add> getAddedIds(state).map(id => ({
<ide> ...getProduct(state, id),
<ide> quantity: getQuantity(state, id)
<ide> }))
<del>}
<ide><path>examples/shopping-cart/src/reducers/products.js
<ide> import { combineReducers } from 'redux'
<ide> import { RECEIVE_PRODUCTS, ADD_TO_CART } from '../constants/ActionTypes'
<ide>
<del>function products(state, action) {
<add>const products = (state, action) => {
<ide> switch (action.type) {
<ide> case ADD_TO_CART:
<ide> return {
<ide> function products(state, action) {
<ide> }
<ide> }
<ide>
<del>function byId(state = {}, action) {
<add>const byId = (state = {}, action) => {
<ide> switch (action.type) {
<ide> case RECEIVE_PRODUCTS:
<ide> return {
<ide> function byId(state = {}, action) {
<ide> }
<ide> }
<ide>
<del>function visibleIds(state = [], action) {
<add>const visibleIds = (state = [], action) => {
<ide> switch (action.type) {
<ide> case RECEIVE_PRODUCTS:
<ide> return action.products.map(product => product.id)
<ide> export default combineReducers({
<ide> visibleIds
<ide> })
<ide>
<del>export function getProduct(state, id) {
<del> return state.byId[id]
<del>}
<add>export const getProduct = (state, id) => state.byId[id]
<ide>
<del>export function getVisibleProducts(state) {
<del> return state.visibleIds.map(id => getProduct(state, id))
<del>}
<add>export const getVisibleProducts = state => state.visibleIds.map(id =>
<add> getProduct(state, id)
<add>) | 16 |
Text | Text | fix code examples in stream.md | 85f7af77eeae1a3b15a1958102450c4fc9e06df8 | <ide><path>doc/api/stream.md
<ide> for (let i = 0; i < 100; i++) {
<ide> }
<ide> writer.end('This is the end\n');
<ide> writer.on('finish', () => {
<del> console.error('All writes are now complete.');
<add> console.log('All writes are now complete.');
<ide> });
<ide> ```
<ide>
<ide> a readable stream, adding this writable to its set of destinations.
<ide> const writer = getWritableStreamSomehow();
<ide> const reader = getReadableStreamSomehow();
<ide> writer.on('pipe', (src) => {
<del> console.error('something is piping into the writer');
<add> console.log('Something is piping into the writer.');
<ide> assert.equal(src, reader);
<ide> });
<ide> reader.pipe(writer);
<ide> This is also emitted in case this [`Writable`][] stream emits an error when a
<ide> const writer = getWritableStreamSomehow();
<ide> const reader = getReadableStreamSomehow();
<ide> writer.on('unpipe', (src) => {
<del> console.error('Something has stopped piping into the writer.');
<add> console.log('Something has stopped piping into the writer.');
<ide> assert.equal(src, reader);
<ide> });
<ide> reader.pipe(writer);
<ide> function write(data, cb) {
<ide>
<ide> // Wait for cb to be called before doing any other write.
<ide> write('hello', () => {
<del> console.log('write completed, do more writes now');
<add> console.log('Write completed, do more writes now.');
<ide> });
<ide> ```
<ide>
<ide> const readable = getReadableStreamSomehow();
<ide> readable.setEncoding('utf8');
<ide> readable.on('data', (chunk) => {
<ide> assert.equal(typeof chunk, 'string');
<del> console.log('got %d characters of string data', chunk.length);
<add> console.log('Got %d characters of string data:', chunk.length);
<ide> });
<ide> ```
<ide>
<ide> const writable = fs.createWriteStream('file.txt');
<ide> // but only for the first second
<ide> readable.pipe(writable);
<ide> setTimeout(() => {
<del> console.log('Stop writing to file.txt');
<add> console.log('Stop writing to file.txt.');
<ide> readable.unpipe(writable);
<del> console.log('Manually close the file stream');
<add> console.log('Manually close the file stream.');
<ide> writable.end();
<ide> }, 1000);
<ide> ```
<ide> const rs = fs.createReadStream('archive.tar');
<ide>
<ide> finished(rs, (err) => {
<ide> if (err) {
<del> console.error('Stream failed', err);
<add> console.error('Stream failed.', err);
<ide> } else {
<del> console.log('Stream is done reading');
<add> console.log('Stream is done reading.');
<ide> }
<ide> });
<ide>
<ide> const rs = fs.createReadStream('archive.tar');
<ide>
<ide> async function run() {
<ide> await finished(rs);
<del> console.log('Stream is done reading');
<add> console.log('Stream is done reading.');
<ide> }
<ide>
<ide> run().catch(console.error);
<ide> pipeline(
<ide> fs.createWriteStream('archive.tar.gz'),
<ide> (err) => {
<ide> if (err) {
<del> console.error('Pipeline failed', err);
<add> console.error('Pipeline failed.', err);
<ide> } else {
<del> console.log('Pipeline succeeded');
<add> console.log('Pipeline succeeded.');
<ide> }
<ide> }
<ide> );
<ide> async function run() {
<ide> zlib.createGzip(),
<ide> fs.createWriteStream('archive.tar.gz')
<ide> );
<del> console.log('Pipeline succeeded');
<add> console.log('Pipeline succeeded.');
<ide> }
<ide>
<ide> run().catch(console.error); | 1 |
Javascript | Javascript | update isdatasetvisible call | 200abc184ba8a9ff1a5ac091e10fd024690392fb | <ide><path>src/scales/scale.time.js
<ide> module.exports = function(Chart) {
<ide>
<ide> helpers.each(this.chart.data.datasets, function(dataset, datasetIndex) {
<ide> var momentsForDataset = [];
<del> var datasetVisible = helpers.isDatasetVisible(dataset);
<add> var datasetVisible = this.chart.isDatasetVisible(datasetIndex);
<ide>
<ide> if (typeof dataset.data[0] === 'object') {
<ide> helpers.each(dataset.data, function(value, index) { | 1 |
Text | Text | add new links to code structure | 7c68f34f7d2c70cdc8f115a2c34b7c507a1f888d | <ide><path>docs/faq/CodeStructure.md
<ide> Find the balance between these two extremes, and you will master Redux.
<ide> **Articles**
<ide> - [Where do I put my business logic in a React/Redux application?](https://medium.com/@jeffbski/where-do-i-put-my-business-logic-in-a-react-redux-application-9253ef91ce1)
<ide> - [How to Scale React Applications](https://www.smashingmagazine.com/2016/09/how-to-scale-react-applications/)
<add>- [The Tao of Redux, Part 2 - Practice and Philosophy. Thick and thin reducers.](http://blog.isquaredsoftware.com/2017/05/idiomatic-redux-tao-of-redux-part-2/#thick-and-thin-reducers)
<ide>
<ide> **Discussions**
<add>- [How putting too much logic in action creators could affect debugging](https://github.com/reactjs/redux/issues/384#issuecomment-127393209)
<ide> - [#1165: Where to put business logic / validation?](https://github.com/reactjs/redux/issues/1165)
<del>- [#1171: Recommendations for best practices regarding action-creators, reducers, and selectors](https://github.com/reactjs/redux/issues/1171 )
<add>- [#1171: Recommendations for best practices regarding action-creators, reducers, and selectors](https://github.com/reactjs/redux/issues/1171)
<ide> - [Stack Overflow: Accessing Redux state in an action creator?](http://stackoverflow.com/questions/35667249/accessing-redux-state-in-an-action-creator/35674575) | 1 |
PHP | PHP | fix typo in test methods names | e5bc19c54761ff53a8f297088a7b7d482e5b0a6b | <ide><path>tests/Database/DatabaseEloquentBelongsToManyWithDefaultAttributesTest.php
<ide> protected function tearDown(): void
<ide> m::close();
<ide> }
<ide>
<del> public function testwithPivotValueMethodSetsWhereConditionsForFetching()
<add> public function testWithPivotValueMethodSetsWhereConditionsForFetching()
<ide> {
<ide> $relation = $this->getMockBuilder(BelongsToMany::class)->setMethods(['touchIfTouching'])->setConstructorArgs($this->getRelationArguments())->getMock();
<ide> $relation->withPivotValue(['is_admin' => 1]);
<ide> }
<ide>
<del> public function testwithPivotValueMethodSetsDefaultArgumentsForInsertion()
<add> public function testWithPivotValueMethodSetsDefaultArgumentsForInsertion()
<ide> {
<ide> $relation = $this->getMockBuilder(BelongsToMany::class)->setMethods(['touchIfTouching'])->setConstructorArgs($this->getRelationArguments())->getMock();
<ide> $relation->withPivotValue(['is_admin' => 1]); | 1 |
Javascript | Javascript | fix typo in comment of jquery-bootstrap example | 08b6b28902e473ddbbbccfa8a65e83a4f5760e8a | <ide><path>examples/jquery-bootstrap/js/app.js
<ide> var BootstrapButton = React.createClass({
<ide> });
<ide>
<ide> var BootstrapModal = React.createClass({
<del> // The following two methods are the only places we need to
<add> // The following four methods are the only places we need to
<ide> // integrate with Bootstrap or jQuery!
<ide> componentDidMount: function() {
<ide> // When the component is added, turn it into a modal | 1 |
Javascript | Javascript | fix compiler tests | 3fdf0029b7c49b012fd6d000bd9e4e54e12d1a22 | <ide><path>test/Compiler-caching.test.js
<ide> describe("Compiler (caching)", () => {
<ide> options = new WebpackOptionsDefaulter().process(options);
<ide> options.cache = true;
<ide> options.entry = entry;
<add> options.optimization.moduleIds = "natural";
<ide> options.optimization.minimize = false;
<ide> options.context = path.join(__dirname, "fixtures");
<ide> options.output.path = "/"; | 1 |
Javascript | Javascript | fix style issue after eslint update | ec8e0ae697d96c417bda0bbe5be9712cf5923b1f | <ide><path>tools/eslint-rules/required-modules.js
<ide> module.exports = function(context) {
<ide> module.exports.schema = {
<ide> 'type': 'array',
<ide> 'items': [
<del> {
<del> 'enum': [0, 1, 2]
<del> }
<add> {
<add> 'enum': [0, 1, 2]
<add> }
<ide> ],
<ide> 'additionalItems': {
<ide> 'type': 'string' | 1 |
Go | Go | improve zfs init log message for zfs | 27b002f4a02e2d9f6eded9004b82cb81f121264f | <ide><path>daemon/graphdriver/zfs/zfs_linux.go
<ide> package zfs
<ide>
<ide> import (
<del> "fmt"
<del>
<ide> "github.com/docker/docker/daemon/graphdriver"
<ide> "github.com/sirupsen/logrus"
<del> "golang.org/x/sys/unix"
<ide> )
<ide>
<del>func checkRootdirFs(rootdir string) error {
<del> var buf unix.Statfs_t
<del> if err := unix.Statfs(rootdir, &buf); err != nil {
<del> return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
<add>func checkRootdirFs(rootDir string) error {
<add> fsMagic, err := graphdriver.GetFSMagic(rootDir)
<add> if err != nil {
<add> return err
<add> }
<add> backingFS := "unknown"
<add> if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
<add> backingFS = fsName
<ide> }
<ide>
<del> if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
<del> logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
<add> if fsMagic != graphdriver.FsMagicZfs {
<add> logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("driver", "zfs").Error("No zfs dataset found for root")
<ide> return graphdriver.ErrPrerequisites
<ide> }
<ide> | 1 |
Text | Text | update changelog for 2.4/2.5 releases | dd6a1bdbd0a05a06d7e388052c5760c6336b1012 | <ide><path>CHANGELOG.md
<ide> # Ember Changelog
<ide>
<del>### v2.3.0-beta.3 (December 19, 2015)
<del>
<add>### 2.5.0-beta.1
<add>
<add>- [#12829](https://github.com/emberjs/ember.js/pull/12829) [BUGFIX] Support tagless components in fastboot
<add>- [#12575](https://github.com/emberjs/ember.js/pull/12575) Make acceptance helpers fire native evets instead of jQuery ones.
<add>- [#12888](https://github.com/emberjs/ember.js/pull/12888) Add assertion for {{#each foo in bar}} syntax
<add>- [#12938](https://github.com/emberjs/ember.js/pull/12938) Expose `Router#transitionTo` as a public method
<add>- [#12929](https://github.com/emberjs/ember.js/pull/12929) [BUGFIX] Fix bug causing an initial capital to be interpreted as a global.
<add>- [#12928](https://github.com/emberjs/ember.js/pull/12928) [FEATURE ember-htmlbars-component-generation] Remove feature.
<add>- [#12998](https://github.com/emberjs/ember.js/pull/12998) Make "calling set on destroyed object" error more descriptive.
<add>- [#13007](https://github.com/emberjs/ember.js/pull/13007) [BUGFIX] Update Ember.compare to use operators
<add>- [#13024](https://github.com/emberjs/ember.js/pull/13024) [BUGFIX] Change internal async acceptance test helpers to be somewhat more efficient in determining router transition status.
<add>- [FEATURE] Add helper method named `Ember.assign` to roughly emulate `Object.assign`.
<add>
<add>### 2.4.0
<add>
<add>- [#12996](https://github.com/emberjs/ember.js/pull/12996) [BUGFIX] Fixes 12995 #with array yields true
<add>- [#13013](https://github.com/emberjs/ember.js/pull/13013) [BUGFIX] Do not set model on `{{render}}` rerender when only a single argument was provided.
<add>- [#13015](https://github.com/emberjs/ember.js/pull/13015) Add deprecation when legacy addon flag is enabled.
<add>- [#12922](https://github.com/emberjs/ember.js/pull/12922) [BUGFIX] Special case `{{#with}}` for `isTruthy`
<add>- [#12934](https://github.com/emberjs/ember.js/pull/12934) [BUGFIX] Ensure `Route#transitionTo` returns a `Transition` object.
<add>- [#12941](https://github.com/emberjs/ember.js/pull/12941) [BUGFIX] Update Backburner to latest ([compare view](https://github.com/ebryn/backburner.js/compare/22a4df33f23c40257bc49972e5833038452ded2e...325a969dbc7eae42dc1edfbf0ae9fb83923df5a6)).
<add>- [#12939](https://github.com/emberjs/ember.js/pull/12939) [BUGFIX] Avoid allocating a binding map in meta when possible.
<add>- [#12947](https://github.com/emberjs/ember.js/pull/12947) [BUGFIX] Avoid using prototype extensions if possible ({add,remove}ArrayObserver).
<add>- [#12942](https://github.com/emberjs/ember.js/pull/12942) [BUGFIX] Do not rely prototype extensions (objectAt).
<add>- [#12991](https://github.com/emberjs/ember.js/pull/12991) [BUGFIX] Fix a regression in `Ember.computed.sort`.
<add>- [#12491](https://github.com/emberjs/ember.js/pull/12491) [BUGFIX] allow watching of ES5+ Getter
<add>- [#12829](https://github.com/emberjs/ember.js/pull/12829) [BUGFIX] Support tagless components in fastboot
<add>- [#12847](https://github.com/emberjs/ember.js/pull/12847) [BUGFIX] Add warning for “deep @each” usage in dependent keys.
<add>- [#12848](https://github.com/emberjs/ember.js/pull/12848) Make dependencies that end in `@each` expand to `[]`.
<add>- [#12877](https://github.com/emberjs/ember.js/pull/12877) [BUGFIX] Upgrade htmlbars to 0.14.14. Fixes bug with lost cursor position with `<input oninput={{action 'foo'}}>`.
<add>- [#12908](https://github.com/emberjs/ember.js/pull/12908) [BUGFIX] Fix issue that prevented recomputation of `ArrayProxy` values under certain circumstances.
<add>- [#12348](https://github.com/emberjs/ember.js/pull/12348) Route#disconnectOutlet should not be private
<add>- [#12648](https://github.com/emberjs/ember.js/pull/12648) Move `packages/**/lib/main.js` to `packages/**/lib/index.js`.
<add>- [#12647](https://github.com/emberjs/ember.js/pull/12647) update cli
<add>- [#12638](https://github.com/emberjs/ember.js/pull/12638) Update references to find methods
<add>- [#12757](https://github.com/emberjs/ember.js/pull/12757) Update build related deps.
<add>- [#12662](https://github.com/emberjs/ember.js/pull/12662) correction for Ember.String.capitalize docs
<add>- [#12674](https://github.com/emberjs/ember.js/pull/12674) removed unused `name` prop
<add>- [#12664](https://github.com/emberjs/ember.js/pull/12664) Include NaN as a falsey value in the `with` helper's docstring
<add>- [#12698](https://github.com/emberjs/ember.js/pull/12698) convert all this._super.apply(this, arguments) to this._super(...arguments)
<add>
<add>### 2.3.1 (February 4, 2016)
<add>
<add>- [#12829](https://github.com/emberjs/ember.js/pull/12829) [BUGFIX] Support tagless components in fastboot.
<add>- [#12848](https://github.com/emberjs/ember.js/pull/12848) Make dependencies that end in `@each` expand to `[]`.
<add>- [#12877](https://github.com/emberjs/ember.js/pull/12877) [BUGFIX] Upgrade htmlbars to 0.14.14. Fixes bug with lost cursor position with `<input oninput={{action 'foo'}}>`.
<add>- [#12908](https://github.com/emberjs/ember.js/pull/12908) [BUGFIX] Fix issue that prevented recomputation of `ArrayProxy` values under certain circumstances.
<add>
<add>
<add>### 2.3.0 (January 17, 2016)
<add>
<add>- [#12712](https://github.com/emberjs/ember.js/pull/12712) [BUGFIX] Create a new hash parameter when creating a component cell
<add>- [#12746](https://github.com/emberjs/ember.js/pull/12746) [BUGFIX] Update htmlbars to 0.14.11 to fix [CVE-2015-7565](http://emberjs.com/blog/2016/01/14/security-releases-ember-1-11-4-1-12-2-1-13-12-2-0-3-2-1-2-2-2-1.html).
<add>- [#12752](https://github.com/emberjs/ember.js/pull/12752) [BUGFIX] Do not re-raise on errors handled in route error action.
<add>- [#12764](https://github.com/emberjs/ember.js/pull/12764) [BUGFIX] Read values of `action` helper parameters
<add>- [#12793](https://github.com/emberjs/ember.js/pull/12793) [BUGFIX] Remove jQuery version assertion.
<ide> - [#12659](https://github.com/emberjs/ember.js/pull/12659) [BUGFIX] Update HTMLBars to 0.14.7.
<ide> - [#12666](https://github.com/emberjs/ember.js/pull/12666) [BUGFIX] Prevent triggering V8 memory leak bug through registry / resolver access.
<ide> - [#12677](https://github.com/emberjs/ember.js/pull/12677) [BUGFIX] Remove FastBoot monkeypatches.
<ide> - [#12705](https://github.com/emberjs/ember.js/pull/12705) [BUGFIX] Fix FastBoot URL parsing crash.
<ide> - [#12728](https://github.com/emberjs/ember.js/pull/12728) [BUGFIX] Fix incorrect export for `Ember.computed.collect`.
<ide> - [#12731](https://github.com/emberjs/ember.js/pull/12731) [BUGFIX] Ensure `container` can still be provided to `.create` (prevents an error and provides a helpful deprecation).
<del>
<del>### v2.3.0-beta.2 (November 29, 2015)
<del>
<ide> - [#12626](https://github.com/emberjs/ember.js/pull/12626) [BUGFIX] Fix "rest" style positional params in contextual components when using dot syntax.
<ide> - [#12627](https://github.com/emberjs/ember.js/pull/12627) [CLEANUP] Remove unused `ENV` flags.
<ide> * `Ember.ENV.ENABLE_ALL_FEATURES` is removed (wasn't functionally different than `Ember.ENV.ENABLE_OPTIONAL_FEATURES`).
<ide> - [#12628](https://github.com/emberjs/ember.js/pull/12628) [BUGFIX] Fix processing arguments in rerender for contextual components.
<ide> - [#12629](https://github.com/emberjs/ember.js/pull/12629) [BUGFIX] Expose `ownerInjection` method on `ContainerProxy`.
<ide> - [#12636](https://github.com/emberjs/ember.js/pull/12636) [BUGFIX] Ensure `Ember.Mixin.prototype.toString` is defined (prevents issues with `Object.seal(Ember.Mixin.prototype)` in debug builds.
<del>
<del>### v2.3.0-beta.1 (November 16, 2015)
<del>
<ide> - [#12532](https://github.com/emberjs/ember.js/pull/12532) Bump RSVP dependency from 3.0.6 to 3.1.0.
<ide> - [#12422](https://github.com/emberjs/ember.js/pull/12422) / [#12495](https://github.com/emberjs/ember.js/pull/12495) / [#12517](https://github.com/emberjs/ember.js/pull/12517) / [#12561](https://github.com/emberjs/ember.js/pull/12561) / [#12542](https://github.com/emberjs/ember.js/pull/12542) / [#12570](https://github.com/emberjs/ember.js/pull/12570) [FEATURE ember-contextual-components]
<ide> - [#11874](https://github.com/emberjs/ember.js/pull/11874) / [#12562](https://github.com/emberjs/ember.js/pull/12562) / [#12557](https://github.com/emberjs/ember.js/pull/12557) / [#12578](https://github.com/emberjs/ember.js/pull/12578) / [#12599](https://github.com/emberjs/ember.js/pull/12599) / [#12570](https://github.com/emberjs/ember.js/pull/12570) / [#12604](https://github.com/emberjs/ember.js/pull/12604) / [#12609](https://github.com/emberjs/ember.js/pull/12609) [FEATURE ember-container-inject-owner] | 1 |
Text | Text | explain controller specific assets more thoroughly | 86c5cea9f414d34fd92adb064fde5ecc7b40c727 | <ide><path>guides/source/asset_pipeline.md
<ide> Assets can still be placed in the `public` hierarchy. Any assets under `public`
<ide>
<ide> In production, Rails precompiles these files to `public/assets` by default. The precompiled copies are then served as static assets by the web server. The files in `app/assets` are never served directly in production.
<ide>
<add>### Controller Specific Assets
<add>
<ide> When you generate a scaffold or a controller, Rails also generates a JavaScript file (or CoffeeScript file if the `coffee-rails` gem is in the `Gemfile`) and a Cascading Style Sheet file (or SCSS file if `sass-rails` is in the `Gemfile`) for that controller.
<ide>
<del>For example, if you generate a `ProjectsController`, Rails will also add a new file at `app/assets/javascripts/projects.js.coffee` and another at `app/assets/stylesheets/projects.css.scss`. You should put any JavaScript or CSS unique to a controller inside their respective asset files, as these files can then be loaded just for these controllers with lines such as `<%= javascript_include_tag params[:controller] %>` or `<%= stylesheet_link_tag params[:controller] %>`. Note that you have to set `config.assets.precompile` in `config/environments/production.rb` if you want to precomepile them and use in production mode. You can append them one by one or do something like this:
<add>For example, if you generate a `ProjectsController`, Rails will also add a new file at `app/assets/javascripts/projects.js.coffee` and another at `app/assets/stylesheets/projects.css.scss`. By default these files will be ready to use by your application immediately using the `require_tree` directive. See [Manifest Files and Directives](#manifest-files-and-directives) for more details on require_tree.
<ide>
<del> # config/environments/production.rb
<del> config.assets.precompile << Proc.new { |path|
<del> if path =~ /\.(css|js)\z/
<del> full_path = Rails.application.assets.resolve(path).to_path
<del> app_assets_path = Rails.root.join('app', 'assets').to_path
<del> if full_path.starts_with? app_assets_path
<del> puts "including asset: " + full_path
<del> true
<del> else
<del> puts "excluding asset: " + full_path
<del> false
<del> end
<del> else
<del> false
<del> end
<del> }
<add>You can also opt to include controller specific stylesheets and javascripts only in the controllers they belong to using the following: `<%= javascript_include_tag params[:controller] %>` or `<%= stylesheet_link_tag params[:controller] %>`. Ensure that you are not using the `require_tree` directive though, as this will result in your assets being included more than once.
<add>
<add>WARNING: When using asset precompiliation (the production default) you will need to ensure that your controller assets will be precompiled when loading them on a per page basis. By default .coffee and .scss files will not be precompiled on their own. This will result in false positives during development as these files will work just fine since assets will be compiled on the fly. When running in production however, you will see 500 errors since live compiliation is turned off by default. See [Precompiling Assets](#precompiling-assets) for more information on how precompiling works.
<ide>
<ide> NOTE: You must have an [ExecJS](https://github.com/sstephenson/execjs#readme) supported runtime in order to use CoffeeScript. If you are using Mac OS X or Windows you have a JavaScript runtime installed in your operating system. Check [ExecJS](https://github.com/sstephenson/execjs#readme) documentation to know all supported JavaScript runtimes.
<ide>
<add>You can also disable the generation of asset files when generating a controller by adding the following to your `config/application.rb` configuration:
<add>
<add> config.generators do |g|
<add> g.assets false
<add> end
<add>
<ide> ### Asset Organization
<ide>
<ide> Pipeline assets can be placed inside an application in one of three locations: `app/assets`, `lib/assets` or `vendor/assets`.
<ide> If you have other manifests or individual stylesheets and JavaScript files to in
<ide> config.assets.precompile += ['admin.js', 'admin.css', 'swfObject.js']
<ide> ```
<ide>
<add>Or you can opt to precompile all assets with something like this:
<add>
<add> # config/environments/production.rb
<add> config.assets.precompile << Proc.new { |path|
<add> if path =~ /\.(css|js)\z/
<add> full_path = Rails.application.assets.resolve(path).to_path
<add> app_assets_path = Rails.root.join('app', 'assets').to_path
<add> if full_path.starts_with? app_assets_path
<add> puts "including asset: " + full_path
<add> true
<add> else
<add> puts "excluding asset: " + full_path
<add> false
<add> end
<add> else
<add> false
<add> end
<add> }
<add>
<add>
<ide> NOTE. Always specify an expected compiled filename that ends with js or css, even if you want to add Sass or CoffeeScript files to the precompile array.
<ide>
<ide> The rake task also generates a `manifest.yml` that contains a list with all your assets and their respective fingerprints. This is used by the Rails helper methods to avoid handing the mapping requests back to Sprockets. A typical manifest file looks like: | 1 |
Go | Go | fix network with -b none | 3cb14df68c1a59981907fec3bccab80a1d0dda59 | <ide><path>daemon/container_linux.go
<ide> func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.
<ide> }
<ide>
<ide> func populateCommand(c *Container, env []string) error {
<del> en := &execdriver.Network{
<del> NamespacePath: c.NetworkSettings.SandboxKey,
<del> }
<add> var en *execdriver.Network
<add> if !c.daemon.config.DisableNetwork {
<add> en = &execdriver.Network{
<add> NamespacePath: c.NetworkSettings.SandboxKey,
<add> }
<ide>
<del> parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
<del> if parts[0] == "container" {
<del> nc, err := c.getNetworkedContainer()
<del> if err != nil {
<del> return err
<add> parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
<add> if parts[0] == "container" {
<add> nc, err := c.getNetworkedContainer()
<add> if err != nil {
<add> return err
<add> }
<add> en.ContainerID = nc.ID
<ide> }
<del> en.ContainerID = nc.ID
<ide> }
<ide>
<ide> ipc := &execdriver.Ipc{}
<ide> func (container *Container) getNetworkedContainer() (*Container, error) {
<ide> }
<ide>
<ide> func (container *Container) ReleaseNetwork() {
<del> if container.hostConfig.NetworkMode.IsContainer() {
<add> if container.hostConfig.NetworkMode.IsContainer() || container.daemon.config.DisableNetwork {
<ide> return
<ide> }
<ide>
<ide><path>daemon/execdriver/native/create.go
<ide> func generateIfaceName() (string, error) {
<ide> }
<ide>
<ide> func (d *driver) createNetwork(container *configs.Config, c *execdriver.Command) error {
<add> if c.Network == nil {
<add> return nil
<add> }
<ide> if c.Network.ContainerID != "" {
<ide> d.Lock()
<ide> active := d.activeContainers[c.Network.ContainerID]
<ide><path>integration-cli/docker_cli_daemon_test.go
<ide> func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) {
<ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
<ide> c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, check.Commentf("Something mounted from older daemon start: %s", mountOut))
<ide> }
<add>
<add>func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
<add> c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil)
<add>
<add> out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l")
<add> c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
<add> c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
<add> check.Commentf("There shouldn't be eth0 in container when network is disabled: %s", out))
<add>} | 3 |
Text | Text | update advanced node and express text | b81dea1e827a290bd035c9f5f44e1b195d0098ab | <ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/authentication-strategies.md
<ide> dashedName: authentication-strategies
<ide>
<ide> A strategy is a way of authenticating a user. You can use a strategy for allowing users to authenticate based on locally saved information (if you have them register first) or from a variety of providers such as Google or GitHub. For this project, we will use Passport middleware. Passport provides a comprehensive set of strategies that support authentication using a username and password, GitHub, Google, and more.
<ide>
<del>Add `passport-local@~1.0.0` as a dependency and add it to your server as follows: `const LocalStrategy = require('passport-local');`
<add>`passport-local@~1.0.0` has already been added as a dependency, so add it to your server as follows: `const LocalStrategy = require('passport-local');`
<ide>
<ide> Now you will have to tell passport to **use** an instantiated LocalStrategy object with a few settings defined. Make sure this (as well as everything from this point on) is encapsulated in the database connection since it relies on it!
<ide>
<ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/authentication-with-socket.io.md
<ide> dashedName: authentication-with-socket-io
<ide>
<ide> Currently, you cannot determine who is connected to your web socket. While `req.user` contains the user object, that's only when your user interacts with the web server, and with web sockets you have no `req` (request) and therefore no user data. One way to solve the problem of knowing who is connected to your web socket is by parsing and decoding the cookie that contains the passport session then deserializing it to obtain the user object. Luckily, there is a package on NPM just for this that turns a once complex task into something simple!
<ide>
<del>Add `passport.socketio@~3.7.0`, `connect-mongo@~3.2.0`, and `cookie-parser@~1.4.5` as dependencies and require them as `passportSocketIo`, `MongoStore`, and `cookieParser` respectively. Also, we need to initialize a new memory store, from `express-session` which we previously required. It should look like this:
<add>`passport.socketio@~3.7.0`, `connect-mongo@~3.2.0`, and `cookie-parser@~1.4.5` have already been added as dependencies. Require them as `passportSocketIo`, `MongoStore`, and `cookieParser` respectively. Also, we need to initialize a new memory store, from `express-session` which we previously required. It should look like this:
<ide>
<ide> ```js
<ide> const MongoStore = require('connect-mongo')(session);
<ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/hashing-your-passwords.md
<ide> dashedName: hashing-your-passwords
<ide>
<ide> Going back to the information security section, you may remember that storing plaintext passwords is *never* okay. Now it is time to implement BCrypt to solve this issue.
<ide>
<del>Add `bcrypt@~5.0.0` as a dependency, and require it in your server. You will need to handle hashing in 2 key areas: where you handle registering/saving a new account, and when you check to see that a password is correct on login.
<add>`bcrypt@~5.0.0` has already been added as a dependency, so require it in your server. You will need to handle hashing in 2 key areas: where you handle registering/saving a new account, and when you check to see that a password is correct on login.
<ide>
<ide> Currently on our registration route, you insert a user's password into the database like so: `password: req.body.password`. An easy way to implement saving a hash instead is to add the following before your database logic `const hash = bcrypt.hashSync(req.body.password, 12);`, and replacing the `req.body.password` in the database saving with just `password: hash`.
<ide>
<ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/implementation-of-social-authentication-ii.md
<ide> dashedName: implementation-of-social-authentication-ii
<ide>
<ide> # --description--
<ide>
<del>The last part of setting up your GitHub authentication is to create the strategy itself. For this, you will need to add the dependency of `passport-github@~1.1.0` to your project and require it in your `auth.js` as `GithubStrategy` like this: `const GitHubStrategy = require('passport-github').Strategy;`. Do not forget to require and configure `dotenv` to use your environment variables.
<add>The last part of setting up your GitHub authentication is to create the strategy itself. `passport-github@~1.1.0` has already been added as a dependency, so require it in your `auth.js` file as `GithubStrategy` like this: `const GitHubStrategy = require('passport-github').Strategy;`. Do not forget to require and configure `dotenv` to use your environment variables.
<ide>
<ide> To set up the GitHub strategy, you have to tell Passport to use an instantiated `GitHubStrategy`, which accepts 2 arguments: an object (containing `clientID`, `clientSecret`, and `callbackURL`) and a function to be called when a user is successfully authenticated, which will determine if the user is new and what fields to save initially in the user's database object. This is common across many strategies, but some may require more information as outlined in that specific strategy's GitHub README. For example, Google requires a *scope* as well which determines what kind of information your request is asking to be returned and asks the user to approve such access.
<ide>
<ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/serialization-of-a-user-object.md
<ide> Serialization and deserialization are important concepts in regards to authentic
<ide>
<ide> To set this up properly, we need to have a serialize function and a deserialize function. In Passport, we create these with `passport.serializeUser( OURFUNCTION )` and `passport.deserializeUser( OURFUNCTION )`
<ide>
<del>The `serializeUser` is called with 2 arguments, the full user object and a callback used by passport. A unique key to identify that user should be returned in the callback, the easiest one to use being the user's `_id` in the object. It should be unique as it is generated by MongoDB. Similarly, `deserializeUser` is called with that key and a callback function for passport as well, but, this time, we have to take that key and return the full user object to the callback. To make a query search for a Mongo `_id`, you will have to create `const ObjectID = require('mongodb').ObjectID;`, and then to use it you call `new ObjectID(THE_ID)`. Be sure to add `mongodb@~3.6.0` as a dependency. You can see this in the examples below:
<add>The `serializeUser` is called with 2 arguments, the full user object and a callback used by passport. A unique key to identify that user should be returned in the callback, the easiest one to use being the user's `_id` in the object. It should be unique as it is generated by MongoDB. Similarly, `deserializeUser` is called with that key and a callback function for passport as well, but, this time, we have to take that key and return the full user object to the callback. To make a query search for a Mongo `_id`, you will have to create `const ObjectID = require('mongodb').ObjectID;`, and then to use it you call `new ObjectID(THE_ID)`. `mongodb@~3.6.0` has already been added as a dependency. You can see this in the examples below:
<ide>
<ide> ```js
<ide> passport.serializeUser((user, done) => {
<ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/set-up-a-template-engine.md
<ide> When you are done, make sure a working demo of your project is hosted somewhere
<ide>
<ide> A template engine enables you to use static template files (such as those written in *Pug*) in your app. At runtime, the template engine replaces variables in a template file with actual values which can be supplied by your server. Then it transforms the template into a static HTML file that is sent to the client. This approach makes it easier to design an HTML page and allows for displaying variables on the page without needing to make an API call from the client.
<ide>
<del>Add `pug@~3.0.0` as a dependency in your `package.json` file.
<add>`pug@~3.0.0` has already been installed, and is listed as a dependency in your `package.json` file.
<ide>
<ide> Express needs to know which template engine you are using. We will use the `set` method to assign `pug` as the `view engine` property's value: `app.set('view engine', 'pug')`
<ide>
<ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/set-up-passport.md
<ide> dashedName: set-up-passport
<ide>
<ide> # --description--
<ide>
<del>It's time to set up *Passport* so we can finally start allowing a user to register or login to an account! In addition to Passport, we will use Express-session to handle sessions. Using this middleware saves the session id as a cookie in the client and allows us to access the session data using that id on the server. This way we keep personal account information out of the cookie used by the client to verify to our server they are authenticated and just keep the *key* to access the data stored on the server.
<add>It's time to set up *Passport* so we can finally start allowing a user to register or login to an account! In addition to Passport, we will use Express-session to handle sessions. Express-session has a ton of advanced features you can use, but for now we're just going to use the basics! Using this middleware saves the session id as a cookie in the client and allows us to access the session data using that id on the server. This way we keep personal account information out of the cookie used by the client to verify to our server they are authenticated and just keep the *key* to access the data stored on the server.
<ide>
<del>To set up Passport for use in your project, you will need to add it as a dependency first in your package.json. `passport@~0.4.1`
<del>
<del>In addition, add Express-session as a dependency now as well. Express-session has a ton of advanced features you can use but for now we're just going to use the basics! `express-session@~1.17.1`
<add>`passport@~0.4.1` and `express-session@~1.17.1` are already installed, and are both listed as dependencies in your `package.json` file.
<ide>
<ide> You will need to set up the session settings now and initialize Passport. Be sure to first create the variables 'session' and 'passport' to require 'express-session' and 'passport' respectively.
<ide>
<ide><path>curriculum/challenges/english/06-quality-assurance/advanced-node-and-express/set-up-the-environment.md
<ide> dashedName: set-up-the-environment
<ide>
<ide> The following challenges will make use of the `chat.pug` file. So, in your `routes.js` file, add a GET route pointing to `/chat` which makes use of `ensureAuthenticated`, and renders `chat.pug`, with `{ user: req.user }` passed as an argument to the response. Now, alter your existing `/auth/github/callback` route to set the `req.session.user_id = req.user.id`, and redirect to `/chat`.
<ide>
<del>Add `socket.io@~2.3.0` as a dependency and require/instantiate it in your server defined as follows, with `http` (comes built-in with Nodejs):
<add>`socket.io@~2.3.0` has already been added as a dependency, so require/instantiate it in your server as follows with `http` (comes built-in with Nodejs):
<ide>
<ide> ```javascript
<ide> const http = require('http').createServer(app); | 8 |
Text | Text | add steps to contibuting.md | fbdd0ab71873374955e86fae859f882916c09b94 | <ide><path>CONTRIBUTING.md
<ide>
<ide> We want to make contributing to this project as easy and transparent as
<ide> possible. Hopefully this document makes the process for contributing clear and
<del>answers any questions you may have. If not, feel free to open an [Issue](https://github.com/facebook/immutable-js/issues).
<add>answers any questions you may have. If not, feel free to open an
<add>[Issue](https://github.com/facebook/immutable-js/issues).
<ide>
<ide> ## Pull Requests
<ide>
<ide> All active development of Immutable JS happens on GitHub. We actively welcome
<ide> your [pull requests](https://help.github.com/articles/creating-a-pull-request).
<ide>
<ide> 1. Fork the repo and create your branch from `master`.
<del> 2. If you've added code, add tests.
<del> 3. If you've changed APIs, update the documentation.
<del> 4. Ensure all tests pass. (`grunt test`)
<del> 5. Make sure your code passes lint. (`grunt lint`)
<del> 6. If you haven't already, complete the Contributor License Agreement ("CLA").
<add> 2. Install all dependencies. (`npm install`)
<add> 3. Install the grunt CLI tools. (`npm install -g grunt-cli`)
<add> 4. If you've added code, add tests.
<add> 5. If you've changed APIs, update the documentation.
<add> 6. Ensure all tests pass. (`grunt test`)
<add> 7. Make sure your code passes lint. (`grunt lint`)
<add> 8. Be sure to commit the generated JS in `/dist`.
<add> 8. If you haven't already, complete the Contributor License Agreement ("CLA").
<ide>
<ide> ## Contributor License Agreement ("CLA")
<ide>
<ide> outlined on that page and do not file a public issue.
<ide> * Trailing commas,
<ide> * Avd abbr wrds.
<ide>
<del>
<ide> ## License
<ide>
<ide> By contributing to Immutable JS, you agree that your contributions will be | 1 |
Go | Go | run testimportextremelylargeimageworks in parallel | ae3ca7bb9768c8136db373ccad95142168dae287 | <ide><path>integration/image/import_test.go
<ide> import (
<ide> "testing"
<ide>
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/internal/test/daemon"
<ide> "github.com/docker/docker/internal/testutil"
<ide> "gotest.tools/skip"
<ide> )
<ide>
<ide> // Ensure we don't regress on CVE-2017-14992.
<ide> func TestImportExtremelyLargeImageWorks(t *testing.T) {
<add> skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
<ide> skip.If(t, runtime.GOARCH == "arm64", "effective test will be time out")
<ide> skip.If(t, testEnv.OSType == "windows", "TODO enable on windows")
<add> t.Parallel()
<ide>
<del> defer setupTest(t)()
<del> client := testEnv.APIClient()
<add> // Spin up a new daemon, so that we can run this test in parallel (it's a slow test)
<add> d := daemon.New(t)
<add> d.Start(t)
<add> defer d.Stop(t)
<add>
<add> client := d.NewClientT(t)
<ide>
<ide> // Construct an empty tar archive with about 8GB of junk padding at the
<ide> // end. This should not cause any crashes (the padding should be mostly | 1 |
Javascript | Javascript | simplify defineproperty to match what mixin does | ac900848c109addd2429b7f8d8ca5f08cb78a2a3 | <ide><path>packages/ember-metal/lib/properties.js
<ide> var Descriptor = Ember.Descriptor = function() {};
<ide> // DEFINING PROPERTIES API
<ide> //
<ide>
<del>/** @private */
<del>function hasDesc(descs, keyName) {
<del> if (keyName === 'toString') return 'function' !== typeof descs.toString;
<del> else return !!descs[keyName];
<del>}
<del>
<ide> /**
<ide> @private
<ide>
<ide> function hasDesc(descs, keyName) {
<ide> }).property('firstName', 'lastName').cacheable());
<ide> */
<ide> Ember.defineProperty = function(obj, keyName, desc, val) {
<del> var meta = obj[META_KEY] || EMPTY_META,
<del> descs = meta && meta.descs,
<del> native = keyName in {},
<del> watching = !native && meta.watching[keyName],
<del> descriptor = desc instanceof Ember.Descriptor;
<del>
<del> var existingDesc = hasDesc(descs, keyName);
<del>
<del> if (val === undefined && descriptor) {
<add> var meta = metaFor(obj),
<add> descs = meta.descs,
<add> existingDesc = meta.descs[keyName];
<ide>
<del> if (existingDesc) { val = descs[keyName].teardown(obj, keyName); }
<del> else { val = obj[keyName]; }
<del>
<del> } else if (existingDesc) {
<del> // otherwise, tear down the descriptor, but use the provided
<del> // value as the new value instead of the descriptor's current
<del> // value.
<del> descs[keyName].teardown(obj, keyName);
<add> if (existingDesc instanceof Ember.Descriptor) {
<add> existingDesc.teardown(obj, keyName);
<ide> }
<ide>
<del> if (descriptor) {
<del> meta = metaFor(obj);
<del> descs = meta.descs;
<del>
<add> if (desc instanceof Ember.Descriptor) {
<ide> descs[keyName] = desc;
<del> obj[keyName] = val;
<del> desc.setup(obj, keyName, val);
<add> obj[keyName] = undefined; // make enumerable
<add> desc.setup(obj, keyName);
<ide> } else {
<del> if (!native && descs[keyName]) { metaFor(obj).descs[keyName] = null; }
<del>
<add> descs[keyName] = undefined; // shadow descriptor in proto
<ide> if (desc == null) {
<del> if (existingDesc) {
<del> objectDefineProperty(obj, keyName, {
<del> enumerable: true,
<del> configurable: true,
<del> writable: true,
<del> value: undefined
<del> });
<del> }
<ide> obj[keyName] = val;
<ide> } else {
<ide> // compatibility with ES5
<ide> Ember.defineProperty = function(obj, keyName, desc, val) {
<ide>
<ide> // if key is being watched, override chains that
<ide> // were initialized with the prototype
<del> if (watching) { Ember.overrideChains(obj, keyName, meta); }
<add> if (meta.watching[keyName]) { Ember.overrideChains(obj, keyName, meta); }
<ide>
<ide> return this;
<ide> }; | 1 |
Text | Text | add adversarial text to main readme | 94924d5d453c431193cf8dea06bba0f6954e6288 | <ide><path>README.md
<ide> running TensorFlow 0.12 or earlier, please
<ide>
<ide>
<ide> ## Models
<add>- [adversarial_text](adversarial_text): semi-supervised sequence learning with
<add> adversarial training.
<ide> - [autoencoder](autoencoder): various autoencoders.
<ide> - [compression](compression): compressing and decompressing images using a pre-trained Residual GRU network.
<ide> - [differential_privacy](differential_privacy): privacy-preserving student models from multiple teachers. | 1 |
Python | Python | add more examples to bert models for torchhub | cf44d9839202d4d67cdc66fbb46162904587409f | <ide><path>hubconfs/bert_hubconf.py
<ide> def bertTokenizer(*args, **kwargs):
<ide>
<ide> Example:
<ide> >>> sentence = 'Hello, World!'
<del> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False, force_reload=False)
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<ide> >>> toks = tokenizer.tokenize(sentence)
<ide> ['Hello', '##,', 'World', '##!']
<ide> >>> ids = tokenizer.convert_tokens_to_ids(toks)
<ide> def bertModel(*args, **kwargs):
<ide>
<ide> Example:
<ide> # Load the tokenizer
<del> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False, force_reload=False)
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<ide> # Prepare tokenized input
<ide> >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
<ide> >>> tokenized_text = tokenizer.tokenize(text)
<ide> def bertModel(*args, **kwargs):
<ide> >>> segments_tensors = torch.tensor([segments_ids])
<ide> tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]])
<ide> # Load bertModel
<del> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertModel', 'bert-base-cased', force_reload=False)
<add> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertModel', 'bert-base-cased')
<ide> >>> model.eval()
<ide> # Predict hidden states features for each layer
<ide> >>> with torch.no_grad():
<ide> def bertForNextSentencePrediction(*args, **kwargs):
<ide> BERT model with next sentence prediction head.
<ide> This module comprises the BERT model followed by the next sentence
<ide> classification head.
<add>
<add> Example:
<add> # Load the tokenizer
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<add> # Prepare tokenized input
<add> >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
<add> >>> tokenized_text = tokenizer.tokenize(text)
<add> >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
<add> >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
<add> >>> tokens_tensor = torch.tensor([indexed_tokens])
<add> >>> segments_tensors = torch.tensor([segments_ids])
<add> # Load bertForNextSentencePrediction
<add> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForNextSentencePrediction', 'bert-base-cased')
<add> >>> model.eval()
<add> # Predict the next sentence classification logits
<add> >>> with torch.no_grad():
<add> next_sent_classif_logits = model(tokens_tensor, segments_tensors)
<ide> """
<ide> model = BertForNextSentencePrediction.from_pretrained(*args, **kwargs)
<ide> return model
<ide> def bertForMaskedLM(*args, **kwargs):
<ide>
<ide> Example:
<ide> # Load the tokenizer
<del> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False, force_reload=False)
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<ide> # Prepare tokenized input
<ide> >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
<ide> >>> tokenized_text = tokenizer.tokenize(text)
<ide> def bertForMaskedLM(*args, **kwargs):
<ide> >>> tokens_tensor = torch.tensor([indexed_tokens])
<ide> >>> segments_tensors = torch.tensor([segments_ids])
<ide> # Load bertForMaskedLM
<del> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForMaskedLM', 'bert-base-cased', force_reload=False)
<add> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForMaskedLM', 'bert-base-cased')
<ide> >>> model.eval()
<ide> # Predict all tokens
<ide> >>> with torch.no_grad():
<ide> def bertForSequenceClassification(*args, **kwargs):
<ide> num_labels: the number (>=2) of classes for the classifier.
<ide>
<ide> Example:
<del> >>> torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForSequenceClassification', 'bert-base-cased', num_labels=2, force_reload=True)
<add> # Load the tokenizer
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<add> # Prepare tokenized input
<add> >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
<add> >>> tokenized_text = tokenizer.tokenize(text)
<add> >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
<add> >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
<add> >>> tokens_tensor = torch.tensor([indexed_tokens])
<add> >>> segments_tensors = torch.tensor([segments_ids])
<add> # Load bertForSequenceClassification
<add> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForSequenceClassification', 'bert-base-cased', num_labels=2)
<add> >>> model.eval()
<add> # Predict the sequence classification logits
<add> >>> with torch.no_grad():
<add> seq_classif_logits = model(tokens_tensor, segments_tensors)
<add> # Or get the sequence classification loss
<add> >>> labels = torch.tensor([1])
<add> >>> with torch.no_grad():
<add> seq_classif_loss = model(tokens_tensor, segments_tensors, labels=labels)
<ide> """
<ide> model = BertForSequenceClassification.from_pretrained(*args, **kwargs)
<ide> return model
<ide> def bertForMultipleChoice(*args, **kwargs):
<ide> num_choices: the number (>=2) of classes for the classifier.
<ide>
<ide> Example:
<del> >>> torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForMultipleChoice', 'bert-base-cased', num_choices=2, force_reload=True)
<add> # Load the tokenizer
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<add> # Prepare tokenized input
<add> >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
<add> >>> tokenized_text = tokenizer.tokenize(text)
<add> >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
<add> >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
<add> >>> tokens_tensor = torch.tensor([indexed_tokens, indexed_tokens]).unsqueeze(0)
<add> >>> segments_tensors = torch.tensor([segments_ids, segments_ids]).unsqueeze(0)
<add> # Load bertForMultipleChoice
<add> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForMultipleChoice', 'bert-base-cased', num_choices=2)
<add> >>> model.eval()
<add> # Predict the multiple choice logits
<add> >>> with torch.no_grad():
<add> multiple_choice_logits = model(tokens_tensor, segments_tensors)
<add> # Or get the multiple choice loss
<add> >>> labels = torch.tensor([1])
<add> >>> with torch.no_grad():
<add> multiple_choice_loss = model(tokens_tensor, segments_tensors, labels=labels)
<ide> """
<ide> model = BertForMultipleChoice.from_pretrained(*args, **kwargs)
<ide> return model
<ide> def bertForQuestionAnswering(*args, **kwargs):
<ide> BertForQuestionAnswering is a fine-tuning model that includes BertModel
<ide> with a token-level classifiers on top of the full sequence of last hidden
<ide> states.
<add>
<add> Example:
<add> # Load the tokenizer
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<add> # Prepare tokenized input
<add> >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
<add> >>> tokenized_text = tokenizer.tokenize(text)
<add> >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
<add> >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
<add> >>> tokens_tensor = torch.tensor([indexed_tokens])
<add> >>> segments_tensors = torch.tensor([segments_ids])
<add> # Load bertForQuestionAnswering
<add> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForQuestionAnswering', 'bert-base-cased')
<add> >>> model.eval()
<add> # Predict the start and end positions logits
<add> >>> with torch.no_grad():
<add> start_logits, end_logits = model(tokens_tensor, segments_tensors)
<add> # Or get the total loss which is the sum of the CrossEntropy loss for the start and end token positions
<add> >>> start_positions, end_positions = torch.tensor([12]), torch.tensor([14])
<add> >>> with torch.no_grad():
<add> multiple_choice_loss = model(tokens_tensor, segments_tensors, start_positions=start_positions, end_positions=end_positions)
<ide> """
<ide> model = BertForQuestionAnswering.from_pretrained(*args, **kwargs)
<ide> return model
<ide> def bertForTokenClassification(*args, **kwargs):
<ide> num_labels: the number (>=2) of classes for the classifier.
<ide>
<ide> Example:
<del> >>> torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForTokenClassification', 'bert-base-cased', num_labels=2, force_reload=True)
<add> # Load the tokenizer
<add> >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
<add> # Prepare tokenized input
<add> >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
<add> >>> tokenized_text = tokenizer.tokenize(text)
<add> >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
<add> >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
<add> >>> tokens_tensor = torch.tensor([indexed_tokens])
<add> >>> segments_tensors = torch.tensor([segments_ids])
<add> # Load bertForTokenClassification
<add> >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForTokenClassification', 'bert-base-cased', num_labels=2)
<add> >>> model.eval()
<add> # Predict the token classification logits
<add> >>> with torch.no_grad():
<add> classif_logits = model(tokens_tensor, segments_tensors)
<add> # Or get the token classification loss
<add> >>> labels = torch.tensor([[0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0]])
<add> >>> with torch.no_grad():
<add> classif_loss = model(tokens_tensor, segments_tensors, labels=labels)
<ide> """
<ide> model = BertForTokenClassification.from_pretrained(*args, **kwargs)
<ide> return model | 1 |
Javascript | Javascript | fix empty children handling for revalue | ab525c4784335113a9c60f6236a9ddc2a4c63af4 | <ide><path>d3.layout.js
<ide> d3.layout.hierarchy = function() {
<ide> // Also converts the data representation into a standard hierarchy structure.
<ide> function recurse(data, depth, nodes) {
<ide> var childs = children.call(hierarchy, data, depth),
<del> n,
<ide> node = d3_layout_hierarchyInline ? data : {data: data};
<ide> node.depth = depth;
<ide> nodes.push(node);
<ide> if (childs && (n = childs.length)) {
<ide> var i = -1,
<add> n,
<ide> c = node.children = [],
<ide> v = 0,
<ide> j = depth + 1;
<ide> d3.layout.hierarchy = function() {
<ide> function revalue(node, depth) {
<ide> var children = node.children,
<ide> v = 0;
<del> if (children) {
<add> if (children && (n = children.length)) {
<ide> var i = -1,
<del> n = children.length,
<add> n,
<ide> j = depth + 1;
<ide> while (++i < n) v += revalue(children[i], j);
<ide> } else if (value) {
<ide> d3.layout.hierarchy = function() {
<ide> return root;
<ide> };
<ide>
<add> // If the new API is used, enabling inlining.
<add> hierarchy.nodes = function(d) {
<add> d3_layout_hierarchyInline = true;
<add> return (hierarchy.nodes = hierarchy)(d);
<add> };
<add>
<ide> return hierarchy;
<ide> };
<ide>
<ide> d3.layout.cluster = function() {
<ide>
<ide> // First walk, computing the initial x & y values.
<ide> d3_layout_treeVisitAfter(root, function(node) {
<del> if (node.children) {
<del> node.x = d3_layout_clusterX(node.children);
<del> node.y = d3_layout_clusterY(node.children);
<add> var children = node.children;
<add> if (children && children.length) {
<add> node.x = d3_layout_clusterX(children);
<add> node.y = d3_layout_clusterY(children);
<ide> } else {
<ide> node.x = previousNode ? x += separation(node, previousNode) : 0;
<ide> node.y = 0;
<ide> function d3_layout_clusterX(children) {
<ide>
<ide> function d3_layout_clusterLeft(node) {
<ide> var children = node.children;
<del> return children ? d3_layout_clusterLeft(children[0]) : node;
<add> return children && children.length
<add> ? d3_layout_clusterLeft(children[0]) : node;
<ide> }
<ide>
<ide> function d3_layout_clusterRight(node) {
<del> var children = node.children;
<del> return children ? d3_layout_clusterRight(children[children.length - 1]) : node;
<add> var children = node.children,
<add> n;
<add> return children && (n = children.length)
<add> ? d3_layout_clusterRight(children[n - 1]) : node;
<ide> }
<ide> // Node-link tree diagram using the Reingold-Tilford "tidy" algorithm
<ide> d3.layout.tree = function() {
<ide><path>d3.layout.min.js
<del>(function(){function bc(a,b){var c=a.x+b[3],d=a.y+b[0],e=a.dx-b[1]-b[3],f=a.dy-b[0]-b[2];e<0&&(c+=e/2,e=0),f<0&&(d+=f/2,f=0);return{x:c,y:d,dx:e,dy:f}}function bb(a){return{x:a.x,y:a.y,dx:a.dx,dy:a.dy}}function ba(a,b,c){return a._tree.ancestor.parent==b.parent?a._tree.ancestor:c}function _(a,b,c){a=a._tree,b=b._tree;var d=c/(b.number-a.number);a.change+=d,b.change-=d,b.shift+=c,b.prelim+=c,b.mod+=c}function $(a){var b=0,c=0,d=a.children,e=d.length,f;while(--e>=0)f=d[e]._tree,f.prelim+=b,f.mod+=b,b+=f.shift+(c+=f.change)}function Z(a,b){function c(a,d){var e=a.children;if(e){var f,g=null,h=-1,i=e.length;while(++h<i)f=e[h],c(f,g),g=f}b(a,d)}c(a,null)}function Y(a,b){return a.depth-b.depth}function X(a,b){return b.x-a.x}function W(a,b){return a.x-b.x}function V(a,b){var c=a.children;if(c){var d,e=c.length,f=-1;while(++f<e)b(d=V(c[f],b),a)>0&&(a=d)}return a}function U(a){return a.children?a.children[a.children.length-1]:a._tree.thread}function T(a){return a.children?a.children[0]:a._tree.thread}function S(a,b){return a.parent==b.parent?1:2}function R(a){var b=a.children;return b?R(b[b.length-1]):a}function Q(a){var b=a.children;return b?Q(b[0]):a}function P(a){return a.reduce(function(a,b){return a+b.x},0)/a.length}function O(a){return 1+d3.max(a,function(a){return a.y})}function N(a,b,c){var d=b.r+c.r,e=a.r+c.r,f=b.x-a.x,g=b.y-a.y,h=Math.sqrt(f*f+g*g),i=(e*e+h*h-d*d)/(2*e*h),j=Math.acos(i),k=i*e,l=Math.sin(j)*e;f/=h,g/=h,c.x=a.x+k*f+l*g,c.y=a.y+k*g-l*f}function M(a,b,c,d){var e=a.children;a.x=b+=d*a.x,a.y=c+=d*a.y,a.r*=d;if(e){var f=-1,g=e.length;while(++f<g)M(e[f],b,c,d)}}function L(a){var b=a.children;b?(b.forEach(L),a.r=I(b)):a.r=Math.sqrt(a.value)}function K(a){delete a._pack_next,delete a._pack_prev}function J(a){a._pack_next=a._pack_prev=a}function I(a){function l(a){b=Math.min(a.x-a.r,b),c=Math.max(a.x+a.r,c),d=Math.min(a.y-a.r,d),e=Math.max(a.y+a.r,e)}var 
b=Infinity,c=-Infinity,d=Infinity,e=-Infinity,f=a.length,g,h,i,j,k;a.forEach(J),g=a[0],g.x=-g.r,g.y=0,l(g);if(f>1){h=a[1],h.x=h.r,h.y=0,l(h);if(f>2){i=a[2],N(g,h,i),l(i),F(g,i),g._pack_prev=i,F(i,h),h=g._pack_next;for(var m=3;m<f;m++){N(g,h,i=a[m]);var n=0,o=1,p=1;for(j=h._pack_next;j!==h;j=j._pack_next,o++)if(H(j,i)){n=1;break}if(n==1)for(k=g._pack_prev;k!==j._pack_prev;k=k._pack_prev,p++)if(H(k,i)){p<o&&(n=-1,j=k);break}n==0?(F(g,i),h=i,l(i)):n>0?(G(g,j),h=j,m--):(G(j,h),g=j,m--)}}}var q=(b+c)/2,r=(d+e)/2,s=0;for(var m=0;m<f;m++){var t=a[m];t.x-=q,t.y-=r,s=Math.max(s,t.r+Math.sqrt(t.x*t.x+t.y*t.y))}a.forEach(K);return s}function H(a,b){var c=b.x-a.x,d=b.y-a.y,e=a.r+b.r;return e*e-c*c-d*d>.001}function G(a,b){a._pack_next=b,b._pack_prev=a}function F(a,b){var c=a._pack_next;a._pack_next=b,b._pack_prev=a,b._pack_next=c,c._pack_prev=b}function E(a,b){return a.value-b.value}function C(a){return d3.merge(a.map(function(a){return(a.children||[]).map(function(b){return{source:a,target:b}})}))}function B(a,b){return b.value-a.value}function A(a){return a.value}function z(a){return a.children}function y(a,b){a.sort=d3.rebind(a,b.sort),a.children=d3.rebind(a,b.children),a.links=C,a.value=d3.rebind(a,b.value),a.nodes=function(b){D=!0;return(a.nodes=a)(b)};return a}function x(a){return[d3.min(a),d3.max(a)]}function w(a,b){var c=-1,d=+a[0],e=(a[1]-d)/b,f=[];while(++c<=b)f[c]=e*c+d;return f}function v(a,b){return w(a,Math.ceil(Math.log(b.length)/Math.LN2+1))}function u(a,b){return a+b[1]}function t(a){return a.reduce(u,0)}function s(a){var b=1,c=0,d=a[0][1],e,f=a.length;for(;b<f;++b)(e=a[b][1])>d&&(c=b,d=e);return c}function p(a,b,c){a.y0=b,a.y=c}function o(a){return a.y}function n(a){return a.x}function m(a){return 1}function l(a){return 20}function k(a){var b=0,c=0;a.count=0;if(!a.leaf){var 
d=a.nodes,e=d.length,f=-1,g;while(++f<e){g=d[f];if(g==null)continue;k(g),a.count+=g.count,b+=g.count*g.cx,c+=g.count*g.cy}}a.point&&(a.leaf||(a.point.x+=Math.random()-.5,a.point.y+=Math.random()-.5),a.count++,b+=a.point.x,c+=a.point.y),a.cx=b/a.count,a.cy=c/a.count}function j(){f.px+=d3.event.dx,f.py+=d3.event.dy,e.resume()}function i(){j(),f.fixed&=1,e=f=null}function h(a){a!==f&&(a.fixed&=1)}function g(a){a.fixed|=2}function c(a,c){if(a===c)return a;var d=b(a),e=b(c),f=d.pop(),g=e.pop(),h=null;while(f===g)h=f,f=d.pop(),g=e.pop();return h}function b(a){var b=[],c=a.parent;while(c!=null)b.push(a),a=c,c=c.parent;b.push(a);return b}function a(a){var b=a.source,d=a.target,e=c(b,d),f=[b];while(b!==e)b=b.parent,f.push(b);var g=f.length;while(d!==e)f.splice(g,0,d),d=d.parent;return f}d3.layout={},d3.layout.bundle=function(){return function(b){var c=[],d=-1,e=b.length;while(++d<e)c.push(a(b[d]));return c}},d3.layout.chord=function(){function k(){b.sort(function(a,b){return i(a.target.value,b.target.value)})}function j(){var a={},j=[],l=d3.range(e),m=[],n,o,p,q,r;b=[],c=[],n=0,q=-1;while(++q<e){o=0,r=-1;while(++r<e)o+=d[q][r];j.push(o),m.push(d3.range(e)),n+=o}g&&l.sort(function(a,b){return g(j[a],j[b])}),h&&m.forEach(function(a,b){a.sort(function(a,c){return h(d[b][a],d[b][c])})}),n=(2*Math.PI-f*e)/n,o=0,q=-1;while(++q<e){p=o,r=-1;while(++r<e){var s=l[q],t=m[q][r],u=d[s][t];a[s+"-"+t]={index:s,subindex:t,startAngle:o,endAngle:o+=u*n,value:u}}c.push({index:s,startAngle:p,endAngle:o,value:(o-p)/n}),o+=f}q=-1;while(++q<e){r=q-1;while(++r<e){var v=a[q+"-"+r],w=a[r+"-"+q];(v.value||w.value)&&b.push(v.value<w.value?{source:w,target:v}:{source:v,target:w})}}i&&k()}var a={},b,c,d,e,f=0,g,h,i;a.matrix=function(f){if(!arguments.length)return d;e=(d=f)&&d.length,b=c=null;return a},a.padding=function(d){if(!arguments.length)return f;f=d,b=c=null;return a},a.sortGroups=function(d){if(!arguments.length)return g;g=d,b=c=null;return 
a},a.sortSubgroups=function(c){if(!arguments.length)return h;h=c,b=null;return a},a.sortChords=function(c){if(!arguments.length)return i;i=c,b&&k();return a},a.chords=function(){b||j();return b},a.groups=function(){c||j();return c};return a},d3.layout.force=function(){function B(b){g(f=b),e=a}function A(){var a=v.length,d=w.length,e,f,g,h,i,j,l,m,p;for(f=0;f<d;++f){g=w[f],h=g.source,i=g.target,m=i.x-h.x,p=i.y-h.y;if(j=m*m+p*p)j=n*y[f]*((j=Math.sqrt(j))-x[f])/j,m*=j,p*=j,i.x-=m*(l=h.weight/(i.weight+h.weight)),i.y-=p*l,h.x+=m*(l=1-l),h.y+=p*l}if(l=n*s){m=c[0]/2,p=c[1]/2,f=-1;if(l)while(++f<a)g=v[f],g.x+=(m-g.x)*l,g.y+=(p-g.y)*l}if(l=n*r){k(e=d3.geom.quadtree(v)),f=-1;while(++f<a)(g=v[f]).fixed||e.visit(z(g,l))}f=-1;while(++f<a)g=v[f],g.fixed?(g.x=g.px,g.y=g.py):(g.x-=(g.px-(g.px=g.x))*o,g.y-=(g.py-(g.py=g.y))*o);b.tick.dispatch({type:"tick",alpha:n});return(n*=.99)<.005}function z(a,b){return function(c,d,e,f,g){if(c.point!==a){var h=c.cx-a.x,i=c.cy-a.y,j=1/Math.sqrt(h*h+i*i);if((f-d)*j<t){var k=b*c.count*j*j;a.px-=h*k,a.py-=i*k;return!0}if(c.point&&isFinite(j)){var k=b*j*j;a.px-=h*k,a.py-=i*k}}}}var a={},b=d3.dispatch("tick"),c=[1,1],d,n,o=.9,p=l,q=m,r=-30,s=.1,t=.8,u,v=[],w=[],x,y;a.on=function(c,d){b[c].add(d);return a},a.nodes=function(b){if(!arguments.length)return v;v=b;return a},a.links=function(b){if(!arguments.length)return w;w=b;return a},a.size=function(b){if(!arguments.length)return c;c=b;return a},a.linkDistance=function(b){if(!arguments.length)return p;p=d3.functor(b);return a},a.distance=a.linkDistance,a.linkStrength=function(b){if(!arguments.length)return q;q=d3.functor(b);return a},a.friction=function(b){if(!arguments.length)return o;o=b;return a},a.charge=function(b){if(!arguments.length)return r;r=b;return a},a.gravity=function(b){if(!arguments.length)return s;s=b;return a},a.theta=function(b){if(!arguments.length)return t;t=b;return a},a.start=function(){function l(){if(!i){i=[];for(d=0;d<e;++d)i[d]=[];for(d=0;d<f;++d){var 
a=w[d];i[a.source.index].push(a.target),i[a.target.index].push(a.source)}}return i[b]}function k(a,c){var d=l(b),e=-1,f=d.length,g;while(++e<f)if(!isNaN(g=d[e][a]))return g;return Math.random()*c}var b,d,e=v.length,f=w.length,g=c[0],h=c[1],i,j;for(b=0;b<e;++b)(j=v[b]).index=b,j.weight=0;x=[],y=[];for(b=0;b<f;++b)j=w[b],typeof j.source=="number"&&(j.source=v[j.source]),typeof j.target=="number"&&(j.target=v[j.target]),x[b]=p.call(this,j,b),y[b]=q.call(this,j,b),++j.source.weight,++j.target.weight;for(b=0;b<e;++b)j=v[b],isNaN(j.x)&&(j.x=k("x",g)),isNaN(j.y)&&(j.y=k("y",h)),isNaN(j.px)&&(j.px=j.x),isNaN(j.py)&&(j.py=j.y);return a.resume()},a.resume=function(){n=.1,d3.timer(A);return a},a.stop=function(){n=0;return a},a.drag=function(){d||(d=d3.behavior.drag().on("dragstart",B).on("drag",j).on("dragend",i)),this.on("mouseover.force",g).on("mouseout.force",h).call(d)};return a};var e,f;d3.layout.partition=function(){function e(e,f){var g=a.call(this,e,f);c(g[0],0,b[0],b[1]/d(g[0]));return g}function d(a){var b=a.children,c=0;if(b){var e=-1,f=b.length;while(++e<f)c=Math.max(c,d(b[e]))}return 1+c}function c(a,b,d,e){var f=a.children;a.x=b,a.y=a.depth*e,a.dx=d,a.dy=e;if(f){var g=-1,h=f.length,i,j;d=a.value?d/a.value:0;while(++g<h)c(i=f[g],b,j=i.value*d,e),b+=j}}var a=d3.layout.hierarchy(),b=[1,1];e.size=function(a){if(!arguments.length)return b;b=a;return e};return y(e,a)},d3.layout.pie=function(){function f(f,g){var h=+(typeof c=="function"?c.apply(this,arguments):c),i=(typeof e=="function"?e.apply(this,arguments):e)-c,j=d3.range(f.length);b!=null&&j.sort(function(a,c){return b(f[a],f[c])});var k=f.map(a);i/=k.reduce(function(a,b){return a+b},0);var l=j.map(function(a){return{data:f[a],value:d=k[a],startAngle:h,endAngle:h+=d*i}});return f.map(function(a,b){return l[j[b]]})}var a=Number,b=null,c=0,e=2*Math.PI;f.value=function(b){if(!arguments.length)return a;a=b;return f},f.sort=function(a){if(!arguments.length)return b;b=a;return 
f},f.startAngle=function(a){if(!arguments.length)return c;c=a;return f},f.endAngle=function(a){if(!arguments.length)return e;e=a;return f};return f},d3.layout.stack=function(){function g(h,i){var j=h.map(function(b,c){return a.call(g,b,c)}),k=j.map(function(a,b){return a.map(function(a,b){return[e.call(g,a,b),f.call(g,a,b)]})}),l=b.call(g,k,i);j=d3.permute(j,l),k=d3.permute(k,l);var m=c.call(g,k,i),n=j.length,o=j[0].length,p,q,r;for(q=0;q<o;++q){d.call(g,j[0][q],r=m[q],k[0][q][1]);for(p=1;p<n;++p)d.call(g,j[p][q],r+=k[p-1][q][1],k[p][q][1])}return h}var a=Object,b=q["default"],c=r.zero,d=p,e=n,f=o;g.values=function(b){if(!arguments.length)return a;a=b;return g},g.order=function(a){if(!arguments.length)return b;b=typeof a=="function"?a:q[a];return g},g.offset=function(a){if(!arguments.length)return c;c=typeof a=="function"?a:r[a];return g},g.x=function(a){if(!arguments.length)return e;e=a;return g},g.y=function(a){if(!arguments.length)return f;f=a;return g},g.out=function(a){if(!arguments.length)return d;d=a;return g};return g};var q={"inside-out":function(a){var b=a.length,c,d,e=a.map(s),f=a.map(t),g=d3.range(b).sort(function(a,b){return e[a]-e[b]}),h=0,i=0,j=[],k=[];for(c=0;c<b;++c)d=g[c],h<i?(h+=f[d],j.push(d)):(i+=f[d],k.push(d));return k.reverse().concat(j)},reverse:function(a){return d3.range(a.length).reverse()},"default":function(a){return d3.range(a.length)}},r={silhouette:function(a){var b=a.length,c=a[0].length,d=[],e=0,f,g,h,i=[];for(g=0;g<c;++g){for(f=0,h=0;f<b;f++)h+=a[f][g][1];h>e&&(e=h),d.push(h)}for(g=0;g<c;++g)i[g]=(e-d[g])/2;return i},wiggle:function(a){var b=a.length,c=a[0],d=c.length,e=0,f,g,h,i,j,k,l,m,n,o=[];o[0]=m=n=0;for(g=1;g<d;++g){for(f=0,i=0;f<b;++f)i+=a[f][g][1];for(f=0,j=0,l=c[g][0]-c[g-1][0];f<b;++f){for(h=0,k=(a[f][g][1]-a[f][g-1][1])/(2*l);h<f;++h)k+=(a[h][g][1]-a[h][g-1][1])/l;j+=k*a[f][g][1]}o[g]=m-=i?j/i*l:0,m<n&&(n=m)}for(g=0;g<d;++g)o[g]-=n;return o},expand:function(a){var 
b=a.length,c=a[0].length,d=1/b,e,f,g,h=[];for(f=0;f<c;++f){for(e=0,g=0;e<b;e++)g+=a[e][f][1];if(g)for(e=0;e<b;e++)a[e][f][1]/=g;else for(e=0;e<b;e++)a[e][f][1]=d}for(f=0;f<c;++f)h[f]=0;return h},zero:function(a){var b=-1,c=a[0].length,d=[];while(++b<c)d[b]=0;return d}};d3.layout.histogram=function(){function e(e,f){var g=[],h=e.map(b,this),i=c.call(this,h,f),j=d.call(this,i,h,f),k,f=-1,l=h.length,m=j.length-1,n=a?1:1/l,o;while(++f<m)k=g[f]=[],k.dx=j[f+1]-(k.x=j[f]),k.y=0;f=-1;while(++f<l)o=h[f],o>=i[0]&&o<=i[1]&&(k=g[d3.bisect(j,o,1,m)-1],k.y+=n,k.push(e[f]));return g}var a=!0,b=Number,c=x,d=v;e.value=function(a){if(!arguments.length)return b;b=a;return e},e.range=function(a){if(!arguments.length)return c;c=d3.functor(a);return e},e.bins=function(a){if(!arguments.length)return d;d=typeof a=="number"?function(b){return w(b,a)}:d3.functor(a);return e},e.frequency=function(b){if(!arguments.length)return a;a=!!b;return e};return e},d3.layout.hierarchy=function(){function g(a){var b=[];e(a,0,b);return b}function f(a,b){var d=a.children,e=0;if(d){var h=-1,i=d.length,j=b+1;while(++h<i)e+=f(d[h],j)}else c&&(e=+c.call(g,D?a:a.data,b)||0);c&&(a.value=e);return e}function e(f,h,i){var j=b.call(g,f,h),k,l=D?f:{data:f};l.depth=h,i.push(l);if(j&&(k=j.length)){var m=-1,n=l.children=[],o=0,p=h+1;while(++m<k)d=e(j[m],p,i),d.parent=l,n.push(d),o+=d.value;a&&n.sort(a),c&&(l.value=o)}else c&&(l.value=+c.call(g,f,h)||0);return l}var a=B,b=z,c=A;g.sort=function(b){if(!arguments.length)return a;a=b;return g},g.children=function(a){if(!arguments.length)return b;b=a;return g},g.value=function(a){if(!arguments.length)return c;c=a;return g},g.revalue=function(a){f(a,0);return a};return g};var D=!1;d3.layout.pack=function(){function c(c,d){var e=a.call(this,c,d),f=e[0];f.x=0,f.y=0,L(f);var g=b[0],h=b[1],i=1/Math.max(2*f.r/g,2*f.r/h);M(f,g/2,h/2,i);return e}var a=d3.layout.hierarchy().sort(E),b=[1,1];c.size=function(a){if(!arguments.length)return b;b=a;return c};return 
y(c,a)},d3.layout.cluster=function(){function d(d,e){var f=a.call(this,d,e),g=f[0],h,i=0,j,k;Z(g,function(a){a.children?(a.x=P(a.children),a.y=O(a.children)):(a.x=h?i+=b(a,h):0,a.y=0,h=a)});var l=Q(g),m=R(g),n=l.x-b(l,m)/2,o=m.x+b(m,l)/2;Z(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=(1-a.y/g.y)*c[1]});return f}var a=d3.layout.hierarchy().sort(null).value(null),b=S,c=[1,1];d.separation=function(a){if(!arguments.length)return b;b=a;return d},d.size=function(a){if(!arguments.length)return c;c=a;return d};return y(d,a)},d3.layout.tree=function(){function d(d,e){function j(a,c,d){if(c){var e=a,f=a,g=c,h=a.parent.children[0],i=e._tree.mod,j=f._tree.mod,k=g._tree.mod,l=h._tree.mod,m;while(g=U(g),e=T(e),g&&e)h=T(h),f=U(f),f._tree.ancestor=a,m=g._tree.prelim+k-e._tree.prelim-i+b(g,e),m>0&&(_(ba(g,a,d),a,m),i+=m,j+=m),k+=g._tree.mod,i+=e._tree.mod,l+=h._tree.mod,j+=f._tree.mod;g&&!U(f)&&(f._tree.thread=g,f._tree.mod+=k-j),e&&!T(h)&&(h._tree.thread=e,h._tree.mod+=i-l,d=a)}return d}function i(a,b){a.x=a._tree.prelim+b;var c=a.children;if(c){var d=-1,e=c.length;b+=a._tree.mod;while(++d<e)i(c[d],b)}}function h(a,c){var d=a.children,e=a._tree;if(d&&(f=d.length)){var f,g=d[0],i,k=g,l,m=-1;while(++m<f)l=d[m],h(l,i),k=j(l,i,k),i=l;$(a);var n=.5*(g._tree.prelim+l._tree.prelim);c?(e.prelim=c._tree.prelim+b(a,c),e.mod=e.prelim-n):e.prelim=n}else c&&(e.prelim=c._tree.prelim+b(a,c))}var f=a.call(this,d,e),g=f[0];Z(g,function(a,b){a._tree={ancestor:a,prelim:0,mod:0,change:0,shift:0,number:b?b._tree.number+1:0}}),h(g),i(g,-g._tree.prelim);var k=V(g,X),l=V(g,W),m=V(g,Y),n=k.x-b(k,l)/2,o=l.x+b(l,k)/2,p=m.depth||1;Z(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=a.depth/p*c[1],delete a._tree});return f}var a=d3.layout.hierarchy().sort(null).value(null),b=S,c=[1,1];d.separation=function(a){if(!arguments.length)return b;b=a;return d},d.size=function(a){if(!arguments.length)return c;c=a;return d};return y(d,a)},d3.layout.treemap=function(){function n(b){var 
d=g||a(b),e=d[0];e.x=0,e.y=0,e.dx=c[0],e.dy=c[1],g&&a.revalue(e),i([e],e.dx*e.dy/e.value),(g?k:j)(e),f&&(g=d);return d}function m(a,c,d,e){var f=-1,g=a.length,h=d.x,i=d.y,j=c?b(a.area/c):0,k;if(c==d.dx){if(e||j>d.dy)j=j?d.dy:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dy=j,h+=k.dx=j?b(k.area/j):0;k.z=!0,k.dx+=d.x+d.dx-h,d.y+=j,d.dy-=j}else{if(e||j>d.dx)j=j?d.dx:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dx=j,i+=k.dy=j?b(k.area/j):0;k.z=!1,k.dy+=d.y+d.dy-i,d.x+=j,d.dx-=j}}function l(a,b){var c=a.area,d,e=0,f=Infinity,g=-1,i=a.length;while(++g<i){if(!(d=a[g].area))continue;d<f&&(f=d),d>e&&(e=d)}c*=c,b*=b;return c?Math.max(b*e*h/c,c/(b*f*h)):Infinity}function k(a){if(!!a.children){var b=e(a),c=a.children.slice(),d,f=[];i(c,b.dx*b.dy/a.value),f.area=0;while(d=c.pop())f.push(d),f.area+=d.area,d.z!=null&&(m(f,d.z?b.dx:b.dy,b,!c.length),f.length=f.area=0);a.children.forEach(k)}}function j(a){if(!!a.children){var b=e(a),c=[],d=a.children.slice(),f,g=Infinity,h,k=Math.min(b.dx,b.dy),n;i(d,b.dx*b.dy/a.value),c.area=0;while((n=d.length)>0)c.push(f=d[n-1]),c.area+=f.area,(h=l(c,k))<=g?(d.pop(),g=h):(c.area-=c.pop().area,m(c,k,b,!1),k=Math.min(b.dx,b.dy),c.length=c.area=0,g=Infinity);c.length&&(m(c,k,b,!0),c.length=c.area=0),a.children.forEach(j)}}function i(a,b){var c=-1,d=a.length,e,f;while(++c<d)f=(e=a[c]).value*(b<0?0:b),e.area=isNaN(f)||f<=0?0:f}var a=d3.layout.hierarchy(),b=Math.round,c=[1,1],d=null,e=bb,f=!1,g,h=.5*(1+Math.sqrt(5));n.size=function(a){if(!arguments.length)return c;c=a;return n},n.padding=function(a){function c(b){return bc(b,a)}function b(b){var c=a.call(n,b,b.depth);return c==null?bb(b):bc(b,typeof c=="number"?[c,c,c,c]:c)}if(!arguments.length)return d;var f;e=(d=a)==null?bb:(f=typeof a)==="function"?b:f==="number"?(a=[a,a,a,a],c):c;return n},n.round=function(a){if(!arguments.length)return b!=Number;b=a?Math.round:Number;return n},n.sticky=function(a){if(!arguments.length)return f;f=a,g=null;return n},n.ratio=function(a){if(!arguments.length)return h;h=a;return n};return 
y(n,a)}})()
<ide>\ No newline at end of file
<add>(function(){function bc(a,b){var c=a.x+b[3],d=a.y+b[0],e=a.dx-b[1]-b[3],f=a.dy-b[0]-b[2];e<0&&(c+=e/2,e=0),f<0&&(d+=f/2,f=0);return{x:c,y:d,dx:e,dy:f}}function bb(a){return{x:a.x,y:a.y,dx:a.dx,dy:a.dy}}function ba(a,b,c){return a._tree.ancestor.parent==b.parent?a._tree.ancestor:c}function _(a,b,c){a=a._tree,b=b._tree;var d=c/(b.number-a.number);a.change+=d,b.change-=d,b.shift+=c,b.prelim+=c,b.mod+=c}function $(a){var b=0,c=0,d=a.children,e=d.length,f;while(--e>=0)f=d[e]._tree,f.prelim+=b,f.mod+=b,b+=f.shift+(c+=f.change)}function Z(a,b){function c(a,d){var e=a.children;if(e){var f,g=null,h=-1,i=e.length;while(++h<i)f=e[h],c(f,g),g=f}b(a,d)}c(a,null)}function Y(a,b){return a.depth-b.depth}function X(a,b){return b.x-a.x}function W(a,b){return a.x-b.x}function V(a,b){var c=a.children;if(c){var d,e=c.length,f=-1;while(++f<e)b(d=V(c[f],b),a)>0&&(a=d)}return a}function U(a){return a.children?a.children[a.children.length-1]:a._tree.thread}function T(a){return a.children?a.children[0]:a._tree.thread}function S(a,b){return a.parent==b.parent?1:2}function R(a){var b=a.children,c;return b&&(c=b.length)?R(b[c-1]):a}function Q(a){var b=a.children;return b&&b.length?Q(b[0]):a}function P(a){return a.reduce(function(a,b){return a+b.x},0)/a.length}function O(a){return 1+d3.max(a,function(a){return a.y})}function N(a,b,c){var d=b.r+c.r,e=a.r+c.r,f=b.x-a.x,g=b.y-a.y,h=Math.sqrt(f*f+g*g),i=(e*e+h*h-d*d)/(2*e*h),j=Math.acos(i),k=i*e,l=Math.sin(j)*e;f/=h,g/=h,c.x=a.x+k*f+l*g,c.y=a.y+k*g-l*f}function M(a,b,c,d){var e=a.children;a.x=b+=d*a.x,a.y=c+=d*a.y,a.r*=d;if(e){var f=-1,g=e.length;while(++f<g)M(e[f],b,c,d)}}function L(a){var b=a.children;b?(b.forEach(L),a.r=I(b)):a.r=Math.sqrt(a.value)}function K(a){delete a._pack_next,delete a._pack_prev}function J(a){a._pack_next=a._pack_prev=a}function I(a){function l(a){b=Math.min(a.x-a.r,b),c=Math.max(a.x+a.r,c),d=Math.min(a.y-a.r,d),e=Math.max(a.y+a.r,e)}var 
b=Infinity,c=-Infinity,d=Infinity,e=-Infinity,f=a.length,g,h,i,j,k;a.forEach(J),g=a[0],g.x=-g.r,g.y=0,l(g);if(f>1){h=a[1],h.x=h.r,h.y=0,l(h);if(f>2){i=a[2],N(g,h,i),l(i),F(g,i),g._pack_prev=i,F(i,h),h=g._pack_next;for(var m=3;m<f;m++){N(g,h,i=a[m]);var n=0,o=1,p=1;for(j=h._pack_next;j!==h;j=j._pack_next,o++)if(H(j,i)){n=1;break}if(n==1)for(k=g._pack_prev;k!==j._pack_prev;k=k._pack_prev,p++)if(H(k,i)){p<o&&(n=-1,j=k);break}n==0?(F(g,i),h=i,l(i)):n>0?(G(g,j),h=j,m--):(G(j,h),g=j,m--)}}}var q=(b+c)/2,r=(d+e)/2,s=0;for(var m=0;m<f;m++){var t=a[m];t.x-=q,t.y-=r,s=Math.max(s,t.r+Math.sqrt(t.x*t.x+t.y*t.y))}a.forEach(K);return s}function H(a,b){var c=b.x-a.x,d=b.y-a.y,e=a.r+b.r;return e*e-c*c-d*d>.001}function G(a,b){a._pack_next=b,b._pack_prev=a}function F(a,b){var c=a._pack_next;a._pack_next=b,b._pack_prev=a,b._pack_next=c,c._pack_prev=b}function E(a,b){return a.value-b.value}function C(a){return d3.merge(a.map(function(a){return(a.children||[]).map(function(b){return{source:a,target:b}})}))}function B(a,b){return b.value-a.value}function A(a){return a.value}function z(a){return a.children}function y(a,b){a.sort=d3.rebind(a,b.sort),a.children=d3.rebind(a,b.children),a.links=C,a.value=d3.rebind(a,b.value),a.nodes=function(b){D=!0;return(a.nodes=a)(b)};return a}function x(a){return[d3.min(a),d3.max(a)]}function w(a,b){var c=-1,d=+a[0],e=(a[1]-d)/b,f=[];while(++c<=b)f[c]=e*c+d;return f}function v(a,b){return w(a,Math.ceil(Math.log(b.length)/Math.LN2+1))}function u(a,b){return a+b[1]}function t(a){return a.reduce(u,0)}function s(a){var b=1,c=0,d=a[0][1],e,f=a.length;for(;b<f;++b)(e=a[b][1])>d&&(c=b,d=e);return c}function p(a,b,c){a.y0=b,a.y=c}function o(a){return a.y}function n(a){return a.x}function m(a){return 1}function l(a){return 20}function k(a){var b=0,c=0;a.count=0;if(!a.leaf){var 
d=a.nodes,e=d.length,f=-1,g;while(++f<e){g=d[f];if(g==null)continue;k(g),a.count+=g.count,b+=g.count*g.cx,c+=g.count*g.cy}}a.point&&(a.leaf||(a.point.x+=Math.random()-.5,a.point.y+=Math.random()-.5),a.count++,b+=a.point.x,c+=a.point.y),a.cx=b/a.count,a.cy=c/a.count}function j(){f.px+=d3.event.dx,f.py+=d3.event.dy,e.resume()}function i(){j(),f.fixed&=1,e=f=null}function h(a){a!==f&&(a.fixed&=1)}function g(a){a.fixed|=2}function c(a,c){if(a===c)return a;var d=b(a),e=b(c),f=d.pop(),g=e.pop(),h=null;while(f===g)h=f,f=d.pop(),g=e.pop();return h}function b(a){var b=[],c=a.parent;while(c!=null)b.push(a),a=c,c=c.parent;b.push(a);return b}function a(a){var b=a.source,d=a.target,e=c(b,d),f=[b];while(b!==e)b=b.parent,f.push(b);var g=f.length;while(d!==e)f.splice(g,0,d),d=d.parent;return f}d3.layout={},d3.layout.bundle=function(){return function(b){var c=[],d=-1,e=b.length;while(++d<e)c.push(a(b[d]));return c}},d3.layout.chord=function(){function k(){b.sort(function(a,b){return i(a.target.value,b.target.value)})}function j(){var a={},j=[],l=d3.range(e),m=[],n,o,p,q,r;b=[],c=[],n=0,q=-1;while(++q<e){o=0,r=-1;while(++r<e)o+=d[q][r];j.push(o),m.push(d3.range(e)),n+=o}g&&l.sort(function(a,b){return g(j[a],j[b])}),h&&m.forEach(function(a,b){a.sort(function(a,c){return h(d[b][a],d[b][c])})}),n=(2*Math.PI-f*e)/n,o=0,q=-1;while(++q<e){p=o,r=-1;while(++r<e){var s=l[q],t=m[q][r],u=d[s][t];a[s+"-"+t]={index:s,subindex:t,startAngle:o,endAngle:o+=u*n,value:u}}c.push({index:s,startAngle:p,endAngle:o,value:(o-p)/n}),o+=f}q=-1;while(++q<e){r=q-1;while(++r<e){var v=a[q+"-"+r],w=a[r+"-"+q];(v.value||w.value)&&b.push(v.value<w.value?{source:w,target:v}:{source:v,target:w})}}i&&k()}var a={},b,c,d,e,f=0,g,h,i;a.matrix=function(f){if(!arguments.length)return d;e=(d=f)&&d.length,b=c=null;return a},a.padding=function(d){if(!arguments.length)return f;f=d,b=c=null;return a},a.sortGroups=function(d){if(!arguments.length)return g;g=d,b=c=null;return 
a},a.sortSubgroups=function(c){if(!arguments.length)return h;h=c,b=null;return a},a.sortChords=function(c){if(!arguments.length)return i;i=c,b&&k();return a},a.chords=function(){b||j();return b},a.groups=function(){c||j();return c};return a},d3.layout.force=function(){function B(b){g(f=b),e=a}function A(){var a=v.length,d=w.length,e,f,g,h,i,j,l,m,p;for(f=0;f<d;++f){g=w[f],h=g.source,i=g.target,m=i.x-h.x,p=i.y-h.y;if(j=m*m+p*p)j=n*y[f]*((j=Math.sqrt(j))-x[f])/j,m*=j,p*=j,i.x-=m*(l=h.weight/(i.weight+h.weight)),i.y-=p*l,h.x+=m*(l=1-l),h.y+=p*l}if(l=n*s){m=c[0]/2,p=c[1]/2,f=-1;if(l)while(++f<a)g=v[f],g.x+=(m-g.x)*l,g.y+=(p-g.y)*l}if(l=n*r){k(e=d3.geom.quadtree(v)),f=-1;while(++f<a)(g=v[f]).fixed||e.visit(z(g,l))}f=-1;while(++f<a)g=v[f],g.fixed?(g.x=g.px,g.y=g.py):(g.x-=(g.px-(g.px=g.x))*o,g.y-=(g.py-(g.py=g.y))*o);b.tick.dispatch({type:"tick",alpha:n});return(n*=.99)<.005}function z(a,b){return function(c,d,e,f,g){if(c.point!==a){var h=c.cx-a.x,i=c.cy-a.y,j=1/Math.sqrt(h*h+i*i);if((f-d)*j<t){var k=b*c.count*j*j;a.px-=h*k,a.py-=i*k;return!0}if(c.point&&isFinite(j)){var k=b*j*j;a.px-=h*k,a.py-=i*k}}}}var a={},b=d3.dispatch("tick"),c=[1,1],d,n,o=.9,p=l,q=m,r=-30,s=.1,t=.8,u,v=[],w=[],x,y;a.on=function(c,d){b[c].add(d);return a},a.nodes=function(b){if(!arguments.length)return v;v=b;return a},a.links=function(b){if(!arguments.length)return w;w=b;return a},a.size=function(b){if(!arguments.length)return c;c=b;return a},a.linkDistance=function(b){if(!arguments.length)return p;p=d3.functor(b);return a},a.distance=a.linkDistance,a.linkStrength=function(b){if(!arguments.length)return q;q=d3.functor(b);return a},a.friction=function(b){if(!arguments.length)return o;o=b;return a},a.charge=function(b){if(!arguments.length)return r;r=b;return a},a.gravity=function(b){if(!arguments.length)return s;s=b;return a},a.theta=function(b){if(!arguments.length)return t;t=b;return a},a.start=function(){function l(){if(!i){i=[];for(d=0;d<e;++d)i[d]=[];for(d=0;d<f;++d){var 
a=w[d];i[a.source.index].push(a.target),i[a.target.index].push(a.source)}}return i[b]}function k(a,c){var d=l(b),e=-1,f=d.length,g;while(++e<f)if(!isNaN(g=d[e][a]))return g;return Math.random()*c}var b,d,e=v.length,f=w.length,g=c[0],h=c[1],i,j;for(b=0;b<e;++b)(j=v[b]).index=b,j.weight=0;x=[],y=[];for(b=0;b<f;++b)j=w[b],typeof j.source=="number"&&(j.source=v[j.source]),typeof j.target=="number"&&(j.target=v[j.target]),x[b]=p.call(this,j,b),y[b]=q.call(this,j,b),++j.source.weight,++j.target.weight;for(b=0;b<e;++b)j=v[b],isNaN(j.x)&&(j.x=k("x",g)),isNaN(j.y)&&(j.y=k("y",h)),isNaN(j.px)&&(j.px=j.x),isNaN(j.py)&&(j.py=j.y);return a.resume()},a.resume=function(){n=.1,d3.timer(A);return a},a.stop=function(){n=0;return a},a.drag=function(){d||(d=d3.behavior.drag().on("dragstart",B).on("drag",j).on("dragend",i)),this.on("mouseover.force",g).on("mouseout.force",h).call(d)};return a};var e,f;d3.layout.partition=function(){function e(e,f){var g=a.call(this,e,f);c(g[0],0,b[0],b[1]/d(g[0]));return g}function d(a){var b=a.children,c=0;if(b){var e=-1,f=b.length;while(++e<f)c=Math.max(c,d(b[e]))}return 1+c}function c(a,b,d,e){var f=a.children;a.x=b,a.y=a.depth*e,a.dx=d,a.dy=e;if(f){var g=-1,h=f.length,i,j;d=a.value?d/a.value:0;while(++g<h)c(i=f[g],b,j=i.value*d,e),b+=j}}var a=d3.layout.hierarchy(),b=[1,1];e.size=function(a){if(!arguments.length)return b;b=a;return e};return y(e,a)},d3.layout.pie=function(){function f(f,g){var h=+(typeof c=="function"?c.apply(this,arguments):c),i=(typeof e=="function"?e.apply(this,arguments):e)-c,j=d3.range(f.length);b!=null&&j.sort(function(a,c){return b(f[a],f[c])});var k=f.map(a);i/=k.reduce(function(a,b){return a+b},0);var l=j.map(function(a){return{data:f[a],value:d=k[a],startAngle:h,endAngle:h+=d*i}});return f.map(function(a,b){return l[j[b]]})}var a=Number,b=null,c=0,e=2*Math.PI;f.value=function(b){if(!arguments.length)return a;a=b;return f},f.sort=function(a){if(!arguments.length)return b;b=a;return 
f},f.startAngle=function(a){if(!arguments.length)return c;c=a;return f},f.endAngle=function(a){if(!arguments.length)return e;e=a;return f};return f},d3.layout.stack=function(){function g(h,i){var j=h.map(function(b,c){return a.call(g,b,c)}),k=j.map(function(a,b){return a.map(function(a,b){return[e.call(g,a,b),f.call(g,a,b)]})}),l=b.call(g,k,i);j=d3.permute(j,l),k=d3.permute(k,l);var m=c.call(g,k,i),n=j.length,o=j[0].length,p,q,r;for(q=0;q<o;++q){d.call(g,j[0][q],r=m[q],k[0][q][1]);for(p=1;p<n;++p)d.call(g,j[p][q],r+=k[p-1][q][1],k[p][q][1])}return h}var a=Object,b=q["default"],c=r.zero,d=p,e=n,f=o;g.values=function(b){if(!arguments.length)return a;a=b;return g},g.order=function(a){if(!arguments.length)return b;b=typeof a=="function"?a:q[a];return g},g.offset=function(a){if(!arguments.length)return c;c=typeof a=="function"?a:r[a];return g},g.x=function(a){if(!arguments.length)return e;e=a;return g},g.y=function(a){if(!arguments.length)return f;f=a;return g},g.out=function(a){if(!arguments.length)return d;d=a;return g};return g};var q={"inside-out":function(a){var b=a.length,c,d,e=a.map(s),f=a.map(t),g=d3.range(b).sort(function(a,b){return e[a]-e[b]}),h=0,i=0,j=[],k=[];for(c=0;c<b;++c)d=g[c],h<i?(h+=f[d],j.push(d)):(i+=f[d],k.push(d));return k.reverse().concat(j)},reverse:function(a){return d3.range(a.length).reverse()},"default":function(a){return d3.range(a.length)}},r={silhouette:function(a){var b=a.length,c=a[0].length,d=[],e=0,f,g,h,i=[];for(g=0;g<c;++g){for(f=0,h=0;f<b;f++)h+=a[f][g][1];h>e&&(e=h),d.push(h)}for(g=0;g<c;++g)i[g]=(e-d[g])/2;return i},wiggle:function(a){var b=a.length,c=a[0],d=c.length,e=0,f,g,h,i,j,k,l,m,n,o=[];o[0]=m=n=0;for(g=1;g<d;++g){for(f=0,i=0;f<b;++f)i+=a[f][g][1];for(f=0,j=0,l=c[g][0]-c[g-1][0];f<b;++f){for(h=0,k=(a[f][g][1]-a[f][g-1][1])/(2*l);h<f;++h)k+=(a[h][g][1]-a[h][g-1][1])/l;j+=k*a[f][g][1]}o[g]=m-=i?j/i*l:0,m<n&&(n=m)}for(g=0;g<d;++g)o[g]-=n;return o},expand:function(a){var 
b=a.length,c=a[0].length,d=1/b,e,f,g,h=[];for(f=0;f<c;++f){for(e=0,g=0;e<b;e++)g+=a[e][f][1];if(g)for(e=0;e<b;e++)a[e][f][1]/=g;else for(e=0;e<b;e++)a[e][f][1]=d}for(f=0;f<c;++f)h[f]=0;return h},zero:function(a){var b=-1,c=a[0].length,d=[];while(++b<c)d[b]=0;return d}};d3.layout.histogram=function(){function e(e,f){var g=[],h=e.map(b,this),i=c.call(this,h,f),j=d.call(this,i,h,f),k,f=-1,l=h.length,m=j.length-1,n=a?1:1/l,o;while(++f<m)k=g[f]=[],k.dx=j[f+1]-(k.x=j[f]),k.y=0;f=-1;while(++f<l)o=h[f],o>=i[0]&&o<=i[1]&&(k=g[d3.bisect(j,o,1,m)-1],k.y+=n,k.push(e[f]));return g}var a=!0,b=Number,c=x,d=v;e.value=function(a){if(!arguments.length)return b;b=a;return e},e.range=function(a){if(!arguments.length)return c;c=d3.functor(a);return e},e.bins=function(a){if(!arguments.length)return d;d=typeof a=="number"?function(b){return w(b,a)}:d3.functor(a);return e},e.frequency=function(b){if(!arguments.length)return a;a=!!b;return e};return e},d3.layout.hierarchy=function(){function g(a){var b=[];e(a,0,b);return b}function f(a,b){var d=a.children,e=0;if(d&&(i=d.length)){var h=-1,i,j=b+1;while(++h<i)e+=f(d[h],j)}else c&&(e=+c.call(g,D?a:a.data,b)||0);c&&(a.value=e);return e}function e(f,h,i){var j=b.call(g,f,h),k=D?f:{data:f};k.depth=h,i.push(k);if(j&&(m=j.length)){var l=-1,m,n=k.children=[],o=0,p=h+1;while(++l<m)d=e(j[l],p,i),d.parent=k,n.push(d),o+=d.value;a&&n.sort(a),c&&(k.value=o)}else c&&(k.value=+c.call(g,f,h)||0);return k}var a=B,b=z,c=A;g.sort=function(b){if(!arguments.length)return a;a=b;return g},g.children=function(a){if(!arguments.length)return b;b=a;return g},g.value=function(a){if(!arguments.length)return c;c=a;return g},g.revalue=function(a){f(a,0);return a},g.nodes=function(a){D=!0;return(g.nodes=g)(a)};return g};var D=!1;d3.layout.pack=function(){function c(c,d){var e=a.call(this,c,d),f=e[0];f.x=0,f.y=0,L(f);var g=b[0],h=b[1],i=1/Math.max(2*f.r/g,2*f.r/h);M(f,g/2,h/2,i);return e}var 
a=d3.layout.hierarchy().sort(E),b=[1,1];c.size=function(a){if(!arguments.length)return b;b=a;return c};return y(c,a)},d3.layout.cluster=function(){function d(d,e){var f=a.call(this,d,e),g=f[0],h,i=0,j,k;Z(g,function(a){var c=a.children;c&&c.length?(a.x=P(c),a.y=O(c)):(a.x=h?i+=b(a,h):0,a.y=0,h=a)});var l=Q(g),m=R(g),n=l.x-b(l,m)/2,o=m.x+b(m,l)/2;Z(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=(1-a.y/g.y)*c[1]});return f}var a=d3.layout.hierarchy().sort(null).value(null),b=S,c=[1,1];d.separation=function(a){if(!arguments.length)return b;b=a;return d},d.size=function(a){if(!arguments.length)return c;c=a;return d};return y(d,a)},d3.layout.tree=function(){function d(d,e){function j(a,c,d){if(c){var e=a,f=a,g=c,h=a.parent.children[0],i=e._tree.mod,j=f._tree.mod,k=g._tree.mod,l=h._tree.mod,m;while(g=U(g),e=T(e),g&&e)h=T(h),f=U(f),f._tree.ancestor=a,m=g._tree.prelim+k-e._tree.prelim-i+b(g,e),m>0&&(_(ba(g,a,d),a,m),i+=m,j+=m),k+=g._tree.mod,i+=e._tree.mod,l+=h._tree.mod,j+=f._tree.mod;g&&!U(f)&&(f._tree.thread=g,f._tree.mod+=k-j),e&&!T(h)&&(h._tree.thread=e,h._tree.mod+=i-l,d=a)}return d}function i(a,b){a.x=a._tree.prelim+b;var c=a.children;if(c){var d=-1,e=c.length;b+=a._tree.mod;while(++d<e)i(c[d],b)}}function h(a,c){var d=a.children,e=a._tree;if(d&&(f=d.length)){var f,g=d[0],i,k=g,l,m=-1;while(++m<f)l=d[m],h(l,i),k=j(l,i,k),i=l;$(a);var n=.5*(g._tree.prelim+l._tree.prelim);c?(e.prelim=c._tree.prelim+b(a,c),e.mod=e.prelim-n):e.prelim=n}else c&&(e.prelim=c._tree.prelim+b(a,c))}var f=a.call(this,d,e),g=f[0];Z(g,function(a,b){a._tree={ancestor:a,prelim:0,mod:0,change:0,shift:0,number:b?b._tree.number+1:0}}),h(g),i(g,-g._tree.prelim);var k=V(g,X),l=V(g,W),m=V(g,Y),n=k.x-b(k,l)/2,o=l.x+b(l,k)/2,p=m.depth||1;Z(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=a.depth/p*c[1],delete a._tree});return f}var a=d3.layout.hierarchy().sort(null).value(null),b=S,c=[1,1];d.separation=function(a){if(!arguments.length)return b;b=a;return d},d.size=function(a){if(!arguments.length)return c;c=a;return 
d};return y(d,a)},d3.layout.treemap=function(){function n(b){var d=g||a(b),e=d[0];e.x=0,e.y=0,e.dx=c[0],e.dy=c[1],g&&a.revalue(e),i([e],e.dx*e.dy/e.value),(g?k:j)(e),f&&(g=d);return d}function m(a,c,d,e){var f=-1,g=a.length,h=d.x,i=d.y,j=c?b(a.area/c):0,k;if(c==d.dx){if(e||j>d.dy)j=j?d.dy:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dy=j,h+=k.dx=j?b(k.area/j):0;k.z=!0,k.dx+=d.x+d.dx-h,d.y+=j,d.dy-=j}else{if(e||j>d.dx)j=j?d.dx:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dx=j,i+=k.dy=j?b(k.area/j):0;k.z=!1,k.dy+=d.y+d.dy-i,d.x+=j,d.dx-=j}}function l(a,b){var c=a.area,d,e=0,f=Infinity,g=-1,i=a.length;while(++g<i){if(!(d=a[g].area))continue;d<f&&(f=d),d>e&&(e=d)}c*=c,b*=b;return c?Math.max(b*e*h/c,c/(b*f*h)):Infinity}function k(a){if(!!a.children){var b=e(a),c=a.children.slice(),d,f=[];i(c,b.dx*b.dy/a.value),f.area=0;while(d=c.pop())f.push(d),f.area+=d.area,d.z!=null&&(m(f,d.z?b.dx:b.dy,b,!c.length),f.length=f.area=0);a.children.forEach(k)}}function j(a){if(!!a.children){var b=e(a),c=[],d=a.children.slice(),f,g=Infinity,h,k=Math.min(b.dx,b.dy),n;i(d,b.dx*b.dy/a.value),c.area=0;while((n=d.length)>0)c.push(f=d[n-1]),c.area+=f.area,(h=l(c,k))<=g?(d.pop(),g=h):(c.area-=c.pop().area,m(c,k,b,!1),k=Math.min(b.dx,b.dy),c.length=c.area=0,g=Infinity);c.length&&(m(c,k,b,!0),c.length=c.area=0),a.children.forEach(j)}}function i(a,b){var c=-1,d=a.length,e,f;while(++c<d)f=(e=a[c]).value*(b<0?0:b),e.area=isNaN(f)||f<=0?0:f}var a=d3.layout.hierarchy(),b=Math.round,c=[1,1],d=null,e=bb,f=!1,g,h=.5*(1+Math.sqrt(5));n.size=function(a){if(!arguments.length)return c;c=a;return n},n.padding=function(a){function c(b){return bc(b,a)}function b(b){var c=a.call(n,b,b.depth);return c==null?bb(b):bc(b,typeof c=="number"?[c,c,c,c]:c)}if(!arguments.length)return d;var f;e=(d=a)==null?bb:(f=typeof a)==="function"?b:f==="number"?(a=[a,a,a,a],c):c;return n},n.round=function(a){if(!arguments.length)return b!=Number;b=a?Math.round:Number;return n},n.sticky=function(a){if(!arguments.length)return f;f=a,g=null;return 
n},n.ratio=function(a){if(!arguments.length)return h;h=a;return n};return y(n,a)}})()
<ide>\ No newline at end of file
<ide><path>src/layout/cluster.js
<ide> d3.layout.cluster = function() {
<ide>
<ide> // First walk, computing the initial x & y values.
<ide> d3_layout_treeVisitAfter(root, function(node) {
<del> if (node.children) {
<del> node.x = d3_layout_clusterX(node.children);
<del> node.y = d3_layout_clusterY(node.children);
<add> var children = node.children;
<add> if (children && children.length) {
<add> node.x = d3_layout_clusterX(children);
<add> node.y = d3_layout_clusterY(children);
<ide> } else {
<ide> node.x = previousNode ? x += separation(node, previousNode) : 0;
<ide> node.y = 0;
<ide> function d3_layout_clusterX(children) {
<ide>
<ide> function d3_layout_clusterLeft(node) {
<ide> var children = node.children;
<del> return children ? d3_layout_clusterLeft(children[0]) : node;
<add> return children && children.length
<add> ? d3_layout_clusterLeft(children[0]) : node;
<ide> }
<ide>
<ide> function d3_layout_clusterRight(node) {
<del> var children = node.children;
<del> return children ? d3_layout_clusterRight(children[children.length - 1]) : node;
<add> var children = node.children,
<add> n;
<add> return children && (n = children.length)
<add> ? d3_layout_clusterRight(children[n - 1]) : node;
<ide> }
<ide><path>src/layout/hierarchy.js
<ide> d3.layout.hierarchy = function() {
<ide> // Also converts the data representation into a standard hierarchy structure.
<ide> function recurse(data, depth, nodes) {
<ide> var childs = children.call(hierarchy, data, depth),
<del> n,
<ide> node = d3_layout_hierarchyInline ? data : {data: data};
<ide> node.depth = depth;
<ide> nodes.push(node);
<ide> if (childs && (n = childs.length)) {
<ide> var i = -1,
<add> n,
<ide> c = node.children = [],
<ide> v = 0,
<ide> j = depth + 1;
<ide> d3.layout.hierarchy = function() {
<ide> function revalue(node, depth) {
<ide> var children = node.children,
<ide> v = 0;
<del> if (children) {
<add> if (children && (n = children.length)) {
<ide> var i = -1,
<del> n = children.length,
<add> n,
<ide> j = depth + 1;
<ide> while (++i < n) v += revalue(children[i], j);
<ide> } else if (value) {
<ide> d3.layout.hierarchy = function() {
<ide> return root;
<ide> };
<ide>
<add> // If the new API is used, enabling inlining.
<add> hierarchy.nodes = function(d) {
<add> d3_layout_hierarchyInline = true;
<add> return (hierarchy.nodes = hierarchy)(d);
<add> };
<add>
<ide> return hierarchy;
<ide> };
<ide>
<ide><path>test/layout/hierarchy-test.js
<ide> suite.addBatch({
<ide> "hierarchy": {
<ide> topic: d3.layout.hierarchy,
<ide> "doesn't overwrite the value of a node that has an empty children array": function(hierarchy) {
<del> assert.deepEqual(hierarchy({value: 1, children: []}), [
<del> {data: {value: 1, children: []}, depth: 0, value: 1}
<add> var nodes = hierarchy.nodes({value: 1, children: []});
<add> assert.deepEqual(nodes, [
<add> {children: [], depth: 0, value: 1}
<add> ]);
<add> hierarchy.revalue(nodes[0]);
<add> assert.deepEqual(nodes, [
<add> {children: [], depth: 0, value: 1}
<add> ]);
<add> },
<add> "a valueless node that has an empty children array gets a value of 0": function(hierarchy) {
<add> var nodes = hierarchy.nodes({children: []});
<add> assert.deepEqual(nodes, [
<add> {children: [], depth: 0, value: 0}
<add> ]);
<add> hierarchy.revalue(nodes[0]);
<add> assert.deepEqual(nodes, [
<add> {children: [], depth: 0, value: 0}
<ide> ]);
<ide> }
<ide> } | 5 |
Python | Python | adjust error [ci skip] | d38dc466c5d17cc66f6be4edc028e13e41788b6c | <ide><path>spacy/errors.py
<ide> class Errors:
<ide> "the documentation:\nhttps://nightly.spacy.io/usage/models")
<ide> E030 = ("Sentence boundaries unset. You can add the 'sentencizer' "
<ide> "component to the pipeline with: `nlp.add_pipe('sentencizer')`. "
<del> "Alternatively, add the dependency parser, or set sentence "
<del> "boundaries by setting `doc[i].is_sent_start`.")
<add> "Alternatively, add the dependency parser or sentence recognizer, "
<add> "or set sentence boundaries by setting `doc[i].is_sent_start`.")
<ide> E031 = ("Invalid token: empty string ('') at position {i}.")
<ide> E033 = ("Cannot load into non-empty Doc of length {length}.")
<ide> E035 = ("Error creating span with start {start} and end {end} for Doc of " | 1 |
Python | Python | extend list of abbreviations for ru language | a9756963e67cff6be5445ae441263f889c629123 | <ide><path>spacy/lang/ru/tokenizer_exceptions.py
<ide> from ...symbols import ORTH, NORM
<ide> from ...util import update_exc
<ide>
<del>
<ide> _exc = {}
<ide>
<ide> _abbrev_exc = [
<ide> {ORTH: "дек", NORM: "декабрь"},
<ide> ]
<ide>
<del>
<ide> for abbrev_desc in _abbrev_exc:
<ide> abbrev = abbrev_desc[ORTH]
<ide> for orth in (abbrev, abbrev.capitalize(), abbrev.upper()):
<ide> _exc[orth] = [{ORTH: orth, NORM: abbrev_desc[NORM]}]
<ide> _exc[orth + "."] = [{ORTH: orth + ".", NORM: abbrev_desc[NORM]}]
<ide>
<ide>
<del>_slang_exc = [
<add>for abbr in [
<add> # Year slang abbreviations
<ide> {ORTH: "2к15", NORM: "2015"},
<ide> {ORTH: "2к16", NORM: "2016"},
<ide> {ORTH: "2к17", NORM: "2017"},
<ide> {ORTH: "2к18", NORM: "2018"},
<ide> {ORTH: "2к19", NORM: "2019"},
<ide> {ORTH: "2к20", NORM: "2020"},
<del>]
<add> {ORTH: "2к21", NORM: "2021"},
<add> {ORTH: "2к22", NORM: "2022"},
<add> {ORTH: "2к23", NORM: "2023"},
<add> {ORTH: "2к24", NORM: "2024"},
<add> {ORTH: "2к25", NORM: "2025"},
<add>]:
<add> _exc[abbr[ORTH]] = [abbr]
<add>
<add>for abbr in [
<add> # Profession and academic titles abbreviations
<add> {ORTH: "ак.", NORM: "академик"},
<add> {ORTH: "акад.", NORM: "академик"},
<add> {ORTH: "д-р архитектуры", NORM: "доктор архитектуры"},
<add> {ORTH: "д-р биол. наук", NORM: "доктор биологических наук"},
<add> {ORTH: "д-р ветеринар. наук", NORM: "доктор ветеринарных наук"},
<add> {ORTH: "д-р воен. наук", NORM: "доктор военных наук"},
<add> {ORTH: "д-р геогр. наук", NORM: "доктор географических наук"},
<add> {ORTH: "д-р геол.-минерал. наук", NORM: "доктор геолого-минералогических наук"},
<add> {ORTH: "д-р искусствоведения", NORM: "доктор искусствоведения"},
<add> {ORTH: "д-р ист. наук", NORM: "доктор исторических наук"},
<add> {ORTH: "д-р культурологии", NORM: "доктор культурологии"},
<add> {ORTH: "д-р мед. наук", NORM: "доктор медицинских наук"},
<add> {ORTH: "д-р пед. наук", NORM: "доктор педагогических наук"},
<add> {ORTH: "д-р полит. наук", NORM: "доктор политических наук"},
<add> {ORTH: "д-р психол. наук", NORM: "доктор психологических наук"},
<add> {ORTH: "д-р с.-х. наук", NORM: "доктор сельскохозяйственных наук"},
<add> {ORTH: "д-р социол. наук", NORM: "доктор социологических наук"},
<add> {ORTH: "д-р техн. наук", NORM: "доктор технических наук"},
<add> {ORTH: "д-р фармацевт. наук", NORM: "доктор фармацевтических наук"},
<add> {ORTH: "д-р физ.-мат. наук", NORM: "доктор физико-математических наук"},
<add> {ORTH: "д-р филол. наук", NORM: "доктор филологических наук"},
<add> {ORTH: "д-р филос. наук", NORM: "доктор философских наук"},
<add> {ORTH: "д-р хим. наук", NORM: "доктор химических наук"},
<add> {ORTH: "д-р экон. наук", NORM: "доктор экономических наук"},
<add> {ORTH: "д-р юрид. наук", NORM: "доктор юридических наук"},
<add> {ORTH: "д-р", NORM: "доктор"},
<add> {ORTH: "д.б.н.", NORM: "доктор биологических наук"},
<add> {ORTH: "д.г.-м.н.", NORM: "доктор геолого-минералогических наук"},
<add> {ORTH: "д.г.н.", NORM: "доктор географических наук"},
<add> {ORTH: "д.и.н.", NORM: "доктор исторических наук"},
<add> {ORTH: "д.иск.", NORM: "доктор искусствоведения"},
<add> {ORTH: "д.м.н.", NORM: "доктор медицинских наук"},
<add> {ORTH: "д.п.н.", NORM: "доктор психологических наук"},
<add> {ORTH: "д.пед.н.", NORM: "доктор педагогических наук"},
<add> {ORTH: "д.полит.н.", NORM: "доктор политических наук"},
<add> {ORTH: "д.с.-х.н.", NORM: "доктор сельскохозяйственных наук"},
<add> {ORTH: "д.социол.н.", NORM: "доктор социологических наук"},
<add> {ORTH: "д.т.н.", NORM: "доктор технических наук"},
<add> {ORTH: "д.т.н", NORM: "доктор технических наук"},
<add> {ORTH: "д.ф.-м.н.", NORM: "доктор физико-математических наук"},
<add> {ORTH: "д.ф.н.", NORM: "доктор филологических наук"},
<add> {ORTH: "д.филос.н.", NORM: "доктор философских наук"},
<add> {ORTH: "д.фил.н.", NORM: "доктор филологических наук"},
<add> {ORTH: "д.х.н.", NORM: "доктор химических наук"},
<add> {ORTH: "д.э.н.", NORM: "доктор экономических наук"},
<add> {ORTH: "д.э.н", NORM: "доктор экономических наук"},
<add> {ORTH: "д.ю.н.", NORM: "доктор юридических наук"},
<add> {ORTH: "доц.", NORM: "доцент"},
<add> {ORTH: "и.о.", NORM: "исполняющий обязанности"},
<add> {ORTH: "к.б.н.", NORM: "кандидат биологических наук"},
<add> {ORTH: "к.воен.н.", NORM: "кандидат военных наук"},
<add> {ORTH: "к.г.-м.н.", NORM: "кандидат геолого-минералогических наук"},
<add> {ORTH: "к.г.н.", NORM: "кандидат географических наук"},
<add> {ORTH: "к.геогр.н", NORM: "кандидат географических наук"},
<add> {ORTH: "к.геогр.наук", NORM: "кандидат географических наук"},
<add> {ORTH: "к.и.н.", NORM: "кандидат исторических наук"},
<add> {ORTH: "к.иск.", NORM: "кандидат искусствоведения"},
<add> {ORTH: "к.м.н.", NORM: "кандидат медицинских наук"},
<add> {ORTH: "к.п.н.", NORM: "кандидат психологических наук"},
<add> {ORTH: "к.псх.н.", NORM: "кандидат психологических наук"},
<add> {ORTH: "к.пед.н.", NORM: "кандидат педагогических наук"},
<add> {ORTH: "канд.пед.наук", NORM: "кандидат педагогических наук"},
<add> {ORTH: "к.полит.н.", NORM: "кандидат политических наук"},
<add> {ORTH: "к.с.-х.н.", NORM: "кандидат сельскохозяйственных наук"},
<add> {ORTH: "к.социол.н.", NORM: "кандидат социологических наук"},
<add> {ORTH: "к.с.н.", NORM: "кандидат социологических наук"},
<add> {ORTH: "к.т.н.", NORM: "кандидат технических наук"},
<add> {ORTH: "к.ф.-м.н.", NORM: "кандидат физико-математических наук"},
<add> {ORTH: "к.ф.н.", NORM: "кандидат филологических наук"},
<add> {ORTH: "к.фил.н.", NORM: "кандидат филологических наук"},
<add> {ORTH: "к.филол.н", NORM: "кандидат филологических наук"},
<add> {ORTH: "к.фарм.наук", NORM: "кандидат фармакологических наук"},
<add> {ORTH: "к.фарм.н.", NORM: "кандидат фармакологических наук"},
<add> {ORTH: "к.фарм.н", NORM: "кандидат фармакологических наук"},
<add> {ORTH: "к.филос.наук", NORM: "кандидат философских наук"},
<add> {ORTH: "к.филос.н.", NORM: "кандидат философских наук"},
<add> {ORTH: "к.филос.н", NORM: "кандидат философских наук"},
<add> {ORTH: "к.х.н.", NORM: "кандидат химических наук"},
<add> {ORTH: "к.х.н", NORM: "кандидат химических наук"},
<add> {ORTH: "к.э.н.", NORM: "кандидат экономических наук"},
<add> {ORTH: "к.э.н", NORM: "кандидат экономических наук"},
<add> {ORTH: "к.ю.н.", NORM: "кандидат юридических наук"},
<add> {ORTH: "к.ю.н", NORM: "кандидат юридических наук"},
<add> {ORTH: "канд. архитектуры", NORM: "кандидат архитектуры"},
<add> {ORTH: "канд. биол. наук", NORM: "кандидат биологических наук"},
<add> {ORTH: "канд. ветеринар. наук", NORM: "кандидат ветеринарных наук"},
<add> {ORTH: "канд. воен. наук", NORM: "кандидат военных наук"},
<add> {ORTH: "канд. геогр. наук", NORM: "кандидат географических наук"},
<add> {ORTH: "канд. геол.-минерал. наук", NORM: "кандидат геолого-минералогических наук"},
<add> {ORTH: "канд. искусствоведения", NORM: "кандидат искусствоведения"},
<add> {ORTH: "канд. ист. наук", NORM: "кандидат исторических наук"},
<add> {ORTH: "к.ист.н.", NORM: "кандидат исторических наук"},
<add> {ORTH: "канд. культурологии", NORM: "кандидат культурологии"},
<add> {ORTH: "канд. мед. наук", NORM: "кандидат медицинских наук"},
<add> {ORTH: "канд. пед. наук", NORM: "кандидат педагогических наук"},
<add> {ORTH: "канд. полит. наук", NORM: "кандидат политических наук"},
<add> {ORTH: "канд. психол. наук", NORM: "кандидат психологических наук"},
<add> {ORTH: "канд. с.-х. наук", NORM: "кандидат сельскохозяйственных наук"},
<add> {ORTH: "канд. социол. наук", NORM: "кандидат социологических наук"},
<add> {ORTH: "к.соц.наук", NORM: "кандидат социологических наук"},
<add> {ORTH: "к.соц.н.", NORM: "кандидат социологических наук"},
<add> {ORTH: "к.соц.н", NORM: "кандидат социологических наук"},
<add> {ORTH: "канд. техн. наук", NORM: "кандидат технических наук"},
<add> {ORTH: "канд. фармацевт. наук", NORM: "кандидат фармацевтических наук"},
<add> {ORTH: "канд. физ.-мат. наук", NORM: "кандидат физико-математических наук"},
<add> {ORTH: "канд. филол. наук", NORM: "кандидат филологических наук"},
<add> {ORTH: "канд. филос. наук", NORM: "кандидат философских наук"},
<add> {ORTH: "канд. хим. наук", NORM: "кандидат химических наук"},
<add> {ORTH: "канд. экон. наук", NORM: "кандидат экономических наук"},
<add> {ORTH: "канд. юрид. наук", NORM: "кандидат юридических наук"},
<add> {ORTH: "в.н.с.", NORM: "ведущий научный сотрудник"},
<add> {ORTH: "мл. науч. сотр.", NORM: "младший научный сотрудник"},
<add> {ORTH: "м.н.с.", NORM: "младший научный сотрудник"},
<add> {ORTH: "проф.", NORM: "профессор"},
<add> {ORTH: "профессор.кафедры", NORM: "профессор кафедры"},
<add> {ORTH: "ст. науч. сотр.", NORM: "старший научный сотрудник"},
<add> {ORTH: "чл.-к.", NORM: "член корреспондент"},
<add> {ORTH: "чл.-корр.", NORM: "член-корреспондент"},
<add> {ORTH: "чл.-кор.", NORM: "член-корреспондент"},
<add> {ORTH: "дир.", NORM: "директор"},
<add> {ORTH: "зам. дир.", NORM: "заместитель директора"},
<add> {ORTH: "зав. каф.", NORM: "заведующий кафедрой"},
<add> {ORTH: "зав.кафедрой", NORM: "заведующий кафедрой"},
<add> {ORTH: "зав. кафедрой", NORM: "заведующий кафедрой"},
<add> {ORTH: "асп.", NORM: "аспирант"},
<add> {ORTH: "гл. науч. сотр.", NORM: "главный научный сотрудник"},
<add> {ORTH: "вед. науч. сотр.", NORM: "ведущий научный сотрудник"},
<add> {ORTH: "науч. сотр.", NORM: "научный сотрудник"},
<add> {ORTH: "к.м.с.", NORM: "кандидат в мастера спорта"},
<add>]:
<add> _exc[abbr[ORTH]] = [abbr]
<add>
<add>
<add>for abbr in [
<add> # Literary phrases abbreviations
<add> {ORTH: "и т.д.", NORM: "и так далее"},
<add> {ORTH: "и т.п.", NORM: "и тому подобное"},
<add> {ORTH: "т.д.", NORM: "так далее"},
<add> {ORTH: "т.п.", NORM: "тому подобное"},
<add> {ORTH: "т.е.", NORM: "то есть"},
<add> {ORTH: "т.к.", NORM: "так как"},
<add> {ORTH: "в т.ч.", NORM: "в том числе"},
<add> {ORTH: "и пр.", NORM: "и прочие"},
<add> {ORTH: "и др.", NORM: "и другие"},
<add> {ORTH: "т.н.", NORM: "так называемый"},
<add>]:
<add> _exc[abbr[ORTH]] = [abbr]
<add>
<add>
<add>for abbr in [
<add> # Appeal to a person abbreviations
<add> {ORTH: "г-н", NORM: "господин"},
<add> {ORTH: "г-да", NORM: "господа"},
<add> {ORTH: "г-жа", NORM: "госпожа"},
<add> {ORTH: "тов.", NORM: "товарищ"},
<add>]:
<add> _exc[abbr[ORTH]] = [abbr]
<add>
<add>
<add>for abbr in [
<add> # Time periods abbreviations
<add> {ORTH: "до н.э.", NORM: "до нашей эры"},
<add> {ORTH: "по н.в.", NORM: "по настоящее время"},
<add> {ORTH: "в н.в.", NORM: "в настоящее время"},
<add> {ORTH: "наст.", NORM: "настоящий"},
<add> {ORTH: "наст. время", NORM: "настоящее время"},
<add> {ORTH: "г.г.", NORM: "годы"},
<add> {ORTH: "гг.", NORM: "годы"},
<add> {ORTH: "т.г.", NORM: "текущий год"},
<add>]:
<add> _exc[abbr[ORTH]] = [abbr]
<add>
<add>
<add>for abbr in [
<add> # Address forming elements abbreviations
<add> {ORTH: "респ.", NORM: "республика"},
<add> {ORTH: "обл.", NORM: "область"},
<add> {ORTH: "г.ф.з.", NORM: "город федерального значения"},
<add> {ORTH: "а.обл.", NORM: "автономная область"},
<add> {ORTH: "а.окр.", NORM: "автономный округ"},
<add> {ORTH: "м.р-н", NORM: "муниципальный район"},
<add> {ORTH: "г.о.", NORM: "городской округ"},
<add> {ORTH: "г.п.", NORM: "городское поселение"},
<add> {ORTH: "с.п.", NORM: "сельское поселение"},
<add> {ORTH: "вн.р-н", NORM: "внутригородской район"},
<add> {ORTH: "вн.тер.г.", NORM: "внутригородская территория города"},
<add> {ORTH: "пос.", NORM: "поселение"},
<add> {ORTH: "р-н", NORM: "район"},
<add> {ORTH: "с/с", NORM: "сельсовет"},
<add> {ORTH: "г.", NORM: "город"},
<add> {ORTH: "п.г.т.", NORM: "поселок городского типа"},
<add> {ORTH: "пгт.", NORM: "поселок городского типа"},
<add> {ORTH: "р.п.", NORM: "рабочий поселок"},
<add> {ORTH: "рп.", NORM: "рабочий поселок"},
<add> {ORTH: "кп.", NORM: "курортный поселок"},
<add> {ORTH: "гп.", NORM: "городской поселок"},
<add> {ORTH: "п.", NORM: "поселок"},
<add> {ORTH: "в-ки", NORM: "выселки"},
<add> {ORTH: "г-к", NORM: "городок"},
<add> {ORTH: "з-ка", NORM: "заимка"},
<add> {ORTH: "п-к", NORM: "починок"},
<add> {ORTH: "киш.", NORM: "кишлак"},
<add> {ORTH: "п. ст. ", NORM: "поселок станция"},
<add> {ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"},
<add> {ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"},
<add> {ORTH: "ж/д б-ка", NORM: "железнодорожная будка"},
<add> {ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"},
<add> {ORTH: "ж/д к-ма", NORM: "железнодорожная казарма"},
<add> {ORTH: "ж/д к-т", NORM: "железнодорожный комбинат"},
<add> {ORTH: "ж/д пл-ма", NORM: "железнодорожная платформа"},
<add> {ORTH: "ж/д пл-ка", NORM: "железнодорожная площадка"},
<add> {ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"},
<add> {ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"},
<add> {ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"},
<add> {ORTH: "ж/д ст. ", NORM: "железнодорожная станция"},
<add> {ORTH: "м-ко", NORM: "местечко"},
<add> {ORTH: "д.", NORM: "деревня"},
<add> {ORTH: "с.", NORM: "село"},
<add> {ORTH: "сл.", NORM: "слобода"},
<add> {ORTH: "ст. ", NORM: "станция"},
<add> {ORTH: "ст-ца", NORM: "станица"},
<add> {ORTH: "у.", NORM: "улус"},
<add> {ORTH: "х.", NORM: "хутор"},
<add> {ORTH: "рзд.", NORM: "разъезд"},
<add> {ORTH: "зим.", NORM: "зимовье"},
<add> {ORTH: "б-г", NORM: "берег"},
<add> {ORTH: "ж/р", NORM: "жилой район"},
<add> {ORTH: "кв-л", NORM: "квартал"},
<add> {ORTH: "мкр.", NORM: "микрорайон"},
<add> {ORTH: "ост-в", NORM: "остров"},
<add> {ORTH: "платф.", NORM: "платформа"},
<add> {ORTH: "п/р", NORM: "промышленный район"},
<add> {ORTH: "р-н", NORM: "район"},
<add> {ORTH: "тер.", NORM: "территория"},
<add> {
<add> ORTH: "тер. СНО",
<add> NORM: "территория садоводческих некоммерческих объединений граждан",
<add> },
<add> {
<add> ORTH: "тер. ОНО",
<add> NORM: "территория огороднических некоммерческих объединений граждан",
<add> },
<add> {ORTH: "тер. ДНО", NORM: "территория дачных некоммерческих объединений граждан"},
<add> {ORTH: "тер. СНТ", NORM: "территория садоводческих некоммерческих товариществ"},
<add> {ORTH: "тер. ОНТ", NORM: "территория огороднических некоммерческих товариществ"},
<add> {ORTH: "тер. ДНТ", NORM: "территория дачных некоммерческих товариществ"},
<add> {ORTH: "тер. СПК", NORM: "территория садоводческих потребительских кооперативов"},
<add> {ORTH: "тер. ОПК", NORM: "территория огороднических потребительских кооперативов"},
<add> {ORTH: "тер. ДПК", NORM: "территория дачных потребительских кооперативов"},
<add> {ORTH: "тер. СНП", NORM: "территория садоводческих некоммерческих партнерств"},
<add> {ORTH: "тер. ОНП", NORM: "территория огороднических некоммерческих партнерств"},
<add> {ORTH: "тер. ДНП", NORM: "территория дачных некоммерческих партнерств"},
<add> {ORTH: "тер. ТСН", NORM: "территория товарищества собственников недвижимости"},
<add> {ORTH: "тер. ГСК", NORM: "территория гаражно-строительного кооператива"},
<add> {ORTH: "ус.", NORM: "усадьба"},
<add> {ORTH: "тер.ф.х.", NORM: "территория фермерского хозяйства"},
<add> {ORTH: "ю.", NORM: "юрты"},
<add> {ORTH: "ал.", NORM: "аллея"},
<add> {ORTH: "б-р", NORM: "бульвар"},
<add> {ORTH: "взв.", NORM: "взвоз"},
<add> {ORTH: "взд.", NORM: "въезд"},
<add> {ORTH: "дор.", NORM: "дорога"},
<add> {ORTH: "ззд.", NORM: "заезд"},
<add> {ORTH: "км", NORM: "километр"},
<add> {ORTH: "к-цо", NORM: "кольцо"},
<add> {ORTH: "лн.", NORM: "линия"},
<add> {ORTH: "мгстр.", NORM: "магистраль"},
<add> {ORTH: "наб.", NORM: "набережная"},
<add> {ORTH: "пер-д", NORM: "переезд"},
<add> {ORTH: "пер.", NORM: "переулок"},
<add> {ORTH: "пл-ка", NORM: "площадка"},
<add> {ORTH: "пл.", NORM: "площадь"},
<add> {ORTH: "пр-д", NORM: "проезд"},
<add> {ORTH: "пр-к", NORM: "просек"},
<add> {ORTH: "пр-ка", NORM: "просека"},
<add> {ORTH: "пр-лок", NORM: "проселок"},
<add> {ORTH: "пр-кт", NORM: "проспект"},
<add> {ORTH: "проул.", NORM: "проулок"},
<add> {ORTH: "рзд.", NORM: "разъезд"},
<add> {ORTH: "ряд", NORM: "ряд(ы)"},
<add> {ORTH: "с-р", NORM: "сквер"},
<add> {ORTH: "с-к", NORM: "спуск"},
<add> {ORTH: "сзд.", NORM: "съезд"},
<add> {ORTH: "туп.", NORM: "тупик"},
<add> {ORTH: "ул.", NORM: "улица"},
<add> {ORTH: "ш.", NORM: "шоссе"},
<add> {ORTH: "влд.", NORM: "владение"},
<add> {ORTH: "г-ж", NORM: "гараж"},
<add> {ORTH: "д.", NORM: "дом"},
<add> {ORTH: "двлд.", NORM: "домовладение"},
<add> {ORTH: "зд.", NORM: "здание"},
<add> {ORTH: "з/у", NORM: "земельный участок"},
<add> {ORTH: "кв.", NORM: "квартира"},
<add> {ORTH: "ком.", NORM: "комната"},
<add> {ORTH: "подв.", NORM: "подвал"},
<add> {ORTH: "кот.", NORM: "котельная"},
<add> {ORTH: "п-б", NORM: "погреб"},
<add> {ORTH: "к.", NORM: "корпус"},
<add> {ORTH: "ОНС", NORM: "объект незавершенного строительства"},
<add> {ORTH: "оф.", NORM: "офис"},
<add> {ORTH: "пав.", NORM: "павильон"},
<add> {ORTH: "помещ.", NORM: "помещение"},
<add> {ORTH: "раб.уч.", NORM: "рабочий участок"},
<add> {ORTH: "скл.", NORM: "склад"},
<add> {ORTH: "coop.", NORM: "сооружение"},
<add> {ORTH: "стр.", NORM: "строение"},
<add> {ORTH: "торг.зал", NORM: "торговый зал"},
<add> {ORTH: "а/п", NORM: "аэропорт"},
<add> {ORTH: "им.", NORM: "имени"},
<add>]:
<add> _exc[abbr[ORTH]] = [abbr]
<add>
<ide>
<del>for slang_desc in _slang_exc:
<del> _exc[slang_desc[ORTH]] = [slang_desc]
<add>for abbr in [
<add> # Others abbreviations
<add> {ORTH: "тыс.руб.", NORM: "тысяч рублей"},
<add> {ORTH: "тыс.", NORM: "тысяч"},
<add> {ORTH: "руб.", NORM: "рубль"},
<add> {ORTH: "долл.", NORM: "доллар"},
<add> {ORTH: "прим.", NORM: "примечание"},
<add> {ORTH: "прим.ред.", NORM: "примечание редакции"},
<add> {ORTH: "см. также", NORM: "смотри также"},
<add> {ORTH: "кв.м.", NORM: "квадрантный метр"},
<add> {ORTH: "м2", NORM: "квадрантный метр"},
<add> {ORTH: "б/у", NORM: "бывший в употреблении"},
<add> {ORTH: "сокр.", NORM: "сокращение"},
<add> {ORTH: "чел.", NORM: "человек"},
<add> {ORTH: "б.п.", NORM: "базисный пункт"},
<add>]:
<add> _exc[abbr[ORTH]] = [abbr]
<ide>
<ide>
<ide> TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc) | 1 |
Javascript | Javascript | prefix private modules with `_` | 2933b887cd425061192489dcef81816644957f4d | <ide><path>Libraries/EventEmitter/RCTDeviceEventEmitter.js
<ide> 'use strict';
<ide>
<ide> import EventEmitter from '../vendor/emitter/EventEmitter';
<del>import type EmitterSubscription from '../vendor/emitter/EmitterSubscription';
<del>import EventSubscriptionVendor from '../vendor/emitter/EventSubscriptionVendor';
<add>import type EmitterSubscription from '../vendor/emitter/_EmitterSubscription';
<add>import EventSubscriptionVendor from '../vendor/emitter/_EventSubscriptionVendor';
<ide>
<ide> function checkNativeEventModule(eventType: ?string) {
<ide> if (eventType) {
<add><path>Libraries/vendor/emitter/_EmitterSubscription.js
<del><path>Libraries/vendor/emitter/EmitterSubscription.js
<ide>
<ide> 'use strict';
<ide>
<del>const EventSubscription = require('./EventSubscription');
<del>
<ide> import type EventEmitter from './EventEmitter';
<del>import type EventSubscriptionVendor from './EventSubscriptionVendor';
<add>import EventSubscription from './_EventSubscription';
<add>import type EventSubscriptionVendor from './_EventSubscriptionVendor';
<ide>
<ide> /**
<ide> * EmitterSubscription represents a subscription with listener and context data.
<ide><path>Libraries/vendor/emitter/_EventEmitter.js
<ide>
<ide> const invariant = require('invariant');
<ide>
<del>import EmitterSubscription from './EmitterSubscription';
<del>import EventSubscriptionVendor from './EventSubscriptionVendor';
<add>import EmitterSubscription from './_EmitterSubscription';
<add>import EventSubscriptionVendor from './_EventSubscriptionVendor';
<ide>
<ide> const sparseFilterPredicate = () => true;
<ide>
<add><path>Libraries/vendor/emitter/_EventSubscription.js
<del><path>Libraries/vendor/emitter/EventSubscription.js
<ide>
<ide> 'use strict';
<ide>
<del>import type EventSubscriptionVendor from './EventSubscriptionVendor';
<add>import type EventSubscriptionVendor from './_EventSubscriptionVendor';
<ide>
<ide> /**
<ide> * EventSubscription represents a subscription to a particular event. It can
<add><path>Libraries/vendor/emitter/_EventSubscriptionVendor.js
<del><path>Libraries/vendor/emitter/EventSubscriptionVendor.js
<ide>
<ide> const invariant = require('invariant');
<ide>
<del>import type EventSubscription from './EventSubscription';
<add>import type EventSubscription from './_EventSubscription';
<ide>
<ide> /**
<ide> * EventSubscriptionVendor stores a set of EventSubscriptions that are | 5 |
Javascript | Javascript | use abstractmethoderror all over the project | 7a3d9e38de749a1803dbda8d6ba5bd2b04ee6f80 | <ide><path>lib/DependencyTemplate.js
<ide>
<ide> "use strict";
<ide>
<add>const AbstractMethodError = require("./AbstractMethodError");
<add>
<ide> /** @typedef {import("webpack-sources").ReplaceSource} ReplaceSource */
<ide> /** @typedef {import("./ChunkGraph")} ChunkGraph */
<ide> /** @typedef {import("./Dependency")} Dependency */
<ide> class DependencyTemplate {
<ide> * @returns {void}
<ide> */
<ide> apply(dependency, source, templateContext) {
<del> throw new Error("DependencyTemplate.apply must be overriden");
<add> throw new AbstractMethodError();
<ide> }
<ide> }
<ide>
<ide><path>lib/Generator.js
<ide>
<ide> "use strict";
<ide>
<add>const AbstractMethodError = require("./AbstractMethodError");
<add>
<ide> /** @typedef {import("webpack-sources").Source} Source */
<ide> /** @typedef {import("./ChunkGraph")} ChunkGraph */
<ide> /** @typedef {import("./DependencyTemplate")} DependencyTemplate */
<ide> class Generator {
<ide> * @returns {Set<string>} available types (do not mutate)
<ide> */
<ide> getTypes() {
<del> throw new Error("Generator.getTypes: must be overridden");
<add> throw new AbstractMethodError();
<ide> }
<ide>
<ide> /**
<ide> class Generator {
<ide> * @returns {number} estimate size of the module
<ide> */
<ide> getSize(module, type) {
<del> throw new Error("Generator.getSize: must be overridden");
<add> throw new AbstractMethodError();
<ide> }
<ide>
<ide> /**
<ide> class Generator {
<ide> module,
<ide> { dependencyTemplates, runtimeTemplate, moduleGraph, type }
<ide> ) {
<del> throw new Error("Generator.generate: must be overridden");
<add> throw new AbstractMethodError();
<ide> }
<ide> }
<ide>
<ide><path>lib/Module.js
<ide>
<ide> "use strict";
<ide>
<add>const AbstractMethodError = require("./AbstractMethodError");
<ide> const ChunkGraph = require("./ChunkGraph");
<ide> const DependenciesBlock = require("./DependenciesBlock");
<ide> const ModuleGraph = require("./ModuleGraph");
<ide> class Module extends DependenciesBlock {
<ide> * @returns {string} a unique identifier of the module
<ide> */
<ide> identifier() {
<del> throw new Error("Module.identifier: Must be overriden");
<add> throw new AbstractMethodError();
<ide> }
<ide>
<ide> /**
<ide> class Module extends DependenciesBlock {
<ide> * @returns {string} a user readable identifier of the module
<ide> */
<ide> readableIdentifier(requestShortener) {
<del> throw new Error("Module.readableIdentifier: Must be overriden");
<add> throw new AbstractMethodError();
<ide> }
<ide>
<ide> /**
<ide> class Module extends DependenciesBlock {
<ide> * @returns {void}
<ide> */
<ide> build(options, compilation, resolver, fs, callback) {
<del> throw new Error("Module.build: Must be overriden");
<add> throw new AbstractMethodError();
<ide> }
<ide>
<ide> /**
<ide> class Module extends DependenciesBlock {
<ide> */
<ide> source(sourceContext) {
<ide> if (this.codeGeneration === Module.prototype.codeGeneration) {
<del> throw new Error("Module.source: Must be overriden");
<add> throw new AbstractMethodError();
<ide> }
<ide> const sources = this.codeGeneration(sourceContext).sources;
<ide> return sourceContext.type
<ide> class Module extends DependenciesBlock {
<ide> * @returns {number} the estimated size of the module (must be non-zero)
<ide> */
<ide> size(type) {
<del> throw new Error("Module.size: Must be overriden");
<add> throw new AbstractMethodError();
<ide> }
<ide>
<ide> /**
<ide><path>lib/ModuleFactory.js
<ide>
<ide> "use strict";
<ide>
<add>const AbstractMethodError = require("./AbstractMethodError");
<add>
<ide> /** @typedef {import("./Dependency")} Dependency */
<ide> /** @typedef {import("./Module")} Module */
<ide>
<ide> class ModuleFactory {
<ide> * @returns {void}
<ide> */
<ide> create(data, callback) {
<del> throw new Error("ModuleFactory.create must be overridden");
<add> throw new AbstractMethodError();
<ide> }
<ide> }
<ide>
<ide><path>lib/RuntimeModule.js
<ide> "use strict";
<ide>
<ide> const OriginalSource = require("webpack-sources").OriginalSource;
<add>const AbstractMethodError = require("./AbstractMethodError");
<ide> const Module = require("./Module");
<ide>
<ide> /** @typedef {import("webpack-sources").Source} Source */
<ide> class RuntimeModule extends Module {
<ide> * @returns {string} runtime code
<ide> */
<ide> generate() {
<del> throw new Error(
<del> `RuntimeModule: generate() must be overriden in subclass ${this.name}`
<del> );
<add> throw new AbstractMethodError();
<ide> }
<ide>
<ide> /** | 5 |
Go | Go | remove use of iota for consts | 37dc2582d10ec6dfb7597e904f7cff57b6731edc | <ide><path>registry/types.go
<ide> func (av APIVersion) String() string {
<ide>
<ide> // API Version identifiers.
<ide> const (
<del> _ = iota
<del> APIVersion1 APIVersion = iota
<del> APIVersion2
<add> APIVersion1 APIVersion = 1
<add> APIVersion2 APIVersion = 2
<ide> )
<ide>
<ide> var apiVersions = map[APIVersion]string{ | 1 |
PHP | PHP | remove trailing whitespace | 8fc5726920374f40afa773c811eca0c2bc5f0a12 | <ide><path>lib/Cake/Model/Behavior/TranslateBehavior.php
<ide> public function afterSave(Model $model, $created) {
<ide> unset($this->runtime[$model->alias]['beforeValidate'], $this->runtime[$model->alias]['beforeSave']);
<ide> $conditions = array('model' => $model->alias, 'foreign_key' => $model->id);
<ide> $RuntimeModel = $this->translateModel($model);
<del>
<add>
<ide> $fields = array_merge($this->settings[$model->alias], $this->runtime[$model->alias]['fields']);
<ide> if ($created) {
<ide> foreach ($fields as $field) { | 1 |
PHP | PHP | close the cursor after checking the count | 711c47b35375ba38edd1bdcb31417b9bd1a0240b | <ide><path>tests/TestCase/Database/QueryTest.php
<ide> public function testUnionOrderBy()
<ide> ->union($union)
<ide> ->execute();
<ide> $this->assertCount(self::COMMENT_COUNT + self::ARTICLE_COUNT, $result);
<add>
<add> $rows = $result->fetchAll();
<add> $this->assertCount(self::COMMENT_COUNT + self::ARTICLE_COUNT, $result);
<ide> }
<ide>
<ide> /** | 1 |
Go | Go | fix race in access to nodes len | 03088ace1b625de68404367039865533812ca37f | <ide><path>libnetwork/networkdb/cluster.go
<ide> func (nDB *NetworkDB) clusterInit() error {
<ide>
<ide> nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
<ide> NumNodes: func() int {
<del> return len(nDB.nodes)
<add> nDB.RLock()
<add> num := len(nDB.nodes)
<add> nDB.RUnlock()
<add> return num
<ide> },
<ide> RetransmitMult: config.RetransmitMult,
<ide> }
<ide>
<ide> nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
<ide> NumNodes: func() int {
<del> return len(nDB.nodes)
<add> nDB.RLock()
<add> num := len(nDB.nodes)
<add> nDB.RUnlock()
<add> return num
<ide> },
<ide> RetransmitMult: config.RetransmitMult,
<ide> } | 1 |
Python | Python | update visualize_util.py to fix | 2431764fed27199fe6a0751f44312a0b0c04f889 | <ide><path>keras/utils/visualize_util.py
<ide> import pydot
<ide> # old pydot will not work with python3, must use one
<ide> # that works with python3 such as pydot2 or pydot
<del>
<add>from keras.models import Sequential, Graph
<ide>
<ide> def plot(model, to_file='model.png'):
<ide> | 1 |
Text | Text | combine performance docs | 7c66b2f28cf871e1850ea9930759acada679fec7 | <ide><path>docs/SUMMARY.md
<ide> * [Options](general/options.md)
<ide> * [Colors](general/colors.md)
<ide> * [Fonts](general/fonts.md)
<add> * [Performance](general/performance.md)
<ide> * [Configuration](configuration/README.md)
<ide> * [Animations](configuration/animations.md)
<ide> * [Layout](configuration/layout.md)
<ide><path>docs/charts/line.md
<ide> var stackedLine = new Chart(ctx, {
<ide> }
<ide> });
<ide> ```
<del>
<del>## High Performance Line Charts
<del>
<del>When charting a lot of data, the chart render time may start to get quite large. In that case, the following strategies can be used to improve performance.
<del>
<del>### Data Decimation
<del>
<del>Decimating your data will achieve the best results. When there is a lot of data to display on the graph, it doesn't make sense to show tens of thousands of data points on a graph that is only a few hundred pixels wide.
<del>
<del>There are many approaches to data decimation and selection of an algorithm will depend on your data and the results you want to achieve. For instance, [min/max](https://digital.ni.com/public.nsf/allkb/F694FFEEA0ACF282862576020075F784) decimation will preserve peaks in your data but could require up to 4 points for each pixel. This type of decimation would work well for a very noisy signal where you need to see data peaks.
<del>
<del>### Disable Bezier Curves
<del>
<del>If you are drawing lines on your chart, disabling bezier curves will improve render times since drawing a straight line is more performant than a bezier curve.
<del>
<del>To disable bezier curves for an entire chart:
<del>
<del>```javascript
<del>new Chart(ctx, {
<del> type: 'line',
<del> data: data,
<del> options: {
<del> elements: {
<del> line: {
<del> tension: 0 // disables bezier curves
<del> }
<del> }
<del> }
<del>});
<del>```
<del>
<del>### Disable Line Drawing
<del>
<del>If you have a lot of data points, it can be more performant to disable rendering of the line for a dataset and only draw points. Doing this means that there is less to draw on the canvas which will improve render performance.
<del>
<del>To disable lines:
<del>
<del>```javascript
<del>new Chart(ctx, {
<del> type: 'line',
<del> data: {
<del> datasets: [{
<del> showLine: false // disable for a single dataset
<del> }]
<del> },
<del> options: {
<del> showLines: false // disable for all datasets
<del> }
<del>});
<del>```
<del>
<del>### Disable Animations
<del>
<del>If your charts have long render times, it is a good idea to disable animations. Doing so will mean that the chart needs to only be rendered once during an update instead of multiple times. This will have the effect of reducing CPU usage and improving general page performance.
<del>
<del>To disable animations
<del>
<del>```javascript
<del>new Chart(ctx, {
<del> type: 'line',
<del> data: data,
<del> options: {
<del> animation: {
<del> duration: 0 // general animation time
<del> },
<del> hover: {
<del> animationDuration: 0 // duration of animations when hovering an item
<del> },
<del> responsiveAnimationDuration: 0 // animation duration after a resize
<del> }
<del>});
<del>```
<ide><path>docs/general/performance.md
<ide> # Performance
<ide>
<del>Chart.js charts are rendered on `canvas` elements, which makes rendering quite fast. For large datasets or performance sensitive applications, you may wish to consider the tips below:
<add>Chart.js charts are rendered on `canvas` elements, which makes rendering quite fast. For large datasets or performance sensitive applications, you may wish to consider the tips below.
<ide>
<del>* Set `animation: { duration: 0 }` to disable [animations](../configuration/animations.md).
<del>* [Specify a rotation value](https://www.chartjs.org/docs/latest/axes/cartesian/#tick-configuration) by setting `minRotation` and `maxRotation` to the same value
<del>* For large datasets:
<del> * You may wish to sample your data before providing it to Chart.js. E.g. if you have a data point for each day, you may find it more performant to pass in a data point for each week instead
<del> * Set the [`ticks.sampleSize`](../axes/cartesian/README.md#tick-configuration) option in order to render axes more quickly
<add>## Tick Calculation
<add>
<add>### Rotation
<add>
<add>[Specify a rotation value](https://www.chartjs.org/docs/latest/axes/cartesian/#tick-configuration) by setting `minRotation` and `maxRotation` to the same value, which avoids the chart from having to automatically determine a value to use.
<add>
<add>### Sampling
<add>
<add>Set the [`ticks.sampleSize`](../axes/cartesian/README.md#tick-configuration) option. This will determine how large your labels are by looking at only a subset of them in order to render axes more quickly. This works best if there is not a large variance in the size of your labels.
<add>
<add>## Disable Animations
<add>
<add>If your charts have long render times, it is a good idea to disable animations. Doing so will mean that the chart needs to only be rendered once during an update instead of multiple times. This will have the effect of reducing CPU usage and improving general page performance.
<add>
<add>To disable animations
<add>
<add>```javascript
<add>new Chart(ctx, {
<add> type: 'line',
<add> data: data,
<add> options: {
<add> animation: {
<add> duration: 0 // general animation time
<add> },
<add> hover: {
<add> animationDuration: 0 // duration of animations when hovering an item
<add> },
<add> responsiveAnimationDuration: 0 // animation duration after a resize
<add> }
<add>});
<add>```
<add>
<add>## Data Decimation
<add>
<add>Decimating your data will achieve the best results. When there is a lot of data to display on the graph, it doesn't make sense to show tens of thousands of data points on a graph that is only a few hundred pixels wide.
<add>
<add>There are many approaches to data decimation and selection of an algorithm will depend on your data and the results you want to achieve. For instance, [min/max](https://digital.ni.com/public.nsf/allkb/F694FFEEA0ACF282862576020075F784) decimation will preserve peaks in your data but could require up to 4 points for each pixel. This type of decimation would work well for a very noisy signal where you need to see data peaks.
<add>
<add>
<add>## Line Charts
<add>
<add>### Disable Bezier Curves
<add>
<add>If you are drawing lines on your chart, disabling bezier curves will improve render times since drawing a straight line is more performant than a bezier curve.
<add>
<add>To disable bezier curves for an entire chart:
<add>
<add>```javascript
<add>new Chart(ctx, {
<add> type: 'line',
<add> data: data,
<add> options: {
<add> elements: {
<add> line: {
<add> tension: 0 // disables bezier curves
<add> }
<add> }
<add> }
<add>});
<add>```
<add>
<add>### Disable Line Drawing
<add>
<add>If you have a lot of data points, it can be more performant to disable rendering of the line for a dataset and only draw points. Doing this means that there is less to draw on the canvas which will improve render performance.
<add>
<add>To disable lines:
<add>
<add>```javascript
<add>new Chart(ctx, {
<add> type: 'line',
<add> data: {
<add> datasets: [{
<add> showLine: false // disable for a single dataset
<add> }]
<add> },
<add> options: {
<add> showLines: false // disable for all datasets
<add> }
<add>});
<add>``` | 3 |
Javascript | Javascript | remove $flowfixme on view | f40a04ae3a89f0cc05b99b22ba5dde8cf070250e | <ide><path>Libraries/Components/View/ViewNativeComponent.js
<ide> const requireNativeComponent = require('requireNativeComponent');
<ide>
<ide> import type {ViewProps} from 'ViewPropTypes';
<ide>
<del>/* $FlowFixMe(>=0.89.0 site=react_native_fb) This comment suppresses an error
<del> * found when Flow v0.89 was deployed. To see the error, delete this comment
<del> * and run Flow. */
<ide> type ViewNativeComponentType = Class<ReactNative.NativeComponent<ViewProps>>;
<ide>
<ide> const NativeViewComponent = requireNativeComponent('RCTView'); | 1 |
Python | Python | use r-string to avoid raising python 3 syntaxerror | 65abca9a34f3fd6c48a24d6c5d1340cd080dfb92 | <ide><path>research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py
<ide> def worker(j):
<ide> t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))
<ide>
<ide> # Action to take.
<del> action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', '$\Uparrow$ ']
<add> action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', r'$\Uparrow$ ']
<ide> t = ax.text(0.99, 0.99, action_latex[actions[step_number]],
<ide> horizontalalignment='right',
<ide> verticalalignment='top', | 1 |
Python | Python | fix tests after adding r4 instances | b5d6854da0949428073ba79c0ba57fbd8490eb72 | <ide><path>libcloud/test/compute/test_ec2.py
<ide> def test_list_sizes(self):
<ide> self.assertTrue('m2.4xlarge' in ids)
<ide>
<ide> if region_name == 'us-east-1':
<del> self.assertEqual(len(sizes), 55)
<add> self.assertEqual(len(sizes), 61)
<ide> self.assertTrue('cg1.4xlarge' in ids)
<ide> self.assertTrue('cc2.8xlarge' in ids)
<ide> self.assertTrue('cr1.8xlarge' in ids)
<ide> self.assertTrue('x1.32xlarge' in ids)
<ide> elif region_name == 'us-west-1':
<del> self.assertEqual(len(sizes), 46)
<add> self.assertEqual(len(sizes), 52)
<ide> if region_name == 'us-west-2':
<del> self.assertEqual(len(sizes), 53)
<add> self.assertEqual(len(sizes), 59)
<ide> elif region_name == 'ap-southeast-1':
<del> self.assertEqual(len(sizes), 45)
<add> self.assertEqual(len(sizes), 51)
<ide> elif region_name == 'ap-southeast-2':
<del> self.assertEqual(len(sizes), 49)
<add> self.assertEqual(len(sizes), 55)
<ide> elif region_name == 'eu-west-1':
<del> self.assertEqual(len(sizes), 53)
<add> self.assertEqual(len(sizes), 59)
<ide> elif region_name == 'ap-south-1':
<del> self.assertEqual(len(sizes), 29)
<add> self.assertEqual(len(sizes), 35)
<ide>
<ide> self.driver.region_name = region_old
<ide> | 1 |
Javascript | Javascript | fix missing param in benchmark-timers | a27e443df3ae17e9110df043ca2a54d4bd8686f0 | <ide><path>test/parallel/test-benchmark-timers.js
<ide> const runBenchmark = require('../common/benchmark');
<ide> runBenchmark('timers',
<ide> [
<ide> 'type=depth',
<add> 'n=1',
<ide> 'millions=0.000001',
<ide> 'thousands=0.001'
<ide> ], | 1 |
Javascript | Javascript | remove unused variable | a43b6fec454e1f00c326902349b26e5710c3d82e | <ide><path>src/renderers/dom/client/wrappers/__tests__/ReactDOMOption-test.js
<ide> describe('ReactDOMOption', function() {
<ide> spyOn(console, 'error');
<ide> stub = ReactTestUtils.renderIntoDocument(stub);
<ide>
<del> var node = React.findDOMNode(stub);
<del>
<ide> expect(console.error.calls.length).toBe(1);
<ide> expect(console.error.calls[0].args[0]).toContain(
<ide> 'Only strings and numbers are supported as <option> children.' | 1 |
Javascript | Javascript | remove tests placeholders | 8fe1a10ae143ba8cf09f590fe9af67918e663d99 | <ide><path>test/Connector.spec.js
<del>// import expect from 'expect';
<del>// import { Connector } from '../src';
<del>
<del>describe('Components', () => {
<del> describe('Connector', () => {
<del>
<del> it('should initialize');
<del> });
<del>});
<ide><path>test/Provider.spec.js
<del>// import expect from 'expect';
<del>// import { Provider } from '../src';
<del>
<del>describe('Components', () => {
<del> describe('Provider', () => {
<del>
<del> it('should initialize');
<del> });
<del>});
<ide><path>test/connect.spec.js
<del>// import expect from 'expect';
<del>// import { connect } from '../src';
<del>
<del>describe('Decorators', () => {
<del> describe('connect', () => {
<del>
<del> it('should return decorated component');
<del> });
<del>});
<ide><path>test/exports.spec.js
<del>import expect from 'expect';
<del>import * as redux from '../src';
<del>
<del>describe('Redux', () => {
<del>
<del> it('should export necessary components', () => {
<del> const imports = Object.keys(redux);
<del> expect(imports.length).toBe(5);
<del>
<del> expect(imports).toContain('createRedux');
<del> expect(imports).toContain('createDispatcher');
<del>
<del> expect(imports).toContain('compose');
<del> expect(imports).toContain('composeStores');
<del> expect(imports).toContain('bindActionCreators');
<del> });
<del>
<del>});
<ide><path>test/provide.spec.js
<del>// import expect from 'expect';
<del>// import { provide } from '../src';
<del>
<del>describe('Decorators', () => {
<del> describe('provide', () => {
<del>
<del> it('should return decorated component');
<del> });
<del>}); | 5 |
Javascript | Javascript | fix error message check after v8 update | 5746769d682aa91494b66de6d685fcb1257e4b80 | <ide><path>lib/internal/modules/cjs/loader.js
<ide> function enrichCJSError(err) {
<ide> usage. However, some cases are not matching, cases like import statement
<ide> after a comment block and/or after a variable definition.
<ide> */
<del> if (err.message.startsWith('Unexpected token export') ||
<add> if (err.message.startsWith('Unexpected token \'export\'') ||
<ide> (/^\s*import(?=[ {'"*])\s*(?![ (])/).test(lineWithErr)) {
<ide> process.emitWarning(
<ide> 'To load an ES module, set "type": "module" in the package.json or use ' +
<ide><path>test/parallel/test-v8-flags.js
<ide> assert(vm.runInThisContext('%_IsSmi(43)'));
<ide>
<ide> v8.setFlagsFromString('--noallow_natives_syntax');
<ide> assert.throws(function() { eval('%_IsSmi(44)'); },
<del> /^SyntaxError: Unexpected token %$/);
<add> /^SyntaxError: Unexpected token '%'$/);
<ide> assert.throws(function() { vm.runInThisContext('%_IsSmi(45)'); },
<del> /^SyntaxError: Unexpected token %$/);
<add> /^SyntaxError: Unexpected token '%'$/);
<ide><path>test/parallel/test-vm-basic.js
<ide> const vm = require('vm');
<ide> );
<ide> }, {
<ide> type: SyntaxError,
<del> message: 'Unexpected token }'
<add> message: "Unexpected token '}'"
<ide> });
<ide>
<ide> // Tests for failed argument validation | 3 |
PHP | PHP | remove the upgrade shell | 756eb3c7c6f3545dcd3e1b085c200beb41a39451 | <ide><path>src/Console/Command/UpgradeShell.php
<del><?php
<del>/**
<del> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<del> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<del> *
<del> * Licensed under The MIT License
<del> * For full copyright and license information, please see the LICENSE.txt
<del> * Redistributions of files must retain the above copyright notice.
<del> *
<del> * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<del> * @link http://cakephp.org CakePHP(tm) Project
<del> * @since CakePHP(tm) v 2.0
<del> * @license http://www.opensource.org/licenses/mit-license.php MIT License
<del> */
<del>namespace Cake\Console\Command;
<del>
<del>use Cake\Console\Shell;
<del>use Cake\Core\App;
<del>use Cake\Core\Plugin;
<del>use Cake\Utility\Folder;
<del>use Cake\Utility\Inflector;
<del>
<del>/**
<del> * A shell class to help developers upgrade applications to CakePHP 3.0
<del> *
<del> */
<del>class UpgradeShell extends Shell {
<del>
<del>/**
<del> * Files
<del> *
<del> * @var array
<del> */
<del> protected $_files = [];
<del>
<del>/**
<del> * Paths
<del> *
<del> * @var array
<del> */
<del> protected $_paths = [];
<del>
<del>/**
<del> * Shell startup, prints info message about dry run.
<del> *
<del> * @return void
<del> */
<del> public function startup() {
<del> parent::startup();
<del> if ($this->params['dryRun']) {
<del> $this->out(__d('cake_console', '<warning>Dry-run mode enabled!</warning>'), 1, Shell::QUIET);
<del> }
<del> }
<del>
<del>/**
<del> * Run all upgrade steps one at a time.
<del> *
<del> * @return void
<del> */
<del> public function all() {
<del> foreach ($this->OptionParser->subcommands() as $command) {
<del> $name = $command->name();
<del> if ($name === 'all') {
<del> continue;
<del> }
<del> $this->out(__d('cake_console', 'Running %s', $name));
<del> $this->$name();
<del> }
<del> }
<del>
<del>/**
<del> * Move files and folders to their new homes.
<del> *
<del> * @return void
<del> */
<del> public function locations() {
<del> $path = $this->_getPath();
<del>
<del> $moves = [
<del> 'Test' . DS . 'Case' => 'Test' . DS . 'TestCase',
<del> 'View' . DS . 'Elements' => 'Template' . DS . 'Element',
<del> 'View' . DS . 'Emails' => 'Template' . DS . 'Email',
<del> 'View' . DS . 'Layouts' => 'Template' . DS . 'Layout',
<del> 'Template' . DS . 'Layout' . DS . 'Emails' => 'Template' . DS . 'Layout' . DS . 'Email',
<del> 'View' . DS . 'Scaffolds' => 'Template' . DS . 'Scaffold',
<del> 'View' . DS . 'Errors' => 'Template' . DS . 'Error',
<del> 'View' . DS . 'Themed' => 'Template' . DS . 'Themed',
<del> ];
<del> $dry = $this->params['dryRun'];
<del>
<del> $this->out('<info>Creating "Template" folder</info>');
<del> if (!$dry) {
<del> mkdir('Template');
<del> }
<del>
<del> foreach ($moves as $old => $new) {
<del> $old = $path . DS . $old;
<del> $new = $path . DS . $new;
<del> if (!is_dir($old)) {
<del> continue;
<del> }
<del> $this->out(__d('cake_console', '<info>Moving %s to %s</info>', $old, $new));
<del> if ($dry) {
<del> continue;
<del> }
<del> if ($this->params['git']) {
<del> exec('git mv -f ' . escapeshellarg($old) . ' ' . escapeshellarg($old . '__'));
<del> exec('git mv -f ' . escapeshellarg($old . '__') . ' ' . escapeshellarg($new));
<del> } else {
<del> $Folder = new Folder($old);
<del> $Folder->move($new);
<del> }
<del> }
<del> }
<del>
<del>/**
<del> * Rename classes that have moved during 3.0
<del> *
<del> * @return void
<del> */
<del> public function rename_classes() {
<del> $path = $this->_getPath();
<del> $Folder = new Folder($path);
<del> $this->_paths = $Folder->tree(null, false, 'dir');
<del> $this->_findFiles('php');
<del> foreach ($this->_files as $filePath) {
<del> $this->_renameClasses($filePath, $this->params['dryRun']);
<del> }
<del> $this->out(__d('cake_console', '<success>Class names updated.</success>'));
<del> }
<del>
<del>/**
<del> * Rename the classes in a given file.
<del> *
<del> * @param string $path The path to operate on.
<del> * @param boolean $dryRun Whether or not dry run is on.
<del> * @return void
<del> */
<del> protected function _renameClasses($path, $dryRun) {
<del> $replacements = [
<del> 'Cake\Network\Http\HttpSocket' => 'Cake\Network\Http\Client',
<del> 'HttpSocket' => 'Client',
<del> 'Cake\Model\ConnectionManager' => 'Cake\Database\ConnectionManager',
<del> 'Cake\Configure\ConfigReaderInterface' => 'Cake\Configure\ConfigEngineInterface',
<del> 'ConfigReaderInterface' => 'ConfigEngineInterface',
<del> 'Cake\Configure\PhpReader' => 'Cake\Configure\Engine\PhpConfig',
<del> 'PhpReader' => 'PhpConfig',
<del> 'Cake\Configure\IniReader' => 'Cake\Configure\Engine\IniConfig',
<del> 'IniReader' => 'IniConfig',
<del> ];
<del> $contents = file_get_contents($path);
<del> $contents = str_replace(
<del> array_keys($replacements),
<del> array_values($replacements),
<del> $contents,
<del> $count
<del> );
<del> if ($count === 0) {
<del> $this->out(
<del> __d('cake_console', '<info>Skip %s as there are no renames to do.</info>', $path),
<del> 1,
<del> Shell::VERBOSE
<del> );
<del> return;
<del> }
<del> $this->_saveFile($path, $contents);
<del> }
<del>
<del>/**
<del> * Save a file conditionally depending on dryRun flag.
<del> *
<del> * @param string $path The path to update.
<del> * @param string $contents The contents to put in the file.
<del> * @return boolean
<del> */
<del> protected function _saveFile($path, $contents) {
<del> $result = true;
<del> if (!$this->params['dryRun']) {
<del> $result = file_put_contents($path, $contents);
<del> }
<del> if ($result) {
<del> $this->out(__d('cake_console', '<success>Done updating %s</success>', $path), 1);
<del> return;
<del> }
<del> $this->err(__d(
<del> 'cake_console',
<del> '<error>Error</error> Was unable to update %s',
<del> $path
<del> ));
<del> return $result;
<del> }
<del>
<del>/**
<del> * Convert App::uses() to normal use statements.
<del> *
<del> * @return void
<del> */
<del> public function app_uses() {
<del> $path = $this->_getPath();
<del> $Folder = new Folder($path);
<del> $this->_paths = $Folder->tree(null, false, 'dir');
<del> $this->_findFiles('php');
<del> foreach ($this->_files as $filePath) {
<del> $this->_replaceUses($filePath, $this->params['dryRun']);
<del> }
<del> $this->out(__d('cake_console', '<success>App::uses() replaced successfully</success>'));
<del> }
<del>
<del>/**
<del> * Replace all the App::uses() calls with `use`.
<del> *
<del> * @param string $file The file to search and replace.
<del> */
<del> protected function _replaceUses($file) {
<del> $pattern = '#App::uses\([\'"]([a-z0-9_]+)[\'"],\s*[\'"]([a-z0-9/_]+)(?:\.([a-z0-9/_]+))?[\'"]\)#i';
<del> $contents = file_get_contents($file);
<del>
<del> $self = $this;
<del>
<del> $replacement = function ($matches) use ($file) {
<del> $matches = $this->_mapClassName($matches);
<del> if (count($matches) === 4) {
<del> $use = $matches[3] . '\\' . $matches[2] . '\\' . $matches[1];
<del> } elseif ($matches[2] == 'Vendor') {
<del> $this->out(
<del> __d('cake_console', '<info>Skip %s as it is a vendor library.</info>', $matches[1]),
<del> 1,
<del> Shell::VERBOSE
<del> );
<del> return $matches[0];
<del> } else {
<del> $use = 'Cake\\' . str_replace('/', '\\', $matches[2]) . '\\' . $matches[1];
<del> }
<del>
<del> if (!class_exists($use)) {
<del> $use = 'App\\' . substr($use, 5);
<del> }
<del>
<del> return 'use ' . $use;
<del> };
<del>
<del> $contents = preg_replace_callback($pattern, $replacement, $contents, -1, $count);
<del>
<del> if (!$count) {
<del> $this->out(
<del> __d('cake_console', '<info>Skip %s as there are no App::uses()</info>', $file),
<del> 1,
<del> Shell::VERBOSE
<del> );
<del> return;
<del> }
<del>
<del> $this->out(__d('cake_console', '<info> * Updating App::uses()</info>'), 1, Shell::VERBOSE);
<del> $this->_saveFile($file, $contents);
<del> }
<del>
<del>/**
<del> * Convert old classnames to new ones.
<del> * Strips the Cake prefix off of classes that no longer have it.
<del> *
<del> * @param array $matches
<del> * @return array Class names with Cake prefixes removed
<del> */
<del> protected function _mapClassName($matches) {
<del> $rename = [
<del> 'CakePlugin',
<del> 'CakeEvent',
<del> 'CakeEventListener',
<del> 'CakeEventManager',
<del> 'CakeValidationRule',
<del> 'CakeSocket',
<del> 'CakeRoute',
<del> 'CakeRequest',
<del> 'CakeResponse',
<del> 'CakeSession',
<del> 'CakeLog',
<del> 'CakeNumber',
<del> 'CakeTime',
<del> 'CakeEmail',
<del> 'CakeLogInterface',
<del> 'CakeSessionHandlerInterface',
<del> ];
<del>
<del> if (empty($matches[3])) {
<del> unset($matches[3]);
<del> }
<del> if (in_array($matches[1], $rename)) {
<del> $matches[1] = substr($matches[1], 4);
<del> }
<del> return $matches;
<del> }
<del>
<del>/**
<del> * Add namespaces to files.
<del> *
<del> * @return void
<del> */
<del> public function namespaces() {
<del> $path = $this->_getPath();
<del> $ns = $this->params['namespace'];
<del>
<del> if ($ns === 'App' && isset($this->params['plugin'])) {
<del> $ns = Inflector::camelize($this->params['plugin']);
<del> }
<del>
<del> $Folder = new Folder($path);
<del> $exclude = ['vendor', 'Vendor', 'webroot', 'Plugin', 'tmp'];
<del> if (!empty($this->params['exclude'])) {
<del> $exclude = array_merge($exclude, explode(',', $this->params['exclude']));
<del> }
<del> list($dirs, $files) = $Folder->read(true, true, true);
<del>
<del> $this->_paths = $this->_filterPaths($dirs, $exclude);
<del> $this->_findFiles('php', ['index.php', 'test.php', 'cake.php']);
<del>
<del> foreach ($this->_files as $filePath) {
<del> $this->_addNamespace($path, $filePath, $ns, $this->params['dryRun']);
<del> }
<del> $this->out(__d('cake_console', '<success>Namespaces added successfully</success>'));
<del> }
<del>
<del>/**
<del> * Update fixtures
<del> *
<del> * @return void
<del> */
<del> public function fixtures() {
<del> $path = $this->_getPath();
<del>
<del> $app = rtrim(APP, DS);
<del> if ($path === $app || !empty($this->params['plugin'])) {
<del> $path .= DS . 'Test' . DS . 'Fixture' . DS;
<del> }
<del> $this->out(__d('cake_console', 'Processing fixtures on %s', $path));
<del> $this->_paths[] = realpath($path);
<del> $this->_findFiles('php');
<del> foreach ($this->_files as $file) {
<del> $this->out(__d('cake_console', 'Updating %s...', $file), 1, Shell::VERBOSE);
<del> $content = $this->_processFixture(file_get_contents($file));
<del> $this->_saveFile($file, $content);
<del> }
<del> }
<del>
<del>/**
<del> * Process fixture content and update it for 3.x
<del> *
<del> * @param string $content Fixture content.
<del> * @return string
<del> */
<del> protected function _processFixture($content) {
<del> // Serializes data from PHP data into PHP code.
<del> // Basically a code style conformant version of var_export()
<del> $export = function ($values) use (&$export) {
<del> $vals = [];
<del> if (!is_array($values)) {
<del> return $vals;
<del> }
<del> foreach ($values as $key => $val) {
<del> if (is_array($val)) {
<del> $vals[] = "'{$key}' => [" . implode(', ', $export($val)) . ']';
<del> } else {
<del> $val = var_export($val, true);
<del> if ($val === 'NULL') {
<del> $val = 'null';
<del> }
<del> if (!is_numeric($key)) {
<del> $vals[] = "'{$key}' => {$val}";
<del> } else {
<del> $vals[] = "{$val}";
<del> }
<del> }
<del> }
<del> return $vals;
<del> };
<del>
<del> // Process field property.
<del> $processor = function ($matches) use ($export) {
<del> //@codingStandardsIgnoreStart
<del> eval('$data = [' . $matches[2] . '];');
<del> //@codingStandardsIgnoreEnd
<del>
<del> $constraints = [];
<del> $out = [];
<del> foreach ($data as $field => $properties) {
<del> // Move primary key into a constraint
<del> if (isset($properties['key']) && $properties['key'] === 'primary') {
<del> $constraints['primary'] = [
<del> 'type' => 'primary',
<del> 'columns' => [$field]
<del> ];
<del> }
<del> if (isset($properties['key'])) {
<del> unset($properties['key']);
<del> }
<del> if ($field !== 'indexes' && $field !== 'tableParameters') {
<del> $out[$field] = $properties;
<del> }
<del> }
<del>
<del> // Process indexes. Unique keys work differently now.
<del> if (isset($data['indexes'])) {
<del> foreach ($data['indexes'] as $index => $indexProps) {
<del> if (isset($indexProps['column'])) {
<del> $indexProps['columns'] = $indexProps['column'];
<del> unset($indexProps['column']);
<del> }
<del> // Move unique indexes over
<del> if (!empty($indexProps['unique'])) {
<del> unset($indexProps['unique']);
<del> $constraints[$index] = ['type' => 'unique'] + $indexProps;
<del> continue;
<del> }
<del> $out['_indexes'][$index] = $indexProps;
<del> }
<del> }
<del> if (count($constraints)) {
<del> $out['_constraints'] = $constraints;
<del> }
<del>
<del> // Process table parameters
<del> if (isset($data['tableParameters'])) {
<del> $out['_options'] = $data['tableParameters'];
<del> }
<del> return $matches[1] . "\n\t\t" . implode(",\n\t\t", $export($out)) . "\n\t" . $matches[3];
<del> };
<del> $content = preg_replace_callback(
<del> '/(public \$fields\s+=\s+(?:array\(|\[))(.*?)(\);|\];)/ms',
<del> $processor,
<del> $content,
<del> -1,
<del> $count
<del> );
<del> if ($count) {
<del> $this->out(__d('cake_console', 'Updated $fields property'), 1, Shell::VERBOSE);
<del> }
<del> return $content;
<del> }
<del>
<del>/**
<del> * Rename collection classes
<del> *
<del> * @return void
<del> */
<del> public function rename_collections() {
<del> $path = $this->_getPath();
<del>
<del> $Folder = new Folder($path);
<del> $this->_paths = $Folder->tree(null, false, 'dir');
<del> $this->_findFiles('php');
<del> foreach ($this->_files as $filePath) {
<del> $patterns = [
<del> [
<del> 'Replace $this->_Collection with $this->_registry',
<del> '#\$this->_Collection#',
<del> '$this->_registry',
<del> ],
<del> [
<del> 'Replace ComponentCollection arguments',
<del> '#ComponentCollection\s+\$collection#',
<del> 'ComponentRegistry $registry',
<del> ],
<del> [
<del> 'Rename ComponentCollection',
<del> '#ComponentCollection#',
<del> 'ComponentRegistry',
<del> ],
<del> [
<del> 'Rename HelperCollection',
<del> '#HelperCollection#',
<del> 'HelperRegistry',
<del> ],
<del> [
<del> 'Rename TaskCollection',
<del> '#TaskCollection#',
<del> 'TaskRegistry',
<del> ],
<del> ];
<del> $this->_updateFileRegexp($filePath, $patterns);
<del> }
<del> $this->out(__d('cake_console', '<success>Collection class uses renamed successfully.</success>'));
<del> }
<del>
<del>/**
<del> * Update test case assertion methods.
<del> *
<del> * @return void
<del> */
<del> public function tests() {
<del> $path = $this->_getPath();
<del>
<del> $Folder = new Folder($path);
<del> $this->_paths = $Folder->tree(null, false, 'dir');
<del> $this->_findFiles('php');
<del> foreach ($this->_files as $filePath) {
<del> $patterns = [
<del> [
<del> 'Replace assertEqual() with assertEquals()',
<del> '#\$this-\>assertEqual\(#i',
<del> '$this->assertEquals(',
<del> ],
<del> [
<del> 'Replace assertNotEqual() with assertNotEquals()',
<del> '#\$this-\>assertNotEqual\(#i',
<del> '$this->assertNotEquals(',
<del> ],
<del> [
<del> 'Replace assertIdentical() with assertSame()',
<del> '#\$this-\>assertIdentical\(#i',
<del> '$this->assertSame(',
<del> ],
<del> [
<del> 'Replace assertNotIdentical() with assertNotSame()',
<del> '#\$this-\>assertNotIdentical\(#i',
<del> '$this->assertNotSame(',
<del> ],
<del> [
<del> 'Replace assertPattern() with assertRegExp()',
<del> '#\$this-\>assertPattern\(#i',
<del> '$this->assertRegExp(',
<del> ],
<del> [
<del> 'Replace assertNoPattern() with assertNotRegExp()',
<del> '#\$this-\>assertNoPattern\(#i',
<del> '$this->assertNotRegExp(',
<del> ],
<del> [
<del> 'Replace assertReference() with assertSame()',
<del> '#\$this-\>assertReference\(\$(.*?),\s*\'(.*?)\'\)#i',
<del> '$this->assertSame($\1, $\2)',
<del> ],
<del> [
<del> 'Replace assertIsA() with assertInstanceOf()',
<del> '#\$this-\>assertIsA\(\$(.*?),\s*\'(.*?)\'\)#i',
<del> '$this->assertInstanceOf(\'\2\', $\1)',
<del> ],
<del> [
<del> 'Replace assert*($is, $expected) with assert*($expected, $is) - except for assertTags()',
<del> '/\bassert((?!tags)\w+)\(\$(\w+),\s*\$expected\)/i',
<del> 'assert\1($expected, $\2)'
<del> ]
<del> ];
<del> $this->_updateFileRegexp($filePath, $patterns);
<del> }
<del> $this->out(__d('cake_console', '<success>Assertion methods renamed successfully.</success>'));
<del> }
<del>
<del>/**
<del> * Filter paths to remove webroot, Plugin, tmp directories.
<del> * @param array $paths A list of directory paths
<del> * @param array $directories A list of directories to exlcude
<del> * @return array
<del> */
<del> protected function _filterPaths($paths, $directories) {
<del> return array_filter($paths, function ($path) use ($directories) {
<del> foreach ($directories as $dir) {
<del> if (strpos($path, DS . $dir) !== false) {
<del> return false;
<del> }
<del> }
<del> return true;
<del> });
<del> }
<del>
<del>/**
<del> * Adds the namespace to a given file.
<del> *
<del> * @param string $path The path of the file's location
<del> * @param string $filePath The file to add a namespace to.
<del> * @param string $ns The base namespace to use.
<del> * @param boolean $dry Whether or not to operate in dry-run mode.
<del> * @return void
<del> */
<del> protected function _addNamespace($path, $filePath, $ns, $dry) {
<del> $result = true;
<del> $shortPath = str_replace($path, '', $filePath);
<del> $contents = file_get_contents($filePath);
<del> if (preg_match('/namespace\s+[a-z0-9\\\]+;/', $contents)) {
<del> $this->out(__d(
<del> 'cake_console',
<del> '<warning>Skipping %s as it already has a namespace.</warning>',
<del> $shortPath
<del> ));
<del> return;
<del> }
<del> $namespace = trim($ns . str_replace(DS, '\\', dirname($shortPath)), '\\');
<del> $patterns = [
<del> [
<del> 'namespace to ' . $namespace,
<del> '#^(<\?(?:php)?\s+(?:\/\*.*?\*\/\s{0,1})?)#s',
<del> "\\1namespace " . $namespace . ";\n",
<del> ]
<del> ];
<del> $this->_updateFileRegexp($filePath, $patterns);
<del> }
<del>
<del>/**
<del> * Updates files based on regular expressions.
<del> *
<del> * @param array $patterns Array of search and replacement patterns.
<del> * @return void
<del> */
<del> protected function _filesRegexpUpdate($patterns) {
<del> $this->_findFiles($this->params['ext']);
<del> foreach ($this->_files as $file) {
<del> $this->out(__d('cake_console', 'Updating %s...', $file), 1, Shell::VERBOSE);
<del> $this->_updateFileRegexp($file, $patterns);
<del> }
<del> }
<del>
<del>/**
<del> * Searches the paths and finds files based on extension.
<del> *
<del> * @param string $extensions
<del> * @param array $exclude An array if filenames to exlcude
<del> * @return void
<del> */
<del> protected function _findFiles($extensions = '', $exclude = []) {
<del> $this->_files = [];
<del> foreach ($this->_paths as $path) {
<del> if (!is_dir($path)) {
<del> continue;
<del> }
<del> $Iterator = new \RegexIterator(
<del> new \RecursiveIteratorIterator(new \RecursiveDirectoryIterator($path)),
<del> '/^.+\.(' . $extensions . ')$/i',
<del> \RegexIterator::MATCH
<del> );
<del> foreach ($Iterator as $file) {
<del> if ($file->isFile() && !in_array($file->getFilename(), $exclude)) {
<del> $this->_files[] = $file->getPathname();
<del> }
<del> }
<del> }
<del> }
<del>
<del>/**
<del> * Update a single file with an number of pcre pattern replacements.
<del> *
<del> * @param string $file The file to update
<del> * @param array $patterns The replacement patterns to run.
<del> * @return void
<del> */
<del> protected function _updateFileRegexp($file, $patterns) {
<del> $contents = file_get_contents($file);
<del>
<del> foreach ($patterns as $pattern) {
<del> $this->out(__d('cake_console', '<info> * Updating %s</info>', $pattern[0]), 1, Shell::VERBOSE);
<del> $contents = preg_replace($pattern[1], $pattern[2], $contents);
<del> }
<del> $this->_saveFile($file, $contents);
<del> }
<del>
<del>/**
<del> * Get the path to operate on. Uses either the first argument,
<del> * or the plugin parameter if its set.
<del> *
<del> * @return string
<del> */
<del> protected function _getPath() {
<del> $path = isset($this->args[0]) ? $this->args[0] : APP;
<del> if (isset($this->params['plugin'])) {
<del> $path = Plugin::path($this->params['plugin']);
<del> }
<del> return rtrim($path, DS);
<del> }
<del>
<del>/**
<del> * Get the option parser.
<del> *
<del> * @return ConsoleOptionParser
<del> */
<del> public function getOptionParser() {
<del> $plugin = [
<del> 'short' => 'p',
<del> 'help' => __d('cake_console', 'The plugin to update. Only the specified plugin will be updated.')
<del> ];
<del> $dryRun = [
<del> 'short' => 'd',
<del> 'help' => __d('cake_console', 'Dry run the update, no files will actually be modified.'),
<del> 'boolean' => true
<del> ];
<del> $git = [
<del> 'help' => __d('cake_console', 'Perform git operations. eg. git mv instead of just moving files.'),
<del> 'boolean' => true
<del> ];
<del> $namespace = [
<del> 'help' => __d('cake_console', 'Set the base namespace you want to use. Defaults to App or the plugin name.'),
<del> 'default' => 'App',
<del> ];
<del> $exclude = [
<del> 'help' => __d('cake_console', 'Comma separated list of top level diretories to exclude.'),
<del> 'default' => '',
<del> ];
<del> $path = [
<del> 'help' => __d('cake_console', 'The path to operate on. Will default to APP or the plugin option.'),
<del> 'required' => false,
<del> ];
<del>
<del> return parent::getOptionParser()
<del> ->description(__d('cake_console', "A shell to help automate upgrading from CakePHP 3.0 to 2.x. \n" .
<del> "Be sure to have a backup of your application before running these commands."))
<del> ->addSubcommand('all', [
<del> 'help' => __d('cake_console', 'Run all upgrade commands.'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun'), 'arguments' => compact('path')]
<del> ])
<del> ->addSubcommand('locations', [
<del> 'help' => __d('cake_console', 'Move files/directories around. Run this *before* adding namespaces with the namespaces command.'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun', 'git'), 'arguments' => compact('path')]
<del> ])
<del> ->addSubcommand('namespaces', [
<del> 'help' => __d('cake_console', 'Add namespaces to files based on their file path. Only run this *after* you have moved files with locations.'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun', 'namespace', 'exclude'), 'arguments' => compact('path')]
<del> ])
<del> ->addSubcommand('app_uses', [
<del> 'help' => __d('cake_console', 'Replace App::uses() with use statements'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun'), 'arguments' => compact('path')]
<del> ])
<del> ->addSubcommand('rename_classes', [
<del> 'help' => __d('cake_console', 'Rename classes that have been moved/renamed. Run after replacing App::uses().'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun'), 'arguments' => compact('path')]
<del> ])
<del> ->addSubcommand('fixtures', [
<del> 'help' => __d('cake_console', 'Update fixtures to use new index/constraint features. This is necessary before running tests.'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun'), 'arguments' => compact('path')],
<del> ])
<del> ->addSubcommand('rename_collections', [
<del> 'help' => __d('cake_console', 'Rename HelperCollection, ComponentCollection, and TaskCollection. Will also rename component constructor arguments and _Collection properties on all objects.'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun'), 'arguments' => compact('path')]
<del> ])
<del> ->addSubcommand('tests', [
<del> 'help' => __d('cake_console', 'Rename test case assertion methods.'),
<del> 'parser' => ['options' => compact('plugin', 'dryRun'), 'arguments' => compact('path')]
<del> ]);
<del> }
<del>
<del>}
<ide><path>tests/TestCase/Console/Command/CommandListShellTest.php
<ide> public function testMain() {
<ide> $expected = "/\[.*TestPluginTwo.*\] example, welcome/";
<ide> $this->assertRegExp($expected, $output);
<ide>
<del> $expected = "/\[.*CORE.*\] bake, i18n, server, test, upgrade/";
<add> $expected = "/\[.*CORE.*\] bake, i18n, server, test/";
<ide> $this->assertRegExp($expected, $output);
<ide>
<ide> $expected = "/\[.*app.*\] sample/";
<ide><path>tests/TestCase/Console/Command/CompletionShellTest.php
<ide> public function testCommands() {
<ide> $this->Shell->runCommand('commands', array());
<ide> $output = $this->Shell->stdout->output;
<ide>
<del> $expected = "TestPlugin.example TestPluginTwo.example TestPluginTwo.welcome bake i18n server test upgrade sample\n";
<add> $expected = "TestPlugin.example TestPluginTwo.example TestPluginTwo.welcome bake i18n server test sample\n";
<ide> $this->assertEquals($expected, $output);
<ide> }
<ide> | 3 |
Text | Text | fix the broken link | cfb07e642d23622412c5455ca70c42140268f9fe | <ide><path>docs/swarm/swarm-tutorial/index.md
<ide> If you are brand new to Docker, see [About Docker Engine](../../index.md).
<ide> To run this tutorial, you need the following:
<ide>
<ide> * [three networked host machines](#three-networked-host-machines)
<del>* [Docker Engine 1.12 or later installed](#docker-engine-1-12-or-later)
<add>* [Docker Engine 1.12 or later installed](#docker-engine-1-12-or-newer)
<ide> * [the IP address of the manager machine](#the-ip-address-of-the-manager-machine)
<ide> * [open ports between the hosts](#open-ports-between-the-hosts)
<ide> | 1 |
Python | Python | fix typo in celery.bin.multi document | 7cf08574449f7c051fca7fac44f41d56385466a7 | <ide><path>celery/bin/multi.py
<ide> $ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
<ide> -Q default -L:4,5 DEBUG
<ide>
<del> $ # Additional options are added to each celery worker' comamnd,
<add> $ # Additional options are added to each celery worker' command,
<ide> $ # but you can also modify the options for ranges of, or specific workers
<ide>
<ide> $ # 3 workers: Two with 3 processes, and one with 10 processes. | 1 |
Text | Text | add v4.4.2 to changelog | c18edfca05598cdc6e683566ac0ffaa64dd9de04 | <ide><path>CHANGELOG.md
<ide> # Ember Changelog
<ide>
<add>### v4.4.2 (June 13, 2022)
<add>
<add>- [#20114](https://github.com/emberjs/ember.js/pull/20114) [BUGFIX] Fix generated import paths for test setup functions in addons
<add>
<ide> ### v4.5.0-beta.2 (June 6, 2022)
<ide>
<ide> - [#20082](https://github.com/emberjs/ember.js/pull/20082) [BUGFIX] Fix blueprint generation | 1 |
Python | Python | add compat for thinc.neural.optimizers.optimizer | 4d048e94d3eaa88e038e56967c0bf7599d11f6ae | <ide><path>spacy/compat.py
<ide> except ImportError:
<ide> cupy = None
<ide>
<add>try:
<add> from thinc.optimizers import Optimizer
<add>except ImportError:
<add> from thinc.optimizers import Adam as Optimizer
<ide>
<ide> pickle = pickle
<ide> copy_reg = copy_reg
<ide><path>spacy/language.py
<ide> from contextlib import contextmanager
<ide>
<ide> from thinc.neural import Model
<del>from thinc.neural.optimizers import Adam
<ide> import random
<ide> import ujson
<ide> from collections import OrderedDict
<ide> from .pipeline import NeuralDependencyParser, TokenVectorEncoder, NeuralTagger
<ide> from .pipeline import NeuralEntityRecognizer, SimilarityHook, TextCategorizer
<ide>
<add>from .compat import Optimizer
<ide> from .compat import json_dumps, izip, copy_reg
<ide> from .scorer import Scorer
<ide> from ._ml import link_vectors_to_models
<ide> def update(self, docs, golds, drop=0., sgd=None, losses=None):
<ide> return
<ide> if sgd is None:
<ide> if self._optimizer is None:
<del> self._optimizer = Adam(Model.ops, 0.001)
<add> self._optimizer = Optimizer(Model.ops, 0.001,
<add> beta1=0.9, beta2=0.0, nesterov=True)
<ide> sgd = self._optimizer
<ide> grads = {}
<ide> def get_grads(W, dW, key=None):
<ide> def resume_training(self, **cfg):
<ide> eps = util.env_opt('optimizer_eps', 1e-08)
<ide> L2 = util.env_opt('L2_penalty', 1e-6)
<ide> max_grad_norm = util.env_opt('grad_norm_clip', 1.)
<del> self._optimizer = Adam(Model.ops, learn_rate, L2=L2, beta1=beta1,
<del> beta2=beta2, eps=eps)
<add> self._optimizer = Optimizer(Model.ops, learn_rate, L2=L2, beta1=beta1,
<add> beta2=beta2, eps=eps, nesterov=True)
<ide> self._optimizer.max_grad_norm = max_grad_norm
<ide> self._optimizer.device = device
<ide> return self._optimizer
<ide> def begin_training(self, get_gold_tuples=None, **cfg):
<ide> eps = util.env_opt('optimizer_eps', 1e-08)
<ide> L2 = util.env_opt('L2_penalty', 1e-6)
<ide> max_grad_norm = util.env_opt('grad_norm_clip', 1.)
<del> self._optimizer = Adam(Model.ops, learn_rate, L2=L2, beta1=beta1,
<add> self._optimizer = Optimizer(Model.ops, learn_rate, L2=L2, beta1=beta1,
<ide> beta2=beta2, eps=eps)
<ide> self._optimizer.max_grad_norm = max_grad_norm
<ide> self._optimizer.device = device | 2 |
Python | Python | improve checks for sourced components | 1ad646cbcf0015cb3b944f98bef1b3a9eeb54e9f | <ide><path>spacy/errors.py
<ide> class Warnings:
<ide> "http://spacy.io/usage/v3#jupyter-notebook-gpu")
<ide> W112 = ("The model specified to use for initial vectors ({name}) has no "
<ide> "vectors. This is almost certainly a mistake.")
<add> W113 = ("Sourced component '{name}' may not work as expected: source "
<add> "vectors are not identical to current pipeline vectors.")
<ide>
<ide>
<ide> @add_codes
<ide> class Errors:
<ide> "returned the initialized nlp object instead?")
<ide> E944 = ("Can't copy pipeline component '{name}' from source '{model}': "
<ide> "not found in pipeline. Available components: {opts}")
<del> E945 = ("Can't copy pipeline component '{name}' from source. Expected loaded "
<del> "nlp object, but got: {source}")
<add> E945 = ("Can't copy pipeline component '{name}' from source. Expected "
<add> "loaded nlp object, but got: {source}")
<ide> E947 = ("`Matcher.add` received invalid `greedy` argument: expected "
<ide> "a string value from {expected} but got: '{arg}'")
<ide> E948 = ("`Matcher.add` received invalid 'patterns' argument: expected "
<ide><path>spacy/language.py
<ide> def create_pipe_from_source(
<ide> name (str): Optional alternative name to use in current pipeline.
<ide> RETURNS (Tuple[Callable, str]): The component and its factory name.
<ide> """
<del> # TODO: handle errors and mismatches (vectors etc.)
<del> if not isinstance(source, self.__class__):
<add> # Check source type
<add> if not isinstance(source, Language):
<ide> raise ValueError(Errors.E945.format(name=source_name, source=type(source)))
<add> # Check vectors, with faster checks first
<add> if self.vocab.vectors.shape != source.vocab.vectors.shape or \
<add> self.vocab.vectors.key2row != source.vocab.vectors.key2row or \
<add> self.vocab.vectors.to_bytes() != source.vocab.vectors.to_bytes():
<add> util.logger.warning(Warnings.W113.format(name=source_name))
<ide> if not source_name in source.component_names:
<ide> raise KeyError(
<ide> Errors.E944.format(
<ide><path>spacy/tests/pipeline/test_pipe_factories.py
<ide> import pytest
<add>import mock
<add>import logging
<ide> from spacy.language import Language
<ide> from spacy.lang.en import English
<ide> from spacy.lang.de import German
<ide> def test_pipe_factories_from_source():
<ide> nlp.add_pipe("custom", source=source_nlp)
<ide>
<ide>
<add>def test_pipe_factories_from_source_language_subclass():
<add> class CustomEnglishDefaults(English.Defaults):
<add> stop_words = set(["custom", "stop"])
<add>
<add> @registry.languages("custom_en")
<add> class CustomEnglish(English):
<add> lang = "custom_en"
<add> Defaults = CustomEnglishDefaults
<add>
<add> source_nlp = English()
<add> source_nlp.add_pipe("tagger")
<add>
<add> # custom subclass
<add> nlp = CustomEnglish()
<add> nlp.add_pipe("tagger", source=source_nlp)
<add> assert "tagger" in nlp.pipe_names
<add>
<add> # non-subclass
<add> nlp = German()
<add> nlp.add_pipe("tagger", source=source_nlp)
<add> assert "tagger" in nlp.pipe_names
<add>
<add> # mismatched vectors
<add> nlp = English()
<add> nlp.vocab.vectors.resize((1, 4))
<add> nlp.vocab.vectors.add("cat", vector=[1, 2, 3, 4])
<add> logger = logging.getLogger("spacy")
<add> with mock.patch.object(logger, "warning") as mock_warning:
<add> nlp.add_pipe("tagger", source=source_nlp)
<add> mock_warning.assert_called()
<add>
<add>
<ide> def test_pipe_factories_from_source_custom():
<ide> """Test adding components from a source model with custom components."""
<ide> name = "test_pipe_factories_from_source_custom" | 3 |
Java | Java | implement flex properties in java version as well | e63a7ea7bd229323edce4fca716a8423100ea4d9 | <ide><path>ReactAndroid/src/main/java/com/facebook/csslayout/CSSLayout.java
<ide> public class CSSLayout {
<ide> public float[] dimensions = new float[2];
<ide> public CSSDirection direction = CSSDirection.LTR;
<ide>
<del> public float flexBasis;
<add> public float computedFlexBasis;
<ide>
<ide> public int generationCount;
<ide> public CSSDirection lastParentDirection;
<ide> public void resetResult() {
<ide> Arrays.fill(dimensions, CSSConstants.UNDEFINED);
<ide> direction = CSSDirection.LTR;
<ide>
<del> flexBasis = 0;
<add> computedFlexBasis = 0;
<ide>
<ide> generationCount = 0;
<ide> lastParentDirection = null;
<ide><path>ReactAndroid/src/main/java/com/facebook/csslayout/CSSNode.java
<ide> public void setWrap(CSSWrap flexWrap) {
<ide> */
<ide> @Override
<ide> public float getFlex() {
<del> return style.flex;
<add> if (style.flexGrow > 0) {
<add> return style.flexGrow;
<add> } else if (style.flexShrink > 0) {
<add> return -style.flexShrink;
<add> }
<add>
<add> return 0;
<ide> }
<ide>
<ide> @Override
<ide> public void setFlex(float flex) {
<del> if (!valuesEqual(style.flex, flex)) {
<del> style.flex = flex;
<add> if (CSSConstants.isUndefined(flex) || flex == 0) {
<add> setFlexGrow(0);
<add> setFlexShrink(0);
<add> setFlexBasis(CSSConstants.UNDEFINED);
<add> } else if (flex > 0) {
<add> setFlexGrow(flex);
<add> setFlexShrink(0);
<add> setFlexBasis(0);
<add> } else {
<add> setFlexGrow(0);
<add> setFlexShrink(-flex);
<add> setFlexBasis(CSSConstants.UNDEFINED);
<add> }
<add> }
<add>
<add> @Override
<add> public float getFlexGrow() {
<add> return style.flexGrow;
<add> }
<add>
<add> @Override
<add> public void setFlexGrow(float flexGrow) {
<add> if (!valuesEqual(style.flexGrow, flexGrow)) {
<add> style.flexGrow = flexGrow;
<add> dirty();
<add> }
<add> }
<add>
<add> @Override
<add> public float getFlexShrink() {
<add> return style.flexShrink;
<add> }
<add>
<add> @Override
<add> public void setFlexShrink(float flexShrink) {
<add> if (!valuesEqual(style.flexShrink, flexShrink)) {
<add> style.flexShrink = flexShrink;
<ide> dirty();
<ide> }
<ide> }
<ide>
<add> @Override
<add> public float getFlexBasis() {
<add> return style.flexBasis;
<add> }
<add>
<add> @Override
<add> public void setFlexBasis(float flexBasis) {
<add> if (!valuesEqual(style.flexBasis, flexBasis)) {
<add> style.flexBasis = flexBasis;
<add> dirty();
<add> }
<add> }
<add>
<add>
<ide> /**
<ide> * Get this node's margin, as defined by style + default margin.
<ide> */
<ide><path>ReactAndroid/src/main/java/com/facebook/csslayout/CSSNodeAPI.java
<ide> void measure(
<ide> void setWrap(CSSWrap flexWrap);
<ide> float getFlex();
<ide> void setFlex(float flex);
<add> float getFlexGrow();
<add> void setFlexGrow(float flexGrow);
<add> float getFlexShrink();
<add> void setFlexShrink(float flexShrink);
<add> float getFlexBasis();
<add> void setFlexBasis(float flexBasis);
<ide> Spacing getMargin();
<ide> void setMargin(int spacingType, float margin);
<ide> Spacing getPadding();
<ide><path>ReactAndroid/src/main/java/com/facebook/csslayout/CSSNodeJNI.java
<ide> public void setFlex(float flex) {
<ide> jni_CSSNodeStyleSetFlex(mNativePointer, flex);
<ide> }
<ide>
<add> private native float jni_CSSNodeStyleGetFlexGrow(long nativePointer);
<add> @Override
<add> public float getFlexGrow() {
<add> assertNativeInstance();
<add> return jni_CSSNodeStyleGetFlexGrow(mNativePointer);
<add> }
<add>
<add> private native void jni_CSSNodeStyleSetFlexGrow(long nativePointer, float flexGrow);
<add> @Override
<add> public void setFlexGrow(float flexGrow) {
<add> assertNativeInstance();
<add> jni_CSSNodeStyleSetFlexGrow(mNativePointer, flexGrow);
<add> }
<add>
<add> private native float jni_CSSNodeStyleGetFlexShrink(long nativePointer);
<add> @Override
<add> public float getFlexShrink() {
<add> assertNativeInstance();
<add> return jni_CSSNodeStyleGetFlexShrink(mNativePointer);
<add> }
<add>
<add> private native void jni_CSSNodeStyleSetFlexShrink(long nativePointer, float flexShrink);
<add> @Override
<add> public void setFlexShrink(float flexShrink) {
<add> assertNativeInstance();
<add> jni_CSSNodeStyleSetFlexShrink(mNativePointer, flexShrink);
<add> }
<add>
<add> private native float jni_CSSNodeStyleGetFlexBasis(long nativePointer);
<add> @Override
<add> public float getFlexBasis() {
<add> assertNativeInstance();
<add> return jni_CSSNodeStyleGetFlexBasis(mNativePointer);
<add> }
<add>
<add> private native void jni_CSSNodeStyleSetFlexBasis(long nativePointer, float flexBasis);
<add> @Override
<add> public void setFlexBasis(float flexBasis) {
<add> assertNativeInstance();
<add> jni_CSSNodeStyleSetFlexBasis(mNativePointer, flexBasis);
<add> }
<add>
<ide> private native float jni_CSSNodeStyleGetMargin(long nativePointer, int edge);
<ide> @Override
<ide> public Spacing getMargin() {
<ide><path>ReactAndroid/src/main/java/com/facebook/csslayout/CSSStyle.java
<ide> public class CSSStyle {
<ide> public CSSPositionType positionType;
<ide> public CSSWrap flexWrap;
<ide> public CSSOverflow overflow;
<del> public float flex;
<add> public float flexGrow;
<add> public float flexShrink;
<add> public float flexBasis;
<ide>
<ide> public Spacing margin = new Spacing();
<ide> public Spacing padding = new Spacing();
<ide> void reset() {
<ide> positionType = CSSPositionType.RELATIVE;
<ide> flexWrap = CSSWrap.NOWRAP;
<ide> overflow = CSSOverflow.VISIBLE;
<del> flex = 0f;
<add> flexGrow = 0;
<add> flexShrink = 0;
<add> flexBasis = CSSConstants.UNDEFINED;
<ide>
<ide> margin.reset();
<ide> padding.reset();
<ide><path>ReactAndroid/src/main/java/com/facebook/csslayout/LayoutEngine.java
<ide> */
<ide> public class LayoutEngine {
<ide>
<del> private static final boolean POSITIVE_FLEX_IS_AUTO = false;
<del>
<ide> private static final int CSS_FLEX_DIRECTION_COLUMN =
<ide> CSSFlexDirection.COLUMN.ordinal();
<ide> private static final int CSS_FLEX_DIRECTION_COLUMN_REVERSE =
<ide> public class LayoutEngine {
<ide> };
<ide>
<ide> private static boolean isFlexBasisAuto(CSSNode node) {
<del> if (POSITIVE_FLEX_IS_AUTO) {
<del> // All flex values are auto.
<del> return true;
<del> } else {
<del> // A flex value > 0 implies a basis of zero.
<del> return node.style.flex <= 0;
<del> }
<add> return CSSConstants.isUndefined(node.style.flexBasis);
<ide> }
<ide>
<ide> private static float getFlexGrowFactor(CSSNode node) {
<del> // Flex grow is implied by positive values for flex.
<del> if (node.style.flex > 0) {
<del> return node.style.flex;
<del> }
<del> return 0;
<add> return node.style.flexGrow;
<ide> }
<ide>
<ide> private static float getFlexShrinkFactor(CSSNode node) {
<del> if (POSITIVE_FLEX_IS_AUTO) {
<del> // A flex shrink factor of 1 is implied by non-zero values for flex.
<del> if (node.style.flex != 0) {
<del> return 1;
<del> }
<del> } else {
<del> // A flex shrink factor of 1 is implied by negative values for flex.
<del> if (node.style.flex < 0) {
<del> return 1;
<del> }
<del> }
<del> return 0;
<add> return node.style.flexShrink;
<ide> }
<ide>
<ide>
<ide> private static void layoutNodeImpl(
<ide> if (isMainAxisRow && (child.style.dimensions[dim[CSS_FLEX_DIRECTION_ROW]] >= 0.0)) {
<ide>
<ide> // The width is definite, so use that as the flex basis.
<del> child.layout.flexBasis = Math.max(child.style.dimensions[DIMENSION_WIDTH], ((child.style.padding.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_ROW], leading[CSS_FLEX_DIRECTION_ROW]) + child.style.border.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_ROW], leading[CSS_FLEX_DIRECTION_ROW])) + (child.style.padding.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_ROW], trailing[CSS_FLEX_DIRECTION_ROW]) + child.style.border.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_ROW], trailing[CSS_FLEX_DIRECTION_ROW]))));
<add> child.layout.computedFlexBasis = Math.max(child.style.dimensions[DIMENSION_WIDTH], ((child.style.padding.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_ROW], leading[CSS_FLEX_DIRECTION_ROW]) + child.style.border.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_ROW], leading[CSS_FLEX_DIRECTION_ROW])) + (child.style.padding.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_ROW], trailing[CSS_FLEX_DIRECTION_ROW]) + child.style.border.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_ROW], trailing[CSS_FLEX_DIRECTION_ROW]))));
<ide> } else if (!isMainAxisRow && (child.style.dimensions[dim[CSS_FLEX_DIRECTION_COLUMN]] >= 0.0)) {
<ide>
<ide> // The height is definite, so use that as the flex basis.
<del> child.layout.flexBasis = Math.max(child.style.dimensions[DIMENSION_HEIGHT], ((child.style.padding.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_COLUMN], leading[CSS_FLEX_DIRECTION_COLUMN]) + child.style.border.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_COLUMN], leading[CSS_FLEX_DIRECTION_COLUMN])) + (child.style.padding.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_COLUMN], trailing[CSS_FLEX_DIRECTION_COLUMN]) + child.style.border.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_COLUMN], trailing[CSS_FLEX_DIRECTION_COLUMN]))));
<add> child.layout.computedFlexBasis = Math.max(child.style.dimensions[DIMENSION_HEIGHT], ((child.style.padding.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_COLUMN], leading[CSS_FLEX_DIRECTION_COLUMN]) + child.style.border.getWithFallback(leadingSpacing[CSS_FLEX_DIRECTION_COLUMN], leading[CSS_FLEX_DIRECTION_COLUMN])) + (child.style.padding.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_COLUMN], trailing[CSS_FLEX_DIRECTION_COLUMN]) + child.style.border.getWithFallback(trailingSpacing[CSS_FLEX_DIRECTION_COLUMN], trailing[CSS_FLEX_DIRECTION_COLUMN]))));
<ide> } else if (!isFlexBasisAuto(child) && !Float.isNaN(availableInnerMainDim)) {
<ide>
<ide> // If the basis isn't 'auto', it is assumed to be zero.
<del> child.layout.flexBasis = Math.max(0, ((child.style.padding.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.border.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis])) + (child.style.padding.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]) + child.style.border.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]))));
<add> child.layout.computedFlexBasis = Math.max(child.style.flexBasis, ((child.style.padding.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.border.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis])) + (child.style.padding.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]) + child.style.border.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]))));
<ide> } else {
<ide>
<ide> // Compute the flex basis and hypothetical main size (i.e. the clamped flex basis).
<ide> private static void layoutNodeImpl(
<ide> // Measure the child
<ide> layoutNodeInternal(layoutContext, child, childWidth, childHeight, direction, childWidthMeasureMode, childHeightMeasureMode, false, "measure");
<ide>
<del> child.layout.flexBasis = Math.max(isMainAxisRow ? child.layout.measuredDimensions[DIMENSION_WIDTH] : child.layout.measuredDimensions[DIMENSION_HEIGHT], ((child.style.padding.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.border.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis])) + (child.style.padding.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]) + child.style.border.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]))));
<add> child.layout.computedFlexBasis = Math.max(isMainAxisRow ? child.layout.measuredDimensions[DIMENSION_WIDTH] : child.layout.measuredDimensions[DIMENSION_HEIGHT], ((child.style.padding.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.border.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis])) + (child.style.padding.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]) + child.style.border.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]))));
<ide> }
<ide> }
<ide> }
<ide> private static void layoutNodeImpl(
<ide> child.lineIndex = lineCount;
<ide>
<ide> if (child.style.positionType != CSSPositionType.ABSOLUTE) {
<del> float outerFlexBasis = child.layout.flexBasis + (child.style.margin.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.margin.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]));
<add> float outerFlexBasis = child.layout.computedFlexBasis + (child.style.margin.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.margin.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis]));
<ide>
<ide> // If this is a multi-line flow and this item pushes us over the available size, we've
<ide> // hit the end of the current line. Break out of the loop and lay out the current line.
<ide> private static void layoutNodeImpl(
<ide> sizeConsumedOnCurrentLine += outerFlexBasis;
<ide> itemsOnLine++;
<ide>
<del> if ((child.style.positionType == CSSPositionType.RELATIVE && child.style.flex != 0)) {
<add> if ((child.style.positionType == CSSPositionType.RELATIVE && (child.style.flexGrow != 0 || child.style.flexShrink != 0))) {
<ide> totalFlexGrowFactors += getFlexGrowFactor(child);
<ide>
<ide> // Unlike the grow factor, the shrink factor is scaled relative to the child
<ide> // dimension.
<del> totalFlexShrinkScaledFactors += getFlexShrinkFactor(child) * child.layout.flexBasis;
<add> totalFlexShrinkScaledFactors += getFlexShrinkFactor(child) * child.layout.computedFlexBasis;
<ide> }
<ide>
<ide> // Store a private linked list of children that need to be layed out.
<ide> private static void layoutNodeImpl(
<ide> float deltaFlexGrowFactors = 0;
<ide> currentRelativeChild = firstRelativeChild;
<ide> while (currentRelativeChild != null) {
<del> childFlexBasis = currentRelativeChild.layout.flexBasis;
<add> childFlexBasis = currentRelativeChild.layout.computedFlexBasis;
<ide>
<ide> if (remainingFreeSpace < 0) {
<ide> flexShrinkScaledFactor = getFlexShrinkFactor(currentRelativeChild) * childFlexBasis;
<ide> private static void layoutNodeImpl(
<ide> deltaFreeSpace = 0;
<ide> currentRelativeChild = firstRelativeChild;
<ide> while (currentRelativeChild != null) {
<del> childFlexBasis = currentRelativeChild.layout.flexBasis;
<add> childFlexBasis = currentRelativeChild.layout.computedFlexBasis;
<ide> float updatedMainSize = childFlexBasis;
<ide>
<ide> if (remainingFreeSpace < 0) {
<ide> private static void layoutNodeImpl(
<ide> if (canSkipFlex) {
<ide> // If we skipped the flex step, then we can't rely on the measuredDims because
<ide> // they weren't computed. This means we can't call getDimWithMargin.
<del> mainDim += betweenMainDim + (child.style.margin.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.margin.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis])) + child.layout.flexBasis;
<add> mainDim += betweenMainDim + (child.style.margin.getWithFallback(leadingSpacing[mainAxis], leading[mainAxis]) + child.style.margin.getWithFallback(trailingSpacing[mainAxis], trailing[mainAxis])) + child.layout.computedFlexBasis;
<ide> crossDim = availableInnerCrossDim;
<ide> } else {
<ide> // The main dimension is the sum of all the elements dimension plus | 6 |
PHP | PHP | apply fixes from styleci | a5a01581871ab0fdf35f009cb9da01fa0a2a6107 | <ide><path>src/Illuminate/Database/Query/Builder.php
<ide> protected function invalidOperator($operator)
<ide> * @param mixed $operator
<ide> * @param mixed $value
<ide> * @return $this
<del> * {@inheritdoc}
<add> * {@inheritdoc}
<ide> */
<ide> public function orWhere($column, $operator = null, $value = null)
<ide> {
<ide> public function whereYear($column, $operator, $value = null, $boolean = 'and')
<ide> return $this->addDateBasedWhere('Year', $column, $operator, $value, $boolean);
<ide> }
<ide>
<del>
<ide> /**
<ide> * Add an "or where year" statement to the query.
<ide> * | 1 |
Ruby | Ruby | raise useful errors from make_relative_symlink | 727f2047602a593b57bf4aebc26242fcc63dd4c8 | <ide><path>Library/Homebrew/keg.rb
<ide> require "ostruct"
<ide>
<ide> class Keg < Pathname
<add> class LinkError < RuntimeError
<add> attr_reader :keg, :src, :dst
<add>
<add> def initialize(keg, src, dst)
<add> @src = src
<add> @dst = dst
<add> @keg = keg
<add> end
<add> end
<add>
<add> class ConflictError < LinkError
<add> def suggestion
<add> conflict = Keg.for(dst)
<add> rescue NotAKegError
<add> "already exists. You may want to remove it:\n rm #{dst}\n"
<add> else
<add> <<-EOS.undent
<add> is a symlink belonging to #{conflict.fname}. You can unlink it:
<add> brew unlink #{conflict.fname}
<add> EOS
<add> end
<add>
<add> def to_s
<add> s = []
<add> s << "Could not symlink #{src.relative_path_from(keg)}"
<add> s << "Target #{dst}" << suggestion
<add> s << <<-EOS.undent
<add> To force the link and overwrite all conflicting files:
<add> brew link --overwrite #{keg.fname}
<add>
<add> To list all files that would be deleted:
<add> brew link --overwrite --dry-run #{keg.fname}
<add> EOS
<add> s.join("\n")
<add> end
<add> end
<add>
<add> class DirectoryNotWritableError < LinkError
<add> def to_s; <<-EOS.undent
<add> Could not symlink #{src.relative_path_from(keg)}
<add> #{dst.dirname} is not writable.
<add> EOS
<add> end
<add> end
<add>
<ide> # locale-specific directories have the form language[_territory][.codeset][@modifier]
<ide> LOCALEDIR_RX = /(locale|man)\/([a-z]{2}|C|POSIX)(_[A-Z]{2})?(\.[a-zA-Z\-0-9]+(@.+)?)?/
<ide> INFOFILE_RX = %r[info/([^.].*?\.info|dir)$]
<ide> def make_relative_symlink dst, src, mode=OpenStruct.new
<ide> dst.delete if mode.overwrite && (dst.exist? || dst.symlink?)
<ide> dst.make_relative_symlink(src)
<ide> rescue Errno::EEXIST
<del> if dst.symlink? && dst.exist?
<del> raise <<-EOS.undent
<del> Could not symlink file: #{src}
<del> Target #{dst} already exists as a symlink to #{dst.readlink}.
<del> If this file is from another formula, you may need to
<del> `brew unlink` it. Otherwise, you may want to delete it.
<del> To force the link and overwrite all other conflicting files, do:
<del> brew link --overwrite formula_name
<del>
<del> To list all files that would be deleted:
<del> brew link --overwrite --dry-run formula_name
<del> EOS
<del> elsif dst.exist?
<del> raise <<-EOS.undent
<del> Could not symlink file: #{src}
<del> Target #{dst} already exists. You may need to delete it.
<del> To force the link and overwrite all other conflicting files, do:
<del> brew link --overwrite formula_name
<del>
<del> To list all files that would be deleted:
<del> brew link --overwrite --dry-run formula_name
<del> EOS
<add> if dst.exist?
<add> raise ConflictError.new(self, src, dst)
<ide> elsif dst.symlink?
<ide> dst.unlink
<ide> retry
<ide> end
<ide> rescue Errno::EACCES
<del> raise <<-EOS.undent
<del> Could not symlink file: #{src}
<del> #{dst.dirname} is not writable. You should change its permissions.
<del> EOS
<add> raise DirectoryNotWritableError.new(self, src, dst)
<ide> rescue SystemCallError
<del> raise <<-EOS.undent
<del> Could not symlink file: #{src}
<del> #{dst} may already exist.
<del> #{dst.dirname} may not be writable.
<del> EOS
<add> raise LinkError.new(self, src, dst)
<ide> end
<ide>
<ide> # symlinks the contents of self+foo recursively into #{HOMEBREW_PREFIX}/foo
<ide><path>Library/Homebrew/test/test_keg.rb
<ide> def test_linking_fails_when_already_linked
<ide>
<ide> def test_linking_fails_when_files_exist
<ide> touch HOMEBREW_PREFIX/"bin/helloworld"
<del> assert_raise RuntimeError do
<add> assert_raise Keg::ConflictError do
<ide> shutup { @keg.link }
<ide> end
<ide> end | 2 |
Java | Java | replace string comparison with enum | 63035a4c083807efce3ad0bb08e318811fa7558b | <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/ReactMarker.java
<ide> public class ReactMarker {
<ide>
<ide> public interface MarkerListener {
<del> void logMarker(String name, @Nullable String tag);
<add> void logMarker(ReactMarkerConstants name, @Nullable String tag);
<ide> };
<ide>
<ide> private static @Nullable MarkerListener sMarkerListener = null;
<ide> public static void logMarker(String name) {
<ide> }
<ide>
<ide> @DoNotStrip
<del> public static void logMarker(String name, String tag) {
<add> public static void logMarker(String name, @Nullable String tag) {
<add> if (sMarkerListener != null) {
<add> sMarkerListener.logMarker(ReactMarkerConstants.valueOf(name), tag);
<add> }
<add> }
<add>
<add> @DoNotStrip
<add> public static void logMarker(ReactMarkerConstants name) {
<add> logMarker(name, null);
<add> }
<add>
<add> @DoNotStrip
<add> public static void logMarker(ReactMarkerConstants name, @Nullable String tag) {
<ide> if (sMarkerListener != null) {
<ide> sMarkerListener.logMarker(name, tag);
<ide> }
<ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/ReactMarkerConstants.java
<ide> /**
<ide> * Constants used by ReactMarker.
<ide> */
<del>public class ReactMarkerConstants {
<del>
<del> // TODO convert to ints so we don't have to do String compares
<del> public static final String CREATE_REACT_CONTEXT_START = "CREATE_REACT_CONTEXT_START";
<del> public static final String CREATE_REACT_CONTEXT_END = "CREATE_REACT_CONTEXT_END";
<del> public static final String PROCESS_PACKAGES_START = "PROCESS_PACKAGES_START";
<del> public static final String PROCESS_PACKAGES_END = "PROCESS_PACKAGES_END";
<del> public static final String BUILD_NATIVE_MODULE_REGISTRY_START =
<del> "BUILD_NATIVE_MODULE_REGISTRY_START";
<del> public static final String BUILD_NATIVE_MODULE_REGISTRY_END =
<del> "BUILD_NATIVE_MODULE_REGISTRY_END";
<del> public static final String BUILD_JS_MODULE_CONFIG_START = "BUILD_JS_MODULE_CONFIG_START";
<del> public static final String BUILD_JS_MODULE_CONFIG_END = "BUILD_JS_MODULE_CONFIG_END";
<del> public static final String CREATE_CATALYST_INSTANCE_START = "CREATE_CATALYST_INSTANCE_START";
<del> public static final String CREATE_CATALYST_INSTANCE_END = "CREATE_CATALYST_INSTANCE_END";
<del> public static final String RUN_JS_BUNDLE_START = "RUN_JS_BUNDLE_START";
<del> public static final String RUN_JS_BUNDLE_END = "RUN_JS_BUNDLE_END";
<del> public static final String NATIVE_MODULE_INITIALIZE_START = "NativeModule_start";
<del> public static final String NATIVE_MODULE_INITIALIZE_END = "NativeModule_end";
<del> public static final String SETUP_REACT_CONTEXT_START = "SETUP_REACT_CONTEXT_START";
<del> public static final String SETUP_REACT_CONTEXT_END = "SETUP_REACT_CONTEXT_END";
<del> public static final String CREATE_UI_MANAGER_MODULE_START = "CREATE_UI_MANAGER_MODULE_START";
<del> public static final String CREATE_UI_MANAGER_MODULE_END = "CREATE_UI_MANAGER_MODULE_END";
<del> public static final String CREATE_VIEW_MANAGERS_START = "CREATE_VIEW_MANAGERS_START";
<del> public static final String CREATE_VIEW_MANAGERS_END = "CREATE_VIEW_MANAGERS_END";
<del> public static final String CREATE_UI_MANAGER_MODULE_CONSTANTS_START =
<del> "CREATE_UI_MANAGER_MODULE_CONSTANTS_START";
<del> public static final String CREATE_UI_MANAGER_MODULE_CONSTANTS_END =
<del> "CREATE_UI_MANAGER_MODULE_CONSTANTS_END";
<del> public static final String CREATE_MODULE_START = "CREATE_MODULE_START";
<del> public static final String CREATE_MODULE_END = "CREATE_MODULE_END";
<del> public static final String PROCESS_CORE_REACT_PACKAGE_START = "PROCESS_CORE_REACT_PACKAGE_START";
<del> public static final String PROCESS_CORE_REACT_PACKAGE_END = "PROCESS_CORE_REACT_PACKAGE_END";
<del> public static final String CORE_REACT_PACKAGE_GET_REACT_MODULE_INFO_PROVIDER_START =
<del> "CORE_REACT_PACKAGE_GET_REACT_MODULE_INFO_PROVIDER_START";
<del> public static final String CORE_REACT_PACKAGE_GET_REACT_MODULE_INFO_PROVIDER_END =
<del> "CORE_REACT_PACKAGE_GET_REACT_MODULE_INFO_PROVIDER_END";
<del> public static final String UI_MANAGER_MODULE_CONSTANTS_CONVERT_START =
<del> "UI_MANAGER_MODULE_CONSTANTS_CONVERT_START";
<del> public static final String UI_MANAGER_MODULE_CONSTANTS_CONVERT_END =
<del> "UI_MANAGER_MODULE_CONSTANTS_CONVERT_END";
<del> public static final String CREATE_I18N_MODULE_CONSTANTS_START =
<del> "CREATE_I18N_MODULE_CONSTANTS_START";
<del> public static final String CREATE_I18N_MODULE_CONSTANTS_END =
<del> "CREATE_I18N_MODULE_CONSTANTS_END";
<del> public static final String I18N_MODULE_CONSTANTS_CONVERT_START =
<del> "I18N_MODULE_CONSTANTS_CONVERT_START";
<del> public static final String I18N_MODULE_CONSTANTS_CONVERT_END =
<del> "I18N_MODULE_CONSTANTS_CONVERT_END";
<del> public static final String CREATE_I18N_ASSETS_MODULE_START =
<del> "CREATE_I18N_ASSETS_MODULE_START";
<del> public static final String CREATE_I18N_ASSETS_MODULE_END =
<del> "CREATE_I18N_ASSETS_MODULE_END";
<del> public static final String GET_CONSTANTS_START = "GET_CONSTANTS_START";
<del> public static final String GET_CONSTANTS_END = "GET_CONSTANTS_END";
<del> public static final String INITIALIZE_MODULE_START = "INITIALIZE_MODULE_START";
<del> public static final String INITIALIZE_MODULE_END = "INITIALIZE_MODULE_END";
<del> public static final String ON_HOST_RESUME_START = "ON_HOST_RESUME_START";
<del> public static final String ON_HOST_RESUME_END = "ON_HOST_RESUME_END";
<del> public static final String ON_HOST_PAUSE_START = "ON_HOST_PAUSE_START";
<del> public static final String ON_HOST_PAUSE_END = "ON_HOST_PAUSE_END";
<del> public static final String CONVERT_CONSTANTS_START = "CONVERT_CONSTANTS_START";
<del> public static final String CONVERT_CONSTANTS_END = "CONVERT_CONSTANTS_END";
<del> public static final String PRE_SETUP_REACT_CONTEXT_START = "PRE_SETUP_REACT_CONTEXT_START";
<del> public static final String PRE_SETUP_REACT_CONTEXT_END = "PRE_SETUP_REACT_CONTEXT_END";
<add>public enum ReactMarkerConstants {
<add> CREATE_REACT_CONTEXT_START,
<add> CREATE_REACT_CONTEXT_END,
<add> PROCESS_PACKAGES_START,
<add> PROCESS_PACKAGES_END,
<add> BUILD_NATIVE_MODULE_REGISTRY_START,
<add> BUILD_NATIVE_MODULE_REGISTRY_END,
<add> BUILD_JS_MODULE_CONFIG_START,
<add> BUILD_JS_MODULE_CONFIG_END,
<add> CREATE_CATALYST_INSTANCE_START,
<add> CREATE_CATALYST_INSTANCE_END,
<add> RUN_JS_BUNDLE_START,
<add> RUN_JS_BUNDLE_END,
<add> NATIVE_MODULE_INITIALIZE_START,
<add> NATIVE_MODULE_INITIALIZE_END,
<add> SETUP_REACT_CONTEXT_START,
<add> SETUP_REACT_CONTEXT_END,
<add> CREATE_UI_MANAGER_MODULE_START,
<add> CREATE_UI_MANAGER_MODULE_END,
<add> CREATE_VIEW_MANAGERS_START,
<add> CREATE_VIEW_MANAGERS_END,
<add> CREATE_UI_MANAGER_MODULE_CONSTANTS_START,
<add> CREATE_UI_MANAGER_MODULE_CONSTANTS_END,
<add> CREATE_MODULE_START,
<add> CREATE_MODULE_END,
<add> PROCESS_CORE_REACT_PACKAGE_START,
<add> PROCESS_CORE_REACT_PACKAGE_END,
<add> CORE_REACT_PACKAGE_GET_REACT_MODULE_INFO_PROVIDER_START,
<add> CORE_REACT_PACKAGE_GET_REACT_MODULE_INFO_PROVIDER_END,
<add> UI_MANAGER_MODULE_CONSTANTS_CONVERT_START,
<add> UI_MANAGER_MODULE_CONSTANTS_CONVERT_END,
<add> CREATE_I18N_MODULE_CONSTANTS_START,
<add> CREATE_I18N_MODULE_CONSTANTS_END,
<add> I18N_MODULE_CONSTANTS_CONVERT_START,
<add> I18N_MODULE_CONSTANTS_CONVERT_END,
<add> CREATE_I18N_ASSETS_MODULE_START,
<add> CREATE_I18N_ASSETS_MODULE_END,
<add> GET_CONSTANTS_START,
<add> GET_CONSTANTS_END,
<add> INITIALIZE_MODULE_START,
<add> INITIALIZE_MODULE_END,
<add> ON_HOST_RESUME_START,
<add> ON_HOST_RESUME_END,
<add> ON_HOST_PAUSE_START,
<add> ON_HOST_PAUSE_END,
<add> CONVERT_CONSTANTS_START,
<add> CONVERT_CONSTANTS_END,
<add> PRE_REACT_CONTEXT_END,
<add> UNPACKER_CHECK_START,
<add> UNPACKER_CHECK_END,
<add> UNPACKER_BUNDLE_EXTRACTED,
<add> UNPACKING_JS_BUNDLE_LOADER_CHECK_START,
<add> UNPACKING_JS_BUNDLE_LOADER_CHECK_END,
<add> UNPACKING_JS_BUNDLE_LOADER_EXTRACTED,
<add> loadApplicationScript_startStringConvert,
<add> loadApplicationScript_endStringConvert,
<add> PRE_SETUP_REACT_CONTEXT_START,
<add> PRE_SETUP_REACT_CONTEXT_END,
<ide> } | 2 |
Python | Python | fix tqdm logging level | 59b123bc50131aee4bfe6a434314fb5318de7955 | <ide><path>src/transformers/file_utils.py
<ide> def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
<ide> total=total,
<ide> initial=resume_size,
<ide> desc="Downloading",
<del> disable=bool(logger.level <= logging.INFO),
<add> disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
<ide> )
<ide> for chunk in response.iter_content(chunk_size=1024):
<ide> if chunk: # filter out keep-alive new chunks | 1 |
Ruby | Ruby | remove needless `silence_warnings` | 603475b76fe518fa290330817b851608b13143ea | <ide><path>railties/test/application/asset_debugging_test.rb
<ide> class AssetDebuggingTest < ActiveSupport::TestCase
<ide> include Rack::Test::Methods
<ide>
<ide> def setup
<del> # FIXME: shush Sass warning spam, not relevant to testing Railties
<del> Kernel.silence_warnings do
<del> build_app(initializers: true)
<del> end
<add> build_app(initializers: true)
<ide>
<ide> app_file "app/assets/javascripts/application.js", "//= require_tree ."
<ide> app_file "app/assets/javascripts/xmlhr.js", "function f1() { alert(); }"
<ide> def teardown
<ide> teardown_app
<ide> end
<ide>
<del> # FIXME: shush Sass warning spam, not relevant to testing Railties
<del> def get(*)
<del> Kernel.silence_warnings { super }
<del> end
<del>
<ide> test "assets are concatenated when debug is off and compile is off either if debug_assets param is provided" do
<ide> # config.assets.debug and config.assets.compile are false for production environment
<ide> ENV["RAILS_ENV"] = "production"
<ide><path>railties/test/application/assets_test.rb
<ide> def assert_no_file_exists(filename)
<ide>
<ide> add_to_env_config "development", "config.assets.digest = false"
<ide>
<del> # FIXME: shush Sass warning spam, not relevant to testing Railties
<del> Kernel.silence_warnings do
<del> require "#{app_path}/config/environment"
<del> end
<add> require "#{app_path}/config/environment"
<ide>
<ide> get "/assets/demo.js"
<ide> assert_equal 'a = "/assets/rails.png";', last_response.body.strip
<ide><path>railties/test/isolation/abstract_unit.rb
<ide> def app(env = "production")
<ide> @app ||= begin
<ide> ENV["RAILS_ENV"] = env
<ide>
<del> # FIXME: shush Sass warning spam, not relevant to testing Railties
<del> Kernel.silence_warnings do
<del> require "#{app_path}/config/environment"
<del> end
<add> require "#{app_path}/config/environment"
<ide>
<ide> Rails.application
<ide> end | 3 |
Javascript | Javascript | fix typo in withrouter | de6741f886178d8f9b4af6e109838642aebda1c7 | <ide><path>lib/router/with-router.js
<ide> export default function withRouter (ComposedComponent) {
<ide> router: PropTypes.object
<ide> }
<ide>
<del> static displayName = `withRoute(${displayName})`
<add> static displayName = `withRouter(${displayName})`
<ide>
<ide> render () {
<ide> const props = { | 1 |
PHP | PHP | add auth line | b54ef297b3c723c8438596c6e6afef93a7458b98 | <ide><path>resources/lang/en/auth.php
<ide> */
<ide>
<ide> 'failed' => 'These credentials do not match our records.',
<add> 'password' => 'The provided password is incorrect.',
<ide> 'throttle' => 'Too many login attempts. Please try again in :seconds seconds.',
<ide>
<ide> ]; | 1 |
Text | Text | add v3.6.1 to changelog | 9a958e2962e1e75f9905d58b618805043bd6fa6e | <ide><path>CHANGELOG.md
<ide> - [#17134](https://github.com/emberjs/ember.js/pull/17134) [CLEANUP] Remove deprecated '_router'
<ide> - [#17133](https://github.com/emberjs/ember.js/pull/17133) [CLEANUP] Remove deprecated 'property{Did,Will}Change'
<ide>
<add>### v3.6.1 (December 18, 2018)
<add>
<add>- [#17328](https://github.com/emberjs/ember.js/pull/17328) [BUGFIX] Ensure that delayed transition retrys work
<add>- [#17374](https://github.com/emberjs/ember.js/pull/17374) [BUGFIX] Fix cyclic references on Array.prototype
<add>
<ide> ### v3.6.0 (December 6, 2018)
<ide>
<ide> - [#17025](https://github.com/emberjs/ember.js/pull/17025) / [#17034](https://github.com/emberjs/ember.js/pull/17034) / [#17036](https://github.com/emberjs/ember.js/pull/17036) / [#17038](https://github.com/emberjs/ember.js/pull/17038) / [#17040](https://github.com/emberjs/ember.js/pull/17040) / [#17041](https://github.com/emberjs/ember.js/pull/17041) / [#17061](https://github.com/emberjs/ember.js/pull/17061) [FEATURE] Final stage of the router service RFC (see [emberjs/rfcs#95](https://github.com/emberjs/rfcs/blob/master/text/0095-router-service.md) | 1 |
Python | Python | use default-hostname as a class-attribute | ef7b7afa9aaca69ea31706fb38b27e8ee4da30be | <ide><path>libcloud/dns/drivers/rcodezero.py
<ide> from libcloud.dns.types import Provider, RecordType
<ide> from libcloud.utils.py3 import httplib
<ide>
<add>API_HOST = 'my.rcodezero.at'
<add>
<ide> __all__ = [
<ide> 'RcodeZeroDNSDriver',
<ide> ]
<ide> def parse_error(self):
<ide> class RcodeZeroConnection(ConnectionKey):
<ide> responseCls = RcodeZeroResponse
<ide>
<add> host = API_HOST
<add>
<ide> def add_default_headers(self, headers):
<ide> headers['Authorization'] = 'Bearer ' + self.key
<ide> headers['Accept'] = 'application/json'
<ide> class RcodeZeroDNSDriver(DNSDriver):
<ide> RecordType.TXT: 'TXT',
<ide> }
<ide>
<del> def __init__(self, key, secret=None, secure=True, host='my.rcodezero.at',
<add> def __init__(self, key, secret=None, secure=True, host=None,
<ide> port=None, api_version='v1', **kwargs):
<ide> """
<ide> :param key: API token to be used (required)
<ide> def __init__(self, key, secret=None, secure=True, host='my.rcodezero.at',
<ide> :type secure: ``bool``
<ide>
<ide> :param host: Hostname used for connections.
<del> Default: ``my.rcodezero.at``.
<ide> :type host: ``str``
<ide>
<ide> :param port: Port used for connections. | 1 |
Javascript | Javascript | fix return type of settimeout in net.socket | 4439009d6531230f2f15992483d136bcc416c6ae | <ide><path>lib/internal/stream_base_commons.js
<ide> function onStreamRead(arrayBuffer) {
<ide>
<ide> function setStreamTimeout(msecs, callback) {
<ide> if (this.destroyed)
<del> return;
<add> return this;
<ide>
<ide> this.timeout = msecs;
<ide>
<ide><path>test/parallel/test-net-socket-timeout.js
<ide> for (let i = 0; i < invalidCallbacks.length; i++) {
<ide> const server = net.Server();
<ide> server.listen(0, common.mustCall(() => {
<ide> const socket = net.createConnection(server.address().port);
<del> socket.setTimeout(1, common.mustCall(() => {
<del> socket.destroy();
<del> server.close();
<del> }));
<add> assert.strictEqual(
<add> socket.setTimeout(1, common.mustCall(() => {
<add> socket.destroy();
<add> assert.strictEqual(socket.setTimeout(1, common.mustNotCall()), socket);
<add> server.close();
<add> })),
<add> socket
<add> );
<ide> })); | 2 |
Python | Python | add example section in the -h tag | a4dbd65cc336c2e714c7d648837a1b982e0c3a0d | <ide><path>glances/core/glances_main.py
<ide> class GlancesMain(object):
<ide> username = "glances"
<ide> password = ""
<ide>
<add> # Exemple of use
<add> example_of_use = "\
<add>Examples of use:\n\
<add>\n\
<add>Monitor local machine (standalone mode):\n\
<add> $ glances\n\
<add>\n\
<add>Monitor local machine with the Web interface (Web UI):\n\
<add> $ glances -w\n\
<add> Glances web server started on http://0.0.0.0:61208/\n\
<add>\n\
<add>Monitor local machine and export stats to a CSV file (standalone mode):\n\
<add> $ glances --export-csv\n\
<add>\n\
<add>Monitor local machine and export stats to a InfluxDB server with 5s refresh time (standalone mode):\n\
<add> $ glances -t 5 --export-influxdb -t 5\n\
<add>\n\
<add>Start a Glances server (server mode):\n\
<add> $ glances -s\n\
<add>\n\
<add>Connect Glances to a Glances server (client mode):\n\
<add> $ glances -c <ip_server>\n\
<add>\n\
<add>Connect Glances to a Glances server and export stats to a StatsD server (client mode):\n\
<add> $ glances -c <ip_server> --export-statsd\n\
<add>\n\
<add>Start the client browser (browser mode):\n\
<add> $ glances --browser\n\
<add> "
<add>
<ide> def __init__(self):
<ide> """Manage the command line arguments."""
<ide> self.args = self.parse_args()
<ide> def init_args(self):
<ide> """Init all the command line arguments."""
<ide> _version = "Glances v" + version + " with psutil v" + psutil_version
<ide> parser = argparse.ArgumentParser(
<del> prog=appname, conflict_handler='resolve')
<add> prog=appname,
<add> conflict_handler='resolve',
<add> formatter_class=argparse.RawDescriptionHelpFormatter,
<add> epilog=self.example_of_use)
<ide> parser.add_argument(
<ide> '-V', '--version', action='version', version=_version)
<ide> parser.add_argument('-d', '--debug', action='store_true', default=False,
<ide> def init_args(self):
<ide> parser.add_argument('--disable-docker', action='store_true', default=False,
<ide> dest='disable_docker', help=_('disable Docker module'))
<ide> parser.add_argument('--disable-left-sidebar', action='store_true', default=False,
<del> dest='disable_left_sidebar', help=_('disable network, disk io, FS and sensors modules'))
<add> dest='disable_left_sidebar', help=_('disable network, disk io, FS and sensors modules (need Py3Sensors lib)'))
<ide> parser.add_argument('--disable-process', action='store_true', default=False,
<ide> dest='disable_process', help=_('disable process module'))
<ide> parser.add_argument('--disable-log', action='store_true', default=False,
<ide> def init_args(self):
<ide> parser.add_argument('--enable-process-extended', action='store_true', default=False,
<ide> dest='enable_process_extended', help=_('enable extended stats on top process'))
<ide> parser.add_argument('--enable-history', action='store_true', default=False,
<del> dest='enable_history', help=_('enable the history mode'))
<add> dest='enable_history', help=_('enable the history mode (need MatPlotLib lib)'))
<ide> parser.add_argument('--path-history', default=tempfile.gettempdir(),
<ide> dest='path_history', help=_('Set the export path for graph history'))
<ide> # Export modules feature
<ide> parser.add_argument('--export-csv', default=None,
<ide> dest='export_csv', help=_('export stats to a CSV file'))
<ide> parser.add_argument('--export-influxdb', action='store_true', default=False,
<del> dest='export_influxdb', help=_('export stats to an InfluxDB server'))
<add> dest='export_influxdb', help=_('export stats to an InfluxDB server (need InfluDB lib)'))
<ide> parser.add_argument('--export-statsd', action='store_true', default=False,
<del> dest='export_statsd', help=_('export stats to a Statsd server'))
<add> dest='export_statsd', help=_('export stats to a Statsd server (need StatsD lib)'))
<ide> # Client/Server option
<ide> parser.add_argument('-c', '--client', dest='client',
<ide> help=_('connect to a Glances server by IPv4/IPv6 address or hostname'))
<ide> def init_args(self):
<ide> parser.add_argument('-t', '--time', default=self.refresh_time, type=float,
<ide> dest='time', help=_('set refresh time in seconds [default: {0} sec]').format(self.refresh_time))
<ide> parser.add_argument('-w', '--webserver', action='store_true', default=False,
<del> dest='webserver', help=_('run Glances in web server mode'))
<add> dest='webserver', help=_('run Glances in web server mode (need Bootle lib)'))
<ide> # Display options
<ide> parser.add_argument('-f', '--process-filter', default=None, type=str,
<ide> dest='process_filter', help=_('set the process filter pattern (regular expression)')) | 1 |
Mixed | PHP | update reasons and readme | 67f70afe1f4cdb472ba6dc28b03090e5153fb167 | <ide><path>readme.md
<ide> # Laravel 4 Beta Change Log
<ide>
<add>## Beta 3
<add>
<add>- Fixed a few things in the ArrayStore session driver.
<add>- Improve reasons in Password Broker.
<add>
<ide> ## Beta 2
<ide>
<ide> - Migrated to ircmaxell's [password-compat](http://github.com/ircmaxell/password_compat) library for PHP 5.5 forward compatibility on hashes. No backward compatibility breaks.
<ide><path>src/Illuminate/Auth/Reminders/PasswordBroker.php
<ide> protected function validNewPasswords()
<ide> */
<ide> protected function makeErrorRedirect($reason = '')
<ide> {
<add> if ($reason != '') $reason = 'reminders.'.$reason;
<add>
<ide> return $this->redirect->refresh()->with('error', true)->with('reason', $reason);
<ide> }
<ide>
<ide><path>tests/Auth/AuthPasswordBrokerTest.php
<ide> public function testRedirectIsReturnedByResetWhenUserCredentialsInvalid()
<ide> $mocks['users']->shouldReceive('retrieveByCredentials')->once()->with(array('creds'))->andReturn(null);
<ide> $mocks['redirect']->shouldReceive('refresh')->andReturn($redirect = m::mock('Illuminate\Http\RedirectResponse'));
<ide> $redirect->shouldReceive('with')->once()->with('error', true)->andReturn($redirect);
<del> $redirect->shouldReceive('with')->once()->with('reason', 'user')->andReturn($redirect);
<add> $redirect->shouldReceive('with')->once()->with('reason', 'reminders.user')->andReturn($redirect);
<ide>
<ide> $this->assertInstanceof('Illuminate\Http\RedirectResponse', $broker->reset(array('creds'), function() {}));
<ide> }
<ide> public function testRedirectReturnedByRemindWhenPasswordsDontMatch()
<ide> $request->shouldReceive('input')->once()->with('password_confirmation')->andReturn('bar');
<ide> $mocks['redirect']->shouldReceive('refresh')->andReturn($redirect = m::mock('Illuminate\Http\RedirectResponse'));
<ide> $redirect->shouldReceive('with')->once()->with('error', true)->andReturn($redirect);
<del> $redirect->shouldReceive('with')->once()->with('reason', 'password')->andReturn($redirect);
<add> $redirect->shouldReceive('with')->once()->with('reason', 'reminders.password')->andReturn($redirect);
<ide>
<ide> $this->assertInstanceof('Illuminate\Http\RedirectResponse', $broker->reset(array('creds'), function() {}));
<ide> }
<ide> public function testRedirectReturnedByRemindWhenPasswordNotSet()
<ide> $request->shouldReceive('input')->once()->with('password_confirmation')->andReturn(null);
<ide> $mocks['redirect']->shouldReceive('refresh')->andReturn($redirect = m::mock('Illuminate\Http\RedirectResponse'));
<ide> $redirect->shouldReceive('with')->once()->with('error', true)->andReturn($redirect);
<del> $redirect->shouldReceive('with')->once()->with('reason', 'password')->andReturn($redirect);
<add> $redirect->shouldReceive('with')->once()->with('reason', 'reminders.password')->andReturn($redirect);
<ide>
<ide> $this->assertInstanceof('Illuminate\Http\RedirectResponse', $broker->reset(array('creds'), function() {}));
<ide> }
<ide> public function testRedirectReturnedByRemindWhenPasswordsLessThanSixCharacters()
<ide> $request->shouldReceive('input')->once()->with('password_confirmation')->andReturn('abc');
<ide> $mocks['redirect']->shouldReceive('refresh')->andReturn($redirect = m::mock('Illuminate\Http\RedirectResponse'));
<ide> $redirect->shouldReceive('with')->once()->with('error', true)->andReturn($redirect);
<del> $redirect->shouldReceive('with')->once()->with('reason', 'password')->andReturn($redirect);
<add> $redirect->shouldReceive('with')->once()->with('reason', 'reminders.password')->andReturn($redirect);
<ide>
<ide> $this->assertInstanceof('Illuminate\Http\RedirectResponse', $broker->reset(array('creds'), function() {}));
<ide> }
<ide> public function testRedirectReturnedByRemindWhenRecordDoesntExistInTable()
<ide> $mocks['reminders']->shouldReceive('exists')->with($user, 'token')->andReturn(false);
<ide> $mocks['redirect']->shouldReceive('refresh')->andReturn($redirect = m::mock('Illuminate\Http\RedirectResponse'));
<ide> $redirect->shouldReceive('with')->once()->with('error', true)->andReturn($redirect);
<del> $redirect->shouldReceive('with')->once()->with('reason', 'token')->andReturn($redirect);
<add> $redirect->shouldReceive('with')->once()->with('reason', 'reminders.token')->andReturn($redirect);
<ide>
<ide> $this->assertInstanceof('Illuminate\Http\RedirectResponse', $broker->reset(array('creds'), function() {}));
<ide> } | 3 |
Mixed | Python | fix paths to images in readme.md | 37afe55775676e2cb4cf6ed0cfc6c892855d6805 | <ide><path>README.md
<ide> following the ASF Policy.
<ide>
<ide> - **DAGs**: Overview of all DAGs in your environment.
<ide>
<del> 
<add> 
<ide>
<ide> - **Tree View**: Tree representation of a DAG that spans across time.
<ide>
<del> 
<add> 
<ide>
<ide> - **Graph View**: Visualization of a DAG's dependencies and their current status for a specific run.
<ide>
<del> 
<add> 
<ide>
<ide> - **Task Duration**: Total time spent on different tasks over time.
<ide>
<del> 
<add> 
<ide>
<ide> - **Gantt View**: Duration and overlap of a DAG.
<ide>
<del> 
<add> 
<ide>
<ide> - **Code View**: Quick way to view source code of a DAG.
<ide>
<del> 
<add> 
<ide>
<ide>
<ide> ## Contributing
<ide> If you would like to become a maintainer, please review the Apache Airflow
<ide>
<ide> ## Can I use the Apache Airflow logo in my presentation?
<ide>
<del>Yes! Be sure to abide by the Apache Foundation [trademark policies](https://www.apache.org/foundation/marks/#books) and the Apache Airflow [Brandbook](https://cwiki.apache.org/confluence/display/AIRFLOW/Brandbook). The most up to date logos are found in [this repo](/docs/img/logos) and on the Apache Software Foundation [website](https://www.apache.org/logos/about.html).
<add>Yes! Be sure to abide by the Apache Foundation [trademark policies](https://www.apache.org/foundation/marks/#books) and the Apache Airflow [Brandbook](https://cwiki.apache.org/confluence/display/AIRFLOW/Brandbook). The most up to date logos are found in [this repo](/docs/apache-airflow/img/logos) and on the Apache Software Foundation [website](https://www.apache.org/logos/about.html).
<ide>
<ide> ## Airflow merchandise
<ide>
<ide><path>airflow/jobs/scheduler_job.py
<ide> def _run_scheduler_loop(self) -> None:
<ide>
<ide> Following is a graphic representation of these steps.
<ide>
<del> .. image:: ../docs/img/scheduler_loop.jpg
<add> .. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
<ide>
<ide> :rtype: None
<ide> """ | 2 |
Ruby | Ruby | use global namespace rails | 4686ace999467935e659260d1a23c6617bc51986 | <ide><path>railties/lib/rails/generators/rails/plugin_new/templates/lib/%name%/engine.rb
<ide> module <%= camelized %>
<del> class Engine < Rails::Engine
<add> class Engine < ::Rails::Engine
<ide> <% if mountable? -%>
<ide> isolate_namespace <%= camelized %>
<ide> <% end -%>
<ide><path>railties/test/generators/plugin_new_generator_test.rb
<ide> def test_creating_engine_in_full_mode
<ide> assert_file "app/views"
<ide> assert_file "app/helpers"
<ide> assert_file "config/routes.rb", /Rails.application.routes.draw do/
<del> assert_file "lib/bukkits/engine.rb", /module Bukkits\n class Engine < Rails::Engine\n end\nend/
<add> assert_file "lib/bukkits/engine.rb", /module Bukkits\n class Engine < ::Rails::Engine\n end\nend/
<ide> assert_file "lib/bukkits.rb", /require "bukkits\/engine"/
<ide> end
<ide> | 2 |
Text | Text | add locate command | 3ec8875bf12609157da0af5a02f53330d5e98152 | <ide><path>guide/english/linux/basic-linux-commands/index.md
<ide> title: Basic Linux Commands
<ide>
<ide> When starting out with Linux, there are some basic commands everyone should know.
<ide>
<del> 1. **cd** - change directory
<add>1. **cd** - change directory
<ide> - `cd` followed by a directory or file path will take you inside that directory (folder).
<ide>
<ide> 2. **ls** - list command
<ide> When starting out with Linux, there are some basic commands everyone should know
<ide> 28. **chmod** - change permission
<ide> - Use 'chmod' to make a file executable and to change the permissions granted to it in Linux. For the example , imagine you have a file named `myFile.txt` . But it can't executable because you not have a permission to this file , you can use the command `chmod +x myFile.txt` or `chmod 755 myFile.txt`.
<ide>
<add>29. **locate**
<add>- Used to locate a file in the system
<add> Example:
<add> ```sh
<add> locate sample.txt
<add> /home/rajasimha/Desktop/sample.txt
<add> ```
<add>
<ide> ### Useful Resources for Practice:
<ide> - [JSLinux](https://bellard.org/jslinux/vm.html?url=https://bellard.org/jslinux/buildroot-x86.cfg): Run the terminal inside your browser. Great for practice.
<ide> - [LearnShell](https://www.learnshell.org/): Interactive Linux shell (terminal) tutorials. | 1 |
Mixed | Python | convert "dim_ordering" to "data_format" | 5adce5266f43f89dcf7f14758e5d895fbf6791ef | <ide><path>docs/templates/applications.md
<ide> Weights are downloaded automatically when instantiating a model. They are stored
<ide> - [ResNet50](#resnet50)
<ide> - [InceptionV3](#inceptionv3)
<ide>
<del>All of these architectures (except Xception) are compatible with both TensorFlow and Theano, and upon instantiation the models will be built according to the image dimension ordering set in your Keras configuration file at `~/.keras/keras.json`. For instance, if you have set `image_dim_ordering=tf`, then any model loaded from this repository will get built according to the TensorFlow dimension ordering convention, "Width-Height-Depth".
<add>All of these architectures (except Xception) are compatible with both TensorFlow and Theano, and upon instantiation the models will be built according to the image data format set in your Keras configuration file at `~/.keras/keras.json`. For instance, if you have set `image_data_format=tf`, then any model loaded from this repository will get built according to the TensorFlow data format convention, "Width-Height-Depth".
<ide>
<ide> The Xception model is only available for TensorFlow, due to its reliance on `SeparableConvolution` layers.
<ide>
<ide> from keras.applications.inception_v3 import InceptionV3
<ide> from keras.layers import Input
<ide>
<ide> # this could also be the output a different Keras model or layer
<del>input_tensor = Input(shape=(224, 224, 3)) # this assumes K.image_dim_ordering() == 'tf'
<add>input_tensor = Input(shape=(224, 224, 3)) # this assumes K.image_data_format() == 'channels_last'
<ide>
<ide> model = InceptionV3(input_tensor=input_tensor, weights='imagenet', include_top=True)
<ide> ```
<ide> and a top-5 validation accuracy of 0.945.
<ide>
<ide> Note that this model is only available for the TensorFlow backend,
<ide> due to its reliance on `SeparableConvolution` layers. Additionally it only supports
<del>the dimension ordering "tf" (width, height, channels).
<add>the data format "channels_last" (width, height, channels).
<ide>
<ide> The default input size for this model is 299x299.
<ide>
<ide> keras.applications.vgg16.VGG16(include_top=True, weights='imagenet', input_tenso
<ide> VGG16 model, with weights pre-trained on ImageNet.
<ide>
<ide> This model is available for both the Theano and TensorFlow backend, and can be built both
<del>with "th" dim ordering (channels, width, height) or "tf" dim ordering (width, height, channels).
<add>with "channels_first" data format (channels, width, height) or "channels_last" data format (width, height, channels).
<ide>
<ide> The default input size for this model is 224x224.
<ide>
<ide> The default input size for this model is 224x224.
<ide> - input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
<ide> - input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(224, 224, 3)` (with `tf` dim ordering)
<del> or `(3, 224, 244)` (with `th` dim ordering).
<add> has to be `(224, 224, 3)` (with `channels_last` data format)
<add> or `(3, 224, 244)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 48.
<ide> E.g. `(200, 200, 3)` would be one valid value.
<ide> keras.applications.vgg19.VGG19(include_top=True, weights='imagenet', input_tenso
<ide> VGG19 model, with weights pre-trained on ImageNet.
<ide>
<ide> This model is available for both the Theano and TensorFlow backend, and can be built both
<del>with "th" dim ordering (channels, width, height) or "tf" dim ordering (width, height, channels).
<add>with "channels_first" data format (channels, width, height) or "channels_last" data format (width, height, channels).
<ide>
<ide> The default input size for this model is 224x224.
<ide>
<ide> The default input size for this model is 224x224.
<ide> - input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
<ide> - input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(224, 224, 3)` (with `tf` dim ordering)
<del> or `(3, 224, 244)` (with `th` dim ordering).
<add> has to be `(224, 224, 3)` (with `channels_last` data format)
<add> or `(3, 224, 244)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 48.
<ide> E.g. `(200, 200, 3)` would be one valid value.
<ide> keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet', input
<ide> ResNet50 model, with weights pre-trained on ImageNet.
<ide>
<ide> This model is available for both the Theano and TensorFlow backend, and can be built both
<del>with "th" dim ordering (channels, width, height) or "tf" dim ordering (width, height, channels).
<add>with "channels_first" data format (channels, width, height) or "channels_last" data format (width, height, channels).
<ide>
<ide> The default input size for this model is 224x224.
<ide>
<ide> The default input size for this model is 224x224.
<ide> - input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
<ide> - input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(224, 224, 3)` (with `tf` dim ordering)
<del> or `(3, 224, 244)` (with `th` dim ordering).
<add> has to be `(224, 224, 3)` (with `channels_last` data format)
<add> or `(3, 224, 244)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 197.
<ide> E.g. `(200, 200, 3)` would be one valid value.
<ide> keras.applications.inception_v3.InceptionV3(include_top=True, weights='imagenet'
<ide> Inception V3 model, with weights pre-trained on ImageNet.
<ide>
<ide> This model is available for both the Theano and TensorFlow backend, and can be built both
<del>with "th" dim ordering (channels, width, height) or "tf" dim ordering (width, height, channels).
<add>with "channels_first" data format (channels, width, height) or "channels_last" data format (width, height, channels).
<ide>
<ide> The default input size for this model is 299x299.
<ide>
<ide> The default input size for this model is 299x299.
<ide> - input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
<ide> - input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(299, 299, 3)` (with `tf` dim ordering)
<del> or `(3, 299, 299)` (with `th` dim ordering).
<add> has to be `(299, 299, 3)` (with `channels_last` data format)
<add> or `(3, 299, 299)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 139.
<ide> E.g. `(150, 150, 3)` would be one valid value.
<ide><path>docs/templates/backend.md
<ide> The default configuration file looks like this:
<ide>
<ide> ```
<ide> {
<del> "image_dim_ordering": "tf",
<add> "image_data_format": "channels_last",
<ide> "epsilon": 1e-07,
<ide> "floatx": "float32",
<ide> "backend": "tensorflow"
<ide> Using TensorFlow backend.
<ide>
<ide> ```
<ide> {
<del> "image_dim_ordering": "tf",
<add> "image_data_format": "channels_last",
<ide> "epsilon": 1e-07,
<ide> "floatx": "float32",
<ide> "backend": "tensorflow"
<ide> Using TensorFlow backend.
<ide>
<ide> You can change these settings by editing `~/.keras/keras.json`.
<ide>
<del>* `image_dim_ordering`: string, either `"tf"` or `"th"`. It specifies which dimension ordering convention Keras will follow. (`keras.backend.image_dim_ordering()` returns it.)
<del> - For 2D data (e.g. image), `"tf"` assumes `(rows, cols, channels)` while `"th"` assumes `(channels, rows, cols)`.
<del> - For 3D data, `"tf"` assumes `(conv_dim1, conv_dim2, conv_dim3, channels)` while `"th"` assumes `(channels, conv_dim1, conv_dim2, conv_dim3)`.
<add>* `image_data_format`: string, either `"channels_last"` or `"channels_first"`. It specifies which data format convention Keras will follow. (`keras.backend.image_data_format()` returns it.)
<add> - For 2D data (e.g. image), `"channels_last"` assumes `(rows, cols, channels)` while `"channels_first"` assumes `(channels, rows, cols)`.
<add> - For 3D data, `"channels_last"` assumes `(conv_dim1, conv_dim2, conv_dim3, channels)` while `"channels_first"` assumes `(channels, conv_dim1, conv_dim2, conv_dim3)`.
<ide> * `epsilon`: float, a numeric fuzzing constant used to avoid dividing by zero in some operations.
<ide> * `floatx`: string, `"float16"`, `"float32"`, or `"float64"`. Default float precision.
<ide> * `backend`: string, `"tensorflow"` or `"theano"`.
<ide><path>docs/templates/preprocessing/image.md
<ide> keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
<ide> horizontal_flip=False,
<ide> vertical_flip=False,
<ide> rescale=None,
<del> dim_ordering=K.image_dim_ordering())
<add> data_format=K.image_data_format())
<ide> ```
<ide>
<ide> Generate batches of tensor image data with real-time data augmentation. The data will be looped over (in batches) indefinitely.
<ide> Generate batches of tensor image data with real-time data augmentation. The data
<ide> - __rescale__: rescaling factor. Defaults to None. If None or 0, no rescaling is applied,
<ide> otherwise we multiply the data by the value provided (before applying
<ide> any other transformation).
<del> - __dim_ordering__: One of {"th", "tf"}.
<del> "tf" mode means that the images should have shape `(samples, height, width, channels)`,
<del> "th" mode means that the images should have shape `(samples, channels, height, width)`.
<del> It defaults to the `image_dim_ordering` value found in your
<add> - _data_format_: One of {"channels_first", "channels_last"}.
<add> "channels_last" mode means that the images should have shape `(samples, height, width, channels)`,
<add> "channels_first" mode means that the images should have shape `(samples, channels, height, width)`.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> - __Methods__:
<ide> - __fit(X)__: Compute the internal data stats related to the data-dependent transformations, based on an array of sample data.
<ide><path>examples/conv_filter_visualization.py
<ide> def deprocess_image(x):
<ide>
<ide> # convert to RGB array
<ide> x *= 255
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> x = x.transpose((1, 2, 0))
<ide> x = np.clip(x, 0, 255).astype('uint8')
<ide> return x
<ide> def normalize(x):
<ide> # we build a loss function that maximizes the activation
<ide> # of the nth filter of the layer considered
<ide> layer_output = layer_dict[layer_name].output
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> loss = K.mean(layer_output[:, filter_index, :, :])
<ide> else:
<ide> loss = K.mean(layer_output[:, :, :, filter_index])
<ide> def normalize(x):
<ide> step = 1.
<ide>
<ide> # we start from a gray image with some random noise
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> input_img_data = np.random.random((1, 3, img_width, img_height))
<ide> else:
<ide> input_img_data = np.random.random((1, img_width, img_height, 3))
<ide><path>examples/conv_lstm.py
<ide>
<ide> seq.add(Convolution3D(nb_filter=1, kernel_dim1=1, kernel_dim2=3,
<ide> kernel_dim3=3, activation='sigmoid',
<del> border_mode='same', dim_ordering='tf'))
<add> border_mode='same', data_format='channels_last'))
<ide>
<ide> seq.compile(loss='binary_crossentropy', optimizer='adadelta')
<ide>
<ide><path>examples/deep_dream.py
<ide> def preprocess_image(image_path):
<ide>
<ide>
<ide> def deprocess_image(x):
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> x = x.reshape((3, img_width, img_height))
<ide> x = x.transpose((1, 2, 0))
<ide> else:
<ide> def deprocess_image(x):
<ide> x = np.clip(x, 0, 255).astype('uint8')
<ide> return x
<ide>
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> img_size = (3, img_width, img_height)
<ide> else:
<ide> img_size = (img_width, img_height, 3)
<ide> def deprocess_image(x):
<ide>
<ide> def continuity_loss(x):
<ide> assert K.ndim(x) == 4
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> a = K.square(x[:, :, :img_width - 1, :img_height - 1] -
<ide> x[:, :, 1:, :img_height - 1])
<ide> b = K.square(x[:, :, :img_width - 1, :img_height - 1] -
<ide> def continuity_loss(x):
<ide> x = layer_dict[layer_name].output
<ide> shape = layer_dict[layer_name].output_shape
<ide> # we avoid border artifacts by only involving non-border pixels in the loss
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> loss -= coeff * K.sum(K.square(x[:, :, 2: shape[2] - 2, 2: shape[3] - 2])) / np.prod(shape[1:])
<ide> else:
<ide> loss -= coeff * K.sum(K.square(x[:, 2: shape[1] - 2, 2: shape[2] - 2, :])) / np.prod(shape[1:])
<ide><path>examples/image_ocr.py
<ide> def build_word_list(self, num_words, max_string_len=None, mono_fraction=0.5):
<ide> def get_batch(self, index, size, train):
<ide> # width and height are backwards from typical Keras convention
<ide> # because width is the time dimension when it gets fed into the RNN
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> X_data = np.ones([size, 1, self.img_w, self.img_h])
<ide> else:
<ide> X_data = np.ones([size, self.img_w, self.img_h, 1])
<ide> def get_batch(self, index, size, train):
<ide> # Mix in some blank inputs. This seems to be important for
<ide> # achieving translational invariance
<ide> if train and i > size - 4:
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> X_data[i, 0, 0:self.img_w, :] = self.paint_func('')[0, :, :].T
<ide> else:
<ide> X_data[i, 0:self.img_w, :, 0] = self.paint_func('',)[0, :, :].T
<ide> def get_batch(self, index, size, train):
<ide> label_length[i] = 1
<ide> source_str.append('')
<ide> else:
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> X_data[i, 0, 0:self.img_w, :] = self.paint_func(self.X_text[index + i])[0, :, :].T
<ide> else:
<ide> X_data[i, 0:self.img_w, :, 0] = self.paint_func(self.X_text[index + i])[0, :, :].T
<ide> def on_epoch_end(self, epoch, logs={}):
<ide> cols = 1
<ide> for i in range(self.num_display_words):
<ide> pylab.subplot(self.num_display_words // cols, cols, i + 1)
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> the_input = word_batch['the_input'][i, 0, :, :]
<ide> else:
<ide> the_input = word_batch['the_input'][i, :, :, 0]
<ide> def train(run_name, start_epoch, stop_epoch, img_w):
<ide> time_dense_size = 32
<ide> rnn_size = 512
<ide>
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> input_shape = (1, img_w, img_h)
<ide> else:
<ide> input_shape = (img_w, img_h, 1)
<ide><path>examples/mnist_acgan.py
<ide>
<ide> np.random.seed(1337)
<ide>
<del>K.set_image_dim_ordering('th')
<add>K.set_image_data_format('channels_first')
<ide>
<ide>
<ide> def build_generator(latent_size):
<ide><path>examples/mnist_cnn.py
<ide> # the data, shuffled and split between train and test sets
<ide> (X_train, y_train), (X_test, y_test) = mnist.load_data()
<ide>
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
<ide> X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
<ide> input_shape = (1, img_rows, img_cols)
<ide><path>examples/mnist_net2net.py
<ide> when a Dropout layer is used.
<ide>
<ide> Results
<del>- Tested with 'Theano' backend and 'th' image_dim_ordering.
<add>- Tested with 'Theano' backend and 'channels_first' image_data_format.
<ide> - Running on GPU GeForce GTX 980M
<ide> - Performance Comparisons - validation loss values during first 3 epochs:
<ide> (1) teacher_model: 0.075 0.041 0.041
<ide><path>examples/mnist_swwae.py
<ide> def getwhere(x):
<ide> 'of a gradient, which isn\'t '
<ide> 'supported for all TF ops.')
<ide>
<del># This example assume 'th' dim ordering.
<del>K.set_image_dim_ordering('th')
<add># This example assumes 'channels_first' data format.
<add>K.set_image_data_format('channels_first')
<ide>
<ide> # input image dimensions
<ide> img_rows, img_cols = 28, 28
<ide><path>examples/mnist_transfer_cnn.py
<ide> # convolution kernel size
<ide> kernel_size = 3
<ide>
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> input_shape = (1, img_rows, img_cols)
<ide> else:
<ide> input_shape = (img_rows, img_cols, 1)
<ide><path>examples/neural_doodle.py
<ide> def preprocess_image(image_path):
<ide>
<ide>
<ide> def deprocess_image(x):
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> x = x.reshape((3, img_nrows, img_ncols))
<ide> x = x.transpose((1, 2, 0))
<ide> else:
<ide> def kmeans(xs, k):
<ide> def load_mask_labels():
<ide> '''Load both target and style masks.
<ide> A mask image (nr x nc) with m labels/colors will be loaded
<del> as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'
<add> as a 4D boolean tensor: (1, m, nr, nc) for 'channels_first' or (1, nr, nc, m) for 'channels_last'
<ide> '''
<ide> target_mask_img = load_img(target_mask_path,
<ide> target_size=(img_nrows, img_ncols))
<ide> target_mask_img = img_to_array(target_mask_img)
<ide> style_mask_img = load_img(style_mask_path,
<ide> target_size=(img_nrows, img_ncols))
<ide> style_mask_img = img_to_array(style_mask_img)
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
<ide> target_mask_img.reshape((3, -1)).T])
<ide> else:
<ide> def load_mask_labels():
<ide> target_mask_label = labels[img_nrows *
<ide> img_ncols:].reshape((img_nrows, img_ncols))
<ide>
<del> stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
<add> stack_axis = 0 if K.image_data_format() == 'channels_first' else -1
<ide> style_mask = np.stack([style_mask_label == r for r in xrange(nb_labels)],
<ide> axis=stack_axis)
<ide> target_mask = np.stack([target_mask_label == r for r in xrange(nb_labels)],
<ide> def load_mask_labels():
<ide> np.expand_dims(target_mask, axis=0))
<ide>
<ide> # Create tensor variables for images
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> shape = (1, nb_colors, img_nrows, img_ncols)
<ide> else:
<ide> shape = (1, img_nrows, img_ncols, nb_colors)
<ide> def region_style_loss(style_image, target_image, style_mask, target_mask):
<ide> '''
<ide> assert 3 == K.ndim(style_image) == K.ndim(target_image)
<ide> assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> masked_style = style_image * style_mask
<ide> masked_target = target_image * target_mask
<ide> nb_channels = K.shape(style_image)[0]
<ide> def style_loss(style_image, target_image, style_masks, target_masks):
<ide> assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
<ide> loss = K.variable(0)
<ide> for i in xrange(nb_labels):
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> style_mask = style_masks[i, :, :]
<ide> target_mask = target_masks[i, :, :]
<ide> else:
<ide> def content_loss(content_image, target_image):
<ide>
<ide> def total_variation_loss(x):
<ide> assert 4 == K.ndim(x)
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
<ide> x[:, :, 1:, :img_ncols - 1])
<ide> b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
<ide> def total_variation_loss(x):
<ide>
<ide>
<ide> def eval_loss_and_grads(x):
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> x = x.reshape((1, 3, img_nrows, img_ncols))
<ide> else:
<ide> x = x.reshape((1, img_nrows, img_ncols, 3))
<ide> def grads(self, x):
<ide> evaluator = Evaluator()
<ide>
<ide> # Generate images by iterative optimization
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128.
<ide> else:
<ide> x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128.
<ide><path>examples/neural_style_transfer.py
<ide> def preprocess_image(image_path):
<ide>
<ide>
<ide> def deprocess_image(x):
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> x = x.reshape((3, img_nrows, img_ncols))
<ide> x = x.transpose((1, 2, 0))
<ide> else:
<ide> def deprocess_image(x):
<ide> style_reference_image = K.variable(preprocess_image(style_reference_image_path))
<ide>
<ide> # this will contain our generated image
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
<ide> else:
<ide> combination_image = K.placeholder((1, img_nrows, img_ncols, 3))
<ide> def deprocess_image(x):
<ide>
<ide> def gram_matrix(x):
<ide> assert K.ndim(x) == 3
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> features = K.batch_flatten(x)
<ide> else:
<ide> features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
<ide> def content_loss(base, combination):
<ide>
<ide> def total_variation_loss(x):
<ide> assert K.ndim(x) == 4
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
<ide> b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
<ide> else:
<ide> def total_variation_loss(x):
<ide>
<ide>
<ide> def eval_loss_and_grads(x):
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> x = x.reshape((1, 3, img_nrows, img_ncols))
<ide> else:
<ide> x = x.reshape((1, img_nrows, img_ncols, 3))
<ide> def grads(self, x):
<ide>
<ide> # run scipy-based optimization (L-BFGS) over the pixels of the generated image
<ide> # so as to minimize the neural style loss
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128.
<ide> else:
<ide> x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128.
<ide><path>examples/variational_autoencoder_deconv.py
<ide> nb_conv = 3
<ide>
<ide> batch_size = 100
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> original_img_size = (img_chns, img_rows, img_cols)
<ide> else:
<ide> original_img_size = (img_rows, img_cols, img_chns)
<ide> def sampling(args):
<ide> decoder_hid = Dense(intermediate_dim, activation='relu')
<ide> decoder_upsample = Dense(nb_filters * 14 * 14, activation='relu')
<ide>
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> output_shape = (batch_size, nb_filters, 14, 14)
<ide> else:
<ide> output_shape = (batch_size, 14, 14, nb_filters)
<ide> def sampling(args):
<ide> border_mode='same',
<ide> subsample=(1, 1),
<ide> activation='relu')
<del>if K.image_dim_ordering() == 'th':
<add>if K.image_data_format() == 'channels_first':
<ide> output_shape = (batch_size, nb_filters, 29, 29)
<ide> else:
<ide> output_shape = (batch_size, 29, 29, nb_filters)
<ide><path>keras/applications/audio_conv_utils.py
<ide> 'sad', 'House', 'happy']
<ide>
<ide>
<del>def preprocess_input(audio_path, dim_ordering='default'):
<add>def preprocess_input(audio_path, data_format='default'):
<ide> """Reads an audio file and outputs a Mel-spectrogram.
<ide>
<ide> # Arguments
<ide> audio_path: path to the target audio file.
<del> dim_ordering: data format for the output spectrogram image.
<add> data_format: data format for the output spectrogram image.
<ide>
<ide> # Returns
<ide> 3D Numpy tensor encoding the Mel-spectrogram.
<ide>
<ide> # Raises
<ide> ImportError: if librosa is not available.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> assert dim_ordering in {'tf', 'th'}
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> assert data_format in {'channels_last', 'channels_first'}
<ide>
<ide> if librosa is None:
<ide> raise ImportError('Librosa is required to process audio files. '
<ide> def preprocess_input(audio_path, dim_ordering='default'):
<ide> n_fft=n_fft, n_mels=n_mels) ** 2,
<ide> ref_power=1.0)
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> x = np.expand_dims(x, axis=0)
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> x = np.expand_dims(x, axis=3)
<ide> return x
<ide>
<ide><path>keras/applications/imagenet_utils.py
<ide> CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
<ide>
<ide>
<del>def preprocess_input(x, dim_ordering='default'):
<add>def preprocess_input(x, data_format='default'):
<ide> """Preprocesses a tensor encoding a batch of images.
<ide>
<ide> # Arguments
<ide> x: input Numpy tensor, 4D.
<del> dim_ordering: data format of the image tensor.
<add> data_format: data format of the image tensor.
<ide>
<ide> # Returns
<ide> Preprocessed tensor.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> assert dim_ordering in {'tf', 'th'}
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> assert data_format in {'channels_last', 'channels_first'}
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> # 'RGB'->'BGR'
<ide> x = x[:, ::-1, :, :]
<ide> # Zero-center by mean pixel
<ide> def decode_predictions(preds, top=5):
<ide> def _obtain_input_shape(input_shape,
<ide> default_size,
<ide> min_size,
<del> dim_ordering,
<add> data_format,
<ide> include_top):
<ide> """Internal utility to compute/validate an ImageNet model's input shape.
<ide>
<ide> def _obtain_input_shape(input_shape,
<ide> or a user-provided shape to be validated.
<ide> default_size: default input width/height for the model.
<ide> min_size: minimum input width/height accepted by the model.
<del> dim_ordering: image data format to use.
<add> data_format: image data format to use.
<ide> include_top: whether the model is expected to
<ide> be linked to a classifier via a Flatten layer.
<ide>
<ide> def _obtain_input_shape(input_shape,
<ide> # Raises
<ide> ValueError: in case of invalid argument values.
<ide> """
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> default_shape = (3, default_size, default_size)
<ide> else:
<ide> default_shape = (default_size, default_size, 3)
<ide> def _obtain_input_shape(input_shape,
<ide> '`input_shape` should be ' + str(default_shape) + '.')
<ide> input_shape = default_shape
<ide> else:
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> if input_shape is not None:
<ide> if len(input_shape) != 3:
<ide> raise ValueError('`input_shape` must be a tuple of three integers.')
<ide><path>keras/applications/inception_v3.py
<ide> def conv2d_bn(x, nb_filter, nb_row, nb_col,
<ide> else:
<ide> bn_name = None
<ide> conv_name = None
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> bn_axis = 1
<ide> else:
<ide> bn_axis = 3
<ide> def InceptionV3(include_top=True, weights='imagenet',
<ide> optionally loading weights pre-trained
<ide> on ImageNet. Note that when using TensorFlow,
<ide> for best performance you should set
<del> `image_dim_ordering="tf"` in your Keras config
<add> `image_data_format="channels_last"` in your Keras config
<ide> at ~/.keras/keras.json.
<ide>
<ide> The model and the weights are compatible with both
<del> TensorFlow and Theano. The dimension ordering
<add> TensorFlow and Theano. The data format
<ide> convention used by the model is the one
<ide> specified in your Keras config file.
<ide>
<ide> def InceptionV3(include_top=True, weights='imagenet',
<ide> to use as image input for the model.
<ide> input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(299, 299, 3)` (with `tf` dim ordering)
<del> or `(3, 299, 299)` (with `th` dim ordering).
<add> has to be `(299, 299, 3)` (with `channels_last` data format)
<add> or `(3, 299, 299)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 139.
<ide> E.g. `(150, 150, 3)` would be one valid value.
<ide> def InceptionV3(include_top=True, weights='imagenet',
<ide> input_shape = _obtain_input_shape(input_shape,
<ide> default_size=299,
<ide> min_size=139,
<del> dim_ordering=K.image_dim_ordering(),
<add> data_format=K.image_data_format(),
<ide> include_top=include_top)
<ide>
<ide> if input_tensor is None:
<ide> def InceptionV3(include_top=True, weights='imagenet',
<ide> else:
<ide> img_input = input_tensor
<ide>
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> channel_axis = 1
<ide> else:
<ide> channel_axis = 3
<ide> def InceptionV3(include_top=True, weights='imagenet',
<ide>
<ide> # load weights
<ide> if weights == 'imagenet':
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> if include_top:
<ide> weights_path = get_file('inception_v3_weights_th_dim_ordering_th_kernels.h5',
<ide> TH_WEIGHTS_PATH,
<ide> def InceptionV3(include_top=True, weights='imagenet',
<ide> if K.backend() == 'tensorflow':
<ide> warnings.warn('You are using the TensorFlow backend, yet you '
<ide> 'are using the Theano '
<del> 'image dimension ordering convention '
<del> '(`image_dim_ordering="th"`). '
<add> 'image data format convention '
<add> '(`image_data_format="channels_first"`). '
<ide> 'For best performance, set '
<del> '`image_dim_ordering="tf"` in '
<add> '`image_data_format="channels_last"` in '
<ide> 'your Keras config '
<ide> 'at ~/.keras/keras.json.')
<ide> convert_all_kernels_in_model(model)
<ide><path>keras/applications/music_tagger_crnn.py
<ide> from ..utils.layer_utils import convert_all_kernels_in_model
<ide> from .audio_conv_utils import decode_predictions, preprocess_input
<ide>
<del>TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.3/music_tagger_crnn_weights_tf_kernels_th_dim_ordering.h5'
<del>TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.3/music_tagger_crnn_weights_tf_kernels_tf_dim_ordering.h5'
<add>TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.3/music_tagger_crnn_weights_tf_kernels_th_data_format.h5'
<add>TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.3/music_tagger_crnn_weights_tf_kernels_tf_data_format.h5'
<ide>
<ide>
<ide> def MusicTaggerCRNN(weights='msd', input_tensor=None,
<ide> def MusicTaggerCRNN(weights='msd', input_tensor=None,
<ide> optionally loading weights pre-trained
<ide> on Million Song Dataset. Note that when using TensorFlow,
<ide> for best performance you should set
<del> `image_dim_ordering="tf"` in your Keras config
<add> `image_data_format="channels_last"` in your Keras config
<ide> at ~/.keras/keras.json.
<ide>
<ide> The model and the weights are compatible with both
<del> TensorFlow and Theano. The dimension ordering
<add> TensorFlow and Theano. The data format
<ide> convention used by the model is the one
<ide> specified in your Keras config file.
<ide>
<ide> def MusicTaggerCRNN(weights='msd', input_tensor=None,
<ide> raise ValueError('If using `weights` as msd with `include_top`'
<ide> ' as true, `classes` should be 50')
<ide> # Determine proper input shape
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> input_shape = (1, 96, 1366)
<ide> else:
<ide> input_shape = (96, 1366, 1)
<ide> def MusicTaggerCRNN(weights='msd', input_tensor=None,
<ide> melgram_input = input_tensor
<ide>
<ide> # Determine input axis
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> channel_axis = 1
<ide> freq_axis = 2
<ide> time_axis = 3
<ide> def MusicTaggerCRNN(weights='msd', input_tensor=None,
<ide> x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
<ide>
<ide> # reshaping
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> x = Permute((3, 1, 2))(x)
<ide> x = Reshape((15, 128))(x)
<ide>
<ide> def MusicTaggerCRNN(weights='msd', input_tensor=None,
<ide> return model
<ide> else:
<ide> # Load weights
<del> if K.image_dim_ordering() == 'tf':
<del> weights_path = get_file('music_tagger_crnn_weights_tf_kernels_tf_dim_ordering.h5',
<add> if K.image_data_format() == 'channels_last':
<add> weights_path = get_file('music_tagger_crnn_weights_tf_kernels_tf_data_format.h5',
<ide> TF_WEIGHTS_PATH,
<ide> cache_subdir='models')
<ide> else:
<del> weights_path = get_file('music_tagger_crnn_weights_tf_kernels_th_dim_ordering.h5',
<add> weights_path = get_file('music_tagger_crnn_weights_tf_kernels_th_data_format.h5',
<ide> TH_WEIGHTS_PATH,
<ide> cache_subdir='models')
<ide> model.load_weights(weights_path, by_name=True)
<ide><path>keras/applications/resnet50.py
<ide> def identity_block(input_tensor, kernel_size, filters, stage, block):
<ide> block: 'a','b'..., current block label, used for generating layer names
<ide> """
<ide> nb_filter1, nb_filter2, nb_filter3 = filters
<del> if K.image_dim_ordering() == 'tf':
<add> if K.image_data_format() == 'channels_last':
<ide> bn_axis = 3
<ide> else:
<ide> bn_axis = 1
<ide> def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2))
<ide> And the shortcut should have subsample=(2,2) as well
<ide> """
<ide> nb_filter1, nb_filter2, nb_filter3 = filters
<del> if K.image_dim_ordering() == 'tf':
<add> if K.image_data_format() == 'channels_last':
<ide> bn_axis = 3
<ide> else:
<ide> bn_axis = 1
<ide> def ResNet50(include_top=True, weights='imagenet',
<ide> optionally loading weights pre-trained
<ide> on ImageNet. Note that when using TensorFlow,
<ide> for best performance you should set
<del> `image_dim_ordering="tf"` in your Keras config
<add> `image_data_format="channels_last"` in your Keras config
<ide> at ~/.keras/keras.json.
<ide>
<ide> The model and the weights are compatible with both
<del> TensorFlow and Theano. The dimension ordering
<add> TensorFlow and Theano. The data format
<ide> convention used by the model is the one
<ide> specified in your Keras config file.
<ide>
<ide> def ResNet50(include_top=True, weights='imagenet',
<ide> to use as image input for the model.
<ide> input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(224, 224, 3)` (with `tf` dim ordering)
<del> or `(3, 224, 244)` (with `th` dim ordering).
<add> has to be `(224, 224, 3)` (with `channels_last` data format)
<add> or `(3, 224, 224)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 197.
<ide> E.g. `(200, 200, 3)` would be one valid value.
<ide> def ResNet50(include_top=True, weights='imagenet',
<ide> input_shape = _obtain_input_shape(input_shape,
<ide> default_size=224,
<ide> min_size=197,
<del> dim_ordering=K.image_dim_ordering(),
<add> data_format=K.image_data_format(),
<ide> include_top=include_top)
<ide>
<ide> if input_tensor is None:
<ide> def ResNet50(include_top=True, weights='imagenet',
<ide> img_input = Input(tensor=input_tensor, shape=input_shape)
<ide> else:
<ide> img_input = input_tensor
<del> if K.image_dim_ordering() == 'tf':
<add> if K.image_data_format() == 'channels_last':
<ide> bn_axis = 3
<ide> else:
<ide> bn_axis = 1
<ide> def ResNet50(include_top=True, weights='imagenet',
<ide>
<ide> # load weights
<ide> if weights == 'imagenet':
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> if include_top:
<ide> weights_path = get_file('resnet50_weights_th_dim_ordering_th_kernels.h5',
<ide> TH_WEIGHTS_PATH,
<ide> def ResNet50(include_top=True, weights='imagenet',
<ide> if K.backend() == 'tensorflow':
<ide> warnings.warn('You are using the TensorFlow backend, yet you '
<ide> 'are using the Theano '
<del> 'image dimension ordering convention '
<del> '(`image_dim_ordering="th"`). '
<add> 'image data format convention '
<add> '(`image_data_format="channels_first"`). '
<ide> 'For best performance, set '
<del> '`image_dim_ordering="tf"` in '
<add> '`image_data_format="channels_last"` in '
<ide> 'your Keras config '
<ide> 'at ~/.keras/keras.json.')
<ide> convert_all_kernels_in_model(model)
<ide><path>keras/applications/vgg16.py
<ide> def VGG16(include_top=True, weights='imagenet',
<ide> optionally loading weights pre-trained
<ide> on ImageNet. Note that when using TensorFlow,
<ide> for best performance you should set
<del> `image_dim_ordering="tf"` in your Keras config
<add> `image_data_format="channels_last"` in your Keras config
<ide> at ~/.keras/keras.json.
<ide>
<ide> The model and the weights are compatible with both
<del> TensorFlow and Theano. The dimension ordering
<add> TensorFlow and Theano. The data format
<ide> convention used by the model is the one
<ide> specified in your Keras config file.
<ide>
<ide> def VGG16(include_top=True, weights='imagenet',
<ide> to use as image input for the model.
<ide> input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(224, 224, 3)` (with `tf` dim ordering)
<del> or `(3, 224, 244)` (with `th` dim ordering).
<add> has to be `(224, 224, 3)` (with `channels_last` data format)
<add> or `(3, 224, 224)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 48.
<ide> E.g. `(200, 200, 3)` would be one valid value.
<ide> def VGG16(include_top=True, weights='imagenet',
<ide> input_shape = _obtain_input_shape(input_shape,
<ide> default_size=224,
<ide> min_size=48,
<del> dim_ordering=K.image_dim_ordering(),
<add> data_format=K.image_data_format(),
<ide> include_top=include_top)
<ide>
<ide> if input_tensor is None:
<ide> def VGG16(include_top=True, weights='imagenet',
<ide>
<ide> # load weights
<ide> if weights == 'imagenet':
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> if include_top:
<ide> weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5',
<ide> TH_WEIGHTS_PATH,
<ide> def VGG16(include_top=True, weights='imagenet',
<ide> if K.backend() == 'tensorflow':
<ide> warnings.warn('You are using the TensorFlow backend, yet you '
<ide> 'are using the Theano '
<del> 'image dimension ordering convention '
<del> '(`image_dim_ordering="th"`). '
<add> 'image data format convention '
<add> '(`image_data_format="channels_first"`). '
<ide> 'For best performance, set '
<del> '`image_dim_ordering="tf"` in '
<add> '`image_data_format="channels_last"` in '
<ide> 'your Keras config '
<ide> 'at ~/.keras/keras.json.')
<ide> convert_all_kernels_in_model(model)
<ide><path>keras/applications/vgg19.py
<ide> def VGG19(include_top=True, weights='imagenet',
<ide> optionally loading weights pre-trained
<ide> on ImageNet. Note that when using TensorFlow,
<ide> for best performance you should set
<del> `image_dim_ordering="tf"` in your Keras config
<add> `image_data_format="channels_last"` in your Keras config
<ide> at ~/.keras/keras.json.
<ide>
<ide> The model and the weights are compatible with both
<del> TensorFlow and Theano. The dimension ordering
<add> TensorFlow and Theano. The data format
<ide> convention used by the model is the one
<ide> specified in your Keras config file.
<ide>
<ide> def VGG19(include_top=True, weights='imagenet',
<ide> to use as image input for the model.
<ide> input_shape: optional shape tuple, only to be specified
<ide> if `include_top` is False (otherwise the input shape
<del> has to be `(224, 224, 3)` (with `tf` dim ordering)
<del> or `(3, 224, 244)` (with `th` dim ordering).
<add> has to be `(224, 224, 3)` (with `channels_last` data format)
<add> or `(3, 224, 224)` (with `channels_first` data format).
<ide> It should have exactly 3 inputs channels,
<ide> and width and height should be no smaller than 48.
<ide> E.g. `(200, 200, 3)` would be one valid value.
<ide> def VGG19(include_top=True, weights='imagenet',
<ide> input_shape = _obtain_input_shape(input_shape,
<ide> default_size=224,
<ide> min_size=48,
<del> dim_ordering=K.image_dim_ordering(),
<add> data_format=K.image_data_format(),
<ide> include_top=include_top)
<ide>
<ide> if input_tensor is None:
<ide> def VGG19(include_top=True, weights='imagenet',
<ide>
<ide> # load weights
<ide> if weights == 'imagenet':
<del> if K.image_dim_ordering() == 'th':
<add> if K.image_data_format() == 'channels_first':
<ide> if include_top:
<ide> weights_path = get_file('vgg19_weights_th_dim_ordering_th_kernels.h5',
<ide> TH_WEIGHTS_PATH,
<ide> def VGG19(include_top=True, weights='imagenet',
<ide> if K.backend() == 'tensorflow':
<ide> warnings.warn('You are using the TensorFlow backend, yet you '
<ide> 'are using the Theano '
<del> 'image dimension ordering convention '
<del> '(`image_dim_ordering="th"`). '
<add> 'image data format convention '
<add> '(`image_data_format="channels_first"`). '
<ide> 'For best performance, set '
<del> '`image_dim_ordering="tf"` in '
<add> '`image_data_format="channels_last"` in '
<ide> 'your Keras config '
<ide> 'at ~/.keras/keras.json.')
<ide> convert_all_kernels_in_model(model)
<ide><path>keras/applications/xception.py
<ide> def Xception(include_top=True, weights='imagenet',
<ide> optionally loading weights pre-trained
<ide> on ImageNet. This model is available for TensorFlow only,
<ide> and can only be used with inputs following the TensorFlow
<del> dimension ordering `(width, height, channels)`.
<del> You should set `image_dim_ordering="tf"` in your Keras config
<add> data format `(width, height, channels)`.
<add> You should set `image_data_format="channels_last"` in your Keras config
<ide> located at ~/.keras/keras.json.
<ide>
<ide> Note that the default input image size for this model is 299x299.
<ide> def Xception(include_top=True, weights='imagenet',
<ide> if K.backend() != 'tensorflow':
<ide> raise RuntimeError('The Xception model is only available with '
<ide> 'the TensorFlow backend.')
<del> if K.image_dim_ordering() != 'tf':
<add> if K.image_data_format() != 'channels_last':
<ide> warnings.warn('The Xception model is only available for the '
<del> 'input dimension ordering "tf" '
<add> 'input data format "channels_last" '
<ide> '(width, height, channels). '
<ide> 'However your settings specify the default '
<del> 'dimension ordering "th" (channels, width, height). '
<del> 'You should set `image_dim_ordering="tf"` in your Keras '
<add> 'data format "channels_first" (channels, width, height). '
<add> 'You should set `image_data_format="channels_last"` in your Keras '
<ide> 'config located at ~/.keras/keras.json. '
<ide> 'The model being returned right now will expect inputs '
<del> 'to follow the "tf" dimension ordering.')
<del> K.set_image_dim_ordering('tf')
<del> old_dim_ordering = 'th'
<add> 'to follow the "channels_last" data format.')
<add> K.set_image_data_format('channels_last')
<add> old_data_format = 'channels_first'
<ide> else:
<del> old_dim_ordering = None
<add> old_data_format = None
<ide>
<ide> # Determine proper input shape
<ide> input_shape = _obtain_input_shape(input_shape,
<ide> default_size=299,
<ide> min_size=71,
<del> dim_ordering=K.image_dim_ordering(),
<add> data_format=K.image_data_format(),
<ide> include_top=include_top)
<ide>
<ide> if input_tensor is None:
<ide> def Xception(include_top=True, weights='imagenet',
<ide> cache_subdir='models')
<ide> model.load_weights(weights_path)
<ide>
<del> if old_dim_ordering:
<del> K.set_image_dim_ordering(old_dim_ordering)
<add> if old_data_format:
<add> K.set_image_data_format(old_data_format)
<ide> return model
<ide>
<ide>
<ide><path>keras/backend/__init__.py
<ide> from .common import set_floatx
<ide> from .common import get_uid
<ide> from .common import cast_to_floatx
<del>from .common import image_dim_ordering
<del>from .common import set_image_dim_ordering
<add>from .common import image_data_format
<add>from .common import set_image_data_format
<ide> from .common import is_keras_tensor
<ide> from .common import legacy_weight_ordering
<ide> from .common import set_legacy_weight_ordering
<ide> assert isinstance(_epsilon, float)
<ide> _backend = _config.get('backend', _BACKEND)
<ide> assert _backend in {'theano', 'tensorflow'}
<del> _image_dim_ordering = _config.get('image_dim_ordering',
<del> image_dim_ordering())
<del> assert _image_dim_ordering in {'tf', 'th'}
<add> _image_data_format = _config.get('image_data_format',
<add> image_data_format())
<add> assert _image_data_format in {'channels_last', 'channels_first'}
<ide>
<ide> set_floatx(_floatx)
<ide> set_epsilon(_epsilon)
<del> set_image_dim_ordering(_image_dim_ordering)
<add> set_image_data_format(_image_data_format)
<ide> _BACKEND = _backend
<ide>
<ide> # save config file
<ide> if not os.path.exists(_config_path):
<ide> _config = {'floatx': floatx(),
<ide> 'epsilon': epsilon(),
<ide> 'backend': _BACKEND,
<del> 'image_dim_ordering': image_dim_ordering()}
<add> 'image_data_format': image_data_format()}
<ide> with open(_config_path, 'w') as f:
<ide> f.write(json.dumps(_config, indent=4))
<ide>
<ide><path>keras/backend/common.py
<ide> _FLOATX = 'float32'
<ide> _EPSILON = 10e-8
<ide> _UID_PREFIXES = defaultdict(int)
<del>_IMAGE_DIM_ORDERING = 'tf'
<add>_IMAGE_DATA_FORMAT = 'channels_last'
<ide> _LEGACY_WEIGHT_ORDERING = False
<ide>
<ide>
<ide> def cast_to_floatx(x):
<ide> return np.asarray(x, dtype=_FLOATX)
<ide>
<ide>
<del>def image_dim_ordering():
<del> """Returns the default image dimension ordering
<del> convention ('th' or 'tf').
<add>def image_data_format():
<add> """Returns the default image data format
<add> convention ('channels_first' or 'channels_last').
<ide>
<ide> # Returns
<del> A string, either `'th'` or `'tf'`
<add> A string, either `'channels_first'` or `'channels_last'`
<ide>
<ide> # Example
<ide> ```python
<del> >>> keras.backend.image_dim_ordering()
<del> 'th'
<add> >>> keras.backend.image_data_format()
<add> 'channels_first'
<ide> ```
<ide> """
<del> return _IMAGE_DIM_ORDERING
<add> return _IMAGE_DATA_FORMAT
<ide>
<ide>
<del>def set_image_dim_ordering(dim_ordering):
<add>def set_image_data_format(data_format):
<ide> """Sets the value of the image dimension
<del> ordering convention ('th' or 'tf').
<add> ordering convention ('channels_first' or 'channels_last').
<ide>
<ide> # Arguments
<del> dim_ordering: string. `'th'` or `'tf'`.
<add> data_format: string. `'channels_first'` or `'channels_last'`.
<ide>
<ide> # Example
<ide> ```python
<ide> >>> from keras import backend as K
<del> >>> K.image_dim_ordering()
<del> 'th'
<del> >>> K.set_image_dim_ordering('tf')
<del> >>> K.image_dim_ordering()
<del> 'tf'
<add> >>> K.image_data_format()
<add> 'channels_first'
<add> >>> K.set_image_data_format('channels_last')
<add> >>> K.image_data_format()
<add> 'channels_last'
<ide> ```
<ide> """
<del> global _IMAGE_DIM_ORDERING
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<del> _IMAGE_DIM_ORDERING = str(dim_ordering)
<add> global _IMAGE_DATA_FORMAT
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('Unknown data_format:', data_format)
<add> _IMAGE_DATA_FORMAT = str(data_format)
<ide>
<ide>
<ide> def get_uid(prefix=''):
<ide><path>keras/backend/tensorflow_backend.py
<ide> import numpy as np
<ide> import os
<ide> import warnings
<del>from .common import floatx, _EPSILON, image_dim_ordering, reset_uids
<add>from .common import floatx, _EPSILON, image_data_format, reset_uids
<ide> py_all = all
<ide>
<ide> # INTERNAL UTILS
<ide> def permute_dimensions(x, pattern):
<ide> return tf.transpose(x, perm=pattern)
<ide>
<ide>
<del>def resize_images(X, height_factor, width_factor, dim_ordering):
<add>def resize_images(X, height_factor, width_factor, data_format):
<ide> """Resizes the images contained in a 4D tensor of shape
<del> - `[batch, channels, height, width]` (for 'th' dim_ordering)
<del> - `[batch, height, width, channels]` (for 'tf' dim_ordering)
<add> - `[batch, channels, height, width]` (for 'channels_first' data_format)
<add> - `[batch, height, width, channels]` (for 'channels_last' data_format)
<ide> by a factor of `(height_factor, width_factor)`. Both factors should be
<ide> positive integers.
<ide>
<ide> # Returns
<ide> A tensor.
<ide> """
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> original_shape = int_shape(X)
<ide> new_shape = tf.shape(X)[2:]
<ide> new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
<ide> def resize_images(X, height_factor, width_factor, dim_ordering):
<ide> X.set_shape((None, None, original_shape[2] * height_factor if original_shape[2] is not None else None,
<ide> original_shape[3] * width_factor if original_shape[3] is not None else None))
<ide> return X
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> original_shape = int_shape(X)
<ide> new_shape = tf.shape(X)[1:3]
<ide> new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
<ide> def resize_images(X, height_factor, width_factor, dim_ordering):
<ide> original_shape[2] * width_factor if original_shape[2] is not None else None, None))
<ide> return X
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide>
<ide>
<del>def resize_volumes(X, depth_factor, height_factor, width_factor, dim_ordering):
<add>def resize_volumes(X, depth_factor, height_factor, width_factor, data_format):
<ide> """Resizes the volume contained in a 5D tensor of shape
<del> - `[batch, channels, depth, height, width]` (for 'th' dim_ordering)
<del> - `[batch, depth, height, width, channels]` (for 'tf' dim_ordering)
<add> - `[batch, channels, depth, height, width]` (for 'channels_first' data_format)
<add> - `[batch, depth, height, width, channels]` (for 'channels_last' data_format)
<ide> by a factor of `(depth_factor, height_factor, width_factor)`.
<ide> All three factors should be positive integers.
<ide>
<ide> # Returns
<ide> A tensor.
<ide> """
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> output = repeat_elements(X, depth_factor, axis=2)
<ide> output = repeat_elements(output, height_factor, axis=3)
<ide> output = repeat_elements(output, width_factor, axis=4)
<ide> return output
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> output = repeat_elements(X, depth_factor, axis=1)
<ide> output = repeat_elements(output, height_factor, axis=2)
<ide> output = repeat_elements(output, width_factor, axis=3)
<ide> return output
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide>
<ide>
<ide> def repeat_elements(x, rep, axis):
<ide> def asymmetric_temporal_padding(x, left_pad=1, right_pad=1):
<ide> return tf.pad(x, pattern)
<ide>
<ide>
<del>def spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):
<add>def spatial_2d_padding(x, padding=(1, 1), data_format='default'):
<ide> """Pads the 2nd and 3rd dimensions of a 4D tensor
<ide> with "padding[0]" and "padding[1]" (resp.) zeros left and right.
<ide>
<ide> # Returns
<ide> A padded 4D tensor.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> pattern = [[0, 0], [0, 0],
<ide> [padding[0], padding[0]], [padding[1], padding[1]]]
<ide> else:
<ide> def spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):
<ide>
<ide> def asymmetric_spatial_2d_padding(x, top_pad=1, bottom_pad=1,
<ide> left_pad=1, right_pad=1,
<del> dim_ordering='default'):
<add> data_format='default'):
<ide> """Pad the rows and columns of a 4D tensor
<ide> with "top_pad", "bottom_pad", "left_pad", "right_pad" (resp.) zeros
<ide> rows on top, bottom; cols on left, right.
<ide>
<ide> # Returns
<ide> A padded 4D tensor.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> pattern = [[0, 0],
<ide> [0, 0],
<ide> [top_pad, bottom_pad],
<ide> def asymmetric_spatial_2d_padding(x, top_pad=1, bottom_pad=1,
<ide> return tf.pad(x, pattern)
<ide>
<ide>
<del>def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='default'):
<add>def spatial_3d_padding(x, padding=(1, 1, 1), data_format='default'):
<ide> """Pads 5D tensor with zeros for the depth, height, width dimension with
<ide> "padding[0]", "padding[1]" and "padding[2]" (resp.) zeros left and right
<ide>
<del> For 'tf' dim_ordering, the 2nd, 3rd and 4th dimension will be padded.
<del> For 'th' dim_ordering, the 3rd, 4th and 5th dimension will be padded.
<add> For 'channels_last' data_format, the 2nd, 3rd and 4th dimension will be padded.
<add> For 'channels_first' data_format, the 3rd, 4th and 5th dimension will be padded.
<ide>
<ide> # Returns
<ide> A padded 5D tensor.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> pattern = [
<ide> [0, 0],
<ide> [0, 0],
<ide> def in_top_k(predictions, targets, k):
<ide>
<ide> # CONVOLUTIONS
<ide>
<del>def _preprocess_deconv_output_shape(x, shape, dim_ordering):
<del> if dim_ordering == 'th':
<add>def _preprocess_deconv_output_shape(x, shape, data_format):
<add> if data_format == 'channels_first':
<ide> shape = (shape[0], shape[2], shape[3], shape[1])
<ide>
<ide> if shape[0] is None:
<ide> shape = (tf.shape(x)[0], ) + tuple(shape[1:])
<ide> return shape
<ide>
<ide>
<del>def _preprocess_conv2d_input(x, dim_ordering):
<add>def _preprocess_conv2d_input(x, data_format):
<ide> if dtype(x) == 'float64':
<ide> x = tf.cast(x, 'float32')
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH input shape: (samples, input_depth, rows, cols)
<ide> def _preprocess_conv2d_input(x, dim_ordering):
<ide> return x
<ide>
<ide>
<del>def _preprocess_conv3d_input(x, dim_ordering):
<add>def _preprocess_conv3d_input(x, data_format):
<ide> if dtype(x) == 'float64':
<ide> x = tf.cast(x, 'float32')
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
<ide> def _preprocess_conv3d_input(x, dim_ordering):
<ide> return x
<ide>
<ide>
<del>def _preprocess_conv2d_kernel(kernel, dim_ordering):
<add>def _preprocess_conv2d_kernel(kernel, data_format):
<ide> if dtype(kernel) == 'float64':
<ide> kernel = tf.cast(kernel, 'float32')
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH kernel shape: (depth, input_depth, rows, cols)
<ide> def _preprocess_conv2d_kernel(kernel, dim_ordering):
<ide> return kernel
<ide>
<ide>
<del>def _preprocess_conv3d_kernel(kernel, dim_ordering):
<add>def _preprocess_conv3d_kernel(kernel, data_format):
<ide> if dtype(kernel) == 'float64':
<ide> kernel = tf.cast(kernel, 'float32')
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH kernel shape: (out_depth, input_depth, kernel_dim1, kernel_dim2, kernel_dim3)
<ide> def _preprocess_border_mode(border_mode):
<ide> return padding
<ide>
<ide>
<del>def _postprocess_conv2d_output(x, dim_ordering):
<del> if dim_ordering == 'th':
<add>def _postprocess_conv2d_output(x, data_format):
<add> if data_format == 'channels_first':
<ide> x = tf.transpose(x, (0, 3, 1, 2))
<ide>
<ide> if floatx() == 'float64':
<ide> x = tf.cast(x, 'float64')
<ide> return x
<ide>
<ide>
<del>def _postprocess_conv3d_output(x, dim_ordering):
<del> if dim_ordering == 'th':
<add>def _postprocess_conv3d_output(x, data_format):
<add> if data_format == 'channels_first':
<ide> x = tf.transpose(x, (0, 4, 1, 2, 3))
<ide>
<ide> if floatx() == 'float64':
<ide> def conv1d(x, kernel, stride=1, border_mode='valid',
<ide>
<ide>
<ide> def conv2d(x, kernel, strides=(1, 1), border_mode='valid',
<del> dim_ordering='default',
<add> data_format='default',
<ide> image_shape=None, filter_shape=None, filter_dilation=(1, 1)):
<ide> """2D convolution.
<ide>
<ide> # Arguments
<ide> kernel: kernel tensor.
<ide> strides: strides tuple.
<ide> border_mode: string, `"same"` or `"valid"`.
<del> dim_ordering: `"tf"` or `"th"`.
<del> Whether to use Theano or TensorFlow dimension ordering
<add> data_format: `"channels_last"` or `"channels_first"`.
<add> Whether to use Theano or TensorFlow data format
<ide> for inputs/kernels/ouputs.
<ide>
<ide> # Returns
<ide> A tensor, result of 2D convolution.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<del> x = _preprocess_conv2d_input(x, dim_ordering)
<del> kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
<add> x = _preprocess_conv2d_input(x, data_format)
<add> kernel = _preprocess_conv2d_kernel(kernel, data_format)
<ide> padding = _preprocess_border_mode(border_mode)
<ide> if filter_dilation == (1, 1):
<ide> strides = (1,) + strides + (1,)
<ide> def conv2d(x, kernel, strides=(1, 1), border_mode='valid',
<ide> assert filter_dilation[0] == filter_dilation[1]
<ide> assert strides == (1, 1), 'Invalid strides for dilated convolution'
<ide> x = tf.nn.atrous_conv2d(x, kernel, filter_dilation[0], padding=padding)
<del> return _postprocess_conv2d_output(x, dim_ordering)
<add> return _postprocess_conv2d_output(x, data_format)
<ide>
<ide>
<ide> def deconv2d(x, kernel, output_shape, strides=(1, 1),
<ide> border_mode='valid',
<del> dim_ordering='default',
<add> data_format='default',
<ide> image_shape=None, filter_shape=None):
<ide> """2D deconvolution (i.e. transposed convolution).
<ide>
<ide> def deconv2d(x, kernel, output_shape, strides=(1, 1),
<ide> output_shape: 1D int tensor for the output shape.
<ide> strides: strides tuple.
<ide> border_mode: string, `"same"` or `"valid"`.
<del> dim_ordering: `"tf"` or `"th"`.
<del> Whether to use Theano or TensorFlow dimension ordering
<add> data_format: `"channels_last"` or `"channels_first"`.
<add> Whether to use Theano or TensorFlow data format
<ide> for inputs/kernels/ouputs.
<ide>
<ide> # Returns
<ide> A tensor, result of transposed 2D convolution.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<del> x = _preprocess_conv2d_input(x, dim_ordering)
<del> output_shape = _preprocess_deconv_output_shape(x, output_shape, dim_ordering)
<del> kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
<add> x = _preprocess_conv2d_input(x, data_format)
<add> output_shape = _preprocess_deconv_output_shape(x, output_shape, data_format)
<add> kernel = _preprocess_conv2d_kernel(kernel, data_format)
<ide> kernel = tf.transpose(kernel, (0, 1, 3, 2))
<ide> padding = _preprocess_border_mode(border_mode)
<ide> strides = (1,) + strides + (1,)
<ide>
<ide> x = tf.nn.conv2d_transpose(x, kernel, output_shape, strides,
<ide> padding=padding)
<del> return _postprocess_conv2d_output(x, dim_ordering)
<add> return _postprocess_conv2d_output(x, data_format)
<ide>
<ide>
<ide> def atrous_conv2d(x, kernel, rate=1,
<ide> border_mode='valid',
<del> dim_ordering='default',
<add> data_format='default',
<ide> image_shape=None, filter_shape=None):
<ide> """Atrous 2D convolution. Also as known as dilated convolution.
<ide>
<ide> def atrous_conv2d(x, kernel, rate=1,
<ide> output_shape: 1D int tensor for the output shape.
<ide> strides: strides tuple.
<ide> border_mode: string, `"same"` or `"valid"`.
<del> dim_ordering: `"tf"` or `"th"`.
<del> Whether to use Theano or TensorFlow dimension ordering
<add> data_format: `"channels_last"` or `"channels_first"`.
<add> Whether to use Theano or TensorFlow data format
<ide> for inputs/kernels/ouputs.
<ide>
<ide> # Returns
<ide> A tensor, result of atrous transposed 2D convolution.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide> if rate == 1:
<ide> return conv2d(x, kernel, strides=(1, 1), border_mode=border_mode,
<del> dim_ordering=dim_ordering)
<add> data_format=data_format)
<ide>
<del> x = _preprocess_conv2d_input(x, dim_ordering)
<del> kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
<add> x = _preprocess_conv2d_input(x, data_format)
<add> kernel = _preprocess_conv2d_kernel(kernel, data_format)
<ide> padding = _preprocess_border_mode(border_mode)
<ide>
<ide> x = tf.nn.atrous_conv2d(x, kernel, rate, padding)
<del> return _postprocess_conv2d_output(x, dim_ordering)
<add> return _postprocess_conv2d_output(x, data_format)
<ide>
<ide>
<ide> def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
<del> border_mode='valid', dim_ordering='default'):
<add> border_mode='valid', data_format='default'):
<ide> """2-D convolution with separable filters.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<del> x = _preprocess_conv2d_input(x, dim_ordering)
<add> x = _preprocess_conv2d_input(x, data_format)
<ide> depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel,
<del> dim_ordering)
<add> data_format)
<ide> pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel,
<del> dim_ordering)
<add> data_format)
<ide> padding = _preprocess_border_mode(border_mode)
<ide> strides = (1,) + strides + (1,)
<ide>
<ide> x = tf.nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel,
<ide> strides, padding)
<del> return _postprocess_conv2d_output(x, dim_ordering)
<add> return _postprocess_conv2d_output(x, data_format)
<ide>
<ide>
<ide> def conv3d(x, kernel, strides=(1, 1, 1),
<del> border_mode='valid', dim_ordering='default',
<add> border_mode='valid', data_format='default',
<ide> volume_shape=None, filter_shape=None):
<ide> """3D convolution.
<ide>
<ide> # Arguments
<ide> kernel: kernel tensor.
<ide> strides: strides tuple.
<ide> border_mode: string, `"same"` or `"valid"`.
<del> dim_ordering: `"tf"` or `"th"`.
<del> Whether to use Theano or TensorFlow dimension ordering
<add> data_format: `"channels_last"` or `"channels_first"`.
<add> Whether to use Theano or TensorFlow data format
<ide> for inputs/kernels/ouputs.
<ide>
<ide> # Returns
<ide> A tensor, result of 3D convolution.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<del> x = _preprocess_conv3d_input(x, dim_ordering)
<del> kernel = _preprocess_conv3d_kernel(kernel, dim_ordering)
<add> x = _preprocess_conv3d_input(x, data_format)
<add> kernel = _preprocess_conv3d_kernel(kernel, data_format)
<ide> padding = _preprocess_border_mode(border_mode)
<ide> strides = (1,) + strides + (1,)
<ide>
<ide> x = tf.nn.conv3d(x, kernel, strides, padding)
<del> return _postprocess_conv3d_output(x, dim_ordering)
<add> return _postprocess_conv3d_output(x, data_format)
<ide>
<ide>
<ide> def pool2d(x, pool_size, strides=(1, 1),
<del> border_mode='valid', dim_ordering='default',
<add> border_mode='valid', data_format='default',
<ide> pool_mode='max'):
<ide> """2D Pooling.
<ide>
<ide> # Arguments
<ide> pool_size: tuple of 2 integers.
<ide> strides: tuple of 2 integers.
<ide> border_mode: one of `"valid"`, `"same"`.
<del> dim_ordering: one of `"th"`, `"tf"`.
<add> data_format: one of `"channels_first"`, `"channels_last"`.
<ide> pool_mode: one of `"max"`, `"avg"`.
<ide>
<ide> # Returns
<ide> A tensor, result of 2D pooling.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<ide> padding = _preprocess_border_mode(border_mode)
<ide> strides = (1,) + strides + (1,)
<ide> pool_size = (1,) + pool_size + (1,)
<ide>
<del> x = _preprocess_conv2d_input(x, dim_ordering)
<add> x = _preprocess_conv2d_input(x, data_format)
<ide>
<ide> if pool_mode == 'max':
<ide> x = tf.nn.max_pool(x, pool_size, strides, padding=padding)
<ide> def pool2d(x, pool_size, strides=(1, 1),
<ide> else:
<ide> raise ValueError('Invalid pooling mode:', pool_mode)
<ide>
<del> return _postprocess_conv2d_output(x, dim_ordering)
<add> return _postprocess_conv2d_output(x, data_format)
<ide>
<ide>
<ide> def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<del> dim_ordering='default', pool_mode='max'):
<add> data_format='default', pool_mode='max'):
<ide> """3D Pooling.
<ide>
<ide> # Arguments
<ide> pool_size: tuple of 3 integers.
<ide> strides: tuple of 3 integers.
<ide> border_mode: one of `"valid"`, `"same"`.
<del> dim_ordering: one of `"th"`, `"tf"`.
<add> data_format: one of `"channels_first"`, `"channels_last"`.
<ide> pool_mode: one of `"max"`, `"avg"`.
<ide>
<ide> # Returns
<ide> A tensor, result of 3D pooling.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<ide> padding = _preprocess_border_mode(border_mode)
<ide> strides = (1,) + strides + (1,)
<ide> pool_size = (1,) + pool_size + (1,)
<ide>
<del> x = _preprocess_conv3d_input(x, dim_ordering)
<add> x = _preprocess_conv3d_input(x, data_format)
<ide>
<ide> if pool_mode == 'max':
<ide> x = tf.nn.max_pool3d(x, pool_size, strides, padding=padding)
<ide> def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<ide> else:
<ide> raise ValueError('Invalid pooling mode:', pool_mode)
<ide>
<del> return _postprocess_conv3d_output(x, dim_ordering)
<add> return _postprocess_conv3d_output(x, data_format)
<ide>
<ide>
<ide> # RANDOMNESS
<ide><path>keras/backend/theano_backend.py
<ide> from theano.sandbox.softsign import softsign as T_softsign
<ide> import inspect
<ide> import numpy as np
<del>from .common import _FLOATX, floatx, _EPSILON, image_dim_ordering
<add>from .common import _FLOATX, floatx, _EPSILON, image_data_format
<ide> py_all = all
<ide>
<ide>
<ide> def repeat_elements(x, rep, axis):
<ide> return T.repeat(x, rep, axis=axis)
<ide>
<ide>
<del>def resize_images(X, height_factor, width_factor, dim_ordering):
<add>def resize_images(X, height_factor, width_factor, data_format):
<ide> """Resize the images contained in a 4D tensor of shape
<del> - [batch, channels, height, width] (for 'th' dim_ordering)
<del> - [batch, height, width, channels] (for 'tf' dim_ordering)
<add> - [batch, channels, height, width] (for 'channels_first' data_format)
<add> - [batch, height, width, channels] (for 'channels_last' data_format)
<ide> by a factor of (height_factor, width_factor). Both factors should be
<ide> positive integers.
<ide> """
<ide> # TODO: `keras_shape` inference.
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> output = repeat_elements(X, height_factor, axis=2)
<ide> output = repeat_elements(output, width_factor, axis=3)
<ide> return output
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> output = repeat_elements(X, height_factor, axis=1)
<ide> output = repeat_elements(output, width_factor, axis=2)
<ide> return output
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide>
<ide>
<del>def resize_volumes(X, depth_factor, height_factor, width_factor, dim_ordering):
<add>def resize_volumes(X, depth_factor, height_factor, width_factor, data_format):
<ide> """Resize the volume contained in a 5D tensor of shape
<del> - [batch, channels, depth, height, width] (for 'th' dim_ordering)
<del> - [batch, depth, height, width, channels] (for 'tf' dim_ordering)
<add> - [batch, channels, depth, height, width] (for 'channels_first' data_format)
<add> - [batch, depth, height, width, channels] (for 'channels_last' data_format)
<ide> by a factor of (depth_factor, height_factor, width_factor).
<ide> Both factors should be positive integers.
<ide> """
<ide> # TODO: `keras_shape` inference.
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> output = repeat_elements(X, depth_factor, axis=2)
<ide> output = repeat_elements(output, height_factor, axis=3)
<ide> output = repeat_elements(output, width_factor, axis=4)
<ide> return output
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> output = repeat_elements(X, depth_factor, axis=1)
<ide> output = repeat_elements(output, height_factor, axis=2)
<ide> output = repeat_elements(output, width_factor, axis=3)
<ide> return output
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide>
<ide>
<ide> def repeat(x, n):
<ide> def asymmetric_temporal_padding(x, left_pad=1, right_pad=1):
<ide> return T.set_subtensor(output[:, left_pad:x.shape[1] + left_pad, :], x)
<ide>
<ide>
<del>def spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):
<add>def spatial_2d_padding(x, padding=(1, 1), data_format='default'):
<ide> """Pad the 2nd and 3rd dimensions of a 4D tensor
<ide> with "padding[0]" and "padding[1]" (resp.) zeros left and right.
<ide> """
<ide> # TODO: `keras_shape` inference.
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<ide> input_shape = x.shape
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> output_shape = (input_shape[0],
<ide> input_shape[1],
<ide> input_shape[2] + 2 * padding[0],
<ide> def spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):
<ide> slice(padding[0], input_shape[2] + padding[0]),
<ide> slice(padding[1], input_shape[3] + padding[1]))
<ide>
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> output_shape = (input_shape[0],
<ide> input_shape[1] + 2 * padding[0],
<ide> input_shape[2] + 2 * padding[1],
<ide> def spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):
<ide> slice(padding[1], input_shape[2] + padding[1]),
<ide> slice(None))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide> return T.set_subtensor(output[indices], x)
<ide>
<ide>
<ide> def asymmetric_spatial_2d_padding(x, top_pad=1, bottom_pad=1,
<ide> left_pad=1, right_pad=1,
<del> dim_ordering='default'):
<add> data_format='default'):
<ide> """Pad the rows and columns of a 4D tensor
<ide> with "top_pad", "bottom_pad", "left_pad", "right_pad" (resp.) zeros
<ide> rows on top, bottom; cols on left, right.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<ide> input_shape = x.shape
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> output_shape = (input_shape[0],
<ide> input_shape[1],
<ide> input_shape[2] + top_pad + bottom_pad,
<ide> def asymmetric_spatial_2d_padding(x, top_pad=1, bottom_pad=1,
<ide> slice(top_pad, input_shape[2] + top_pad),
<ide> slice(left_pad, input_shape[3] + left_pad))
<ide>
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> output_shape = (input_shape[0],
<ide> input_shape[1] + top_pad + bottom_pad,
<ide> input_shape[2] + left_pad + right_pad,
<ide> def asymmetric_spatial_2d_padding(x, top_pad=1, bottom_pad=1,
<ide> slice(left_pad, input_shape[2] + left_pad),
<ide> slice(None))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide> return T.set_subtensor(output[indices], x)
<ide>
<ide>
<del>def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='default'):
<add>def spatial_3d_padding(x, padding=(1, 1, 1), data_format='default'):
<ide> """Pad the 2nd, 3rd and 4th dimensions of a 5D tensor
<ide> with "padding[0]", "padding[1]" and "padding[2]" (resp.) zeros left and right.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<ide>
<ide> input_shape = x.shape
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> output_shape = (input_shape[0],
<ide> input_shape[1],
<ide> input_shape[2] + 2 * padding[0],
<ide> def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='default'):
<ide> slice(padding[1], input_shape[3] + padding[1]),
<ide> slice(padding[2], input_shape[4] + padding[2]))
<ide>
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> output_shape = (input_shape[0],
<ide> input_shape[1] + 2 * padding[0],
<ide> input_shape[2] + 2 * padding[1],
<ide> def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='default'):
<ide> slice(padding[2], input_shape[3] + padding[2]),
<ide> slice(None))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide> return T.set_subtensor(output[indices], x)
<ide>
<ide>
<ide> def in_top_k(predictions, targets, k):
<ide>
<ide> # CONVOLUTIONS
<ide>
<del>def _preprocess_conv2d_input(x, dim_ordering):
<del> if dim_ordering == 'tf':
<add>def _preprocess_conv2d_input(x, data_format):
<add> if data_format == 'channels_last':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH input shape: (samples, input_depth, rows, cols)
<ide> def _preprocess_conv2d_input(x, dim_ordering):
<ide> return x
<ide>
<ide>
<del>def _preprocess_conv3d_input(x, dim_ordering):
<del> if dim_ordering == 'tf':
<add>def _preprocess_conv3d_input(x, data_format):
<add> if data_format == 'channels_last':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH input shape: (samples, input_depth, rows, cols, slices)
<ide> def _preprocess_conv3d_input(x, dim_ordering):
<ide> return x
<ide>
<ide>
<del>def _preprocess_conv2d_kernel(kernel, dim_ordering):
<del> if dim_ordering == 'tf':
<add>def _preprocess_conv2d_kernel(kernel, data_format):
<add> if data_format == 'channels_last':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH kernel shape: (depth, input_depth, rows, cols)
<ide> def _preprocess_conv2d_kernel(kernel, dim_ordering):
<ide> return kernel
<ide>
<ide>
<del>def _preprocess_conv3d_kernel(kernel, dim_ordering):
<del> if dim_ordering == 'tf':
<add>def _preprocess_conv3d_kernel(kernel, data_format):
<add> if data_format == 'channels_last':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH kernel shape: (depth, input_depth, rows, cols, slices)
<ide> def _preprocess_border_mode(border_mode):
<ide> return th_border_mode
<ide>
<ide>
<del>def _preprocess_conv2d_image_shape(dim_ordering, image_shape):
<add>def _preprocess_conv2d_image_shape(data_format, image_shape):
<ide> # Theano might not accept long type
<ide> def int_or_none(value):
<ide> try:
<ide> return int(value)
<ide> except TypeError:
<ide> return None
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> if image_shape:
<ide> image_shape = (image_shape[0], image_shape[3],
<ide> image_shape[1], image_shape[2])
<ide> def int_or_none(value):
<ide> return image_shape
<ide>
<ide>
<del>def _preprocess_conv3d_volume_shape(dim_ordering, volume_shape):
<add>def _preprocess_conv3d_volume_shape(data_format, volume_shape):
<ide> # Theano might not accept long type
<ide> def int_or_none(value):
<ide> try:
<ide> return int(value)
<ide> except TypeError:
<ide> return None
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> if volume_shape:
<ide> volume_shape = (volume_shape[0], volume_shape[4],
<ide> volume_shape[1], volume_shape[2], volume_shape[3])
<ide> def int_or_none(value):
<ide> return volume_shape
<ide>
<ide>
<del>def _preprocess_conv2d_filter_shape(dim_ordering, filter_shape):
<add>def _preprocess_conv2d_filter_shape(data_format, filter_shape):
<ide> # Theano might not accept long type
<ide> def int_or_none(value):
<ide> try:
<ide> return int(value)
<ide> except TypeError:
<ide> return None
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> if filter_shape:
<ide> filter_shape = (filter_shape[3], filter_shape[2],
<ide> filter_shape[0], filter_shape[1])
<ide> def int_or_none(value):
<ide> return filter_shape
<ide>
<ide>
<del>def _preprocess_conv3d_filter_shape(dim_ordering, filter_shape):
<add>def _preprocess_conv3d_filter_shape(data_format, filter_shape):
<ide> # Theano might not accept long type
<ide> def int_or_none(value):
<ide> try:
<ide> return int(value)
<ide> except TypeError:
<ide> return None
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> if filter_shape:
<ide> filter_shape = (filter_shape[4], filter_shape[3],
<ide> filter_shape[0], filter_shape[1], filter_shape[2])
<ide> def int_or_none(value):
<ide> return filter_shape
<ide>
<ide>
<del>def _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering):
<add>def _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel, strides, data_format):
<ide> if border_mode == 'same':
<ide> if np_kernel.shape[2] % 2 == 0:
<ide> conv_out = conv_out[:, :, :(x.shape[2] + strides[0] - 1) // strides[0], :]
<ide> if np_kernel.shape[3] % 2 == 0:
<ide> conv_out = conv_out[:, :, :, :(x.shape[3] + strides[1] - 1) // strides[1]]
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> conv_out = conv_out.dimshuffle((0, 2, 3, 1))
<ide> return conv_out
<ide>
<ide>
<del>def _postprocess_conv3d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering):
<add>def _postprocess_conv3d_output(conv_out, x, border_mode, np_kernel, strides, data_format):
<ide> if border_mode == 'same':
<ide> if np_kernel.shape[2] % 2 == 0:
<ide> conv_out = conv_out[:, :, :(x.shape[2] + strides[0] - 1) // strides[0], :, :]
<ide> if np_kernel.shape[3] % 2 == 0:
<ide> conv_out = conv_out[:, :, :, :(x.shape[3] + strides[1] - 1) // strides[1], :]
<ide> if np_kernel.shape[4] % 2 == 0:
<ide> conv_out = conv_out[:, :, :, :, :(x.shape[4] + strides[2] - 1) // strides[2]]
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
<ide> return conv_out
<ide>
<ide> def conv1d(x, kernel, stride=1, border_mode='valid',
<ide>
<ide>
<ide> def conv2d(x, kernel, strides=(1, 1), border_mode='valid',
<del> dim_ordering='default', image_shape=None,
<add> data_format='default', image_shape=None,
<ide> filter_shape=None, filter_dilation=(1, 1)):
<ide> """2D convolution.
<ide>
<ide> # Arguments
<ide> kernel: kernel tensor.
<ide> strides: strides tuple.
<ide> border_mode: string, "same" or "valid".
<del> dim_ordering: "tf" or "th".
<del> Whether to use Theano or TensorFlow dimension ordering
<add> data_format: "channels_last" or "channels_first".
<add> Whether to use Theano or TensorFlow data format
<ide> in inputs/kernels/ouputs.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ', dim_ordering)
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ', data_format)
<ide>
<del> x = _preprocess_conv2d_input(x, dim_ordering)
<del> kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
<add> x = _preprocess_conv2d_input(x, data_format)
<add> kernel = _preprocess_conv2d_kernel(kernel, data_format)
<ide> th_border_mode = _preprocess_border_mode(border_mode)
<ide> np_kernel = kernel.eval()
<del> image_shape = _preprocess_conv2d_image_shape(dim_ordering, image_shape)
<del> filter_shape = _preprocess_conv2d_filter_shape(dim_ordering, filter_shape)
<add> image_shape = _preprocess_conv2d_image_shape(data_format, image_shape)
<add> filter_shape = _preprocess_conv2d_filter_shape(data_format, filter_shape)
<ide>
<ide> # TODO: remove the if statement when theano with no filter dilation is deprecated.
<ide> if filter_dilation == (1, 1):
<ide> def conv2d(x, kernel, strides=(1, 1), border_mode='valid',
<ide> filter_dilation=filter_dilation)
<ide>
<ide> conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
<del> strides, dim_ordering)
<add> strides, data_format)
<ide> return conv_out
<ide>
<ide>
<ide> def deconv2d(x, kernel, output_shape, strides=(1, 1),
<ide> border_mode='valid',
<del> dim_ordering='default',
<add> data_format='default',
<ide> image_shape=None, filter_shape=None):
<ide> """2D deconvolution (transposed convolution).
<ide>
<ide> def deconv2d(x, kernel, output_shape, strides=(1, 1),
<ide> output_shape: desired dimensions of output.
<ide> strides: strides tuple.
<ide> border_mode: string, "same" or "valid".
<del> dim_ordering: "tf" or "th".
<del> Whether to use Theano or TensorFlow dimension ordering
<add> data_format: "channels_last" or "channels_first".
<add> Whether to use Theano or TensorFlow data format
<ide> in inputs/kernels/ouputs.
<ide> """
<ide> flip_filters = False
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering ' + dim_ordering)
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + data_format)
<ide>
<del> x = _preprocess_conv2d_input(x, dim_ordering)
<del> kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
<add> x = _preprocess_conv2d_input(x, data_format)
<add> kernel = _preprocess_conv2d_kernel(kernel, data_format)
<ide> kernel = kernel.dimshuffle((1, 0, 2, 3))
<ide> th_border_mode = _preprocess_border_mode(border_mode)
<ide> np_kernel = kernel.eval()
<del> filter_shape = _preprocess_conv2d_filter_shape(dim_ordering, filter_shape)
<add> filter_shape = _preprocess_conv2d_filter_shape(data_format, filter_shape)
<ide> filter_shape = tuple(filter_shape[i] for i in (1, 0, 2, 3))
<ide>
<ide> op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=output_shape,
<ide> def deconv2d(x, kernel, output_shape, strides=(1, 1),
<ide> conv_out = op(kernel, x, output_shape[2:])
<ide>
<ide> conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
<del> strides, dim_ordering)
<add> strides, data_format)
<ide> return conv_out
<ide>
<ide>
<ide> def atrous_conv2d(x, kernel, rate=1,
<ide> border_mode='valid',
<del> dim_ordering='default',
<add> data_format='default',
<ide> image_shape=None, filter_shape=None):
<ide> raise NotImplementedError
<ide>
<ide>
<ide> def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
<del> border_mode='valid', dim_ordering='default'):
<add> border_mode='valid', data_format='default'):
<ide> raise NotImplementedError
<ide>
<ide>
<ide> def conv3d(x, kernel, strides=(1, 1, 1),
<del> border_mode='valid', dim_ordering='default',
<add> border_mode='valid', data_format='default',
<ide> volume_shape=None, filter_shape=None,
<ide> filter_dilation=(1, 1, 1)):
<ide> """3D convolution.
<ide> def conv3d(x, kernel, strides=(1, 1, 1),
<ide> kernel: kernel tensor.
<ide> strides: strides tuple.
<ide> border_mode: string, "same" or "valid".
<del> dim_ordering: "tf" or "th".
<del> Whether to use Theano or TensorFlow dimension ordering
<add> data_format: "channels_last" or "channels_first".
<add> Whether to use Theano or TensorFlow data format
<ide> in inputs/kernels/ouputs.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide>
<ide> # TODO: remove this if statement when Theano without AbstractConv3d is deprecated
<ide> if not hasattr(T.nnet, 'conv3d'):
<ide> def conv3d(x, kernel, strides=(1, 1, 1),
<ide> '0.9.0dev3 or newer.')
<ide>
<ide> return _old_theano_conv3d(x, kernel, strides, border_mode,
<del> dim_ordering, volume_shape, filter_shape)
<add> data_format, volume_shape, filter_shape)
<ide>
<del> x = _preprocess_conv3d_input(x, dim_ordering)
<del> kernel = _preprocess_conv3d_kernel(kernel, dim_ordering)
<add> x = _preprocess_conv3d_input(x, data_format)
<add> kernel = _preprocess_conv3d_kernel(kernel, data_format)
<ide> th_border_mode = _preprocess_border_mode(border_mode)
<ide> np_kernel = kernel.eval()
<del> volume_shape = _preprocess_conv3d_volume_shape(dim_ordering, volume_shape)
<del> filter_shape = _preprocess_conv3d_filter_shape(dim_ordering, filter_shape)
<add> volume_shape = _preprocess_conv3d_volume_shape(data_format, volume_shape)
<add> filter_shape = _preprocess_conv3d_filter_shape(data_format, filter_shape)
<ide>
<ide> conv_out = T.nnet.conv3d(x, kernel,
<ide> border_mode=th_border_mode,
<ide> def conv3d(x, kernel, strides=(1, 1, 1),
<ide> filter_dilation=filter_dilation)
<ide>
<ide> conv_out = _postprocess_conv3d_output(conv_out, x, border_mode, np_kernel,
<del> strides, dim_ordering)
<add> strides, data_format)
<ide> return conv_out
<ide>
<ide>
<ide> # TODO: remove this function when theano without AbstractConv3d is deprecated
<ide> def _old_theano_conv3d(x, kernel, strides=(1, 1, 1),
<del> border_mode='valid', dim_ordering='default',
<add> border_mode='valid', data_format='default',
<ide> volume_shape=None, filter_shape=None):
<ide> """
<ide> Run on cuDNN if available.
<ide> border_mode: string, "same" or "valid".
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide> if border_mode not in {'same', 'valid'}:
<ide> raise ValueError('Invalid border mode:', border_mode)
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> # TF uses the last dimension as channel dimension,
<ide> # instead of the 2nd one.
<ide> # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
<ide> def _old_theano_conv3d(x, kernel, strides=(1, 1, 1),
<ide> if strides != (1, 1, 1):
<ide> conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
<ide>
<ide> return conv_out
<ide>
<ide>
<ide> def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
<del> dim_ordering='default', pool_mode='max'):
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> data_format='default', pool_mode='max'):
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide>
<ide> assert pool_size[0] >= 1 and pool_size[1] >= 1
<ide>
<ide> def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
<ide> else:
<ide> raise ValueError('Invalid border mode:', border_mode)
<ide>
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> x = x.dimshuffle((0, 3, 1, 2))
<ide>
<ide> if pool_mode == 'max':
<ide> def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
<ide> : expected_width,
<ide> : expected_height]
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> pool_out = pool_out.dimshuffle((0, 2, 3, 1))
<ide> return pool_out
<ide>
<ide>
<ide> def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<del> dim_ordering='default', pool_mode='max'):
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> data_format='default', pool_mode='max'):
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide>
<ide> # TODO: remove this if statement when Theano without pool_3d is deprecated
<ide> # (pool_3d was introduced after 0.9.0dev3)
<ide> if not hasattr(T.signal.pool, 'pool_3d'):
<ide> return _old_theano_pool3d(x, pool_size, strides, border_mode,
<del> dim_ordering, pool_mode)
<add> data_format, pool_mode)
<ide>
<ide> if border_mode == 'same':
<ide> w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
<ide> def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<ide> padding = (0, 0, 0)
<ide> else:
<ide> raise ValueError('Invalid border mode:', border_mode)
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> x = x.dimshuffle((0, 4, 1, 2, 3))
<ide>
<ide> if pool_mode == 'max':
<ide> def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<ide> : expected_height,
<ide> : expected_depth]
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
<ide> return pool_out
<ide>
<ide>
<ide> # TODO: remove this function when Theano without pool_3d is deprecated
<ide> # (pool_3d was introduced after 0.9.0dev3)
<ide> def _old_theano_pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<del> dim_ordering='default', pool_mode='max'):
<del> if dim_ordering == 'default':
<del> dim_ordering = image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> data_format='default', pool_mode='max'):
<add> if data_format == 'default':
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide>
<ide> if border_mode == 'same':
<ide> # TODO: add implementation for border_mode="same"
<ide> def _old_theano_pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<ide> else:
<ide> raise ValueError('Invalid border mode:', border_mode)
<ide>
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering:', dim_ordering)
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format:', data_format)
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> x = x.dimshuffle((0, 4, 1, 2, 3))
<ide>
<ide> if pool_mode == 'max':
<ide> def _old_theano_pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
<ide> else:
<ide> raise ValueError('Invalid pooling mode:', pool_mode)
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
<ide> return pool_out
<ide>
<ide><path>keras/constraints.py
<ide> class MaxNorm(Constraint):
<ide> has shape `(input_dim, output_dim)`,
<ide> set `axis` to `0` to constrain each weight vector
<ide> of length `(input_dim,)`.
<del> In a `Convolution2D` layer with `dim_ordering="tf"`,
<add> In a `Convolution2D` layer with `data_format="channels_last"`,
<ide> the weight tensor has shape
<ide> `(rows, cols, input_depth, output_depth)`,
<ide> set `axis` to `[0, 1, 2]`
<ide> class UnitNorm(Constraint):
<ide> has shape `(input_dim, output_dim)`,
<ide> set `axis` to `0` to constrain each weight vector
<ide> of length `(input_dim,)`.
<del> In a `Convolution2D` layer with `dim_ordering="tf"`,
<add> In a `Convolution2D` layer with `data_format="channels_last"`,
<ide> the weight tensor has shape
<ide> `(rows, cols, input_depth, output_depth)`,
<ide> set `axis` to `[0, 1, 2]`
<ide><path>keras/datasets/cifar10.py
<ide> def load_data():
<ide> y_train = np.reshape(y_train, (len(y_train), 1))
<ide> y_test = np.reshape(y_test, (len(y_test), 1))
<ide>
<del> if K.image_dim_ordering() == 'tf':
<add> if K.image_data_format() == 'channels_last':
<ide> x_train = x_train.transpose(0, 2, 3, 1)
<ide> x_test = x_test.transpose(0, 2, 3, 1)
<ide>
<ide><path>keras/datasets/cifar100.py
<ide> def load_data(label_mode='fine'):
<ide> y_train = np.reshape(y_train, (len(y_train), 1))
<ide> y_test = np.reshape(y_test, (len(y_test), 1))
<ide>
<del> if K.image_dim_ordering() == 'tf':
<add> if K.image_data_format() == 'channels_last':
<ide> x_train = x_train.transpose(0, 2, 3, 1)
<ide> x_test = x_test.transpose(0, 2, 3, 1)
<ide>
<ide><path>keras/initializations.py
<ide> from .utils.generic_utils import get_from_module
<ide>
<ide>
<del>def get_fans(shape, dim_ordering='th'):
<add>def get_fans(shape, data_format='channels_first'):
<ide> if len(shape) == 2:
<ide> fan_in = shape[0]
<ide> fan_out = shape[1]
<ide> elif len(shape) == 4 or len(shape) == 5:
<ide> # Assuming convolution kernels (2D or 3D).
<ide> # TH kernel shape: (depth, input_depth, ...)
<ide> # TF kernel shape: (..., input_depth, depth)
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> receptive_field_size = np.prod(shape[2:])
<ide> fan_in = shape[1] * receptive_field_size
<ide> fan_out = shape[0] * receptive_field_size
<del> elif dim_ordering == 'tf':
<add> elif data_format == 'channels_last':
<ide> receptive_field_size = np.prod(shape[:2])
<ide> fan_in = shape[-2] * receptive_field_size
<ide> fan_out = shape[-1] * receptive_field_size
<ide> else:
<del> raise ValueError('Invalid dim_ordering: ' + dim_ordering)
<add> raise ValueError('Invalid data_format: ' + data_format)
<ide> else:
<ide> # No specific assumptions.
<ide> fan_in = np.sqrt(np.prod(shape))
<ide> fan_out = np.sqrt(np.prod(shape))
<ide> return fan_in, fan_out
<ide>
<ide>
<del>def uniform(shape, scale=0.05, name=None, dim_ordering='th'):
<add>def uniform(shape, scale=0.05, name=None, data_format='channels_first'):
<ide> return K.random_uniform_variable(shape, -scale, scale, name=name)
<ide>
<ide>
<del>def normal(shape, scale=0.05, name=None, dim_ordering='th'):
<add>def normal(shape, scale=0.05, name=None, data_format='channels_first'):
<ide> return K.random_normal_variable(shape, 0.0, scale, name=name)
<ide>
<ide>
<del>def lecun_uniform(shape, name=None, dim_ordering='th'):
<add>def lecun_uniform(shape, name=None, data_format='channels_first'):
<ide> """LeCun uniform variance scaling initializer.
<ide>
<ide> # References
<ide> LeCun 98, Efficient Backprop,
<ide> http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
<ide> """
<del> fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
<add> fan_in, fan_out = get_fans(shape, data_format=data_format)
<ide> scale = np.sqrt(3. / fan_in)
<ide> return uniform(shape, scale, name=name)
<ide>
<ide>
<del>def glorot_normal(shape, name=None, dim_ordering='th'):
<add>def glorot_normal(shape, name=None, data_format='channels_first'):
<ide> """Glorot normal variance scaling initializer.
<ide>
<ide> # References
<ide> Glorot & Bengio, AISTATS 2010
<ide> """
<del> fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
<add> fan_in, fan_out = get_fans(shape, data_format=data_format)
<ide> s = np.sqrt(2. / (fan_in + fan_out))
<ide> return normal(shape, s, name=name)
<ide>
<ide>
<del>def glorot_uniform(shape, name=None, dim_ordering='th'):
<del> fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
<add>def glorot_uniform(shape, name=None, data_format='channels_first'):
<add> fan_in, fan_out = get_fans(shape, data_format=data_format)
<ide> s = np.sqrt(6. / (fan_in + fan_out))
<ide> return uniform(shape, s, name=name)
<ide>
<ide>
<del>def he_normal(shape, name=None, dim_ordering='th'):
<add>def he_normal(shape, name=None, data_format='channels_first'):
<ide> """He normal variance scaling initializer.
<ide>
<ide> # References
<ide> He et al., http://arxiv.org/abs/1502.01852
<ide> """
<del> fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
<add> fan_in, fan_out = get_fans(shape, data_format=data_format)
<ide> s = np.sqrt(2. / fan_in)
<ide> return normal(shape, s, name=name)
<ide>
<ide>
<del>def he_uniform(shape, name=None, dim_ordering='th'):
<add>def he_uniform(shape, name=None, data_format='channels_first'):
<ide> """He uniform variance scaling initializer.
<ide> """
<del> fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
<add> fan_in, fan_out = get_fans(shape, data_format=data_format)
<ide> s = np.sqrt(6. / fan_in)
<ide> return uniform(shape, s, name=name)
<ide>
<ide>
<del>def orthogonal(shape, scale=1.1, name=None, dim_ordering='th'):
<add>def orthogonal(shape, scale=1.1, name=None, data_format='channels_first'):
<ide> """Orthogonal initializer.
<ide>
<ide> # References
<ide> def orthogonal(shape, scale=1.1, name=None, dim_ordering='th'):
<ide> return K.variable(scale * q[:shape[0], :shape[1]], name=name)
<ide>
<ide>
<del>def identity(shape, scale=1, name=None, dim_ordering='th'):
<add>def identity(shape, scale=1, name=None, data_format='channels_first'):
<ide> if len(shape) != 2 or shape[0] != shape[1]:
<ide> raise ValueError('Identity matrix initialization can only be used '
<ide> 'for 2D square matrices.')
<ide> else:
<ide> return K.variable(scale * np.identity(shape[0]), name=name)
<ide>
<ide>
<del>def zero(shape, name=None, dim_ordering='th'):
<add>def zero(shape, name=None, data_format='channels_first'):
<ide> return K.zeros(shape, name=name)
<ide>
<ide>
<del>def one(shape, name=None, dim_ordering='th'):
<add>def one(shape, name=None, data_format='channels_first'):
<ide> return K.ones(shape, name=name)
<ide>
<ide>
<ide><path>keras/layers/convolutional.py
<ide> def build(self, input_shape):
<ide>
<ide> self.W = self.add_weight(self.W_shape,
<ide> initializer=functools.partial(self.init,
<del> dim_ordering='th'),
<add> data_format='channels_first'),
<ide> name='{}_W'.format(self.name),
<ide> regularizer=self.W_regularizer,
<ide> constraint=self.W_constraint)
<ide> def call(self, x, mask=None):
<ide> x = K.expand_dims(x, 2) # add a dummy dimension
<ide> output = K.conv2d(x, self.W, strides=self.subsample,
<ide> border_mode=self.border_mode,
<del> dim_ordering='tf')
<add> data_format='channels_last')
<ide> output = K.squeeze(output, 2) # remove the dummy dimension
<ide> if self.bias:
<ide> output += K.reshape(self.b, (1, 1, self.nb_filter))
<ide> def call(self, x, mask=None):
<ide> x = K.expand_dims(x, 2) # add a dummy dimension
<ide> output = K.conv2d(x, self.W, strides=self.subsample,
<ide> border_mode=self.border_mode,
<del> dim_ordering='tf',
<add> data_format='channels_last',
<ide> filter_dilation=(self.atrous_rate, self.atrous_rate))
<ide> output = K.squeeze(output, 2) # remove the dummy dimension
<ide> if self.bias:
<ide> class Convolution2D(Layer):
<ide> (eg. maxnorm, nonneg), applied to the main weights matrix.
<ide> b_constraint: instance of the [constraints](../constraints.md) module,
<ide> applied to the bias.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide> bias: whether to include a bias
<ide> (i.e. make the layer affine rather than linear).
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(samples, nb_filter, new_rows, new_cols)` if dim_ordering='th'
<add> `(samples, nb_filter, new_rows, new_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, new_rows, new_cols, nb_filter)` if dim_ordering='tf'.
<add> `(samples, new_rows, new_cols, nb_filter)` if data_format='channels_last'.
<ide> `rows` and `cols` values might have changed due to padding.
<ide> """
<ide>
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> init='glorot_uniform', activation=None, weights=None,
<del> border_mode='valid', subsample=(1, 1), dim_ordering='default',
<add> border_mode='valid', subsample=(1, 1), data_format='default',
<ide> W_regularizer=None, b_regularizer=None,
<ide> activity_regularizer=None,
<ide> W_constraint=None, b_constraint=None,
<ide> bias=True, **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> if border_mode not in {'valid', 'same', 'full'}:
<ide> raise ValueError('Invalid border mode for Convolution2D:', border_mode)
<ide> self.nb_filter = nb_filter
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> self.activation = activations.get(activation)
<ide> self.border_mode = border_mode
<ide> self.subsample = tuple(subsample)
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide>
<ide> self.W_regularizer = regularizers.get(W_regularizer)
<ide> self.b_regularizer = regularizers.get(b_regularizer)
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> super(Convolution2D, self).__init__(**kwargs)
<ide>
<ide> def build(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> stack_size = input_shape[1]
<ide> self.W_shape = (self.nb_filter, stack_size, self.nb_row, self.nb_col)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> stack_size = input_shape[3]
<ide> self.W_shape = (self.nb_row, self.nb_col, stack_size, self.nb_filter)
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> self.W = self.add_weight(self.W_shape,
<ide> initializer=functools.partial(self.init,
<del> dim_ordering=self.dim_ordering),
<add> data_format=self.data_format),
<ide> name='{}_W'.format(self.name),
<ide> regularizer=self.W_regularizer,
<ide> constraint=self.W_constraint)
<ide> def build(self, input_shape):
<ide> self.built = True
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = input_shape[2]
<ide> cols = input_shape[3]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = input_shape[1]
<ide> cols = input_shape[2]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> rows = conv_output_length(rows, self.nb_row,
<ide> self.border_mode, self.subsample[0])
<ide> cols = conv_output_length(cols, self.nb_col,
<ide> self.border_mode, self.subsample[1])
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], self.nb_filter, rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], rows, cols, self.nb_filter)
<ide>
<ide> def call(self, x, mask=None):
<ide> output = K.conv2d(x, self.W, strides=self.subsample,
<ide> border_mode=self.border_mode,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> filter_shape=self.W_shape)
<ide> if self.bias:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> output = self.activation(output)
<ide> return output
<ide>
<ide> def get_config(self):
<ide> 'activation': self.activation.__name__,
<ide> 'border_mode': self.border_mode,
<ide> 'subsample': self.subsample,
<del> 'dim_ordering': self.dim_ordering,
<add> 'data_format': self.data_format,
<ide> 'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
<ide> 'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
<ide> 'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
<ide> class Deconvolution2D(Convolution2D):
<ide> (eg. maxnorm, nonneg), applied to the main weights matrix.
<ide> b_constraint: instance of the [constraints](../constraints.md) module,
<ide> applied to the bias.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide> bias: whether to include a bias
<ide> (i.e. make the layer affine rather than linear).
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(samples, nb_filter, new_rows, new_cols)` if dim_ordering='th'
<add> `(samples, nb_filter, new_rows, new_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, new_rows, new_cols, nb_filter)` if dim_ordering='tf'.
<add> `(samples, new_rows, new_cols, nb_filter)` if data_format='channels_last'.
<ide> `rows` and `cols` values might have changed due to padding.
<ide>
<ide> # References
<ide> class Deconvolution2D(Convolution2D):
<ide> def __init__(self, nb_filter, nb_row, nb_col, output_shape,
<ide> init='glorot_uniform', activation=None, weights=None,
<ide> border_mode='valid', subsample=(1, 1),
<del> dim_ordering='default',
<add> data_format='default',
<ide> W_regularizer=None, b_regularizer=None, activity_regularizer=None,
<ide> W_constraint=None, b_constraint=None,
<ide> bias=True, **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> if border_mode not in {'valid', 'same', 'full'}:
<ide> raise ValueError('Invalid border mode for Deconvolution2D:', border_mode)
<ide>
<ide> def __init__(self, nb_filter, nb_row, nb_col, output_shape,
<ide> weights=weights,
<ide> border_mode=border_mode,
<ide> subsample=subsample,
<del> dim_ordering=dim_ordering,
<add> data_format=data_format,
<ide> W_regularizer=W_regularizer,
<ide> b_regularizer=b_regularizer,
<ide> activity_regularizer=activity_regularizer,
<ide> def __init__(self, nb_filter, nb_row, nb_col, output_shape,
<ide> **kwargs)
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = self.output_shape_[2]
<ide> cols = self.output_shape_[3]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = self.output_shape_[1]
<ide> cols = self.output_shape_[2]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], self.nb_filter, rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], rows, cols, self.nb_filter)
<ide>
<ide> def call(self, x, mask=None):
<ide> output = K.deconv2d(x, self.W, self.output_shape_,
<ide> strides=self.subsample,
<ide> border_mode=self.border_mode,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> filter_shape=self.W_shape)
<ide> if self.bias:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> output = self.activation(output)
<ide> return output
<ide>
<ide> class AtrousConvolution2D(Convolution2D):
<ide> (eg. maxnorm, nonneg), applied to the main weights matrix.
<ide> b_constraint: instance of the [constraints](../constraints.md) module,
<ide> applied to the bias.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide> bias: whether to include a bias
<ide> (i.e. make the layer affine rather than linear).
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(samples, nb_filter, new_rows, new_cols)` if dim_ordering='th'
<add> `(samples, nb_filter, new_rows, new_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, new_rows, new_cols, nb_filter)` if dim_ordering='tf'.
<add> `(samples, new_rows, new_cols, nb_filter)` if data_format='channels_last'.
<ide> `rows` and `cols` values might have changed due to padding.
<ide>
<ide> # References
<ide> class AtrousConvolution2D(Convolution2D):
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> init='glorot_uniform', activation=None, weights=None,
<ide> border_mode='valid', subsample=(1, 1),
<del> atrous_rate=(1, 1), dim_ordering='default',
<add> atrous_rate=(1, 1), data_format='default',
<ide> W_regularizer=None, b_regularizer=None,
<ide> activity_regularizer=None,
<ide> W_constraint=None, b_constraint=None,
<ide> bias=True, **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide>
<ide> if border_mode not in {'valid', 'same', 'full'}:
<ide> raise ValueError('Invalid border mode for AtrousConv2D:', border_mode)
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> weights=weights,
<ide> border_mode=border_mode,
<ide> subsample=subsample,
<del> dim_ordering=dim_ordering,
<add> data_format=data_format,
<ide> W_regularizer=W_regularizer,
<ide> b_regularizer=b_regularizer,
<ide> activity_regularizer=activity_regularizer,
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> **kwargs)
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = input_shape[2]
<ide> cols = input_shape[3]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = input_shape[1]
<ide> cols = input_shape[2]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> rows = conv_output_length(rows, self.nb_row, self.border_mode,
<ide> self.subsample[0],
<ide> def get_output_shape_for(self, input_shape):
<ide> self.subsample[1],
<ide> dilation=self.atrous_rate[1])
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], self.nb_filter, rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], rows, cols, self.nb_filter)
<ide>
<ide> def call(self, x, mask=None):
<ide> output = K.conv2d(x, self.W, strides=self.subsample,
<ide> border_mode=self.border_mode,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> filter_shape=self.W_shape,
<ide> filter_dilation=self.atrous_rate)
<ide> if self.bias:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> output = self.activation(output)
<ide> return output
<ide>
<ide> class SeparableConvolution2D(Layer):
<ide> (eg. maxnorm, nonneg), applied to the pointwise weights matrix.
<ide> b_constraint: instance of the [constraints](../constraints.md) module,
<ide> applied to the bias.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide> bias: whether to include a bias
<ide> (i.e. make the layer affine rather than linear).
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(samples, nb_filter, new_rows, new_cols)` if dim_ordering='th'
<add> `(samples, nb_filter, new_rows, new_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, new_rows, new_cols, nb_filter)` if dim_ordering='tf'.
<add> `(samples, new_rows, new_cols, nb_filter)` if data_format='channels_last'.
<ide> `rows` and `cols` values might have changed due to padding.
<ide> """
<ide>
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> init='glorot_uniform', activation=None, weights=None,
<ide> border_mode='valid', subsample=(1, 1),
<del> depth_multiplier=1, dim_ordering='default',
<add> depth_multiplier=1, data_format='default',
<ide> depthwise_regularizer=None, pointwise_regularizer=None,
<ide> b_regularizer=None, activity_regularizer=None,
<ide> depthwise_constraint=None, pointwise_constraint=None,
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> raise RuntimeError('SeparableConv2D is only available '
<ide> 'with TensorFlow for the time being.')
<ide>
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide>
<ide> if border_mode not in {'valid', 'same'}:
<ide> raise ValueError('Invalid border mode for SeparableConv2D:', border_mode)
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> self.border_mode = border_mode
<ide> self.subsample = tuple(subsample)
<ide> self.depth_multiplier = depth_multiplier
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide>
<ide> self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
<ide> self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> super(SeparableConvolution2D, self).__init__(**kwargs)
<ide>
<ide> def build(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> stack_size = input_shape[1]
<ide> depthwise_shape = (self.depth_multiplier, stack_size, self.nb_row, self.nb_col)
<ide> pointwise_shape = (self.nb_filter, self.depth_multiplier * stack_size, 1, 1)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> stack_size = input_shape[3]
<ide> depthwise_shape = (self.nb_row, self.nb_col, stack_size, self.depth_multiplier)
<ide> pointwise_shape = (1, 1, self.depth_multiplier * stack_size, self.nb_filter)
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> self.depthwise_kernel = self.add_weight(depthwise_shape,
<ide> initializer=functools.partial(self.init,
<del> dim_ordering=self.dim_ordering),
<add> data_format=self.data_format),
<ide> regularizer=self.depthwise_regularizer,
<ide> constraint=self.depthwise_constraint,
<ide> name='{}_depthwise_kernel'.format(self.name))
<ide> self.pointwise_kernel = self.add_weight(pointwise_shape,
<ide> initializer=functools.partial(self.init,
<del> dim_ordering=self.dim_ordering),
<add> data_format=self.data_format),
<ide> regularizer=self.pointwise_regularizer,
<ide> constraint=self.pointwise_constraint,
<ide> name='{}_pointwise_kernel'.format(self.name))
<ide> def build(self, input_shape):
<ide> self.built = True
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = input_shape[2]
<ide> cols = input_shape[3]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = input_shape[1]
<ide> cols = input_shape[2]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> rows = conv_output_length(rows, self.nb_row,
<ide> self.border_mode, self.subsample[0])
<ide> cols = conv_output_length(cols, self.nb_col,
<ide> self.border_mode, self.subsample[1])
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], self.nb_filter, rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], rows, cols, self.nb_filter)
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<ide> output = K.separable_conv2d(x, self.depthwise_kernel,
<ide> self.pointwise_kernel,
<ide> strides=self.subsample,
<ide> border_mode=self.border_mode,
<del> dim_ordering=self.dim_ordering)
<add> data_format=self.data_format)
<ide> if self.bias:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> output = self.activation(output)
<ide> return output
<ide>
<ide> def get_config(self):
<ide> 'border_mode': self.border_mode,
<ide> 'subsample': self.subsample,
<ide> 'depth_multiplier': self.depth_multiplier,
<del> 'dim_ordering': self.dim_ordering,
<add> 'data_format': self.data_format,
<ide> 'depthwise_regularizer': self.depthwise_regularizer.get_config() if self.depthwise_regularizer else None,
<ide> 'pointwise_regularizer': self.depthwise_regularizer.get_config() if self.depthwise_regularizer else None,
<ide> 'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
<ide> class Convolution3D(Layer):
<ide> (eg. maxnorm, nonneg), applied to the main weights matrix.
<ide> b_constraint: instance of the [constraints](../constraints.md) module,
<ide> applied to the bias.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide> bias: whether to include a bias
<ide> (i.e. make the layer affine rather than linear).
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<del> `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if dim_ordering='th'
<add> `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 5D tensor with shape:
<del> `(samples, nb_filter, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if dim_ordering='th'
<add> `(samples, nb_filter, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, nb_filter)` if dim_ordering='tf'.
<add> `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, nb_filter)` if data_format='channels_last'.
<ide> `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have changed due to padding.
<ide> """
<ide>
<ide> def __init__(self, nb_filter, kernel_dim1, kernel_dim2, kernel_dim3,
<ide> init='glorot_uniform', activation=None, weights=None,
<del> border_mode='valid', subsample=(1, 1, 1), dim_ordering='default',
<add> border_mode='valid', subsample=(1, 1, 1), data_format='default',
<ide> W_regularizer=None, b_regularizer=None, activity_regularizer=None,
<ide> W_constraint=None, b_constraint=None,
<ide> bias=True, **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide>
<ide> if border_mode not in {'valid', 'same', 'full'}:
<ide> raise ValueError('Invalid border mode for Convolution3D:', border_mode)
<ide> def __init__(self, nb_filter, kernel_dim1, kernel_dim2, kernel_dim3,
<ide> self.activation = activations.get(activation)
<ide> self.border_mode = border_mode
<ide> self.subsample = tuple(subsample)
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide>
<ide> self.W_regularizer = regularizers.get(W_regularizer)
<ide> self.b_regularizer = regularizers.get(b_regularizer)
<ide> def build(self, input_shape):
<ide> assert len(input_shape) == 5
<ide> self.input_spec = [InputSpec(shape=input_shape)]
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> stack_size = input_shape[1]
<ide> self.W_shape = (self.nb_filter, stack_size,
<ide> self.kernel_dim1, self.kernel_dim2, self.kernel_dim3)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> stack_size = input_shape[4]
<ide> self.W_shape = (self.kernel_dim1, self.kernel_dim2, self.kernel_dim3,
<ide> stack_size, self.nb_filter)
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> self.W = self.add_weight(self.W_shape,
<ide> initializer=functools.partial(self.init,
<del> dim_ordering=self.dim_ordering),
<add> data_format=self.data_format),
<ide> name='{}_W'.format(self.name),
<ide> regularizer=self.W_regularizer,
<ide> constraint=self.W_constraint)
<ide> def build(self, input_shape):
<ide> self.built = True
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> conv_dim1 = input_shape[2]
<ide> conv_dim2 = input_shape[3]
<ide> conv_dim3 = input_shape[4]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> conv_dim1 = input_shape[1]
<ide> conv_dim2 = input_shape[2]
<ide> conv_dim3 = input_shape[3]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> conv_dim1 = conv_output_length(conv_dim1, self.kernel_dim1,
<ide> self.border_mode, self.subsample[0])
<ide> def get_output_shape_for(self, input_shape):
<ide> conv_dim3 = conv_output_length(conv_dim3, self.kernel_dim3,
<ide> self.border_mode, self.subsample[2])
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], self.nb_filter, conv_dim1, conv_dim2, conv_dim3)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], conv_dim1, conv_dim2, conv_dim3, self.nb_filter)
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<ide> input_shape = self.input_spec[0].shape
<ide> output = K.conv3d(x, self.W, strides=self.subsample,
<ide> border_mode=self.border_mode,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> volume_shape=input_shape,
<ide> filter_shape=self.W_shape)
<ide> if self.bias:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> output += K.reshape(self.b, (1, self.nb_filter, 1, 1, 1))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> output += K.reshape(self.b, (1, 1, 1, 1, self.nb_filter))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> output = self.activation(output)
<ide> return output
<ide>
<ide> def get_config(self):
<ide> 'kernel_dim1': self.kernel_dim1,
<ide> 'kernel_dim2': self.kernel_dim2,
<ide> 'kernel_dim3': self.kernel_dim3,
<del> 'dim_ordering': self.dim_ordering,
<add> 'data_format': self.data_format,
<ide> 'init': self.init.__name__,
<ide> 'activation': self.activation.__name__,
<ide> 'border_mode': self.border_mode,
<ide> class UpSampling2D(Layer):
<ide>
<ide> # Arguments
<ide> size: tuple of 2 integers. The upsampling factors for rows and columns.
<del> dim_ordering: 'th' or 'tf'.
<del> In 'th' mode, the channels dimension (the depth)
<del> is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'.
<add> In 'channels_first' mode, the channels dimension (the depth)
<add> is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(samples, channels, upsampled_rows, upsampled_cols)` if dim_ordering='th'
<add> `(samples, channels, upsampled_rows, upsampled_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, upsampled_rows, upsampled_cols, channels)` if dim_ordering='tf'.
<add> `(samples, upsampled_rows, upsampled_cols, channels)` if data_format='channels_last'.
<ide> """
<ide>
<del> def __init__(self, size=(2, 2), dim_ordering='default', **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> def __init__(self, size=(2, 2), data_format='default', **kwargs):
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.size = tuple(size)
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=4)]
<ide> super(UpSampling2D, self).__init__(**kwargs)
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> width = self.size[0] * input_shape[2] if input_shape[2] is not None else None
<ide> height = self.size[1] * input_shape[3] if input_shape[3] is not None else None
<ide> return (input_shape[0],
<ide> input_shape[1],
<ide> width,
<ide> height)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> width = self.size[0] * input_shape[1] if input_shape[1] is not None else None
<ide> height = self.size[1] * input_shape[2] if input_shape[2] is not None else None
<ide> return (input_shape[0],
<ide> width,
<ide> height,
<ide> input_shape[3])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<ide> return K.resize_images(x, self.size[0], self.size[1],
<del> self.dim_ordering)
<add> self.data_format)
<ide>
<ide> def get_config(self):
<ide> config = {'size': self.size}
<ide> class UpSampling3D(Layer):
<ide>
<ide> # Arguments
<ide> size: tuple of 3 integers. The upsampling factors for dim1, dim2 and dim3.
<del> dim_ordering: 'th' or 'tf'.
<del> In 'th' mode, the channels dimension (the depth)
<del> is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'.
<add> In 'channels_first' mode, the channels dimension (the depth)
<add> is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<del> `(samples, channels, dim1, dim2, dim3)` if dim_ordering='th'
<add> `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, dim1, dim2, dim3, channels)` if dim_ordering='tf'.
<add> `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 5D tensor with shape:
<del> `(samples, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)` if dim_ordering='th'
<add> `(samples, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)` if data_format='channels_last'.
<ide> """
<ide>
<del> def __init__(self, size=(2, 2, 2), dim_ordering='default', **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> def __init__(self, size=(2, 2, 2), data_format='default', **kwargs):
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.size = tuple(size)
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=5)]
<ide> super(UpSampling3D, self).__init__(**kwargs)
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> dim1 = self.size[0] * input_shape[2] if input_shape[2] is not None else None
<ide> dim2 = self.size[1] * input_shape[3] if input_shape[3] is not None else None
<ide> dim3 = self.size[2] * input_shape[4] if input_shape[4] is not None else None
<ide> def get_output_shape_for(self, input_shape):
<ide> dim1,
<ide> dim2,
<ide> dim3)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> dim1 = self.size[0] * input_shape[1] if input_shape[1] is not None else None
<ide> dim2 = self.size[1] * input_shape[2] if input_shape[2] is not None else None
<ide> dim3 = self.size[2] * input_shape[3] if input_shape[3] is not None else None
<ide> def get_output_shape_for(self, input_shape):
<ide> dim3,
<ide> input_shape[4])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<ide> return K.resize_volumes(x, self.size[0], self.size[1], self.size[2],
<del> self.dim_ordering)
<add> self.data_format)
<ide>
<ide> def get_config(self):
<ide> config = {'size': self.size}
<ide> class ZeroPadding2D(Layer):
<ide> - If dictionary: should contain the keys
<ide> {'top_pad', 'bottom_pad', 'left_pad', 'right_pad'}.
<ide> If any key is missing, default value of 0 will be used for the missing key.
<del> dim_ordering: 'th' or 'tf'.
<del> In 'th' mode, the channels dimension (the depth)
<del> is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'.
<add> In 'channels_first' mode, the channels dimension (the depth)
<add> is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(samples, channels, padded_rows, padded_cols)` if dim_ordering='th'
<add> `(samples, channels, padded_rows, padded_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, padded_rows, padded_cols, channels)` if dim_ordering='tf'.
<add> `(samples, padded_rows, padded_cols, channels)` if data_format='channels_last'.
<ide> """
<ide>
<ide> def __init__(self,
<ide> padding=(1, 1),
<del> dim_ordering='default',
<add> data_format='default',
<ide> **kwargs):
<ide> super(ZeroPadding2D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide>
<ide> self.padding = padding
<ide> if isinstance(padding, dict):
<ide> def __init__(self,
<ide> 'of length 2 or 4, or dict. '
<ide> 'Found: ' + str(padding))
<ide>
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=4)]
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = input_shape[2] + self.top_pad + self.bottom_pad if input_shape[2] is not None else None
<ide> cols = input_shape[3] + self.left_pad + self.right_pad if input_shape[3] is not None else None
<ide> return (input_shape[0],
<ide> input_shape[1],
<ide> rows,
<ide> cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = input_shape[1] + self.top_pad + self.bottom_pad if input_shape[1] is not None else None
<ide> cols = input_shape[2] + self.left_pad + self.right_pad if input_shape[2] is not None else None
<ide> return (input_shape[0],
<ide> rows,
<ide> cols,
<ide> input_shape[3])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<ide> return K.asymmetric_spatial_2d_padding(x,
<ide> top_pad=self.top_pad,
<ide> bottom_pad=self.bottom_pad,
<ide> left_pad=self.left_pad,
<ide> right_pad=self.right_pad,
<del> dim_ordering=self.dim_ordering)
<add> data_format=self.data_format)
<ide>
<ide> def get_config(self):
<ide> config = {'padding': self.padding}
<ide> class ZeroPadding3D(Layer):
<ide> How many zeros to add at the beginning and end of
<ide> the 3 padding dimensions (axis 3, 4 and 5).
<ide> Currently only symmetric padding is supported.
<del> dim_ordering: 'th' or 'tf'.
<del> In 'th' mode, the channels dimension (the depth)
<del> is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'.
<add> In 'channels_first' mode, the channels dimension (the depth)
<add> is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<ide> class ZeroPadding3D(Layer):
<ide> `(samples, depth, first_padded_axis, second_padded_axis, third_axis_to_pad)`
<ide> """
<ide>
<del> def __init__(self, padding=(1, 1, 1), dim_ordering='default', **kwargs):
<add> def __init__(self, padding=(1, 1, 1), data_format='default', **kwargs):
<ide> super(ZeroPadding3D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.padding = tuple(padding)
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=5)]
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> dim1 = input_shape[2] + 2 * self.padding[0] if input_shape[2] is not None else None
<ide> dim2 = input_shape[3] + 2 * self.padding[1] if input_shape[3] is not None else None
<ide> dim3 = input_shape[4] + 2 * self.padding[2] if input_shape[4] is not None else None
<ide> def get_output_shape_for(self, input_shape):
<ide> dim1,
<ide> dim2,
<ide> dim3)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> dim1 = input_shape[1] + 2 * self.padding[0] if input_shape[1] is not None else None
<ide> dim2 = input_shape[2] + 2 * self.padding[1] if input_shape[2] is not None else None
<ide> dim3 = input_shape[3] + 2 * self.padding[2] if input_shape[3] is not None else None
<ide> def get_output_shape_for(self, input_shape):
<ide> dim3,
<ide> input_shape[4])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<ide> return K.spatial_3d_padding(x, padding=self.padding,
<del> dim_ordering=self.dim_ordering)
<add> data_format=self.data_format)
<ide>
<ide> def get_config(self):
<ide> config = {'padding': self.padding}
<ide> class Cropping2D(Layer):
<ide> cropping: tuple of tuple of int (length 2)
<ide> How many units should be trimmed off at the beginning and end of
<ide> the 2 cropping dimensions (width, height).
<del> dim_ordering: 'th' or 'tf'.
<del> In 'th' mode, the channels dimension (the depth)
<del> is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'.
<add> In 'channels_first' mode, the channels dimension (the depth)
<add> is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<ide> class Cropping2D(Layer):
<ide> ```
<ide> """
<ide>
<del> def __init__(self, cropping=((0, 0), (0, 0)), dim_ordering='default', **kwargs):
<add> def __init__(self, cropping=((0, 0), (0, 0)), data_format='default', **kwargs):
<ide> super(Cropping2D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.cropping = tuple(cropping)
<ide> if len(self.cropping) != 2:
<ide> raise ValueError('`cropping` must be a tuple length of 2.')
<ide> if len(self.cropping[0]) != 2:
<ide> raise ValueError('`cropping[0]` must be a tuple length of 2.')
<ide> if len(self.cropping[1]) != 2:
<ide> raise ValueError('`cropping[1]` must be a tuple length of 2.')
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=4)]
<ide>
<ide> def build(self, input_shape):
<ide> self.input_spec = [InputSpec(shape=input_shape)]
<ide> self.built = True
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0],
<ide> input_shape[1],
<ide> input_shape[2] - self.cropping[0][0] - self.cropping[0][1],
<ide> input_shape[3] - self.cropping[1][0] - self.cropping[1][1])
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0],
<ide> input_shape[1] - self.cropping[0][0] - self.cropping[0][1],
<ide> input_shape[2] - self.cropping[1][0] - self.cropping[1][1],
<ide> input_shape[3])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> if self.cropping[0][1] == self.cropping[1][1] == 0:
<ide> return x[:,
<ide> :,
<ide> def call(self, x, mask=None):
<ide> :,
<ide> self.cropping[0][0]:-self.cropping[0][1],
<ide> self.cropping[1][0]:-self.cropping[1][1]]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> if self.cropping[0][1] == self.cropping[1][1] == 0:
<ide> return x[:,
<ide> self.cropping[0][0]:,
<ide> class Cropping3D(Layer):
<ide> cropping: tuple of tuple of int (length 3)
<ide> How many units should be trimmed off at the beginning and end of
<ide> the 3 cropping dimensions (kernel_dim1, kernel_dim2, kernerl_dim3).
<del> dim_ordering: 'th' or 'tf'.
<del> In 'th' mode, the channels dimension (the depth)
<del> is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'.
<add> In 'channels_first' mode, the channels dimension (the depth)
<add> is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<ide> class Cropping3D(Layer):
<ide> """
<ide>
<ide> def __init__(self, cropping=((1, 1), (1, 1), (1, 1)),
<del> dim_ordering='default', **kwargs):
<add> data_format='default', **kwargs):
<ide> super(Cropping3D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.cropping = tuple(cropping)
<ide> if len(self.cropping) != 3:
<ide> raise ValueError('`cropping` must be a tuple length of 3.')
<ide> def __init__(self, cropping=((1, 1), (1, 1), (1, 1)),
<ide> raise ValueError('`cropping[1]` must be a tuple length of 2.')
<ide> if len(self.cropping[2]) != 2:
<ide> raise ValueError('`cropping[2]` must be a tuple length of 2.')
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=5)]
<ide>
<ide> def build(self, input_shape):
<ide> self.input_spec = [InputSpec(shape=input_shape)]
<ide> self.built = True
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1] if input_shape[2] is not None else None
<ide> dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1] if input_shape[3] is not None else None
<ide> dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1] if input_shape[4] is not None else None
<ide> def get_output_shape_for(self, input_shape):
<ide> dim1,
<ide> dim2,
<ide> dim3)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1] if input_shape[1] is not None else None
<ide> dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1] if input_shape[2] is not None else None
<ide> dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1] if input_shape[3] is not None else None
<ide> def get_output_shape_for(self, input_shape):
<ide> dim3,
<ide> input_shape[4])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> def call(self, x, mask=None):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
<ide> return x[:,
<ide> :,
<ide> def call(self, x, mask=None):
<ide> self.cropping[0][0]:-self.cropping[0][1],
<ide> self.cropping[1][0]:-self.cropping[1][1],
<ide> self.cropping[2][0]:-self.cropping[2][1]]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
<ide> return x[:,
<ide> self.cropping[0][0]:,
<ide><path>keras/layers/convolutional_recurrent.py
<ide> class ConvRecurrent2D(Layer):
<ide>
<ide> def __init__(self, weights=None, nb_row=None, nb_col=None, nb_filter=None,
<ide> return_sequences=False, go_backwards=False, stateful=False,
<del> dim_ordering=None, **kwargs):
<add> data_format=None, **kwargs):
<ide> self.return_sequences = return_sequences
<ide> self.go_backwards = go_backwards
<ide> self.stateful = stateful
<ide> self.initial_weights = weights
<ide> self.nb_row = nb_row
<ide> self.nb_col = nb_col
<ide> self.nb_filter = nb_filter
<del> self.dim_ordering = dim_ordering
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=5)]
<ide>
<ide> super(ConvRecurrent2D, self).__init__(**kwargs)
<ide> def compute_mask(self, input, mask):
<ide>
<ide> def get_output_shape_for(self, input_shape):
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = input_shape[3]
<ide> cols = input_shape[4]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = input_shape[2]
<ide> cols = input_shape[3]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> rows = conv_output_length(rows, self.nb_row,
<ide> self.border_mode, self.subsample[0])
<ide> cols = conv_output_length(cols, self.nb_col,
<ide> self.border_mode, self.subsample[1])
<ide>
<ide> if self.return_sequences:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], input_shape[1],
<ide> self.nb_filter, rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], input_shape[1],
<ide> rows, cols, self.nb_filter)
<ide> else:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], self.nb_filter, rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], rows, cols, self.nb_filter)
<ide>
<ide> def step(self, x, states):
<ide> class ConvLSTM2D(ConvRecurrent2D):
<ide> """Convolutional LSTM.
<ide>
<ide> # Input shape
<del> - if dim_ordering='th'
<add> - if data_format='channels_first'
<ide> 5D tensor with shape:
<ide> `(samples,time, channels, rows, cols)`
<del> - if dim_ordering='tf'
<add> - if data_format='channels_last'
<ide> 5D tensor with shape:
<ide> `(samples,time, rows, cols, channels)`
<ide>
<ide> # Output shape
<ide> - if `return_sequences`
<del> - if dim_ordering='th'
<add> - if data_format='channels_first'
<ide> 5D tensor with shape:
<ide> `(samples, time, nb_filter, output_row, output_col)`
<del> - if dim_ordering='tf'
<add> - if data_format='channels_last'
<ide> 5D tensor with shape:
<ide> `(samples, time, output_row, output_col, nb_filter)`
<ide> - else
<del> - if dim_ordering ='th'
<add> - if data_format ='channels_first'
<ide> 4D tensor with shape:
<ide> `(samples, nb_filter, output_row, output_col)`
<del> - if dim_ordering='tf'
<add> - if data_format='channels_last'
<ide> 4D tensor with shape:
<ide> `(samples, output_row, output_col, nb_filter)`
<ide>
<ide> class ConvLSTM2D(ConvRecurrent2D):
<ide> border_mode: 'valid' or 'same'.
<ide> subsample: tuple of length 2. Factor by which to subsample output.
<ide> Also called strides elsewhere.
<del> dim_ordering: 'tf' if the feature are at the last dimension or 'th'
<add> data_format: 'channels_last' if the feature are at the last dimension or 'channels_first'
<ide> stateful : Boolean (default False). If True, the last state
<ide> for each sample at index i in a batch will be used as initial
<ide> state for the sample of index i in the following batch.
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> init='glorot_uniform', inner_init='orthogonal',
<ide> forget_bias_init='one', activation='tanh',
<ide> inner_activation='hard_sigmoid',
<del> dim_ordering='default',
<add> data_format='default',
<ide> border_mode='valid', subsample=(1, 1),
<ide> W_regularizer=None, U_regularizer=None, b_regularizer=None,
<ide> dropout_W=0., dropout_U=0., **kwargs):
<ide>
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering must be in {tf,th}', dim_ordering)
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format must be in {tf,th}', data_format)
<ide> self.nb_filter = nb_filter
<ide> self.nb_row = nb_row
<ide> self.nb_col = nb_col
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> self.border_mode = border_mode
<ide> self.subsample = subsample
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> warnings.warn('Be carefull if used with convolution3D layers:\n'
<ide> 'th in convolution 3D corresponds to '
<ide> '(samples, channels, conv_dim1, conv_dim2,'
<ide> 'conv_dim3)\n'
<ide> 'while for this network it corresponds to: '
<ide> '(samples, time, channels, rows, cols)')
<del> self.dim_ordering = dim_ordering
<add> self.data_format = data_format
<ide>
<ide> kwargs['nb_filter'] = nb_filter
<ide> kwargs['nb_row'] = nb_row
<ide> kwargs['nb_col'] = nb_col
<del> kwargs['dim_ordering'] = dim_ordering
<add> kwargs['data_format'] = data_format
<ide>
<ide> self.W_regularizer = regularizers.get(W_regularizer)
<ide> self.U_regularizer = regularizers.get(U_regularizer)
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> def build(self, input_shape):
<ide> self.input_spec = [InputSpec(shape=input_shape)]
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> stack_size = input_shape[2]
<ide> self.W_shape = (self.nb_filter, stack_size,
<ide> self.nb_row, self.nb_col)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> stack_size = input_shape[4]
<ide> self.W_shape = (self.nb_row, self.nb_col,
<ide> stack_size, self.nb_filter)
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> self.W_shape1 = (self.nb_filter, self.nb_filter,
<ide> self.nb_row, self.nb_col)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> self.W_shape1 = (self.nb_row, self.nb_col,
<ide> self.nb_filter, self.nb_filter)
<ide>
<ide> def conv_step(self, x, W, b=None, border_mode='valid'):
<ide>
<ide> conv_out = K.conv2d(x, W, strides=self.subsample,
<ide> border_mode=border_mode,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> image_shape=(input_shape[0],
<ide> input_shape[2],
<ide> input_shape[3],
<ide> input_shape[4]),
<ide> filter_shape=self.W_shape)
<ide> if b:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> conv_out = conv_out + K.reshape(b, (1, self.nb_filter, 1, 1))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> conv_out = conv_out + K.reshape(b, (1, 1, 1, self.nb_filter))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> return conv_out
<ide>
<ide> def conv_step_hidden(self, x, W, border_mode='valid'):
<ide>
<ide> conv_out = K.conv2d(x, W, strides=(1, 1),
<ide> border_mode=border_mode,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> image_shape=(input_shape[0],
<ide> out_row, out_col,
<ide> out_filter),
<ide> def get_config(self):
<ide> 'inner_init': self.inner_init.__name__,
<ide> 'forget_bias_init': self.forget_bias_init.__name__,
<ide> 'activation': self.activation.__name__,
<del> 'dim_ordering': self.dim_ordering,
<add> 'data_format': self.data_format,
<ide> 'border_mode': self.border_mode,
<ide> 'inner_activation': self.inner_activation.__name__}
<ide> base_config = super(ConvLSTM2D, self).get_config()
<ide><path>keras/layers/core.py
<ide> class SpatialDropout2D(Dropout):
<ide>
<ide> # Arguments
<ide> p: float between 0 and 1. Fraction of the input units to drop.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> Same as input
<ide> class SpatialDropout2D(Dropout):
<ide> - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
<ide> """
<ide>
<del> def __init__(self, p, dim_ordering='default', **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
<del> self.dim_ordering = dim_ordering
<add> def __init__(self, p, data_format='default', **kwargs):
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {"channels_last", "channels_first"}'
<add> self.data_format = data_format
<ide> super(SpatialDropout2D, self).__init__(p, **kwargs)
<ide>
<ide> def _get_noise_shape(self, x):
<ide> input_shape = K.shape(x)
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> noise_shape = (input_shape[0], input_shape[1], 1, 1)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> noise_shape = (input_shape[0], 1, 1, input_shape[3])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> return noise_shape
<ide>
<ide>
<ide> class SpatialDropout3D(Dropout):
<ide>
<ide> # Arguments
<ide> p: float between 0 and 1. Fraction of the input units to drop.
<del> dim_ordering: 'th' or 'tf'.
<del> In 'th' mode, the channels dimension (the depth)
<del> is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'.
<add> In 'channels_first' mode, the channels dimension (the depth)
<add> is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<del> `(samples, channels, dim1, dim2, dim3)` if dim_ordering='th'
<add> `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, dim1, dim2, dim3, channels)` if dim_ordering='tf'.
<add> `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> Same as input
<ide> class SpatialDropout3D(Dropout):
<ide> - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
<ide> """
<ide>
<del> def __init__(self, p, dim_ordering='default', **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
<del> self.dim_ordering = dim_ordering
<add> def __init__(self, p, data_format='default', **kwargs):
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {"channels_last", "channels_first"}'
<add> self.data_format = data_format
<ide> super(SpatialDropout3D, self).__init__(p, **kwargs)
<ide>
<ide> def _get_noise_shape(self, x):
<ide> input_shape = K.shape(x)
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide> return noise_shape
<ide>
<ide>
<ide><path>keras/layers/local.py
<ide> def __init__(self, nb_filter, filter_length,
<ide> '(only "valid" is supported):', border_mode)
<ide> self.nb_filter = nb_filter
<ide> self.filter_length = filter_length
<del> self.init = initializations.get(init, dim_ordering='th')
<add> self.init = initializations.get(init, data_format='channels_first')
<ide> self.activation = activations.get(activation)
<ide>
<ide> self.border_mode = border_mode
<ide> class LocallyConnected2D(Layer):
<ide> (eg. maxnorm, nonneg), applied to the main weights matrix.
<ide> b_constraint: instance of the [constraints](../constraints.md) module,
<ide> applied to the bias.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<ide> bias: whether to include a bias (i.e. make the layer affine rather than linear).
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(samples, nb_filter, new_rows, new_cols)` if dim_ordering='th'
<add> `(samples, nb_filter, new_rows, new_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, new_rows, new_cols, nb_filter)` if dim_ordering='tf'.
<add> `(samples, new_rows, new_cols, nb_filter)` if data_format='channels_last'.
<ide> `rows` and `cols` values might have changed due to padding.
<ide> """
<ide>
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide> init='glorot_uniform', activation=None, weights=None,
<ide> border_mode='valid', subsample=(1, 1),
<del> dim_ordering='default',
<add> data_format='default',
<ide> W_regularizer=None, b_regularizer=None, activity_regularizer=None,
<ide> W_constraint=None, b_constraint=None,
<ide> bias=True, **kwargs):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> if border_mode != 'valid':
<ide> raise ValueError('Invalid border mode for LocallyConnected2D '
<ide> '(only "valid" is supported):', border_mode)
<ide> self.nb_filter = nb_filter
<ide> self.nb_row = nb_row
<ide> self.nb_col = nb_col
<del> self.init = initializations.get(init, dim_ordering=dim_ordering)
<add> self.init = initializations.get(init, data_format=data_format)
<ide> self.activation = activations.get(activation)
<ide>
<ide> self.border_mode = border_mode
<ide> self.subsample = tuple(subsample)
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('`dim_ordering` must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('`data_format` must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide>
<ide> self.W_regularizer = regularizers.get(W_regularizer)
<ide> self.b_regularizer = regularizers.get(b_regularizer)
<ide> def __init__(self, nb_filter, nb_row, nb_col,
<ide>
<ide> def build(self, input_shape):
<ide> output_shape = self.get_output_shape_for(input_shape)
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> _, nb_filter, output_row, output_col = output_shape
<ide> input_filter = input_shape[1]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> _, output_row, output_col, nb_filter = output_shape
<ide> input_filter = input_shape[3]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> self.output_row = output_row
<ide> self.output_col = output_col
<ide> def build(self, input_shape):
<ide> self.built = True
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = input_shape[2]
<ide> cols = input_shape[3]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = input_shape[1]
<ide> cols = input_shape[2]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> rows = conv_output_length(rows, self.nb_row,
<ide> self.border_mode, self.subsample[0])
<ide> cols = conv_output_length(cols, self.nb_col,
<ide> self.border_mode, self.subsample[1])
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], self.nb_filter, rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], rows, cols, self.nb_filter)
<ide>
<ide> def call(self, x, mask=None):
<ide> stride_row, stride_col = self.subsample
<ide> _, feature_dim, nb_filter = self.W_shape
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> if K.backend() == 'theano':
<ide> output = []
<ide> for i in range(self.output_row):
<ide> def call(self, x, mask=None):
<ide> output = K.batch_dot(x_aggregate, self.W)
<ide> output = K.reshape(output, (self.output_row, self.output_col, -1, nb_filter))
<ide> output = K.permute_dimensions(output, (2, 3, 0, 1))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> xs = []
<ide> for i in range(self.output_row):
<ide> for j in range(self.output_col):
<ide> def call(self, x, mask=None):
<ide> output = K.reshape(output, (self.output_row, self.output_col, -1, nb_filter))
<ide> output = K.permute_dimensions(output, (2, 0, 1, 3))
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> if self.bias:
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> output += K.reshape(self.b, (1, nb_filter, self.output_row, self.output_col))
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> output += K.reshape(self.b, (1, self.output_row, self.output_col, nb_filter))
<ide>
<ide> output = self.activation(output)
<ide> def get_config(self):
<ide> 'activation': self.activation.__name__,
<ide> 'border_mode': self.border_mode,
<ide> 'subsample': self.subsample,
<del> 'dim_ordering': self.dim_ordering,
<add> 'data_format': self.data_format,
<ide> 'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
<ide> 'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
<ide> 'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
<ide><path>keras/layers/pooling.py
<ide> def get_output_shape_for(self, input_shape):
<ide> return (input_shape[0], length, input_shape[2])
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> raise NotImplementedError
<ide>
<ide> def call(self, x, mask=None):
<ide> x = K.expand_dims(x, 2) # add dummy last dimension
<ide> output = self._pooling_function(inputs=x, pool_size=self.pool_size,
<ide> strides=self.st,
<ide> border_mode=self.border_mode,
<del> dim_ordering='tf')
<add> data_format='channels_last')
<ide> return K.squeeze(output, 2) # remove dummy last dimension
<ide>
<ide> def get_config(self):
<ide> def __init__(self, pool_length=2, stride=None,
<ide> border_mode, **kwargs)
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> output = K.pool2d(inputs, pool_size, strides,
<del> border_mode, dim_ordering, pool_mode='max')
<add> border_mode, data_format, pool_mode='max')
<ide> return output
<ide>
<ide>
<ide> def __init__(self, pool_length=2, stride=None,
<ide> border_mode, **kwargs)
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> output = K.pool2d(inputs, pool_size, strides,
<del> border_mode, dim_ordering, pool_mode='avg')
<add> border_mode, data_format, pool_mode='avg')
<ide> return output
<ide>
<ide>
<ide> class _Pooling2D(Layer):
<ide> """
<ide>
<ide> def __init__(self, pool_size=(2, 2), strides=None, border_mode='valid',
<del> dim_ordering='default', **kwargs):
<add> data_format='default', **kwargs):
<ide> super(_Pooling2D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.pool_size = tuple(pool_size)
<ide> if strides is None:
<ide> strides = self.pool_size
<ide> self.strides = tuple(strides)
<ide> if border_mode not in {'valid', 'same'}:
<ide> raise ValueError('`border_mode` must be in {valid, same}.')
<ide> self.border_mode = border_mode
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('`dim_ordering` must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('`data_format` must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=4)]
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> rows = input_shape[2]
<ide> cols = input_shape[3]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> rows = input_shape[1]
<ide> cols = input_shape[2]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> rows = conv_output_length(rows, self.pool_size[0],
<ide> self.border_mode, self.strides[0])
<ide> cols = conv_output_length(cols, self.pool_size[1],
<ide> self.border_mode, self.strides[1])
<ide>
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0], input_shape[1], rows, cols)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0], rows, cols, input_shape[3])
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> raise NotImplementedError
<ide>
<ide> def call(self, x, mask=None):
<ide> output = self._pooling_function(inputs=x,
<ide> pool_size=self.pool_size,
<ide> strides=self.strides,
<ide> border_mode=self.border_mode,
<del> dim_ordering=self.dim_ordering)
<add> data_format=self.data_format)
<ide> return output
<ide>
<ide> def get_config(self):
<ide> config = {'pool_size': self.pool_size,
<ide> 'border_mode': self.border_mode,
<ide> 'strides': self.strides,
<del> 'dim_ordering': self.dim_ordering}
<add> 'data_format': self.data_format}
<ide> base_config = super(_Pooling2D, self).get_config()
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<ide> class MaxPooling2D(_Pooling2D):
<ide> strides: tuple of 2 integers, or None. Strides values.
<ide> If None, it will default to `pool_size`.
<ide> border_mode: 'valid' or 'same'.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(nb_samples, channels, pooled_rows, pooled_cols)` if dim_ordering='th'
<add> `(nb_samples, channels, pooled_rows, pooled_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, pooled_rows, pooled_cols, channels)` if dim_ordering='tf'.
<add> `(samples, pooled_rows, pooled_cols, channels)` if data_format='channels_last'.
<ide> """
<ide>
<ide> def __init__(self, pool_size=(2, 2), strides=None, border_mode='valid',
<del> dim_ordering='default', **kwargs):
<add> data_format='default', **kwargs):
<ide> super(MaxPooling2D, self).__init__(pool_size, strides, border_mode,
<del> dim_ordering, **kwargs)
<add> data_format, **kwargs)
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> output = K.pool2d(inputs, pool_size, strides,
<del> border_mode, dim_ordering,
<add> border_mode, data_format,
<ide> pool_mode='max')
<ide> return output
<ide>
<ide> class AveragePooling2D(_Pooling2D):
<ide> strides: tuple of 2 integers, or None. Strides values.
<ide> If None, it will default to `pool_size`.
<ide> border_mode: 'valid' or 'same'.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 4D tensor with shape:
<del> `(nb_samples, channels, pooled_rows, pooled_cols)` if dim_ordering='th'
<add> `(nb_samples, channels, pooled_rows, pooled_cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, pooled_rows, pooled_cols, channels)` if dim_ordering='tf'.
<add> `(samples, pooled_rows, pooled_cols, channels)` if data_format='channels_last'.
<ide> """
<ide>
<ide> def __init__(self, pool_size=(2, 2), strides=None, border_mode='valid',
<del> dim_ordering='default', **kwargs):
<add> data_format='default', **kwargs):
<ide> super(AveragePooling2D, self).__init__(pool_size, strides, border_mode,
<del> dim_ordering, **kwargs)
<add> data_format, **kwargs)
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> output = K.pool2d(inputs, pool_size, strides,
<del> border_mode, dim_ordering, pool_mode='avg')
<add> border_mode, data_format, pool_mode='avg')
<ide> return output
<ide>
<ide>
<ide> class _Pooling3D(Layer):
<ide> """
<ide>
<ide> def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid',
<del> dim_ordering='default', **kwargs):
<add> data_format='default', **kwargs):
<ide> super(_Pooling3D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.pool_size = tuple(pool_size)
<ide> if strides is None:
<ide> strides = self.pool_size
<ide> self.strides = tuple(strides)
<ide> if border_mode not in {'valid', 'same'}:
<ide> raise ValueError('`border_mode` must be in {valid, same}.')
<ide> self.border_mode = border_mode
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('`dim_ordering` must be in {tf, th}.')
<del> self.dim_ordering = dim_ordering
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('`data_format` must be in {"channels_last", "channels_first"}.')
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=5)]
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> len_dim1 = input_shape[2]
<ide> len_dim2 = input_shape[3]
<ide> len_dim3 = input_shape[4]
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> len_dim1 = input_shape[1]
<ide> len_dim2 = input_shape[2]
<ide> len_dim3 = input_shape[3]
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', self.dim_ordering)
<add> raise ValueError('Invalid data_format:', self.data_format)
<ide>
<ide> len_dim1 = conv_output_length(len_dim1, self.pool_size[0],
<ide> self.border_mode, self.strides[0])
<ide> len_dim2 = conv_output_length(len_dim2, self.pool_size[1],
<ide> self.border_mode, self.strides[1])
<ide> len_dim3 = conv_output_length(len_dim3, self.pool_size[2],
<ide> self.border_mode, self.strides[2])
<del> if self.dim_ordering == 'th':
<add> if self.data_format == 'channels_first':
<ide> return (input_shape[0],
<ide> input_shape[1],
<ide> len_dim1, len_dim2, len_dim3)
<del> elif self.dim_ordering == 'tf':
<add> elif self.data_format == 'channels_last':
<ide> return (input_shape[0],
<ide> len_dim1, len_dim2, len_dim3,
<ide> input_shape[4])
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> raise NotImplementedError
<ide>
<ide> def call(self, x, mask=None):
<ide> output = self._pooling_function(inputs=x, pool_size=self.pool_size,
<ide> strides=self.strides,
<ide> border_mode=self.border_mode,
<del> dim_ordering=self.dim_ordering)
<add> data_format=self.data_format)
<ide> return output
<ide>
<ide> def get_config(self):
<ide> config = {'pool_size': self.pool_size,
<ide> 'border_mode': self.border_mode,
<ide> 'strides': self.strides,
<del> 'dim_ordering': self.dim_ordering}
<add> 'data_format': self.data_format}
<ide> base_config = super(_Pooling3D, self).get_config()
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<ide> class MaxPooling3D(_Pooling3D):
<ide> (2, 2, 2) will halve the size of the 3D input in each dimension.
<ide> strides: tuple of 3 integers, or None. Strides values.
<ide> border_mode: 'valid' or 'same'.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<del> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if dim_ordering='th'
<add> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 5D tensor with shape:
<del> `(nb_samples, channels, pooled_dim1, pooled_dim2, pooled_dim3)` if dim_ordering='th'
<add> `(nb_samples, channels, pooled_dim1, pooled_dim2, pooled_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, pooled_dim1, pooled_dim2, pooled_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, pooled_dim1, pooled_dim2, pooled_dim3, channels)` if data_format='channels_last'.
<ide> """
<ide>
<ide> def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid',
<del> dim_ordering='default', **kwargs):
<add> data_format='default', **kwargs):
<ide> super(MaxPooling3D, self).__init__(pool_size, strides, border_mode,
<del> dim_ordering, **kwargs)
<add> data_format, **kwargs)
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> output = K.pool3d(inputs, pool_size, strides,
<del> border_mode, dim_ordering, pool_mode='max')
<add> border_mode, data_format, pool_mode='max')
<ide> return output
<ide>
<ide>
<ide> class AveragePooling3D(_Pooling3D):
<ide> (2, 2, 2) will halve the size of the 3D input in each dimension.
<ide> strides: tuple of 3 integers, or None. Strides values.
<ide> border_mode: 'valid' or 'same'.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<del> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if dim_ordering='th'
<add> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 5D tensor with shape:
<del> `(nb_samples, channels, pooled_dim1, pooled_dim2, pooled_dim3)` if dim_ordering='th'
<add> `(nb_samples, channels, pooled_dim1, pooled_dim2, pooled_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, pooled_dim1, pooled_dim2, pooled_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, pooled_dim1, pooled_dim2, pooled_dim3, channels)` if data_format='channels_last'.
<ide> """
<ide>
<ide> def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid',
<del> dim_ordering='default', **kwargs):
<add> data_format='default', **kwargs):
<ide> super(AveragePooling3D, self).__init__(pool_size, strides, border_mode,
<del> dim_ordering, **kwargs)
<add> data_format, **kwargs)
<ide>
<ide> def _pooling_function(self, inputs, pool_size, strides,
<del> border_mode, dim_ordering):
<add> border_mode, data_format):
<ide> output = K.pool3d(inputs, pool_size, strides,
<del> border_mode, dim_ordering,
<add> border_mode, data_format,
<ide> pool_mode='avg')
<ide> return output
<ide>
<ide> class _GlobalPooling2D(Layer):
<ide> """Abstract class for different global pooling 2D layers.
<ide> """
<ide>
<del> def __init__(self, dim_ordering='default', **kwargs):
<add> def __init__(self, data_format='default', **kwargs):
<ide> super(_GlobalPooling2D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> self.dim_ordering = dim_ordering
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=4)]
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> return (input_shape[0], input_shape[3])
<ide> else:
<ide> return (input_shape[0], input_shape[1])
<ide> def call(self, x, mask=None):
<ide> raise NotImplementedError
<ide>
<ide> def get_config(self):
<del> config = {'dim_ordering': self.dim_ordering}
<add> config = {'data_format': self.data_format}
<ide> base_config = super(_GlobalPooling2D, self).get_config()
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<ide> class GlobalAveragePooling2D(_GlobalPooling2D):
<ide> """Global average pooling operation for spatial data.
<ide>
<ide> # Arguments
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 2D tensor with shape:
<ide> `(nb_samples, channels)`
<ide> """
<ide>
<ide> def call(self, x, mask=None):
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> return K.mean(x, axis=[1, 2])
<ide> else:
<ide> return K.mean(x, axis=[2, 3])
<ide> class GlobalMaxPooling2D(_GlobalPooling2D):
<ide> """Global max pooling operation for spatial data.
<ide>
<ide> # Arguments
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 4D tensor with shape:
<del> `(samples, channels, rows, cols)` if dim_ordering='th'
<add> `(samples, channels, rows, cols)` if data_format='channels_first'
<ide> or 4D tensor with shape:
<del> `(samples, rows, cols, channels)` if dim_ordering='tf'.
<add> `(samples, rows, cols, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 2D tensor with shape:
<ide> `(nb_samples, channels)`
<ide> """
<ide>
<ide> def call(self, x, mask=None):
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> return K.max(x, axis=[1, 2])
<ide> else:
<ide> return K.max(x, axis=[2, 3])
<ide> class _GlobalPooling3D(Layer):
<ide> """Abstract class for different global pooling 3D layers.
<ide> """
<ide>
<del> def __init__(self, dim_ordering='default', **kwargs):
<add> def __init__(self, data_format='default', **kwargs):
<ide> super(_GlobalPooling3D, self).__init__(**kwargs)
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> self.dim_ordering = dim_ordering
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> self.data_format = data_format
<ide> self.input_spec = [InputSpec(ndim=5)]
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> return (input_shape[0], input_shape[4])
<ide> else:
<ide> return (input_shape[0], input_shape[1])
<ide> def call(self, x, mask=None):
<ide> raise NotImplementedError
<ide>
<ide> def get_config(self):
<del> config = {'dim_ordering': self.dim_ordering}
<add> config = {'data_format': self.data_format}
<ide> base_config = super(_GlobalPooling3D, self).get_config()
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<ide> class GlobalAveragePooling3D(_GlobalPooling3D):
<ide> """Global Average pooling operation for 3D data.
<ide>
<ide> # Arguments
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<del> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if dim_ordering='th'
<add> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 2D tensor with shape:
<ide> `(nb_samples, channels)`
<ide> """
<ide>
<ide> def call(self, x, mask=None):
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> return K.mean(x, axis=[1, 2, 3])
<ide> else:
<ide> return K.mean(x, axis=[2, 3, 4])
<ide> class GlobalMaxPooling3D(_GlobalPooling3D):
<ide> """Global Max pooling operation for 3D data.
<ide>
<ide> # Arguments
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode is it at index 4.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode is it at index 4.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide>
<ide> # Input shape
<ide> 5D tensor with shape:
<del> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if dim_ordering='th'
<add> `(samples, channels, len_pool_dim1, len_pool_dim2, len_pool_dim3)` if data_format='channels_first'
<ide> or 5D tensor with shape:
<del> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if dim_ordering='tf'.
<add> `(samples, len_pool_dim1, len_pool_dim2, len_pool_dim3, channels)` if data_format='channels_last'.
<ide>
<ide> # Output shape
<ide> 2D tensor with shape:
<ide> `(nb_samples, channels)`
<ide> """
<ide>
<ide> def call(self, x, mask=None):
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> return K.max(x, axis=[1, 2, 3])
<ide> else:
<ide> return K.max(x, axis=[2, 3, 4])
<ide><path>keras/preprocessing/image.py
<ide> def flip_axis(x, axis):
<ide> return x
<ide>
<ide>
<del>def array_to_img(x, dim_ordering='default', scale=True):
<add>def array_to_img(x, data_format='default', scale=True):
<ide> """Converts a 3D Numpy array to a PIL Image instance.
<ide>
<ide> # Arguments
<ide> x: Input Numpy array.
<del> dim_ordering: Image data format.
<add> data_format: Image data format.
<ide> scale: Whether to rescale image values
<ide> to be within [0, 255].
<ide>
<ide> def array_to_img(x, dim_ordering='default', scale=True):
<ide>
<ide> # Raises
<ide> ImportError: if PIL is not available.
<del> ValueError: if invalid `x` or `dim_ordering` is passed.
<add> ValueError: if invalid `x` or `data_format` is passed.
<ide> """
<ide> if pil_image is None:
<ide> raise ImportError('Could not import PIL.Image. '
<ide> def array_to_img(x, dim_ordering='default', scale=True):
<ide> raise ValueError('Expected image array to have rank 3 (single image). '
<ide> 'Got array with shape:', x.shape)
<ide>
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Invalid data_format:', data_format)
<ide>
<ide> # Original Numpy array x has format (height, width, channel)
<ide> # or (channel, height, width)
<ide> # but target PIL image has format (width, height, channel)
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> x = x.transpose(1, 2, 0)
<ide> if scale:
<ide> x += max(-np.min(x), 0)
<ide> def array_to_img(x, dim_ordering='default', scale=True):
<ide> raise ValueError('Unsupported channel number: ', x.shape[2])
<ide>
<ide>
<del>def img_to_array(img, dim_ordering='default'):
<add>def img_to_array(img, data_format='default'):
<ide> """Converts a PIL Image instance to a Numpy array.
<ide>
<ide> # Arguments
<ide> img: PIL Image instance.
<del> dim_ordering: Image data format.
<add> data_format: Image data format.
<ide>
<ide> # Returns
<ide> A 3D Numpy array (float32).
<ide>
<ide> # Raises
<del> ValueError: if invalid `img` or `dim_ordering` is passed.
<add> ValueError: if invalid `img` or `data_format` is passed.
<ide> """
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<del> if dim_ordering not in {'th', 'tf'}:
<del> raise ValueError('Unknown dim_ordering: ', dim_ordering)
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format: ', data_format)
<ide> # Numpy array x has format (height, width, channel)
<ide> # or (channel, height, width)
<ide> # but original PIL image has format (width, height, channel)
<ide> x = np.asarray(img, dtype='float32')
<ide> if len(x.shape) == 3:
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> x = x.transpose(2, 0, 1)
<ide> elif len(x.shape) == 2:
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> x = x.reshape((1, x.shape[0], x.shape[1]))
<ide> else:
<ide> x = x.reshape((x.shape[0], x.shape[1], 1))
<ide> class ImageDataGenerator(object):
<ide> The function should take one argument:
<ide> one image (Numpy tensor with rank 3),
<ide> and should output a Numpy tensor with the same shape.
<del> dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
<del> (the depth) is at index 1, in 'tf' mode it is at index 3.
<del> It defaults to the `image_dim_ordering` value found in your
<add> data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
<add> (the depth) is at index 1, in 'channels_last' mode it is at index 3.
<add> It defaults to the `image_data_format` value found in your
<ide> Keras config file at `~/.keras/keras.json`.
<del> If you never set it, then it will be "tf".
<add> If you never set it, then it will be "channels_last".
<ide> """
<ide>
<ide> def __init__(self,
<ide> def __init__(self,
<ide> vertical_flip=False,
<ide> rescale=None,
<ide> preprocessing_function=None,
<del> dim_ordering='default'):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> data_format='default'):
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.featurewise_center = featurewise_center
<ide> self.samplewise_center = samplewise_center
<ide> self.featurewise_std_normalization = featurewise_std_normalization
<ide> def __init__(self,
<ide> self.rescale = rescale
<ide> self.preprocessing_function = preprocessing_function
<ide>
<del> if dim_ordering not in {'tf', 'th'}:
<del> raise ValueError('dim_ordering should be "tf" (channel after row and '
<del> 'column) or "th" (channel before row and column). '
<del> 'Received arg: ', dim_ordering)
<del> self.dim_ordering = dim_ordering
<del> if dim_ordering == 'th':
<add> if data_format not in {'channels_last', 'channels_first'}:
<add> raise ValueError('data_format should be "channels_last" (channel after row and '
<add> 'column) or "channels_first" (channel before row and column). '
<add> 'Received arg: ', data_format)
<add> self.data_format = data_format
<add> if data_format == 'channels_first':
<ide> self.channel_axis = 1
<ide> self.row_axis = 2
<ide> self.col_axis = 3
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> self.channel_axis = 3
<ide> self.row_axis = 1
<ide> self.col_axis = 2
<ide> def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
<ide> batch_size=batch_size,
<ide> shuffle=shuffle,
<ide> seed=seed,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> save_to_dir=save_to_dir,
<ide> save_prefix=save_prefix,
<ide> save_format=save_format)
<ide> def flow_from_directory(self, directory,
<ide> directory, self,
<ide> target_size=target_size, color_mode=color_mode,
<ide> classes=classes, class_mode=class_mode,
<del> dim_ordering=self.dim_ordering,
<add> data_format=self.data_format,
<ide> batch_size=batch_size, shuffle=shuffle, seed=seed,
<ide> save_to_dir=save_to_dir,
<ide> save_prefix=save_prefix,
<ide> def fit(self, x,
<ide> if x.shape[self.channel_axis] not in {1, 3, 4}:
<ide> raise ValueError(
<ide> 'Expected input to be images (as Numpy array) '
<del> 'following the dimension ordering convention "' + self.dim_ordering + '" '
<add> 'following the data format convention "' + self.data_format + '" '
<ide> '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
<ide> 'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
<ide> 'However, it was passed an array with shape ' + str(x.shape) +
<ide> class NumpyArrayIterator(Iterator):
<ide>
<ide> def __init__(self, x, y, image_data_generator,
<ide> batch_size=32, shuffle=False, seed=None,
<del> dim_ordering='default',
<add> data_format='default',
<ide> save_to_dir=None, save_prefix='', save_format='jpeg'):
<ide> if y is not None and len(x) != len(y):
<ide> raise ValueError('X (images tensor) and y (labels) '
<ide> 'should have the same length. '
<ide> 'Found: X.shape = %s, y.shape = %s' %
<ide> (np.asarray(x).shape, np.asarray(y).shape))
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.x = np.asarray(x)
<ide> if self.x.ndim != 4:
<ide> raise ValueError('Input data in `NumpyArrayIterator` '
<ide> 'should have rank 4. You passed an array '
<ide> 'with shape', self.x.shape)
<del> channels_axis = 3 if dim_ordering == 'tf' else 1
<add> channels_axis = 3 if data_format == 'channels_last' else 1
<ide> if self.x.shape[channels_axis] not in {1, 3, 4}:
<ide> raise ValueError('NumpyArrayIterator is set to use the '
<del> 'dimension ordering convention "' + dim_ordering + '" '
<add> 'data format convention "' + data_format + '" '
<ide> '(channels on axis ' + str(channels_axis) + '), i.e. expected '
<ide> 'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
<ide> 'However, it was passed an array with shape ' + str(self.x.shape) +
<ide> def __init__(self, x, y, image_data_generator,
<ide> else:
<ide> self.y = None
<ide> self.image_data_generator = image_data_generator
<del> self.dim_ordering = dim_ordering
<add> self.data_format = data_format
<ide> self.save_to_dir = save_to_dir
<ide> self.save_prefix = save_prefix
<ide> self.save_format = save_format
<ide> def next(self):
<ide> batch_x[i] = x
<ide> if self.save_to_dir:
<ide> for i in range(current_batch_size):
<del> img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
<add> img = array_to_img(batch_x[i], self.data_format, scale=True)
<ide> fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
<ide> index=current_index + i,
<ide> hash=np.random.randint(1e4),
<ide> class DirectoryIterator(Iterator):
<ide>
<ide> def __init__(self, directory, image_data_generator,
<ide> target_size=(256, 256), color_mode='rgb',
<del> dim_ordering='default',
<add> data_format='default',
<ide> classes=None, class_mode='categorical',
<ide> batch_size=32, shuffle=True, seed=None,
<ide> save_to_dir=None, save_prefix='', save_format='jpeg',
<ide> follow_links=False):
<del> if dim_ordering == 'default':
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format == 'default':
<add> data_format = K.image_data_format()
<ide> self.directory = directory
<ide> self.image_data_generator = image_data_generator
<ide> self.target_size = tuple(target_size)
<ide> if color_mode not in {'rgb', 'grayscale'}:
<ide> raise ValueError('Invalid color mode:', color_mode,
<ide> '; expected "rgb" or "grayscale".')
<ide> self.color_mode = color_mode
<del> self.dim_ordering = dim_ordering
<add> self.data_format = data_format
<ide> if self.color_mode == 'rgb':
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> self.image_shape = self.target_size + (3,)
<ide> else:
<ide> self.image_shape = (3,) + self.target_size
<ide> else:
<del> if self.dim_ordering == 'tf':
<add> if self.data_format == 'channels_last':
<ide> self.image_shape = self.target_size + (1,)
<ide> else:
<ide> self.image_shape = (1,) + self.target_size
<ide> def next(self):
<ide> img = load_img(os.path.join(self.directory, fname),
<ide> grayscale=grayscale,
<ide> target_size=self.target_size)
<del> x = img_to_array(img, dim_ordering=self.dim_ordering)
<add> x = img_to_array(img, data_format=self.data_format)
<ide> x = self.image_data_generator.random_transform(x)
<ide> x = self.image_data_generator.standardize(x)
<ide> batch_x[i] = x
<ide> # optionally save augmented images to disk for debugging purposes
<ide> if self.save_to_dir:
<ide> for i in range(current_batch_size):
<del> img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
<add> img = array_to_img(batch_x[i], self.data_format, scale=True)
<ide> fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
<ide> index=current_index + i,
<ide> hash=np.random.randint(1e4),
<ide><path>keras/utils/np_utils.py
<ide> def categorical_probas_to_classes(p):
<ide> return np.argmax(p, axis=1)
<ide>
<ide>
<del>def convert_kernel(kernel, dim_ordering=None):
<add>def convert_kernel(kernel, data_format=None):
<ide> """Converts a Numpy kernel matrix from Theano format to TensorFlow format.
<ide>
<ide> Also works reciprocally, since the transformation is its own inverse.
<ide>
<ide> # Arguments
<ide> kernel: Numpy array (4D or 5D).
<del> dim_ordering: the data format.
<add> data_format: the data format.
<ide>
<ide> # Returns
<ide> The converted kernel.
<ide>
<ide> # Raises
<del> ValueError: in case of invalid kernel shape or invalid dim_ordering.
<add> ValueError: in case of invalid kernel shape or invalid data_format.
<ide> """
<del> if dim_ordering is None:
<del> dim_ordering = K.image_dim_ordering()
<add> if data_format is None:
<add> data_format = K.image_data_format()
<ide> if not 4 <= kernel.ndim <= 5:
<ide> raise ValueError('Invalid kernel shape:', kernel.shape)
<ide>
<ide> slices = [slice(None, None, -1) for _ in range(kernel.ndim)]
<ide> no_flip = (slice(None, None), slice(None, None))
<del> if dim_ordering == 'th': # (out_depth, input_depth, ...)
<add> if data_format == 'channels_first': # (out_depth, input_depth, ...)
<ide> slices[:2] = no_flip
<del> elif dim_ordering == 'tf': # (..., input_depth, out_depth)
<add> elif data_format == 'channels_last': # (..., input_depth, out_depth)
<ide> slices[-2:] = no_flip
<ide> else:
<del> raise ValueError('Invalid dim_ordering:', dim_ordering)
<add> raise ValueError('Invalid data_format:', data_format)
<ide>
<ide> return np.copy(kernel[slices])
<ide>
<ide><path>tests/keras/backend/test_backends.py
<ide> def test_conv2d(self):
<ide>
<ide> kernel_val = np.random.random(kernel_shape) - 0.5
<ide>
<del> kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='th'))
<add> kernel_th = KTH.variable(convert_kernel(kernel_val, data_format='channels_first'))
<ide> kernel_tf = KTF.variable(kernel_val)
<ide>
<del> zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='th'))
<del> ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='th'))
<add> zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_first'))
<add> ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_first'))
<ide>
<ide> assert zth.shape == ztf.shape
<ide> assert_allclose(zth, ztf, atol=1e-05)
<ide> def test_conv2d(self):
<ide>
<ide> kernel_val = np.random.random(kernel_shape) - 0.5
<ide>
<del> kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
<add> kernel_th = KTH.variable(convert_kernel(kernel_val, data_format='channels_last'))
<ide> kernel_tf = KTF.variable(kernel_val)
<ide>
<del> zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='tf'))
<del> ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='tf'))
<add> zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_last'))
<add> ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_last'))
<ide>
<ide> assert zth.shape == ztf.shape
<ide> assert_allclose(zth, ztf, atol=1e-05)
<ide> def test_conv3d(self):
<ide> # TH kernel shape: (depth, input_depth, x, y, z)
<ide> # TF kernel shape: (x, y, z, input_depth, depth)
<ide>
<del> # test in dim_ordering = th
<add> # test in data_format = th
<ide> for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
<ide> for kernel_shape in [(4, 3, 2, 2, 2), (4, 3, 3, 2, 4)]:
<ide> xval = np.random.random(input_shape)
<ide> def test_conv3d(self):
<ide>
<ide> kernel_val = np.random.random(kernel_shape) - 0.5
<ide>
<del> kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='th'))
<add> kernel_th = KTH.variable(convert_kernel(kernel_val, data_format='channels_first'))
<ide> kernel_tf = KTF.variable(kernel_val)
<ide>
<del> zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='th'))
<del> ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='th'))
<add> zth = KTH.eval(KTH.conv3d(xth, kernel_th, data_format='channels_first'))
<add> ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, data_format='channels_first'))
<ide>
<ide> assert zth.shape == ztf.shape
<ide> assert_allclose(zth, ztf, atol=1e-05)
<ide>
<del> # test in dim_ordering = tf
<add> # test in data_format = tf
<ide> input_shape = (1, 2, 2, 2, 1)
<ide> kernel_shape = (2, 2, 2, 1, 1)
<ide>
<ide> def test_conv3d(self):
<ide>
<ide> kernel_val = np.random.random(kernel_shape) - 0.5
<ide>
<del> kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
<add> kernel_th = KTH.variable(convert_kernel(kernel_val, data_format='channels_last'))
<ide> kernel_tf = KTF.variable(kernel_val)
<ide>
<del> zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='tf'))
<del> ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='tf'))
<add> zth = KTH.eval(KTH.conv3d(xth, kernel_th, data_format='channels_last'))
<add> ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, data_format='channels_last'))
<ide>
<ide> assert zth.shape == ztf.shape
<ide> assert_allclose(zth, ztf, atol=1e-05)
<ide><path>tests/keras/layers/test_convolutional.py
<ide> def test_deconvolution_2d():
<ide> 'output_shape': (batch_size, nb_filter, rows, cols),
<ide> 'border_mode': border_mode,
<ide> 'subsample': subsample,
<del> 'dim_ordering': 'th'},
<add> 'data_format': 'channels_first'},
<ide> input_shape=(nb_samples, stack_size, nb_row, nb_col),
<ide> fixed_batch_size=True)
<ide>
<ide> def test_deconvolution_2d():
<ide> 'nb_col': 3,
<ide> 'output_shape': (batch_size, nb_filter, rows, cols),
<ide> 'border_mode': border_mode,
<del> 'dim_ordering': 'th',
<add> 'data_format': 'channels_first',
<ide> 'W_regularizer': 'l2',
<ide> 'b_regularizer': 'l2',
<ide> 'activity_regularizer': 'activity_l2',
<ide> def test_globalpooling_1d():
<ide> @keras_test
<ide> def test_globalpooling_2d():
<ide> layer_test(pooling.GlobalMaxPooling2D,
<del> kwargs={'dim_ordering': 'th'},
<add> kwargs={'data_format': 'channels_first'},
<ide> input_shape=(3, 4, 5, 6))
<ide> layer_test(pooling.GlobalMaxPooling2D,
<del> kwargs={'dim_ordering': 'tf'},
<add> kwargs={'data_format': 'channels_last'},
<ide> input_shape=(3, 5, 6, 4))
<ide> layer_test(pooling.GlobalAveragePooling2D,
<del> kwargs={'dim_ordering': 'th'},
<add> kwargs={'data_format': 'channels_first'},
<ide> input_shape=(3, 4, 5, 6))
<ide> layer_test(pooling.GlobalAveragePooling2D,
<del> kwargs={'dim_ordering': 'tf'},
<add> kwargs={'data_format': 'channels_last'},
<ide> input_shape=(3, 5, 6, 4))
<ide>
<ide>
<ide> @keras_test
<ide> def test_globalpooling_3d():
<ide> layer_test(pooling.GlobalMaxPooling3D,
<del> kwargs={'dim_ordering': 'th'},
<add> kwargs={'data_format': 'channels_first'},
<ide> input_shape=(3, 4, 3, 4, 3))
<ide> layer_test(pooling.GlobalMaxPooling3D,
<del> kwargs={'dim_ordering': 'tf'},
<add> kwargs={'data_format': 'channels_last'},
<ide> input_shape=(3, 4, 3, 4, 3))
<ide> layer_test(pooling.GlobalAveragePooling3D,
<del> kwargs={'dim_ordering': 'th'},
<add> kwargs={'data_format': 'channels_first'},
<ide> input_shape=(3, 4, 3, 4, 3))
<ide> layer_test(pooling.GlobalAveragePooling3D,
<del> kwargs={'dim_ordering': 'tf'},
<add> kwargs={'data_format': 'channels_last'},
<ide> input_shape=(3, 4, 3, 4, 3))
<ide>
<ide>
<ide> def test_zero_padding_2d():
<ide> stack_size = 2
<ide> input_nb_row = 4
<ide> input_nb_col = 5
<del> dim_ordering = K.image_dim_ordering()
<del> assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
<add> data_format = K.image_data_format()
<add> assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {"channels_last", "channels_first"}'
<ide>
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> input = np.ones((nb_samples, input_nb_row, input_nb_col, stack_size))
<del> elif dim_ordering == 'th':
<add> elif data_format == 'channels_first':
<ide> input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
<ide>
<ide> # basic test
<ide> def test_zero_padding_2d():
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> for offset in [0, 1, -1, -2]:
<ide> assert_allclose(np_output[:, offset, :, :], 0.)
<ide> assert_allclose(np_output[:, :, offset, :], 0.)
<ide> assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
<del> elif dim_ordering == 'th':
<add> elif data_format == 'channels_first':
<ide> for offset in [0, 1, -1, -2]:
<ide> assert_allclose(np_output[:, :, offset, :], 0.)
<ide> assert_allclose(np_output[:, :, :, offset], 0.)
<ide> def test_zero_padding_2d():
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<del> if dim_ordering == 'tf':
<add> if data_format == 'channels_last':
<ide> for top_offset in [0]:
<ide> assert_allclose(np_output[:, top_offset, :, :], 0.)
<ide> for bottom_offset in [-1, -2]:
<ide> def test_zero_padding_2d():
<ide> for right_offset in [-1, -2, -3, -4]:
<ide> assert_allclose(np_output[:, :, right_offset, :], 0.)
<ide> assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
<del> elif dim_ordering == 'th':
<add> elif data_format == 'channels_first':
<ide> for top_offset in [0]:
<ide> assert_allclose(np_output[:, :, top_offset, :], 0.)
<ide> for bottom_offset in [-1, -2]:
<ide> def test_upsampling_2d():
<ide> input_nb_row = 11
<ide> input_nb_col = 12
<ide>
<del> for dim_ordering in ['th', 'tf']:
<del> if dim_ordering == 'th':
<add> for data_format in ['channels_first', 'channels_last']:
<add> if data_format == 'channels_first':
<ide> input = np.random.rand(nb_samples, stack_size, input_nb_row,
<ide> input_nb_col)
<ide> else: # tf
<ide> def test_upsampling_2d():
<ide> for length_col in [2, 3, 9]:
<ide> layer = convolutional.UpSampling2D(
<ide> size=(length_row, length_col),
<del> dim_ordering=dim_ordering)
<add> data_format=data_format)
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> assert np_output.shape[2] == length_row * input_nb_row
<ide> assert np_output.shape[3] == length_col * input_nb_col
<ide> else: # tf
<ide> assert np_output.shape[1] == length_row * input_nb_row
<ide> assert np_output.shape[2] == length_col * input_nb_col
<ide>
<ide> # compare with numpy
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> expected_out = np.repeat(input, length_row, axis=2)
<ide> expected_out = np.repeat(expected_out, length_col, axis=3)
<ide> else: # tf
<ide> def test_upsampling_3d():
<ide> input_len_dim2 = 11
<ide> input_len_dim3 = 12
<ide>
<del> for dim_ordering in ['th', 'tf']:
<del> if dim_ordering == 'th':
<add> for data_format in ['channels_first', 'channels_last']:
<add> if data_format == 'channels_first':
<ide> input = np.random.rand(nb_samples, stack_size, input_len_dim1, input_len_dim2,
<ide> input_len_dim3)
<ide> else: # tf
<ide> def test_upsampling_3d():
<ide> for length_dim3 in [2, 3, 9]:
<ide> layer = convolutional.UpSampling3D(
<ide> size=(length_dim1, length_dim2, length_dim3),
<del> dim_ordering=dim_ordering)
<add> data_format=data_format)
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> assert np_output.shape[2] == length_dim1 * input_len_dim1
<ide> assert np_output.shape[3] == length_dim2 * input_len_dim2
<ide> assert np_output.shape[4] == length_dim3 * input_len_dim3
<ide> def test_upsampling_3d():
<ide> assert np_output.shape[3] == length_dim3 * input_len_dim3
<ide>
<ide> # compare with numpy
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> expected_out = np.repeat(input, length_dim1, axis=2)
<ide> expected_out = np.repeat(expected_out, length_dim2, axis=3)
<ide> expected_out = np.repeat(expected_out, length_dim3, axis=4)
<ide> def test_cropping_2d():
<ide> input_len_dim1 = 8
<ide> input_len_dim2 = 8
<ide> cropping = ((2, 2), (3, 3))
<del> dim_ordering = K.image_dim_ordering()
<add> data_format = K.image_data_format()
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> input = np.random.rand(nb_samples, stack_size,
<ide> input_len_dim1, input_len_dim2)
<ide> else:
<ide> def test_cropping_2d():
<ide> # basic test
<ide> layer_test(convolutional.Cropping2D,
<ide> kwargs={'cropping': cropping,
<del> 'dim_ordering': dim_ordering},
<add> 'data_format': data_format},
<ide> input_shape=input.shape)
<ide> # correctness test
<ide> layer = convolutional.Cropping2D(cropping=cropping,
<del> dim_ordering=dim_ordering)
<add> data_format=data_format)
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<ide> # compare with numpy
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> expected_out = input[:,
<ide> :,
<ide> cropping[0][0]: -cropping[0][1],
<ide> def test_cropping_2d():
<ide> # another correctness test (no cropping)
<ide> cropping = ((0, 0), (0, 0))
<ide> layer = convolutional.Cropping2D(cropping=cropping,
<del> dim_ordering=dim_ordering)
<add> data_format=data_format)
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<ide> def test_cropping_3d():
<ide> input_len_dim2 = 8
<ide> input_len_dim3 = 8
<ide> cropping = ((2, 2), (3, 3), (2, 3))
<del> dim_ordering = K.image_dim_ordering()
<add> data_format = K.image_data_format()
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> input = np.random.rand(nb_samples, stack_size,
<ide> input_len_dim1, input_len_dim2, input_len_dim3)
<ide> else:
<ide> def test_cropping_3d():
<ide> # basic test
<ide> layer_test(convolutional.Cropping3D,
<ide> kwargs={'cropping': cropping,
<del> 'dim_ordering': dim_ordering},
<add> 'data_format': data_format},
<ide> input_shape=input.shape)
<ide> # correctness test
<ide> layer = convolutional.Cropping3D(cropping=cropping,
<del> dim_ordering=dim_ordering)
<add> data_format=data_format)
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<ide> # compare with numpy
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> expected_out = input[:,
<ide> :,
<ide> cropping[0][0]: -cropping[0][1],
<ide> def test_cropping_3d():
<ide> # another correctness test (no cropping)
<ide> cropping = ((0, 0), (0, 0), (0, 0))
<ide> layer = convolutional.Cropping3D(cropping=cropping,
<del> dim_ordering=dim_ordering)
<add> data_format=data_format)
<ide> layer.build(input.shape)
<ide> output = layer(K.variable(input))
<ide> np_output = K.eval(output)
<ide><path>tests/keras/layers/test_convolutional_recurrent.py
<ide> def test_recurrent_convolutional():
<ide> input_nb_row = 5
<ide> input_nb_col = 5
<ide> sequence_len = 2
<del> for dim_ordering in ['th', 'tf']:
<add> for data_format in ['channels_first', 'channels_last']:
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> input = np.random.rand(nb_samples, sequence_len,
<ide> input_channel,
<ide> input_nb_row, input_nb_col)
<ide> def test_recurrent_convolutional():
<ide> for return_sequences in [True, False]:
<ide> # test for ouptput shape:
<ide> output = layer_test(convolutional_recurrent.ConvLSTM2D,
<del> kwargs={'dim_ordering': dim_ordering,
<add> kwargs={'data_format': data_format,
<ide> 'return_sequences': return_sequences,
<ide> 'nb_filter': nb_filter,
<ide> 'nb_row': nb_row,
<ide> def test_recurrent_convolutional():
<ide>
<ide> output_shape = [nb_samples, input_nb_row, input_nb_col]
<ide>
<del> if dim_ordering == 'th':
<add> if data_format == 'channels_first':
<ide> output_shape.insert(1, nb_filter)
<ide> else:
<ide> output_shape.insert(3, nb_filter)
<ide> def test_recurrent_convolutional():
<ide> assert output.shape == tuple(output_shape)
<ide>
<ide> # No need to check statefulness for both
<del> if dim_ordering == 'th' or return_sequences:
<add> if data_format == 'channels_first' or return_sequences:
<ide> continue
<ide>
<ide> # Tests for statefulness
<ide> model = Sequential()
<del> kwargs = {'dim_ordering': dim_ordering,
<add> kwargs = {'data_format': data_format,
<ide> 'return_sequences': return_sequences,
<ide> 'nb_filter': nb_filter,
<ide> 'nb_row': nb_row,
<ide> def test_recurrent_convolutional():
<ide> assert(out4.max() != out5.max())
<ide>
<ide> # check regularizers
<del> kwargs = {'dim_ordering': dim_ordering,
<add> kwargs = {'data_format': data_format,
<ide> 'return_sequences': return_sequences,
<ide> 'nb_filter': nb_filter,
<ide> 'nb_row': nb_row,
<ide> def test_recurrent_convolutional():
<ide>
<ide> # check dropout
<ide> layer_test(convolutional_recurrent.ConvLSTM2D,
<del> kwargs={'dim_ordering': dim_ordering,
<add> kwargs={'data_format': data_format,
<ide> 'return_sequences': return_sequences,
<ide> 'nb_filter': nb_filter,
<ide> 'nb_row': nb_row,
<ide><path>tests/keras/layers/test_local.py
<ide> def test_locallyconnected_2d():
<ide> 'b_regularizer': 'l2',
<ide> 'activity_regularizer': 'activity_l2',
<ide> 'subsample': subsample,
<del> 'dim_ordering': 'tf'},
<add> 'data_format': 'channels_last'},
<ide> input_shape=(nb_samples, nb_row, nb_col, stack_size))
<ide>
<ide> layer_test(local.LocallyConnected2D,
<ide> def test_locallyconnected_2d():
<ide> 'b_regularizer': 'l2',
<ide> 'activity_regularizer': 'activity_l2',
<ide> 'subsample': subsample,
<del> 'dim_ordering': 'th'},
<add> 'data_format': 'channels_first'},
<ide> input_shape=(nb_samples, stack_size, nb_row, nb_col))
<ide>
<ide>
<ide><path>tests/keras/preprocessing/test_image.py
<ide> def test_image_data_generator_invalid_data(self):
<ide> featurewise_std_normalization=True,
<ide> samplewise_std_normalization=True,
<ide> zca_whitening=True,
<del> dim_ordering='tf')
<add> data_format='channels_last')
<ide> # Test fit with invalid data
<ide> with pytest.raises(ValueError):
<ide> x = np.random.random((3, 10, 10))
<ide> def test_image_data_generator_fit(self):
<ide> featurewise_std_normalization=True,
<ide> samplewise_std_normalization=True,
<ide> zca_whitening=True,
<del> dim_ordering='tf')
<add> data_format='channels_last')
<ide> # Test grayscale
<ide> x = np.random.random((32, 10, 10, 1))
<ide> generator.fit(x)
<ide> def test_image_data_generator_fit(self):
<ide> featurewise_std_normalization=True,
<ide> samplewise_std_normalization=True,
<ide> zca_whitening=True,
<del> dim_ordering='th')
<add> data_format='channels_first')
<ide> # Test grayscale
<ide> x = np.random.random((32, 1, 10, 10))
<ide> generator.fit(x)
<ide> def test_directory_iterator(self):
<ide> def test_img_utils(self):
<ide> height, width = 10, 8
<ide>
<del> # Test th dim ordering
<add> # Test th data format
<ide> x = np.random.random((3, height, width))
<del> img = image.array_to_img(x, dim_ordering='th')
<add> img = image.array_to_img(x, data_format='channels_first')
<ide> assert img.size == (width, height)
<del> x = image.img_to_array(img, dim_ordering='th')
<add> x = image.img_to_array(img, data_format='channels_first')
<ide> assert x.shape == (3, height, width)
<ide> # Test 2D
<ide> x = np.random.random((1, height, width))
<del> img = image.array_to_img(x, dim_ordering='th')
<add> img = image.array_to_img(x, data_format='channels_first')
<ide> assert img.size == (width, height)
<del> x = image.img_to_array(img, dim_ordering='th')
<add> x = image.img_to_array(img, data_format='channels_first')
<ide> assert x.shape == (1, height, width)
<ide>
<del> # Test tf dim ordering
<add> # Test tf data format
<ide> x = np.random.random((height, width, 3))
<del> img = image.array_to_img(x, dim_ordering='tf')
<add> img = image.array_to_img(x, data_format='channels_last')
<ide> assert img.size == (width, height)
<del> x = image.img_to_array(img, dim_ordering='tf')
<add> x = image.img_to_array(img, data_format='channels_last')
<ide> assert x.shape == (height, width, 3)
<ide> # Test 2D
<ide> x = np.random.random((height, width, 1))
<del> img = image.array_to_img(x, dim_ordering='tf')
<add> img = image.array_to_img(x, data_format='channels_last')
<ide> assert img.size == (width, height)
<del> x = image.img_to_array(img, dim_ordering='tf')
<add> x = image.img_to_array(img, data_format='channels_last')
<ide> assert x.shape == (height, width, 1)
<ide>
<ide> | 43 |
PHP | PHP | fix nth where step <= offset | 98aa74abd91eefa7fd14cc5a649a6fa264bbdf97 | <ide><path>src/Illuminate/Collections/Collection.php
<ide> public function nth($step, $offset = 0)
<ide>
<ide> $position = 0;
<ide>
<del> foreach ($this->items as $item) {
<del> if ($position % $step === $offset) {
<add> foreach ($this->slice($offset)->items as $item) {
<add> if ($position % $step === 0) {
<ide> $new[] = $item;
<ide> }
<ide>
<ide><path>src/Illuminate/Collections/LazyCollection.php
<ide> public function nth($step, $offset = 0)
<ide> return new static(function () use ($step, $offset) {
<ide> $position = 0;
<ide>
<del> foreach ($this as $item) {
<del> if ($position % $step === $offset) {
<add> foreach ($this->slice($offset) as $item) {
<add> if ($position % $step === 0) {
<ide> yield $item;
<ide> }
<ide>
<ide><path>tests/Support/SupportCollectionTest.php
<ide> public function testNth($collection)
<ide> $this->assertEquals(['b', 'f'], $data->nth(4, 1)->all());
<ide> $this->assertEquals(['c'], $data->nth(4, 2)->all());
<ide> $this->assertEquals(['d'], $data->nth(4, 3)->all());
<add> $this->assertEquals(['c', 'e'], $data->nth(2, 2)->all());
<add> $this->assertEquals(['c', 'd', 'e', 'f'], $data->nth(1, 2)->all());
<ide> }
<ide>
<ide> /** | 3 |
Python | Python | fix public address on nic | 6b627abcd361568377698ba02e04b1749254de8c | <ide><path>libcloud/compute/drivers/azure_arm.py
<ide> def ex_delete_network_security_group(self, name, resource_group,
<ide> data=data,
<ide> method='DELETE')
<ide>
<del> def ex_create_network(self, name, resource_group, location=None, addressSpace="10.0.0.0/16"):
<add> def ex_create_network(self, name, resource_group, location=None, addressSpace="10.0.0.0/16", networkSecurityGroup=''):
<ide> """
<ide> Create a virtual network.
<ide>
<ide> def ex_create_network(self, name, resource_group, location=None, addressSpace="1
<ide> {
<ide> "name": "Default",
<ide> "properties": {
<add> "networkSecurityGroup": {"id":networkSecurityGroup},
<ide> "addressPrefix": "10.0.0.0/24"
<ide> }
<ide> }
<ide> def ex_create_network_interface(self, name, subnet, resource_group,
<ide> "properties": {
<ide> "ipConfigurations": [{
<ide> "name": name,
<del> "networkSecurityGroup": networkSecurityGroup,
<ide> "properties": {
<add> "publicIPAddress": {
<add> "id": public_ip.id
<add> },
<ide> "subnet": {
<ide> "id": subnet.id
<ide> }, | 1 |
Java | Java | use callonqueue() instead of latch | 01e291751a35299a8059f6a14173c57000f7f6ac | <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/CatalystInstanceImpl.java
<ide> import java.util.Collection;
<ide> import java.util.concurrent.Callable;
<ide> import java.util.concurrent.CopyOnWriteArrayList;
<del>import java.util.concurrent.CountDownLatch;
<ide> import java.util.concurrent.TimeUnit;
<ide> import java.util.concurrent.atomic.AtomicInteger;
<ide>
<ide> private ReactBridge initializeBridge(
<ide> @Override
<ide> public void runJSBundle() {
<ide> try {
<del> final CountDownLatch initLatch = new CountDownLatch(1);
<del> mCatalystQueueConfiguration.getJSQueueThread().runOnQueue(
<del> new Runnable() {
<add> mJSBundleHasLoaded = mCatalystQueueConfiguration.getJSQueueThread().callOnQueue(
<add> new Callable<Boolean>() {
<ide> @Override
<del> public void run() {
<add> public Boolean call() throws Exception {
<ide> Assertions.assertCondition(!mJSBundleHasLoaded, "JS bundle was already loaded!");
<del> mJSBundleHasLoaded = true;
<ide>
<ide> incrementPendingJSCalls();
<ide>
<ide> public void run() {
<ide> Systrace.endSection(Systrace.TRACE_TAG_REACT_JAVA_BRIDGE);
<ide> }
<ide>
<del> initLatch.countDown();
<add> return true;
<ide> }
<del> });
<del> Assertions.assertCondition(
<del> initLatch.await(LOAD_JS_BUNDLE_TIMEOUT_MS, TimeUnit.MILLISECONDS),
<del> "Timed out loading JS!");
<del> } catch (InterruptedException e) {
<del> throw new RuntimeException(e);
<add> }).get(LOAD_JS_BUNDLE_TIMEOUT_MS, TimeUnit.MILLISECONDS);
<add> } catch (Exception t) {
<add> throw new RuntimeException(t);
<ide> }
<ide> }
<ide> | 1 |
Ruby | Ruby | add flat-.pkg support to curldownloadstrategy | 4490b739d5e71912bdd99daa5ab02f9c0b3433e8 | <ide><path>Library/Homebrew/download_strategy.rb
<ide> def fetch
<ide> def stage
<ide> if @tarball_path.extname == '.jar'
<ide> magic_bytes = nil
<add> elsif @tarball_path.extname == '.pkg'
<add> # Use more than 4 characters to not clash with magicbytes
<add> magic_bytes = "____pkg"
<ide> else
<ide> # get the first four bytes
<ide> File.open(@tarball_path) { |f| magic_bytes = f.read(4) }
<ide> def stage
<ide> # TODO check if it's really a tar archive
<ide> safe_system '/usr/bin/tar', 'xf', @tarball_path
<ide> chdir
<add> when '____pkg'
<add> safe_system '/usr/sbin/pkgutil', '--expand', @tarball_path, File.basename(@url)
<add> chdir
<ide> when 'Rar!'
<ide> quiet_safe_system 'unrar', 'x', {:quiet_flag => '-inul'}, @tarball_path
<ide> else | 1 |
Python | Python | improve word embedding example, py3 compatibility | 52251f523f6700c50881ebc28edf13913ef0fde5 | <ide><path>examples/skipgram_word_embeddings.py
<ide> nb_epoch = 1
<ide> dim_proj = 256 # embedding space dimension
<ide>
<del>save = False
<del>load = False
<add>save = True
<add>load_model = False
<add>load_tokenizer = False
<ide> train_model = True
<ide> save_dir = os.path.expanduser("~/.keras/models")
<del>model_load_fname = "HN_skipgram_model_full_256.pkl"
<del>model_save_fname = "HN_skipgram_model_full_256.pkl"
<add>model_load_fname = "HN_skipgram_model.pkl"
<add>model_save_fname = "HN_skipgram_model.pkl"
<ide> tokenizer_fname = "HN_tokenizer.pkl"
<ide>
<ide> data_path = os.path.expanduser("~/")+"HNCommentsAll.1perline.json"
<ide> def text_generator(path=data_path):
<ide> f.close()
<ide>
<ide> # model management
<del>if load:
<add>if load_tokenizer:
<ide> print('Load tokenizer...')
<del> tokenizer = six.moves.cPickle.load(open(os.path.join(save_dir, tokenizer_fname)))
<del> print('Load model...')
<del> model = six.moves.cPickle.load(open(os.path.join(save_dir, model_load_fname)))
<add> tokenizer = six.moves.cPickle.load(open(os.path.join(save_dir, tokenizer_fname), 'rb'))
<ide> else:
<ide> print("Fit tokenizer...")
<ide> tokenizer = text.Tokenizer(nb_words=max_features)
<ide> def text_generator(path=data_path):
<ide> print("Save tokenizer...")
<ide> if not os.path.exists(save_dir):
<ide> os.makedirs(save_dir)
<del> six.moves.cPickle.dump(tokenizer, open(os.path.join(save_dir, tokenizer_fname), "w"))
<add> six.moves.cPickle.dump(tokenizer, open(os.path.join(save_dir, tokenizer_fname), "wb"))
<ide>
<ide> # training process
<ide> if train_model:
<del> if not load:
<add> if load_model:
<add> print('Load model...')
<add> model = six.moves.cPickle.load(open(os.path.join(save_dir, model_load_fname), 'rb'))
<add> else:
<ide> print('Build model...')
<ide> model = Sequential()
<ide> model.add(WordContextProduct(max_features, proj_dim=dim_proj, init="normal"))
<del> model.compile(loss='mse', optimizer='rmsprop')
<add> model.compile(loss='hinge', optimizer='adam')
<ide>
<ide> sampling_table = sequence.make_sampling_table(max_features)
<ide>
<ide> def text_generator(path=data_path):
<ide> print("Saving model...")
<ide> if not os.path.exists(save_dir):
<ide> os.makedirs(save_dir)
<del> six.moves.cPickle.dump(model, open(os.path.join(save_dir, model_save_fname), "w"))
<add> six.moves.cPickle.dump(model, open(os.path.join(save_dir, model_save_fname), "wb"))
<ide>
<ide>
<ide> print("It's test time!") | 1 |
Ruby | Ruby | add failing test cases | 44b819616ad17e1f4bddd332d9bdce87b85e8627 | <ide><path>railties/test/generators/shared_generator_tests.rb
<ide> def test_skip_keeps
<ide> assert_file ".gitignore" do |content|
<ide> assert_no_match(/\.keep/, content)
<ide> end
<del>
<add> assert_directory("app/assets/images")
<add> assert_directory("app/models/concerns")
<ide> assert_no_file("app/models/concerns/.keep")
<ide> end
<ide> | 1 |
Python | Python | fix serialization of empty doc + unit test | 59000ee21dcacb091fd3493bdfe4ea57e664e110 | <ide><path>spacy/tests/regression/test_issue5141.py
<add>from spacy.tokens import DocBin
<add>
<add>
<add>def test_issue5141(en_vocab):
<add> """ Ensure an empty DocBin does not crash on serialization """
<add> doc_bin = DocBin(attrs=["DEP", "HEAD"])
<add> assert list(doc_bin.get_docs(en_vocab)) == []
<add> doc_bin_bytes = doc_bin.to_bytes()
<add>
<add> doc_bin_2 = DocBin().from_bytes(doc_bin_bytes)
<add> assert list(doc_bin_2.get_docs(en_vocab)) == []
<ide><path>spacy/tokens/_serialize.py
<ide> def to_bytes(self):
<ide> for tokens in self.tokens:
<ide> assert len(tokens.shape) == 2, tokens.shape # this should never happen
<ide> lengths = [len(tokens) for tokens in self.tokens]
<add> tokens = numpy.vstack(self.tokens) if self.tokens else numpy.asarray([])
<add> spaces = numpy.vstack(self.spaces) if self.spaces else numpy.asarray([])
<add>
<ide> msg = {
<ide> "attrs": self.attrs,
<del> "tokens": numpy.vstack(self.tokens).tobytes("C"),
<del> "spaces": numpy.vstack(self.spaces).tobytes("C"),
<add> "tokens": tokens.tobytes("C"),
<add> "spaces": spaces.tobytes("C"),
<ide> "lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"),
<ide> "strings": list(self.strings),
<ide> "cats": self.cats, | 2 |
Python | Python | update esm checkpoints to point to `facebook/` | 5fda1fbd4625e93d023fe02153ec4a05b26b16cc | <ide><path>src/transformers/models/esm/modeling_esm.py
<ide>
<ide> logger = logging.get_logger(__name__)
<ide>
<del>_CHECKPOINT_FOR_DOC = "Rocketknight1/esm2_t6_8M_UR50D"
<add>_CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
<ide> _CONFIG_FOR_DOC = "EsmConfig"
<ide> _TOKENIZER_FOR_DOC = "EsmTokenizer"
<ide>
<ide> ESM_PRETRAINED_MODEL_ARCHIVE_LIST = [
<del> "Rocketknight1/esm2_t6_8M_UR50D",
<del> "Rocketknight1/esm2_t12_35M_UR50D",
<add> "facebook/esm2_t6_8M_UR50D",
<add> "facebook/esm2_t12_35M_UR50D",
<ide> # This is not a complete list of all ESM models!
<ide> # See all ESM models at https://huggingface.co/models?filter=esm
<ide> ]
<ide><path>src/transformers/models/esm/modeling_tf_esm.py
<ide>
<ide> logger = logging.get_logger(__name__)
<ide>
<del>_CHECKPOINT_FOR_DOC = "Rocketknight1/esm2_t6_8M_UR50D"
<add>_CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
<ide> _CONFIG_FOR_DOC = "EsmConfig"
<ide> _TOKENIZER_FOR_DOC = "EsmTokenizer"
<ide>
<ide> TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST = [
<del> "Rocketknight1/esm2_t6_8M_UR50D",
<del> "Rocketknight1/esm2_t12_35M_UR50D",
<add> "facebook/esm2_t6_8M_UR50D",
<add> "facebook/esm2_t12_35M_UR50D",
<ide> # This is not a complete list of all ESM models!
<ide> # See all ESM models at https://huggingface.co/models?filter=esm
<ide> ]
<ide><path>src/transformers/models/esm/tokenization_esm.py
<ide>
<ide> PRETRAINED_VOCAB_FILES_MAP = {
<ide> "vocab_file": {
<del> "Rocketknight1/esm2_t6_8M_UR50D": (
<del> "https://huggingface.co/Rocketknight1/esm2_t6_8M_UR50D/resolve/main/vocab.txt"
<del> ),
<del> "Rocketknight1/esm2_t12_35M_UR50D": (
<del> "https://huggingface.co/Rocketknight1/esm2_t12_35M_UR50D/resolve/main/vocab.txt"
<del> ),
<add> "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
<add> "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
<ide> },
<ide> }
<ide>
<ide> PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
<del> "Rocketknight1/esm2_t6_8M_UR50D": 1024,
<del> "Rocketknight1/esm2_t12_35M_UR50D": 1024,
<add> "facebook/esm2_t6_8M_UR50D": 1024,
<add> "facebook/esm2_t12_35M_UR50D": 1024,
<ide> }
<ide>
<ide> | 3 |
Text | Text | fix url in displayname description | b4f4f10478c4b552b6cf98d9c6f2709316b9e4fa | <ide><path>docs/docs/ref-03-component-specs.md
<ide> The `mixins` array allows you to use mixins to share behavior among multiple com
<ide> string displayName
<ide> ```
<ide>
<del>The `displayName` string is used in debugging messages. JSX sets this value automatically, see [JSX in Depth](react/docs/jsx-in-depth.html#react-composite-components).
<add>The `displayName` string is used in debugging messages. JSX sets this value automatically, see [JSX in Depth](/react/docs/jsx-in-depth.html#react-composite-components).
<ide>
<ide>
<ide> ## Lifecycle Methods | 1 |
Javascript | Javascript | enable another test case in test-net-pingpong | d964b4c5aafd431cfb63529a462871c446522f7a | <ide><path>test/simple/test-net-pingpong.js
<ide> function pingPongTest(port, host) {
<ide> /* All are run at once, so run on different ports */
<ide> if (!process.useUV) {
<ide> // these tests will not run yet with net_uv TODO: remove when net_uv supports dns
<del> pingPongTest(20989, 'localhost');
<ide> pingPongTest(20997, '::1');
<ide> pingPongTest('/tmp/pingpong.sock');
<ide> }
<ide> pingPongTest(20988);
<add>pingPongTest(20989, 'localhost');
<ide>
<ide> process.addListener('exit', function () {
<ide> if (!process.useUV) {
<ide> assert.equal(4, tests_run);
<ide> } else {
<del> assert.equal(1, tests_run);
<add> assert.equal(2, tests_run);
<ide> }
<ide> console.log('done');
<ide> }); | 1 |
Javascript | Javascript | use `trim` helper | 96fa70511d0177270369b00471d4020617a45537 | <ide><path>test/ng/compileSpec.js
<ide> describe('$compile', function() {
<ide> it('should allow nested transclude directives with sync template containing sync template', inject(function($compile, $rootScope) {
<ide> element = $compile('<div sync-sync>transcluded content</div>')($rootScope);
<ide> $rootScope.$digest();
<del> expect(element.text().trim()).toEqual('transcluded content');
<add> expect(trim(element.text())).toEqual('transcluded content');
<ide> }));
<ide>
<ide> it('should allow nested transclude directives with sync template containing async template', inject(function($compile, $rootScope) {
<ide> element = $compile('<div sync-async>transcluded content</div>')($rootScope);
<ide> $rootScope.$digest();
<del> expect(element.text().trim()).toEqual('transcluded content');
<add> expect(trim(element.text())).toEqual('transcluded content');
<ide> }));
<ide>
<ide> it('should allow nested transclude directives with async template containing sync template', inject(function($compile, $rootScope) {
<ide> element = $compile('<div async-sync>transcluded content</div>')($rootScope);
<ide> $rootScope.$digest();
<del> expect(element.text().trim()).toEqual('transcluded content');
<add> expect(trim(element.text())).toEqual('transcluded content');
<ide> }));
<ide>
<ide> it('should allow nested transclude directives with async template containing asynch template', inject(function($compile, $rootScope) {
<ide> element = $compile('<div async-async>transcluded content</div>')($rootScope);
<ide> $rootScope.$digest();
<del> expect(element.text().trim()).toEqual('transcluded content');
<add> expect(trim(element.text())).toEqual('transcluded content');
<ide> }));
<ide> });
<ide> | 1 |
Text | Text | relax requirements for setaad in ccm mode | 9e340357df763fcaa06376e14857dec468787e99 | <ide><path>doc/api/crypto.md
<ide> mode must adhere to certain restrictions when using the cipher API:
<ide> bytes (`7 ≤ N ≤ 13`).
<ide> - The length of the plaintext is limited to `2 ** (8 * (15 - N))` bytes.
<ide> - When decrypting, the authentication tag must be set via `setAuthTag()` before
<del> specifying additional authenticated data or calling `update()`.
<add> calling `update()`.
<ide> Otherwise, decryption will fail and `final()` will throw an error in
<ide> compliance with section 2.6 of [RFC 3610][].
<ide> - Using stream methods such as `write(data)`, `end(data)` or `pipe()` in CCM | 1 |
Ruby | Ruby | drop version to 1.6+ | 86a838c03f5bea58cb4e19ef77d805a0af93829b | <ide><path>Library/Homebrew/test/language/java_spec.rb
<ide> describe Language::Java do
<ide> describe "::java_home" do
<ide> it "returns valid JAVA_HOME if version is specified", :needs_java do
<del> java_home = described_class.java_home("1.8+")
<add> java_home = described_class.java_home("1.6+")
<ide> expect(java_home/"bin/java").to be_an_executable
<ide> end
<ide> | 1 |
PHP | PHP | add application.buildcontainer event | 6edcd5450a63eebd65c88cf0bb0fa6747bdf4e47 | <ide><path>src/Http/BaseApplication.php
<ide> protected function buildContainer(): ContainerInterface
<ide> $plugin->services($container);
<ide> }
<ide>
<add> $event = $this->dispatchEvent('Application.buildContainer', ['container' => $container]);
<add> if ($event->getResult() instanceof ContainerInterface) {
<add> return $event->getResult();
<add> }
<add>
<ide> return $container;
<ide> }
<ide>
<ide><path>tests/TestCase/Http/BaseApplicationTest.php
<ide>
<ide> use Cake\Core\BasePlugin;
<ide> use Cake\Core\Configure;
<add>use Cake\Core\Container;
<ide> use Cake\Core\ContainerInterface;
<ide> use Cake\Http\BaseApplication;
<ide> use Cake\Http\MiddlewareQueue;
<ide> public function testGetContainer()
<ide> $this->assertInstanceOf(ContainerInterface::class, $container);
<ide> $this->assertSame($container, $app->getContainer(), 'Should return a reference');
<ide> }
<add>
<add> public function testBuildContainerEvent()
<add> {
<add> $app = $this->getMockForAbstractClass(BaseApplication::class, [$this->path]);
<add> $called = false;
<add> $app->getEventManager()->on('Application.buildContainer', function ($event, $container) use (&$called) {
<add> $this->assertInstanceOf(BaseApplication::class, $event->getSubject());
<add> $this->assertInstanceOf(ContainerInterface::class, $container);
<add> $called = true;
<add> });
<add>
<add> $container = $app->getContainer();
<add> $this->assertInstanceOf(ContainerInterface::class, $container);
<add> $this->assertTrue($called, 'Listener should be called');
<add> }
<add>
<add> public function testBuildContainerEventReplaceContainer()
<add> {
<add> $app = $this->getMockForAbstractClass(BaseApplication::class, [$this->path]);
<add> $app->getEventManager()->on('Application.buildContainer', function () {
<add> $new = new Container();
<add> $new->add('testing', 'yes');
<add>
<add> return $new;
<add> });
<add>
<add> $container = $app->getContainer();
<add> $this->assertInstanceOf(ContainerInterface::class, $container);
<add> $this->assertTrue($container->has('testing'));
<add> }
<ide> } | 2 |
Ruby | Ruby | pass array to puts instead of iterating over it | d2aeadb1cd087081c7631622e01d8522e63e27eb | <ide><path>Library/Homebrew/cmd/doctor.rb
<ide> def doctor
<ide> checks = Checks.new
<ide>
<ide> if ARGV.include? '--list-checks'
<del> checks.methods.grep(/^check_/).sort.each { |m| puts m }
<add> puts checks.methods.grep(/^check_/).sort
<ide> exit
<ide> end
<ide> | 1 |
Text | Text | add x-csrftoken http header in swagger-ui example | a9e55334e7d42c03929b33708cee6f0bd908e7c2 | <ide><path>docs/topics/documenting-your-api.md
<ide> this:
<ide> SwaggerUIBundle.presets.apis,
<ide> SwaggerUIBundle.SwaggerUIStandalonePreset
<ide> ],
<del> layout: "BaseLayout"
<add> layout: "BaseLayout",
<add> requestInterceptor: (request) => {
<add> request.headers['X-CSRFToken'] = "{{ csrf_token }}"
<add> return request;
<add> }
<ide> })
<ide> </script>
<ide> </body> | 1 |
PHP | PHP | fix dirty associations that use formatresults | 431666a83dbc08cd1625674b37c77f23b98cdda2 | <ide><path>src/ORM/Association.php
<ide> protected function _formatAssociationResults(Query $query, Query $surrogate, arr
<ide> }
<ide>
<ide> /** @var \Cake\Collection\CollectionInterface $results */
<del> return $results->insert($property, $extracted);
<add> return $results
<add> ->insert($property, $extracted)
<add> ->map(function ($result) {
<add> $result->clean();
<add>
<add> return $result;
<add> });
<ide> }, Query::PREPEND);
<ide> }
<ide>
<ide><path>tests/TestCase/ORM/Association/BelongsToTest.php
<ide> public function testAttachToNoForeignKeySelect()
<ide> $this->expectExceptionMessage('Unable to load `Authors` association. Ensure foreign key in `Articles`');
<ide> $query->first();
<ide> }
<add>
<add> /**
<add> * Test that formatResults in a joined association finder doesn't dirty
<add> * the root entity.
<add> *
<add> * @return void
<add> */
<add> public function testAttachToFormatResultsNoDirtyResults()
<add> {
<add> $this->setAppNamespace('TestApp');
<add> $articles = $this->getTableLocator()->get('Articles');
<add> $articles->belongsTo('Authors')
<add> ->setFinder('formatted');
<add>
<add> $query = $articles->find()
<add> ->where(['Articles.id' => 1])
<add> ->contain('Authors');
<add> $result = $query->firstOrFail();
<add>
<add> $this->assertNotEmpty($result->author);
<add> $this->assertNotEmpty($result->author->formatted);
<add> $this->assertFalse($result->isDirty(), 'Record should be clean as it was pulled from the db.');
<add>>>>>>>> 165bdab680... Fix dirty associations that use formatResults
<add> }
<ide> }
<ide><path>tests/TestCase/ORM/Behavior/TranslateBehaviorTest.php
<ide> public function testFindSingleLocaleBelongsToMany()
<ide> $this->assertSame('Translated Info', $result->tags[0]->special_tags[0]->extra_info);
<ide> }
<ide>
<add> /**
<add> * Tests that parent entity isn't dirty when containing a translated association
<add> *
<add> * @return void
<add> */
<add> public function testGetAssociationNotDirtyBelongsTo()
<add> {
<add> $table = $this->getTableLocator()->get('Articles');
<add> $authors = $table->belongsTo('Authors')->getTarget();
<add> $authors->addBehavior('Translate', ['fields' => ['name']]);
<add>
<add> $authors->setLocale('eng');
<add>
<add> $entity = $table->get(1);
<add> $this->assertNotEmpty($entity);
<add> $entity = $table->loadInto($entity, ['Authors']);
<add> $this->assertFalse($entity->isDirty());
<add> $this->assertNotEmpty($entity->author);
<add> $this->assertFalse($entity->author->isDirty());
<add>
<add> $entity = $table->get(1, ['contain' => ['Authors']]);
<add> $this->assertNotEmpty($entity);
<add> $this->assertFalse($entity->isDirty());
<add> $this->assertNotEmpty($entity->author);
<add> $this->assertFalse($entity->author->isDirty());
<add> }
<add>
<add> /**
<add> * Tests that parent entity isn't dirty when containing a translated association
<add> *
<add> * @return void
<add> */
<add> public function testGetAssociationNotDirtyHasOne()
<add> {
<add> $table = $this->getTableLocator()->get('Authors');
<add> $table->hasOne('Articles');
<add> $table->Articles->addBehavior('Translate', ['fields' => ['title']]);
<add>
<add> $entity = $table->get(1);
<add> $this->assertNotEmpty($entity);
<add> $entity = $table->loadInto($entity, ['Articles']);
<add> $this->assertFalse($entity->isDirty());
<add> $this->assertNotEmpty($entity->article);
<add> $this->assertFalse($entity->article->isDirty());
<add>
<add> $entity = $table->get(1, ['contain' => 'Articles']);
<add> $this->assertNotEmpty($entity);
<add> $this->assertFalse($entity->isDirty());
<add> $this->assertNotEmpty($entity->article);
<add> $this->assertFalse($entity->article->isDirty());
<add> }
<add>
<ide> /**
<ide> * Tests that updating an existing record translations work
<ide> *
<ide><path>tests/test_app/TestApp/Model/Table/ArticlesTable.php
<ide> public function initialize(array $config): void
<ide> * Find published
<ide> *
<ide> * @param \Cake\ORM\Query $query The query
<add> * @param array $options The options
<ide> * @return \Cake\ORM\Query
<ide> */
<ide> public function findPublished($query, array $options = [])
<ide><path>tests/test_app/TestApp/Model/Table/AuthorsTable.php
<ide> public function findByAuthor(Query $query, array $options = [])
<ide>
<ide> return $query;
<ide> }
<add>
<add> /**
<add> * Finder that applies a formatter to test dirty associations
<add> *
<add> * @param \Cake\ORM\Query $query The query
<add> * @param array $options The options
<add> * @return \Cake\ORM\Query
<add> */
<add> public function findFormatted(Query $query, array $options = [])
<add> {
<add> return $query->formatResults(function ($results) {
<add> return $results->map(function ($author) {
<add> $author->formatted = $author->name . '!!';
<add>
<add> return $author;
<add> });
<add> });
<add> }
<ide> } | 5 |
Javascript | Javascript | fix a crash when rounding very small numbers | 7c2062f2895376e823a4c0ac04671297d734fe99 | <ide><path>d3.js
<ide> var d3_format_types = {
<ide> f: function(x, p) { return x.toFixed(p); },
<ide> r: function(x, p) {
<ide> var n = 1 + Math.floor(1e-15 + Math.log(x) / Math.LN10);
<del> return d3.round(x, p - n).toFixed(Math.max(0, p - n));
<add> return d3.round(x, p - n).toFixed(Math.max(0, Math.min(20, p - n)));
<ide> }
<ide> };
<ide>
<ide><path>d3.min.js
<del>(function(){function cs(){return"circle"}function cr(){return 64}function cq(a,b){var c=(a.ownerSVGElement||a).createSVGPoint();if(cp<0&&(window.scrollX||window.scrollY)){var d=d3.select(document.body).append("svg:svg").style("position","absolute").style("top",0).style("left",0),e=d[0][0].getScreenCTM();cp=!e.f&&!e.e,d.remove()}cp?(c.x=b.pageX,c.y=b.pageY):(c.x=b.clientX,c.y=b.clientY),c=c.matrixTransform(a.getScreenCTM().inverse());return[c.x,c.y]}function co(a){return function(){var b=a.apply(this,arguments),c=b[0],d=b[1]+bA;return[c*Math.cos(d),c*Math.sin(d)]}}function cn(a){return[a.x,a.y]}function cm(a){return a.endAngle}function cl(a){return a.startAngle}function ck(a){return a.radius}function cj(a){return a.target}function ci(a){return a.source}function ch(a){return function(b,c){return a[c][1]}}function cg(a){return function(b,c){return a[c][0]}}function cf(a){function i(f){if(f.length<1)return null;var i=bH(this,f,b,d),j=bH(this,f,b===c?cg(i):c,d===e?ch(i):e);return"M"+g(a(j),h)+"L"+g(a(i.reverse()),h)+"Z"}var b=bI,c=bI,d=0,e=bJ,f="linear",g=bK[f],h=.7;i.x=function(a){if(!arguments.length)return c;b=c=a;return i},i.x0=function(a){if(!arguments.length)return b;b=a;return i},i.x1=function(a){if(!arguments.length)return c;c=a;return i},i.y=function(a){if(!arguments.length)return e;d=e=a;return i},i.y0=function(a){if(!arguments.length)return d;d=a;return i},i.y1=function(a){if(!arguments.length)return e;e=a;return i},i.interpolate=function(a){if(!arguments.length)return f;g=bK[f=a];return i},i.tension=function(a){if(!arguments.length)return h;h=a;return i};return i}function ce(a){var b,c=-1,d=a.length,e,f;while(++c<d)b=a[c],e=b[0],f=b[1]+bA,b[0]=e*Math.cos(f),b[1]=e*Math.sin(f);return a}function cd(a){return a.length<3?bL(a):a[0]+bR(a,cc(a))}function cc(a){var 
b=[],c,d,e,f,g=cb(a),h=-1,i=a.length-1;while(++h<i)c=ca(a[h],a[h+1]),Math.abs(c)<1e-6?g[h]=g[h+1]=0:(d=g[h]/c,e=g[h+1]/c,f=d*d+e*e,f>9&&(f=c*3/Math.sqrt(f),g[h]=f*d,g[h+1]=f*e));h=-1;while(++h<=i)f=(a[Math.min(i,h+1)][0]-a[Math.max(0,h-1)][0])/(6*(1+g[h]*g[h])),b.push([f||0,g[h]*f||0]);return b}function cb(a){var b=0,c=a.length-1,d=[],e=a[0],f=a[1],g=d[0]=ca(e,f);while(++b<c)d[b]=g+(g=ca(e=f,f=a[b+1]));d[b]=g;return d}function ca(a,b){return(b[1]-a[1])/(b[0]-a[0])}function b_(a,b,c){a.push("C",bX(bY,b),",",bX(bY,c),",",bX(bZ,b),",",bX(bZ,c),",",bX(b$,b),",",bX(b$,c))}function bX(a,b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]}function bW(a,b){var c=a.length-1,d=a[0][0],e=a[0][1],f=a[c][0]-d,g=a[c][1]-e,h=-1,i,j;while(++h<=c)i=a[h],j=h/c,i[0]=b*i[0]+(1-b)*(d+j*f),i[1]=b*i[1]+(1-b)*(e+j*g);return bT(a)}function bV(a){var b,c=-1,d=a.length,e=d+4,f,g=[],h=[];while(++c<4)f=a[c%d],g.push(f[0]),h.push(f[1]);b=[bX(b$,g),",",bX(b$,h)],--c;while(++c<e)f=a[c%d],g.shift(),g.push(f[0]),h.shift(),h.push(f[1]),b_(b,g,h);return b.join("")}function bU(a){if(a.length<4)return bL(a);var b=[],c=-1,d=a.length,e,f=[0],g=[0];while(++c<3)e=a[c],f.push(e[0]),g.push(e[1]);b.push(bX(b$,f)+","+bX(b$,g)),--c;while(++c<d)e=a[c],f.shift(),f.push(e[0]),g.shift(),g.push(e[1]),b_(b,f,g);return b.join("")}function bT(a){if(a.length<3)return bL(a);var b=[],c=1,d=a.length,e=a[0],f=e[0],g=e[1],h=[f,f,f,(e=a[1])[0]],i=[g,g,g,e[1]];b.push(f,",",g),b_(b,h,i);while(++c<d)e=a[c],h.shift(),h.push(e[0]),i.shift(),i.push(e[1]),b_(b,h,i);c=-1;while(++c<2)h.shift(),h.push(e[0]),i.shift(),i.push(e[1]),b_(b,h,i);return b.join("")}function bS(a,b){var c=[],d=(1-b)/2,e,f=a[0],g=a[1],h=1,i=a.length;while(++h<i)e=f,f=g,g=a[h],c.push([d*(g[0]-e[0]),d*(g[1]-e[1])]);return c}function bR(a,b){if(b.length<1||a.length!=b.length&&a.length!=b.length+2)return bL(a);var 
c=a.length!=b.length,d="",e=a[0],f=a[1],g=b[0],h=g,i=1;c&&(d+="Q"+(f[0]-g[0]*2/3)+","+(f[1]-g[1]*2/3)+","+f[0]+","+f[1],e=a[1],i=2);if(b.length>1){h=b[1],f=a[i],i++,d+="C"+(e[0]+g[0])+","+(e[1]+g[1])+","+(f[0]-h[0])+","+(f[1]-h[1])+","+f[0]+","+f[1];for(var j=2;j<b.length;j++,i++)f=a[i],h=b[j],d+="S"+(f[0]-h[0])+","+(f[1]-h[1])+","+f[0]+","+f[1]}if(c){var k=a[i];d+="Q"+(f[0]+h[0]*2/3)+","+(f[1]+h[1]*2/3)+","+k[0]+","+k[1]}return d}function bQ(a,b,c){return a.length<3?bL(a):a[0]+bR(a,bS(a,b))}function bP(a,b){return a.length<3?bL(a):a[0]+bR((a.push(a[0]),a),bS([a[a.length-2]].concat(a,[a[1]]),b))}function bO(a,b){return a.length<4?bL(a):a[1]+bR(a.slice(1,a.length-1),bS(a,b))}function bN(a){var b=[],c=0,d=a.length,e=a[0];b.push(e[0],",",e[1]);while(++c<d)b.push("H",(e=a[c])[0],"V",e[1]);return b.join("")}function bM(a){var b=[],c=0,d=a.length,e=a[0];b.push(e[0],",",e[1]);while(++c<d)b.push("V",(e=a[c])[1],"H",e[0]);return b.join("")}function bL(a){var b=[],c=0,d=a.length,e=a[0];b.push(e[0],",",e[1]);while(++c<d)b.push("L",(e=a[c])[0],",",e[1]);return b.join("")}function bJ(a){return a[1]}function bI(a){return a[0]}function bH(a,b,c,d){var e=[],f=-1,g=b.length,h=typeof c=="function",i=typeof d=="function",j;if(h&&i)while(++f<g)e.push([c.call(a,j=b[f],f),d.call(a,j,f)]);else if(h)while(++f<g)e.push([c.call(a,b[f],f),d]);else if(i)while(++f<g)e.push([c,d.call(a,b[f],f)]);else while(++f<g)e.push([c,d]);return e}function bG(a){function g(d){return d.length<1?null:"M"+e(a(bH(this,d,b,c)),f)}var b=bI,c=bJ,d="linear",e=bK[d],f=.7;g.x=function(a){if(!arguments.length)return b;b=a;return g},g.y=function(a){if(!arguments.length)return c;c=a;return g},g.interpolate=function(a){if(!arguments.length)return d;e=bK[d=a];return g},g.tension=function(a){if(!arguments.length)return f;f=a;return g};return g}function bF(a){return a.endAngle}function bE(a){return a.startAngle}function bD(a){return a.outerRadius}function bC(a){return a.innerRadius}function bv(a){return function(b){return 
b<0?-Math.pow(-b,a):Math.pow(b,a)}}function bu(a){return a.toPrecision(1)}function bt(a){return-Math.log(-a)/Math.LN10}function bs(a){return Math.log(a)/Math.LN10}function br(a,b,c,d){var e=[],f=[],g=0,h=a.length;while(++g<h)e.push(c(a[g-1],a[g])),f.push(d(b[g-1],b[g]));return function(b){var c=d3.bisect(a,b,1,a.length-1)-1;return f[c](e[c](b))}}function bq(a,b,c,d){var e=c(a[0],a[1]),f=d(b[0],b[1]);return function(a){return f(e(a))}}function bp(a,b){return d3.format(",."+Math.max(0,-Math.floor(Math.log(bn(a,b)[2])/Math.LN10+.01))+"f")}function bo(a,b){return d3.range.apply(d3,bn(a,b))}function bn(a,b){var c=bi(a),d=c[1]-c[0],e=Math.pow(10,Math.floor(Math.log(d/b)/Math.LN10)),f=b/d*e;f<=.15?e*=10:f<=.35?e*=5:f<=.75&&(e*=2),c[0]=Math.ceil(c[0]/e)*e,c[1]=Math.floor(c[1]/e)*e+e*.5,c[2]=e;return c}function bm(a){a=Math.pow(10,Math.round(Math.log(a)/Math.LN10)-1);return{floor:function(b){return Math.floor(b/a)*a},ceil:function(b){return Math.ceil(b/a)*a}}}function bl(a,b){a.range=d3.rebind(a,b.range),a.rangeRound=d3.rebind(a,b.rangeRound),a.interpolate=d3.rebind(a,b.interpolate),a.clamp=d3.rebind(a,b.clamp);return a}function bk(){return Math}function bj(a,b){var c=0,d=a.length-1,e=a[c],f=a[d],g;f<e&&(g=c,c=d,d=g,g=e,e=f,f=g),b=b(f-e),a[c]=b.floor(e),a[d]=b.ceil(f);return a}function bi(a){var b=a[0],c=a[a.length-1];return b<c?[b,c]:[c,b]}function bh(){}function bf(){var a=null,b=bb,c=Infinity;while(b)b.flush?b=a?a.next=b.next:bb=b.next:(c=Math.min(c,b.then+b.delay),b=(a=b).next);return c}function be(){var a,b=Date.now(),c=bb;while(c)a=b-c.then,a>=c.delay&&(c.flush=c.callback(a)),c=c.next;var d=bf()-b;d>24?(isFinite(d)&&(clearTimeout(bd),bd=setTimeout(be,d)),bc=0):(bc=1,bg(be))}function ba(a){return typeof a=="function"?function(b,c,d){var e=a.call(this,b,c)+"";return d!=e&&d3.interpolate(d,e)}:(a=a+"",function(b,c,d){return d!=a&&d3.interpolate(d,a)})}function _(a){function n(b){var h=!0,l=-1;a.each(function(){if(i[++l]!==2){var 
a=(b-j[l])/k[l],n=this.__transition__,o,p,q=e[l];if(a<1){h=!1;if(a<0)return}else a=1;if(i[l]){if(!n||n.active!==c){i[l]=2;return}}else{if(!n||n.active>c){i[l]=2;return}i[l]=1,g.start.dispatch.apply(this,arguments),q=e[l]={},n.active=c;for(p in d)if(o=d[p].apply(this,arguments))q[p]=o}o=m(a);for(p in q)q[p].call(this,o);if(a===1){i[l]=2;if(n.active===c){var r=n.owner;r===c&&(delete this.__transition__,f&&this.parentNode&&this.parentNode.removeChild(this)),$=c,g.end.dispatch.apply(this,arguments),$=0,n.owner=r}}}});return h}var b={},c=$||++Z,d={},e=[],f=!1,g=d3.dispatch("start","end"),i=[],j=[],k=[],l,m=d3.ease("cubic-in-out");a.each(function(){(this.__transition__||(this.__transition__={})).owner=c}),b.delay=function(c){var d=Infinity,e=-1;typeof c=="function"?a.each(function(a,b){var f=j[++e]=+c.apply(this,arguments);f<d&&(d=f)}):(d=+c,a.each(function(a,b){j[++e]=d})),d3.timer(n,d);return b},b.duration=function(c){var d=-1;typeof c=="function"?(l=0,a.each(function(a,b){var e=k[++d]=+c.apply(this,arguments);e>l&&(l=e)})):(l=+c,a.each(function(a,b){k[++d]=l}));return b},b.ease=function(a){m=typeof a=="function"?a:d3.ease.apply(d3,arguments);return b},b.attrTween=function(a,c){function f(b,d){var e=c.call(this,b,d,this.getAttributeNS(a.space,a.local));return e&&function(b){this.setAttributeNS(a.space,a.local,e(b))}}function e(b,d){var e=c.call(this,b,d,this.getAttribute(a));return e&&function(b){this.setAttribute(a,e(b))}}d["attr."+a]=a.local?f:e;return b},b.attr=function(a,c){return b.attrTween(a,ba(c))},b.styleTween=function(a,c,e){function f(b,d){var f=c.call(this,b,d,window.getComputedStyle(this,null).getPropertyValue(a));return f&&function(b){this.style.setProperty(a,f(b),e)}}arguments.length<3&&(e=null),d["style."+a]=f;return b},b.style=function(a,c,d){arguments.length<3&&(d=null);return b.styleTween(a,ba(c),d)},b.text=function(a){d.text=function(b,c){this.textContent=typeof a=="function"?a.call(this,b,c):a};return b},b.select=function(b){var 
c,d=_(a.select(b)).ease(m);c=-1,d.delay(function(a,b){return j[++c]}),c=-1,d.duration(function(a,b){return k[++c]});return d},b.selectAll=function(b){var c,d=_(a.selectAll(b)).ease(m);c=-1,d.delay(function(a,b){return j[b?c:++c]}),c=-1,d.duration(function(a,b){return k[b?c:++c]});return d},b.remove=function(){f=!0;return b},b.each=function(a,c){g[a].add(c);return b},b.call=h;return b.delay(0).duration(250)}function Y(a){return{__data__:a}}function X(a){arguments.length||(a=d3.ascending);return function(b,c){return a(b&&b.__data__,c&&c.__data__)}}function W(a){function b(b){var c=[],d,e,f,g;for(var h=0,i=a.length;h<i;h++){f=a[h],c.push(d=[]),d.parentNode=f.parentNode;for(var j=0,k=f.length;j<k;j++)(g=f[j])?(d.push(e=b(f.parentNode)),e.__data__=g.__data__):d.push(null)}return V(c)}a.append=function(a){function d(b){return b.appendChild(document.createElementNS(a.space,a.local))}function c(b){return b.appendChild(document.createElement(a))}a=d3.ns.qualify(a);return b(a.local?d:c)},a.insert=function(a,c){function e(b){return b.insertBefore(document.createElementNS(a.space,a.local),S(c,b))}function d(b){return b.insertBefore(document.createElement(a),S(c,b))}a=d3.ns.qualify(a);return b(a.local?e:d)};return a}function V(a){function d(b){for(var c=0,d=a.length;c<d;c++){var e=a[c];for(var f=0,g=e.length;f<g;f++){var h=e[f];if(h)return b.call(h,h.__data__,f)}}return null}function c(b){var c=[],d,e,f;for(var g=0,h=a.length;g<h;g++){e=a[g];for(var i=0,j=e.length;i<j;i++)if(f=e[i])c.push(d=b(f)),d.parentNode=f}return V(c)}function b(b){var c=[],d,e,f,g;for(var h=0,i=a.length;h<i;h++){f=a[h],c.push(d=[]),d.parentNode=f.parentNode;for(var j=0,k=f.length;j<k;j++)(g=f[j])?(d.push(e=b(g)),e&&"__data__"in g&&(e.__data__=g.__data__)):d.push(null)}return V(c)}a.select=function(a){return b(function(b){return S(a,b)})},a.selectAll=function(a){return c(function(b){return T(a,b)})},a.filter=function(b){var c=[],d,e,f;for(var 
g=0,h=a.length;g<h;g++){e=a[g],c.push(d=[]),d.parentNode=e.parentNode;for(var i=0,j=e.length;i<j;i++)(f=e[i])&&b.call(f,f.__data__,i)&&d.push(f)}return V(c)},a.map=function(b){var c,d;for(var e=0,f=a.length;e<f;e++){c=a[e];for(var g=0,h=c.length;g<h;g++)if(d=c[g])d.__data__=b.call(d,d.__data__,g)}return a},a.data=function(b,c){function g(a,b){var g=0,h=a.length,i=b.length,j=Math.min(h,i),k=Math.max(h,i),l=[],m=[],n=[],o,p;if(c){var q={},r=[],s,t=b.length;for(g=0;g<h;g++)s=c.call(o=a[g],o.__data__,g),s in q?n[t++]=o:q[s]=o,r.push(s);for(g=0;g<i;g++)o=q[s=c.call(b,p=b[g],g)],o?(o.__data__=p,l[g]=o,m[g]=n[g]=null):(m[g]=Y(p),l[g]=n[g]=null),delete q[s];for(g=0;g<h;g++)r[g]in q&&(n[g]=a[g])}else{for(;g<j;g++)o=a[g],p=b[g],o?(o.__data__=p,l[g]=o,m[g]=n[g]=null):(m[g]=Y(p),l[g]=n[g]=null);for(;g<i;g++)m[g]=Y(b[g]),l[g]=n[g]=null;for(;g<k;g++)n[g]=a[g],m[g]=l[g]=null}m.parentNode=l.parentNode=n.parentNode=a.parentNode,d.push(m),e.push(l),f.push(n)}var d=[],e=[],f=[],h=-1,i=a.length,j;if(typeof b=="function")while(++h<i)g(j=a[h],b.call(j,j.parentNode.__data__,h));else while(++h<i)g(j=a[h],b);var k=V(e);k.enter=function(){return W(d)},k.exit=function(){return V(f)};return k},a.each=function(b){for(var c=0,d=a.length;c<d;c++){var e=a[c];for(var f=0,g=e.length;f<g;f++){var h=e[f];h&&b.call(h,h.__data__,f)}}return a},a.empty=function(){return!d(function(){return!0})},a.node=function(){return d(function(){return this})},a.attr=function(b,c){function j(){var a=c.apply(this,arguments);a==null?this.removeAttributeNS(b.space,b.local):this.setAttributeNS(b.space,b.local,a)}function i(){var a=c.apply(this,arguments);a==null?this.removeAttribute(b):this.setAttribute(b,a)}function h(){this.setAttributeNS(b.space,b.local,c)}function g(){this.setAttribute(b,c)}function f(){this.removeAttributeNS(b.space,b.local)}function e(){this.removeAttribute(b)}b=d3.ns.qualify(b);return arguments.length<2?d(b.local?function(){return this.getAttributeNS(b.space,b.local)}:function(){return 
this.getAttribute(b)}):a.each(c==null?b.local?f:e:typeof c=="function"?b.local?j:i:b.local?h:g)},a.classed=function(b,c){function i(){(c.apply(this,arguments)?f:h).call(this)}function h(){if(a=this.classList)return a.remove(b);var a=this.className,c=a.baseVal!=null,d=c?a.baseVal:a;d=g(d.replace(e," ")),c?a.baseVal=d:this.className=d}function f(){if(a=this.classList)return a.add(b);var a=this.className,c=a.baseVal!=null,d=c?a.baseVal:a;e.lastIndex=0,e.test(d)||(d=g(d+" "+b),c?a.baseVal=d:this.className=d)}var e=new RegExp("(^|\\s+)"+d3.requote(b)+"(\\s+|$)","g");return arguments.length<2?d(function(){if(a=this.classList)return a.contains(b);var a=this.className;e.lastIndex=0;return e.test(a.baseVal!=null?a.baseVal:a)}):a.each(typeof c=="function"?i:c?f:h)},a.style=function(b,c,e){function h(){var a=c.apply(this,arguments);a==null?this.style.removeProperty(b):this.style.setProperty(b,a,e)}function g(){this.style.setProperty(b,c,e)}function f(){this.style.removeProperty(b)}arguments.length<3&&(e="");return arguments.length<2?d(function(){return window.getComputedStyle(this,null).getPropertyValue(b)}):a.each(c==null?f:typeof c=="function"?h:g)},a.property=function(b,c){function g(){var a=c.apply(this,arguments);a==null?delete this[b]:this[b]=a}function f(){this[b]=c}function e(){delete this[b]}b=d3.ns.qualify(b);return arguments.length<2?d(function(){return this[b]}):a.each(c==null?e:typeof c=="function"?g:f)},a.text=function(b){function e(){this.textContent=b.apply(this,arguments)}function c(){this.textContent=b}return arguments.length<1?d(function(){return this.textContent}):a.each(typeof b=="function"?e:c)},a.html=function(b){function e(){this.innerHTML=b.apply(this,arguments)}function c(){this.innerHTML=b}return arguments.length<1?d(function(){return this.innerHTML}):a.each(typeof b=="function"?e:c)},a.append=function(a){function d(b){return b.appendChild(document.createElementNS(a.space,a.local))}function c(b){return 
b.appendChild(document.createElement(a))}a=d3.ns.qualify(a);return b(a.local?d:c)},a.insert=function(a,c){function e(b){return b.insertBefore(document.createElementNS(a.space,a.local),S(c,b))}function d(b){return b.insertBefore(document.createElement(a),S(c,b))}a=d3.ns.qualify(a);return b(a.local?e:d)},a.remove=function(){return a.each(function(){var a=this.parentNode;a&&a.removeChild(this)})},a.sort=function(b){b=X.apply(this,arguments);for(var c=0,d=a.length;c<d;c++){var e=a[c];e.sort(b);for(var f=1,g=e.length,h=e[0];f<g;f++){var i=e[f];i&&(h&&h.parentNode.insertBefore(i,h.nextSibling),h=i)}}return a},a.on=function(b,c,d){arguments.length<3&&(d=!1);var e=b.indexOf("."),f=e===-1?b:b.substring(0,e),g="__on"+b;return a.each(function(a,b){function h(a){var d=d3.event;d3.event=a;try{c.call(this,e.__data__,b)}finally{d3.event=d}}this[g]&&this.removeEventListener(f,this[g],d),c&&this.addEventListener(f,this[g]=h,d);var e=this})},a.transition=function(){return _(a)},a.call=h;return a}function R(a,b,c){function g(a){return Math.round(f(a)*255)}function f(a){a>360?a-=360:a<0&&(a+=360);return a<60?d+(e-d)*a/60:a<180?e:a<240?d+(e-d)*(240-a)/60:d}var d,e;a=a%360,a<0&&(a+=360),b=b<0?0:b>1?1:b,c=c<0?0:c>1?1:c,e=c<=.5?c*(1+b):c+b-c*b,d=2*c-e;return H(g(a+120),g(a),g(a-120))}function Q(a,b,c){this.h=a,this.s=b,this.l=c}function P(a,b,c){return new Q(a,b,c)}function M(a){var b=parseFloat(a);return a.charAt(a.length-1)==="%"?Math.round(b*2.55):b}function L(a,b,c){var d=Math.min(a/=255,b/=255,c/=255),e=Math.max(a,b,c),f=e-d,g,h,i=(e+d)/2;f?(h=i<.5?f/(e+d):f/(2-e-d),a==e?g=(b-c)/f+(b<c?6:0):b==e?g=(c-a)/f+2:g=(a-b)/f+4,g*=60):h=g=0;return P(g,h,i)}function K(a,b,c){var d=0,e=0,f=0,g,h,i;g=/([a-z]+)\((.*)\)/i.exec(a);if(g){h=g[2].split(",");switch(g[1]){case"hsl":return c(parseFloat(h[0]),parseFloat(h[1])/100,parseFloat(h[2])/100);case"rgb":return b(M(h[0]),M(h[1]),M(h[2]))}}if(i=N[a])return 
b(i.r,i.g,i.b);a!=null&&a.charAt(0)==="#"&&(a.length===4?(d=a.charAt(1),d+=d,e=a.charAt(2),e+=e,f=a.charAt(3),f+=f):a.length===7&&(d=a.substring(1,3),e=a.substring(3,5),f=a.substring(5,7)),d=parseInt(d,16),e=parseInt(e,16),f=parseInt(f,16));return b(d,e,f)}function J(a){return a<16?"0"+a.toString(16):a.toString(16)}function I(a,b,c){this.r=a,this.g=b,this.b=c}function H(a,b,c){return new I(a,b,c)}function G(a,b){b=1/(b-(a=+a));return function(c){return Math.max(0,Math.min(1,(c-a)*b))}}function F(a,b){b=1/(b-(a=+a));return function(c){return(c-a)*b}}function E(a){return a in D||/\bcolor\b/.test(a)?d3.interpolateRgb:d3.interpolate}function B(a){return a<1/2.75?7.5625*a*a:a<2/2.75?7.5625*(a-=1.5/2.75)*a+.75:a<2.5/2.75?7.5625*(a-=2.25/2.75)*a+.9375:7.5625*(a-=2.625/2.75)*a+.984375}function A(a){a||(a=1.70158);return function(b){return b*b*((a+1)*b-a)}}function z(a,b){var c;arguments.length<2&&(b=.45),arguments.length<1?(a=1,c=b/4):c=b/(2*Math.PI)*Math.asin(1/a);return function(d){return 1+a*Math.pow(2,10*-d)*Math.sin((d-c)*2*Math.PI/b)}}function y(a){return 1-Math.sqrt(1-a*a)}function x(a){return Math.pow(2,10*(a-1))}function w(a){return 1-Math.cos(a*Math.PI/2)}function v(a){return function(b){return Math.pow(b,a)}}function u(a){return a}function t(a){return function(b){return.5*(b<.5?a(2*b):2-a(2-2*b))}}function s(a){return function(b){return 1-a(1-b)}}function n(a){var b=a.lastIndexOf("."),c=b>=0?a.substring(b):(b=a.length,""),d=[];while(b>0)d.push(a.substring(b-=3,b+3));return d.reverse().join(",")+c}function m(a){return a+""}function j(a){var b={},c=[];b.add=function(a){for(var d=0;d<c.length;d++)if(c[d].listener==a)return b;c.push({listener:a,on:!0});return b},b.remove=function(a){for(var d=0;d<c.length;d++){var e=c[d];if(e.listener==a){e.on=!1,c=c.slice(0,d).concat(c.slice(d+1));break}}return b},b.dispatch=function(){var a=c;for(var b=0,d=a.length;b<d;b++){var e=a[b];e.on&&e.listener.apply(this,arguments)}};return b}function 
h(a){a.apply(this,(arguments[0]=this,arguments));return this}function g(a){return a.replace(/(^\s+)|(\s+$)/g,"").replace(/\s+/g," ")}function f(a){return a==null}function e(a){return a.length}function c(a){return Array.prototype.slice.call(a)}function b(a){var b=-1,c=a.length,d=[];while(++b<c)d.push(a[b]);return d}d3={version:"1.29.4"},Date.now||(Date.now=function(){return+(new Date)}),Object.create||(Object.create=function(a){function b(){}b.prototype=a;return new b});var a=c;try{a(document.documentElement.childNodes)[0].nodeType}catch(d){a=b}d3.functor=function(a){return typeof a=="function"?a:function(){return a}},d3.rebind=function(a,b){return function(){var c=b.apply(a,arguments);return arguments.length?a:c}},d3.ascending=function(a,b){return a<b?-1:a>b?1:a>=b?0:NaN},d3.descending=function(a,b){return b<a?-1:b>a?1:b>=a?0:NaN},d3.min=function(a,b){var c=-1,d=a.length,e,f;if(arguments.length===1){while(++c<d&&((e=a[c])==null||e!=e))e=undefined;while(++c<d)(f=a[c])!=null&&e>f&&(e=f)}else{while(++c<d&&((e=b.call(a,a[c],c))==null||e!=e))e=undefined;while(++c<d)(f=b.call(a,a[c],c))!=null&&e>f&&(e=f)}return e},d3.max=function(a,b){var c=-1,d=a.length,e,f;if(arguments.length===1){while(++c<d&&((e=a[c])==null||e!=e))e=undefined;while(++c<d)(f=a[c])!=null&&f>e&&(e=f)}else{while(++c<d&&((e=b.call(a,a[c],c))==null||e!=e))e=undefined;while(++c<d)(f=b.call(a,a[c],c))!=null&&f>e&&(e=f)}return e},d3.sum=function(a,b){var c=0,d=a.length,e,f=-1;if(arguments.length===1)while(++f<d)isNaN(e=+a[f])||(c+=e);else while(++f<d)isNaN(e=+b.call(a,a[f],f))||(c+=e);return c},d3.quantile=function(a,b){var c=(a.length-1)*b+1,d=Math.floor(c),e=a[d-1],f=c-d;return f?e+f*(a[d]-e):e},d3.zip=function(){if(!(f=arguments.length))return[];for(var a=-1,b=d3.min(arguments,e),c=Array(b);++a<b;)for(var d=-1,f,g=c[a]=Array(f);++d<f;)g[d]=arguments[d][a];return c},d3.bisectLeft=function(a,b,c,d){arguments.length<3&&(c=0),arguments.length<4&&(d=a.length);while(c<d){var e=c+d>>1;a[e]<b?c=e+1:d=e}return 
c},d3.bisect=d3.bisectRight=function(a,b,c,d){arguments.length<3&&(c=0),arguments.length<4&&(d=a.length);while(c<d){var e=c+d>>1;b<a[e]?d=e:c=e+1}return c},d3.first=function(a,b){var c=0,d=a.length,e=a[0],f;arguments.length===1&&(b=d3.ascending);while(++c<d)b.call(a,e,f=a[c])>0&&(e=f);return e},d3.last=function(a,b){var c=0,d=a.length,e=a[0],f;arguments.length===1&&(b=d3.ascending);while(++c<d)b.call(a,e,f=a[c])<=0&&(e=f);return e},d3.nest=function(){function g(a,d){if(d>=b.length)return a;var e=[],f=c[d++],h;for(h in a)e.push({key:h,values:g(a[h],d)});f&&e.sort(function(a,b){return f(a.key,b.key)});return e}function f(c,g){if(g>=b.length)return e?e.call(a,c):d?c.sort(d):c;var h=-1,i=c.length,j=b[g++],k,l,m={};while(++h<i)(k=j(l=c[h]))in m?m[k].push(l):m[k]=[l];for(k in m)m[k]=f(m[k],g);return m}var a={},b=[],c=[],d,e;a.map=function(a){return f(a,0)},a.entries=function(a){return g(f(a,0),0)},a.key=function(c){b.push(c);return a},a.sortKeys=function(d){c[b.length-1]=d;return a},a.sortValues=function(b){d=b;return a},a.rollup=function(b){e=b;return a};return a},d3.keys=function(a){var b=[];for(var c in a)b.push(c);return b},d3.values=function(a){var b=[];for(var c in a)b.push(a[c]);return b},d3.entries=function(a){var b=[];for(var c in a)b.push({key:c,value:a[c]});return b},d3.permute=function(a,b){var c=[],d=-1,e=b.length;while(++d<e)c[d]=a[b[d]];return c},d3.merge=function(a){return Array.prototype.concat.apply([],a)},d3.split=function(a,b){var c=[],d=[],e,g=-1,h=a.length;arguments.length<2&&(b=f);while(++g<h)b.call(d,e=a[g],g)?d=[]:(d.length||c.push(d),d.push(e));return c},d3.range=function(a,b,c){arguments.length<3&&(c=1,arguments.length<2&&(b=a,a=0));if((b-a)/c==Infinity)throw new Error("infinite range");var d=[],e=-1,f;if(c<0)while((f=a+c*++e)>b)d.push(f);else while((f=a+c*++e)<b)d.push(f);return d},d3.requote=function(a){return a.replace(i,"\\$&")};var i=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g;d3.round=function(a,b){return 
b?Math.round(a*Math.pow(10,b))*Math.pow(10,-b):Math.round(a)},d3.xhr=function(a,b,c){var d=new XMLHttpRequest;arguments.length<3?c=b:b&&d.overrideMimeType&&d.overrideMimeType(b),d.open("GET",a,!0),d.onreadystatechange=function(){d.readyState===4&&c(d.status<300?d:null)},d.send(null)},d3.text=function(a,b,c){function d(a){c(a&&a.responseText)}arguments.length<3&&(c=b,b=null),d3.xhr(a,b,d)},d3.json=function(a,b){d3.text(a,"application/json",function(a){b(a?JSON.parse(a):null)})},d3.html=function(a,b){d3.text(a,"text/html",function(a){if(a!=null){var c=document.createRange();c.selectNode(document.body),a=c.createContextualFragment(a)}b(a)})},d3.xml=function(a,b,c){function d(a){c(a&&a.responseXML)}arguments.length<3&&(c=b,b=null),d3.xhr(a,b,d)},d3.ns={prefix:{svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"},qualify:function(a){var b=a.indexOf(":");return b<0?a:{space:d3.ns.prefix[a.substring(0,b)],local:a.substring(b+1)}}},d3.dispatch=function(a){var b={},c;for(var d=0,e=arguments.length;d<e;d++)c=arguments[d],b[c]=j(c);return b},d3.format=function(a){var b=k.exec(a),c=b[1]||" ",d=b[3]||"",e=b[5],f=+b[6],g=b[7],h=b[8],i=b[9],j=!1,o=!1;h&&(h=h.substring(1)),e&&(c="0",g&&(f-=Math.floor((f-1)/4)));switch(i){case"n":g=!0,i="g";break;case"%":j=!0,i="f";break;case"p":j=!0,i="r";break;case"d":o=!0,h="0"}i=l[i]||m;return function(a){var b=j?a*100:+a,k=b<0&&(b=-b)?"−":d;if(o&&b%1)return"";a=i(b,h);if(e){var l=a.length+k.length;l<f&&(a=Array(f-l+1).join(c)+a),g&&(a=n(a)),a=k+a}else{g&&(a=n(a)),a=k+a;var l=a.length;l<f&&(a=Array(f-l+1).join(c)+a)}j&&(a+="%");return a}};var k=/(?:([^{])?([<>=^]))?([+\- ])?(#)?(0)?([0-9]+)?(,)?(\.[0-9]+)?([a-zA-Z%])?/,l={g:function(a,b){return a.toPrecision(b)},e:function(a,b){return a.toExponential(b)},f:function(a,b){return a.toFixed(b)},r:function(a,b){var c=1+Math.floor(1e-15+Math.log(a)/Math.LN10);return 
d3.round(a,b-c).toFixed(Math.max(0,b-c))}},o=v(2),p=v(3),q={linear:function(){return u},poly:v,quad:function(){return o},cubic:function(){return p},sin:function(){return w},exp:function(){return x},circle:function(){return y},elastic:z,back:A,bounce:function(){return B}},r={"in":function(a){return a},out:s,"in-out":t,"out-in":function(a){return t(s(a))}};d3.ease=function(a){var b=a.indexOf("-"),c=b>=0?a.substring(0,b):a,d=b>=0?a.substring(b+1):"in";return r[d](q[c].apply(null,Array.prototype.slice.call(arguments,1)))},d3.event=null,d3.interpolate=function(a,b){var c=d3.interpolators.length,d;while(--c>=0&&!(d=d3.interpolators[c](a,b)));return d},d3.interpolateNumber=function(a,b){b-=a;return function(c){return a+b*c}},d3.interpolateRound=function(a,b){b-=a;return function(c){return Math.round(a+b*c)}},d3.interpolateString=function(a,b){var c,d,e,f=0,g=0,h=[],i=[],j,k;C.lastIndex=0;for(d=0;c=C.exec(b);++d)c.index&&h.push(b.substring(f,g=c.index)),i.push({i:h.length,x:c[0]}),h.push(null),f=C.lastIndex;f<b.length&&h.push(b.substring(f));for(d=0,j=i.length;(c=C.exec(a))&&d<j;++d){k=i[d];if(k.x==c[0]){if(k.i)if(h[k.i+1]==null){h[k.i-1]+=k.x,h.splice(k.i,1);for(e=d+1;e<j;++e)i[e].i--}else{h[k.i-1]+=k.x+h[k.i+1],h.splice(k.i,2);for(e=d+1;e<j;++e)i[e].i-=2}else if(h[k.i+1]==null)h[k.i]=k.x;else{h[k.i]=k.x+h[k.i+1],h.splice(k.i+1,1);for(e=d+1;e<j;++e)i[e].i--}i.splice(d,1),j--,d--}else k.x=d3.interpolateNumber(parseFloat(c[0]),parseFloat(k.x))}while(d<j)k=i.pop(),h[k.i+1]==null?h[k.i]=k.x:(h[k.i]=k.x+h[k.i+1],h.splice(k.i+1,1)),j--;return h.length===1?h[0]==null?i[0].x:function(){return b}:function(a){for(d=0;d<j;++d)h[(k=i[d]).i]=k.x(a);return h.join("")}},d3.interpolateRgb=function(a,b){a=d3.rgb(a),b=d3.rgb(b);var c=a.r,d=a.g,e=a.b,f=b.r-c,g=b.g-d,h=b.b-e;return function(a){return"rgb("+Math.round(c+f*a)+","+Math.round(d+g*a)+","+Math.round(e+h*a)+")"}},d3.interpolateHsl=function(a,b){a=d3.hsl(a),b=d3.hsl(b);var c=a.h,d=a.s,e=a.l,f=b.h-c,g=b.s-d,h=b.l-e;return 
function(a){return R(c+f*a,d+g*a,e+h*a).toString()}},d3.interpolateArray=function(a,b){var c=[],d=[],e=a.length,f=b.length,g=Math.min(a.length,b.length),h;for(h=0;h<g;++h)c.push(d3.interpolate(a[h],b[h]));for(;h<e;++h)d[h]=a[h];for(;h<f;++h)d[h]=b[h];return function(a){for(h=0;h<g;++h)d[h]=c[h](a);return d}},d3.interpolateObject=function(a,b){var c={},d={},e;for(e in a)e in b?c[e]=E(e)(a[e],b[e]):d[e]=a[e];for(e in b)e in a||(d[e]=b[e]);return function(a){for(e in c)d[e]=c[e](a);return d}};var C=/[-+]?(?:\d+\.\d+|\d+\.|\.\d+|\d+)(?:[eE][-]?\d+)?/g,D={background:1,fill:1,stroke:1};d3.interpolators=[d3.interpolateObject,function(a,b){return b instanceof Array&&d3.interpolateArray(a,b)},function(a,b){return typeof b=="string"&&d3.interpolateString(String(a),b)},function(a,b){return(typeof b=="string"?b in N||/^(#|rgb\(|hsl\()/.test(b):b instanceof I||b instanceof Q)&&d3.interpolateRgb(String(a),b)},function(a,b){return typeof b=="number"&&d3.interpolateNumber(+a,b)}],d3.rgb=function(a,b,c){return arguments.length===1?K(""+a,H,R):H(~~a,~~b,~~c)},I.prototype.brighter=function(a){a=Math.pow(.7,arguments.length?a:1);var b=this.r,c=this.g,d=this.b,e=30;if(!b&&!c&&!d)return H(e,e,e);b&&b<e&&(b=e),c&&c<e&&(c=e),d&&d<e&&(d=e);return H(Math.min(255,Math.floor(b/a)),Math.min(255,Math.floor(c/a)),Math.min(255,Math.floor(d/a)))},I.prototype.darker=function(a){a=Math.pow(.7,arguments.length?a:1);return H(Math.max(0,Math.floor(a*this.r)),Math.max(0,Math.floor(a*this.g)),Math.max(0,Math.floor(a*this.b)))},I.prototype.hsl=function(){return L(this.r,this.g,this.b)},I.prototype.toString=function(){return"#"+J(this.r)+J(this.g)+J(this.b)};var 
N={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintc
ream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"};for(var O in N)N[O]=K(N[O],H,R);d3.hsl=function(a,b,c){return arguments.length===1?K(""+a,L,P):P(+a,+b,+c)},Q.prototype.brighter=function(a){a=Math.pow(.7,arguments.length?a:1);return P(this.h,this.s,this.l/a)},Q.prototype.darker=function(a){a=Math.pow(.7,arguments.length?a:1);return P(this.h,this.s,a*this.l)},Q.prototype.rgb=function(){return R(this.h,this.s,this.l)},Q.prototype.toString=function(){return"hsl("+this.h+","+this.s*100+"%,"+this.l*100+"%)"};var S=function(a,b){return b.querySelector(a)},T=function(b,c){return a(c.querySelectorAll(b))};typeof Sizzle=="function"&&(S=function(a,b){return Sizzle(a,b)[0]},T=function(a,b){return Sizzle.uniqueSort(Sizzle(a,b))});var U=V([[document]]);U[0].parentNode=document.documentElement,d3.select=function(a){return typeof a=="string"?U.select(a):V([[a]])},d3.selectAll=function(b){return typeof b=="string"?U.selectAll(b):V([a(b)])},d3.transition=U.transition;var Z=0,$=0,bb=null,bc
<del>,bd;d3.timer=function(a,b){var c=Date.now(),d=!1,e,f=bb;if(arguments.length<2)b=0;else if(!isFinite(b))return;while(f){if(f.callback===a){f.then=c,f.delay=b,d=!0;break}e=f,f=f.next}d||(bb={callback:a,then:c,delay:b,next:bb}),bc||(bd=clearTimeout(bd),bc=1,bg(be))},d3.timer.flush=function(){var a,b=Date.now(),c=bb;while(c)a=b-c.then,c.delay||(c.flush=c.callback(a)),c=c.next;bf()};var bg=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(a){setTimeout(a,17)};d3.scale={},d3.scale.linear=function(){function h(a){return e(a)}function g(){var g=a.length==2?bq:br,i=d?G:F;e=g(a,b,i,c),f=g(b,a,i,d3.interpolate);return h}var a=[0,1],b=[0,1],c=d3.interpolate,d=!1,e,f;h.invert=function(a){return f(a)},h.domain=function(b){if(!arguments.length)return a;a=b.map(Number);return g()},h.range=function(a){if(!arguments.length)return b;b=a;return g()},h.rangeRound=function(a){return h.range(a).interpolate(d3.interpolateRound)},h.clamp=function(a){if(!arguments.length)return d;d=a;return g()},h.interpolate=function(a){if(!arguments.length)return c;c=a;return g()},h.ticks=function(b){return bo(a,b)},h.tickFormat=function(b){return bp(a,b)},h.nice=function(){bj(a,bm);return g()};return g()},d3.scale.log=function(){function d(c){return a(b(c))}var a=d3.scale.linear(),b=bs,c=b.pow;d.invert=function(b){return c(a.invert(b))},d.domain=function(e){if(!arguments.length)return a.domain().map(c);b=e[0]<0?bt:bs,c=b.pow,a.domain(e.map(b));return d},d.nice=function(){a.domain(bj(a.domain(),bk));return d},d.ticks=function(){var d=bi(a.domain()),e=[];if(d.every(isFinite)){var f=Math.floor(d[0]),g=Math.ceil(d[1]),h=c(d[0]),i=c(d[1]);if(b===bt){e.push(c(f));for(;f++<g;)for(var j=9;j>0;j--)e.push(c(f)*j)}else{for(;f<g;f++)for(var j=1;j<10;j++)e.push(c(f)*j);e.push(c(f))}for(f=0;e[f]<h;f++);for(g=e.length;e[g-1]>i;g--);e=e.slice(f,g)}return e},d.tickFormat=function(){return 
bu};return bl(d,a)},bs.pow=function(a){return Math.pow(10,a)},bt.pow=function(a){return-Math.pow(10,-a)},d3.scale.pow=function(){function e(b){return a(c(b))}var a=d3.scale.linear(),b=1,c=Number,d=c;e.invert=function(b){return d(a.invert(b))},e.domain=function(f){if(!arguments.length)return a.domain().map(d);c=bv(b),d=bv(1/b),a.domain(f.map(c));return e},e.ticks=function(a){return bo(e.domain(),a)},e.tickFormat=function(a){return bp(e.domain(),a)},e.nice=function(){return e.domain(bj(e.domain(),bm))},e.exponent=function(a){if(!arguments.length)return b;var c=e.domain();b=a;return e.domain(c)};return bl(e,a)},d3.scale.sqrt=function(){return d3.scale.pow().exponent(.5)},d3.scale.ordinal=function(){function f(d){var e=d in b?b[d]:b[d]=a.push(d)-1;return c[e%c.length]}var a=[],b={},c=[],d=0,e=bh;f.domain=function(c){if(!arguments.length)return a;a=c,b={};var d=-1,g=-1,h=a.length;while(++d<h)c=a[d],c in b||(b[c]=++g);e();return f},f.range=function(a){if(!arguments.length)return c;c=a,e=bh;return f},f.rangePoints=function(b,g){arguments.length<2&&(g=0),(e=function(){var e=b[0],f=b[1],h=(f-e)/(a.length-1+g);c=a.length==1?[(e+f)/2]:d3.range(e+h*g/2,f+h/2,h),d=0})();return f},f.rangeBands=function(b,g){arguments.length<2&&(g=0),(e=function(){var e=b[0],f=b[1],h=(f-e)/(a.length+g);c=d3.range(e+h*g,f,h),d=h*(1-g)})();return f},f.rangeRoundBands=function(b,g){arguments.length<2&&(g=0),(e=function(){var e=b[0],f=b[1],h=f-e,i=Math.floor(h/(a.length+g)),j=h-(a.length-g)*i;c=d3.range(e+Math.round(j/2),f,i),d=Math.round(i*(1-g))})();return f},f.rangeBand=function(){return d};return f},d3.scale.category10=function(){return d3.scale.ordinal().range(bw)},d3.scale.category20=function(){return d3.scale.ordinal().range(bx)},d3.scale.category20b=function(){return d3.scale.ordinal().range(by)},d3.scale.category20c=function(){return d3.scale.ordinal().range(bz)};var 
bw=["#1f77b4","#ff7f0e","#2ca02c","#d62728","#9467bd","#8c564b","#e377c2","#7f7f7f","#bcbd22","#17becf"],bx=["#1f77b4","#aec7e8","#ff7f0e","#ffbb78","#2ca02c","#98df8a","#d62728","#ff9896","#9467bd","#c5b0d5","#8c564b","#c49c94","#e377c2","#f7b6d2","#7f7f7f","#c7c7c7","#bcbd22","#dbdb8d","#17becf","#9edae5"],by=["#393b79","#5254a3","#6b6ecf","#9c9ede","#637939","#8ca252","#b5cf6b","#cedb9c","#8c6d31","#bd9e39","#e7ba52","#e7cb94","#843c39","#ad494a","#d6616b","#e7969c","#7b4173","#a55194","#ce6dbd","#de9ed6"],bz=["#3182bd","#6baed6","#9ecae1","#c6dbef","#e6550d","#fd8d3c","#fdae6b","#fdd0a2","#31a354","#74c476","#a1d99b","#c7e9c0","#756bb1","#9e9ac8","#bcbddc","#dadaeb","#636363","#969696","#bdbdbd","#d9d9d9"];d3.scale.quantile=function(){function e(a){return isNaN(a=+a)?NaN:b[d3.bisect(c,a)]}function d(){var d=0,e=a.length,f=b.length;c.length=Math.max(0,f-1);while(++d<f)c[d-1]=d3.quantile(a,d/f)}var a=[],b=[],c=[];e.domain=function(b){if(!arguments.length)return a;a=b.filter(function(a){return!isNaN(a)}).sort(d3.ascending),d();return e},e.range=function(a){if(!arguments.length)return b;b=a,d();return e},e.quantiles=function(){return c};return e},d3.scale.quantize=function(){function f(b){return e[Math.max(0,Math.min(d,Math.floor(c*(b-a))))]}var a=0,b=1,c=2,d=1,e=[0,1];f.domain=function(d){if(!arguments.length)return[a,b];a=+d[0],b=+d[d.length-1],c=e.length/(b-a);return f},f.range=function(g){if(!arguments.length)return e;e=g,c=e.length/(b-a),d=e.length-1;return f};return f},d3.svg={},d3.svg.arc=function(){function e(){var e=a.apply(this,arguments),f=b.apply(this,arguments),g=c.apply(this,arguments)+bA,h=d.apply(this,arguments)+bA,i=(h<g&&(i=g,g=h,h=i),h-g),j=i<Math.PI?"0":"1",k=Math.cos(g),l=Math.sin(g),m=Math.cos(h),n=Math.sin(h);return i>=bB?e?"M0,"+f+"A"+f+","+f+" 0 1,1 0,"+ -f+"A"+f+","+f+" 0 1,1 0,"+f+"M0,"+e+"A"+e+","+e+" 0 1,1 0,"+ -e+"A"+e+","+e+" 0 1,1 0,"+e+"Z":"M0,"+f+"A"+f+","+f+" 0 1,1 0,"+ -f+"A"+f+","+f+" 0 1,1 
0,"+f+"Z":e?"M"+f*k+","+f*l+"A"+f+","+f+" 0 "+j+",1 "+f*m+","+f*n+"L"+e*m+","+e*n+"A"+e+","+e+" 0 "+j+",0 "+e*k+","+e*l+"Z":"M"+f*k+","+f*l+"A"+f+","+f+" 0 "+j+",1 "+f*m+","+f*n+"L0,0"+"Z"}var a=bC,b=bD,c=bE,d=bF;e.innerRadius=function(b){if(!arguments.length)return a;a=d3.functor(b);return e},e.outerRadius=function(a){if(!arguments.length)return b;b=d3.functor(a);return e},e.startAngle=function(a){if(!arguments.length)return c;c=d3.functor(a);return e},e.endAngle=function(a){if(!arguments.length)return d;d=d3.functor(a);return e},e.centroid=function(){var e=(a.apply(this,arguments)+b.apply(this,arguments))/2,f=(c.apply(this,arguments)+d.apply(this,arguments))/2+bA;return[Math.cos(f)*e,Math.sin(f)*e]};return e};var bA=-Math.PI/2,bB=2*Math.PI-1e-6;d3.svg.line=function(){return bG(Object)};var bK={linear:bL,"step-before":bM,"step-after":bN,basis:bT,"basis-open":bU,"basis-closed":bV,bundle:bW,cardinal:bQ,"cardinal-open":bO,"cardinal-closed":bP,monotone:cd},bY=[0,2/3,1/3,0],bZ=[0,1/3,2/3,0],b$=[0,1/6,2/3,1/6];d3.svg.line.radial=function(){var a=bG(ce);a.radius=a.x,delete a.x,a.angle=a.y,delete a.y;return a},d3.svg.area=function(){return cf(Object)},d3.svg.area.radial=function(){var a=cf(ce);a.radius=a.x,delete a.x,a.innerRadius=a.x0,delete a.x0,a.outerRadius=a.x1,delete a.x1,a.angle=a.y,delete a.y,a.startAngle=a.y0,delete a.y0,a.endAngle=a.y1,delete a.y1;return a},d3.svg.chord=function(){function j(a,b,c,d){return"Q 0,0 "+d}function i(a,b){return"A"+a+","+a+" 0 0,1 "+b}function h(a,b){return a.a0==b.a0&&a.a1==b.a1}function g(a,b,f,g){var h=b.call(a,f,g),i=c.call(a,h,g),j=d.call(a,h,g)+bA,k=e.call(a,h,g)+bA;return{r:i,a0:j,a1:k,p0:[i*Math.cos(j),i*Math.sin(j)],p1:[i*Math.cos(k),i*Math.sin(k)]}}function f(c,d){var e=g(this,a,c,d),f=g(this,b,c,d);return"M"+e.p0+i(e.r,e.p1)+(h(e,f)?j(e.r,e.p1,e.r,e.p0):j(e.r,e.p1,f.r,f.p0)+i(f.r,f.p1)+j(f.r,f.p1,e.r,e.p0))+"Z"}var a=ci,b=cj,c=ck,d=bE,e=bF;f.radius=function(a){if(!arguments.length)return c;c=d3.functor(a);return 
f},f.source=function(b){if(!arguments.length)return a;a=d3.functor(b);return f},f.target=function(a){if(!arguments.length)return b;b=d3.functor(a);return f},f.startAngle=function(a){if(!arguments.length)return d;d=d3.functor(a);return f},f.endAngle=function(a){if(!arguments.length)return e;e=d3.functor(a);return f};return f},d3.svg.diagonal=function(){function d(d,e){var f=a.call(this,d,e),g=b.call(this,d,e),h=(f.y+g.y)/2,i=[f,{x:f.x,y:h},{x:g.x,y:h},g];i=i.map(c);return"M"+i[0]+"C"+i[1]+" "+i[2]+" "+i[3]}var a=ci,b=cj,c=cn;d.source=function(b){if(!arguments.length)return a;a=d3.functor(b);return d},d.target=function(a){if(!arguments.length)return b;b=d3.functor(a);return d},d.projection=function(a){if(!arguments.length)return c;c=a;return d};return d},d3.svg.diagonal.radial=function(){var a=d3.svg.diagonal(),b=cn,c=a.projection;a.projection=function(a){return arguments.length?c(co(b=a)):b};return a},d3.svg.mouse=function(a){return cq(a,d3.event)};var cp=/WebKit/.test(navigator.userAgent)?-1:0;d3.svg.touches=function(b){var c=d3.event.touches;return c?a(c).map(function(a){var c=cq(b,a);c.identifier=a.identifier;return c}):[]},d3.svg.symbol=function(){function c(c,d){return(ct[a.call(this,c,d)]||ct.circle)(b.call(this,c,d))}var a=cs,b=cr;c.type=function(b){if(!arguments.length)return a;a=d3.functor(b);return c},c.size=function(a){if(!arguments.length)return b;b=d3.functor(a);return c};return c};var ct={circle:function(a){var b=Math.sqrt(a/Math.PI);return"M0,"+b+"A"+b+","+b+" 0 1,1 0,"+ -b+"A"+b+","+b+" 0 1,1 0,"+b+"Z"},cross:function(a){var b=Math.sqrt(a/5)/2;return"M"+ -3*b+","+ -b+"H"+ -b+"V"+ -3*b+"H"+b+"V"+ -b+"H"+3*b+"V"+b+"H"+b+"V"+3*b+"H"+ -b+"V"+b+"H"+ -3*b+"Z"},diamond:function(a){var b=Math.sqrt(a/(2*cv)),c=b*cv;return"M0,"+ -b+"L"+c+",0"+" 0,"+b+" "+ -c+",0"+"Z"},square:function(a){var b=Math.sqrt(a)/2;return"M"+ -b+","+ -b+"L"+b+","+ -b+" "+b+","+b+" "+ -b+","+b+"Z"},"triangle-down":function(a){var b=Math.sqrt(a/cu),c=b*cu/2;return"M0,"+c+"L"+b+","+ -c+" 
"+ -b+","+ -c+"Z"},"triangle-up":function(a){var b=Math.sqrt(a/cu),c=b*cu/2;return"M0,"+ -c+"L"+b+","+c+" "+ -b+","+c+"Z"}};d3.svg.symbolTypes=d3.keys(ct);var cu=Math.sqrt(3),cv=Math.tan(30*Math.PI/180)})()
<ide>\ No newline at end of file
<add>(function(){function cs(){return"circle"}function cr(){return 64}function cq(a,b){var c=(a.ownerSVGElement||a).createSVGPoint();if(cp<0&&(window.scrollX||window.scrollY)){var d=d3.select(document.body).append("svg:svg").style("position","absolute").style("top",0).style("left",0),e=d[0][0].getScreenCTM();cp=!e.f&&!e.e,d.remove()}cp?(c.x=b.pageX,c.y=b.pageY):(c.x=b.clientX,c.y=b.clientY),c=c.matrixTransform(a.getScreenCTM().inverse());return[c.x,c.y]}function co(a){return function(){var b=a.apply(this,arguments),c=b[0],d=b[1]+bA;return[c*Math.cos(d),c*Math.sin(d)]}}function cn(a){return[a.x,a.y]}function cm(a){return a.endAngle}function cl(a){return a.startAngle}function ck(a){return a.radius}function cj(a){return a.target}function ci(a){return a.source}function ch(a){return function(b,c){return a[c][1]}}function cg(a){return function(b,c){return a[c][0]}}function cf(a){function i(f){if(f.length<1)return null;var i=bH(this,f,b,d),j=bH(this,f,b===c?cg(i):c,d===e?ch(i):e);return"M"+g(a(j),h)+"L"+g(a(i.reverse()),h)+"Z"}var b=bI,c=bI,d=0,e=bJ,f="linear",g=bK[f],h=.7;i.x=function(a){if(!arguments.length)return c;b=c=a;return i},i.x0=function(a){if(!arguments.length)return b;b=a;return i},i.x1=function(a){if(!arguments.length)return c;c=a;return i},i.y=function(a){if(!arguments.length)return e;d=e=a;return i},i.y0=function(a){if(!arguments.length)return d;d=a;return i},i.y1=function(a){if(!arguments.length)return e;e=a;return i},i.interpolate=function(a){if(!arguments.length)return f;g=bK[f=a];return i},i.tension=function(a){if(!arguments.length)return h;h=a;return i};return i}function ce(a){var b,c=-1,d=a.length,e,f;while(++c<d)b=a[c],e=b[0],f=b[1]+bA,b[0]=e*Math.cos(f),b[1]=e*Math.sin(f);return a}function cd(a){return a.length<3?bL(a):a[0]+bR(a,cc(a))}function cc(a){var 
b=[],c,d,e,f,g=cb(a),h=-1,i=a.length-1;while(++h<i)c=ca(a[h],a[h+1]),Math.abs(c)<1e-6?g[h]=g[h+1]=0:(d=g[h]/c,e=g[h+1]/c,f=d*d+e*e,f>9&&(f=c*3/Math.sqrt(f),g[h]=f*d,g[h+1]=f*e));h=-1;while(++h<=i)f=(a[Math.min(i,h+1)][0]-a[Math.max(0,h-1)][0])/(6*(1+g[h]*g[h])),b.push([f||0,g[h]*f||0]);return b}function cb(a){var b=0,c=a.length-1,d=[],e=a[0],f=a[1],g=d[0]=ca(e,f);while(++b<c)d[b]=g+(g=ca(e=f,f=a[b+1]));d[b]=g;return d}function ca(a,b){return(b[1]-a[1])/(b[0]-a[0])}function b_(a,b,c){a.push("C",bX(bY,b),",",bX(bY,c),",",bX(bZ,b),",",bX(bZ,c),",",bX(b$,b),",",bX(b$,c))}function bX(a,b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]}function bW(a,b){var c=a.length-1,d=a[0][0],e=a[0][1],f=a[c][0]-d,g=a[c][1]-e,h=-1,i,j;while(++h<=c)i=a[h],j=h/c,i[0]=b*i[0]+(1-b)*(d+j*f),i[1]=b*i[1]+(1-b)*(e+j*g);return bT(a)}function bV(a){var b,c=-1,d=a.length,e=d+4,f,g=[],h=[];while(++c<4)f=a[c%d],g.push(f[0]),h.push(f[1]);b=[bX(b$,g),",",bX(b$,h)],--c;while(++c<e)f=a[c%d],g.shift(),g.push(f[0]),h.shift(),h.push(f[1]),b_(b,g,h);return b.join("")}function bU(a){if(a.length<4)return bL(a);var b=[],c=-1,d=a.length,e,f=[0],g=[0];while(++c<3)e=a[c],f.push(e[0]),g.push(e[1]);b.push(bX(b$,f)+","+bX(b$,g)),--c;while(++c<d)e=a[c],f.shift(),f.push(e[0]),g.shift(),g.push(e[1]),b_(b,f,g);return b.join("")}function bT(a){if(a.length<3)return bL(a);var b=[],c=1,d=a.length,e=a[0],f=e[0],g=e[1],h=[f,f,f,(e=a[1])[0]],i=[g,g,g,e[1]];b.push(f,",",g),b_(b,h,i);while(++c<d)e=a[c],h.shift(),h.push(e[0]),i.shift(),i.push(e[1]),b_(b,h,i);c=-1;while(++c<2)h.shift(),h.push(e[0]),i.shift(),i.push(e[1]),b_(b,h,i);return b.join("")}function bS(a,b){var c=[],d=(1-b)/2,e,f=a[0],g=a[1],h=1,i=a.length;while(++h<i)e=f,f=g,g=a[h],c.push([d*(g[0]-e[0]),d*(g[1]-e[1])]);return c}function bR(a,b){if(b.length<1||a.length!=b.length&&a.length!=b.length+2)return bL(a);var 
c=a.length!=b.length,d="",e=a[0],f=a[1],g=b[0],h=g,i=1;c&&(d+="Q"+(f[0]-g[0]*2/3)+","+(f[1]-g[1]*2/3)+","+f[0]+","+f[1],e=a[1],i=2);if(b.length>1){h=b[1],f=a[i],i++,d+="C"+(e[0]+g[0])+","+(e[1]+g[1])+","+(f[0]-h[0])+","+(f[1]-h[1])+","+f[0]+","+f[1];for(var j=2;j<b.length;j++,i++)f=a[i],h=b[j],d+="S"+(f[0]-h[0])+","+(f[1]-h[1])+","+f[0]+","+f[1]}if(c){var k=a[i];d+="Q"+(f[0]+h[0]*2/3)+","+(f[1]+h[1]*2/3)+","+k[0]+","+k[1]}return d}function bQ(a,b,c){return a.length<3?bL(a):a[0]+bR(a,bS(a,b))}function bP(a,b){return a.length<3?bL(a):a[0]+bR((a.push(a[0]),a),bS([a[a.length-2]].concat(a,[a[1]]),b))}function bO(a,b){return a.length<4?bL(a):a[1]+bR(a.slice(1,a.length-1),bS(a,b))}function bN(a){var b=[],c=0,d=a.length,e=a[0];b.push(e[0],",",e[1]);while(++c<d)b.push("H",(e=a[c])[0],"V",e[1]);return b.join("")}function bM(a){var b=[],c=0,d=a.length,e=a[0];b.push(e[0],",",e[1]);while(++c<d)b.push("V",(e=a[c])[1],"H",e[0]);return b.join("")}function bL(a){var b=[],c=0,d=a.length,e=a[0];b.push(e[0],",",e[1]);while(++c<d)b.push("L",(e=a[c])[0],",",e[1]);return b.join("")}function bJ(a){return a[1]}function bI(a){return a[0]}function bH(a,b,c,d){var e=[],f=-1,g=b.length,h=typeof c=="function",i=typeof d=="function",j;if(h&&i)while(++f<g)e.push([c.call(a,j=b[f],f),d.call(a,j,f)]);else if(h)while(++f<g)e.push([c.call(a,b[f],f),d]);else if(i)while(++f<g)e.push([c,d.call(a,b[f],f)]);else while(++f<g)e.push([c,d]);return e}function bG(a){function g(d){return d.length<1?null:"M"+e(a(bH(this,d,b,c)),f)}var b=bI,c=bJ,d="linear",e=bK[d],f=.7;g.x=function(a){if(!arguments.length)return b;b=a;return g},g.y=function(a){if(!arguments.length)return c;c=a;return g},g.interpolate=function(a){if(!arguments.length)return d;e=bK[d=a];return g},g.tension=function(a){if(!arguments.length)return f;f=a;return g};return g}function bF(a){return a.endAngle}function bE(a){return a.startAngle}function bD(a){return a.outerRadius}function bC(a){return a.innerRadius}function bv(a){return function(b){return 
b<0?-Math.pow(-b,a):Math.pow(b,a)}}function bu(a){return a.toPrecision(1)}function bt(a){return-Math.log(-a)/Math.LN10}function bs(a){return Math.log(a)/Math.LN10}function br(a,b,c,d){var e=[],f=[],g=0,h=a.length;while(++g<h)e.push(c(a[g-1],a[g])),f.push(d(b[g-1],b[g]));return function(b){var c=d3.bisect(a,b,1,a.length-1)-1;return f[c](e[c](b))}}function bq(a,b,c,d){var e=c(a[0],a[1]),f=d(b[0],b[1]);return function(a){return f(e(a))}}function bp(a,b){return d3.format(",."+Math.max(0,-Math.floor(Math.log(bn(a,b)[2])/Math.LN10+.01))+"f")}function bo(a,b){return d3.range.apply(d3,bn(a,b))}function bn(a,b){var c=bi(a),d=c[1]-c[0],e=Math.pow(10,Math.floor(Math.log(d/b)/Math.LN10)),f=b/d*e;f<=.15?e*=10:f<=.35?e*=5:f<=.75&&(e*=2),c[0]=Math.ceil(c[0]/e)*e,c[1]=Math.floor(c[1]/e)*e+e*.5,c[2]=e;return c}function bm(a){a=Math.pow(10,Math.round(Math.log(a)/Math.LN10)-1);return{floor:function(b){return Math.floor(b/a)*a},ceil:function(b){return Math.ceil(b/a)*a}}}function bl(a,b){a.range=d3.rebind(a,b.range),a.rangeRound=d3.rebind(a,b.rangeRound),a.interpolate=d3.rebind(a,b.interpolate),a.clamp=d3.rebind(a,b.clamp);return a}function bk(){return Math}function bj(a,b){var c=0,d=a.length-1,e=a[c],f=a[d],g;f<e&&(g=c,c=d,d=g,g=e,e=f,f=g),b=b(f-e),a[c]=b.floor(e),a[d]=b.ceil(f);return a}function bi(a){var b=a[0],c=a[a.length-1];return b<c?[b,c]:[c,b]}function bh(){}function bf(){var a=null,b=bb,c=Infinity;while(b)b.flush?b=a?a.next=b.next:bb=b.next:(c=Math.min(c,b.then+b.delay),b=(a=b).next);return c}function be(){var a,b=Date.now(),c=bb;while(c)a=b-c.then,a>=c.delay&&(c.flush=c.callback(a)),c=c.next;var d=bf()-b;d>24?(isFinite(d)&&(clearTimeout(bd),bd=setTimeout(be,d)),bc=0):(bc=1,bg(be))}function ba(a){return typeof a=="function"?function(b,c,d){var e=a.call(this,b,c)+"";return d!=e&&d3.interpolate(d,e)}:(a=a+"",function(b,c,d){return d!=a&&d3.interpolate(d,a)})}function _(a){function n(b){var h=!0,l=-1;a.each(function(){if(i[++l]!==2){var 
a=(b-j[l])/k[l],n=this.__transition__,o,p,q=e[l];if(a<1){h=!1;if(a<0)return}else a=1;if(i[l]){if(!n||n.active!==c){i[l]=2;return}}else{if(!n||n.active>c){i[l]=2;return}i[l]=1,g.start.dispatch.apply(this,arguments),q=e[l]={},n.active=c;for(p in d)if(o=d[p].apply(this,arguments))q[p]=o}o=m(a);for(p in q)q[p].call(this,o);if(a===1){i[l]=2;if(n.active===c){var r=n.owner;r===c&&(delete this.__transition__,f&&this.parentNode&&this.parentNode.removeChild(this)),$=c,g.end.dispatch.apply(this,arguments),$=0,n.owner=r}}}});return h}var b={},c=$||++Z,d={},e=[],f=!1,g=d3.dispatch("start","end"),i=[],j=[],k=[],l,m=d3.ease("cubic-in-out");a.each(function(){(this.__transition__||(this.__transition__={})).owner=c}),b.delay=function(c){var d=Infinity,e=-1;typeof c=="function"?a.each(function(a,b){var f=j[++e]=+c.apply(this,arguments);f<d&&(d=f)}):(d=+c,a.each(function(a,b){j[++e]=d})),d3.timer(n,d);return b},b.duration=function(c){var d=-1;typeof c=="function"?(l=0,a.each(function(a,b){var e=k[++d]=+c.apply(this,arguments);e>l&&(l=e)})):(l=+c,a.each(function(a,b){k[++d]=l}));return b},b.ease=function(a){m=typeof a=="function"?a:d3.ease.apply(d3,arguments);return b},b.attrTween=function(a,c){function f(b,d){var e=c.call(this,b,d,this.getAttributeNS(a.space,a.local));return e&&function(b){this.setAttributeNS(a.space,a.local,e(b))}}function e(b,d){var e=c.call(this,b,d,this.getAttribute(a));return e&&function(b){this.setAttribute(a,e(b))}}d["attr."+a]=a.local?f:e;return b},b.attr=function(a,c){return b.attrTween(a,ba(c))},b.styleTween=function(a,c,e){function f(b,d){var f=c.call(this,b,d,window.getComputedStyle(this,null).getPropertyValue(a));return f&&function(b){this.style.setProperty(a,f(b),e)}}arguments.length<3&&(e=null),d["style."+a]=f;return b},b.style=function(a,c,d){arguments.length<3&&(d=null);return b.styleTween(a,ba(c),d)},b.text=function(a){d.text=function(b,c){this.textContent=typeof a=="function"?a.call(this,b,c):a};return b},b.select=function(b){var 
c,d=_(a.select(b)).ease(m);c=-1,d.delay(function(a,b){return j[++c]}),c=-1,d.duration(function(a,b){return k[++c]});return d},b.selectAll=function(b){var c,d=_(a.selectAll(b)).ease(m);c=-1,d.delay(function(a,b){return j[b?c:++c]}),c=-1,d.duration(function(a,b){return k[b?c:++c]});return d},b.remove=function(){f=!0;return b},b.each=function(a,c){g[a].add(c);return b},b.call=h;return b.delay(0).duration(250)}function Y(a){return{__data__:a}}function X(a){arguments.length||(a=d3.ascending);return function(b,c){return a(b&&b.__data__,c&&c.__data__)}}function W(a){function b(b){var c=[],d,e,f,g;for(var h=0,i=a.length;h<i;h++){f=a[h],c.push(d=[]),d.parentNode=f.parentNode;for(var j=0,k=f.length;j<k;j++)(g=f[j])?(d.push(e=b(f.parentNode)),e.__data__=g.__data__):d.push(null)}return V(c)}a.append=function(a){function d(b){return b.appendChild(document.createElementNS(a.space,a.local))}function c(b){return b.appendChild(document.createElement(a))}a=d3.ns.qualify(a);return b(a.local?d:c)},a.insert=function(a,c){function e(b){return b.insertBefore(document.createElementNS(a.space,a.local),S(c,b))}function d(b){return b.insertBefore(document.createElement(a),S(c,b))}a=d3.ns.qualify(a);return b(a.local?e:d)};return a}function V(a){function d(b){for(var c=0,d=a.length;c<d;c++){var e=a[c];for(var f=0,g=e.length;f<g;f++){var h=e[f];if(h)return b.call(h,h.__data__,f)}}return null}function c(b){var c=[],d,e,f;for(var g=0,h=a.length;g<h;g++){e=a[g];for(var i=0,j=e.length;i<j;i++)if(f=e[i])c.push(d=b(f)),d.parentNode=f}return V(c)}function b(b){var c=[],d,e,f,g;for(var h=0,i=a.length;h<i;h++){f=a[h],c.push(d=[]),d.parentNode=f.parentNode;for(var j=0,k=f.length;j<k;j++)(g=f[j])?(d.push(e=b(g)),e&&"__data__"in g&&(e.__data__=g.__data__)):d.push(null)}return V(c)}a.select=function(a){return b(function(b){return S(a,b)})},a.selectAll=function(a){return c(function(b){return T(a,b)})},a.filter=function(b){var c=[],d,e,f;for(var 
g=0,h=a.length;g<h;g++){e=a[g],c.push(d=[]),d.parentNode=e.parentNode;for(var i=0,j=e.length;i<j;i++)(f=e[i])&&b.call(f,f.__data__,i)&&d.push(f)}return V(c)},a.map=function(b){var c,d;for(var e=0,f=a.length;e<f;e++){c=a[e];for(var g=0,h=c.length;g<h;g++)if(d=c[g])d.__data__=b.call(d,d.__data__,g)}return a},a.data=function(b,c){function g(a,b){var g=0,h=a.length,i=b.length,j=Math.min(h,i),k=Math.max(h,i),l=[],m=[],n=[],o,p;if(c){var q={},r=[],s,t=b.length;for(g=0;g<h;g++)s=c.call(o=a[g],o.__data__,g),s in q?n[t++]=o:q[s]=o,r.push(s);for(g=0;g<i;g++)o=q[s=c.call(b,p=b[g],g)],o?(o.__data__=p,l[g]=o,m[g]=n[g]=null):(m[g]=Y(p),l[g]=n[g]=null),delete q[s];for(g=0;g<h;g++)r[g]in q&&(n[g]=a[g])}else{for(;g<j;g++)o=a[g],p=b[g],o?(o.__data__=p,l[g]=o,m[g]=n[g]=null):(m[g]=Y(p),l[g]=n[g]=null);for(;g<i;g++)m[g]=Y(b[g]),l[g]=n[g]=null;for(;g<k;g++)n[g]=a[g],m[g]=l[g]=null}m.parentNode=l.parentNode=n.parentNode=a.parentNode,d.push(m),e.push(l),f.push(n)}var d=[],e=[],f=[],h=-1,i=a.length,j;if(typeof b=="function")while(++h<i)g(j=a[h],b.call(j,j.parentNode.__data__,h));else while(++h<i)g(j=a[h],b);var k=V(e);k.enter=function(){return W(d)},k.exit=function(){return V(f)};return k},a.each=function(b){for(var c=0,d=a.length;c<d;c++){var e=a[c];for(var f=0,g=e.length;f<g;f++){var h=e[f];h&&b.call(h,h.__data__,f)}}return a},a.empty=function(){return!d(function(){return!0})},a.node=function(){return d(function(){return this})},a.attr=function(b,c){function j(){var a=c.apply(this,arguments);a==null?this.removeAttributeNS(b.space,b.local):this.setAttributeNS(b.space,b.local,a)}function i(){var a=c.apply(this,arguments);a==null?this.removeAttribute(b):this.setAttribute(b,a)}function h(){this.setAttributeNS(b.space,b.local,c)}function g(){this.setAttribute(b,c)}function f(){this.removeAttributeNS(b.space,b.local)}function e(){this.removeAttribute(b)}b=d3.ns.qualify(b);return arguments.length<2?d(b.local?function(){return this.getAttributeNS(b.space,b.local)}:function(){return 
this.getAttribute(b)}):a.each(c==null?b.local?f:e:typeof c=="function"?b.local?j:i:b.local?h:g)},a.classed=function(b,c){function i(){(c.apply(this,arguments)?f:h).call(this)}function h(){if(a=this.classList)return a.remove(b);var a=this.className,c=a.baseVal!=null,d=c?a.baseVal:a;d=g(d.replace(e," ")),c?a.baseVal=d:this.className=d}function f(){if(a=this.classList)return a.add(b);var a=this.className,c=a.baseVal!=null,d=c?a.baseVal:a;e.lastIndex=0,e.test(d)||(d=g(d+" "+b),c?a.baseVal=d:this.className=d)}var e=new RegExp("(^|\\s+)"+d3.requote(b)+"(\\s+|$)","g");return arguments.length<2?d(function(){if(a=this.classList)return a.contains(b);var a=this.className;e.lastIndex=0;return e.test(a.baseVal!=null?a.baseVal:a)}):a.each(typeof c=="function"?i:c?f:h)},a.style=function(b,c,e){function h(){var a=c.apply(this,arguments);a==null?this.style.removeProperty(b):this.style.setProperty(b,a,e)}function g(){this.style.setProperty(b,c,e)}function f(){this.style.removeProperty(b)}arguments.length<3&&(e="");return arguments.length<2?d(function(){return window.getComputedStyle(this,null).getPropertyValue(b)}):a.each(c==null?f:typeof c=="function"?h:g)},a.property=function(b,c){function g(){var a=c.apply(this,arguments);a==null?delete this[b]:this[b]=a}function f(){this[b]=c}function e(){delete this[b]}b=d3.ns.qualify(b);return arguments.length<2?d(function(){return this[b]}):a.each(c==null?e:typeof c=="function"?g:f)},a.text=function(b){function e(){this.textContent=b.apply(this,arguments)}function c(){this.textContent=b}return arguments.length<1?d(function(){return this.textContent}):a.each(typeof b=="function"?e:c)},a.html=function(b){function e(){this.innerHTML=b.apply(this,arguments)}function c(){this.innerHTML=b}return arguments.length<1?d(function(){return this.innerHTML}):a.each(typeof b=="function"?e:c)},a.append=function(a){function d(b){return b.appendChild(document.createElementNS(a.space,a.local))}function c(b){return 
b.appendChild(document.createElement(a))}a=d3.ns.qualify(a);return b(a.local?d:c)},a.insert=function(a,c){function e(b){return b.insertBefore(document.createElementNS(a.space,a.local),S(c,b))}function d(b){return b.insertBefore(document.createElement(a),S(c,b))}a=d3.ns.qualify(a);return b(a.local?e:d)},a.remove=function(){return a.each(function(){var a=this.parentNode;a&&a.removeChild(this)})},a.sort=function(b){b=X.apply(this,arguments);for(var c=0,d=a.length;c<d;c++){var e=a[c];e.sort(b);for(var f=1,g=e.length,h=e[0];f<g;f++){var i=e[f];i&&(h&&h.parentNode.insertBefore(i,h.nextSibling),h=i)}}return a},a.on=function(b,c,d){arguments.length<3&&(d=!1);var e=b.indexOf("."),f=e===-1?b:b.substring(0,e),g="__on"+b;return a.each(function(a,b){function h(a){var d=d3.event;d3.event=a;try{c.call(this,e.__data__,b)}finally{d3.event=d}}this[g]&&this.removeEventListener(f,this[g],d),c&&this.addEventListener(f,this[g]=h,d);var e=this})},a.transition=function(){return _(a)},a.call=h;return a}function R(a,b,c){function g(a){return Math.round(f(a)*255)}function f(a){a>360?a-=360:a<0&&(a+=360);return a<60?d+(e-d)*a/60:a<180?e:a<240?d+(e-d)*(240-a)/60:d}var d,e;a=a%360,a<0&&(a+=360),b=b<0?0:b>1?1:b,c=c<0?0:c>1?1:c,e=c<=.5?c*(1+b):c+b-c*b,d=2*c-e;return H(g(a+120),g(a),g(a-120))}function Q(a,b,c){this.h=a,this.s=b,this.l=c}function P(a,b,c){return new Q(a,b,c)}function M(a){var b=parseFloat(a);return a.charAt(a.length-1)==="%"?Math.round(b*2.55):b}function L(a,b,c){var d=Math.min(a/=255,b/=255,c/=255),e=Math.max(a,b,c),f=e-d,g,h,i=(e+d)/2;f?(h=i<.5?f/(e+d):f/(2-e-d),a==e?g=(b-c)/f+(b<c?6:0):b==e?g=(c-a)/f+2:g=(a-b)/f+4,g*=60):h=g=0;return P(g,h,i)}function K(a,b,c){var d=0,e=0,f=0,g,h,i;g=/([a-z]+)\((.*)\)/i.exec(a);if(g){h=g[2].split(",");switch(g[1]){case"hsl":return c(parseFloat(h[0]),parseFloat(h[1])/100,parseFloat(h[2])/100);case"rgb":return b(M(h[0]),M(h[1]),M(h[2]))}}if(i=N[a])return 
b(i.r,i.g,i.b);a!=null&&a.charAt(0)==="#"&&(a.length===4?(d=a.charAt(1),d+=d,e=a.charAt(2),e+=e,f=a.charAt(3),f+=f):a.length===7&&(d=a.substring(1,3),e=a.substring(3,5),f=a.substring(5,7)),d=parseInt(d,16),e=parseInt(e,16),f=parseInt(f,16));return b(d,e,f)}function J(a){return a<16?"0"+a.toString(16):a.toString(16)}function I(a,b,c){this.r=a,this.g=b,this.b=c}function H(a,b,c){return new I(a,b,c)}function G(a,b){b=1/(b-(a=+a));return function(c){return Math.max(0,Math.min(1,(c-a)*b))}}function F(a,b){b=1/(b-(a=+a));return function(c){return(c-a)*b}}function E(a){return a in D||/\bcolor\b/.test(a)?d3.interpolateRgb:d3.interpolate}function B(a){return a<1/2.75?7.5625*a*a:a<2/2.75?7.5625*(a-=1.5/2.75)*a+.75:a<2.5/2.75?7.5625*(a-=2.25/2.75)*a+.9375:7.5625*(a-=2.625/2.75)*a+.984375}function A(a){a||(a=1.70158);return function(b){return b*b*((a+1)*b-a)}}function z(a,b){var c;arguments.length<2&&(b=.45),arguments.length<1?(a=1,c=b/4):c=b/(2*Math.PI)*Math.asin(1/a);return function(d){return 1+a*Math.pow(2,10*-d)*Math.sin((d-c)*2*Math.PI/b)}}function y(a){return 1-Math.sqrt(1-a*a)}function x(a){return Math.pow(2,10*(a-1))}function w(a){return 1-Math.cos(a*Math.PI/2)}function v(a){return function(b){return Math.pow(b,a)}}function u(a){return a}function t(a){return function(b){return.5*(b<.5?a(2*b):2-a(2-2*b))}}function s(a){return function(b){return 1-a(1-b)}}function n(a){var b=a.lastIndexOf("."),c=b>=0?a.substring(b):(b=a.length,""),d=[];while(b>0)d.push(a.substring(b-=3,b+3));return d.reverse().join(",")+c}function m(a){return a+""}function j(a){var b={},c=[];b.add=function(a){for(var d=0;d<c.length;d++)if(c[d].listener==a)return b;c.push({listener:a,on:!0});return b},b.remove=function(a){for(var d=0;d<c.length;d++){var e=c[d];if(e.listener==a){e.on=!1,c=c.slice(0,d).concat(c.slice(d+1));break}}return b},b.dispatch=function(){var a=c;for(var b=0,d=a.length;b<d;b++){var e=a[b];e.on&&e.listener.apply(this,arguments)}};return b}function 
h(a){a.apply(this,(arguments[0]=this,arguments));return this}function g(a){return a.replace(/(^\s+)|(\s+$)/g,"").replace(/\s+/g," ")}function f(a){return a==null}function e(a){return a.length}function c(a){return Array.prototype.slice.call(a)}function b(a){var b=-1,c=a.length,d=[];while(++b<c)d.push(a[b]);return d}d3={version:"1.29.4"},Date.now||(Date.now=function(){return+(new Date)}),Object.create||(Object.create=function(a){function b(){}b.prototype=a;return new b});var a=c;try{a(document.documentElement.childNodes)[0].nodeType}catch(d){a=b}d3.functor=function(a){return typeof a=="function"?a:function(){return a}},d3.rebind=function(a,b){return function(){var c=b.apply(a,arguments);return arguments.length?a:c}},d3.ascending=function(a,b){return a<b?-1:a>b?1:a>=b?0:NaN},d3.descending=function(a,b){return b<a?-1:b>a?1:b>=a?0:NaN},d3.min=function(a,b){var c=-1,d=a.length,e,f;if(arguments.length===1){while(++c<d&&((e=a[c])==null||e!=e))e=undefined;while(++c<d)(f=a[c])!=null&&e>f&&(e=f)}else{while(++c<d&&((e=b.call(a,a[c],c))==null||e!=e))e=undefined;while(++c<d)(f=b.call(a,a[c],c))!=null&&e>f&&(e=f)}return e},d3.max=function(a,b){var c=-1,d=a.length,e,f;if(arguments.length===1){while(++c<d&&((e=a[c])==null||e!=e))e=undefined;while(++c<d)(f=a[c])!=null&&f>e&&(e=f)}else{while(++c<d&&((e=b.call(a,a[c],c))==null||e!=e))e=undefined;while(++c<d)(f=b.call(a,a[c],c))!=null&&f>e&&(e=f)}return e},d3.sum=function(a,b){var c=0,d=a.length,e,f=-1;if(arguments.length===1)while(++f<d)isNaN(e=+a[f])||(c+=e);else while(++f<d)isNaN(e=+b.call(a,a[f],f))||(c+=e);return c},d3.quantile=function(a,b){var c=(a.length-1)*b+1,d=Math.floor(c),e=a[d-1],f=c-d;return f?e+f*(a[d]-e):e},d3.zip=function(){if(!(f=arguments.length))return[];for(var a=-1,b=d3.min(arguments,e),c=Array(b);++a<b;)for(var d=-1,f,g=c[a]=Array(f);++d<f;)g[d]=arguments[d][a];return c},d3.bisectLeft=function(a,b,c,d){arguments.length<3&&(c=0),arguments.length<4&&(d=a.length);while(c<d){var e=c+d>>1;a[e]<b?c=e+1:d=e}return 
c},d3.bisect=d3.bisectRight=function(a,b,c,d){arguments.length<3&&(c=0),arguments.length<4&&(d=a.length);while(c<d){var e=c+d>>1;b<a[e]?d=e:c=e+1}return c},d3.first=function(a,b){var c=0,d=a.length,e=a[0],f;arguments.length===1&&(b=d3.ascending);while(++c<d)b.call(a,e,f=a[c])>0&&(e=f);return e},d3.last=function(a,b){var c=0,d=a.length,e=a[0],f;arguments.length===1&&(b=d3.ascending);while(++c<d)b.call(a,e,f=a[c])<=0&&(e=f);return e},d3.nest=function(){function g(a,d){if(d>=b.length)return a;var e=[],f=c[d++],h;for(h in a)e.push({key:h,values:g(a[h],d)});f&&e.sort(function(a,b){return f(a.key,b.key)});return e}function f(c,g){if(g>=b.length)return e?e.call(a,c):d?c.sort(d):c;var h=-1,i=c.length,j=b[g++],k,l,m={};while(++h<i)(k=j(l=c[h]))in m?m[k].push(l):m[k]=[l];for(k in m)m[k]=f(m[k],g);return m}var a={},b=[],c=[],d,e;a.map=function(a){return f(a,0)},a.entries=function(a){return g(f(a,0),0)},a.key=function(c){b.push(c);return a},a.sortKeys=function(d){c[b.length-1]=d;return a},a.sortValues=function(b){d=b;return a},a.rollup=function(b){e=b;return a};return a},d3.keys=function(a){var b=[];for(var c in a)b.push(c);return b},d3.values=function(a){var b=[];for(var c in a)b.push(a[c]);return b},d3.entries=function(a){var b=[];for(var c in a)b.push({key:c,value:a[c]});return b},d3.permute=function(a,b){var c=[],d=-1,e=b.length;while(++d<e)c[d]=a[b[d]];return c},d3.merge=function(a){return Array.prototype.concat.apply([],a)},d3.split=function(a,b){var c=[],d=[],e,g=-1,h=a.length;arguments.length<2&&(b=f);while(++g<h)b.call(d,e=a[g],g)?d=[]:(d.length||c.push(d),d.push(e));return c},d3.range=function(a,b,c){arguments.length<3&&(c=1,arguments.length<2&&(b=a,a=0));if((b-a)/c==Infinity)throw new Error("infinite range");var d=[],e=-1,f;if(c<0)while((f=a+c*++e)>b)d.push(f);else while((f=a+c*++e)<b)d.push(f);return d},d3.requote=function(a){return a.replace(i,"\\$&")};var i=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g;d3.round=function(a,b){return 
b?Math.round(a*Math.pow(10,b))*Math.pow(10,-b):Math.round(a)},d3.xhr=function(a,b,c){var d=new XMLHttpRequest;arguments.length<3?c=b:b&&d.overrideMimeType&&d.overrideMimeType(b),d.open("GET",a,!0),d.onreadystatechange=function(){d.readyState===4&&c(d.status<300?d:null)},d.send(null)},d3.text=function(a,b,c){function d(a){c(a&&a.responseText)}arguments.length<3&&(c=b,b=null),d3.xhr(a,b,d)},d3.json=function(a,b){d3.text(a,"application/json",function(a){b(a?JSON.parse(a):null)})},d3.html=function(a,b){d3.text(a,"text/html",function(a){if(a!=null){var c=document.createRange();c.selectNode(document.body),a=c.createContextualFragment(a)}b(a)})},d3.xml=function(a,b,c){function d(a){c(a&&a.responseXML)}arguments.length<3&&(c=b,b=null),d3.xhr(a,b,d)},d3.ns={prefix:{svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"},qualify:function(a){var b=a.indexOf(":");return b<0?a:{space:d3.ns.prefix[a.substring(0,b)],local:a.substring(b+1)}}},d3.dispatch=function(a){var b={},c;for(var d=0,e=arguments.length;d<e;d++)c=arguments[d],b[c]=j(c);return b},d3.format=function(a){var b=k.exec(a),c=b[1]||" ",d=b[3]||"",e=b[5],f=+b[6],g=b[7],h=b[8],i=b[9],j=!1,o=!1;h&&(h=h.substring(1)),e&&(c="0",g&&(f-=Math.floor((f-1)/4)));switch(i){case"n":g=!0,i="g";break;case"%":j=!0,i="f";break;case"p":j=!0,i="r";break;case"d":o=!0,h="0"}i=l[i]||m;return function(a){var b=j?a*100:+a,k=b<0&&(b=-b)?"−":d;if(o&&b%1)return"";a=i(b,h);if(e){var l=a.length+k.length;l<f&&(a=Array(f-l+1).join(c)+a),g&&(a=n(a)),a=k+a}else{g&&(a=n(a)),a=k+a;var l=a.length;l<f&&(a=Array(f-l+1).join(c)+a)}j&&(a+="%");return a}};var k=/(?:([^{])?([<>=^]))?([+\- ])?(#)?(0)?([0-9]+)?(,)?(\.[0-9]+)?([a-zA-Z%])?/,l={g:function(a,b){return a.toPrecision(b)},e:function(a,b){return a.toExponential(b)},f:function(a,b){return a.toFixed(b)},r:function(a,b){var c=1+Math.floor(1e-15+Math.log(a)/Math.LN10);return 
d3.round(a,b-c).toFixed(Math.max(0,Math.min(20,b-c)))}},o=v(2),p=v(3),q={linear:function(){return u},poly:v,quad:function(){return o},cubic:function(){return p},sin:function(){return w},exp:function(){return x},circle:function(){return y},elastic:z,back:A,bounce:function(){return B}},r={"in":function(a){return a},out:s,"in-out":t,"out-in":function(a){return t(s(a))}};d3.ease=function(a){var b=a.indexOf("-"),c=b>=0?a.substring(0,b):a,d=b>=0?a.substring(b+1):"in";return r[d](q[c].apply(null,Array.prototype.slice.call(arguments,1)))},d3.event=null,d3.interpolate=function(a,b){var c=d3.interpolators.length,d;while(--c>=0&&!(d=d3.interpolators[c](a,b)));return d},d3.interpolateNumber=function(a,b){b-=a;return function(c){return a+b*c}},d3.interpolateRound=function(a,b){b-=a;return function(c){return Math.round(a+b*c)}},d3.interpolateString=function(a,b){var c,d,e,f=0,g=0,h=[],i=[],j,k;C.lastIndex=0;for(d=0;c=C.exec(b);++d)c.index&&h.push(b.substring(f,g=c.index)),i.push({i:h.length,x:c[0]}),h.push(null),f=C.lastIndex;f<b.length&&h.push(b.substring(f));for(d=0,j=i.length;(c=C.exec(a))&&d<j;++d){k=i[d];if(k.x==c[0]){if(k.i)if(h[k.i+1]==null){h[k.i-1]+=k.x,h.splice(k.i,1);for(e=d+1;e<j;++e)i[e].i--}else{h[k.i-1]+=k.x+h[k.i+1],h.splice(k.i,2);for(e=d+1;e<j;++e)i[e].i-=2}else if(h[k.i+1]==null)h[k.i]=k.x;else{h[k.i]=k.x+h[k.i+1],h.splice(k.i+1,1);for(e=d+1;e<j;++e)i[e].i--}i.splice(d,1),j--,d--}else k.x=d3.interpolateNumber(parseFloat(c[0]),parseFloat(k.x))}while(d<j)k=i.pop(),h[k.i+1]==null?h[k.i]=k.x:(h[k.i]=k.x+h[k.i+1],h.splice(k.i+1,1)),j--;return h.length===1?h[0]==null?i[0].x:function(){return b}:function(a){for(d=0;d<j;++d)h[(k=i[d]).i]=k.x(a);return h.join("")}},d3.interpolateRgb=function(a,b){a=d3.rgb(a),b=d3.rgb(b);var c=a.r,d=a.g,e=a.b,f=b.r-c,g=b.g-d,h=b.b-e;return function(a){return"rgb("+Math.round(c+f*a)+","+Math.round(d+g*a)+","+Math.round(e+h*a)+")"}},d3.interpolateHsl=function(a,b){a=d3.hsl(a),b=d3.hsl(b);var 
c=a.h,d=a.s,e=a.l,f=b.h-c,g=b.s-d,h=b.l-e;return function(a){return R(c+f*a,d+g*a,e+h*a).toString()}},d3.interpolateArray=function(a,b){var c=[],d=[],e=a.length,f=b.length,g=Math.min(a.length,b.length),h;for(h=0;h<g;++h)c.push(d3.interpolate(a[h],b[h]));for(;h<e;++h)d[h]=a[h];for(;h<f;++h)d[h]=b[h];return function(a){for(h=0;h<g;++h)d[h]=c[h](a);return d}},d3.interpolateObject=function(a,b){var c={},d={},e;for(e in a)e in b?c[e]=E(e)(a[e],b[e]):d[e]=a[e];for(e in b)e in a||(d[e]=b[e]);return function(a){for(e in c)d[e]=c[e](a);return d}};var C=/[-+]?(?:\d+\.\d+|\d+\.|\.\d+|\d+)(?:[eE][-]?\d+)?/g,D={background:1,fill:1,stroke:1};d3.interpolators=[d3.interpolateObject,function(a,b){return b instanceof Array&&d3.interpolateArray(a,b)},function(a,b){return typeof b=="string"&&d3.interpolateString(String(a),b)},function(a,b){return(typeof b=="string"?b in N||/^(#|rgb\(|hsl\()/.test(b):b instanceof I||b instanceof Q)&&d3.interpolateRgb(String(a),b)},function(a,b){return typeof b=="number"&&d3.interpolateNumber(+a,b)}],d3.rgb=function(a,b,c){return arguments.length===1?K(""+a,H,R):H(~~a,~~b,~~c)},I.prototype.brighter=function(a){a=Math.pow(.7,arguments.length?a:1);var b=this.r,c=this.g,d=this.b,e=30;if(!b&&!c&&!d)return H(e,e,e);b&&b<e&&(b=e),c&&c<e&&(c=e),d&&d<e&&(d=e);return H(Math.min(255,Math.floor(b/a)),Math.min(255,Math.floor(c/a)),Math.min(255,Math.floor(d/a)))},I.prototype.darker=function(a){a=Math.pow(.7,arguments.length?a:1);return H(Math.max(0,Math.floor(a*this.r)),Math.max(0,Math.floor(a*this.g)),Math.max(0,Math.floor(a*this.b)))},I.prototype.hsl=function(){return L(this.r,this.g,this.b)},I.prototype.toString=function(){return"#"+J(this.r)+J(this.g)+J(this.b)};var 
N={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintc
ream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"};for(var O in N)N[O]=K(N[O],H,R);d3.hsl=function(a,b,c){return arguments.length===1?K(""+a,L,P):P(+a,+b,+c)},Q.prototype.brighter=function(a){a=Math.pow(.7,arguments.length?a:1);return P(this.h,this.s,this.l/a)},Q.prototype.darker=function(a){a=Math.pow(.7,arguments.length?a:1);return P(this.h,this.s,a*this.l)},Q.prototype.rgb=function(){return R(this.h,this.s,this.l)},Q.prototype.toString=function(){return"hsl("+this.h+","+this.s*100+"%,"+this.l*100+"%)"};var S=function(a,b){return b.querySelector(a)},T=function(b,c){return a(c.querySelectorAll(b))};typeof Sizzle=="function"&&(S=function(a,b){return Sizzle(a,b)[0]},T=function(a,b){return Sizzle.uniqueSort(Sizzle(a,b))});var U=V([[document]]);U[0].parentNode=document.documentElement,d3.select=function(a){return typeof a=="string"?U.select(a):V([[a]])},d3.selectAll=function(b){return typeof b=="string"?U.selectAll(b):V([a(b)])},d3.transition=U.transition;var Z=0,$=0
<add>,bb=null,bc,bd;d3.timer=function(a,b){var c=Date.now(),d=!1,e,f=bb;if(arguments.length<2)b=0;else if(!isFinite(b))return;while(f){if(f.callback===a){f.then=c,f.delay=b,d=!0;break}e=f,f=f.next}d||(bb={callback:a,then:c,delay:b,next:bb}),bc||(bd=clearTimeout(bd),bc=1,bg(be))},d3.timer.flush=function(){var a,b=Date.now(),c=bb;while(c)a=b-c.then,c.delay||(c.flush=c.callback(a)),c=c.next;bf()};var bg=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(a){setTimeout(a,17)};d3.scale={},d3.scale.linear=function(){function h(a){return e(a)}function g(){var g=a.length==2?bq:br,i=d?G:F;e=g(a,b,i,c),f=g(b,a,i,d3.interpolate);return h}var a=[0,1],b=[0,1],c=d3.interpolate,d=!1,e,f;h.invert=function(a){return f(a)},h.domain=function(b){if(!arguments.length)return a;a=b.map(Number);return g()},h.range=function(a){if(!arguments.length)return b;b=a;return g()},h.rangeRound=function(a){return h.range(a).interpolate(d3.interpolateRound)},h.clamp=function(a){if(!arguments.length)return d;d=a;return g()},h.interpolate=function(a){if(!arguments.length)return c;c=a;return g()},h.ticks=function(b){return bo(a,b)},h.tickFormat=function(b){return bp(a,b)},h.nice=function(){bj(a,bm);return g()};return g()},d3.scale.log=function(){function d(c){return a(b(c))}var a=d3.scale.linear(),b=bs,c=b.pow;d.invert=function(b){return c(a.invert(b))},d.domain=function(e){if(!arguments.length)return a.domain().map(c);b=e[0]<0?bt:bs,c=b.pow,a.domain(e.map(b));return d},d.nice=function(){a.domain(bj(a.domain(),bk));return d},d.ticks=function(){var d=bi(a.domain()),e=[];if(d.every(isFinite)){var f=Math.floor(d[0]),g=Math.ceil(d[1]),h=c(d[0]),i=c(d[1]);if(b===bt){e.push(c(f));for(;f++<g;)for(var j=9;j>0;j--)e.push(c(f)*j)}else{for(;f<g;f++)for(var j=1;j<10;j++)e.push(c(f)*j);e.push(c(f))}for(f=0;e[f]<h;f++);for(g=e.length;e[g-1]>i;g--);e=e.slice(f,g)}return 
e},d.tickFormat=function(){return bu};return bl(d,a)},bs.pow=function(a){return Math.pow(10,a)},bt.pow=function(a){return-Math.pow(10,-a)},d3.scale.pow=function(){function e(b){return a(c(b))}var a=d3.scale.linear(),b=1,c=Number,d=c;e.invert=function(b){return d(a.invert(b))},e.domain=function(f){if(!arguments.length)return a.domain().map(d);c=bv(b),d=bv(1/b),a.domain(f.map(c));return e},e.ticks=function(a){return bo(e.domain(),a)},e.tickFormat=function(a){return bp(e.domain(),a)},e.nice=function(){return e.domain(bj(e.domain(),bm))},e.exponent=function(a){if(!arguments.length)return b;var c=e.domain();b=a;return e.domain(c)};return bl(e,a)},d3.scale.sqrt=function(){return d3.scale.pow().exponent(.5)},d3.scale.ordinal=function(){function f(d){var e=d in b?b[d]:b[d]=a.push(d)-1;return c[e%c.length]}var a=[],b={},c=[],d=0,e=bh;f.domain=function(c){if(!arguments.length)return a;a=c,b={};var d=-1,g=-1,h=a.length;while(++d<h)c=a[d],c in b||(b[c]=++g);e();return f},f.range=function(a){if(!arguments.length)return c;c=a,e=bh;return f},f.rangePoints=function(b,g){arguments.length<2&&(g=0),(e=function(){var e=b[0],f=b[1],h=(f-e)/(a.length-1+g);c=a.length==1?[(e+f)/2]:d3.range(e+h*g/2,f+h/2,h),d=0})();return f},f.rangeBands=function(b,g){arguments.length<2&&(g=0),(e=function(){var e=b[0],f=b[1],h=(f-e)/(a.length+g);c=d3.range(e+h*g,f,h),d=h*(1-g)})();return f},f.rangeRoundBands=function(b,g){arguments.length<2&&(g=0),(e=function(){var e=b[0],f=b[1],h=f-e,i=Math.floor(h/(a.length+g)),j=h-(a.length-g)*i;c=d3.range(e+Math.round(j/2),f,i),d=Math.round(i*(1-g))})();return f},f.rangeBand=function(){return d};return f},d3.scale.category10=function(){return d3.scale.ordinal().range(bw)},d3.scale.category20=function(){return d3.scale.ordinal().range(bx)},d3.scale.category20b=function(){return d3.scale.ordinal().range(by)},d3.scale.category20c=function(){return d3.scale.ordinal().range(bz)};var 
bw=["#1f77b4","#ff7f0e","#2ca02c","#d62728","#9467bd","#8c564b","#e377c2","#7f7f7f","#bcbd22","#17becf"],bx=["#1f77b4","#aec7e8","#ff7f0e","#ffbb78","#2ca02c","#98df8a","#d62728","#ff9896","#9467bd","#c5b0d5","#8c564b","#c49c94","#e377c2","#f7b6d2","#7f7f7f","#c7c7c7","#bcbd22","#dbdb8d","#17becf","#9edae5"],by=["#393b79","#5254a3","#6b6ecf","#9c9ede","#637939","#8ca252","#b5cf6b","#cedb9c","#8c6d31","#bd9e39","#e7ba52","#e7cb94","#843c39","#ad494a","#d6616b","#e7969c","#7b4173","#a55194","#ce6dbd","#de9ed6"],bz=["#3182bd","#6baed6","#9ecae1","#c6dbef","#e6550d","#fd8d3c","#fdae6b","#fdd0a2","#31a354","#74c476","#a1d99b","#c7e9c0","#756bb1","#9e9ac8","#bcbddc","#dadaeb","#636363","#969696","#bdbdbd","#d9d9d9"];d3.scale.quantile=function(){function e(a){return isNaN(a=+a)?NaN:b[d3.bisect(c,a)]}function d(){var d=0,e=a.length,f=b.length;c.length=Math.max(0,f-1);while(++d<f)c[d-1]=d3.quantile(a,d/f)}var a=[],b=[],c=[];e.domain=function(b){if(!arguments.length)return a;a=b.filter(function(a){return!isNaN(a)}).sort(d3.ascending),d();return e},e.range=function(a){if(!arguments.length)return b;b=a,d();return e},e.quantiles=function(){return c};return e},d3.scale.quantize=function(){function f(b){return e[Math.max(0,Math.min(d,Math.floor(c*(b-a))))]}var a=0,b=1,c=2,d=1,e=[0,1];f.domain=function(d){if(!arguments.length)return[a,b];a=+d[0],b=+d[d.length-1],c=e.length/(b-a);return f},f.range=function(g){if(!arguments.length)return e;e=g,c=e.length/(b-a),d=e.length-1;return f};return f},d3.svg={},d3.svg.arc=function(){function e(){var e=a.apply(this,arguments),f=b.apply(this,arguments),g=c.apply(this,arguments)+bA,h=d.apply(this,arguments)+bA,i=(h<g&&(i=g,g=h,h=i),h-g),j=i<Math.PI?"0":"1",k=Math.cos(g),l=Math.sin(g),m=Math.cos(h),n=Math.sin(h);return i>=bB?e?"M0,"+f+"A"+f+","+f+" 0 1,1 0,"+ -f+"A"+f+","+f+" 0 1,1 0,"+f+"M0,"+e+"A"+e+","+e+" 0 1,1 0,"+ -e+"A"+e+","+e+" 0 1,1 0,"+e+"Z":"M0,"+f+"A"+f+","+f+" 0 1,1 0,"+ -f+"A"+f+","+f+" 0 1,1 
0,"+f+"Z":e?"M"+f*k+","+f*l+"A"+f+","+f+" 0 "+j+",1 "+f*m+","+f*n+"L"+e*m+","+e*n+"A"+e+","+e+" 0 "+j+",0 "+e*k+","+e*l+"Z":"M"+f*k+","+f*l+"A"+f+","+f+" 0 "+j+",1 "+f*m+","+f*n+"L0,0"+"Z"}var a=bC,b=bD,c=bE,d=bF;e.innerRadius=function(b){if(!arguments.length)return a;a=d3.functor(b);return e},e.outerRadius=function(a){if(!arguments.length)return b;b=d3.functor(a);return e},e.startAngle=function(a){if(!arguments.length)return c;c=d3.functor(a);return e},e.endAngle=function(a){if(!arguments.length)return d;d=d3.functor(a);return e},e.centroid=function(){var e=(a.apply(this,arguments)+b.apply(this,arguments))/2,f=(c.apply(this,arguments)+d.apply(this,arguments))/2+bA;return[Math.cos(f)*e,Math.sin(f)*e]};return e};var bA=-Math.PI/2,bB=2*Math.PI-1e-6;d3.svg.line=function(){return bG(Object)};var bK={linear:bL,"step-before":bM,"step-after":bN,basis:bT,"basis-open":bU,"basis-closed":bV,bundle:bW,cardinal:bQ,"cardinal-open":bO,"cardinal-closed":bP,monotone:cd},bY=[0,2/3,1/3,0],bZ=[0,1/3,2/3,0],b$=[0,1/6,2/3,1/6];d3.svg.line.radial=function(){var a=bG(ce);a.radius=a.x,delete a.x,a.angle=a.y,delete a.y;return a},d3.svg.area=function(){return cf(Object)},d3.svg.area.radial=function(){var a=cf(ce);a.radius=a.x,delete a.x,a.innerRadius=a.x0,delete a.x0,a.outerRadius=a.x1,delete a.x1,a.angle=a.y,delete a.y,a.startAngle=a.y0,delete a.y0,a.endAngle=a.y1,delete a.y1;return a},d3.svg.chord=function(){function j(a,b,c,d){return"Q 0,0 "+d}function i(a,b){return"A"+a+","+a+" 0 0,1 "+b}function h(a,b){return a.a0==b.a0&&a.a1==b.a1}function g(a,b,f,g){var h=b.call(a,f,g),i=c.call(a,h,g),j=d.call(a,h,g)+bA,k=e.call(a,h,g)+bA;return{r:i,a0:j,a1:k,p0:[i*Math.cos(j),i*Math.sin(j)],p1:[i*Math.cos(k),i*Math.sin(k)]}}function f(c,d){var e=g(this,a,c,d),f=g(this,b,c,d);return"M"+e.p0+i(e.r,e.p1)+(h(e,f)?j(e.r,e.p1,e.r,e.p0):j(e.r,e.p1,f.r,f.p0)+i(f.r,f.p1)+j(f.r,f.p1,e.r,e.p0))+"Z"}var a=ci,b=cj,c=ck,d=bE,e=bF;f.radius=function(a){if(!arguments.length)return c;c=d3.functor(a);return 
f},f.source=function(b){if(!arguments.length)return a;a=d3.functor(b);return f},f.target=function(a){if(!arguments.length)return b;b=d3.functor(a);return f},f.startAngle=function(a){if(!arguments.length)return d;d=d3.functor(a);return f},f.endAngle=function(a){if(!arguments.length)return e;e=d3.functor(a);return f};return f},d3.svg.diagonal=function(){function d(d,e){var f=a.call(this,d,e),g=b.call(this,d,e),h=(f.y+g.y)/2,i=[f,{x:f.x,y:h},{x:g.x,y:h},g];i=i.map(c);return"M"+i[0]+"C"+i[1]+" "+i[2]+" "+i[3]}var a=ci,b=cj,c=cn;d.source=function(b){if(!arguments.length)return a;a=d3.functor(b);return d},d.target=function(a){if(!arguments.length)return b;b=d3.functor(a);return d},d.projection=function(a){if(!arguments.length)return c;c=a;return d};return d},d3.svg.diagonal.radial=function(){var a=d3.svg.diagonal(),b=cn,c=a.projection;a.projection=function(a){return arguments.length?c(co(b=a)):b};return a},d3.svg.mouse=function(a){return cq(a,d3.event)};var cp=/WebKit/.test(navigator.userAgent)?-1:0;d3.svg.touches=function(b){var c=d3.event.touches;return c?a(c).map(function(a){var c=cq(b,a);c.identifier=a.identifier;return c}):[]},d3.svg.symbol=function(){function c(c,d){return(ct[a.call(this,c,d)]||ct.circle)(b.call(this,c,d))}var a=cs,b=cr;c.type=function(b){if(!arguments.length)return a;a=d3.functor(b);return c},c.size=function(a){if(!arguments.length)return b;b=d3.functor(a);return c};return c};var ct={circle:function(a){var b=Math.sqrt(a/Math.PI);return"M0,"+b+"A"+b+","+b+" 0 1,1 0,"+ -b+"A"+b+","+b+" 0 1,1 0,"+b+"Z"},cross:function(a){var b=Math.sqrt(a/5)/2;return"M"+ -3*b+","+ -b+"H"+ -b+"V"+ -3*b+"H"+b+"V"+ -b+"H"+3*b+"V"+b+"H"+b+"V"+3*b+"H"+ -b+"V"+b+"H"+ -3*b+"Z"},diamond:function(a){var b=Math.sqrt(a/(2*cv)),c=b*cv;return"M0,"+ -b+"L"+c+",0"+" 0,"+b+" "+ -c+",0"+"Z"},square:function(a){var b=Math.sqrt(a)/2;return"M"+ -b+","+ -b+"L"+b+","+ -b+" "+b+","+b+" "+ -b+","+b+"Z"},"triangle-down":function(a){var b=Math.sqrt(a/cu),c=b*cu/2;return"M0,"+c+"L"+b+","+ -c+" 
"+ -b+","+ -c+"Z"},"triangle-up":function(a){var b=Math.sqrt(a/cu),c=b*cu/2;return"M0,"+ -c+"L"+b+","+c+" "+ -b+","+c+"Z"}};d3.svg.symbolTypes=d3.keys(ct);var cu=Math.sqrt(3),cv=Math.tan(30*Math.PI/180)})()
<ide>\ No newline at end of file
<ide><path>src/core/format.js
<ide> var d3_format_types = {
<ide> f: function(x, p) { return x.toFixed(p); },
<ide> r: function(x, p) {
<ide> var n = 1 + Math.floor(1e-15 + Math.log(x) / Math.LN10);
<del> return d3.round(x, p - n).toFixed(Math.max(0, p - n));
<add> return d3.round(x, p - n).toFixed(Math.max(0, Math.min(20, p - n)));
<ide> }
<ide> };
<ide>
<ide><path>test/core/format-test.js
<ide> suite.addBatch({
<ide> assert.strictEqual(format(".5r")(123.45), "123.45");
<ide> assert.strictEqual(format(".6r")(123.45), "123.450");
<ide> },
<add> "can round very small numbers": function(format) {
<add> var f = format(".2r");
<add> assert.strictEqual(f(1e-22), "0.00000000000000000000");
<add> },
<ide> "can group thousands": function(format) {
<ide> var f = format(",d");
<ide> assert.strictEqual(f(0), "0"); | 4 |
Text | Text | add test to return early pattern | f7578406249329889ce45472ced16833db1c59e6 | <ide><path>curriculum/challenges/english/02-javascript-algorithms-and-data-structures/basic-javascript/return-early-pattern-for-functions.english.md
<ide> tests:
<ide> testString: assert(abTest(2,8) === 18 );
<ide> - text: <code>abTest(3,3)</code> should return <code>12</code>
<ide> testString: assert(abTest(3,3) === 12 );
<del>
<add> - text: <code>abTest(0,0)</code> should return <code>0</code>
<add> testString: assert(abTest(0,0) === 0);
<add>
<ide> ```
<ide>
<ide> </section> | 1 |
Text | Text | add roadmap, i18n, tracing, evangelism wgs | ae7a23351f7e5b06ab88b7ea9c18d7c5b2e287e9 | <ide><path>WORKING_GROUPS.md
<ide> Its responsibilities are:
<ide> The current members can be found in their
<ide> [README](https://github.com/iojs/build#people).
<ide>
<add>### Tracing
<add>
<add>The tracing working group's purpose is to increase the
<add>transparency of software written in io.js.
<add>
<add>Its responsibilities are:
<add>* Collaboration with V8 to integrate with `trace_event`.
<add>* Maintinence and iteration on AsyncWrap.
<add>* Maintinence and improvements to system tracing support (DTrace, LTTng, etc).
<add>* Documention of tracing and debugging techniques.
<add>* Fostering a tracing and debugging ecosystem.
<add>
<add>The current members can be found in their
<add>[README](https://github.com/iojs/tracing-wg#people).
<add>
<add>### i18n
<add>
<add>The i18n working groups handle more than just translations. They
<add>are endpoints for community members to collaborate with each
<add>other in their language of choice.
<add>
<add>Each team is organized around a common spoken language. Each
<add>language community might then produce multiple localizations for
<add>various project resources.
<add>
<add>Their responsibilities are:
<add>* Translation of any io.js materials they believe are relevant to their
<add>community.
<add>* Review processes for keeping translations up
<add>to date and of high quality.
<add>* Social media channels in their language.
<add>* Promotion of io.js speakers for meetups and conferences in their
<add>language.
<add>
<add>Membership is maintained by each language community.
<add>
<add>* [iojs-bg](http://github.com/iojs/iojs-bg)
<add>* [iojs-bn](http://github.com/iojs/iojs-bn)
<add>* [iojs-cn](http://github.com/iojs/iojs-cn)
<add>* [iojs-cs](http://github.com/iojs/iojs-cs)
<add>* [iojs-da](http://github.com/iojs/iojs-da)
<add>* [iojs-de](http://github.com/iojs/iojs-de)
<add>* [iojs-el](http://github.com/iojs/iojs-el)
<add>* [iojs-es](http://github.com/iojs/iojs-es)
<add>* [iojs-fa](http://github.com/iojs/iojs-fa)
<add>* [iojs-fi](http://github.com/iojs/iojs-fi)
<add>* [iojs-fr](http://github.com/iojs/iojs-fr)
<add>* [iojs-he](http://github.com/iojs/iojs-he)
<add>* [iojs-hi](http://github.com/iojs/iojs-hi)
<add>* [iojs-hu](http://github.com/iojs/iojs-hu)
<add>* [iojs-id](http://github.com/iojs/iojs-id)
<add>* [iojs-it](http://github.com/iojs/iojs-it)
<add>* [iojs-ja](http://github.com/iojs/iojs-ja)
<add>* [iojs-ka](http://github.com/iojs/iojs-ka)
<add>* [iojs-ko](http://github.com/iojs/iojs-ko)
<add>* [iojs-mk](http://github.com/iojs/iojs-mk)
<add>* [iojs-nl](http://github.com/iojs/iojs-nl)
<add>* [iojs-no](http://github.com/iojs/iojs-no)
<add>* [iojs-pl](http://github.com/iojs/iojs-pl)
<add>* [iojs-pt](http://github.com/iojs/iojs-pt)
<add>* [iojs-ro](http://github.com/iojs/iojs-ro)
<add>* [iojs-ru](http://github.com/iojs/iojs-ru)
<add>* [iojs-sv](http://github.com/iojs/iojs-sv)
<add>* [iojs-ta](http://github.com/iojs/iojs-ta)
<add>* [iojs-tr](http://github.com/iojs/iojs-tr)
<add>* [iojs-tw](http://github.com/iojs/iojs-tw)
<add>* [iojs-uk](http://github.com/iojs/iojs-uk)
<add>* [iojs-vi](http://github.com/iojs/iojs-vi)
<add>
<add>### Evangelism
<add>
<add>The evangelism working group promotes the accomplishments
<add>of io.js and lets the community know how they can get involved.
<add>
<add>Their responsibilities are:
<add>* Project messaging.
<add>* Official project social media.
<add>* Promotion of speakers for meetups and conferences.
<add>* Promotion of community events.
<add>* Publishing regular update summaries and other promotional
<add>content.
<add>
<add>The current members can be found in their
<add>[README](https://github.com/iojs/evangelism#people).
<add>
<add>### Roadmap
<add>
<add>The roadmap working group is responsible for user community outreach
<add>and the translation of their concerns into a plan of action for io.js.
<add>
<add>The final [ROADMAP](./ROADMAP.md) document is still owned by the TC and requires
<add>the same approval for changes as any other project asset.
<add>
<add>Their responsibilities are:
<add>* Attract and summarize user community needs and feedback.
<add>* Find or potentially create tools that allow for broader participation.
<add>* Create Pull Requests for relevant changes to [Roadmap.md](./ROADMAP.md)
<add>
<add>The current members can be found in their
<add>[README](https://github.com/iojs/roadmap#people).
<add>
<ide> ## Starting a WG
<ide>
<ide> A Working Group is established by first defining a charter that can be | 1 |
PHP | PHP | move debug methods to the base query builder | a43424de299ec02e9e08a05c78d460ec1d054cd3 | <ide><path>src/Illuminate/Database/Eloquent/Builder.php
<ide> public function getMacro($name)
<ide> return Arr::get($this->localMacros, $name);
<ide> }
<ide>
<del> /**
<del> * Debug the current query builder instance.
<del> *
<del> * @return void
<del> */
<del> public function dd()
<del> {
<del> dd($this->toSql(), $this->getBindings());
<del> }
<del>
<ide> /**
<ide> * Dynamically access builder proxies.
<ide> *
<ide><path>src/Illuminate/Database/Query/Builder.php
<ide> public function __call($method, $parameters)
<ide>
<ide> static::throwBadMethodCallException($method);
<ide> }
<add>
<add> /**
<add> * Debug the current query builder instance.
<add> *
<add> * @return void
<add> */
<add> public function dump()
<add> {
<add> dump($this->toSql(), $this->getBindings());
<add> }
<add>
<add> /**
<add> * Debug the current query builder instance.
<add> *
<add> * @return void
<add> */
<add> public function dd()
<add> {
<add> dd($this->toSql(), $this->getBindings());
<add> }
<ide> } | 2 |
Python | Python | add unit tests | e1fa3d72e4a2569691aeb9f9003bdac52de7ce3e | <ide><path>numpy/core/tests/test_ufunc.py
<ide> def __rmul__(self, other):
<ide> assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
<ide> assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
<ide>
<add> def test_inplace_fancy_indexing(self):
<add> a = np.array([1, 2, 3])
<add> np.negative.at(a, [0, 0, 1, 2])
<add> assert_equal(a, [1, -2, -3])
<add>
<add> a = np.array([1, 2, 3])
<add> np.add.at(a, [0, 1, 1, 2], 1)
<add> assert_equal(a, [2, 4, 4])
<add>
<add> a = np.array([1, 2, 3])
<add> np.add.at(a, [0, 1, 2, 2], np.array([1, 2, 3]))
<add> assert_equal(a, [2, 4, 9])
<add>
<add>
<ide> if __name__ == "__main__":
<ide> run_module_suite() | 1 |
Python | Python | fix lint issue | c778f1d42816575198759eb97a33c63855269971 | <ide><path>docs/examples/compute/dimensiondata/Nodes_Create_mcp2_Uncustomised.py
<ide> # Get dimension data driver
<ide> libcloud.security.VERIFY_SSL_CERT = True
<ide> cls = get_driver(Provider.DIMENSIONDATA)
<del>driver = cls('myusername','mypassword', region='dd-au')
<add>driver = cls('myusername', 'mypassword', region='dd-au')
<ide>
<ide> # Get location
<ide> location = driver.ex_get_location_by_id(id='AU9')
<ide>
<ide> # Get network domain by location
<ide> networkDomainName = "Test Apache Libcloud"
<ide> network_domains = driver.ex_list_network_domains(location=location)
<del>my_network_domain = [d for d in network_domains if d.name == networkDomainName][0]
<add>my_network_domain = [d for d in network_domains if d.name ==
<add> networkDomainName][0]
<ide>
<ide> vlan = driver.ex_list_vlans(name='Libcloud Test VLAN')[0]
<ide> | 1 |
Python | Python | update code after code review on pr by @kami | f0cc015f437d5119e223c47cd09566735c6dd362 | <ide><path>docs/examples/backup/create_backup_from_node.py
<ide> import time
<ide> from pprint import pprint
<ide>
<del>from libcloud.backup.types import Provider, BackupTargetJobStatusType
<add>from libcloud.backup.types import BackupTargetJobStatusType
<add>from libcloud.backup.types import Provider as BackupProvider
<ide> from libcloud.backup.providers import get_driver as get_backup_driver
<ide>
<ide> from libcloud.compute.providers import get_driver as get_compute_driver
<add>from libcloud.compute.types import Provider as ComputeProvider
<ide>
<del>backup_driver = get_backup_driver(Provider.AZURE)('username', 'api key')
<del>compute_driver = get_compute_driver(Provider.AZURE)('username', 'api key')
<add>backup_driver = get_backup_driver(BackupProvider.AZURE)('username', 'api key')
<add>compute_driver = get_compute_driver(ComputeProvider.AZURE)('username', 'api key')
<ide>
<ide> nodes = compute_driver.list_nodes()
<ide>
<ide> else:
<ide> job = backup_driver.get_target_job(job.id)
<ide>
<del> print('Job is now at %s percent complete' % job.progress)
<add> print('Job is now at %s percent complete' % (job.progress))
<ide> time.sleep(20)
<ide>
<del>print('Job is completed with status- %s' % job.status)
<add>print('Job is completed with status- %s' % (job.status))
<ide>
<ide> print('Getting a list of recovery points')
<ide> recovery_points = backup_driver.list_recovery_points(new_target)
<ide><path>libcloud/backup/base.py
<ide> def __init__(self, id, name, address, type, driver, extra=None):
<ide> :type address: ``str``
<ide>
<ide> :param type: Backup target type (Physical, Virtual, ...).
<del> :type type: :class:`BackupTargetType`
<add> :type type: :class:`.BackupTargetType`
<ide>
<ide> :param driver: BackupDriver instance.
<del> :type driver: :class:`BackupDriver`
<add> :type driver: :class:`.BackupDriver`
<ide>
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide> def __init__(self, id, status, progress, target, driver, extra=None):
<ide> :type progress: ``int``
<ide>
<ide> :param target: BackupTarget instance.
<del> :type target: :class:`BackupTarget`
<add> :type target: :class:`.BackupTarget`
<ide>
<ide> :param driver: BackupDriver instance.
<del> :type driver: :class:`BackupDriver`
<add> :type driver: :class:`.BackupDriver`
<ide>
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide> def __init__(self, id, date, target, driver, extra=None):
<ide> :type date: :class:`datetime.datetime`
<ide>
<ide> :param target: BackupTarget instance.
<del> :type target: :class:`BackupTarget`
<add> :type target: :class:`.BackupTarget`
<ide>
<ide> :param driver: BackupDriver instance.
<del> :type driver: :class:`BackupDriver`
<add> :type driver: :class:`.BackupDriver`
<ide>
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide> def recover(self, path=None):
<ide> :param path: The part of the recovery point to recover (optional)
<ide> :type path: ``str``
<ide>
<del> :rtype: Instance of :class:`BackupTargetJob`
<add> :rtype: Instance of :class:`.BackupTargetJob`
<ide> """
<ide> return self.driver.recover_target(target=self.target,
<ide> recovery_point=self, path=path)
<ide> def recover_to(self, recovery_target, path=None):
<ide> Recover this recovery point out of place
<ide>
<ide> :param recovery_target: Backup target with to recover the data to
<del> :type recovery_target: Instance of :class:`BackupTarget`
<add> :type recovery_target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param path: The part of the recovery point to recover (optional)
<ide> :type path: ``str``
<ide>
<del> :rtype: Instance of :class:`BackupTargetJob`
<add> :rtype: Instance of :class:`.BackupTargetJob`
<ide> """
<ide> return self.driver.recover_target_out_of_place(
<ide> target=self.target,
<ide> def list_targets(self):
<ide> """
<ide> List all backuptargets
<ide>
<del> :rtype: ``list`` of :class:`BackupTarget`
<add> :rtype: ``list`` of :class:`.BackupTarget`
<ide> """
<ide> raise NotImplementedError(
<ide> 'list_targets not implemented for this driver')
<ide> def create_target(self, name, address,
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide>
<del> :rtype: Instance of :class:`BackupTarget`
<add> :rtype: Instance of :class:`.BackupTarget`
<ide> """
<ide> raise NotImplementedError(
<ide> 'create_target not implemented for this driver')
<ide>
<ide> def create_target_from_node(self, node, type=BackupTargetType.VIRTUAL,
<ide> extra=None):
<ide> """
<del> Creates a new backup target from an existing node
<add> Creates a new backup target from an existing node.
<add> By default, this will use the first public IP of the node
<ide>
<ide> :param node: The Node to backup
<ide> :type node: ``Node``
<ide> def create_target_from_node(self, node, type=BackupTargetType.VIRTUAL,
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide>
<del> :rtype: Instance of :class:`BackupTarget`
<add> :rtype: Instance of :class:`.BackupTarget`
<ide> """
<ide> return self.create_target(name=node.name,
<ide> address=node.public_ips[0],
<ide> type=type,
<ide> extra=None)
<ide>
<del> def create_target_from_container(self, container,
<add> def create_target_from_storage_container(self, container,
<ide> type=BackupTargetType.OBJECT,
<ide> extra=None):
<ide> """
<ide> def create_target_from_container(self, container,
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide>
<del> :rtype: Instance of :class:`BackupTarget`
<add> :rtype: Instance of :class:`.BackupTarget`
<ide> """
<ide> return self.create_target(name=container.name,
<ide> address=container.get_cdn_url(),
<ide> def update_target(self, target, name, address, extra):
<ide> Update the properties of a backup target
<ide>
<ide> :param target: Backup target to update
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param name: Name of the target
<ide> :type name: ``str``
<ide> def update_target(self, target, name, address, extra):
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide>
<del> :rtype: Instance of :class:`BackupTarget`
<add> :rtype: Instance of :class:`.BackupTarget`
<ide> """
<ide> raise NotImplementedError(
<ide> 'update_target not implemented for this driver')
<ide> def delete_target(self, target):
<ide> Delete a backup target
<ide>
<ide> :param target: Backup target to delete
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide> """
<ide> raise NotImplementedError(
<ide> 'delete_target not implemented for this driver')
<ide> def list_recovery_points(self, target, start_date=None, end_date=None):
<ide> List the recovery points available for a target
<ide>
<ide> :param target: Backup target to delete
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param start_date: The start date to show jobs between (optional)
<ide> :type start_date: :class:`datetime.datetime`
<ide>
<ide> :param end_date: The end date to show jobs between (optional)
<ide> :type end_date: :class:`datetime.datetime``
<ide>
<del> :rtype: ``list`` of :class:`BackupTargetRecoveryPoint`
<add> :rtype: ``list`` of :class:`.BackupTargetRecoveryPoint`
<ide> """
<ide> raise NotImplementedError(
<ide> 'list_recovery_points not implemented for this driver')
<ide> def recover_target(self, target, recovery_point, path=None):
<ide> Recover a backup target to a recovery point
<ide>
<ide> :param target: Backup target to delete
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param recovery_point: Backup target with the backup data
<del> :type recovery_point: Instance of :class:`BackupTarget`
<add> :type recovery_point: Instance of :class:`.BackupTarget`
<ide>
<ide> :param path: The part of the recovery point to recover (optional)
<ide> :type path: ``str``
<ide>
<del> :rtype: Instance of :class:`BackupTargetJob`
<add> :rtype: Instance of :class:`.BackupTargetJob`
<ide> """
<ide> raise NotImplementedError(
<ide> 'recover_target not implemented for this driver')
<ide> def recover_target_out_of_place(self, target, recovery_point,
<ide> Recover a backup target to a recovery point out-of-place
<ide>
<ide> :param target: Backup target with the backup data
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param recovery_point: Backup target with the backup data
<del> :type recovery_point: Instance of :class:`BackupTarget`
<add> :type recovery_point: Instance of :class:`.BackupTarget`
<ide>
<ide> :param recovery_target: Backup target with to recover the data to
<del> :type recovery_target: Instance of :class:`BackupTarget`
<add> :type recovery_target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param path: The part of the recovery point to recover (optional)
<ide> :type path: ``str``
<ide> def get_target_job(self, target, id):
<ide> Get a specific backup job by ID
<ide>
<ide> :param target: Backup target with the backup data
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param id: Backup target with the backup data
<del> :type id: Instance of :class:`BackupTarget`
<add> :type id: Instance of :class:`.BackupTarget`
<ide>
<ide> :rtype: :class:`BackupTargetJob`
<ide> """
<ide> def list_target_jobs(self, target):
<ide> List the backup jobs on a target
<ide>
<ide> :param target: Backup target with the backup data
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<del> :rtype: ``list`` of :class:`BackupTargetJob`
<add> :rtype: ``list`` of :class:`.BackupTargetJob`
<ide> """
<ide> raise NotImplementedError(
<ide> 'list_target_jobs not implemented for this driver')
<ide> def create_target_job(self, target, extra=None):
<ide> Create a new backup job on a target
<ide>
<ide> :param target: Backup target with the backup data
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param extra: (optional) Extra attributes (driver specific).
<ide> :type extra: ``dict``
<ide> def resume_target_job(self, target, job):
<ide> Resume a suspended backup job on a target
<ide>
<ide> :param target: Backup target with the backup data
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param job: Backup target job to resume
<del> :type job: Instance of :class:`BackupTargetJob`
<add> :type job: Instance of :class:`.BackupTargetJob`
<ide>
<ide> :rtype: ``bool``
<ide> """
<ide> def suspend_target_job(self, target, job):
<ide> Suspend a running backup job on a target
<ide>
<ide> :param target: Backup target with the backup data
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param job: Backup target job to suspend
<del> :type job: Instance of :class:`BackupTargetJob`
<add> :type job: Instance of :class:`.BackupTargetJob`
<ide>
<ide> :rtype: ``bool``
<ide> """
<ide> def cancel_target_job(self, target, job):
<ide> Cancel a backup job on a target
<ide>
<ide> :param target: Backup target with the backup data
<del> :type target: Instance of :class:`BackupTarget`
<add> :type target: Instance of :class:`.BackupTarget`
<ide>
<ide> :param job: Backup target job to cancel
<del> :type job: Instance of :class:`BackupTargetJob`
<add> :type job: Instance of :class:`.BackupTargetJob`
<ide>
<ide> :rtype: ``bool``
<ide> """ | 2 |
Python | Python | add checks to build cleaner model cards | 801ec115cfc364e30696f1e2f2506a5159c13632 | <ide><path>src/transformers/modelcard.py
<ide> def _insert_values_as_list(metadata, name, values):
<ide> return metadata
<ide> if isinstance(values, str):
<ide> values = [values]
<add> values = [v for v in values if v is not None]
<ide> if len(values) == 0:
<ide> return metadata
<ide> metadata[name] = values
<ide> def create_model_index(self, metric_mapping):
<ide> }
<ide> )
<ide>
<del> model_index["results"].append(result)
<add> # Remove partial results to avoid the model card being rejected.
<add> if "task" in result and "dataset" in result and "metrics" in result:
<add> model_index["results"].append(result)
<add> else:
<add> logger.info(f"Dropping the following result as it does not have all the necessary field:\n{result}")
<ide>
<ide> return [model_index]
<ide> | 1 |
Java | Java | fix crash when double launching activity | de4cb7d40318094543f2a160c88e8013772d9f7d | <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/ReactContext.java
<ide> public void handleException(RuntimeException e) {
<ide>
<ide> /**
<ide> * Same as {@link Activity#startActivityForResult(Intent, int)}, this just redirects the call to
<del> * the current activity.
<add> * the current activity. Returns whether the activity was started, as this might fail if this
<add> * was called before the context is in the right state.
<ide> */
<del> public void startActivityForResult(Intent intent, int code, Bundle bundle) {
<del> Assertions.assertNotNull(mCurrentActivity);
<add> public boolean startActivityForResult(Intent intent, int code, Bundle bundle) {
<add> if (mCurrentActivity == null) {
<add> return false;
<add> }
<ide> mCurrentActivity.startActivityForResult(intent, code, bundle);
<add> return true;
<ide> }
<ide> } | 1 |
Go | Go | add failing testcase for single quotes in cmd | 9edf96782470deb15deec3be07e3988164454148 | <ide><path>integration-cli/docker_cli_build_test.go
<ide> func TestBuildExoticShellInterpolation(t *testing.T) {
<ide>
<ide> logDone("build - exotic shell interpolation")
<ide> }
<add>
<add>func TestBuildVerifySingleQuoteFails(t *testing.T) {
<add> // This testcase is supposed to generate an error because the
<add> // JSON array we're passing in on the CMD uses single quotes instead
<add> // of double quotes (per the JSON spec). This means we interpret it
<add> // as a "string" insead of "JSON array" and pass it on to "sh -c" and
<add> // it should barf on it.
<add> name := "testbuildsinglequotefails"
<add> defer deleteImages(name)
<add>
<add> _, err := buildImage(name,
<add> `FROM busybox
<add> CMD [ '/bin/sh', '-c', 'echo hi' ]`,
<add> true)
<add> _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", name))
<add>
<add> if err == nil {
<add> t.Fatal("The image was not supposed to be able to run")
<add> }
<add>
<add> logDone("build - verify single quotes fail")
<add>} | 1 |
PHP | PHP | update param type | 982481f4786648b5de3f6c27fb5795baa2d7ebe9 | <ide><path>src/Database/TypeMap.php
<ide> public function getTypes(): array
<ide> * the column type will be looked for inside the default mapping. If neither exist,
<ide> * null will be returned.
<ide> *
<del> * @param string $column The type for a given column
<add> * @param string|int $column The type for a given column
<ide> * @return string|null
<ide> */
<ide> public function type($column): ?string | 1 |
Go | Go | return closed channel if oom notification fails | 7061a993c5b620d6e68450f1b90f3458bfa1add0 | <ide><path>daemon/execdriver/native/driver.go
<ide> func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
<ide> startCallback(&c.ProcessConfig, pid)
<ide> }
<ide>
<del> oomKillNotification, err := cont.NotifyOOM()
<del> if err != nil {
<del> oomKillNotification = nil
<del> logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
<del> }
<add> oom := notifyOnOOM(cont)
<ide> waitF := p.Wait
<ide> if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
<ide> // we need such hack for tracking processes with inerited fds,
<ide> func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
<ide> ps = execErr.ProcessState
<ide> }
<ide> cont.Destroy()
<del>
<del> _, oomKill := <-oomKillNotification
<del>
<add> _, oomKill := <-oom
<ide> return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
<ide> }
<ide>
<add>// notifyOnOOM returns a channel that signals if the container received an OOM notification
<add>// for any process. If it is unable to subscribe to OOM notifications then a closed
<add>// channel is returned as it will be non-blocking and return the correct result when read.
<add>func notifyOnOOM(container libcontainer.Container) <-chan struct{} {
<add> oom, err := container.NotifyOOM()
<add> if err != nil {
<add> logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
<add> c := make(chan struct{})
<add> close(c)
<add> return c
<add> }
<add> return oom
<add>}
<add>
<ide> func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) {
<ide> return func() (*os.ProcessState, error) {
<ide> pid, err := p.Pid() | 1 |
Text | Text | add introduction to machine learning article | 90a9f27440b9edb7d64b0afa79ad9bf9f072db97 | <ide><path>guide/english/machine-learning/backpropagation/index.md
<ide> This article should only be understood in the greater contexts of neural network
<ide>
<ide> **In-depth:**
<ide> * Lecture 4 CS231n [Introduction to Neural Networks](https://youtu.be/d14TUNcbn1k?t=354)
<add>* [Free Code Camps Introduction to Machine Learning] (https://guide.freecodecamp.org/machine-learning/)
<ide> * [In depth, wiki style article](https://brilliant.org/wiki/backpropagation/)
<ide> * [Article on computation graphs](http://colah.github.io/posts/2015-08-Backprop/)
<ide> * [A Step by Step Backpropagation Example](https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/) | 1 |
Go | Go | skip privileged tests when non-root | e8648fa19f065de4a4cffcd48d6525dfc4edc11a | <ide><path>plugin/manager_linux_test.go
<ide> func (e *executorWithRunning) Signal(id string, signal int) error {
<ide> }
<ide>
<ide> func TestPluginAlreadyRunningOnStartup(t *testing.T) {
<add> skip.If(t, os.Getuid() != 0, "skipping test that requires root")
<ide> t.Parallel()
<ide>
<ide> root, err := ioutil.TempDir("", t.Name()) | 1 |
Javascript | Javascript | check window before using navigator | 0f7cc2ba845e34d7a312819e27bbfa88cdb7d49f | <ide><path>packages/react-events/src/Press.js
<ide> type PressEvent = {|
<ide> shiftKey: boolean,
<ide> |};
<ide>
<del>const isMac = /^Mac/.test(navigator.platform);
<add>const isMac =
<add> typeof window !== 'undefined' && window.navigator != null
<add> ? /^Mac/.test(window.navigator.platform)
<add> : false;
<ide> const DEFAULT_PRESS_END_DELAY_MS = 0;
<ide> const DEFAULT_PRESS_START_DELAY_MS = 0;
<ide> const DEFAULT_LONG_PRESS_DELAY_MS = 500; | 1 |
Text | Text | fix node versions in changelog's v8 paragraph | 0a86241840e0e5c839724da3268417b87ad69100 | <ide><path>CHANGELOG.md
<ide> repository and therefore can be seen as an extension to v0.11.
<ide>
<ide> ### General
<ide>
<del>- The V8 JavaScript engine bundled with io.js was upgraded dramatically, from version 3.14.5 in Node.js v0.10.45 and 3.26.33 in Node.js v0.10.14 to 3.31.74.1 for io.js v1.0.0. This brings along many fixes, performance improvements, as well as additional support for new ES6 language features! For more information on this, check out [the io.js ES6 page](https://iojs.org/es6.html).
<add>- The V8 JavaScript engine bundled with io.js was upgraded dramatically, from version 3.14.5 in Node.js v0.10.35 and 3.26.33 in Node.js v0.11.14 to 3.31.74.1 for io.js v1.0.0. This brings along many fixes and performance improvements, as well as additional support for new ES6 language features! For more information on this, check out [the io.js ES6 page](https://iojs.org/es6.html).
<ide> - Other bundled technologies were upgraded:
<ide> - libuv: 0.10.30 to 1.2.0
<ide> - http_parser: 1.0 to 2.3 | 1 |
Text | Text | remove wait period for npm pull requests | 5f9794ea9aaaf5597974a65460e1f3069f68475f | <ide><path>doc/guides/maintaining-npm.md
<ide> New pull requests should be opened when a "next" version of npm has
<ide> been released. Once the "next" version has been promoted to "latest"
<ide> the PR should be updated as necessary.
<ide>
<del>One week after the "latest" release has been promoted, it can land on master
<del>assuming no major regressions are found. There are no additional constraints
<del>for Semver-Major releases.
<del>
<ide> The specific Node.js release streams the new version will be able to land into
<ide> are at the discretion of the release and LTS teams.
<ide> | 1 |
PHP | PHP | ignore missing methods | ba60590aa0e62a1c0ee271c0008d73d127cc4feb | <ide><path>src/Illuminate/Support/Facades/Facade.php
<ide> protected static function createFreshMockInstance($name)
<ide>
<ide> $mock->shouldAllowMockingProtectedMethods();
<ide>
<add> $mock->shouldIgnoreMissing();
<add>
<ide> if (isset(static::$app)) {
<ide> static::$app->instance($name, $mock);
<ide> } | 1 |
Python | Python | add bilateral filter | c1a4cc96c8028d786af151f4177e2ef54250186e | <ide><path>digital_image_processing/filters/bilateral_filter.py
<add>"""
<add>Implementation of Bilateral filter
<add>
<add>Inputs:
<add> img: A 2d image with values in between 0 and 1
<add> varS: variance in space dimension.
<add> varI: variance in Intensity.
<add> N: Kernel size(Must be an odd number)
<add>Output:
<add> img:A 2d zero padded image with values in between 0 and 1
<add>"""
<add>
<add>import cv2
<add>import numpy as np
<add>import math
<add>import sys
<add>
<add>
<add>def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
<add> # For applying gaussian function for each element in matrix.
<add> sigma = math.sqrt(variance)
<add> cons = 1 / (sigma * math.sqrt(2 * math.pi))
<add> return cons * np.exp(-((img / sigma) ** 2) * 0.5)
<add>
<add>
<add>def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
<add> half = kernel_size // 2
<add> return img[x - half : x + half + 1, y - half : y + half + 1]
<add>
<add>
<add>def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
<add> # Creates a gaussian kernel of given dimension.
<add> arr = np.zeros((kernel_size, kernel_size))
<add> for i in range(0, kernel_size):
<add> for j in range(0, kernel_size):
<add> arr[i, j] = math.sqrt(
<add> abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
<add> )
<add> return vec_gaussian(arr, spatial_variance)
<add>
<add>
<add>def bilateral_filter(
<add> img: np.ndarray,
<add> spatial_variance: float,
<add> intensity_variance: float,
<add> kernel_size: int,
<add>) -> np.ndarray:
<add> img2 = np.zeros(img.shape)
<add> gaussKer = get_gauss_kernel(kernel_size, spatial_variance)
<add> sizeX, sizeY = img.shape
<add> for i in range(kernel_size // 2, sizeX - kernel_size // 2):
<add> for j in range(kernel_size // 2, sizeY - kernel_size // 2):
<add>
<add> imgS = get_slice(img, i, j, kernel_size)
<add> imgI = imgS - imgS[kernel_size // 2, kernel_size // 2]
<add> imgIG = vec_gaussian(imgI, intensity_variance)
<add> weights = np.multiply(gaussKer, imgIG)
<add> vals = np.multiply(imgS, weights)
<add> val = np.sum(vals) / np.sum(weights)
<add> img2[i, j] = val
<add> return img2
<add>
<add>
<add>def parse_args(args: list) -> tuple:
<add> filename = args[1] if args[1:] else "../image_data/lena.jpg"
<add> spatial_variance = float(args[2]) if args[2:] else 1.0
<add> intensity_variance = float(args[3]) if args[3:] else 1.0
<add> if args[4:]:
<add> kernel_size = int(args[4])
<add> kernel_size = kernel_size + abs(kernel_size % 2 - 1)
<add> else:
<add> kernel_size = 5
<add> return filename, spatial_variance, intensity_variance, kernel_size
<add>
<add>
<add>if __name__ == "__main__":
<add> filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
<add> img = cv2.imread(filename, 0)
<add> cv2.imshow("input image", img)
<add>
<add> out = img / 255
<add> out = out.astype("float32")
<add> out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
<add> out = out * 255
<add> out = np.uint8(out)
<add> cv2.imshow("output image", out)
<add> cv2.waitKey(0)
<add> cv2.destroyAllWindows() | 1 |
Javascript | Javascript | add a comment explaining replace action | 7a16fed9cbc4e720eb2ea13adda46f5ee5ed1b13 | <ide><path>src/createStore.js
<ide> export default function createStore(reducer, preloadedState, enhancer) {
<ide> }
<ide>
<ide> currentReducer = nextReducer
<add>
<add> // This action has a similiar effect to ActionTypes.INIT.
<add> // Any reducers that existed in both the new and old rootReducer
<add> // will receive the previous state. This effectively populates
<add> // the new state tree with any relevant data from the old one.
<ide> dispatch({ type: ActionTypes.REPLACE })
<ide> }
<ide> | 1 |
Ruby | Ruby | fix broken rmdir script | 7a22cda8ddc5dcd8304beab7767132bd4dcdafb9 | <ide><path>Library/Homebrew/cask/pkg.rb
<ide> def special?(path)
<ide>
<ide> sig { params(path: T.any(Pathname, T::Array[Pathname])).void }
<ide> def rmdir(path)
<add> return unless path.exist?
<add>
<ide> @command.run!(
<ide> "/usr/bin/xargs",
<ide> args: ["-0", "--", "/bin/bash", "-c", RMDIR_SH, "--"], | 1 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.