content_type (stringclasses, 8 values) | main_lang (stringclasses, 7 values) | message (string, 1-50 chars) | sha (string, 40 chars) | patch (string, 52-962k chars) | file_count (int64, 1-300)
---|---|---|---|---|---
Javascript | Javascript | add mailmap support for co-authored-by tags | 713046d9eb12344a6b36846fe8b1a986cbb5fe97 | <ide><path>tools/update-authors.js
<ide> // Passing --dry will redirect output to stdout rather than write to 'AUTHORS'.
<ide> 'use strict';
<ide> const { spawn } = require('child_process');
<add>const path = require('path');
<ide> const fs = require('fs');
<ide> const readline = require('readline');
<ide>
<ide> else
<ide>
<ide> output.write('# Authors ordered by first contribution.\n\n');
<ide>
<add>const mailmap = new Map();
<add>{
<add> const lines = fs.readFileSync(path.resolve(__dirname, '../', '.mailmap'),
<add> { encoding: 'utf8' }).split('\n');
<add> for (let line of lines) {
<add> line = line.trim();
<add> if (line.startsWith('#') || line === '') continue;
<add>
<add> let match;
<add> // Replaced Name <[email protected]>
<add> if (match = line.match(/^([^<]+)\s+(<[^>]+>)$/)) {
<add> mailmap.set(match[2], { author: match[1] });
<add> // <[email protected]> <[email protected]>
<add> } else if (match = line.match(/^<([^>]+)>\s+(<[^>]+>)$/)) {
<add> mailmap.set(match[2], { email: match[1] });
<add> // Replaced Name <[email protected]> <[email protected]>
<add> } else if (match = line.match(/^([^<]+)\s+(<[^>]+>)\s+(<[^>]+>)$/)) {
<add> mailmap.set(match[3], {
<add> author: match[1], email: match[2]
<add> });
<add> // Replaced Name <[email protected]> Original Name <[email protected]>
<add> } else if (match =
<add> line.match(/^([^<]+)\s+(<[^>]+>)\s+([^<]+)\s+(<[^>]+>)$/)) {
<add> mailmap.set(match[3] + '\0' + match[4], {
<add> author: match[1], email: match[2]
<add> });
<add> } else {
<add> console.warn('Unknown .mailmap format:', line);
<add> }
<add> }
<add>}
<add>
<ide> const seen = new Set();
<ide>
<ide> // Support regular git author metadata, as well as `Author:` and
<ide> rl.on('line', (line) => {
<ide> const match = line.match(authorRe);
<ide> if (!match) return;
<ide>
<del> const { author, email } = match.groups;
<add> let { author, email } = match.groups;
<add>
<add> const replacement = mailmap.get(author + '\0' + email) || mailmap.get(email);
<add> if (replacement) {
<add> ({ author, email } = { author, email, ...replacement });
<add> }
<add>
<ide> if (seen.has(email) ||
<ide> /@chromium\.org/.test(email) ||
<ide> email === '<[email protected]>') { | 1 |
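The row above keys its `Map` on the original email, or on original name + `'\0'` + original email for the four-field form, and overlays the replacement with object spread. A minimal standalone sketch of that lookup logic; the entries here are hypothetical sample data, not Node's real `.mailmap`:

```js
// Standalone sketch of the lookup built in the patch above.
const mailmap = new Map();

// "Proper Name <[email protected]> Old Name <[email protected]>" form:
// keyed on original name + '\0' + original email.
mailmap.set('Old Name\0<[email protected]>',
            { author: 'Proper Name', email: '<[email protected]>' });
// "<[email protected]> <[email protected]>" form: keyed on the original
// email alone (the patch stores this capture without angle brackets).
mailmap.set('<[email protected]>', { email: '[email protected]' });

function canonicalize(author, email) {
  // Most specific match first (name + email), then email-only.
  const replacement = mailmap.get(author + '\0' + email) || mailmap.get(email);
  // Spread so only the fields present in the replacement are overridden.
  return replacement ? { author, email, ...replacement } : { author, email };
}

console.log(canonicalize('Old Name', '<[email protected]>'));
// => { author: 'Proper Name', email: '<[email protected]>' }
```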
Javascript | Javascript | convert presentation mode to es6 syntax | ccdc7ba3c8b5c4a89703f907e153b43c590888d7 | <ide><path>web/pdf_presentation_mode.js
<ide>
<ide> import { normalizeWheelEventDelta } from './ui_utils';
<ide>
<del>var DELAY_BEFORE_RESETTING_SWITCH_IN_PROGRESS = 1500; // in ms
<del>var DELAY_BEFORE_HIDING_CONTROLS = 3000; // in ms
<del>var ACTIVE_SELECTOR = 'pdfPresentationMode';
<del>var CONTROLS_SELECTOR = 'pdfPresentationModeControls';
<add>const DELAY_BEFORE_RESETTING_SWITCH_IN_PROGRESS = 1500; // in ms
<add>const DELAY_BEFORE_HIDING_CONTROLS = 3000; // in ms
<add>const ACTIVE_SELECTOR = 'pdfPresentationMode';
<add>const CONTROLS_SELECTOR = 'pdfPresentationModeControls';
<add>const MOUSE_SCROLL_COOLDOWN_TIME = 50; // in ms
<add>const PAGE_SWITCH_THRESHOLD = 0.1;
<add>
<add>// Number of CSS pixels for a movement to count as a swipe.
<add>const SWIPE_MIN_DISTANCE_THRESHOLD = 50;
<add>
<add>// Swipe angle deviation from the x or y axis before it is not
<add>// considered a swipe in that direction any more.
<add>const SWIPE_ANGLE_THRESHOLD = Math.PI / 6;
<ide>
<ide> /**
<ide> * @typedef {Object} PDFPresentationModeOptions
<ide> var CONTROLS_SELECTOR = 'pdfPresentationModeControls';
<ide> * to the context menu in Presentation Mode.
<ide> */
<ide>
<del>/**
<del> * @class
<del> */
<del>var PDFPresentationMode = (function PDFPresentationModeClosure() {
<add>class PDFPresentationMode {
<ide> /**
<del> * @constructs PDFPresentationMode
<ide> * @param {PDFPresentationModeOptions} options
<ide> */
<del> function PDFPresentationMode(options) {
<add> constructor(options) {
<ide> this.container = options.container;
<ide> this.viewer = options.viewer || options.container.firstElementChild;
<ide> this.pdfViewer = options.pdfViewer;
<ide> var PDFPresentationMode = (function PDFPresentationModeClosure() {
<ide> this.touchSwipeState = null;
<ide>
<ide> if (contextMenuItems) {
<del> contextMenuItems.contextFirstPage.addEventListener('click',
<del> function PDFPresentationMode_contextFirstPageClick(e) {
<add> contextMenuItems.contextFirstPage.addEventListener('click', () => {
<ide> this.contextMenuOpen = false;
<ide> this.eventBus.dispatch('firstpage');
<del> }.bind(this));
<del> contextMenuItems.contextLastPage.addEventListener('click',
<del> function PDFPresentationMode_contextLastPageClick(e) {
<add> });
<add> contextMenuItems.contextLastPage.addEventListener('click', () => {
<ide> this.contextMenuOpen = false;
<ide> this.eventBus.dispatch('lastpage');
<del> }.bind(this));
<del> contextMenuItems.contextPageRotateCw.addEventListener('click',
<del> function PDFPresentationMode_contextPageRotateCwClick(e) {
<add> });
<add> contextMenuItems.contextPageRotateCw.addEventListener('click', () => {
<ide> this.contextMenuOpen = false;
<ide> this.eventBus.dispatch('rotatecw');
<del> }.bind(this));
<del> contextMenuItems.contextPageRotateCcw.addEventListener('click',
<del> function PDFPresentationMode_contextPageRotateCcwClick(e) {
<add> });
<add> contextMenuItems.contextPageRotateCcw.addEventListener('click', () => {
<ide> this.contextMenuOpen = false;
<ide> this.eventBus.dispatch('rotateccw');
<del> }.bind(this));
<add> });
<ide> }
<ide> }
<ide>
<del> PDFPresentationMode.prototype = {
<del> /**
<del> * Request the browser to enter fullscreen mode.
<del> * @returns {boolean} Indicating if the request was successful.
<del> */
<del> request: function PDFPresentationMode_request() {
<del> if (this.switchInProgress || this.active ||
<del> !this.viewer.hasChildNodes()) {
<del> return false;
<del> }
<del> this._addFullscreenChangeListeners();
<del> this._setSwitchInProgress();
<del> this._notifyStateChange();
<add> /**
<add> * Request the browser to enter fullscreen mode.
<add> * @returns {boolean} Indicating if the request was successful.
<add> */
<add> request() {
<add> if (this.switchInProgress || this.active || !this.viewer.hasChildNodes()) {
<add> return false;
<add> }
<add> this._addFullscreenChangeListeners();
<add> this._setSwitchInProgress();
<add> this._notifyStateChange();
<add>
<add> if (this.container.requestFullscreen) {
<add> this.container.requestFullscreen();
<add> } else if (this.container.mozRequestFullScreen) {
<add> this.container.mozRequestFullScreen();
<add> } else if (this.container.webkitRequestFullscreen) {
<add> this.container.webkitRequestFullscreen(Element.ALLOW_KEYBOARD_INPUT);
<add> } else if (this.container.msRequestFullscreen) {
<add> this.container.msRequestFullscreen();
<add> } else {
<add> return false;
<add> }
<ide>
<del> if (this.container.requestFullscreen) {
<del> this.container.requestFullscreen();
<del> } else if (this.container.mozRequestFullScreen) {
<del> this.container.mozRequestFullScreen();
<del> } else if (this.container.webkitRequestFullscreen) {
<del> this.container.webkitRequestFullscreen(Element.ALLOW_KEYBOARD_INPUT);
<del> } else if (this.container.msRequestFullscreen) {
<del> this.container.msRequestFullscreen();
<del> } else {
<del> return false;
<del> }
<add> this.args = {
<add> page: this.pdfViewer.currentPageNumber,
<add> previousScale: this.pdfViewer.currentScaleValue,
<add> };
<ide>
<del> this.args = {
<del> page: this.pdfViewer.currentPageNumber,
<del> previousScale: this.pdfViewer.currentScaleValue,
<del> };
<add> return true;
<add> }
<ide>
<del> return true;
<del> },
<add> /**
<add> * @private
<add> */
<add> _mouseWheel(evt) {
<add> if (!this.active) {
<add> return;
<add> }
<ide>
<del> /**
<del> * @private
<del> */
<del> _mouseWheel: function PDFPresentationMode_mouseWheel(evt) {
<del> if (!this.active) {
<del> return;
<add> evt.preventDefault();
<add>
<add> var delta = normalizeWheelEventDelta(evt);
<add> var currentTime = (new Date()).getTime();
<add> var storedTime = this.mouseScrollTimeStamp;
<add>
<add> // If we've already switched page, avoid accidentally switching again.
<add> if (currentTime > storedTime &&
<add> currentTime - storedTime < MOUSE_SCROLL_COOLDOWN_TIME) {
<add> return;
<add> }
<add> // If the scroll direction changed, reset the accumulated scroll delta.
<add> if ((this.mouseScrollDelta > 0 && delta < 0) ||
<add> (this.mouseScrollDelta < 0 && delta > 0)) {
<add> this._resetMouseScrollState();
<add> }
<add> this.mouseScrollDelta += delta;
<add>
<add> if (Math.abs(this.mouseScrollDelta) >= PAGE_SWITCH_THRESHOLD) {
<add> var totalDelta = this.mouseScrollDelta;
<add> this._resetMouseScrollState();
<add> var success = totalDelta > 0 ? this._goToPreviousPage()
<add> : this._goToNextPage();
<add> if (success) {
<add> this.mouseScrollTimeStamp = currentTime;
<ide> }
<add> }
<add> }
<ide>
<del> evt.preventDefault();
<add> get isFullscreen() {
<add> return !!(document.fullscreenElement || document.mozFullScreen ||
<add> document.webkitIsFullScreen || document.msFullscreenElement);
<add> }
<ide>
<del> var delta = normalizeWheelEventDelta(evt);
<add> /**
<add> * @private
<add> */
<add> _goToPreviousPage() {
<add> var page = this.pdfViewer.currentPageNumber;
<add> // If we're at the first page, we don't need to do anything.
<add> if (page <= 1) {
<add> return false;
<add> }
<add> this.pdfViewer.currentPageNumber = (page - 1);
<add> return true;
<add> }
<ide>
<del> var MOUSE_SCROLL_COOLDOWN_TIME = 50;
<del> var PAGE_SWITCH_THRESHOLD = 0.1;
<add> /**
<add> * @private
<add> */
<add> _goToNextPage() {
<add> var page = this.pdfViewer.currentPageNumber;
<add> // If we're at the last page, we don't need to do anything.
<add> if (page >= this.pdfViewer.pagesCount) {
<add> return false;
<add> }
<add> this.pdfViewer.currentPageNumber = (page + 1);
<add> return true;
<add> }
<ide>
<del> var currentTime = (new Date()).getTime();
<del> var storedTime = this.mouseScrollTimeStamp;
<add> /**
<add> * @private
<add> */
<add> _notifyStateChange() {
<add> this.eventBus.dispatch('presentationmodechanged', {
<add> source: this,
<add> active: this.active,
<add> switchInProgress: !!this.switchInProgress,
<add> });
<add> }
<ide>
<del> // If we've already switched page, avoid accidentally switching again.
<del> if (currentTime > storedTime &&
<del> currentTime - storedTime < MOUSE_SCROLL_COOLDOWN_TIME) {
<del> return;
<del> }
<del> // If the scroll direction changed, reset the accumulated scroll delta.
<del> if ((this.mouseScrollDelta > 0 && delta < 0) ||
<del> (this.mouseScrollDelta < 0 && delta > 0)) {
<del> this._resetMouseScrollState();
<del> }
<del> this.mouseScrollDelta += delta;
<del>
<del> if (Math.abs(this.mouseScrollDelta) >= PAGE_SWITCH_THRESHOLD) {
<del> var totalDelta = this.mouseScrollDelta;
<del> this._resetMouseScrollState();
<del> var success = totalDelta > 0 ? this._goToPreviousPage()
<del> : this._goToNextPage();
<del> if (success) {
<del> this.mouseScrollTimeStamp = currentTime;
<del> }
<del> }
<del> },
<del>
<del> get isFullscreen() {
<del> return !!(document.fullscreenElement ||
<del> document.mozFullScreen ||
<del> document.webkitIsFullScreen ||
<del> document.msFullscreenElement);
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _goToPreviousPage: function PDFPresentationMode_goToPreviousPage() {
<del> var page = this.pdfViewer.currentPageNumber;
<del> // If we're at the first page, we don't need to do anything.
<del> if (page <= 1) {
<del> return false;
<del> }
<del> this.pdfViewer.currentPageNumber = (page - 1);
<del> return true;
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _goToNextPage: function PDFPresentationMode_goToNextPage() {
<del> var page = this.pdfViewer.currentPageNumber;
<del> // If we're at the last page, we don't need to do anything.
<del> if (page >= this.pdfViewer.pagesCount) {
<del> return false;
<del> }
<del> this.pdfViewer.currentPageNumber = (page + 1);
<del> return true;
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _notifyStateChange: function PDFPresentationMode_notifyStateChange() {
<del> this.eventBus.dispatch('presentationmodechanged', {
<del> source: this,
<del> active: this.active,
<del> switchInProgress: !!this.switchInProgress
<del> });
<del> },
<del>
<del> /**
<del> * Used to initialize a timeout when requesting Presentation Mode,
<del> * i.e. when the browser is requested to enter fullscreen mode.
<del> * This timeout is used to prevent the current page from being scrolled
<del> * partially, or completely, out of view when entering Presentation Mode.
<del> * NOTE: This issue seems limited to certain zoom levels (e.g. page-width).
<del> * @private
<del> */
<del> _setSwitchInProgress: function PDFPresentationMode_setSwitchInProgress() {
<del> if (this.switchInProgress) {
<del> clearTimeout(this.switchInProgress);
<del> }
<del> this.switchInProgress = setTimeout(function switchInProgressTimeout() {
<del> this._removeFullscreenChangeListeners();
<del> delete this.switchInProgress;
<del> this._notifyStateChange();
<del> }.bind(this), DELAY_BEFORE_RESETTING_SWITCH_IN_PROGRESS);
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _resetSwitchInProgress:
<del> function PDFPresentationMode_resetSwitchInProgress() {
<del> if (this.switchInProgress) {
<del> clearTimeout(this.switchInProgress);
<del> delete this.switchInProgress;
<del> }
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _enter: function PDFPresentationMode_enter() {
<del> this.active = true;
<del> this._resetSwitchInProgress();
<add> /**
<add> * Used to initialize a timeout when requesting Presentation Mode,
<add> * i.e. when the browser is requested to enter fullscreen mode.
<add> * This timeout is used to prevent the current page from being scrolled
<add> * partially, or completely, out of view when entering Presentation Mode.
<add> * NOTE: This issue seems limited to certain zoom levels (e.g. page-width).
<add> *
<add> * @private
<add> */
<add> _setSwitchInProgress() {
<add> if (this.switchInProgress) {
<add> clearTimeout(this.switchInProgress);
<add> }
<add> this.switchInProgress = setTimeout(() => {
<add> this._removeFullscreenChangeListeners();
<add> delete this.switchInProgress;
<ide> this._notifyStateChange();
<del> this.container.classList.add(ACTIVE_SELECTOR);
<add> }, DELAY_BEFORE_RESETTING_SWITCH_IN_PROGRESS);
<add> }
<ide>
<del> // Ensure that the correct page is scrolled into view when entering
<del> // Presentation Mode, by waiting until fullscreen mode in enabled.
<del> setTimeout(function enterPresentationModeTimeout() {
<del> this.pdfViewer.currentPageNumber = this.args.page;
<del> this.pdfViewer.currentScaleValue = 'page-fit';
<del> }.bind(this), 0);
<add> /**
<add> * @private
<add> */
<add> _resetSwitchInProgress() {
<add> if (this.switchInProgress) {
<add> clearTimeout(this.switchInProgress);
<add> delete this.switchInProgress;
<add> }
<add> }
<ide>
<del> this._addWindowListeners();
<del> this._showControls();
<del> this.contextMenuOpen = false;
<del> this.container.setAttribute('contextmenu', 'viewerContextMenu');
<del>
<del> // Text selection is disabled in Presentation Mode, thus it's not possible
<del> // for the user to deselect text that is selected (e.g. with "Select all")
<del> // when entering Presentation Mode, hence we remove any active selection.
<del> window.getSelection().removeAllRanges();
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _exit: function PDFPresentationMode_exit() {
<del> var page = this.pdfViewer.currentPageNumber;
<del> this.container.classList.remove(ACTIVE_SELECTOR);
<del>
<del> // Ensure that the correct page is scrolled into view when exiting
<del> // Presentation Mode, by waiting until fullscreen mode is disabled.
<del> setTimeout(function exitPresentationModeTimeout() {
<del> this.active = false;
<del> this._removeFullscreenChangeListeners();
<del> this._notifyStateChange();
<del>
<del> this.pdfViewer.currentScaleValue = this.args.previousScale;
<del> this.pdfViewer.currentPageNumber = page;
<del> this.args = null;
<del> }.bind(this), 0);
<del>
<del> this._removeWindowListeners();
<del> this._hideControls();
<del> this._resetMouseScrollState();
<del> this.container.removeAttribute('contextmenu');
<del> this.contextMenuOpen = false;
<del> },
<add> /**
<add> * @private
<add> */
<add> _enter() {
<add> this.active = true;
<add> this._resetSwitchInProgress();
<add> this._notifyStateChange();
<add> this.container.classList.add(ACTIVE_SELECTOR);
<add>
<add> // Ensure that the correct page is scrolled into view when entering
<add> // Presentation Mode, by waiting until fullscreen mode is enabled.
<add> setTimeout(() => {
<add> this.pdfViewer.currentPageNumber = this.args.page;
<add> this.pdfViewer.currentScaleValue = 'page-fit';
<add> }, 0);
<add>
<add> this._addWindowListeners();
<add> this._showControls();
<add> this.contextMenuOpen = false;
<add> this.container.setAttribute('contextmenu', 'viewerContextMenu');
<ide>
<del> /**
<del> * @private
<del> */
<del> _mouseDown: function PDFPresentationMode_mouseDown(evt) {
<del> if (this.contextMenuOpen) {
<del> this.contextMenuOpen = false;
<add> // Text selection is disabled in Presentation Mode, thus it's not possible
<add> // for the user to deselect text that is selected (e.g. with "Select all")
<add> // when entering Presentation Mode, hence we remove any active selection.
<add> window.getSelection().removeAllRanges();
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _exit() {
<add> var page = this.pdfViewer.currentPageNumber;
<add> this.container.classList.remove(ACTIVE_SELECTOR);
<add>
<add> // Ensure that the correct page is scrolled into view when exiting
<add> // Presentation Mode, by waiting until fullscreen mode is disabled.
<add> setTimeout(() => {
<add> this.active = false;
<add> this._removeFullscreenChangeListeners();
<add> this._notifyStateChange();
<add>
<add> this.pdfViewer.currentScaleValue = this.args.previousScale;
<add> this.pdfViewer.currentPageNumber = page;
<add> this.args = null;
<add> }, 0);
<add>
<add> this._removeWindowListeners();
<add> this._hideControls();
<add> this._resetMouseScrollState();
<add> this.container.removeAttribute('contextmenu');
<add> this.contextMenuOpen = false;
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _mouseDown(evt) {
<add> if (this.contextMenuOpen) {
<add> this.contextMenuOpen = false;
<add> evt.preventDefault();
<add> return;
<add> }
<add> if (evt.button === 0) {
<add> // Enable clicking of links in presentation mode. Note: only links
<add> // pointing to destinations in the current PDF document work.
<add> var isInternalLink = (evt.target.href &&
<add> evt.target.classList.contains('internalLink'));
<add> if (!isInternalLink) {
<add> // Unless an internal link was clicked, advance one page.
<ide> evt.preventDefault();
<del> return;
<del> }
<del> if (evt.button === 0) {
<del> // Enable clicking of links in presentation mode. Please note:
<del> // Only links pointing to destinations in the current PDF document work.
<del> var isInternalLink = (evt.target.href &&
<del> evt.target.classList.contains('internalLink'));
<del> if (!isInternalLink) {
<del> // Unless an internal link was clicked, advance one page.
<del> evt.preventDefault();
<del> this.pdfViewer.currentPageNumber += (evt.shiftKey ? -1 : 1);
<del> }
<del> }
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _contextMenu: function PDFPresentationMode_contextMenu() {
<del> this.contextMenuOpen = true;
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _showControls: function PDFPresentationMode_showControls() {
<del> if (this.controlsTimeout) {
<del> clearTimeout(this.controlsTimeout);
<del> } else {
<del> this.container.classList.add(CONTROLS_SELECTOR);
<del> }
<del> this.controlsTimeout = setTimeout(function showControlsTimeout() {
<del> this.container.classList.remove(CONTROLS_SELECTOR);
<del> delete this.controlsTimeout;
<del> }.bind(this), DELAY_BEFORE_HIDING_CONTROLS);
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _hideControls: function PDFPresentationMode_hideControls() {
<del> if (!this.controlsTimeout) {
<del> return;
<add> this.pdfViewer.currentPageNumber += (evt.shiftKey ? -1 : 1);
<ide> }
<add> }
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _contextMenu() {
<add> this.contextMenuOpen = true;
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _showControls() {
<add> if (this.controlsTimeout) {
<ide> clearTimeout(this.controlsTimeout);
<add> } else {
<add> this.container.classList.add(CONTROLS_SELECTOR);
<add> }
<add> this.controlsTimeout = setTimeout(() => {
<ide> this.container.classList.remove(CONTROLS_SELECTOR);
<ide> delete this.controlsTimeout;
<del> },
<del>
<del> /**
<del> * Resets the properties used for tracking mouse scrolling events.
<del> * @private
<del> */
<del> _resetMouseScrollState:
<del> function PDFPresentationMode_resetMouseScrollState() {
<del> this.mouseScrollTimeStamp = 0;
<del> this.mouseScrollDelta = 0;
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _touchSwipe: function PDFPresentationMode_touchSwipe(evt) {
<del> if (!this.active) {
<del> return;
<del> }
<add> }, DELAY_BEFORE_HIDING_CONTROLS);
<add> }
<ide>
<del> // Must move at least these many CSS pixels for it to count as a swipe
<del> var SWIPE_MIN_DISTANCE_THRESHOLD = 50;
<del> // The swipe angle is allowed to deviate from the x or y axis by this much
<del> // before it is not considered a swipe in that direction any more.
<del> var SWIPE_ANGLE_THRESHOLD = Math.PI / 6;
<add> /**
<add> * @private
<add> */
<add> _hideControls() {
<add> if (!this.controlsTimeout) {
<add> return;
<add> }
<add> clearTimeout(this.controlsTimeout);
<add> this.container.classList.remove(CONTROLS_SELECTOR);
<add> delete this.controlsTimeout;
<add> }
<ide>
<del> if (evt.touches.length > 1) {
<del> // Multiple touch points detected, cancel the swipe.
<del> this.touchSwipeState = null;
<del> return;
<del> }
<del> switch (evt.type) {
<del> case 'touchstart':
<del> this.touchSwipeState = {
<del> startX: evt.touches[0].pageX,
<del> startY: evt.touches[0].pageY,
<del> endX: evt.touches[0].pageX,
<del> endY: evt.touches[0].pageY
<del> };
<del> break;
<del> case 'touchmove':
<del> if (this.touchSwipeState === null) {
<del> return;
<del> }
<del> this.touchSwipeState.endX = evt.touches[0].pageX;
<del> this.touchSwipeState.endY = evt.touches[0].pageY;
<del> // Do a preventDefault to avoid the swipe from triggering browser
<del> // gestures (Chrome in particular has some sort of swipe gesture in
<del> // fullscreen mode).
<del> evt.preventDefault();
<del> break;
<del> case 'touchend':
<del> if (this.touchSwipeState === null) {
<del> return;
<del> }
<del> var delta = 0;
<del> var dx = this.touchSwipeState.endX - this.touchSwipeState.startX;
<del> var dy = this.touchSwipeState.endY - this.touchSwipeState.startY;
<del> var absAngle = Math.abs(Math.atan2(dy, dx));
<del> if (Math.abs(dx) > SWIPE_MIN_DISTANCE_THRESHOLD &&
<del> (absAngle <= SWIPE_ANGLE_THRESHOLD ||
<del> absAngle >= (Math.PI - SWIPE_ANGLE_THRESHOLD))) {
<del> // horizontal swipe
<del> delta = dx;
<del> } else if (Math.abs(dy) > SWIPE_MIN_DISTANCE_THRESHOLD &&
<del> Math.abs(absAngle - (Math.PI / 2)) <= SWIPE_ANGLE_THRESHOLD) {
<del> // vertical swipe
<del> delta = dy;
<del> }
<del> if (delta > 0) {
<del> this._goToPreviousPage();
<del> } else if (delta < 0) {
<del> this._goToNextPage();
<del> }
<del> break;
<del> }
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _addWindowListeners: function PDFPresentationMode_addWindowListeners() {
<del> this.showControlsBind = this._showControls.bind(this);
<del> this.mouseDownBind = this._mouseDown.bind(this);
<del> this.mouseWheelBind = this._mouseWheel.bind(this);
<del> this.resetMouseScrollStateBind = this._resetMouseScrollState.bind(this);
<del> this.contextMenuBind = this._contextMenu.bind(this);
<del> this.touchSwipeBind = this._touchSwipe.bind(this);
<del>
<del> window.addEventListener('mousemove', this.showControlsBind);
<del> window.addEventListener('mousedown', this.mouseDownBind);
<del> window.addEventListener('wheel', this.mouseWheelBind);
<del> window.addEventListener('keydown', this.resetMouseScrollStateBind);
<del> window.addEventListener('contextmenu', this.contextMenuBind);
<del> window.addEventListener('touchstart', this.touchSwipeBind);
<del> window.addEventListener('touchmove', this.touchSwipeBind);
<del> window.addEventListener('touchend', this.touchSwipeBind);
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _removeWindowListeners:
<del> function PDFPresentationMode_removeWindowListeners() {
<del> window.removeEventListener('mousemove', this.showControlsBind);
<del> window.removeEventListener('mousedown', this.mouseDownBind);
<del> window.removeEventListener('wheel', this.mouseWheelBind);
<del> window.removeEventListener('keydown', this.resetMouseScrollStateBind);
<del> window.removeEventListener('contextmenu', this.contextMenuBind);
<del> window.removeEventListener('touchstart', this.touchSwipeBind);
<del> window.removeEventListener('touchmove', this.touchSwipeBind);
<del> window.removeEventListener('touchend', this.touchSwipeBind);
<del>
<del> delete this.showControlsBind;
<del> delete this.mouseDownBind;
<del> delete this.mouseWheelBind;
<del> delete this.resetMouseScrollStateBind;
<del> delete this.contextMenuBind;
<del> delete this.touchSwipeBind;
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _fullscreenChange: function PDFPresentationMode_fullscreenChange() {
<del> if (this.isFullscreen) {
<del> this._enter();
<del> } else {
<del> this._exit();
<del> }
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _addFullscreenChangeListeners:
<del> function PDFPresentationMode_addFullscreenChangeListeners() {
<del> this.fullscreenChangeBind = this._fullscreenChange.bind(this);
<del>
<del> window.addEventListener('fullscreenchange', this.fullscreenChangeBind);
<del> window.addEventListener('mozfullscreenchange', this.fullscreenChangeBind);
<del> if (typeof PDFJSDev === 'undefined' ||
<del> !PDFJSDev.test('FIREFOX || MOZCENTRAL')) {
<del> window.addEventListener('webkitfullscreenchange',
<del> this.fullscreenChangeBind);
<del> window.addEventListener('MSFullscreenChange',
<del> this.fullscreenChangeBind);
<del> }
<del> },
<del>
<del> /**
<del> * @private
<del> */
<del> _removeFullscreenChangeListeners:
<del> function PDFPresentationMode_removeFullscreenChangeListeners() {
<del> window.removeEventListener('fullscreenchange', this.fullscreenChangeBind);
<del> window.removeEventListener('mozfullscreenchange',
<del> this.fullscreenChangeBind);
<del> if (typeof PDFJSDev === 'undefined' ||
<del> !PDFJSDev.test('FIREFOX || MOZCENTRAL')) {
<del> window.removeEventListener('webkitfullscreenchange',
<del> this.fullscreenChangeBind);
<del> window.removeEventListener('MSFullscreenChange',
<del> this.fullscreenChangeBind);
<del> }
<add> /**
<add> * Resets the properties used for tracking mouse scrolling events.
<add> *
<add> * @private
<add> */
<add> _resetMouseScrollState() {
<add> this.mouseScrollTimeStamp = 0;
<add> this.mouseScrollDelta = 0;
<add> }
<ide>
<del> delete this.fullscreenChangeBind;
<add> /**
<add> * @private
<add> */
<add> _touchSwipe(evt) {
<add> if (!this.active) {
<add> return;
<add> }
<add> if (evt.touches.length > 1) {
<add> // Multiple touch points detected; cancel the swipe.
<add> this.touchSwipeState = null;
<add> return;
<add> }
<add>
<add> switch (evt.type) {
<add> case 'touchstart':
<add> this.touchSwipeState = {
<add> startX: evt.touches[0].pageX,
<add> startY: evt.touches[0].pageY,
<add> endX: evt.touches[0].pageX,
<add> endY: evt.touches[0].pageY,
<add> };
<add> break;
<add> case 'touchmove':
<add> if (this.touchSwipeState === null) {
<add> return;
<add> }
<add> this.touchSwipeState.endX = evt.touches[0].pageX;
<add> this.touchSwipeState.endY = evt.touches[0].pageY;
<add> // Prevent the swipe from triggering browser gestures (Chrome in
<add> // particular has some sort of swipe gesture in fullscreen mode).
<add> evt.preventDefault();
<add> break;
<add> case 'touchend':
<add> if (this.touchSwipeState === null) {
<add> return;
<add> }
<add> var delta = 0;
<add> var dx = this.touchSwipeState.endX - this.touchSwipeState.startX;
<add> var dy = this.touchSwipeState.endY - this.touchSwipeState.startY;
<add> var absAngle = Math.abs(Math.atan2(dy, dx));
<add> if (Math.abs(dx) > SWIPE_MIN_DISTANCE_THRESHOLD &&
<add> (absAngle <= SWIPE_ANGLE_THRESHOLD ||
<add> absAngle >= (Math.PI - SWIPE_ANGLE_THRESHOLD))) {
<add> // Horizontal swipe.
<add> delta = dx;
<add> } else if (Math.abs(dy) > SWIPE_MIN_DISTANCE_THRESHOLD &&
<add> Math.abs(absAngle - (Math.PI / 2)) <= SWIPE_ANGLE_THRESHOLD) {
<add> // Vertical swipe.
<add> delta = dy;
<add> }
<add> if (delta > 0) {
<add> this._goToPreviousPage();
<add> } else if (delta < 0) {
<add> this._goToNextPage();
<add> }
<add> break;
<add> }
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _addWindowListeners() {
<add> this.showControlsBind = this._showControls.bind(this);
<add> this.mouseDownBind = this._mouseDown.bind(this);
<add> this.mouseWheelBind = this._mouseWheel.bind(this);
<add> this.resetMouseScrollStateBind = this._resetMouseScrollState.bind(this);
<add> this.contextMenuBind = this._contextMenu.bind(this);
<add> this.touchSwipeBind = this._touchSwipe.bind(this);
<add>
<add> window.addEventListener('mousemove', this.showControlsBind);
<add> window.addEventListener('mousedown', this.mouseDownBind);
<add> window.addEventListener('wheel', this.mouseWheelBind);
<add> window.addEventListener('keydown', this.resetMouseScrollStateBind);
<add> window.addEventListener('contextmenu', this.contextMenuBind);
<add> window.addEventListener('touchstart', this.touchSwipeBind);
<add> window.addEventListener('touchmove', this.touchSwipeBind);
<add> window.addEventListener('touchend', this.touchSwipeBind);
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _removeWindowListeners() {
<add> window.removeEventListener('mousemove', this.showControlsBind);
<add> window.removeEventListener('mousedown', this.mouseDownBind);
<add> window.removeEventListener('wheel', this.mouseWheelBind);
<add> window.removeEventListener('keydown', this.resetMouseScrollStateBind);
<add> window.removeEventListener('contextmenu', this.contextMenuBind);
<add> window.removeEventListener('touchstart', this.touchSwipeBind);
<add> window.removeEventListener('touchmove', this.touchSwipeBind);
<add> window.removeEventListener('touchend', this.touchSwipeBind);
<add>
<add> delete this.showControlsBind;
<add> delete this.mouseDownBind;
<add> delete this.mouseWheelBind;
<add> delete this.resetMouseScrollStateBind;
<add> delete this.contextMenuBind;
<add> delete this.touchSwipeBind;
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _fullscreenChange() {
<add> if (this.isFullscreen) {
<add> this._enter();
<add> } else {
<add> this._exit();
<add> }
<add> }
<add>
<add> /**
<add> * @private
<add> */
<add> _addFullscreenChangeListeners() {
<add> this.fullscreenChangeBind = this._fullscreenChange.bind(this);
<add>
<add> window.addEventListener('fullscreenchange', this.fullscreenChangeBind);
<add> window.addEventListener('mozfullscreenchange', this.fullscreenChangeBind);
<add> if (typeof PDFJSDev === 'undefined' ||
<add> !PDFJSDev.test('FIREFOX || MOZCENTRAL')) {
<add> window.addEventListener('webkitfullscreenchange',
<add> this.fullscreenChangeBind);
<add> window.addEventListener('MSFullscreenChange',
<add> this.fullscreenChangeBind);
<ide> }
<del> };
<add> }
<ide>
<del> return PDFPresentationMode;
<del>})();
<add> /**
<add> * @private
<add> */
<add> _removeFullscreenChangeListeners() {
<add> window.removeEventListener('fullscreenchange', this.fullscreenChangeBind);
<add> window.removeEventListener('mozfullscreenchange',
<add> this.fullscreenChangeBind);
<add> if (typeof PDFJSDev === 'undefined' ||
<add> !PDFJSDev.test('FIREFOX || MOZCENTRAL')) {
<add> window.removeEventListener('webkitfullscreenchange',
<add> this.fullscreenChangeBind);
<add> window.removeEventListener('MSFullscreenChange',
<add> this.fullscreenChangeBind);
<add> }
<add>
<add> delete this.fullscreenChangeBind;
<add> }
<add>}
<ide>
<ide> export {
<ide> PDFPresentationMode, | 1 |
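The conversion above is mechanical: the IIFE-wrapped constructor becomes a `class`, prototype-literal members become methods, and `function ... {}.bind(this)` callbacks become arrow functions that capture `this` lexically. The same recipe in miniature (a hypothetical example, not pdf.js code):

```js
// Before: the pre-ES6 closure/prototype shape being deleted above.
var Counter = (function CounterClosure() {
  function Counter(start) {
    this.value = start;
  }
  Counter.prototype = {
    incrementLater: function Counter_incrementLater() {
      setTimeout(function() {
        this.value++;
      }.bind(this), 0);
    },
  };
  return Counter;
})();

// After: an ES6 class; the arrow function captures `this`, so no .bind().
class CounterES6 {
  constructor(start) {
    this.value = start;
  }
  incrementLater() {
    setTimeout(() => {
      this.value++;
    }, 0);
  }
}

new CounterES6(0).incrementLater();
```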
PHP | PHP | add default timeout to notpwnedverifier | 3a0af0a42cc22b030089ecc794e0211da6f64650 | <ide><path>src/Illuminate/Validation/NotPwnedVerifier.php
<ide> class NotPwnedVerifier implements UncompromisedVerifier
<ide> */
<ide> protected $factory;
<ide>
<add> /**
<add> * The number of seconds the request can run before timing out.
<add> *
<add> * @var int
<add> */
<add> public $timeout;
<add>
<ide> /**
<ide> * Create a new uncompromised verifier.
<ide> *
<ide> * @param \Illuminate\Http\Client\Factory $factory
<add> * @param int|null $timeout
<ide> * @return void
<ide> */
<del> public function __construct($factory)
<add> public function __construct($factory, $timeout = null)
<ide> {
<ide> $this->factory = $factory;
<add> $this->timeout = $timeout ?? 15;
<ide> }
<ide>
<ide> /**
<ide> protected function search($hashPrefix)
<ide> try {
<ide> $response = $this->factory->withHeaders([
<ide> 'Add-Padding' => true,
<del> ])->get(
<add> ])->timeout($this->timeout)->get(
<ide> 'https://api.pwnedpasswords.com/range/'.$hashPrefix
<ide> );
<ide> } catch (Exception $e) {
<ide><path>tests/Validation/ValidationNotPwnedVerifierTest.php
<ide> public function testApiResponseGoesWrong()
<ide> ->with(['Add-Padding' => true])
<ide> ->andReturn($httpFactory);
<ide>
<add> $httpFactory
<add> ->shouldReceive('timeout')
<add> ->once()
<add> ->with(15)
<add> ->andReturn($httpFactory);
<add>
<ide> $httpFactory->shouldReceive('get')
<ide> ->once()
<ide> ->andReturn($response);
<ide> public function testApiGoesDown()
<ide> ->with(['Add-Padding' => true])
<ide> ->andReturn($httpFactory);
<ide>
<add> $httpFactory
<add> ->shouldReceive('timeout')
<add> ->once()
<add> ->with(15)
<add> ->andReturn($httpFactory);
<add>
<ide> $httpFactory->shouldReceive('get')
<ide> ->once()
<ide> ->andReturn($response);
<ide> public function testDnsDown()
<ide> ->with(['Add-Padding' => true])
<ide> ->andReturn($httpFactory);
<ide>
<add> $httpFactory
<add> ->shouldReceive('timeout')
<add> ->once()
<add> ->with(15)
<add> ->andReturn($httpFactory);
<add>
<ide> $httpFactory
<ide> ->shouldReceive('get')
<ide> ->once() | 2 |
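The PHP change above adds a 15-second default timeout to the Pwned Passwords range request. A hedged JavaScript sketch of the same k-anonymity lookup with a timeout, assuming Node 18+ for the global `fetch` and `AbortSignal.timeout()`; the endpoint is the real HIBP range API, but the error handling is simplified:

```js
const { createHash } = require('node:crypto');

// Check a password against the Pwned Passwords range API, aborting after
// `timeoutMs`, analogous to the verifier's new default timeout.
async function isPwned(password, timeoutMs = 15000) {
  const sha1 = createHash('sha1').update(password).digest('hex').toUpperCase();
  const prefix = sha1.slice(0, 5); // only the 5-char prefix leaves the machine
  const suffix = sha1.slice(5);
  const res = await fetch(`https://api.pwnedpasswords.com/range/${prefix}`, {
    headers: { 'Add-Padding': 'true' },
    signal: AbortSignal.timeout(timeoutMs), // throws TimeoutError if exceeded
  });
  const body = await res.text();
  // Each response line is "SUFFIX:COUNT"; a match with count > 0 is a hit.
  return body.split('\n').some((line) => {
    const [hash, count] = line.trim().split(':');
    return hash === suffix && Number(count) > 0;
  });
}

isPwned('password').then((pwned) => console.log(pwned)).catch(console.error);
```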
Java | Java | add test for combination of fixed date fields | 72895f081026df7e0b34807729d9cdea6c7ff4ec | <ide><path>spring-context/src/test/java/org/springframework/scheduling/support/CronExpressionTests.java
<ide> void monthSequence() {
<ide> assertThat(expression.next(last)).isEqualTo(expected);
<ide> }
<ide>
<add> @Test
<add> public void fixedDays() {
<add> CronExpression expression = CronExpression.parse("0 0 0 29 2 WED");
<add>
<add> LocalDateTime last = LocalDateTime.of(2012, 2, 29, 1, 0);
<add> assertThat(last.getDayOfWeek()).isEqualTo(WEDNESDAY);
<add>
<add> LocalDateTime actual = expression.next(last);
<add> assertThat(actual).isNotNull();
<add> assertThat(actual.getDayOfMonth()).isEqualTo(29);
<add> assertThat(actual.getDayOfWeek()).isEqualTo(WEDNESDAY);
<add> }
<add>
<ide> } | 1 |
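The new test pins down what happens when both day-of-month and day-of-week are fixed: `0 0 0 29 2 WED` must land on a Feb 29 that is also a Wednesday, so the two fields intersect rather than union. A brute-force JavaScript sketch of that intersection semantics (a day-by-day scan, not Spring's field-adjuster algorithm):

```js
// Find the next date after `from` that is Feb 29 AND a Wednesday.
function nextFeb29Wednesday(from) {
  const d = new Date(from);
  d.setUTCHours(0, 0, 0, 0);
  for (let i = 0; i < 366 * 40; i++) { // leap-day Wednesdays recur within decades
    d.setUTCDate(d.getUTCDate() + 1);  // always advance, like next() in the test
    if (d.getUTCMonth() === 1 && d.getUTCDate() === 29 && d.getUTCDay() === 3) {
      return new Date(d);
    }
  }
  return null;
}

// 2012-02-29 was a Wednesday (the `last` value in the test above).
console.log(nextFeb29Wednesday(Date.UTC(2012, 1, 29, 1)));
// => the next leap-day Wednesday, 2040-02-29
```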
Text | Text | fix http2 sample code for http2.md | 0eb72684518b74c0b0d7d773cb71986b7418cb9a | <ide><path>doc/api/http2.md
<ide> const {
<ide> HTTP2_HEADER_CONTENT_TYPE
<ide> } = http2.constants;
<ide>
<del>const server = http.createServer();
<add>const server = http2.createServer();
<ide> server.on('stream', (stream, headers, flags) => {
<ide> const method = headers[HTTP2_HEADER_METHOD];
<ide> const path = headers[HTTP2_HEADER_PATH];
<ide> const {
<ide>
<ide> const options = getOptionsSomehow();
<ide>
<del>const server = http.createSecureServer(options);
<add>const server = http2.createSecureServer(options);
<ide> server.on('stream', (stream, headers, flags) => {
<ide> const method = headers[HTTP2_HEADER_METHOD];
<ide> const path = headers[HTTP2_HEADER_PATH];
<ide> to [`response.writeHead()`][] given precedence.
<ide>
<ide> ```js
<ide> // returns content-type = text/plain
<del>const server = http.createServer((req, res) => {
<add>const server = http2.createServer((req, res) => {
<ide> res.setHeader('Content-Type', 'text/html');
<ide> res.setHeader('X-Foo', 'bar');
<ide> res.writeHead(200, { 'Content-Type': 'text/plain' });
<ide> via `response.connection`.
<ide> Example:
<ide>
<ide> ```js
<del>const http = require('http');
<del>const server = http.createServer((req, res) => {
<add>const http2 = require('http2');
<add>const server = http2.createServer((req, res) => {
<ide> const ip = req.socket.remoteAddress;
<ide> const port = req.socket.remotePort;
<ide> res.end(`Your IP address is ${ip} and your source port is ${port}.`); | 1 |
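The fix swaps the leftover `http.createServer` calls for `http2.createServer`; the two modules' server factories are not interchangeable. A minimal runnable core-API `http2` server along the lines of the corrected docs:

```js
const http2 = require('node:http2');
const {
  HTTP2_HEADER_METHOD,
  HTTP2_HEADER_PATH,
  HTTP2_HEADER_STATUS,
  HTTP2_HEADER_CONTENT_TYPE,
} = http2.constants;

// Unencrypted HTTP/2 (h2c); browsers require TLS, so test with:
//   curl --http2-prior-knowledge http://localhost:8080/
const server = http2.createServer();
server.on('stream', (stream, headers) => {
  const method = headers[HTTP2_HEADER_METHOD];
  const path = headers[HTTP2_HEADER_PATH];
  stream.respond({
    [HTTP2_HEADER_STATUS]: 200,
    [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain; charset=utf-8',
  });
  stream.end(`${method} ${path} over HTTP/2\n`);
});
server.listen(8080);
```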
Ruby | Ruby | remove meaningless use of relation#all | 3d1bc89ec8e6558bac81c4e9fce82b586f742629 | <ide><path>activerecord/lib/active_record/aggregations.rb
<ide> def clear_aggregation_cache #:nodoc:
<ide> # by specifying an instance of the value object in the conditions hash. The following example
<ide> # finds all customers with +balance_amount+ equal to 20 and +balance_currency+ equal to "USD":
<ide> #
<del> # Customer.where(balance: Money.new(20, "USD")).all
<add> # Customer.where(balance: Money.new(20, "USD"))
<ide> #
<ide> module ClassMethods
<ide> # Adds reader and writer methods for manipulating a value object:
<ide><path>activerecord/lib/active_record/associations.rb
<ide> def association_instance_set(name, association)
<ide> # * <tt>Project#project_manager, Project#project_manager=(project_manager), Project#project_manager.nil?,</tt>
<ide> # * <tt>Project#milestones.empty?, Project#milestones.size, Project#milestones, Project#milestones<<(milestone),</tt>
<ide> # <tt>Project#milestones.delete(milestone), Project#milestones.destroy(milestone), Project#milestones.find(milestone_id),</tt>
<del> # <tt>Project#milestones.all(options), Project#milestones.build, Project#milestones.create</tt>
<add> # <tt>Project#milestones.build, Project#milestones.create</tt>
<ide> # * <tt>Project#categories.empty?, Project#categories.size, Project#categories, Project#categories<<(category1),</tt>
<ide> # <tt>Project#categories.delete(category1), Project#categories.destroy(category1)</tt>
<ide> #
<ide> def association_instance_set(name, association)
<ide> # other than the main one. If this is the case Active Record falls back to the previously
<ide> # used LEFT OUTER JOIN based strategy. For example
<ide> #
<del> # Post.includes([:author, :comments]).where(['comments.approved = ?', true]).all
<add> # Post.includes([:author, :comments]).where(['comments.approved = ?', true])
<ide> #
<ide> # This will result in a single SQL query with joins along the lines of:
<ide> # <tt>LEFT OUTER JOIN comments ON comments.post_id = posts.id</tt> and
<ide><path>activerecord/lib/active_record/locking/pessimistic.rb
<ide> module Locking
<ide> #
<ide> # Account.transaction do
<ide> # # select * from accounts where ...
<del> # accounts = Account.where(...).all
<add> # accounts = Account.where(...)
<ide> # account1 = accounts.detect { |account| ... }
<ide> # account2 = accounts.detect { |account| ... }
<ide> # # select * from accounts where id=? for update | 3 |
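The `.all` calls could be dropped because an Active Record relation is already lazy and enumerable: chaining only builds the query, which runs when the result is iterated. A toy JavaScript sketch of that deferred-evaluation pattern (a hypothetical `Relation` class, not Rails' implementation):

```js
// Toy lazy query builder: `where` only records conditions; nothing "runs"
// until the relation is iterated, which is why `.where(...).all` was redundant.
class Relation {
  constructor(rows, conditions = []) {
    this.rows = rows;
    this.conditions = conditions;
  }
  where(predicate) {
    // Returns a new relation; no filtering happens yet.
    return new Relation(this.rows, [...this.conditions, predicate]);
  }
  *[Symbol.iterator]() {
    // Evaluation is deferred to iteration time.
    for (const row of this.rows) {
      if (this.conditions.every((cond) => cond(row))) yield row;
    }
  }
}

const customers = new Relation([{ balance: 20 }, { balance: 5 }]);
const rich = customers.where((c) => c.balance >= 20); // still lazy
console.log([...rich]); // the "query" runs here => [ { balance: 20 } ]
```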
Go | Go | remove unnecessary abstraction nlines | 02a021119fb2b3e051b98817831a8c1a8a9fd464 | <ide><path>integration-cli/docker_cli_rmi_test.go
<ide> func TestRmiTag(t *testing.T) {
<ide> dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3")
<ide> {
<ide> imagesAfter, _, _ := dockerCmd(t, "images", "-a")
<del> if nLines(imagesAfter) != nLines(imagesBefore)+3 {
<add> if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+3 {
<ide> t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
<ide> }
<ide> }
<ide> dockerCmd(t, "rmi", "utest/docker:tag2")
<ide> {
<ide> imagesAfter, _, _ := dockerCmd(t, "images", "-a")
<del> if nLines(imagesAfter) != nLines(imagesBefore)+2 {
<add> if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 {
<ide> t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
<ide> }
<ide>
<ide> }
<ide> dockerCmd(t, "rmi", "utest:5000/docker:tag3")
<ide> {
<ide> imagesAfter, _, _ := dockerCmd(t, "images", "-a")
<del> if nLines(imagesAfter) != nLines(imagesBefore)+1 {
<add> if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+1 {
<ide> t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
<ide> }
<ide>
<ide> }
<ide> dockerCmd(t, "rmi", "utest:tag1")
<ide> {
<ide> imagesAfter, _, _ := dockerCmd(t, "images", "-a")
<del> if nLines(imagesAfter) != nLines(imagesBefore)+0 {
<add> if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+0 {
<ide> t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
<ide> }
<ide>
<ide><path>integration-cli/utils.go
<ide> func stripTrailingCharacters(target string) string {
<ide> return target
<ide> }
<ide>
<del>func nLines(s string) int {
<del> return strings.Count(s, "\n")
<del>}
<del>
<ide> func unmarshalJSON(data []byte, result interface{}) error {
<ide> err := json.Unmarshal(data, result)
<ide> if err != nil { | 2 |
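`nLines` added nothing over calling `strings.Count(s, "\n")` directly, so inlining it removes a layer of indirection. The same judgment call expressed in JavaScript terms (hypothetical helper):

```js
// A wrapper that only renames a well-known one-liner adds indirection:
function nLines(s) {
  return s.split('\n').length - 1;
}

// Inlined, the intent stays just as clear at the call site:
const out = 'line1\nline2\n';
console.log(out.split('\n').length - 1); // 2
console.log(nLines(out));                // 2 (same result, one extra hop)
```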
Javascript | Javascript | add timeout option to abort request | 5487bdb3d1c905fb9453644f7e290c75dcee14c1 | <ide><path>src/service/browser.js
<ide> function Browser(window, document, body, XHR, $log, $sniffer) {
<ide> * <li><tt>X-Requested-With</tt>: <tt>XMLHttpRequest</tt></li>
<ide> * </ul>
<ide> *
<add> * @param {number=} timeout Timeout in ms, when the request will be aborted
<ide> * @returns {XMLHttpRequest|undefined} Raw XMLHttpRequest object or undefined when JSONP method
<ide> *
<ide> * @description
<ide> * Send ajax request
<ide> *
<ide> * TODO(vojta): change signature of this method to (method, url, data, headers, callback)
<ide> */
<del> self.xhr = function(method, url, post, callback, headers) {
<add> self.xhr = function(method, url, post, callback, headers, timeout) {
<ide> outstandingRequestCount ++;
<ide> if (lowercase(method) == 'jsonp') {
<ide> var callbackId = ("angular_" + Math.random() + '_' + (idCounter++)).replace(/\d\./, '');
<ide> function Browser(window, document, body, XHR, $log, $sniffer) {
<ide> if (value) xhr.setRequestHeader(key, value);
<ide> });
<ide>
<add> var status;
<ide> xhr.send(post || '');
<ide>
<ide> // IE6, IE7 bug - does sync when serving from cache
<ide> if (xhr.readyState == 4) {
<ide> setTimeout(function() {
<del> completeOutstandingRequest(callback, fixStatus(xhr.status), xhr.responseText);
<add> completeOutstandingRequest(callback, fixStatus(status || xhr.status), xhr.responseText);
<ide> }, 0);
<ide> } else {
<ide> xhr.onreadystatechange = function() {
<ide> if (xhr.readyState == 4) {
<del> completeOutstandingRequest(callback, fixStatus(xhr.status), xhr.responseText);
<add> completeOutstandingRequest(callback, fixStatus(status || xhr.status),
<add> xhr.responseText);
<ide> }
<ide> };
<ide> }
<ide>
<add> if (timeout > 0) {
<add> setTimeout(function() {
<add> status = -1;
<add> xhr.abort();
<add> }, timeout);
<add> }
<add>
<ide> return xhr;
<ide> }
<ide> };
<ide><path>test/service/browserSpecs.js
<ide> describe('browser', function() {
<ide> expect(browser.xhr('GET', '/url', null, noop)).toBe(xhr);
<ide> });
<ide>
<add> it('should abort request on timeout', function() {
<add> var callback = jasmine.createSpy('done').andCallFake(function(status, response) {
<add> expect(status).toBe(-1);
<add> });
<add>
<add> browser.xhr('GET', '/url', null, callback, {}, 2000);
<add> xhr.abort = jasmine.createSpy('xhr.abort');
<add>
<add> fakeWindow.setTimeout.flush();
<add> expect(xhr.abort).toHaveBeenCalledOnce();
<add>
<add> xhr.status = 0;
<add> xhr.readyState = 4;
<add> xhr.onreadystatechange();
<add> expect(callback).toHaveBeenCalledOnce();
<add> });
<add>
<ide> it('should be async even if xhr.send() is sync', function() {
<ide> // IE6, IE7 is sync when serving from cache
<ide> var xhr; | 2 |
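The patch implements the timeout by scheduling an `xhr.abort()` and latching `status = -1` so the completion callback can tell a timeout apart from a normal failure. The same shape is written today with `AbortController`; a hedged sketch (a hypothetical wrapper, not Angular's `browser.xhr`), assuming Node 18+ or a browser with `fetch`:

```js
// Fetch with a timeout, reporting -1 as a sentinel status on abort,
// mirroring how the patched xhr() overrides xhr.status after abort().
async function fetchWithTimeout(url, timeoutMs) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const res = await fetch(url, { signal: controller.signal });
    return { status: res.status, body: await res.text() };
  } catch (err) {
    if (err.name === 'AbortError') {
      return { status: -1, body: null }; // timed out, like status = -1 above
    }
    throw err;
  } finally {
    clearTimeout(timer); // don't leave the timer pending on success
  }
}

fetchWithTimeout('https://example.com/', 2000).then(console.log);
```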
Python | Python | add separable conv2d for cntk | 06eaeebecfb73c23bfd531013ca172ee3bf5069c | <ide><path>keras/backend/cntk_backend.py
<ide> def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1,
<ide>
<ide> def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
<ide> padding='valid', data_format=None, dilation_rate=(1, 1)):
<del> raise NotImplementedError
<add> if data_format is None:
<add> data_format = image_data_format()
<add> if data_format not in {'channels_first', 'channels_last'}:
<add> raise ValueError('Unknown data_format ' + str(data_format))
<add>
<add> x = _preprocess_conv2d_input(x, data_format)
<add> depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
<add> depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
<add> (-1, 1) + depthwise_kernel.shape[2:])
<add> pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
<add> padding = _preprocess_border_mode(padding)
<add>
<add> if dilation_rate == (1, 1):
<add> strides = (1,) + strides
<add> x = C.convolution(depthwise_kernel, x,
<add> strides=strides,
<add> auto_padding=[False, padding, padding],
<add> groups=x.shape[0])
<add> x = C.convolution(pointwise_kernel, x,
<add> strides=(1, 1, 1),
<add> auto_padding=[False])
<add> else:
<add> if dilation_rate[0] != dilation_rate[1]:
<add> raise ValueError('CNTK Backend: non-square dilation_rate is '
<add> 'not supported.')
<add> if strides != (1, 1):
<add> raise ValueError('Invalid strides for dilated convolution')
<add> x = C.convolution(depthwise_kernel, x,
<add> strides=dilation_rate[0],
<add> auto_padding=[False, padding, padding])
<add> x = C.convolution(pointwise_kernel, x,
<add> strides=(1, 1, 1),
<add> auto_padding=[False])
<add> return _postprocess_conv2d_output(x, data_format)
<ide>
<ide>
<ide> def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',
<ide><path>tests/keras/backend/backend_test.py
<ide> def cntk_func_two_tensor(function_name, x_shape, y, **kwargs):
<ide> return KC.function([xc, yc], [output_cntk])
<ide>
<ide>
<add>def cntk_func_three_tensor(function_name, x_shape, y, z, **kwargs):
<add> xc = KC.placeholder(x_shape)
<add> output_cntk = getattr(KC, function_name)(xc, KC.variable(y), KC.variable(z), **kwargs)
<add> return KC.function([xc], [output_cntk])
<add>
<add>
<ide> def parse_shape_or_val(shape_or_val):
<ide> if isinstance(shape_or_val, np.ndarray):
<ide> return shape_or_val.shape, shape_or_val
<ide> def test_conv3d(self):
<ide> with pytest.raises(ValueError):
<ide> k.conv3d(k.variable(xval), k.variable(kernel_val), data_format='channels_middle')
<ide>
<add> def test_separable_conv2d(self):
<add> for (input_shape, data_format) in [((2, 3, 4, 5), 'channels_first'),
<add> ((2, 3, 5, 6), 'channels_first'),
<add> ((1, 6, 5, 3), 'channels_last')]:
<add> input_depth = input_shape[1] if data_format == 'channels_first' else input_shape[-1]
<add> _, x_val = parse_shape_or_val(input_shape)
<add> x_tf = KTF.variable(x_val)
<add> for kernel_shape in [(2, 2), (4, 3)]:
<add> for depth_multiplier in [1, 2]:
<add> _, depthwise_val = parse_shape_or_val(kernel_shape + (input_depth, depth_multiplier))
<add> _, pointwise_val = parse_shape_or_val((1, 1) + (input_depth * depth_multiplier, 7))
<add>
<add> z_tf = KTF.eval(KTF.separable_conv2d(x_tf, KTF.variable(depthwise_val),
<add> KTF.variable(pointwise_val),
<add> data_format=data_format))
<add> z_c = cntk_func_three_tensor('separable_conv2d', input_shape,
<add> depthwise_val,
<add> pointwise_val,
<add> data_format=data_format)([x_val])[0]
<add> assert_allclose(z_tf, z_c, 1e-3)
<add>
<add> # Test invalid use cases
<add> for k in [KTF, KC]:
<add> with pytest.raises(ValueError):
<add> k.separable_conv2d(k.variable(x_val),
<add> k.variable(depthwise_val),
<add> k.variable(pointwise_val),
<add> data_format='channels_middle')
<add>
<ide> @pytest.mark.parametrize('k', [KTF], ids=['TensorFlow'])
<ide> def test_depthwise_conv_2d(self, k):
<ide> for data_format in ['channels_first', 'channels_last']:
<ide><path>tests/keras/layers/convolutional_test.py
<ide> def test_separable_conv_1d():
<ide> batch_input_shape=(None, 5, None))])
<ide>
<ide>
<del>@pytest.mark.skipif(K.backend() != 'tensorflow', reason='Requires TF backend')
<add>@pytest.mark.skipif(K.backend() == 'theano', reason='Theano does not support it yet')
<ide> @keras_test
<ide> def test_separable_conv_2d():
<ide> num_samples = 2
<ide> def test_separable_conv_2d():
<ide> continue
<ide> if dilation_rate != (1, 1) and strides != (1, 1):
<ide> continue
<add> if dilation_rate != (1, 1) and K.backend() == 'cntk':
<add> continue
<ide>
<ide> layer_test(convolutional.SeparableConv2D,
<ide> kwargs={'filters': filters, | 3 |
Javascript | Javascript | add comments to ff rgb format fix | ddff0f642acbf650b53d1ddffe544900569ab983 | <ide><path>examples/js/SimulationRenderer.js
<ide> function SimulationRenderer( WIDTH, renderer ) {
<ide>
<ide> }
<ide>
<del> var texture = new THREE.DataTexture( a, WIDTH, WIDTH, THREE.RGBAFormat, THREE.FloatType );
<add> var texture = new THREE.DataTexture( a, WIDTH, WIDTH, THREE.RGBAFormat, THREE.FloatType ); // was RGB format. changed to RGBA format. see discussion in #8415 / #8450
<ide> texture.needsUpdate = true;
<ide>
<ide> return texture;
<ide><path>examples/js/postprocessing/AdaptiveToneMappingPass.js
<ide> THREE.AdaptiveToneMappingPass.prototype = {
<ide> this.previousLuminanceRT.dispose();
<ide>
<ide> }
<del> var pars = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat };
<add>
<add> var pars = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat }; // was RGB format. changed to RGBA format. see discussion in #8415 / #8450
<ide>
<ide> this.luminanceRT = new THREE.WebGLRenderTarget( this.resolution, this.resolution, pars );
<ide> this.luminanceRT.texture.generateMipmaps = false; | 2 |
Javascript | Javascript | move dev-only flags to only exist on composites | e01bf78a79b3562f2f6563b150b9c6affee5f2b9 | <ide><path>src/renderers/shared/reconciler/ReactCompositeComponent.js
<ide> var ReactCompositeComponentMixin = {
<ide>
<ide> // ComponentWillUnmount shall only be called once
<ide> this._calledComponentWillUnmount = false;
<add>
<add> if (__DEV__) {
<add> this._warnedAboutRefsInRender = false;
<add> }
<ide> },
<ide>
<ide> /**
<ide><path>src/renderers/shared/reconciler/instantiateReactComponent.js
<ide> function instantiateReactComponent(node) {
<ide> instance._mountIndex = 0;
<ide> instance._mountImage = null;
<ide>
<del> if (__DEV__) {
<del> instance._isOwnerNecessary = false;
<del> instance._warnedAboutRefsInRender = false;
<del> }
<del>
<ide> if (__DEV__) {
<ide> var debugID = isEmpty ? 0 : nextDebugID++;
<ide> instance._debugID = debugID; | 2 |
Javascript | Javascript | use correct tooltip events in each chart | a10e245e5ad743686f691d549b2ec3257c7650cb | <ide><path>src/Chart.Doughnut.js
<ide>
<ide> //Set up tooltip events on the chart
<ide> if (this.options.showTooltips) {
<del> helpers.bindEvents(this, this.options.tooltipEvents, this.onHover);
<add> helpers.bindEvents(this, this.options.events, this.onHover);
<ide> }
<ide>
<ide> // Create new slice for each piece of data
<ide><path>src/Chart.PolarArea.js
<ide>
<ide> //Set up tooltip events on the chart
<ide> if (this.options.showTooltips){
<del> helpers.bindEvents(this, this.options.tooltipEvents, function(evt){
<add> helpers.bindEvents(this, this.options.events, function(evt){
<ide> var activeSegments = (evt.type !== 'mouseout') ? this.getSegmentsAtEvent(evt) : [];
<ide> helpers.each(this.segments,function(segment){
<ide> segment.restore(["fillColor"]);
<ide><path>src/Chart.Radar.js
<ide>
<ide> //Set up tooltip events on the chart
<ide> if (this.options.showTooltips){
<del> helpers.bindEvents(this, this.options.tooltipEvents, function(evt){
<add> helpers.bindEvents(this, this.options.events, function(evt){
<ide> var activePointsCollection = (evt.type !== 'mouseout') ? this.getPointsAtEvent(evt) : [];
<ide>
<ide> this.eachPoints(function(point){
<ide><path>src/Chart.Scatter.js
<ide> });
<ide>
<ide> // Events
<del> helpers.bindEvents(this, this.options.tooltipEvents, this.events);
<add> helpers.bindEvents(this, this.options.events, this.events);
<ide>
<ide> // Build Scale
<ide> this.buildScale(); | 4 |
PHP | PHP | remove components constant | af505df885affc9f4f4a14be4616ae848de1b829 | <ide><path>lib/Cake/bootstrap.php
<ide> */
<ide> define('BEHAVIORS', MODELS.'Behavior'.DS);
<ide>
<del>/**
<del> * Path to the application's components directory.
<del> */
<del> define('COMPONENTS', CONTROLLERS.'Component'.DS);
<del>
<ide> /**
<ide> * Path to the application's libs directory.
<ide> */ | 1 |
Ruby | Ruby | pluralize rerun snippet heading | 7e9775bdb03f2f6648fe58958645fd8e31b5c79b | <ide><path>railties/lib/rails/test_unit/reporter.rb
<ide> class TestUnitReporter < Minitest::StatisticsReporter
<ide> def report
<ide> return if passed?
<ide> io.puts
<del> io.puts "Failed test:"
<add> io.puts "Failed tests:"
<ide> io.puts
<ide> io.puts aggregated_results
<ide> end | 1 |
Java | Java | introduce base class for responsebodyresulthandler | 1b308cffbf5a6d6b4e71b8b991ae698f822ab5f1 | <ide><path>spring-web-reactive/src/main/java/org/springframework/web/reactive/result/ContentNegotiatingResultHandlerSupport.java
<add>/*
<add> * Copyright 2002-2016 the original author or authors.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License");
<add> * you may not use this file except in compliance with the License.
<add> * You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software
<add> * distributed under the License is distributed on an "AS IS" BASIS,
<add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add> * See the License for the specific language governing permissions and
<add> * limitations under the License.
<add> */
<add>package org.springframework.web.reactive.result;
<add>
<add>import java.util.ArrayList;
<add>import java.util.Collections;
<add>import java.util.Comparator;
<add>import java.util.LinkedHashSet;
<add>import java.util.List;
<add>import java.util.Optional;
<add>import java.util.Set;
<add>
<add>import org.springframework.core.Ordered;
<add>import org.springframework.core.convert.ConversionService;
<add>import org.springframework.http.MediaType;
<add>import org.springframework.util.Assert;
<add>import org.springframework.web.reactive.HandlerMapping;
<add>import org.springframework.web.reactive.accept.RequestedContentTypeResolver;
<add>import org.springframework.web.server.ServerWebExchange;
<add>
<add>/**
<add> * Base class for {@link org.springframework.web.reactive.HandlerResultHandler
<add> * HandlerResultHandler} implementations that perform content negotiation.
<add> *
<add> * @author Rossen Stoyanchev
<add> */
<add>public abstract class ContentNegotiatingResultHandlerSupport implements Ordered {
<add>
<add> private static final MediaType MEDIA_TYPE_APPLICATION_ALL = new MediaType("application");
<add>
<add>
<add> private final ConversionService conversionService;
<add>
<add> private final RequestedContentTypeResolver contentTypeResolver;
<add>
<add> private int order = LOWEST_PRECEDENCE;
<add>
<add>
<add> protected ContentNegotiatingResultHandlerSupport(ConversionService conversionService,
<add> RequestedContentTypeResolver contentTypeResolver) {
<add>
<add> Assert.notNull(conversionService, "'conversionService' is required.");
<add> Assert.notNull(contentTypeResolver, "'contentTypeResolver' is required.");
<add> this.conversionService = conversionService;
<add> this.contentTypeResolver = contentTypeResolver;
<add> }
<add>
<add>
<add> /**
<add> * Return the configured {@link ConversionService}.
<add> */
<add> public ConversionService getConversionService() {
<add> return this.conversionService;
<add> }
<add>
<add> /**
<add> * Return the configured {@link RequestedContentTypeResolver}.
<add> */
<add> public RequestedContentTypeResolver getContentTypeResolver() {
<add> return this.contentTypeResolver;
<add> }
<add>
<add> /**
<add> * Set the order for this result handler relative to others.
<add> * <p>By default set to {@link Ordered#LOWEST_PRECEDENCE}, however see
<add> * Javadoc of sub-classes which may change this default.
<add> * @param order the order
<add> */
<add> public void setOrder(int order) {
<add> this.order = order;
<add> }
<add>
<add> @Override
<add> public int getOrder() {
<add> return this.order;
<add> }
<add>
<add>
<add> /**
<add> * Select the best media type for the current request through a content
<add> * negotiation algorithm.
<add> * @param exchange the current request
<add> * @param producibleTypes the media types that can be produced for the current request
<add> * @return the selected media type or {@code null}
<add> */
<add> protected MediaType selectMediaType(ServerWebExchange exchange, List<MediaType> producibleTypes) {
<add>
<add> List<MediaType> acceptableTypes = getAcceptableTypes(exchange);
<add> producibleTypes = getProducibleTypes(exchange, producibleTypes);
<add>
<add> Set<MediaType> compatibleMediaTypes = new LinkedHashSet<>();
<add> for (MediaType acceptable : acceptableTypes) {
<add> for (MediaType producible : producibleTypes) {
<add> if (acceptable.isCompatibleWith(producible)) {
<add> compatibleMediaTypes.add(selectMoreSpecificMediaType(acceptable, producible));
<add> }
<add> }
<add> }
<add>
<add> List<MediaType> result = new ArrayList<>(compatibleMediaTypes);
<add> MediaType.sortBySpecificityAndQuality(result);
<add>
<add> for (MediaType mediaType : compatibleMediaTypes) {
<add> if (mediaType.isConcrete()) {
<add> return mediaType;
<add> }
<add> else if (mediaType.equals(MediaType.ALL) || mediaType.equals(MEDIA_TYPE_APPLICATION_ALL)) {
<add> return MediaType.APPLICATION_OCTET_STREAM;
<add> }
<add> }
<add>
<add> return null;
<add> }
<add>
<add> private List<MediaType> getAcceptableTypes(ServerWebExchange exchange) {
<add> List<MediaType> mediaTypes = this.contentTypeResolver.resolveMediaTypes(exchange);
<add> return (mediaTypes.isEmpty() ? Collections.singletonList(MediaType.ALL) : mediaTypes);
<add> }
<add>
<add> private List<MediaType> getProducibleTypes(ServerWebExchange exchange, List<MediaType> mediaTypes) {
<add> Optional<?> optional = exchange.getAttribute(HandlerMapping.PRODUCIBLE_MEDIA_TYPES_ATTRIBUTE);
<add> if (optional.isPresent()) {
<add> Set<MediaType> set = (Set<MediaType>) optional.get();
<add> return new ArrayList<>(set);
<add> }
<add> return mediaTypes;
<add> }
<add>
<add> private MediaType selectMoreSpecificMediaType(MediaType acceptable, MediaType producible) {
<add> producible = producible.copyQualityValue(acceptable);
<add> Comparator<MediaType> comparator = MediaType.SPECIFICITY_COMPARATOR;
<add> return (comparator.compare(acceptable, producible) <= 0 ? acceptable : producible);
<add> }
<add>
<add>}
<ide><path>spring-web-reactive/src/main/java/org/springframework/web/reactive/result/method/annotation/ResponseBodyResultHandler.java
<ide>
<ide> package org.springframework.web.reactive.result.method.annotation;
<ide>
<del>import java.util.ArrayList;
<del>import java.util.Collections;
<del>import java.util.Comparator;
<del>import java.util.LinkedHashSet;
<ide> import java.util.List;
<ide> import java.util.Optional;
<del>import java.util.Set;
<ide> import java.util.stream.Collectors;
<ide>
<ide> import org.reactivestreams.Publisher;
<ide> import org.springframework.util.Assert;
<ide> import org.springframework.web.bind.annotation.ResponseBody;
<ide> import org.springframework.web.method.HandlerMethod;
<del>import org.springframework.web.reactive.HandlerMapping;
<ide> import org.springframework.web.reactive.HandlerResult;
<ide> import org.springframework.web.reactive.HandlerResultHandler;
<ide> import org.springframework.web.reactive.accept.HeaderContentTypeResolver;
<ide> import org.springframework.web.reactive.accept.RequestedContentTypeResolver;
<add>import org.springframework.web.reactive.result.ContentNegotiatingResultHandlerSupport;
<ide> import org.springframework.web.server.NotAcceptableStatusException;
<ide> import org.springframework.web.server.ServerWebExchange;
<ide>
<ide> * with {@code @ResponseBody} writing to the body of the request or response with
<ide> * an {@link HttpMessageConverter}.
<ide> *
<add> * <p>By default the order for the result handler is set to 0. It is generally
<add> * safe and expected it will be ordered ahead of other result handlers since it
<add> * only gets involved based on the presence of an {@code @ResponseBody}
<add> * annotation.
<add> *
<ide> * @author Rossen Stoyanchev
<ide> * @author Stephane Maldini
<ide> * @author Sebastien Deleuze
<ide> * @author Arjen Poutsma
<ide> */
<del>public class ResponseBodyResultHandler implements HandlerResultHandler, Ordered {
<del>
<del> private static final MediaType MEDIA_TYPE_APPLICATION_ALL = new MediaType("application");
<add>public class ResponseBodyResultHandler extends ContentNegotiatingResultHandlerSupport
<add> implements HandlerResultHandler, Ordered {
<ide>
<ide> private final List<HttpMessageConverter<?>> messageConverters;
<ide>
<del> private final ConversionService conversionService;
<del>
<del> private final RequestedContentTypeResolver contentTypeResolver;
<del>
<del> private final List<MediaType> supportedMediaTypes;
<del>
<del> private int order = 0;
<del>
<ide>
<ide> /**
<ide> * Constructor with message converters and a {@code ConversionService} only
<ide> public ResponseBodyResultHandler(List<HttpMessageConverter<?>> converters,
<ide> * Constructor with message converters, a {@code ConversionService}, and a
<ide> * {@code RequestedContentTypeResolver}.
<ide> *
<del> * @param messageConverters converters for writing the response body with
<add> * @param converters converters for writing the response body with
<ide> * @param conversionService for converting other reactive types (e.g.
<ide> * rx.Observable, rx.Single, etc.) to Flux or Mono
<add> * @param contentTypeResolver for resolving the requested content type
<ide> */
<del> public ResponseBodyResultHandler(List<HttpMessageConverter<?>> messageConverters,
<add> public ResponseBodyResultHandler(List<HttpMessageConverter<?>> converters,
<ide> ConversionService conversionService, RequestedContentTypeResolver contentTypeResolver) {
<ide>
<del> Assert.notEmpty(messageConverters, "At least one message converter is required.");
<del> Assert.notNull(conversionService, "'conversionService' is required.");
<del> Assert.notNull(contentTypeResolver, "'contentTypeResolver' is required.");
<del>
<del> this.messageConverters = messageConverters;
<del> this.conversionService = conversionService;
<del> this.contentTypeResolver = contentTypeResolver;
<del> this.supportedMediaTypes = initSupportedMediaTypes(messageConverters);
<del> }
<del>
<del> private static List<MediaType> initSupportedMediaTypes(List<HttpMessageConverter<?>> converters) {
<del> Set<MediaType> set = new LinkedHashSet<>();
<del> converters.forEach(converter -> set.addAll(converter.getWritableMediaTypes()));
<del> List<MediaType> result = new ArrayList<>(set);
<del> MediaType.sortBySpecificity(result);
<del> return Collections.unmodifiableList(result);
<del> }
<del>
<del>
<del> /**
<del> * Set the order for this result handler relative to others.
<del> * <p>By default this is set to 0 and is generally save to be ahead of other
<del> * result handlers since it only gets involved if the method (or class) is
<del> * annotated with {@code @ResponseBody}.
<del> * @param order the order
<del> */
<del> public void setOrder(int order) {
<del> this.order = order;
<del> }
<del>
<del> @Override
<del> public int getOrder() {
<del> return this.order;
<add> super(conversionService, contentTypeResolver);
<add> Assert.notEmpty(converters, "At least one message converter is required.");
<add> this.messageConverters = converters;
<add> setOrder(0);
<ide> }
<ide>
<ide>
<ide> public Mono<Void> handleResult(ServerWebExchange exchange, HandlerResult result)
<ide> ResolvableType elementType;
<ide> ResolvableType returnType = result.getReturnValueType();
<ide>
<del> if (this.conversionService.canConvert(returnType.getRawClass(), Publisher.class)) {
<add> if (getConversionService().canConvert(returnType.getRawClass(), Publisher.class)) {
<ide> Optional<Object> optionalValue = result.getReturnValue();
<ide> if (optionalValue.isPresent()) {
<del> publisher = this.conversionService.convert(optionalValue.get(), Publisher.class);
<add> publisher = getConversionService().convert(optionalValue.get(), Publisher.class);
<ide> }
<ide> else {
<ide> publisher = Mono.empty();
<ide> public Mono<Void> handleResult(ServerWebExchange exchange, HandlerResult result)
<ide> elementType = returnType;
<ide> }
<ide>
<del> List<MediaType> compatibleMediaTypes = getCompatibleMediaTypes(exchange, elementType);
<del> if (compatibleMediaTypes.isEmpty()) {
<del> if (result.getReturnValue().isPresent()) {
<del> List<MediaType> mediaTypes = getProducibleMediaTypes(exchange, elementType);
<del> return Mono.error(new NotAcceptableStatusException(mediaTypes));
<del> }
<del> return Mono.empty();
<del> }
<add> List<MediaType> producibleTypes = getProducibleMediaTypes(elementType);
<add> MediaType bestMediaType = selectMediaType(exchange, producibleTypes);
<ide>
<del> MediaType bestMediaType = selectBestMediaType(compatibleMediaTypes);
<ide> if (bestMediaType != null) {
<ide> for (HttpMessageConverter<?> converter : this.messageConverters) {
<ide> if (converter.canWrite(elementType, bestMediaType)) {
<ide> public Mono<Void> handleResult(ServerWebExchange exchange, HandlerResult result)
<ide> }
<ide> }
<ide>
<del> return Mono.error(new NotAcceptableStatusException(this.supportedMediaTypes));
<add> return Mono.error(new NotAcceptableStatusException(producibleTypes));
<ide> }
<ide>
<del> private List<MediaType> getCompatibleMediaTypes(ServerWebExchange exchange,
<del> ResolvableType elementType) {
<del>
<del> List<MediaType> acceptableMediaTypes = getAcceptableMediaTypes(exchange);
<del> List<MediaType> producibleMediaTypes = getProducibleMediaTypes(exchange, elementType);
<del>
<del> Set<MediaType> compatibleMediaTypes = new LinkedHashSet<>();
<del> for (MediaType acceptable : acceptableMediaTypes) {
<del> for (MediaType producible : producibleMediaTypes) {
<del> if (acceptable.isCompatibleWith(producible)) {
<del> compatibleMediaTypes.add(selectMoreSpecificMediaType(acceptable, producible));
<del> }
<del> }
<del> }
<del>
<del> List<MediaType> result = new ArrayList<>(compatibleMediaTypes);
<del> MediaType.sortBySpecificityAndQuality(result);
<del> return result;
<del> }
<del>
<del> private List<MediaType> getAcceptableMediaTypes(ServerWebExchange exchange) {
<del> List<MediaType> mediaTypes = this.contentTypeResolver.resolveMediaTypes(exchange);
<del> return (mediaTypes.isEmpty() ? Collections.singletonList(MediaType.ALL) : mediaTypes);
<del> }
<del>
<del> private List<MediaType> getProducibleMediaTypes(ServerWebExchange exchange, ResolvableType type) {
<del> Optional<?> optional = exchange.getAttribute(HandlerMapping.PRODUCIBLE_MEDIA_TYPES_ATTRIBUTE);
<del> if (optional.isPresent()) {
<del> Set<MediaType> mediaTypes = (Set<MediaType>) optional.get();
<del> return new ArrayList<>(mediaTypes);
<del> }
<del> else {
<del> return this.messageConverters.stream()
<del> .filter(converter -> converter.canWrite(type, null))
<del> .flatMap(converter -> converter.getWritableMediaTypes().stream())
<del> .collect(Collectors.toList());
<del> }
<del> }
<del>
<del> private MediaType selectMoreSpecificMediaType(MediaType acceptable, MediaType producible) {
<del> producible = producible.copyQualityValue(acceptable);
<del> Comparator<MediaType> comparator = MediaType.SPECIFICITY_COMPARATOR;
<del> return (comparator.compare(acceptable, producible) <= 0 ? acceptable : producible);
<del> }
<del>
<del> private MediaType selectBestMediaType(List<MediaType> compatibleMediaTypes) {
<del> for (MediaType mediaType : compatibleMediaTypes) {
<del> if (mediaType.isConcrete()) {
<del> return mediaType;
<del> }
<del> else if (mediaType.equals(MediaType.ALL) || mediaType.equals(MEDIA_TYPE_APPLICATION_ALL)) {
<del> return MediaType.APPLICATION_OCTET_STREAM;
<del> }
<del> }
<del> return null;
<add> private List<MediaType> getProducibleMediaTypes(ResolvableType type) {
<add> return this.messageConverters.stream()
<add> .filter(converter -> converter.canWrite(type, null))
<add> .flatMap(converter -> converter.getWritableMediaTypes().stream())
<add> .collect(Collectors.toList());
<ide> }
<ide>
<ide> } | 2 |
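A minimal sketch of how another handler could build on the extracted base class. The subclass, its name, and the plain-text logic are illustrative assumptions, not part of this commit; only the constructor and selectMediaType() come from the new base class:

import java.util.Collections;

import reactor.core.publisher.Mono;

import org.springframework.core.convert.ConversionService;
import org.springframework.http.MediaType;
import org.springframework.web.reactive.HandlerResult;
import org.springframework.web.reactive.HandlerResultHandler;
import org.springframework.web.reactive.accept.RequestedContentTypeResolver;
import org.springframework.web.reactive.result.ContentNegotiatingResultHandlerSupport;
import org.springframework.web.server.ServerWebExchange;

// Hypothetical handler reusing the shared content-negotiation logic.
public class PlainTextResultHandler extends ContentNegotiatingResultHandlerSupport
		implements HandlerResultHandler {

	public PlainTextResultHandler(ConversionService conversionService,
			RequestedContentTypeResolver contentTypeResolver) {
		super(conversionService, contentTypeResolver);
	}

	@Override
	public boolean supports(HandlerResult result) {
		return CharSequence.class.isAssignableFrom(result.getReturnValueType().getRawClass());
	}

	@Override
	public Mono<Void> handleResult(ServerWebExchange exchange, HandlerResult result) {
		// The shared content-negotiation algorithm lives in the base class.
		MediaType selected = selectMediaType(exchange,
				Collections.singletonList(MediaType.TEXT_PLAIN));
		if (selected == null) {
			return Mono.error(new IllegalStateException("No acceptable media type"));
		}
		return Mono.empty(); // writing the body is out of scope for this sketch
	}
}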
PHP | PHP | apply fixes from StyleCI | 019277c92d8f5c00fc307ea73ece23d242fa73c9 | <ide><path>src/Illuminate/Validation/Rules/Password.php
<ide> public function setValidator($validator)
<ide> $this->validator = $validator;
<ide>
<ide> return $this;
<del> }
<add> }
<ide>
<ide> /**
<ide> * Set the data under validation. | 1 |
Javascript | Javascript | mock the lookup function in parallel tests | 0fb1e0768945fa5f4d232a77e3303d1e25e89a5f | <ide><path>test/parallel/test-http-client-req-error-dont-double-fire.js
<ide> 'use strict';
<add>
<add>// This tests that the error emitted on the socket does
<add>// not get fired again when the 'error' event handler throws
<add>// an error.
<add>
<ide> const assert = require('assert');
<ide> const http = require('http');
<ide> const common = require('../common');
<add>const { addresses } = require('../common/internet');
<add>const { errorLookupMock } = require('../common/dns');
<add>
<add>const host = addresses.INVALID_HOST;
<ide>
<del>// Invalid hostname as per https://tools.ietf.org/html/rfc2606#section-2
<del>const host = 'this.hostname.is.invalid';
<del>const req = http.get({ host });
<add>const req = http.get({
<add> host,
<add> lookup: common.mustCall(errorLookupMock())
<add>});
<ide> const err = new Error('mock unexpected code error');
<ide> req.on('error', common.mustCall(() => {
<ide> throw err;
<ide><path>test/parallel/test-net-better-error-messages-port-hostname.js
<ide> 'use strict';
<add>
<add>// This tests that the error thrown from net.createConnection
<add>// comes with host and port properties.
<add>// See https://github.com/nodejs/node-v0.x-archive/issues/7005
<add>
<ide> const common = require('../common');
<ide> const net = require('net');
<ide> const assert = require('assert');
<ide>
<add>const { addresses } = require('../common/internet');
<add>const {
<add> errorLookupMock,
<add> mockedErrorCode
<add>} = require('../common/dns');
<add>
<ide> // Using port 0 as hostname used is already invalid.
<del>const c = net.createConnection(0, 'this.hostname.is.invalid');
<add>const c = net.createConnection({
<add> port: 0,
<add> host: addresses.INVALID_HOST,
<add> lookup: common.mustCall(errorLookupMock())
<add>});
<ide>
<ide> c.on('connect', common.mustNotCall());
<ide>
<ide> c.on('error', common.mustCall(function(e) {
<del> // If Name Service Switch is available on the operating system then it
<del> // might be configured differently (/etc/nsswitch.conf).
<del> // If the system is configured with no dns the error code will be EAI_AGAIN,
<del> // but if there are more services after the dns entry, for example some
<del> // linux distributions ship a myhostname service by default which would
<del> // still produce the ENOTFOUND error.
<del> assert.ok(e.code === 'ENOTFOUND' || e.code === 'EAI_AGAIN');
<add> assert.strictEqual(e.code, mockedErrorCode);
<ide> assert.strictEqual(e.port, 0);
<del> assert.strictEqual(e.hostname, 'this.hostname.is.invalid');
<add> assert.strictEqual(e.hostname, addresses.INVALID_HOST);
<ide> }));
<ide><path>test/parallel/test-net-connect-immediate-finish.js
<ide> // USE OR OTHER DEALINGS IN THE SOFTWARE.
<ide>
<ide> 'use strict';
<add>
<add>// This tests that if the socket is still in the 'connecting' state
<add>// when the user calls socket.end() ('finish'), the socket would emit
<add>// 'connect' and defer the handling until the 'connect' event is handled.
<add>
<ide> const common = require('../common');
<ide> const assert = require('assert');
<ide> const net = require('net');
<ide>
<add>const { addresses } = require('../common/internet');
<add>const {
<add> errorLookupMock,
<add> mockedErrorCode,
<add> mockedSysCall
<add>} = require('../common/dns');
<add>
<ide> const client = net.connect({
<del> host: 'this.hostname.is.invalid',
<del> port: common.PORT
<add> host: addresses.INVALID_HOST,
<add> port: common.PORT,
<add> lookup: common.mustCall(errorLookupMock())
<ide> });
<ide>
<ide> client.once('error', common.mustCall((err) => {
<ide> assert(err);
<ide> assert.strictEqual(err.code, err.errno);
<del> // If Name Service Switch is available on the operating system then it
<del> // might be configured differently (/etc/nsswitch.conf).
<del> // If the system is configured with no dns the error code will be EAI_AGAIN,
<del> // but if there are more services after the dns entry, for example some
<del> // linux distributions ship a myhostname service by default which would
<del> // still produce the ENOTFOUND error.
<del> assert.ok(err.code === 'ENOTFOUND' || err.code === 'EAI_AGAIN');
<add> assert.strictEqual(err.code, mockedErrorCode);
<ide> assert.strictEqual(err.host, err.hostname);
<del> assert.strictEqual(err.host, 'this.hostname.is.invalid');
<del> assert.strictEqual(err.syscall, 'getaddrinfo');
<add> assert.strictEqual(err.host, addresses.INVALID_HOST);
<add> assert.strictEqual(err.syscall, mockedSysCall);
<ide> }));
<ide>
<ide> client.end(); | 3 |
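For reference, the injected lookup only has to satisfy the dns.lookup callback contract. A rough sketch of what a helper like errorLookupMock could look like — the real implementation lives in test/common/dns and may differ:

'use strict';

// Illustrative stand-in for test/common/dns's errorLookupMock():
// returns a dns.lookup-compatible function that always fails with
// the given code, so no real DNS query ever leaves the test host.
function errorLookupMock(code = 'ENOTFOUND', syscall = 'getaddrinfo') {
  return function lookupWithError(hostname, options, callback) {
    const err = new Error(`${syscall} ${code} ${hostname}`);
    err.code = code;
    err.errno = code;
    err.syscall = syscall;
    err.hostname = hostname;
    // Fail asynchronously, the way a real resolver callback would.
    process.nextTick(() => callback(err));
  };
}

module.exports = { errorLookupMock, mockedErrorCode: 'ENOTFOUND' };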
Text | Text | fix description of `docker swarm join --help` | 77dd8474a7b4447d7c5b1d257afe1bb2f6443172 | <ide><path>docs/reference/commandline/swarm_join.md
<ide> Usage: docker swarm join [OPTIONS] HOST:PORT
<ide> Join a swarm as a node and/or manager
<ide>
<ide> Options:
<del> --advertise-addr value Advertised address (format: <ip|interface>[:port])
<del> --help Print usage
<del> --listen-addr value Listen address (format: <ip|interface>[:port)
<del> --token string Token for entry into the swarm
<add> --advertise-addr string Advertised address (format: <ip|interface>[:port])
<add> --help Print usage
<add> --listen-addr node-addr Listen address (format: <ip|interface>[:port]) (default 0.0.0.0:2377)
<add> --token string Token for entry into the swarm
<ide> ```
<ide>
<ide> Join a node to a swarm. The node joins as a manager node or worker node based upon the token you | 1 |
Python | Python | fix metrics issue in evaluate | 53a05b6e4c1e7ee2c6d13eda9826f5bc9a321391 | <ide><path>keras/models.py
<ide> def _test_loop(self, f, ins, batch_size=128, verbose=0):
<ide> for batch_out in enumerate(batch_outs):
<ide> outs.append(0.)
<ide> for i, batch_out in enumerate(batch_outs):
<del> outs[i] += batch_out
<add> outs[i] += batch_out * len(batch_ids)
<ide> else:
<ide> if batch_index == 0:
<ide> outs.append(0.) | 1 |
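The new `* len(batch_ids)` factor turns the accumulator into a sample-weighted sum (divided by the total sample count later in the loop). A standalone sketch of why that matters when the final batch is smaller — the numbers are illustrative only:

# Per-batch mean losses and the sizes of the batches they came from.
batch_means = [0.5, 0.5, 0.9]
batch_sizes = [128, 128, 4]

# Averaging the batch means directly over-weights the tiny last batch:
unweighted = sum(batch_means) / len(batch_means)               # ~0.633

# Weighting each mean by its batch size recovers the true mean:
weighted = sum(m * n for m, n in zip(batch_means, batch_sizes))
weighted /= sum(batch_sizes)                                   # ~0.506

print(unweighted, weighted)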
Javascript | Javascript | use inLabelRange when tooltips are in label mode | d3538a1fbde0d0468f123531dd92eae215d75bd3 | <ide><path>src/core/core.controller.js
<ide>
<ide> helpers.each(this.data.datasets, function(dataset, datasetIndex) {
<ide> helpers.each(dataset.metaData, function(element, index) {
<del> if (element.inRange(eventPosition.x, eventPosition.y)) {
<add> if (element.inLabelRange(eventPosition.x, eventPosition.y)) {
<ide> elementsArray.push(element);
<ide> }
<ide> }, this); | 1 |
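A conceptual sketch of the distinction, not the actual Chart.js element code: inRange demands a hit on both axes, while inLabelRange only needs the event to line up with the element's label position, which is what 'label'-mode tooltips expect:

// Conceptual only; real chart elements implement these methods themselves.
function inRange(element, x, y) {
  return Math.abs(x - element.x) < element.hitRadius &&
    Math.abs(y - element.y) < element.hitRadius;
}

function inLabelRange(element, x) {
  // y is deliberately ignored: any point in the label's column counts.
  return Math.abs(x - element.x) < element.hitRadius;
}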
PHP | PHP | fix CS error | d54cc5e6778c24b85e89f1d9a6ef341ee4f79fe5 | <ide><path>src/TestSuite/TestEmailTransport.php
<ide> public function send(Email $email): array
<ide> static::$emails[] = $email;
<ide>
<ide> return [
<del> 'result' => 'Success'
<add> 'result' => 'Success',
<ide> ];
<ide> }
<ide>
<ide><path>tests/TestCase/Shell/CompletionShellTest.php
<ide> namespace Cake\Test\TestCase\Shell;
<ide>
<ide> use Cake\Console\ConsoleIo;
<del>use Cake\Console\ConsoleOutput;
<ide> use Cake\Core\Plugin;
<ide> use Cake\TestSuite\Stub\ConsoleOutput as StubOutput;
<ide> use Cake\TestSuite\TestCase; | 2 |
Go | Go | move image_export to a job | 5264914e574c20c8fefcd6e5d858f51f341dd9da | <ide><path>api.go
<ide> func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http
<ide> }
<ide>
<ide> func getImagesGet(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
<del> name := vars["name"]
<add> if vars == nil {
<add> return fmt.Errorf("Missing parameter")
<add> }
<ide> if version > 1.0 {
<ide> w.Header().Set("Content-Type", "application/x-tar")
<ide> }
<del> return srv.ImageExport(name, w)
<add> job := srv.Eng.Job("image_export", vars["name"])
<add> if err := job.Stdout.Add(w); err != nil {
<add> return err
<add> }
<add> return job.Run()
<ide> }
<ide>
<ide> func postImagesLoad(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
<ide><path>server.go
<ide> func jobInitApi(job *engine.Job) engine.Status {
<ide> job.Error(err)
<ide> return engine.StatusErr
<ide> }
<add> if err := job.Eng.Register("image_export", srv.ImageExport); err != nil {
<add> job.Error(err)
<add> return engine.StatusErr
<add> }
<ide> return engine.StatusOK
<ide> }
<ide>
<ide> func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
<ide> // uncompressed tar ball.
<ide> // name is the set of tags to export.
<ide> // out is the writer where the images are written to.
<del>func (srv *Server) ImageExport(name string, out io.Writer) error {
<add>func (srv *Server) ImageExport(job *engine.Job) engine.Status {
<add> if len(job.Args) != 1 {
<add> job.Errorf("Usage: %s CONTAINER\n", job.Name)
<add> return engine.StatusErr
<add> }
<add> name := job.Args[0]
<ide> // get image json
<ide> tempdir, err := ioutil.TempDir("", "docker-export-")
<ide> if err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide> defer os.RemoveAll(tempdir)
<ide>
<ide> utils.Debugf("Serializing %s", name)
<ide>
<ide> rootRepo, err := srv.runtime.repositories.Get(name)
<ide> if err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide> if rootRepo != nil {
<ide> for _, id := range rootRepo {
<ide> image, err := srv.ImageInspect(id)
<ide> if err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide>
<ide> if err := srv.exportImage(image, tempdir); err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide> }
<ide>
<ide> func (srv *Server) ImageExport(name string, out io.Writer) error {
<ide> rootRepoJson, _ := json.Marshal(rootRepoMap)
<ide>
<ide> if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide> } else {
<ide> image, err := srv.ImageInspect(name)
<ide> if err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide> if err := srv.exportImage(image, tempdir); err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide> }
<ide>
<ide> fs, err := archive.Tar(tempdir, archive.Uncompressed)
<ide> if err != nil {
<del> return err
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<ide>
<del> if _, err := io.Copy(out, fs); err != nil {
<del> return err
<add> if _, err := io.Copy(job.Stdout, fs); err != nil {
<add> job.Error(err)
<add> return engine.StatusErr
<ide> }
<del> return nil
<add> return engine.StatusOK
<ide> }
<ide>
<ide> func (srv *Server) exportImage(image *Image, tempdir string) error { | 2 |
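With the handler registered in jobInitApi, callers drive the export through the engine rather than calling a server method directly. A sketch of that call path outside the API layer, using only the job API seen above — the engine import path and image name are assumptions for illustration:

package main

import (
	"bytes"
	"fmt"

	"github.com/dotcloud/docker/engine" // import path assumed for this era of the codebase
)

// exportToBuffer runs the "image_export" job registered in jobInitApi,
// capturing the uncompressed tarball in memory instead of an HTTP response.
func exportToBuffer(eng *engine.Engine, name string) (*bytes.Buffer, error) {
	var buf bytes.Buffer
	job := eng.Job("image_export", name)
	if err := job.Stdout.Add(&buf); err != nil {
		return nil, err
	}
	if err := job.Run(); err != nil {
		return nil, fmt.Errorf("image export failed: %v", err)
	}
	return &buf, nil
}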
Javascript | Javascript | remove position defaults | fc0a056fee9d33b20883bdf449493baf4d7acec6 | <ide><path>src/controllers/controller.bubble.js
<ide> defaults.set('bubble', {
<ide> },
<ide> scales: {
<ide> x: {
<del> type: 'linear',
<del> position: 'bottom'
<add> type: 'linear'
<ide> },
<ide> y: {
<del> type: 'linear',
<del> position: 'left'
<add> type: 'linear'
<ide> }
<ide> },
<ide>
<ide><path>src/controllers/controller.horizontalBar.js
<ide> defaults.set('horizontalBar', {
<ide> scales: {
<ide> x: {
<ide> type: 'linear',
<del> position: 'bottom',
<ide> beginAtZero: true
<ide> },
<ide> y: {
<ide> type: 'category',
<del> position: 'left',
<ide> offset: true,
<ide> gridLines: {
<ide> offsetGridLines: true
<ide><path>src/controllers/controller.scatter.js
<ide> import defaults from '../core/core.defaults';
<ide> defaults.set('scatter', {
<ide> scales: {
<ide> x: {
<del> type: 'linear',
<del> position: 'bottom'
<add> type: 'linear'
<ide> },
<ide> y: {
<del> type: 'linear',
<del> position: 'left'
<add> type: 'linear'
<ide> }
<ide> },
<ide> | 3 |
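The explicit positions can go assuming the scale system infers placement from the axis ID when none is given — an 'x' scale at the bottom, a 'y' scale at the left. A user config relying on those inferred defaults (ctx and data are placeholders):

// Assumes the inferred defaults this commit leans on; set position
// explicitly only when deviating from them.
new Chart(ctx, {
  type: 'scatter',
  data: data,
  options: {
    scales: {
      x: { type: 'linear' },  // no position: 'bottom' needed
      y: { type: 'linear' }   // no position: 'left' needed
    }
  }
});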
Javascript | Javascript | apply suggestions from code review | 54de31c005a6aac4220ac6cd1f8e0847e21ac4f9 | <ide><path>src/path-watcher.js
<ide> class NSFWNativeWatcher extends NativeWatcher {
<ide> if (event.file) {
<ide> payload.path = path.join(event.directory, event.file);
<ide> } else {
<del> payload.oldPath = path.join(event.directory, event.oldFile??'');
<del> payload.path = path.join(event.directory, event.newFile??'');
<add> payload.oldPath = path.join(event.directory, event.oldFile == undefined ? '' : event.oldFile);
<add> payload.path = path.join(event.directory, event.newFile == undefined ? '' : event.newFile);
<ide> }
<ide>
<ide> return payload; | 1 |
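The rewrite trades the newer ?? operator for loose equality, which is behavior-preserving because `v == undefined` is true for exactly null and undefined. A quick standalone check of the equivalence (run under a runtime that supports ??):

// Both forms substitute '' for null and undefined and pass every
// other value through untouched.
const cases = [undefined, null, '', 'renamed.txt'];
for (const v of cases) {
  const a = v ?? '';
  const b = v == undefined ? '' : v;
  console.log(a === b); // true for each case
}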
Python | Python | remove pylint comments | 3613c3defc39c236fb1592c4f7ba1a9cc887343a | <ide><path>keras/__init__.py
<ide> from keras.engine.training import Model
<ide>
<ide> # isort: off
<del># pylint: disable=unused-import
<add>
<ide> from tensorflow.python import tf2
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide><path>keras/activations.py
<ide> def softmax(x, axis=-1):
<ide> )
<ide>
<ide> # Cache the logits to use for crossentropy loss.
<del> output._keras_logits = x # pylint: disable=protected-access
<add> output._keras_logits = x
<ide> return output
<ide>
<ide>
<ide> def sigmoid(x):
<ide> """
<ide> output = tf.sigmoid(x)
<ide> # Cache the logits to use for crossentropy loss.
<del> output._keras_logits = x # pylint: disable=protected-access
<add> output._keras_logits = x
<ide> return output
<ide>
<ide>
<ide><path>keras/activations_test.py
<ide> def gelu(x, approximate=False):
<ide> )
<ide> )
<ide> else:
<del> from scipy.stats import (
<del> norm, # pylint: disable=g-import-not-at-top
<del> )
<add> from scipy.stats import norm
<ide>
<ide> return x * norm.cdf(x)
<ide>
<ide><path>keras/api/tests/api_compatibility_test.py
<ide> def _AssertProtoDictEquals(
<ide> verbose_diff_message = diff_message
<ide> else:
<ide> # Do not truncate diff
<del> self.maxDiff = None # pylint: disable=invalid-name
<add> self.maxDiff = None
<ide> # Now we can run an actual proto diff.
<ide> try:
<ide> self.assertProtoEquals(expected_dict[key], actual_dict[key])
<ide><path>keras/applications/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras Applications are premade architectures with pre-trained weights."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> from keras.applications.convnext import ConvNeXtBase
<ide> from keras.applications.convnext import ConvNeXtLarge
<ide><path>keras/applications/applications_load_weight_test.py
<ide> def test_application_pretrained_weights_loading(self):
<ide> for app in apps:
<ide> try:
<ide> model = app(weights="imagenet")
<del> except Exception: # pylint: disable=broad-except
<add> except Exception:
<ide> self.skipTest("TODO(b/227700184): Re-enable.")
<ide> self.assertShapeEqual(model.output_shape, (None, _IMAGENET_CLASSES))
<ide> x = _get_elephant(model.input_shape[1:3])
<ide><path>keras/applications/convnext.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=missing-docstring
<del># pylint: disable=g-classes-have-attributes
<del># pylint: disable=g-direct-tensorflow-import
<add>
<add>
<ide> """ConvNeXt models for Keras.
<ide>
<ide> References:
<ide> def ConvNeXtXLarge(
<ide>
<ide>
<ide> @keras_export("keras.applications.convnext.preprocess_input")
<del>def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
<add>def preprocess_input(x, data_format=None):
<ide> """A placeholder method for backward compatibility.
<ide>
<ide> The preprocessing logic has been included in the convnext model
<ide><path>keras/applications/densenet.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """DenseNet models for Keras.
<ide>
<ide> Reference:
<ide><path>keras/applications/efficientnet.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=missing-docstring
<add>
<add>
<ide> """EfficientNet models for Keras.
<ide>
<ide> Reference:
<ide> def EfficientNetB7(
<ide>
<ide>
<ide> @keras_export("keras.applications.efficientnet.preprocess_input")
<del>def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
<add>def preprocess_input(x, data_format=None):
<ide> """A placeholder method for backward compatibility.
<ide>
<ide> The preprocessing logic has been included in the efficientnet model
<ide><path>keras/applications/efficientnet_v2.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=missing-docstring
<add>
<add>
<ide> """EfficientNet V2 models for Keras.
<ide>
<ide> Reference:
<ide> def EfficientNetV2L(
<ide>
<ide>
<ide> @keras_export("keras.applications.efficientnet_v2.preprocess_input")
<del>def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
<add>def preprocess_input(x, data_format=None):
<ide> """A placeholder method for backward compatibility.
<ide>
<ide> The preprocessing logic has been included in the EfficientNetV2 model
<ide><path>keras/applications/inception_resnet_v2.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """Inception-ResNet V2 model for Keras.
<ide>
<ide> Reference:
<ide><path>keras/applications/inception_v3.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """Inception V3 model for Keras.
<ide>
<ide> Reference:
<ide><path>keras/applications/mobilenet.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """MobileNet v1 models for Keras.
<ide>
<ide> MobileNet is a general architecture and can be used for multiple use cases.
<ide><path>keras/applications/mobilenet_v2.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """MobileNet v2 models for Keras.
<ide>
<ide> MobileNetV2 is a general architecture and can be used for multiple use cases.
<ide><path>keras/applications/mobilenet_v3.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=missing-function-docstring
<add>
<add>
<ide> """MobileNet v3 models for Keras."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide> def _inverted_res_block(
<ide>
<ide>
<ide> @keras_export("keras.applications.mobilenet_v3.preprocess_input")
<del>def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
<add>def preprocess_input(x, data_format=None):
<ide> """A placeholder method for backward compatibility.
<ide>
<ide> The preprocessing logic has been included in the mobilenet_v3 model
<ide><path>keras/applications/nasnet.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """NASNet-A models for Keras.
<ide>
<ide> NASNet refers to Neural Architecture Search Network, a family of models
<ide><path>keras/applications/regnet.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=missing-docstring
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> """RegNet models for Keras.
<ide>
<ide> def RegNetY320(
<ide>
<ide>
<ide> @keras_export("keras.applications.regnet.preprocess_input")
<del>def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
<add>def preprocess_input(x, data_format=None):
<ide> """A placeholder method for backward compatibility.
<ide>
<ide> The preprocessing logic has been included in the regnet model
<ide><path>keras/applications/resnet.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """ResNet models for Keras.
<ide>
<ide> Reference:
<ide><path>keras/applications/resnet_rs.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=missing-function-docstring
<add>
<add>
<ide> """ResNet-RS models for Keras.
<ide>
<ide> Reference:
<ide> def ResNetRS(
<ide> weights="imagenet",
<ide> input_tensor=None,
<ide> classes=1000,
<del> # pylint: disable=g-bare-generic
<ide> classifier_activation: Union[str, Callable] = "softmax",
<ide> include_preprocessing=True,
<ide> ):
<ide> def ResNetRS420(
<ide> )
<ide>
<ide>
<del># pylint: disable=unused-argument
<ide> @keras_export("keras.applications.resnet_rs.preprocess_input")
<ide> def preprocess_input(x, data_format=None):
<ide> """A placeholder method for backward compatibility.
<ide><path>keras/applications/resnet_v2.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """ResNet v2 models for Keras.
<ide>
<ide> Reference:
<ide><path>keras/applications/vgg16.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """VGG16 model for Keras.
<ide>
<ide> Reference:
<ide><path>keras/applications/vgg19.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """VGG19 model for Keras.
<ide>
<ide> Reference:
<ide><path>keras/applications/xception.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<add>
<ide> """Xception V1 model for Keras.
<ide>
<ide> On ImageNet, this model gets to a top-1 validation accuracy of 0.790
<ide><path>keras/backend.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<del># pylint: disable=redefined-outer-name
<del># pylint: disable=redefined-builtin
<del># pylint: disable=g-classes-have-attributes
<del># pylint: disable=g-bad-import-order
<del># pylint: disable=missing-function-docstring
<add>
<add>
<ide> """Keras backend API."""
<ide>
<ide> import collections
<ide> def _current_graph(op_input_list, graph=None):
<ide> op_input, (tf.Operation, tf.Tensor, tf.__internal__.CompositeTensor)
<ide> ) and (
<ide> (not isinstance(op_input, tf.Tensor)) or type(op_input) == tf.Tensor
<del> ): # pylint: disable=unidiomatic-typecheck
<add> ):
<ide> graph_element = op_input
<ide> else:
<ide> graph_element = _as_graph_element(op_input)
<ide> def tensor_spec_to_placeholder(tensorspec):
<ide> # when the placeholder is built in a top-level eager context
<ide> # (intended to be used with keras.backend.function)
<ide> from keras.engine import (
<del> input_layer, # pylint: disable=g-import-not-at-top
<add> input_layer,
<ide> )
<ide>
<ide> x = input_layer.Input(tensor=x)
<ide> def is_placeholder(x):
<ide> try:
<ide> if tf.compat.v1.executing_eagerly_outside_functions():
<ide> return hasattr(x, "_is_backend_placeholder")
<del> from keras.utils import tf_utils # pylint: disable=g-import-not-at-top
<add> from keras.utils import tf_utils
<ide>
<ide> if tf_utils.is_extension_type(x):
<ide> flat_components = tf.nest.flatten(x, expand_composites=True)
<ide> class to walkaround this issue until it is resolved on TF side.
<ide> self._generator = None
<ide> elif self._rng_type == self.RNG_STATEFUL:
<ide> from keras.utils import (
<del> tf_utils, # pylint: disable=g-import-not-at-top
<add> tf_utils,
<ide> )
<ide>
<ide> with tf_utils.maybe_init_scope(self):
<ide> def batch_get_value(tensors):
<ide> """
<ide> if tf.executing_eagerly():
<ide> return [x.numpy() for x in tensors]
<del> elif tf.inside_function(): # pylint: disable=protected-access
<add> elif tf.inside_function():
<ide> raise RuntimeError("Cannot get value inside Tensorflow graph function.")
<ide> if tensors:
<ide> return get_session(tensors).run(tensors)
<ide> def _eval_if_composite(self, tensor):
<ide> # the CompositeTensors. E.g., if output_structure contains a
<ide> # SparseTensor, then this ensures that we return its value as a
<ide> # SparseTensorValue rather than a SparseTensor.
<del> from keras.utils import tf_utils # pylint: disable=g-import-not-at-top
<add> from keras.utils import tf_utils
<ide>
<ide> if tf_utils.is_extension_type(tensor):
<ide> return self._session.run(tensor)
<ide> def function(inputs, outputs, updates=None, name=None, **kwargs):
<ide> "`updates` argument is not supported during "
<ide> "eager execution. You passed: %s" % (updates,)
<ide> )
<del> from keras import models # pylint: disable=g-import-not-at-top
<del> from keras.utils import tf_utils # pylint: disable=g-import-not-at-top
<add> from keras import models
<add> from keras.utils import tf_utils
<ide>
<ide> model = models.Model(inputs=inputs, outputs=outputs)
<ide>
<ide> def in_train_phase(x, alt, training=None):
<ide> the `training` flag defaults to `K.learning_phase()`.
<ide> """
<ide> from keras.engine import (
<del> base_layer_utils, # pylint: disable=g-import-not-at-top
<add> base_layer_utils,
<ide> )
<ide>
<ide> if training is None:
<ide> def categorical_crossentropy(target, output, from_logits=False, axis=-1):
<ide> # Use logits whenever they are available. `softmax` and `sigmoid`
<ide> # activations cache logits on the `output` Tensor.
<ide> if hasattr(output, "_keras_logits"):
<del> output = output._keras_logits # pylint: disable=protected-access
<add> output = output._keras_logits
<ide> if from_logits:
<ide> warnings.warn(
<ide> '"`categorical_crossentropy` received `from_logits=True`, but '
<ide> def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
<ide> # Use logits whenever they are available. `softmax` and `sigmoid`
<ide> # activations cache logits on the `output` Tensor.
<ide> if hasattr(output, "_keras_logits"):
<del> output = output._keras_logits # pylint: disable=protected-access
<add> output = output._keras_logits
<ide> if from_logits:
<ide> warnings.warn(
<ide> '"`sparse_categorical_crossentropy` received '
<ide> def binary_crossentropy(target, output, from_logits=False):
<ide> # Use logits whenever they are available. `softmax` and `sigmoid`
<ide> # activations cache logits on the `output` Tensor.
<ide> if hasattr(output, "_keras_logits"):
<del> output = output._keras_logits # pylint: disable=protected-access
<add> output = output._keras_logits
<ide> if from_logits:
<ide> warnings.warn(
<ide> '"`binary_crossentropy` received `from_logits=True`, '
<ide> def _create_session(distribution_strategy):
<ide> distribution_strategy.configure(session_config)
<ide> master = (
<ide> distribution_strategy.extended._tpu_cluster_resolver.master()
<del> ) # pylint: disable=protected-access
<add> )
<ide> session = tf.compat.v1.Session(config=session_config, target=master)
<ide> else:
<ide> worker_context = dc.get_current_worker_context()
<ide> def __getitem__(self, key):
<ide>
<ide> value = self._get_recursive(key)
<ide> if value is None:
<del> value = self[
<del> key
<del> ] = self.default_factory() # pylint:disable=not-callable
<add> value = self[key] = self.default_factory()
<ide> return value
<ide>
<ide> def setdefault(self, key=None, default=None, kwargs=None):
<ide><path>keras/benchmarks/eager_microbenchmarks_test.py
<ide> def call(self, x):
<ide> x = tf.convert_to_tensor([[1.0]])
<ide>
<ide> def fn():
<del> layer(x) # pylint: disable=not-callable
<add> layer(x)
<ide>
<ide> self._run(fn, 10000)
<ide>
<ide> def benchmark_op_layer_call_overhead(self):
<ide> model = tf.keras.Model(inputs=model_input, outputs=model_output)
<ide>
<ide> def fn():
<del> model(x) # pylint: disable=not-callable
<add> model(x)
<ide>
<ide> fn()
<ide> self._run(fn, 100)
<ide> def fn():
<ide> self._run(fn, 10000)
<ide>
<ide>
<del>class KerasLayerCallOverheadBenchmarks( # pylint: disable=undefined-variable
<add>class KerasLayerCallOverheadBenchmarks(
<ide> MicroBenchmarksBase, metaclass=tf.__internal__.test.ParameterizedBenchmark
<ide> ):
<ide>
<ide><path>keras/benchmarks/keras_cpu_benchmark_test.py
<ide> _OPTIMIZER = "rmsprop"
<ide>
<ide>
<del>class KerasModelCPUBenchmark( # pylint: disable=undefined-variable
<add>class KerasModelCPUBenchmark(
<ide> tf.test.Benchmark, metaclass=tf.__internal__.test.ParameterizedBenchmark
<ide> ):
<ide> """Required Arguments for measure_performance.
<ide><path>keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
<ide> def build(self, input_shape):
<ide> trainable=True,
<ide> )
<ide>
<del> def call(self, inputs): # pylint: disable=arguments-differ
<add> def call(self, inputs):
<ide> inputs -= tf.reduce_mean(inputs, axis=-1, keepdims=True)
<ide> pos = tf.nn.relu(inputs)
<ide> neg = tf.nn.relu(-inputs)
<ide><path>keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py
<ide> def _build_model(self):
<ide> embedding_layer = TokenAndPositionEmbedding(
<ide> self.max_len, self.max_feature, embed_dim
<ide> )
<del> x = embedding_layer(inputs) # pylint: disable=not-callable
<add> x = embedding_layer(inputs)
<ide> transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
<del> x = transformer_block(x) # pylint: disable=not-callable
<add> x = transformer_block(x)
<ide> x = tf.keras.layers.GlobalAvgPool1D()(x)
<ide> x = tf.keras.layers.Dropout(0.1)(x)
<ide> x = tf.keras.layers.Dense(20, activation="relu")(x)
<ide> def separate_heads(self, x, batch_size):
<ide> x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
<ide> return tf.transpose(x, perm=[0, 2, 1, 3])
<ide>
<del> def call(self, inputs): # pylint: disable=arguments-differ
<add> def call(self, inputs):
<ide> # x.shape = [batch_size, seq_len, embedding_dim]
<ide> batch_size = tf.shape(inputs)[0]
<ide> query = self.query_dense(inputs) # (batch_size, seq_len, embed_dim)
<ide> def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
<ide> self.dropout1 = tf.keras.layers.Dropout(rate)
<ide> self.dropout2 = tf.keras.layers.Dropout(rate)
<ide>
<del> def call(self, inputs, training): # pylint: disable=arguments-differ
<del> attn_output = self.att(inputs) # pylint: disable=not-callable
<add> def call(self, inputs, training):
<add> attn_output = self.att(inputs)
<ide> attn_output = self.dropout1(attn_output, training=training)
<ide> out1 = self.layernorm1(inputs + attn_output)
<ide> ffn_output = self.ffn(out1)
<ide> def __init__(self, maxlen, vocab_size, embed_dim):
<ide> input_dim=maxlen, output_dim=embed_dim
<ide> )
<ide>
<del> def call(self, x): # pylint: disable=arguments-differ
<add> def call(self, x):
<ide> maxlen = tf.shape(x)[-1]
<ide> positions = tf.range(start=0, limit=maxlen, delta=1)
<ide> positions = self.pos_emb(positions)
<ide><path>keras/benchmarks/layer_benchmarks/layer_benchmarks_test.py
<ide> def _layer_call_backward(layer, x):
<ide> ]
<ide>
<ide>
<del>class KerasLayerBenchmarks( # pylint: disable=undefined-variable
<add>class KerasLayerBenchmarks(
<ide> layer_benchmarks_test_base.LayerBenchmarksBase,
<ide> metaclass=tf.__internal__.test.ParameterizedBenchmark,
<ide> ):
<ide><path>keras/benchmarks/metrics_memory_benchmark_test.py
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> try:
<del> import memory_profiler # pylint:disable=g-import-not-at-top
<add> import memory_profiler
<ide> except ImportError:
<ide> memory_profiler = None
<ide>
<ide><path>keras/benchmarks/model_components_benchmarks_test.py
<ide> def benchmark_keras_model_subclassed(self):
<ide> model = SubclassedKerasModel()
<ide> data = tf.random.uniform((10, 10))
<ide>
<del> func = lambda: model(data) # pylint: disable=not-callable
<add> func = lambda: model(data)
<ide> # First call is more expensive (creates variables etc.), discount that.
<ide> func()
<ide>
<ide> def benchmark_keras_model_subclassed(self):
<ide> def benchmark_keras_model_functional(self):
<ide> model = make_keras_model()
<ide> data = tf.random.uniform((10, 10))
<del> func = lambda: model(data) # pylint: disable=not-callable
<add> func = lambda: model(data)
<ide> # Symmetry with benchmark_keras_model_subclassed
<ide> func()
<del> assert np.equal(
<del> func(), SubclassedKerasModel()(data)
<del> ).all() # pylint: disable=not-callable
<add> assert np.equal(func(), SubclassedKerasModel()(data)).all()
<ide> self._run(func, 30000)
<ide>
<ide> def benchmark_keras_model_sequential(self):
<ide><path>keras/benchmarks/model_memory_profile.py
<ide> from absl import logging
<ide>
<ide> try:
<del> import memory_profiler # pylint:disable=g-import-not-at-top
<add> import memory_profiler
<ide> except ImportError:
<ide> memory_profiler = None
<ide>
<ide><path>keras/callbacks.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-import-not-at-top
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> """Callbacks: utilities called at certain points during model training."""
<ide>
<ide> import collections
<ide> def configure_callbacks(
<ide> callback_list = CallbackList(callbacks)
<ide>
<ide> # Set callback model
<del> callback_model = (
<del> model._get_callback_model()
<del> ) # pylint: disable=protected-access
<add> callback_model = model._get_callback_model()
<ide> callback_list.set_model(callback_model)
<ide>
<ide> set_callback_parameters(
<ide> def __init__(
<ide> self.set_params(params)
<ide>
<ide> # Performance optimization: determines if batch hooks need to be called.
<del> # pylint: disable=protected-access
<add>
<ide> self._supports_tf_logs = all(
<ide> getattr(cb, "_supports_tf_logs", False) for cb in self.callbacks
<ide> )
<ide> def __init__(
<ide> self._should_call_predict_batch_hooks = any(
<ide> cb._implements_predict_batch_hooks() for cb in self.callbacks
<ide> )
<del> # pylint: enable=protected-access
<ide>
<ide> self._disallow_batch_hooks_in_ps_strategy()
<ide>
<ide> def __iter__(self):
<ide>
<ide> def _disallow_batch_hooks_in_ps_strategy(self):
<ide> """Error out if batch-level callbacks are passed with PSStrategy."""
<del> # pylint: disable=protected-access
<add>
<ide> strategy = tf.distribute.get_strategy()
<ide> if strategy._should_use_with_coordinator:
<ide> unsupported_callbacks = []
<ide> def _disallow_batch_hooks_in_ps_strategy(self):
<ide> "`ParameterServerStrategy`. Found unsupported "
<ide> f"callbacks: {unsupported_callbacks}"
<ide> )
<del> # pylint: enable=protected-access
<ide>
<ide>
<ide> @keras_export("keras.callbacks.Callback")
<ide> class Callback:
<ide> """
<ide>
<ide> def __init__(self):
<del> self.validation_data = None # pylint: disable=g-missing-from-attributes
<add> self.validation_data = None
<ide> self.model = None
<ide> # Whether this Callback should only run on the chief worker in a
<ide> # Multi-Worker setting.
<ide> def set_params(self, params):
<ide> self._call_batch_hooks = self.verbose == 1
<ide> if self.target is None:
<ide> try:
<del> self._train_step = (
<del> self.model._train_counter
<del> ) # pylint: disable=protected-access
<del> self._test_step = (
<del> self.model._test_counter
<del> ) # pylint: disable=protected-access
<del> self._predict_step = (
<del> self.model._predict_counter
<del> ) # pylint: disable=protected-access
<add> self._train_step = self.model._train_counter
<add> self._test_step = self.model._test_counter
<add> self._predict_step = self.model._predict_counter
<ide> except AttributeError:
<ide> self._call_batch_hooks = True
<ide>
<ide> def _maybe_init_progbar(self):
<ide> unit_name="step" if self.use_steps else "sample",
<ide> )
<ide>
<del> self.progbar._update_stateful_metrics(
<del> self.stateful_metrics
<del> ) # pylint: disable=protected-access
<add> self.progbar._update_stateful_metrics(self.stateful_metrics)
<ide>
<ide> def _implements_train_batch_hooks(self):
<ide> return self._call_batch_hooks
<ide> def on_epoch_begin(self, epoch, logs=None):
<ide>
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> self.epochs_since_last_save += 1
<del> # pylint: disable=protected-access
<add>
<ide> if self.save_freq == "epoch":
<ide> self._save_model(epoch=epoch, batch=None, logs=logs)
<ide>
<ide> def _save_model(self, epoch, batch, logs):
<ide>
<ide> def _get_file_path(self, epoch, batch, logs):
<ide> """Returns the file path for checkpoint."""
<del> # pylint: disable=protected-access
<add>
<ide> try:
<ide> # `filepath` may contain placeholders such as
<ide> # `{epoch:02d}`,`{batch:02d}` and `{mape:.2f}`. A mismatch between
<ide> def __init__(self, backup_dir, save_freq="epoch"):
<ide> def on_train_begin(self, logs=None):
<ide> # TrainingState is used to manage the training state needed for
<ide> # failure-recovery of a worker in training.
<del> # pylint: disable=protected-access
<ide>
<ide> if self.model._distribution_strategy and not isinstance(
<ide> self.model.distribute_strategy, self._supported_strategies
<ide> def _implements_train_batch_hooks(self):
<ide> return self._save_freq != "epoch"
<ide>
<ide> def on_train_end(self, logs=None):
<del> # pylint: disable=protected-access
<add>
<ide> # On exit of training, delete the training state backup file that was
<ide> # saved for the purpose of worker recovery.
<ide> self._training_state.delete_backup()
<ide> def keras_model_summary(name, data, step=None):
<ide>
<ide> try:
<ide> json_string = data.to_json()
<del> except Exception as exc: # pylint: disable=broad-except
<add> except Exception as exc:
<ide> # An exception should not break a model code.
<ide> logging.warning(
<ide> "Model failed to serialize as JSON. Ignoring... %s", exc
<ide> def keras_model_summary(name, data, step=None):
<ide>
<ide> @keras_export("keras.callbacks.TensorBoard", v1=[])
<ide> class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
<del> # pylint: disable=line-too-long
<add>
<ide> """Enable visualizations for TensorBoard.
<ide>
<ide> TensorBoard is a visualization tool provided with TensorFlow.
<ide> def my_summary(x):
<ide> ```
<ide> """
<ide>
<del> # pylint: enable=line-too-long
<del>
<ide> def __init__(
<ide> self,
<ide> log_dir="logs",
<ide> def set_model(self, model):
<ide> self._log_write_dir = self._get_log_write_dir()
<ide>
<ide> self._train_dir = os.path.join(self._log_write_dir, "train")
<del> self._train_step = (
<del> self.model._train_counter
<del> ) # pylint: disable=protected-access
<add> self._train_step = self.model._train_counter
<ide>
<ide> self._val_dir = os.path.join(self._log_write_dir, "validation")
<del> self._val_step = (
<del> self.model._test_counter
<del> ) # pylint: disable=protected-access
<add> self._val_step = self.model._test_counter
<ide>
<ide> self._writers = {} # Resets writers.
<ide>
<ide> def _write_keras_model_train_graph(self):
<ide> # If the train_function is a `tf.function`, we can write out a
<ide> # graph
<ide> if hasattr(train_fn, "function_spec"):
<del> tf.summary.graph(
<del> train_fn._concrete_stateful_fn.graph
<del> ) # pylint: disable=protected-access
<add> tf.summary.graph(train_fn._concrete_stateful_fn.graph)
<ide>
<ide> def _write_keras_model_summary(self):
<ide> """Writes Keras graph network summary to TensorBoard."""
<ide> def _write_keras_model_summary(self):
<ide> summary_writable = (
<ide> self.model._is_graph_network
<ide> or self.model.__class__.__name__ == "Sequential"
<del> ) # pylint: disable=protected-access
<add> )
<ide> if summary_writable:
<ide> keras_model_summary("keras", self.model, step=0)
<ide>
<ide><path>keras/callbacks_test.py
<ide> from tensorflow.python.platform import tf_logging as logging
<ide>
<ide> try:
<del> import h5py # pylint:disable=g-import-not-at-top
<add> import h5py
<ide> except ImportError:
<ide> h5py = None
<ide>
<ide> try:
<del> import requests # pylint:disable=g-import-not-at-top
<add> import requests
<ide> except ImportError:
<ide> requests = None
<ide>
<ide><path>keras/callbacks_v1.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-import-not-at-top
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> """Callbacks: utilities called at certain points during model training."""
<ide>
<ide> import os
<ide>
<ide> @keras_export(v1=["keras.callbacks.TensorBoard"])
<ide> class TensorBoard(callbacks.TensorBoard):
<del> # pylint: disable=line-too-long
<add>
<ide> """Enable visualizations for TensorBoard.
<ide>
<ide> TensorBoard is a visualization tool provided with TensorFlow.
<ide> class TensorBoard(callbacks.TensorBoard):
<ide> @end_compatibility
<ide> """
<ide>
<del> # pylint: enable=line-too-long
<del>
<ide> def __init__(
<ide> self,
<ide> log_dir="./logs",
<ide> def set_model(self, model):
<ide> if self.embeddings_freq and self.embeddings_data is not None:
<ide> # Avoid circular dependency.
<ide> from keras.engine import (
<del> training_utils_v1, # pylint: disable=g-import-not-at-top
<add> training_utils_v1,
<ide> )
<ide>
<ide> self.embeddings_data = training_utils_v1.standardize_input_data(
<ide> def on_epoch_begin(self, epoch, logs=None):
<ide>
<ide> # check if histogram summary should be run for this epoch
<ide> if self.histogram_freq and epoch % self.histogram_freq == 0:
<del> # pylint: disable=protected-access
<add>
<ide> # add the histogram summary op if it should run this epoch
<ide> self.model._make_test_function()
<ide> if self.merged not in self.model.test_function.fetches:
<ide> self.model.test_function.fetches.append(self.merged)
<ide> self.model.test_function.fetch_callbacks[
<ide> self.merged
<ide> ] = self._fetch_callback
<del> # pylint: enable=protected-access
<ide>
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> """Checks if summary ops should run next epoch, logs scalar
<ide> def on_epoch_end(self, epoch, logs=None):
<ide>
<ide> # pop the histogram summary op after each epoch
<ide> if self.histogram_freq:
<del> # pylint: disable=protected-access
<add>
<ide> if self.merged in self.model.test_function.fetches:
<ide> self.model.test_function.fetches.remove(self.merged)
<ide> if self.merged in self.model.test_function.fetch_callbacks:
<ide> self.model.test_function.fetch_callbacks.pop(self.merged)
<del> # pylint: enable=protected-access
<ide>
<ide> if self.embeddings_data is None and self.embeddings_freq:
<ide> raise ValueError(
<ide><path>keras/constraints.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> """Constraints: functions that impose constraints on weight values."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide> def _kernel_constraint(self, kernel):
<ide> backend.cast(tf.math.floormod(kernel_shape, 2), "bool"),
<ide> lambda: kernel[start - 1 : start, start - 1 : start],
<ide> lambda: kernel[start - 1 : start, start - 1 : start]
<del> + backend.zeros( # pylint: disable=g-long-lambda
<del> (2, 2), dtype=kernel.dtype
<del> ),
<add> + backend.zeros((2, 2), dtype=kernel.dtype),
<ide> )
<ide> index = backend.switch(
<ide> backend.cast(tf.math.floormod(kernel_shape, 2), "bool"),
<ide><path>keras/datasets/boston_housing.py
<ide> def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
<ide> origin=origin_folder + "boston_housing.npz",
<ide> file_hash="f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5", # noqa: E501
<ide> )
<del> with np.load(
<del> path, allow_pickle=True
<del> ) as f: # pylint: disable=unexpected-keyword-arg
<add> with np.load(path, allow_pickle=True) as f:
<ide> x = f["x"]
<ide> y = f["y"]
<ide>
<ide><path>keras/datasets/imdb.py
<ide> def load_data(
<ide> origin=origin_folder + "imdb.npz",
<ide> file_hash="69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f", # noqa: E501
<ide> )
<del> with np.load(
<del> path, allow_pickle=True
<del> ) as f: # pylint: disable=unexpected-keyword-arg
<add> with np.load(path, allow_pickle=True) as f:
<ide> x_train, labels_train = f["x_train"], f["y_train"]
<ide> x_test, labels_test = f["x_test"], f["y_test"]
<ide>
<ide><path>keras/datasets/mnist.py
<ide> def load_data(path="mnist.npz"):
<ide> origin=origin_folder + "mnist.npz",
<ide> file_hash="731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1", # noqa: E501
<ide> )
<del> with np.load(
<del> path, allow_pickle=True
<del> ) as f: # pylint: disable=unexpected-keyword-arg
<add> with np.load(path, allow_pickle=True) as f:
<ide> x_train, y_train = f["x_train"], f["y_train"]
<ide> x_test, y_test = f["x_test"], f["y_test"]
<ide>
<ide><path>keras/datasets/reuters.py
<ide> def load_data(
<ide> origin=origin_folder + "reuters.npz",
<ide> file_hash="d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916", # noqa: E501
<ide> )
<del> with np.load(
<del> path, allow_pickle=True
<del> ) as f: # pylint: disable=unexpected-keyword-arg
<add> with np.load(path, allow_pickle=True) as f:
<ide> xs, labels = f["x"], f["y"]
<ide>
<ide> rng = np.random.RandomState(seed)
<ide><path>keras/distribute/__init__.py
<ide> # ==============================================================================
<ide> """Keras' Distribution Strategy library."""
<ide>
<del># pylint: disable=unused-import
<add>
<ide> from keras.distribute import sidecar_evaluator
<ide><path>keras/distribute/distribute_coordinator_utils.py
<ide> def __enter__(self):
<ide> "You cannot run distribute coordinator in a `worker_fn`.\t"
<ide> + self._debug_message()
<ide> )
<del> # pylint: disable=protected-access
<add>
<ide> _worker_context.current = self
<ide>
<ide> def __exit__(
<ide> self, unused_exception_type, unused_exception_value, unused_traceback
<ide> ):
<del> # pylint: disable=protected-access
<add>
<ide> _worker_context.current = None
<ide>
<ide> def _get_master_target(self):
<ide> def join(self):
<ide> def _configure_session_config_for_std_servers(
<ide> strategy, eval_strategy, session_config, cluster_spec, task_type, task_id
<ide> ):
<del> # pylint: disable=g-doc-args
<add>
<ide> """Call strategy's `configure` to mutate the session_config.
<ide>
<ide> The session_config is currently needed as default config for a TensorFlow
<ide> def run_distribute_coordinator(
<ide> # TODO(yuefengz): validate cluster_spec.
<ide> cluster_spec = normalize_cluster_spec(cluster_spec)
<ide> elif hasattr(strategy.extended, "_cluster_resolver"):
<del> cluster_resolver = (
<del> strategy.extended._cluster_resolver
<del> ) # pylint: disable=protected-access
<add> cluster_resolver = strategy.extended._cluster_resolver
<ide> task_type = cluster_resolver.task_type
<ide> task_id = cluster_resolver.task_id
<ide> rpc_layer = cluster_resolver.rpc_layer or rpc_layer
<ide><path>keras/distribute/distributed_file_utils.py
<ide>
<ide>
<ide> def _get_base_dirpath(strategy):
<del> task_id = strategy.extended._task_id # pylint: disable=protected-access
<add> task_id = strategy.extended._task_id
<ide> return "workertemp_" + str(task_id)
<ide>
<ide>
<ide> def write_dirpath(dirpath, strategy):
<ide> # If strategy is still not available, this is not in distributed
<ide> # training. Fallback to original dirpath.
<ide> return dirpath
<del> if (
<del> not strategy.extended._in_multi_worker_mode()
<del> ): # pylint: disable=protected-access
<add> if not strategy.extended._in_multi_worker_mode():
<ide> return dirpath
<ide> if strategy.extended.should_checkpoint:
<ide> return dirpath
<ide><path>keras/distribute/distributed_training_utils.py
<ide> # core MirroredStrategy only. Remove this check when contrib MirroredStrategy is
<ide> # no longer needed.
<ide> def global_batch_size_supported(distribution_strategy):
<del> return (
<del> distribution_strategy.extended._global_batch_size
<del> ) # pylint: disable=protected-access
<add> return distribution_strategy.extended._global_batch_size
<ide>
<ide>
<ide> def call_replica_local_fn(fn, *args, **kwargs):
<ide><path>keras/distribute/distributed_training_utils_v1.py
<ide> # isort: off
<ide> from tensorflow.python.platform import tf_logging as logging
<ide>
<del># pylint:disable=protected-access
<del>
<ide>
<ide> def set_weights(distribution_strategy, dist_model, weights):
<ide> """Sets the weights of the replicated models.
<ide> def flatten_per_replica_values(distribution_strategy, per_replica_values):
<ide> List of values of all the PerReplica objects.
<ide>
<ide> """
<del> # pylint: disable=g-complex-comprehension
<add>
<ide> # This function takes a PerReplica object or a list of PerReplica objects
<ide> # and returns all the values associated with it.
<ide> return [
<ide> def validate_all_tensor_shapes(x, x_values):
<ide>
<ide> def _wait_for_variable_initialization(session):
<ide> """Utility to wait for variables to be initialized."""
<del> all_variables = backend._get_variables(
<del> backend.get_graph()
<del> ) # pylint: disable=protected-access
<add> all_variables = backend._get_variables(backend.get_graph())
<ide> candidate_vars = []
<ide> for v in all_variables:
<ide> if not getattr(v, "_keras_initialized", False):
<ide> def _wait_for_variable_initialization(session):
<ide> for flag, v in zip(is_initialized, candidate_vars):
<ide> if not flag:
<ide> uninitialized_vars.append(v)
<del> v._keras_initialized = True # pylint: disable=protected-access
<add> v._keras_initialized = True
<ide> if not uninitialized_vars:
<ide> break
<ide>
<ide>
<ide> def init_restore_or_wait_for_variables():
<ide> """Initialize or restore variables or wait for variables to be
<ide> initialized."""
<del> backend._initialize_variables(
<del> backend._get_session()
<del> ) # pylint: disable=protected-access
<add> backend._initialize_variables(backend._get_session())
<ide>
<ide>
<ide> def validate_inputs(x, y):
<ide> def _build_network_on_replica(model, mode, inputs=None, targets=None):
<ide> A new model with shared layers with the old model.
<ide> """
<ide> # Need to do imports here since we run into a circular dependency error.
<del> from keras import models # pylint: disable=g-import-not-at-top
<del> from keras.engine import sequential # pylint: disable=g-import-not-at-top
<add> from keras import models
<add> from keras.engine import sequential
<ide>
<ide> # We rely on the internal methods to avoid having share_weights weights in
<ide> # the public API.
<ide> def _clone_and_build_model(model, mode, inputs=None, targets=None):
<ide> """Clone and build the given keras_model."""
<ide> # We need to set the import here since we run into a circular dependency
<ide> # error.
<del> from keras import models # pylint: disable=g-import-not-at-top
<add> from keras import models
<ide>
<ide> cloned_model = models.clone_model(model, input_tensors=inputs)
<ide>
<ide> def filter_distributed_callbacks(callbacks_list, model):
<ide> callback
<ide> for callback in callbacks_list
<ide> if not callback._chief_worker_only
<del> ] # pylint: disable=protected-access
<add> ]
<ide>
<ide>
<ide> def _update_sample_weight_modes(model, mode, sample_weights):
<ide><path>keras/distribute/mirrored_strategy_test.py
<ide> def loss_fn(ctx):
<ide> optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.25)
<ide> update_ops = optimizer._distributed_apply(
<ide> distribution, grads_and_vars
<del> ) # pylint: disable=protected-access
<add> )
<ide>
<ide> if not tf.executing_eagerly():
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide><path>keras/distribute/mirrored_variable_test.py
<ide> def assertAllDifferent(self, objs):
<ide>
<ide> def _is_mirrored(self, val):
<ide> if distributed_training_utils.is_distributed_variable(val):
<del> if val._policy: # pylint: disable=protected-access
<del> return (
<del> val._policy._is_mirrored()
<del> ) # pylint: disable=protected-access
<add> if val._policy:
<add> return val._policy._is_mirrored()
<ide> # Since `Mirrored` is a private symbol in tf.distribute, we're checking
<ide> # with `DistributedValues` as an approximation.
<ide> return isinstance(val, tf.distribute.DistributedValues)
<ide><path>keras/distribute/multi_worker_testing_utils.py
<ide>
<ide> _portpicker_import_error = None
<ide> try:
<del> import portpicker # pylint: disable=g-import-not-at-top
<add> import portpicker
<ide> except (
<ide> ImportError,
<ide> ModuleNotFoundError,
<del>) as _error: # pylint: disable=invalid-name
<add>) as _error:
<ide> _portpicker_import_error = _error
<ide> portpicker = None
<ide>
<ide> def make_parameter_server_cluster(num_workers, num_ps):
<ide> def pick_unused_port():
<ide> """Returns an unused and unassigned local port."""
<ide> if _portpicker_import_error:
<del> raise _portpicker_import_error # pylint: disable=raising-bad-type
<add> raise _portpicker_import_error
<ide>
<ide> global ASSIGNED_PORTS
<ide> with lock:
<ide> def _create_cluster(
<ide> ):
<ide> """Creates and starts local servers and returns the cluster_spec dict."""
<ide> if _portpicker_import_error:
<del> raise _portpicker_import_error # pylint: disable=raising-bad-type
<add> raise _portpicker_import_error
<ide> worker_ports = [pick_unused_port() for _ in range(num_workers)]
<ide> ps_ports = [pick_unused_port() for _ in range(num_ps)]
<ide>
<ide><path>keras/dtensor/integration_test_utils.py
<ide> from keras.dtensor import layout_map as layout_map_lib
<ide> from keras.utils import np_utils
<ide>
<del># pylint: disable=missing-function-docstring
<del>
<ide> NUM_CLASS = 10 # MNIST has 10 digits
<ide>
<ide>
<ide><path>keras/dtensor/layout_map.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=missing-class-docstring
<ide>
<ide> # We will skip the path for certain attributes when mapping the layout, e.g.
<ide> # model._self_tracked_trackables, or layer._trainable_weights/
<ide> def _map_subclass_model_variable(model, layout_map):
<ide> # Note that the model._flatten is a method from tf.Module, and it returns
<ide> # duplicated items (since some of the items have different paths).
<ide> for path, variable in model._flatten(
<del> predicate=_is_lazy_init_variable, # pylint: disable=protected-access
<add> predicate=_is_lazy_init_variable,
<ide> with_path=True,
<ide> ):
<ide> # Note that path is a tuple that contains string and ints, eg:
<ide> def _map_subclass_model_variable(model, layout_map):
<ide> _set_object_by_path(model, path, new_variable)
<ide> lazy_init_variable_to_tf_variable_map[id(variable)] = new_variable
<ide>
<del> for layer in model._flatten( # pylint: disable=protected-access
<add> for layer in model._flatten(
<ide> predicate=lambda o: isinstance(o, base_layer.Layer)
<ide> ):
<ide> _config_dvariable_regularization(
<ide> def _map_subclass_model_variable(model, layout_map):
<ide> # After we replaced all the variables, we want to make sure all the cached
<ide> # attributes hold the new variable, rather than the old LazyInitVariable.
<ide> for path, variable in model._flatten(
<del> predicate=_is_lazy_init_variable, # pylint: disable=protected-access
<add> predicate=_is_lazy_init_variable,
<ide> with_path=True,
<ide> ):
<ide> tf_variable = lazy_init_variable_to_tf_variable_map[id(variable)]
<ide> def _init_state_variable_for_rng(model, layout_map):
<ide> BaseRandomLayers.
<ide> layout_map: used to get the default mesh information to create DVariable.
<ide> """
<del> # pylint: disable=protected-access
<add>
<ide> for l in model._flatten(
<ide> predicate=lambda o: isinstance(o, base_layer.BaseRandomLayer)
<ide> ):
<ide> def _config_dvariable_regularization(
<ide> lazy_init_variable_to_tf_variable_map: the dict between LazyInitVariable
<ide> ID and newly created DVariable.
<ide> """
<del> # pylint: disable=protected-access
<add>
<ide> for (name, variable, regualarizer) in layer._captured_weight_regularizer:
<ide> if not _is_lazy_init_variable(variable):
<ide> raise ValueError(
<ide> def _create_dvariable(layout_map, object_path, variable):
<ide> layout = dtensor.Layout.replicated(
<ide> mesh=layout_map.get_default_mesh(), rank=variable_rank
<ide> )
<del> init_val = variable._initial_value # pylint: disable=protected-access
<add> init_val = variable._initial_value
<ide> if callable(init_val):
<ide> with lazy_variable.disable_init_variable_creator():
<ide> init_val = utils.call_with_layout(init_val, layout)
<ide><path>keras/dtensor/lazy_variable.py
<ide> def _infer_shape_dtype_and_create_handle(initial_value, shape, dtype, name):
<ide> s=[compat.as_bytes("loc:@%s" % handle_name)]
<ide> )
<ide> )
<del> with ops.get_default_graph()._attr_scope(
<del> {"_class": attr}
<del> ): # pylint: disable=protected-access
<add> with ops.get_default_graph()._attr_scope({"_class": attr}):
<ide> with ops.name_scope("Initializer"), device_context_manager(None):
<ide> if not callable(initial_value):
<ide> if isinstance(
<ide> def __init__(
<ide> initial_value=None,
<ide> trainable=None,
<ide> collections=None,
<del> validate_shape=True, # pylint: disable=unused-argument
<add> validate_shape=True,
<ide> caching_device=None,
<ide> name=None,
<ide> dtype=None,
<ide><path>keras/dtensor/optimizers.py
<ide> from tensorflow.tools.docs import doc_controls
<ide>
<ide>
<del># pylint: disable=protected-access,missing-class-docstring
<ide> class Optimizer(optimizer_lib._BaseOptimizer):
<ide> """DTensor specific optimizers.
<ide>
<ide><path>keras/dtensor/test_util.py
<ide> def tearDown(self):
<ide> reset_dtensor()
<ide>
<ide> @staticmethod
<del> def configTestMesh(device_type_mesh_map): # pylint: disable=invalid-name
<add> def configTestMesh(device_type_mesh_map):
<ide> """Configs corresponding mesh given test context.
<ide>
<ide> If running on a CPU mesh, set the virtual device on CPU.
<ide> def create_device_array(shape, device_type):
<ide> device_count = np.prod(shape)
<ide> return np.asarray(
<ide> [
<del> tf.DeviceSpec( # pylint: disable=g-complex-comprehension
<add> tf.DeviceSpec(
<ide> job="localhost/replica:0/task:0",
<ide> device_type=device_type,
<ide> device_index=i,
<ide> def create_device_ids_array(shape):
<ide>
<ide>
<ide> def reset_context():
<del> context._reset_context() # pylint: disable=protected-access
<add> context._reset_context()
<ide>
<ide>
<ide> def reset_logical_devices(device_type, count):
<ide> def reset_logical_devices(device_type, count):
<ide>
<ide>
<ide> def reset_dtensor():
<del> dtensor_api._reset() # pylint: disable=protected-access
<add> dtensor_api._reset()
<ide><path>keras/dtensor/utils.py
<ide> def _wrap_function(instance, *args, **kwargs):
<ide> # of __init__, since the class might need the mesh to create weights in
<ide> # the __init__.
<ide> if mesh is not None:
<del> instance._mesh = mesh # pylint: disable=protected-access
<add> instance._mesh = mesh
<ide> init_method(instance, *args, **kwargs)
<ide>
<ide> return tf.__internal__.decorator.make_decorator(
<ide><path>keras/engine/base_layer.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<del># pylint: disable=g-classes-have-attributes
<del># pylint: disable=g-bad-import-order
<add>
<add>
<ide> """Contains the base Layer class, from which all layers inherit."""
<ide>
<ide> import collections
<ide> from tensorflow.python.util.tf_export import keras_export
<ide> from tensorflow.tools.docs import doc_controls
<ide>
<del># pylint: disable=g-inconsistent-quotes
<add>
<ide> metrics_mod = generic_utils.LazyLoader(
<ide> "metrics_mod", globals(), "keras.metrics"
<ide> )
<del># pylint: enable=g-inconsistent-quotes
<add>
<ide>
<ide> # Prefix that is added to the TF op layer names.
<ide> _TF_OP_LAYER_NAME_PREFIX = "tf_op_layer_"
<ide> def build(self, input_shape):
<ide> self.built = True
<ide>
<ide> @doc_controls.for_subclass_implementers
<del> def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument
<add> def call(self, inputs, *args, **kwargs):
<ide> """This is where the layer's logic lives.
<ide>
<ide> The `call()` method may not create state (except in its first
<ide> def add_weight(
<ide> old_getter = getter
<ide>
<ide> # Wrap variable constructor to return an AutoCastVariable.
<del> def getter(*args, **kwargs): # pylint: disable=function-redefined
<add> def getter(*args, **kwargs):
<ide> variable = old_getter(*args, **kwargs)
<ide> return autocast_variable.create_autocast_variable(variable)
<ide>
<ide> def check_type_return_shape(s):
<ide> )
<ide>
<ide> @generic_utils.default
<del> def compute_mask(
<del> self, inputs, mask=None
<del> ): # pylint: disable=unused-argument
<add> def compute_mask(self, inputs, mask=None):
<ide> """Computes an output mask tensor.
<ide>
<ide> Args:
<ide> def _get_unnested_name_scope(self):
<ide> if current_name_scope == "/":
<ide> current_name_scope = self._name_scope_on_declaration
<ide> with tf.name_scope(current_name_scope):
<del> name_scope = (
<del> self._name_scope()
<del> ) # Avoid autoincrementing. # pylint: disable=not-callable
<add> name_scope = self._name_scope() # Avoid autoincrementing.
<ide> else:
<ide> name_scope = self._name_scope()
<ide>
<ide> def _tag_callable(loss):
<ide> return None
<ide> if not tf.is_tensor(loss):
<ide> loss = tf.convert_to_tensor(loss, dtype=backend.floatx())
<del> loss._unconditional_loss = True # pylint: disable=protected-access
<add> loss._unconditional_loss = True
<ide> return loss
<ide>
<ide> losses = tf.nest.flatten(losses)
<ide> def add_update(self, updates):
<ide> if not call_context.frozen:
<ide> for update in tf.nest.flatten(updates):
<ide> if callable(update):
<del> update() # pylint: disable=not-callable
<add> update()
<ide>
<ide> def set_weights(self, weights):
<ide> """Sets the weights of the layer, from NumPy arrays.
<ide> def _infer_output_signature(self, inputs, args, kwargs, input_masks):
<ide> keras_tensor.keras_tensor_to_placeholder, input_masks
<ide> )
<ide>
<del> with backend.name_scope(
<del> self._name_scope()
<del> ): # pylint: disable=not-callable
<add> with backend.name_scope(self._name_scope()):
<ide> with autocast_variable.enable_auto_cast_variables(
<ide> self._compute_dtype_object
<ide> ):
<ide> def _dtype(self, value):
<ide> value = tf.as_dtype(value).name
<ide> self._set_dtype_policy(policy.Policy(value))
<ide>
<del> def _name_scope(self): # pylint: disable=method-hidden
<add> def _name_scope(self):
<ide> if not tf.__internal__.tf2.enabled():
<ide> return self.name
<ide> name_scope = self.name
<ide> def _maybe_build(self, inputs):
<ide> # `init_scope` to avoid creating symbolic Tensors that will
<ide> # later pollute any eager operations.
<ide> with tf_utils.maybe_init_scope(self):
<del> self.build(input_shapes) # pylint:disable=not-callable
<add> self.build(input_shapes)
<ide> # We must set also ensure that the layer is marked as built, and the
<ide> # build shape is stored since user defined build functions may not
<ide> # be calling `super.build()`
<ide> def __delattr__(self, name):
<ide> if existing_value not in reference_counts:
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(
<ide> name
<del> ) # pylint: disable=bad-super-call
<add> )
<ide> return
<ide>
<ide> reference_count = reference_counts[existing_value]
<ide> def __delattr__(self, name):
<ide> reference_counts[existing_value] = reference_count - 1
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(
<ide> name
<del> ) # pylint: disable=bad-super-call
<add> )
<ide> return
<ide> else:
<ide> # This is the last remaining reference.
<ide> del reference_counts[existing_value]
<ide>
<del> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(
<del> name
<del> ) # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)
<ide>
<ide> if isinstance(existing_value, Layer) or base_layer_utils.has_weights(
<ide> existing_value
<ide> ):
<del> super(
<del> tf.__internal__.tracking.AutoTrackable, self
<del> ).__setattr__( # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> "_self_tracked_trackables",
<ide> [
<ide> l
<ide> def __delattr__(self, name):
<ide> ],
<ide> )
<ide> if isinstance(existing_value, tf.Variable):
<del> super(
<del> tf.__internal__.tracking.AutoTrackable, self
<del> ).__setattr__( # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> "_trainable_weights",
<ide> [w for w in self._trainable_weights if w is not existing_value],
<ide> )
<del> super(
<del> tf.__internal__.tracking.AutoTrackable, self
<del> ).__setattr__( # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> "_non_trainable_weights",
<ide> [
<ide> w
<ide> def __setattr__(self, name, value):
<ide> try:
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> name, value
<del> ) # pylint: disable=bad-super-call
<add> )
<ide> except AttributeError:
<ide> raise AttributeError(
<ide> (
<ide> def __setattr__(self, name, value):
<ide> # status quo. See the comment at __delattr__.
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> name, value
<del> ) # pylint: disable=bad-super-call
<add> )
<ide>
<ide> def _gather_children_attribute(self, attribute):
<ide> assert attribute in {
<ide> def get_config(self):
<ide> return config
<ide>
<ide>
<del>def _in_functional_construction_mode(
<del> layer, inputs, args, kwargs, input_list
<del>): # pylint: disable=unused-argument
<add>def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):
<ide> """Check the arguments to see if we are constructing a functional model."""
<ide> # We are constructing a functional model if any of the inputs
<ide> # are KerasTensors
<ide><path>keras/engine/base_layer_test.py
<ide> import os
<ide>
<ide> import numpy as np
<del>
<del># pylint: disable=g-bad-import-order
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> from keras import backend
<ide> def test_dynamic_layer_error_running_in_graph_mode(self):
<ide>
<ide> def test_manual_compute_output_shape(self):
<ide> class BuildCounter(base_layer.Layer):
<del> def __init__(
<del> self, *args, **kwargs
<del> ): # pylint: disable=redefined-outer-name
<add> def __init__(self, *args, **kwargs):
<ide> super().__init__(*args, **kwargs)
<ide> self.build_counter = 0
<ide>
<ide> def get_config(self):
<ide> )
<ide>
<ide> class MyLayerNew2(base_layer.Layer):
<del> def __init__(
<del> self, name="MyLayerName", dtype=None, **kwargs
<del> ): # pylint:disable=redefined-outer-name
<add> def __init__(self, name="MyLayerName", dtype=None, **kwargs):
<ide> super().__init__(name=name, dtype=dtype, **kwargs)
<ide>
<ide> # Check that if the kwargs in `__init__` are base layer constructor
<ide> def call(self, inputs, *, training=True):
<ide>
<ide> def _test_custom_layer_training_arg(
<ide> self,
<del> # pylint: disable=invalid-name
<ide> CustomLayerNoTrainingArg,
<ide> CustomLayerDefaultTrainingMissing,
<ide> CustomLayerDefaultTrainingNone,
<ide> CustomLayerDefaultTrainingFalse,
<ide> CustomLayerDefaultTrainingTrue,
<del> # pylint: enable=invalid-name
<ide> ):
<ide> x = tf.ones(shape=(1, 1))
<ide>
<ide> def easily_identifiable_name():
<ide> try:
<ide> _ = TypeErrorLayer()(inputs)
<ide> except TypeError as e:
<del> self.assertIn(
<del> "easily_identifiable_name", str(e)
<del> ) # pylint: disable=g-assert-in-except
<add> self.assertIn("easily_identifiable_name", str(e))
<ide>
<ide> @test_combinations.generate(
<ide> test_combinations.combine(mode=["graph", "eager"])
<ide><path>keras/engine/base_layer_utils.py
<ide> def make_variable(
<ide> collections=None,
<ide> synchronization=tf.VariableSynchronization.AUTO,
<ide> aggregation=tf.VariableAggregation.NONE,
<del> partitioner=None, # pylint: disable=unused-argument
<add> partitioner=None,
<ide> layout=None,
<ide> ):
<ide> """Temporary util to create a variable (relies on `variable_scope.variable`).
<ide> def mark_checked(tensors):
<ide> """
<ide>
<ide> def _mark_checked(tensor):
<del> tensor._keras_history_checked = True # pylint: disable=protected-access
<add> tensor._keras_history_checked = True
<ide>
<ide> tf.nest.map_structure(_mark_checked, tensors)
<ide>
<ide> def _mark_as_return(tensor):
<ide> if not tf.is_tensor(tensor):
<ide> return tensor
<ide>
<del> # pylint: disable=protected-access
<ide> return_tensor = acd.mark_as_return(tensor)
<ide> if getattr(tensor, "_keras_mask", None) is not None:
<ide> return_tensor._keras_mask = acd.mark_as_return(tensor._keras_mask)
<ide> def _mark_as_return(tensor):
<ide> return_tensor._tfp_distribution = tensor._tfp_distribution
<ide>
<ide> return return_tensor
<del> # pylint: enable=protected-access
<ide>
<ide> return tf.nest.map_structure(_mark_as_return, outputs)
<ide>
<ide><path>keras/engine/base_layer_v1.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<del># pylint: disable=g-bad-import-order
<add>
<add>
<ide> """Contains the base Layer class, from which all layers inherit."""
<ide>
<ide> import functools
<ide> from tensorflow.tools.docs import doc_controls
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> class Layer(base_layer.Layer):
<ide> """Base layer class.
<ide>
<ide> def build(self, input_shape):
<ide> self.built = True
<ide>
<ide> @doc_controls.for_subclass_implementers
<del> def call(self, inputs, **kwargs): # pylint: disable=unused-argument
<add> def call(self, inputs, **kwargs):
<ide> """This is where the layer's logic lives.
<ide>
<ide> Args:
<ide> def add_weight(
<ide> # Wrap 'getter' with a version that returns an AutoCastVariable.
<ide> old_getter = getter
<ide>
<del> def getter(*args, **kwargs): # pylint: disable=function-redefined
<add> def getter(*args, **kwargs):
<ide> variable = old_getter(*args, **kwargs)
<ide> return autocast_variable.create_autocast_variable(variable)
<ide>
<ide> def check_type_return_shape(s):
<ide> )
<ide>
<ide> @generic_utils.default
<del> def compute_mask(
<del> self, inputs, mask=None
<del> ): # pylint: disable=unused-argument
<add> def compute_mask(self, inputs, mask=None):
<ide> """Computes an output mask tensor.
<ide>
<ide> Args:
<ide> def _convert_non_tensor(x):
<ide> self.input_spec, inputs, self.name
<ide> )
<ide> graph = backend.get_graph()
<del> with graph.as_default(), backend.name_scope(
<del> self._name_scope()
<del> ): # pylint: disable=not-callable
<add> with graph.as_default(), backend.name_scope(self._name_scope()):
<ide> # Build layer if applicable (if the `build` method has been
<ide> # overridden).
<ide> self._maybe_build(inputs)
<ide> def _convert_non_tensor(x):
<ide> self._set_inputs(inputs, outputs)
<ide> else:
<ide> # Eager execution on data tensors.
<del> with backend.name_scope(
<del> self._name_scope()
<del> ): # pylint: disable=not-callable
<add> with backend.name_scope(self._name_scope()):
<ide> self._maybe_build(inputs)
<ide> cast_inputs = self._maybe_cast_inputs(inputs)
<ide> with autocast_variable.enable_auto_cast_variables(
<ide> def _tag_unconditional(loss):
<ide> return None
<ide> if not tf.is_tensor(loss):
<ide> loss = tf.convert_to_tensor(loss, dtype=backend.floatx())
<del> loss._unconditional_loss = (
<del> inputs is None
<del> ) # pylint: disable=protected-access
<add> loss._unconditional_loss = inputs is None
<ide> return loss
<ide>
<ide> losses = tf.nest.flatten(losses)
<ide> def _dtype(self, value):
<ide> value = tf.as_dtype(value).name
<ide> self._set_dtype_policy(policy.Policy(value))
<ide>
<del> def _name_scope(self): # pylint: disable=method-hidden
<add> def _name_scope(self):
<ide> return self.name
<ide>
<ide> def _init_set_name(self, name, zero_based=True):
<ide> def __delattr__(self, name):
<ide> if existing_value not in reference_counts:
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(
<ide> name
<del> ) # pylint: disable=bad-super-call
<add> )
<ide> return
<ide>
<ide> reference_count = reference_counts[existing_value]
<ide> def __delattr__(self, name):
<ide> reference_counts[existing_value] = reference_count - 1
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(
<ide> name
<del> ) # pylint: disable=bad-super-call
<add> )
<ide> return
<ide> else:
<ide> # This is the last remaining reference.
<ide> del reference_counts[existing_value]
<ide>
<del> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(
<del> name
<del> ) # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)
<ide>
<ide> if isinstance(existing_value, Layer) or base_layer_utils.has_weights(
<ide> existing_value
<ide> ):
<del> super(
<del> tf.__internal__.tracking.AutoTrackable, self
<del> ).__setattr__( # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> "_self_tracked_trackables",
<ide> [
<ide> l
<ide> def __delattr__(self, name):
<ide> ],
<ide> )
<ide> if isinstance(existing_value, tf.Variable):
<del> super(
<del> tf.__internal__.tracking.AutoTrackable, self
<del> ).__setattr__( # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> "_trainable_weights",
<ide> [w for w in self._trainable_weights if w is not existing_value],
<ide> )
<del> super(
<del> tf.__internal__.tracking.AutoTrackable, self
<del> ).__setattr__( # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> "_non_trainable_weights",
<ide> [
<ide> w
<ide> def __setattr__(self, name, value):
<ide> try:
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> name, value
<del> ) # pylint: disable=bad-super-call
<add> )
<ide> except AttributeError:
<ide> raise AttributeError(
<ide> (
<ide> def __setattr__(self, name, value):
<ide> # status quo. See the comment at __delattr__.
<ide> super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
<ide> name, value
<del> ) # pylint: disable=bad-super-call
<add> )
<ide>
<ide> # This is a hack so that the is_layer (within
<ide> # training/trackable/layer_utils.py) check doesn't get the weights attr.
<ide><path>keras/engine/base_preprocessing_layer.py
<ide> def update_state(self, data):
<ide> raise NotImplementedError
<ide>
<ide> @doc_controls.do_not_generate_docs
<del> def reset_state(self): # pylint: disable=method-hidden
<add> def reset_state(self):
<ide> """Resets the statistics of the preprocessing layer."""
<ide> raise NotImplementedError
<ide>
<ide> def adapt(self, data, batch_size=None, steps=None):
<ide> """
<ide> _disallow_inside_tf_function("adapt")
<ide> if not version_utils.should_use_v2():
<del> raise RuntimeError(
<del> "`adapt` is only supported in tensorflow v2."
<del> ) # pylint: disable=g-doc-exception
<add> raise RuntimeError("`adapt` is only supported in tensorflow v2.")
<ide> if not self._is_compiled:
<ide> self.compile() # Compile with defaults.
<ide> if self.built:
<ide><path>keras/engine/base_preprocessing_layer_test.py
<ide> def build(self, input_shape):
<ide> def update_state(self, data):
<ide> self.sum.assign_add(tf.reduce_sum(tf.cast(data, tf.float32)))
<ide>
<del> def reset_state(self): # pylint: disable=method-hidden
<add> def reset_state(self):
<ide> self.sum.assign(0.0)
<ide>
<ide> def set_total(self, sum_value):
<ide><path>keras/engine/compile_utils.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Utilities for `Model.compile`."""
<ide>
<ide>
<ide> def _get_loss_object(self, loss):
<ide> if loss_name is None:
<ide> raise ValueError(f"Loss should be a callable, received: {loss}")
<ide> loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)
<del> loss._allow_sum_over_batch_size = (
<del> True # pylint: disable=protected-access
<del> )
<add> loss._allow_sum_over_batch_size = True
<ide> return loss
<ide>
<ide> def _should_broadcast(self, obj):
<ide> def _set_metric_names(self):
<ide> # For multi-output models, prepend the output name to the metric name.
<ide> # For weighted metrics, prepend "weighted_" if the name would be
<ide> # non-unique.
<del> # pylint: disable=protected-access
<add>
<ide> metric_names = set()
<ide> is_multi_output = len(self._output_names) > 1
<ide> zip_args = (self._output_names, self._metrics, self._weighted_metrics)
<ide> def _set_metric_names(self):
<ide> "to have unique names."
<ide> )
<ide> metric_names.add(wm._name)
<del> # pylint: enable=protected-access
<ide>
<ide> def _create_ordered_metrics(self):
<ide> """Cache the flat order needed when return metrics, for backcompat."""
<ide> def _get_metric_object(self, metric, y_t, y_p):
<ide> metric_obj = metrics_mod.categorical_crossentropy
<ide>
<ide> if isinstance(metric_obj, losses_mod.Loss):
<del> metric_obj._allow_sum_over_batch_size = (
<del> True # pylint: disable=protected-access
<del> )
<add> metric_obj._allow_sum_over_batch_size = True
<ide>
<ide> if not isinstance(metric_obj, metrics_mod.Metric):
<ide> if isinstance(metric, str):
<ide><path>keras/engine/data_adapter.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide> try:
<del> import pandas as pd # pylint: disable=g-import-not-at-top
<add> import pandas as pd
<ide> except ImportError:
<ide> pd = None
<ide>
<ide> def __init__(
<ide>
<ide> def _get_tensor_spec(t):
<ide> # TODO(b/226395276): Remove _with_tensor_ranks_only usage.
<del> return type_spec.type_spec_from_value(
<del> t
<del> )._with_tensor_ranks_only() # pylint: disable=protected-access
<add> return type_spec.type_spec_from_value(t)._with_tensor_ranks_only()
<ide>
<ide> output_signature = tf.nest.map_structure(_get_tensor_spec, peek)
<ide>
<ide> def _get_tensor_types():
<ide>
<ide> def _is_scipy_sparse(x):
<ide> try:
<del> from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
<add> from scipy.sparse import issparse
<ide>
<ide> return issparse(x)
<ide> except ImportError:
<ide><path>keras/engine/data_adapter_test.py
<ide> def test_training_numpy(self):
<ide>
<ide> def test_can_handle_pandas(self):
<ide> try:
<del> import pandas as pd # pylint: disable=g-import-not-at-top
<add> import pandas as pd
<ide> except ImportError:
<ide> self.skipTest("Skipping test because pandas is not installed.")
<ide> self.assertTrue(
<ide> def test_can_handle_pandas(self):
<ide> @test_combinations.run_all_keras_modes(always_skip_v1=True)
<ide> def test_training_pandas(self):
<ide> try:
<del> import pandas as pd # pylint: disable=g-import-not-at-top
<add> import pandas as pd
<ide> except ImportError:
<ide> self.skipTest("Skipping test because pandas is not installed.")
<ide> input_a = keras.Input(shape=(3,), name="input_a")
<ide><path>keras/engine/deferred_sequential_test.py
<ide> from keras.testing_infra import test_utils
<ide>
<ide> try:
<del> import h5py # pylint:disable=g-import-not-at-top
<add> import h5py
<ide> except ImportError:
<ide> h5py = None
<ide>
<ide><path>keras/engine/functional.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<add>
<ide> """A `Network` is way to compose layers: the topological form of a `Model`."""
<ide>
<ide>
<ide> from tensorflow.tools.docs import doc_controls
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> class Functional(training_lib.Model):
<ide> """A `Functional` model is a `Model` defined as a directed graph of layers.
<ide>
<ide> def _init_graph_network(self, inputs, outputs):
<ide> layer,
<ide> node_index,
<ide> tensor_index,
<del> ) = x._keras_history # pylint: disable=protected-access
<add> ) = x._keras_history
<ide> self._output_layers.append(layer)
<ide> self._output_coordinates.append((layer, node_index, tensor_index))
<ide>
<ide> def _init_graph_network(self, inputs, outputs):
<ide> layer,
<ide> node_index,
<ide> tensor_index,
<del> ) = x._keras_history # pylint: disable=protected-access
<add> ) = x._keras_history
<ide> # It's supposed to be an input layer, so only one node
<ide> # and one tensor output.
<ide> assert node_index == 0
<ide> def compute_output_shape(self, input_shape):
<ide> layer_output_shapes, to_tuples=False
<ide> )
<ide>
<del> node_index = layer._inbound_nodes.index(
<del> node
<del> ) # pylint: disable=protected-access
<add> node_index = layer._inbound_nodes.index(node)
<ide> for j, shape in enumerate(
<ide> tf.nest.flatten(layer_output_shapes)
<ide> ):
<ide> def _validate_graph_inputs_and_outputs(self):
<ide> f"Received inputs={x} (missing previous layer metadata)."
<ide> )
<ide> # Check that x is an input tensor.
<del> # pylint: disable=protected-access
<add>
<ide> layer = x._keras_history.layer
<ide> if len(layer._inbound_nodes) > 1 or (
<ide> layer._inbound_nodes and not layer._inbound_nodes[0].is_input
<ide> def _build_map_helper(
<ide> layer,
<ide> node_index,
<ide> _,
<del> ) = tensor._keras_history # pylint: disable=protected-access
<del> node = layer._inbound_nodes[node_index] # pylint: disable=protected-access
<add> ) = tensor._keras_history
<add> node = layer._inbound_nodes[node_index]
<ide>
<ide> # Don't repeat work for shared subgraphs
<ide> if node in finished_nodes:
<ide><path>keras/engine/functional_test.py
<ide> def call(self, inputs):
<ide>
<ide> x = input_layer_lib.Input(shape=(32,))
<ide> test_layer = PowersLayer()
<del> p1, p2 = test_layer(x) # pylint: disable=not-callable
<add> p1, p2 = test_layer(x)
<ide>
<ide> self.assertIs(test_layer.input, x)
<ide> self._assertAllIs(test_layer.output, [p1, p2])
<ide> def call(self, inputs):
<ide> a = input_layer_lib.Input(shape=(32,))
<ide> b = input_layer_lib.Input(shape=(32,))
<ide> test_layer = AddLayer()
<del> y = test_layer([a, b]) # pylint: disable=not-callable
<add> y = test_layer([a, b])
<ide>
<ide> self._assertAllIs(test_layer.input, [a, b])
<ide> self.assertIs(test_layer.output, y)
<ide> def compute_mask(self, inputs, mask=None):
<ide> self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))
<ide> else:
<ide> x = input_layer_lib.Input(shape=(32,))
<del> y = MaskedLayer()(x) # pylint: disable=not-callable
<add> y = MaskedLayer()(x)
<ide> network = functional.Functional(x, y)
<ide>
<ide> # test callability on Input
<ide> class AddLayer(layers.Layer):
<ide> def call(self, inputs):
<ide> return inputs[0] + inputs[1]
<ide>
<del> c = AddLayer()([a, input_b]) # pylint: disable=not-callable
<add> c = AddLayer()([a, input_b])
<ide> c = layers.Dense(2)(c)
<ide>
<ide> network = functional.Functional([input_a, input_b], [a, c])
<ide> def __init__(self):
<ide> self.block = BasicBlock()
<ide>
<ide> def call(self, x):
<del> x = self.block(x) # pylint: disable=not-callable
<add> x = self.block(x)
<ide> return x
<ide>
<ide> model = CompoundModel()
<ide> def call(self, x):
<ide> "Model should have its weights created as it " "has been built",
<ide> )
<ide> sample_input = tf.ones((1, 10, 10, 1))
<del> output = model(sample_input) # pylint: disable=not-callable
<add> output = model(sample_input)
<ide> self.assertEqual(output.shape, (1, 3))
<ide>
<ide> @test_combinations.generate(
<ide><path>keras/engine/functional_utils.py
<ide> def clone_graph_nodes(inputs, outputs):
<ide> # It is used in the Node constructor to check if the tensor
<ide> # "is_keras_tensor()" The history will be override by the Node
<ide> # constructor anyway for the corresponding layer output anyway.
<del> cpy._keras_history = (
<del> kt_output._keras_history
<del> ) # pylint: disable=protected-access
<add> cpy._keras_history = kt_output._keras_history
<ide> cloned_outputs.append(cpy)
<ide> kt_id_mapping[id(kt_output)] = cpy
<ide> cloned_outputs = tf.nest.pack_sequence_as(outputs, cloned_outputs)
<ide> def clone_keras_tensors(args, keras_tensor_mapping):
<ide> else:
<ide> # Create copy of keras_tensor if we haven't done it before
<ide> cpy = _clone_keras_tensor(obj)
<del> cpy._keras_history = (
<del> obj._keras_history
<del> ) # pylint: disable=protected-access
<add> cpy._keras_history = obj._keras_history
<ide> keras_tensor_mapping[id(obj)] = cpy
<ide> result.append(cpy)
<ide> else:
<ide><path>keras/engine/input_layer.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<add>
<ide> """Input layer code (`Input` and `InputLayer`)."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide> def __init__(
<ide> if isinstance(input_tensor, keras_tensor.KerasTensor) or (
<ide> tf_utils.is_extension_type(input_tensor)
<ide> ):
<del> self._type_spec = (
<del> input_tensor._type_spec
<del> ) # pylint: disable=protected-access
<add> self._type_spec = input_tensor._type_spec
<ide> else:
<ide> self._type_spec = tf.TensorSpec(
<ide> shape=input_tensor.shape,
<ide> def _trackable_saved_model_saver(self):
<ide>
<ide> @keras_export("keras.Input", "keras.layers.Input")
<ide> @traceback_utils.filter_traceback
<del>def Input( # pylint: disable=invalid-name
<add>def Input(
<ide> shape=None,
<ide> batch_size=None,
<ide> name=None,
<ide><path>keras/engine/input_spec.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> """Contains the InputSpec class."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide><path>keras/engine/keras_tensor.py
<ide> # isort: off
<ide> from tensorflow.python.data.util import structure
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> # Tensorflow tensors have a maximum rank of 254
<ide> # (See `MaxDimensions()` in //tensorflow/core/framework/tensor_shape.h )
<ide> def name(self):
<ide> return self._name
<ide>
<ide> @classmethod
<del> def _overload_all_operators(
<del> cls, tensor_class
<del> ): # pylint: disable=invalid-name
<add> def _overload_all_operators(cls, tensor_class):
<ide> """Register overloads for all operators."""
<ide> for operator in tf.Tensor.OVERLOADABLE_OPERATORS:
<ide> cls._overload_operator(tensor_class, operator)
<ide> def _overload_all_operators(
<ide> cls._overload_operator(tensor_class, "experimental_ref")
<ide>
<ide> @classmethod
<del> def _overload_operator(
<del> cls, tensor_class, operator
<del> ): # pylint: disable=invalid-name
<add> def _overload_operator(cls, tensor_class, operator):
<ide> """Overload an operator with the same implementation as a base Tensor class.
<ide>
<ide> We pull the operator out of the class dynamically to avoid ordering
<ide> def _overload_operator(
<ide> setattr(cls, operator, tensor_oper)
<ide>
<ide>
<del>KerasTensor._overload_all_operators(
<del> tf.Tensor
<del>) # pylint: disable=protected-access
<add>KerasTensor._overload_all_operators(tf.Tensor)
<ide>
<ide>
<ide> class SparseKerasTensor(KerasTensor):
<ide> def ragged_rank(self):
<ide>
<ide>
<ide> # Overload slicing
<del>RaggedKerasTensor._overload_operator(
<del> tf.RaggedTensor, "__getitem__"
<del>) # pylint: disable=protected-access
<add>RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__getitem__")
<ide>
<ide> # Overload math ops
<del>RaggedKerasTensor._overload_operator(
<del> tf.RaggedTensor, "__add__"
<del>) # pylint: disable=protected-access
<del>RaggedKerasTensor._overload_operator(
<del> tf.RaggedTensor, "__radd__"
<del>) # pylint: disable=protected-access
<del>RaggedKerasTensor._overload_operator(
<del> tf.RaggedTensor, "__mul__"
<del>) # pylint: disable=protected-access
<del>RaggedKerasTensor._overload_operator(
<del> tf.RaggedTensor, "__rmul__"
<del>) # pylint: disable=protected-access
<add>RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__add__")
<add>RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__radd__")
<add>RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__mul__")
<add>RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__rmul__")
<ide>
<ide>
<ide> # TODO(b/161487382):
<ide> def register_keras_tensor_specialization(cls, keras_tensor_subclass):
<ide> def keras_tensor_to_placeholder(x):
<ide> """Construct a graph placeholder to represent a KerasTensor when tracing."""
<ide> if isinstance(x, KerasTensor):
<del> return x._to_placeholder() # pylint: disable=protected-access
<add> return x._to_placeholder()
<ide> else:
<ide> return x
<ide>
<ide> def keras_tensor_from_tensor(tensor):
<ide> out = keras_tensor_cls.from_tensor(tensor)
<ide>
<ide> if hasattr(tensor, "_keras_mask"):
<del> out._keras_mask = keras_tensor_from_tensor(
<del> tensor._keras_mask
<del> ) # pylint: disable=protected-access
<add> out._keras_mask = keras_tensor_from_tensor(tensor._keras_mask)
<ide> return out
<ide>
<ide>
<ide> def keras_tensor_from_type_spec(type_spec, name=None):
<ide> def type_spec_with_shape(spec, shape):
<ide> """Returns a copy of TypeSpec `spec` with its shape set to `shape`."""
<ide> if isinstance(spec, tf.TensorSpec):
<del> # pylint: disable=protected-access
<add>
<ide> # TODO(b/203201161) Figure out why mutation is needed here, and remove
<ide> # it. (TensorSpec objects should be immutable; and we should not be
<ide> # modifying private fields.)
<ide><path>keras/engine/keras_tensor_test.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """InputSpec tests."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide> from absl.testing import parameterized
<ide> def test_missing_dtype_error(self):
<ide> AttributeError,
<ide> "KerasTensor wraps TypeSpec .* which does not have a dtype.",
<ide> ):
<del> kt.dtype # pylint: disable=pointless-statement
<add> kt.dtype
<ide>
<ide> def test_wrong_dtype_type_error(self):
<ide> spec = CustomTypeSpec(None, tf.int32)
<ide> def test_wrong_dtype_type_error(self):
<ide> TypeError,
<ide> "KerasTensor requires that wrapped TypeSpec's dtype is a DType; .*",
<ide> ):
<del> kt.dtype # pylint: disable=pointless-statement
<add> kt.dtype
<ide>
<ide>
<ide> if __name__ == "__main__":
<ide><path>keras/engine/node.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> """Contains the `Node` class."""
<ide>
<ide> import collections
<ide><path>keras/engine/partial_batch_padding_handler.py
<ide>
<ide> from keras import backend
<ide>
<del># pylint: disable=protected-access
<del>
<ide>
<ide> class PartialBatchPaddingHandler:
<ide> """A container that holds info about partial batches for `predict()`."""
<ide><path>keras/engine/saving.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<add>
<ide> """Model saving utilities.
<ide>
<ide> Everything has been moved to keras/saving/. This file will be deleted soon.
<ide><path>keras/engine/sequential.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<add>
<ide> """Home of the `Sequential` model."""
<ide>
<ide> import copy
<ide> def __init__(self, layers=None, name=None):
<ide> """
<ide> # Skip the init in FunctionalModel since model doesn't have input/output
<ide> # yet
<del> super(
<del> functional.Functional, self
<del> ).__init__( # pylint: disable=bad-super-call
<del> name=name, autocast=False
<del> )
<add> super(functional.Functional, self).__init__(name=name, autocast=False)
<ide> base_layer.keras_api_gauge.get_cell("Sequential").set(True)
<ide> self.supports_masking = True
<ide> self._compute_output_and_mask_jointly = True
<ide> def build(self, input_shape=None):
<ide> super().build(input_shape)
<ide> self.built = True
<ide>
<del> def call(
<del> self, inputs, training=None, mask=None
<del> ): # pylint: disable=redefined-outer-name
<add> def call(self, inputs, training=None, mask=None):
<ide> # If applicable, update the static input shape of the model.
<ide> if not self._has_explicit_input_shape:
<ide> if not tf.is_tensor(inputs) and not isinstance(inputs, tf.Tensor):
<ide> def compute_mask(self, inputs, mask):
<ide> # TODO(omalleyt): b/123540974 This function is not really safe to call
<ide> # by itself because it will duplicate any updates and losses in graph
<ide> # mode by `call`ing the Layers again.
<del> outputs = self.call(
<del> inputs, mask=mask
<del> ) # pylint: disable=unexpected-keyword-arg
<add> outputs = self.call(inputs, mask=mask)
<ide> return getattr(outputs, "_keras_mask", None)
<ide>
<ide> def get_config(self):
<ide> def _assert_weights_created(self):
<ide> return
<ide> # When the graph has not been initialized, use the Model's
<ide> # implementation to check if the weights have been created.
<del> super(
<del> functional.Functional, self
<del> )._assert_weights_created() # pylint: disable=bad-super-call
<add> super(functional.Functional, self)._assert_weights_created()
<ide>
<ide>
<ide> def _get_shape_tuple(t):
<ide><path>keras/engine/training.py
<ide> def call(self, inputs, training=False):
<ide> ),
<ide> base_layer.Layer._TF_MODULE_IGNORED_PROPERTIES,
<ide> )
<del> ) # pylint: disable=protected-access
<add> )
<ide> _SCALAR_UPRANKING_ON = False
<ide>
<ide> def __new__(cls, *args, **kwargs):
<ide> def metrics(self):
<ide> metrics += self.compiled_metrics.metrics
<ide>
<ide> for l in self._flatten_layers():
<del> metrics.extend(l._metrics) # pylint: disable=protected-access
<add> metrics.extend(l._metrics)
<ide> return metrics
<ide>
<ide> @property
<ide> def run_eagerly(self):
<ide> Returns:
<ide> Boolean, whether the model should run eagerly.
<ide> """
<del> if (
<del> self.dynamic and self._run_eagerly == False
<del> ): # pylint:disable=g-bool-id-comparison
<add> if self.dynamic and self._run_eagerly == False:
<ide> # TODO(fchollet): consider using py_func to enable this.
<ide> raise ValueError(
<ide> "Your model contains layers that can only be "
<ide> def run_step(data):
<ide> outputs = model.train_step(data)
<ide> # Ensure counter is updated only if `train_step` succeeds.
<ide> with tf.control_dependencies(_minimum_control_deps(outputs)):
<del> model._train_counter.assign_add(
<del> 1
<del> ) # pylint: disable=protected-access
<add> model._train_counter.assign_add(1)
<ide> return outputs
<ide>
<ide> if self._jit_compile:
<ide> def fit(
<ide> val_sample_weight,
<ide> ) = data_adapter.unpack_x_y_sample_weight(validation_data)
<ide>
<del> if (
<del> self.distribute_strategy._should_use_with_coordinator
<del> ): # pylint: disable=protected-access
<add> if self.distribute_strategy._should_use_with_coordinator:
<ide> self._cluster_coordinator = (
<ide> tf.distribute.experimental.coordinator.ClusterCoordinator(
<ide> self.distribute_strategy
<ide> def fit(
<ide> with data_handler.catch_stop_iteration():
<ide> data_handler._initial_step = data_handler._initial_step or (
<ide> self._maybe_load_initial_step_from_ckpt()
<del> ) # pylint: disable=protected-access
<add> )
<ide> for step in data_handler.steps():
<ide> with tf.profiler.experimental.Trace(
<ide> "train",
<ide> def run_step(data):
<ide> outputs = model.test_step(data)
<ide> # Ensure counter is updated only if `test_step` succeeds.
<ide> with tf.control_dependencies(_minimum_control_deps(outputs)):
<del> model._test_counter.assign_add(
<del> 1
<del> ) # pylint: disable=protected-access
<add> model._test_counter.assign_add(1)
<ide> return outputs
<ide>
<ide> if self._jit_compile:
<ide> def evaluate(
<ide> if kwargs:
<ide> raise TypeError(f"Invalid keyword arguments: {list(kwargs.keys())}")
<ide>
<del> if (
<del> self.distribute_strategy._should_use_with_coordinator
<del> ): # pylint: disable=protected-access
<add> if self.distribute_strategy._should_use_with_coordinator:
<ide> self._cluster_coordinator = (
<ide> tf.distribute.experimental.coordinator.ClusterCoordinator(
<ide> self.distribute_strategy
<ide> def run_step(data):
<ide> outputs = model.predict_step(data)
<ide> # Ensure counter is updated only if `test_step` succeeds.
<ide> with tf.control_dependencies(_minimum_control_deps(outputs)):
<del> model._predict_counter.assign_add(
<del> 1
<del> ) # pylint: disable=protected-access
<add> model._predict_counter.assign_add(1)
<ide> return outputs
<ide>
<ide> if self._jit_compile:
<ide> def predict(
<ide> # prediction. If running under PSS, then swap it with OneDeviceStrategy
<ide> # so that execution will run on the coordinator.
<ide> original_pss_strategy = None
<del> if (
<del> self.distribute_strategy._should_use_with_coordinator
<del> ): # pylint: disable=protected-access
<add> if self.distribute_strategy._should_use_with_coordinator:
<ide> original_pss_strategy = self.distribute_strategy
<ide> self._distribution_strategy = None
<ide>
<ide> def save(
<ide> options=None,
<ide> save_traces=True,
<ide> ):
<del> # pylint: disable=line-too-long
<add>
<ide> """Saves the model to Tensorflow SavedModel or a single HDF5 file.
<ide>
<ide> Please see `tf.keras.models.save_model` or the
<ide> def save(
<ide> model = load_model('my_model.h5')
<ide> ```
<ide> """
<del> # pylint: enable=line-too-long
<add>
<ide> save.save_model(
<ide> self,
<ide> filepath,
<ide> def get_config(self):
<ide> # as a result.
<ide> config = {}
<ide>
<del> if saving_lib._ENABLED: # pylint: disable=protected-access
<add> if saving_lib._ENABLED:
<ide> if self.optimizer:
<ide> config["optimizer"] = saving_lib.serialize_keras_object(
<ide> self.optimizer
<ide> def from_config(cls, config, custom_objects=None):
<ide> f"Error encountered during deserialization:\n{e}"
<ide> )
<ide>
<del> if saving_lib._ENABLED: # pylint: disable=protected-access
<add> if saving_lib._ENABLED:
<ide>
<ide> if optimizer or loss:
<ide> model.compile(optimizer=optimizer, loss=loss)
<ide> def _check_sample_weight_warning(self, x, sample_weight):
<ide> and len(x.element_spec) == 3
<ide> )
<ide>
<del> # pylint: disable=protected-access
<ide> if (
<ide> sample_weight_present
<ide> and self.compiled_metrics._user_weighted_metrics is None
<ide> def _get_compile_args(self, user_metrics=True):
<ide> Dictionary of arguments that were used when compiling the model.
<ide> """
<ide> self._assert_compile_was_called()
<del> # pylint: disable=protected-access
<ide>
<ide> saved_metrics = self.compiled_metrics._user_metrics
<ide> saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics
<ide> def _get_compile_args(self, user_metrics=True):
<ide> "weighted_metrics": saved_weighted_metrics,
<ide> "loss_weights": self.compiled_loss._user_loss_weights,
<ide> }
<del> # pylint: enable=protected-access
<add>
<ide> return compile_args
<ide>
<ide> def _get_callback_model(self):
<ide> return self
<ide>
<ide> def _in_multi_worker_mode(self):
<del> return (
<del> self.distribute_strategy.extended._in_multi_worker_mode()
<del> ) # pylint: disable=protected-access
<add> return self.distribute_strategy.extended._in_multi_worker_mode()
<ide>
<ide> @property
<ide> def _compile_was_called(self):
<ide> def potentially_ragged_concat(tensors):
<ide>
<ide> def _get_verbosity(verbose, distribute_strategy):
<ide> """Find the right verbosity value for 'auto'."""
<del> if (
<del> verbose == 1 and distribute_strategy._should_use_with_coordinator
<del> ): # pylint: disable=protected-access
<add> if verbose == 1 and distribute_strategy._should_use_with_coordinator:
<ide> raise ValueError(
<ide> "`verbose=1` is not allowed with `ParameterServerStrategy` for "
<ide> f"performance reasons. Received: verbose={verbose}"
<ide> def disable_multi_worker(method):
<ide> """Decorator that disallows multi-worker use of `method`."""
<ide>
<ide> def _method_wrapper(self, *args, **kwargs):
<del> if self._in_multi_worker_mode(): # pylint: disable=protected-access
<add> if self._in_multi_worker_mode():
<ide> raise ValueError(
<ide> f"{method.__name__} is not supported in multi-worker "
<ide> "mode. Please use a non-multi-worker "
<ide><path>keras/engine/training_arrays_v1.py
<ide> # isort: off
<ide> from tensorflow.python.platform import tf_logging as logging
<ide>
<del># pylint: disable=protected-access
<del>
<ide>
<ide> try:
<del> from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
<add> from scipy.sparse import issparse
<ide> except ImportError:
<ide> issparse = None
<ide>
<ide><path>keras/engine/training_distributed_v1.py
<ide> from tensorflow.python.distribute import input_lib
<ide> from tensorflow.python.platform import tf_logging as logging
<ide>
<del># pylint: disable=protected-access
<del>
<ide>
<ide> def _per_replica_execution_function(model, mode):
<ide> exec_func = model._make_execution_function(mode)
<ide><path>keras/engine/training_eager_v1.py
<ide> from tensorflow.python.eager.backprop import GradientTape
<ide> from tensorflow.python.platform import tf_logging as logging
<ide>
<del># pylint: disable=protected-access
<del>
<ide>
<ide> def _eager_loss_fn(outputs, targets, loss_fn, output_name):
<ide> with backend.name_scope(output_name + "_loss"):
<ide><path>keras/engine/training_generator_v1.py
<ide> # isort: off
<ide> from tensorflow.python.platform import tf_logging as logging
<ide>
<del># pylint: disable=protected-access
<del>
<ide>
<ide> def model_iteration(
<ide> model,
<ide> def _make_execution_function(model, mode, class_weight=None):
<ide> else:
<ide> # Match signature of other modes to allow
<ide> # 1, 2, or 3-tuples from generator
<del> def predict_on_batch(
<del> x, y=None, sample_weights=None
<del> ): # pylint: disable=unused-argument
<add> def predict_on_batch(x, y=None, sample_weights=None):
<ide> return model.predict_on_batch(x)
<ide>
<ide> f = predict_on_batch
<ide><path>keras/engine/training_gpu_test.py
<ide> def prepare_simple_model(input_tensor, loss_name, target):
<ide> labels_channels_first = [
<ide> np.array(
<ide> [[[[0, 1, 3], [2, 1, 0], [2, 2, 1]]]], dtype=np.float32
<del> ), # pylint: disable=line-too-long
<add> ),
<ide> np.array(
<ide> [
<ide> [
<ide> def prepare_simple_model(input_tensor, loss_name, target):
<ide> ]
<ide> ],
<ide> dtype=np.float32,
<del> ), # pylint: disable=line-too-long
<add> ),
<ide> np.array(
<ide> [
<ide> [
<ide> def prepare_simple_model(input_tensor, loss_name, target):
<ide> ],
<ide> dtype=np.float32,
<ide> ),
<del> ] # pylint: disable=line-too-long
<add> ]
<ide> # Compute one loss for each loss function in the list
<ide> # `losses_to_test`:
<ide> loss_channels_last = [0.0, 0.0, 0.0]
<ide><path>keras/engine/training_integration_test.py
<ide> def _gather_test_cases():
<ide> arg_dict,
<ide> filter_fn,
<ide> ) in _LAYERS_TO_TEST:
<del> arg_combinations = [
<del> [(k, i) for i in v] for k, v in arg_dict.items()
<del> ] # pylint: disable=g-complex-comprehension
<add> arg_combinations = [[(k, i) for i in v] for k, v in arg_dict.items()]
<ide> for arguments in itertools.product(*arg_combinations):
<ide> layer_kwargs = {k: v for k, v in arguments}
<ide> if filter_fn is not None and not filter_fn(**layer_kwargs):
<ide><path>keras/engine/training_test.py
<ide> )
<ide>
<ide> try:
<del> import scipy.sparse as scipy_sparse # pylint: disable=g-import-not-at-top
<add> import scipy.sparse as scipy_sparse
<ide> except ImportError:
<ide> scipy_sparse = None
<ide>
<ide> class _OptimizerOverrideApplyGradients(_Optimizer):
<ide>
<ide> _HAS_AGGREGATE_GRAD = False
<ide>
<del> def apply_gradients(
<del> self, grads_and_vars, name=None
<del> ): # pylint: disable=useless-super-delegation
<add> def apply_gradients(self, grads_and_vars, name=None):
<ide> return super().apply_gradients(grads_and_vars, name)
<ide>
<ide> mock_optimizer = _OptimizerOverrideApplyGradients()
<ide><path>keras/engine/training_utils.py
<ide> def __init__(self, model):
<ide> self._should_set_trainable = False
<ide>
<ide> def __enter__(self):
<del> self._current_trainable_state = (
<del> self._model._get_trainable_state()
<del> ) # pylint: disable=protected-access
<del> self._compiled_trainable_state = (
<del> self._model._compiled_trainable_state
<del> ) # pylint: disable=protected-access
<add> self._current_trainable_state = self._model._get_trainable_state()
<add> self._compiled_trainable_state = self._model._compiled_trainable_state
<ide>
<ide> # Check to see if any layer's trainable state has changed since
<ide> # `compile`.
<ide> def __enter__(self):
<ide>
<ide> # If so, restore the model to its compiled state.
<ide> if self._should_set_trainable:
<del> self._model._set_trainable_state(
<del> self._compiled_trainable_state
<del> ) # pylint: disable=protected-access
<add> self._model._set_trainable_state(self._compiled_trainable_state)
<ide>
<ide> def __exit__(self, type_arg, value_arg, traceback_arg):
<ide> # If we set the values to their compiled state in __enter__, we need to
<ide> # restore the original values before leaving the scope.
<ide> if self._should_set_trainable:
<del> self._model._set_trainable_state(
<del> self._current_trainable_state
<del> ) # pylint: disable=protected-access
<add> self._model._set_trainable_state(self._current_trainable_state)
<ide> return False # False values do not suppress exceptions
<ide>
<ide>
<ide> # Allow use of methods not exposed to the user.
<del># pylint: disable=protected-access
<add>
<add>
<ide> def get_input_shape_and_dtype(layer):
<ide> """Retrieves input shape and input dtype of layer if applicable.
<ide>
<ide> def _is_graph_model(layer):
<ide> return None, None
<ide>
<ide>
<del># pylint: enable=protected-access
<del>
<del>
<ide> def get_static_batch_size(layer):
<ide> """Gets the static batch size of a Layer.
<ide>
<ide><path>keras/engine/training_utils_v1.py
<ide> def _slice_assign(self, batch_element, batch_start, batch_end, is_finished):
<ide> try:
<ide> self.results[batch_start:batch_end] = batch_element
<ide>
<del> except Exception as e: # pylint: disable=broad-except
<add> except Exception as e:
<ide> # `_slice_assign` should only be called in threads and exceptions
<ide> # raised in threads do not carry over to the main thread. So instead
<ide> # we perform a broad catch in the thread and then store the
<ide> def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
<ide> """
<ide> if x_weight is None or (
<ide> isinstance(x_weight, (list, tuple)) and len(x_weight) == 0
<del> ): # pylint: disable=g-explicit-length-test
<add> ):
<ide> return [None for _ in output_names]
<ide> if len(output_names) == 1:
<ide> if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:
<ide> def collect_per_output_metric_info(
<ide> metric_fn = get_metric_function(
<ide> metric, output_shape=output_shapes[i], loss_fn=loss_fns[i]
<ide> )
<del> metric_fn._from_serialized = (
<del> from_serialized # pylint: disable=protected-access
<del> )
<add> metric_fn._from_serialized = from_serialized
<ide>
<ide> # If the metric function is not stateful, we create a stateful
<ide> # version.
<ide> def collect_per_output_metric_info(
<ide> # If the metric is being revived from something stateless, such
<ide> # as a string (e.g. "accuracy"), we may need to later reapply
<ide> # transformations such as renaming.
<del> metric_fn._from_serialized = (
<del> False # pylint: disable=protected-access
<del> )
<add> metric_fn._from_serialized = False
<ide> metrics_dict[metric_name] = metric_fn
<ide> per_output_metrics.append(metrics_dict)
<ide>
<ide> def is_eager_dataset_or_iterator(data):
<ide> )
<ide>
<ide>
<del># pylint: disable=protected-access
<ide> def get_dataset_graph_def(dataset):
<ide> if tf.executing_eagerly():
<ide> graph_def_str = dataset._as_serialized_graph().numpy()
<ide> def as_list(self):
<ide>
<ide>
<del># Allow use of methods not exposed to the user.
<del># pylint: disable=protected-access
<del>
<del>
<del># pylint: enable=protected-access
<ide>
<ide>
<ide> def generic_output_names(outputs_list):
<ide> def unpack_validation_data(validation_data, raise_if_ambiguous=True):
<ide> (
<ide> val_x,
<ide> val_y,
<del> ) = validation_data # pylint: disable=unpacking-non-sequence
<add> ) = validation_data
<ide> val_sample_weight = None
<ide> except ValueError:
<ide> val_x, val_y, val_sample_weight = validation_data, None, None
<ide> def unpack_validation_data(validation_data, raise_if_ambiguous=True):
<ide> val_x,
<ide> val_y,
<ide> val_sample_weight,
<del> ) = validation_data # pylint: disable=unpacking-non-sequence
<add> ) = validation_data
<ide> except ValueError:
<ide> val_x, val_y, val_sample_weight = validation_data, None, None
<ide> else:
<ide><path>keras/engine/training_utils_v1_test.py
<ide> def test_dict_eager(self):
<ide>
<ide> class DatasetUtilsTest(tf.test.TestCase, parameterized.TestCase):
<ide> @parameterized.named_parameters(
<del> # pylint: disable=g-long-lambda
<ide> ("Batch", lambda: tf.data.Dataset.range(5).batch(2)),
<ide> ("Cache", lambda: tf.data.Dataset.range(5).cache()),
<ide> (
<ide> class DatasetUtilsTest(tf.test.TestCase, parameterized.TestCase):
<ide> ("TFRecordDataset", lambda: tf.data.TFRecordDataset([])),
<ide> ("Window", lambda: tf.data.Dataset.range(5).window(2)),
<ide> ("Zip", lambda: tf.data.Dataset.zip(tf.data.Dataset.range(5))),
<del> # pylint: enable=g-long-lambda
<ide> )
<ide> def test_verify_dataset_shuffled(self, dataset_fn, expect_shuffled=False):
<ide> dataset = dataset_fn()
<ide> def __init__(self, *args, **kwargs):
<ide> def apply_async(self, func, *args, **kwargs):
<ide> self._apply_counter += 1
<ide> if self._func_wrapper:
<del> func = self._func_wrapper(func) # pylint: disable=not-callable
<add> func = self._func_wrapper(func)
<ide> return super().apply_async(func, *args, **kwargs)
<ide>
<ide>
<ide> def wrapped(*args, **kwargs):
<ide>
<ide> def cause_error(f):
<ide> @functools.wraps(f)
<del> def wrapped(
<del> batch_element, batch_start, batch_end, is_finished
<del> ): # pylint: disable=unused-argument
<add> def wrapped(batch_element, batch_start, batch_end, is_finished):
<ide> # Induce a TypeError during assignment.
<ide> return f(None, None, None, is_finished)
<ide>
<ide><path>keras/engine/training_v1.py
<ide> import warnings
<ide>
<ide> import numpy as np
<del>
<del># pylint: disable=g-classes-have-attributes
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> from keras import backend
<ide> from tensorflow.python.platform import tf_logging as logging
<ide>
<ide> try:
<del> from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
<add> from scipy.sparse import issparse
<ide> except ImportError:
<ide> issparse = None
<ide>
<ide> def load_weights(self, filepath, by_name=False, skip_mismatch=False):
<ide> if backend.is_tpu_strategy(self._distribution_strategy):
<ide> if self._distribution_strategy.extended.steps_per_run > 1 and (
<ide> not saving_utils.is_hdf5_filepath(filepath)
<del> ): # pylint: disable=protected-access
<add> ):
<ide> raise ValueError(
<ide> "Load weights is not yet supported with TPUStrategy "
<ide> "with steps_per_run greater than 1."
<ide> def reset_metrics(self):
<ide>
<ide> # Reset metrics on all the distributed (cloned) models.
<ide> if self._distribution_strategy:
<del> distributed_training_utils_v1._reset_metrics(
<del> self
<del> ) # pylint: disable=protected-access
<add> distributed_training_utils_v1._reset_metrics(self)
<ide>
<ide> def train_on_batch(
<ide> self,
<ide> def train_on_batch(
<ide> + output_dict["output_losses"]
<ide> + output_dict["metrics"]
<ide> )
<del> outputs = [
<del> _non_none_constant_value(v) for v in outputs
<del> ] # pylint: disable=protected-access
<add> outputs = [_non_none_constant_value(v) for v in outputs]
<ide> else:
<ide> x = training_utils_v1.ModelInputs(x).as_list()
<ide> ins = x + list(y or []) + list(sample_weights or [])
<ide> def train_on_batch(
<ide>
<ide> self._update_sample_weight_modes(sample_weights=sample_weights)
<ide> self._make_train_function()
<del> outputs = self.train_function(ins) # pylint: disable=not-callable
<add> outputs = self.train_function(ins)
<ide>
<ide> if reset_metrics:
<ide> self.reset_metrics()
<ide> def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True):
<ide> + output_dict["output_losses"]
<ide> + output_dict["metrics"]
<ide> )
<del> outputs = [
<del> _non_none_constant_value(v) for v in outputs
<del> ] # pylint: disable=protected-access
<add> outputs = [_non_none_constant_value(v) for v in outputs]
<ide> else:
<ide> x = training_utils_v1.ModelInputs(x).as_list()
<ide> inputs = x + list(y or []) + list(sample_weights or [])
<ide>
<ide> self._update_sample_weight_modes(sample_weights=sample_weights)
<ide> self._make_test_function()
<del> outputs = self.test_function(inputs) # pylint: disable=not-callable
<add> outputs = self.test_function(inputs)
<ide>
<ide> if reset_metrics:
<ide> self.reset_metrics()
<ide> def predict_on_batch(self, x):
<ide> if len(inputs) == 1:
<ide> inputs = inputs[0]
<ide>
<del> return self(inputs) # pylint: disable=not-callable
<add> return self(inputs)
<ide>
<ide> self._make_predict_function()
<ide> outputs = self.predict_function(inputs)
<ide> def _process_target_tensor_for_compile(self, target_tensors):
<ide>
<ide> if target_tensors is not None and not (
<ide> isinstance(target_tensors, list) and target_tensors == []
<del> ): # pylint: disable=g-explicit-bool-comparison
<add> ):
<ide> if isinstance(target_tensors, list):
<ide> if len(target_tensors) != len(self.outputs):
<ide> raise ValueError(
<ide> def _set_per_output_metric_attributes(self, metrics_dict, output_index):
<ide>
<ide> # Update the name on the metric class to be the unique generated
<ide> # name.
<del> metric_fn._name = metric_name # pylint: disable=protected-access
<add> metric_fn._name = metric_name
<ide> updated_metrics_dict[metric_name] = metric_fn
<ide> # Keep track of metric name and function.
<ide> self._compile_metric_functions.append(metric_fn)
<ide> def _make_train_function(self):
<ide> metrics_tensors = [
<ide> m._call_result
<ide> for m in metrics
<del> if hasattr(
<del> m, "_call_result"
<del> ) # pylint: disable=protected-access
<add> if hasattr(m, "_call_result")
<ide> ]
<ide>
<ide> with backend.name_scope("training"):
<ide> def _make_test_function(self):
<ide> metrics_tensors = [
<ide> m._call_result
<ide> for m in metrics
<del> if hasattr(
<del> m, "_call_result"
<del> ) # pylint: disable=protected-access
<add> if hasattr(m, "_call_result")
<ide> ]
<ide>
<ide> with backend.name_scope("evaluation"):
<ide> def _standardize_tensors(
<ide> def _type_spec_from_value(value):
<ide> """Grab type_spec without converting array-likes to tensors."""
<ide> if tf_utils.is_extension_type(value):
<del> return value._type_spec # pylint: disable=protected-access
<add> return value._type_spec
<ide> # Get a TensorSpec for array-like data without
<ide> # converting the data to a Tensor
<ide> if hasattr(value, "shape") and hasattr(value, "dtype"):
<ide> def _in_multi_worker_mode(self):
<ide> # Otherwise, use the strategy whose scope this is in.
<ide> if not strategy and tf.distribute.has_strategy():
<ide> strategy = tf.distribute.get_strategy()
<del> return (
<del> strategy and strategy.extended._in_multi_worker_mode()
<del> ) # pylint: disable=protected-access
<add> return strategy and strategy.extended._in_multi_worker_mode()
<ide>
<ide> @property
<ide> def _trackable_saved_model_saver(self):
<ide> def load_weights(self, filepath, by_name=False):
<ide> orig_model_weights = self._original_model.get_weights()
<ide> distributed_training_utils_v1.set_weights(
<ide> self._original_model._distribution_strategy,
<del> self, # pylint: disable=protected-access
<add> self,
<ide> orig_model_weights,
<ide> )
<ide>
<ide> def _get_metrics_from_layers(layers):
<ide> # We cannot call 'metrics' on the model because we do not want to
<ide> # include the metrics that were added in compile API of a nested
<ide> # model.
<del> metrics.extend(layer._metrics) # pylint: disable=protected-access
<add> metrics.extend(layer._metrics)
<ide> metrics.extend(_get_metrics_from_layers(layer.layers))
<ide> else:
<ide> metrics.extend(layer.metrics)
<ide><path>keras/estimator/__init__.py
<ide> def input_fn():
<ide> try:
<ide> # isort: off
<ide> from tensorflow_estimator.python.estimator import (
<del> keras_lib, # pylint: disable=g-import-not-at-top
<add> keras_lib,
<ide> )
<ide> except ImportError:
<ide> raise NotImplementedError(
<ide> "tf.keras.estimator.model_to_estimator function not available in "
<ide> "your installation."
<ide> )
<ide> _model_to_estimator_usage_gauge.get_cell("v1").set(True)
<del> return (
<del> keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg
<del> keras_model=keras_model,
<del> keras_model_path=keras_model_path,
<del> custom_objects=custom_objects,
<del> model_dir=model_dir,
<del> config=config,
<del> checkpoint_format=checkpoint_format,
<del> use_v2_estimator=False,
<del> metric_names_map=metric_names_map,
<del> export_outputs=export_outputs,
<del> )
<add> return keras_lib.model_to_estimator(
<add> keras_model=keras_model,
<add> keras_model_path=keras_model_path,
<add> custom_objects=custom_objects,
<add> model_dir=model_dir,
<add> config=config,
<add> checkpoint_format=checkpoint_format,
<add> use_v2_estimator=False,
<add> metric_names_map=metric_names_map,
<add> export_outputs=export_outputs,
<ide> )
<ide>
<ide>
<ide> def input_fn():
<ide> try:
<ide> # isort: off
<ide> from tensorflow_estimator.python.estimator import (
<del> keras_lib, # pylint: disable=g-import-not-at-top
<add> keras_lib,
<ide> )
<ide> except ImportError:
<ide> raise NotImplementedError(
<ide> "tf.keras.estimator.model_to_estimator function not available in "
<ide> "your installation."
<ide> )
<ide> _model_to_estimator_usage_gauge.get_cell("v2").set(True)
<del> return (
<del> keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg
<del> keras_model=keras_model,
<del> keras_model_path=keras_model_path,
<del> custom_objects=custom_objects,
<del> model_dir=model_dir,
<del> config=config,
<del> checkpoint_format=checkpoint_format,
<del> use_v2_estimator=True,
<del> metric_names_map=metric_names_map,
<del> export_outputs=export_outputs,
<del> )
<add> return keras_lib.model_to_estimator(
<add> keras_model=keras_model,
<add> keras_model_path=keras_model_path,
<add> custom_objects=custom_objects,
<add> model_dir=model_dir,
<add> config=config,
<add> checkpoint_format=checkpoint_format,
<add> use_v2_estimator=True,
<add> metric_names_map=metric_names_map,
<add> export_outputs=export_outputs,
<ide> )
<ide>
<ide>
<ide><path>keras/feature_column/dense_features.py
<ide>
<ide>
<ide> @keras_export(v1=["keras.layers.DenseFeatures"])
<del>class DenseFeatures(kfc._BaseFeaturesLayer): # pylint: disable=protected-access
<add>class DenseFeatures(kfc._BaseFeaturesLayer):
<ide> """A layer that produces a dense `Tensor` based on given `feature_columns`.
<ide>
<ide> Generally a single example in training data is described with
<ide><path>keras/feature_column/dense_features_test.py
<ide> def test_static_batch_size_mismatch(self):
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<ide> r"Batch size \(first dimension\) of each feature must be same.",
<del> ): # pylint: disable=anomalous-backslash-in-string
<add> ):
<ide> df.DenseFeatures([price1, price2])(features)
<ide>
<ide> def test_subset_of_static_batch_size_mismatch(self):
<ide> def test_subset_of_static_batch_size_mismatch(self):
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<ide> r"Batch size \(first dimension\) of each feature must be same.",
<del> ): # pylint: disable=anomalous-backslash-in-string
<add> ):
<ide> df.DenseFeatures([price1, price2, price3])(features)
<ide>
<ide> def test_runtime_batch_size_mismatch(self):
<ide><path>keras/feature_column/dense_features_v2.py
<ide> def build(self, _):
<ide> with tf.name_scope(column.name):
<ide> column.create_state(self._state_manager)
<ide> # We would like to call Layer.build and not _DenseFeaturesHelper.build.
<del> # pylint: disable=protected-access
<del> super(kfc._BaseFeaturesLayer, self).build(
<del> None
<del> ) # pylint: disable=bad-super-call
<add>
<add> super(kfc._BaseFeaturesLayer, self).build(None)
<ide>
<ide>
<del>class _StateManagerImplV2(
<del> tf.__internal__.feature_column.StateManager
<del>): # pylint: disable=protected-access
<add>class _StateManagerImplV2(tf.__internal__.feature_column.StateManager):
<ide> """Manages the state of DenseFeatures."""
<ide>
<ide> def create_variable(
<ide> def create_variable(
<ide> use_resource=use_resource,
<ide> )
<ide> if isinstance(var, tf.__internal__.tracking.Trackable):
<del> self._layer._track_trackable(
<del> var, feature_column.name + "/" + name
<del> ) # pylint: disable=protected-access
<add> self._layer._track_trackable(var, feature_column.name + "/" + name)
<ide> self._cols_to_vars_map[feature_column][name] = var
<ide> return var
<ide>
<ide> def build():
<ide> Yields:
<ide> a scope in which the object doesn't track dependencies manually.
<ide> """
<del> # pylint: disable=protected-access
<add>
<ide> previous_value = getattr(obj, "_manual_tracking", True)
<ide> obj._manual_tracking = False
<ide> try:
<ide><path>keras/feature_column/dense_features_v2_test.py
<ide> def test_static_batch_size_mismatch(self):
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<ide> r"Batch size \(first dimension\) of each feature must be same.",
<del> ): # pylint: disable=anomalous-backslash-in-string
<add> ):
<ide> df.DenseFeatures([price1, price2])(features)
<ide>
<ide> def test_subset_of_static_batch_size_mismatch(self):
<ide> def test_subset_of_static_batch_size_mismatch(self):
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<ide> r"Batch size \(first dimension\) of each feature must be same.",
<del> ): # pylint: disable=anomalous-backslash-in-string
<add> ):
<ide> df.DenseFeatures([price1, price2, price3])(features)
<ide>
<ide> def test_runtime_batch_size_mismatch(self):
<ide><path>keras/feature_column/sequence_feature_column.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=protected-access
<del>
<ide>
<ide> @keras_export("keras.experimental.SequenceFeatures")
<ide> class SequenceFeatures(kfc._BaseFeaturesLayer):
<ide><path>keras/initializers/initializers_v1.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras initializers for TF 1."""
<del># pylint:disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/initializers/initializers_v2.py
<ide> def _ensure_keras_seeded():
<ide> initialized with the same seed for tf.random.Generator, so that the
<ide> values created are in sync among all the clients.
<ide> """
<del> if not getattr(
<del> backend._SEED_GENERATOR, "generator", None
<del> ): # pylint:disable=protected-access
<add> if not getattr(backend._SEED_GENERATOR, "generator", None):
<ide> raise ValueError(
<ide> "When using DTensor APIs, you need to set the global seed "
<ide> "before using any Keras initializers. Please make sure "
<ide><path>keras/integration_test/forwardprop_test.py
<ide> def testBatchNormLayerParamGrads(self, value, op_fn):
<ide> output, [input_value] + layer.trainable_variables
<ide> )
<ide> jac_forward = _jacfwd(
<del> lambda *args: layer(
<del> args[0], training=training
<del> ), # pylint:disable=cell-var-from-loop
<add> lambda *args: layer(args[0], training=training),
<ide> [input_value] + layer.trainable_variables,
<ide> )
<ide> for backward, forward in zip(jac_back, jac_forward):
<ide> def call(self, x):
<ide> return self.proj(self.embed(x))
<ide>
<ide> model = M()
<del> model(tf.zeros([3, 3], dtype=tf.int32)) # pylint: disable=not-callable
<add> model(tf.zeros([3, 3], dtype=tf.int32))
<ide> parameters = model.embed.variables
<ide> tangents = [tf.ones_like(v) for v in parameters]
<ide> with tf.autodiff.ForwardAccumulator(parameters, tangents):
<ide> # Note that forwardprop runs alongside the original computation.
<ide> # This test is just checking that it doesn't crash; correctness is
<ide> # tested in core TF.
<del> model(
<del> tf.zeros([3, 3], dtype=tf.int32)
<del> ) # pylint: disable=not-callable
<add> model(tf.zeros([3, 3], dtype=tf.int32))
<ide>
<ide>
<ide> class HessianTests(tf.test.TestCase, parameterized.TestCase):
<ide><path>keras/integration_test/function_test.py
<ide> def testDefunKerasModelCall(self):
<ide> model.call = tf.function(model.call)
<ide>
<ide> x = tf.ones([1, 2])
<del> y = model(x) # pylint:disable=not-callable
<add> y = model(x)
<ide>
<ide> self.assertAllEqual([[3.0]], self.evaluate(y))
<ide>
<ide> def testDecoratedMethodGetConcreteFunction(self):
<ide>
<ide> def testDecoratedMethodVariableCleanup(self):
<ide> m = DefunnedMiniModel()
<del> m(tf.ones([1, 2])) # pylint:disable=not-callable
<add> m(tf.ones([1, 2]))
<ide> variable_refs = list({v.ref() for v in m.variables})
<ide> self.assertLen(variable_refs, 2)
<ide> del m
<ide> def test_optimizer(self):
<ide> x = tf.constant([[3.0, 4.0]])
<ide> y = tf.constant([2.0])
<ide> model = ModelWithOptimizer()
<del> model(x, y) # pylint:disable=not-callable
<add> model(x, y)
<ide>
<ide>
<ide> class AutomaticControlDependenciesTest(tf.test.TestCase):
<ide><path>keras/integration_test/gradients_test.py
<ide> def testKerasRecompute(self):
<ide> test_model = TestKerasModelClass(10)
<ide> test_input = tf.constant(tf.zeros((10, 10), dtype=np.float32))
<ide> # Ensures keras model is initialized.
<del> test_model(test_input) # pylint: disable=not-callable
<add> test_model(test_input)
<ide> grads_re, grads = self._TestVariablesGradient(
<ide> test_input, test_model, test_input
<ide> )
<ide> def call(self, x):
<ide> def jacobian(x):
<ide> with tf.GradientTape() as tape:
<ide> tape.watch(x)
<del> y = m(x) # pylint: disable=not-callable
<add> y = m(x)
<ide> return tape.batch_jacobian(y, x)
<ide>
<ide> inp = tf.nn.l2_normalize(tf.ones([1, 2, 3]), axis=[1, 2])
<ide><path>keras/integration_test/legacy_rnn_test.py
<ide> def testRNNCellActsLikeKerasRNNCellInProperScope(self):
<ide>
<ide> z = tf.zeros((2, 3))
<ide>
<del> kn1(z) # pylint:disable=not-callable
<del> kn2(z) # pylint:disable=not-callable
<add> kn1(z)
<add> kn2(z)
<ide>
<del> # pylint: disable=protected-access
<ide> self.assertTrue(all("kn1" in v.name for v in kn1._cell.variables))
<ide> self.assertTrue(all("kn2" in v.name for v in kn2._cell.variables))
<ide>
<ide> with tf.layers.experimental.keras_style_scope():
<ide> kn1_new = KerasNetworkTFRNNs(name="kn1_new")
<ide> kn2_new = KerasNetworkKerasRNNs(name="kn2_new")
<ide>
<del> kn2_new(z) # pylint:disable=not-callable
<add> kn2_new(z)
<ide> # Most importantly, this doesn't fail due to variable scope reuse
<ide> # issues.
<del> kn1_new(z) # pylint:disable=not-callable
<add> kn1_new(z)
<ide>
<ide> self.assertTrue(
<ide> all("kn1_new" in v.name for v in kn1_new._cell.variables)
<ide><path>keras/integration_test/multi_worker_tutorial_test.py
<ide> def skip_fetch_failure_exception(self):
<ide> self.skipTest(
<ide> "Data loading error: Bad magic number for file header."
<ide> )
<del> except Exception as e: # pylint: disable=broad-except
<add> except Exception as e:
<ide> if "URL fetch failure" in str(e):
<ide> self.skipTest(
<ide> "URL fetch error not considered failure of the test."
<ide> def extract_accuracy(worker_id, input_string):
<ide>
<ide> for worker_id in range(NUM_WORKERS):
<ide> accu_result = tf.nest.map_structure(
<del> lambda x: extract_accuracy(
<del> worker_id, x
<del> ), # pylint: disable=cell-var-from-loop
<add> lambda x: extract_accuracy(worker_id, x),
<ide> mpr_result.stdout,
<ide> )
<ide> self.assertTrue(
<ide> def proc_func(checkpoint_dir):
<ide> multi_worker_dataset = (
<ide> strategy.distribute_datasets_from_function(
<ide> lambda input_context: self.dataset_fn(
<del> global_batch_size, # pylint: disable=g-long-lambda
<add> global_batch_size,
<ide> input_context,
<ide> )
<ide> )
<ide><path>keras/integration_test/parameter_server_keras_preprocessing_test.py
<ide> def feature_and_label_gen():
<ide> )
<ide>
<ide> train_dataset = raw_dataset.map(
<del> lambda x: ( # pylint: disable=g-long-lambda
<add> lambda x: (
<ide> {"features": feature_ps(x["features"])},
<ide> label_ps(x["label"]),
<ide> )
<ide><path>keras/integration_test/saved_model_test.py
<ide> class _MultiOutput(tf.keras.layers.Layer):
<ide> def call(self, x):
<ide> return x + 1.0, x + 2.0
<ide>
<del> out = _MultiOutput(name="out")(inp) # pylint: disable=not-callable
<add> out = _MultiOutput(name="out")(inp)
<ide> model = tf.keras.Model(inp, out)
<ide> loaded = cycle(model, cycles)
<ide> self.assertAllClose(
<ide><path>keras/integration_test/tpu_strategy_test.py
<ide> def feature_and_label_gen():
<ide> )
<ide>
<ide> train_dataset = raw_dataset.map(
<del> lambda x: ( # pylint: disable=g-long-lambda
<add> lambda x: (
<ide> {"features": feature_mapper(x["features"])},
<ide> label_mapper(x["label"]),
<ide> )
<ide><path>keras/layers/activation/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Layers that act as activation functions."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> from keras.layers.activation.elu import ELU
<ide> from keras.layers.activation.leaky_relu import LeakyReLU
<ide><path>keras/layers/activation/elu.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Exponential Linear Unit activation layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.engine.base_layer import Layer
<ide><path>keras/layers/activation/leaky_relu.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Leaky version of a Rectified Linear Unit activation layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.engine.base_layer import Layer
<ide><path>keras/layers/activation/prelu.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Parametric Rectified Linear Unit activation layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras import constraints
<ide><path>keras/layers/activation/relu.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Rectified Linear Unit activation layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.engine.base_layer import Layer
<ide><path>keras/layers/activation/softmax.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Softmax activation layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/activation/thresholded_relu.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Thresholded Rectified Linear Unit activation layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/attention/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras attention layers."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> from keras.layers.attention.additive_attention import AdditiveAttention
<ide> from keras.layers.attention.attention import Attention
<ide><path>keras/layers/attention/additive_attention.py
<ide> This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
<ide> Attention is formed by three tensors: Query, Key and Value.
<ide> """
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/attention/attention.py
<ide> This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
<ide> Attention is formed by three tensors: Query, Key and Value.
<ide> """
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/attention/base_dense_attention.py
<ide> This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
<ide> Attention is formed by three tensors: Query, Key and Value.
<ide> """
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/attention/multi_head_attention.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras-based multi-head attention layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import collections
<ide> import math
<ide> def from_config(cls, config):
<ide> str(cls),
<ide> )
<ide> else:
<del> layer._build_from_signature(
<del> query_shape, value_shape, key_shape
<del> ) # pylint: disable=protected-access
<add> layer._build_from_signature(query_shape, value_shape, key_shape)
<ide> return layer
<ide>
<ide> def _build_from_signature(self, query, value, key=None):
<ide><path>keras/layers/convolutional/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras convolution layers."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> # Convolution layer aliases.
<ide> # Convolution layers.
<ide><path>keras/layers/convolutional/base_conv.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras base class for convolution layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def _validate_init(self):
<ide> )
<ide>
<ide> if self.padding == "causal":
<del> # pylint: disable=g-import-not-at-top
<add>
<ide> from keras.layers.convolutional.conv1d import Conv1D
<ide> from keras.layers.convolutional.separable_conv1d import (
<ide> SeparableConv1D,
<ide> )
<ide>
<del> # pylint: enable=g-import-not-at-top
<ide> if not isinstance(self, (Conv1D, SeparableConv1D)):
<ide> raise ValueError(
<ide> "Causal padding is only supported for `Conv1D`"
<ide> def compute_output_shape(self, input_shape):
<ide> f"dimension."
<ide> )
<ide>
<del> def _recreate_conv_op(self, inputs): # pylint: disable=unused-argument
<add> def _recreate_conv_op(self, inputs):
<ide> return False
<ide>
<ide> def get_config(self):
<ide><path>keras/layers/convolutional/base_depthwise_conv.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras abstract base for depthwise convolutions."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/convolutional/base_separable_conv.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras abstract base layer for separable nD convolution."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/convolutional/conv1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras 1D convolution layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import activations
<ide> from keras import constraints
<ide><path>keras/layers/convolutional/conv1d_transpose.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras 1D transposed convolution layer (sometimes called deconvolution)."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/convolutional/conv2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras 2D convolution layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import activations
<ide> from keras import constraints
<ide><path>keras/layers/convolutional/conv2d_transpose.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras 2D transposed convolution layer (sometimes called deconvolution)."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/convolutional/conv3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras 3D convolution layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import activations
<ide> from keras import constraints
<ide><path>keras/layers/convolutional/conv3d_transpose.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras 3D transposed convolution layer (sometimes called deconvolution)."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/convolutional/conv_transpose_test.py
<ide> def test_conv2d_transpose_dilation(self):
<ide> )
<ide>
<ide> input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32)
<del> # pylint: disable=too-many-function-args
<add>
<ide> expected_output = np.float32(
<ide> [
<ide> [192, 228, 192, 228],
<ide><path>keras/layers/convolutional/depthwise_conv1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras depthwise 1D convolution."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/convolutional/depthwise_conv2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras depthwise 2D convolution."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.layers.convolutional.base_depthwise_conv import DepthwiseConv
<ide><path>keras/layers/convolutional/separable_conv1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras depthwise separable 1D convolution."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/convolutional/separable_conv2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras depthwise separable 2D convolution."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/core/activation.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the Activation layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import activations
<ide> from keras.engine.base_layer import Layer
<ide><path>keras/layers/core/dense.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the Dense layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/core/einsum_dense.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras-based einsum dense layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import re
<ide>
<ide><path>keras/layers/core/embedding.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Embedding layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/core/embedding_test.py
<ide> def test_embedding_with_ragged_input(self):
<ide> inputs = keras.layers.Input(
<ide> shape=(None,), dtype=tf.float32, ragged=True
<ide> )
<del> # pylint: disable=unnecessary-lambda
<add>
<ide> outputs = keras.layers.Lambda(
<ide> lambda args: keras.backend.identity(args)
<ide> )(inputs)
<del> # pylint: enable=unnecessary-lambda
<add>
<ide> outputs = layer(outputs)
<ide>
<ide> model = keras.Model(inputs, outputs)
<ide><path>keras/layers/core/lambda_layer.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the Lambda layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide> import sys
<ide> import textwrap
<ide> import types as python_types
<ide><path>keras/layers/core/masking.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the Masking layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def call(self, inputs):
<ide> )
<ide> outputs = inputs * tf.cast(boolean_mask, inputs.dtype)
<ide> # Compute the mask and outputs simultaneously.
<del> outputs._keras_mask = tf.squeeze(
<del> boolean_mask, axis=-1
<del> ) # pylint: disable=protected-access
<add> outputs._keras_mask = tf.squeeze(boolean_mask, axis=-1)
<ide> return outputs
<ide>
<ide> def compute_output_shape(self, input_shape):
<ide><path>keras/layers/core/tf_op_layer.py
<ide> get_symbol_from_name,
<ide> )
<ide>
<del># pylint: enable=g-bad-import-order
<del>
<ide>
<ide> class ClassMethod(Layer):
<ide> """Wraps a TF API Class's class method in a `Layer` object.
<ide> def from_config(cls, config, custom_objects=None):
<ide> return cls(**config)
<ide>
<ide>
<del>def _delegate_property(
<del> keras_tensor_cls, property_name
<del>): # pylint: disable=invalid-name
<add>def _delegate_property(keras_tensor_cls, property_name):
<ide> """Register property on a KerasTensor class.
<ide>
<ide> Calling this multiple times with the same arguments should be a no-op.
<ide> def _delegate_property(
<ide> # due to dynamic layer class versioning.
<ide> property_access = property(
<ide> lambda self: InstanceProperty(property_name)(self)
<del> ) # pylint: disable=unnecessary-lambda
<add> )
<ide> setattr(keras_tensor_cls, property_name, property_access)
<ide>
<ide>
<del>def _delegate_method(
<del> keras_tensor_cls, method_name
<del>): # pylint: disable=invalid-name
<add>def _delegate_method(keras_tensor_cls, method_name):
<ide> """Register method on a KerasTensor class.
<ide>
<ide> Calling this multiple times with the same arguments should be a no-op.
<ide> def handle(self, args, kwargs):
<ide>
<ide>
<ide> for slicing_op in [
<del> tf.__operators__.getitem, # pylint: disable=protected-access
<add> tf.__operators__.getitem,
<ide> tf.compat.v1.boolean_mask,
<ide> tf.boolean_mask,
<ide> tf.__operators__.ragged_getitem,
<ide><path>keras/layers/kernelized.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Keras layers that implement explicit (approximate) kernel feature maps."""
<ide>
<ide> import numpy as np
<ide><path>keras/layers/layers_test.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Tests for layers.__init__."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide><path>keras/layers/locally_connected/locally_connected1d.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide> """Locally-connected layer for 1D input."""
<ide>
<ide> from keras import activations
<ide><path>keras/layers/locally_connected/locally_connected2d.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide> """Locally-connected layer for 2D input."""
<ide>
<ide> from keras import activations
<ide><path>keras/layers/merging/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras merging layers."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> # Merging functions.
<ide> # Merging layers.
<ide><path>keras/layers/merging/base_merge.py
<ide> def _compute_elemwise_op_output_shape(self, shape1, shape2):
<ide> if None in [shape1, shape2]:
<ide> return None
<ide> elif len(shape1) < len(shape2):
<del> return self._compute_elemwise_op_output_shape(
<del> shape2, shape1
<del> ) # pylint: disable=arguments-out-of-order
<add> return self._compute_elemwise_op_output_shape(shape2, shape1)
<ide> elif not shape2:
<ide> return shape1
<ide> output_shape = list(shape1[: -len(shape2)])
<ide> def compute_mask(self, inputs, mask=None):
<ide> backend.concatenate(masks, axis=0), axis=0, keepdims=False
<ide> )
<ide>
<del> def get_config(self): # pylint: disable=useless-super-delegation
<add> def get_config(self):
<ide> return super().get_config()
<ide><path>keras/layers/noise.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Layers that operate regularization via the addition of noise."""
<del># pylint: disable=g-bad-import-order,unused-import
<add>
<ide>
<ide> from keras.layers.regularization.alpha_dropout import AlphaDropout # noqa: F401
<ide>
<ide><path>keras/layers/normalization/batch_normalization.py
<ide> def calculate_update_delta():
<ide> if tf.compat.v1.executing_eagerly_outside_functions():
<ide> return variable.assign_sub(calculate_update_delta(), name=scope)
<ide> else:
<del> with tf.compat.v1.colocate_with(
<del> variable
<del> ): # pylint: disable=protected-access
<add> with tf.compat.v1.colocate_with(variable):
<ide> return tf.compat.v1.assign_sub(
<ide> variable, calculate_update_delta(), name=scope
<ide> )
<ide> def _assign_new_value(self, variable, value):
<ide> if tf.compat.v1.executing_eagerly_outside_functions():
<ide> return variable.assign(value, name=scope)
<ide> else:
<del> with tf.compat.v1.colocate_with(
<del> variable
<del> ): # pylint: disable=protected-access
<add> with tf.compat.v1.colocate_with(variable):
<ide> return tf.compat.v1.assign(variable, value, name=scope)
<ide>
<ide> def _fused_batch_norm(self, inputs, training):
<ide> def get_config(self):
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.layers.experimental.SyncBatchNormalization", v1=[])
<ide> class SyncBatchNormalization(BatchNormalizationBase):
<ide> r"""Normalize and scale inputs or activations synchronously across replicas.
<ide><path>keras/layers/normalization/batch_normalization_v1.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Batch Normalization V1 layer."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> from keras.layers.normalization import batch_normalization
<ide>
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=missing-docstring
<ide> @keras_export(v1=["keras.layers.BatchNormalization"])
<ide> class BatchNormalization(batch_normalization.BatchNormalizationBase):
<ide> _USE_V2_BEHAVIOR = False
<ide><path>keras/layers/normalization/layer_normalization.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> @keras_export("keras.layers.LayerNormalization")
<ide> class LayerNormalization(Layer):
<ide><path>keras/layers/normalization/layer_normalization_test.py
<ide> def _test_backward_pass(
<ide> )
<ide> norm.build(x.shape)
<ide>
<del> # pylint: disable=cell-var-from-loop
<ide> def forward_fn(x, beta, gamma):
<ide> # We must monkey-patch the attributes of `norm` with the
<ide> # function arguments, so that the gradient checker will
<ide> def forward_fn(x, beta, gamma):
<ide> ):
<ide> return norm(x)
<ide>
<del> # pylint: enable=cell-var-from-loop
<ide> results = tf.test.compute_gradient(
<ide> forward_fn,
<ide> [keras.backend.cast(x, dtype), norm.beta, norm.gamma],
<ide><path>keras/layers/normalization/unit_normalization.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Unit Normalization layer."""
<del># pylint: disable=g-bad-import-order
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/normalization/unit_normalization_test.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Tests for Unit Normalization layer."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras Pooling layers."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> # Pooling layer aliases.
<ide> # Pooling layers.
<ide><path>keras/layers/pooling/average_pooling1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Average pooling 1D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import functools
<ide>
<ide><path>keras/layers/pooling/average_pooling2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Average pooling 2D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/average_pooling3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Average pooling 3D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/base_global_pooling1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Private base class for global pooling 1D layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/base_global_pooling2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Private base class for global pooling 2D layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/base_global_pooling3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Private base class for global pooling 3D layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/base_pooling1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Private base class for pooling 1D layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/base_pooling2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Private base class for pooling 2D layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/base_pooling3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Private base class for pooling 3D layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/global_average_pooling1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Global average pooling 1D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/global_average_pooling2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Global average pooling 2D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.layers.pooling.base_global_pooling2d import GlobalPooling2D
<ide><path>keras/layers/pooling/global_average_pooling3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Global average pooling 3D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.layers.pooling.base_global_pooling3d import GlobalPooling3D
<ide><path>keras/layers/pooling/global_max_pooling1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Global max pooling 1D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.layers.pooling.base_global_pooling1d import GlobalPooling1D
<ide><path>keras/layers/pooling/global_max_pooling2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Global max pooling 2D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.layers.pooling.base_global_pooling2d import GlobalPooling2D
<ide><path>keras/layers/pooling/global_max_pooling3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Global max pooling 3D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import backend
<ide> from keras.layers.pooling.base_global_pooling3d import GlobalPooling3D
<ide><path>keras/layers/pooling/max_pooling1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Max pooling 1D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import functools
<ide>
<ide><path>keras/layers/pooling/max_pooling2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Max pooling 2D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/pooling/max_pooling3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Max pooling 3D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/benchmarks/index_lookup_forward_benchmark.py
<ide> def tensor_gen(batch, num_elements):
<ide> def get_vocab():
<ide> vocab = list(
<ide> set([a + b for a in string.ascii_letters for b in string.ascii_letters])
<del> ) # pylint:disable=g-complex-comprehension
<add> )
<ide> vocab.sort()
<ide> return vocab
<ide>
<ide><path>keras/layers/preprocessing/category_encoding.py
<ide> # ==============================================================================
<ide> """Keras CategoryEncoding preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/discretization.py
<ide> # ==============================================================================
<ide> """Keras discretization preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide> def build(self, input_shape):
<ide> initializer=lambda shape, dtype: [
<ide> [],
<ide> [],
<del> ], # pylint: disable=unused-arguments
<add> ],
<ide> trainable=False,
<ide> )
<ide>
<ide> def finalize_state(self):
<ide> get_bin_boundaries(self.summary, self.num_bins)
<ide> )
<ide>
<del> def reset_state(self): # pylint: disable=method-hidden
<add> def reset_state(self):
<ide> if self.input_bin_boundaries is not None or not self.built:
<ide> return
<ide>
<ide><path>keras/layers/preprocessing/hashed_crossing.py
<ide> # ==============================================================================
<ide> """Keras hashed crossing preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/hashing.py
<ide> # ==============================================================================
<ide> """Keras hashing preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/preprocessing/image_preprocessing.py
<ide> # ==============================================================================
<ide> """Keras image preprocessing layers."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide> def _augment(self, inputs):
<ide> bounding_box = inputs.get(BOUNDING_BOXES, None)
<ide> transformation = self.get_random_transformation(
<ide> image=image, label=label, bounding_box=bounding_box
<del> ) # pylint: disable=assignment-from-none
<add> )
<ide> image = self.augment_image(image, transformation=transformation)
<ide> result = {IMAGES: image}
<ide> if label is not None:
<ide> def __init__(
<ide> self.width_lower = width_factor[0]
<ide> self.width_upper = width_factor[1]
<ide> else:
<del> self.width_lower = (
<del> -width_factor
<del> ) # pylint: disable=invalid-unary-operand-type
<add> self.width_lower = -width_factor
<ide> self.width_upper = width_factor
<ide>
<ide> if self.width_lower < -1.0 or self.width_upper < -1.0:
<ide><path>keras/layers/preprocessing/index_lookup.py
<ide> # ==============================================================================
<ide> """Keras index lookup preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import collections
<ide>
<ide> def num_tensors(self):
<ide>
<ide> def set_weights(self, weights):
<ide> tokens = tf.convert_to_tensor(weights[0], self._dtype)
<del> self._layer.lookup_table = self._layer._lookup_table_from_tokens(
<del> tokens
<del> ) # pylint: disable=protected-access
<add> self._layer.lookup_table = self._layer._lookup_table_from_tokens(tokens)
<ide>
<ide> def get_tensors(self):
<ide> # Just save the non-config part of the vocab (no special tokens).
<ide> def finalize_state(self):
<ide> # tables.
<ide> self.reset_state()
<ide>
<del> def reset_state(self): # pylint: disable=method-hidden
<add> def reset_state(self):
<ide> if self._has_input_vocabulary:
<ide> return
<ide>
<ide><path>keras/layers/preprocessing/integer_lookup.py
<ide> # ==============================================================================
<ide> """Keras string lookup preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide><path>keras/layers/preprocessing/normalization.py
<ide> # ==============================================================================
<ide> """Normalization preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide> def update_state(self, data):
<ide> self.adapt_variance.assign(total_variance)
<ide> self.count.assign(total_count)
<ide>
<del> def reset_state(self): # pylint: disable=method-hidden
<add> def reset_state(self):
<ide> if self.input_mean is not None or not self.built:
<ide> return
<ide>
<ide><path>keras/layers/preprocessing/preprocessing_stage.py
<ide> from keras.engine import sequential
<ide> from keras.utils import tf_utils
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> # Sequential methods should take precedence.
<ide> class PreprocessingStage(
<ide> def map_fn(x):
<ide> Batch of inputs to be processed by layer
<ide> `self.layers[current_layer_index]`
<ide> """
<del> if (
<del> current_layer_index == 0
<del> ): # pylint: disable=cell-var-from-loop
<add> if current_layer_index == 0:
<ide> return x
<del> for i in range(
<del> current_layer_index
<del> ): # pylint: disable=cell-var-from-loop
<add> for i in range(current_layer_index):
<ide> x = self.layers[i](x)
<ide> return x
<ide>
<ide><path>keras/layers/preprocessing/preprocessing_stage_functional_test.py
<ide> from keras.layers.preprocessing import preprocessing_test_utils
<ide> from keras.testing_infra import test_combinations
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> class PL(base_preprocessing_layer.PreprocessingLayer):
<ide> def __init__(self, **kwargs):
<ide><path>keras/layers/preprocessing/preprocessing_stage_test.py
<ide> from keras.layers.preprocessing import preprocessing_test_utils
<ide> from keras.testing_infra import test_combinations
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> @test_combinations.run_all_keras_modes(always_skip_v1=True)
<ide> class PreprocessingStageTest(
<ide><path>keras/layers/preprocessing/string_lookup.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> @keras_export(
<ide> "keras.layers.StringLookup",
<ide><path>keras/layers/preprocessing/text_vectorization.py
<ide> # ==============================================================================
<ide> """Keras text vectorization preprocessing layer."""
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide> def update_state(self, data):
<ide> def finalize_state(self):
<ide> self._lookup_layer.finalize_state()
<ide>
<del> def reset_state(self): # pylint: disable=method-hidden
<add> def reset_state(self):
<ide> self._lookup_layer.reset_state()
<ide>
<ide> def get_vocabulary(self, include_special_tokens=True):
<ide><path>keras/layers/preprocessing/text_vectorization_test.py
<ide> def test_tfidf_output_hard_maximum(self, sparse):
<ide> )
<ide>
<ide> # pyformat: disable
<del> # pylint: disable=bad-whitespace
<add>
<ide> expected_output = [[0, 0.8, 0.25, 0.75, 0, 0], [1, 0.4, 0, 0, 0.6, 0]]
<del> # pylint: enable=bad-whitespace
<add>
<ide> # pyformat: enable
<ide> max_tokens = 6
<ide> expected_output_shape = [None, max_tokens]
<ide> def test_tfidf_output_soft_maximum(self, sparse):
<ide> )
<ide>
<ide> # pyformat: disable
<del> # pylint: disable=bad-whitespace
<add>
<ide> expected_output = [[0, 0.8, 0.25, 0.75, 0], [1, 0.4, 0, 0, 0.6]]
<del> # pylint: enable=bad-whitespace
<add>
<ide> # pyformat: enable
<ide> max_tokens = 5
<ide> expected_output_shape = [None, max_tokens]
<ide> def test_tfidf_output_set_oov_weight(self, sparse):
<ide> )
<ide>
<ide> # pyformat: disable
<del> # pylint: disable=bad-whitespace
<add>
<ide> expected_output = [[0, 0.8, 0.25, 0.75, 0], [0.2, 0.4, 0, 0, 0.6]]
<del> # pylint: enable=bad-whitespace
<add>
<ide> # pyformat: enable
<ide> max_tokens = 5
<ide> expected_output_shape = [None, max_tokens]
<ide> def test_saving_with_tfidf(self):
<ide> )
<ide>
<ide> # pyformat: disable
<del> # pylint: disable=bad-whitespace
<add>
<ide> expected_output = [[0, 0.8, 0.25, 0.75, 0], [1, 0.4, 0, 0, 0.6]]
<ide> vocab_data = ["earth", "wind", "and", "fire"]
<del> # pylint: enable=bad-whitespace
<add>
<ide> # pyformat: enable
<ide>
<ide> # Build and validate a golden model.
<ide><path>keras/layers/regularization/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras regularization layers."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> from keras.layers.regularization.activity_regularization import (
<ide> ActivityRegularization,
<ide><path>keras/layers/regularization/activity_regularization.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the ActivityRegularization layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import regularizers
<ide> from keras.engine.base_layer import Layer
<ide><path>keras/layers/regularization/alpha_dropout.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the AlphaDropout layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def call(self, inputs, training=None):
<ide> if 0.0 < self.rate < 1.0:
<ide> noise_shape = self._get_noise_shape(inputs)
<ide>
<del> def dropped_inputs(
<del> inputs=inputs, rate=self.rate
<del> ): # pylint: disable=missing-docstring
<add> def dropped_inputs(inputs=inputs, rate=self.rate):
<ide> alpha = 1.6732632423543772848170429916717
<ide> scale = 1.0507009873554804934193349852946
<ide> alpha_p = -alpha * scale
<ide><path>keras/layers/regularization/dropout.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the Dropout layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
<ide> self.supports_masking = True
<ide>
<ide> def build(self, input_shape):
<del> self._random_generator._maybe_init() # pylint: disable=protected-access
<add> self._random_generator._maybe_init()
<ide>
<ide> def _get_noise_shape(self, inputs):
<ide> # Subclasses of `Dropout` may implement `_get_noise_shape(self,
<ide><path>keras/layers/regularization/gaussian_dropout.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the GaussianDropout layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide><path>keras/layers/regularization/gaussian_noise.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the GaussianNoise layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/regularization/spatial_dropout1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the SpatialDropout1D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/regularization/spatial_dropout2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the SpatialDropout2D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/regularization/spatial_dropout3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the SpatialDropout3D layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/cropping1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras cropping layer for 1D input."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/cropping2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras cropping layer for 2D input."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
<ide>
<ide> def compute_output_shape(self, input_shape):
<ide> input_shape = tf.TensorShape(input_shape).as_list()
<del> # pylint: disable=invalid-unary-operand-type
<add>
<ide> if self.data_format == "channels_first":
<ide> return tf.TensorShape(
<ide> [
<ide> def compute_output_shape(self, input_shape):
<ide> input_shape[3],
<ide> ]
<ide> )
<del> # pylint: enable=invalid-unary-operand-type
<ide>
<ide> def call(self, inputs):
<del> # pylint: disable=invalid-unary-operand-type
<add>
<ide> if self.data_format == "channels_first":
<ide> if (
<ide> inputs.shape[2] is not None
<ide> def call(self, inputs):
<ide> self.cropping[0][0] : -self.cropping[0][1],
<ide> self.cropping[1][0] : -self.cropping[1][1],
<ide> :,
<del> ] # pylint: disable=invalid-unary-operand-type
<del> # pylint: enable=invalid-unary-operand-type
<add> ]
<ide>
<ide> def get_config(self):
<ide> config = {"cropping": self.cropping, "data_format": self.data_format}
<ide><path>keras/layers/reshaping/cropping3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras cropping layer for 3D input."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def __init__(
<ide>
<ide> def compute_output_shape(self, input_shape):
<ide> input_shape = tf.TensorShape(input_shape).as_list()
<del> # pylint: disable=invalid-unary-operand-type
<add>
<ide> if self.data_format == "channels_first":
<ide> if input_shape[2] is not None:
<ide> dim1 = (
<ide> def compute_output_shape(self, input_shape):
<ide> return tf.TensorShape(
<ide> [input_shape[0], dim1, dim2, dim3, input_shape[4]]
<ide> )
<del> # pylint: enable=invalid-unary-operand-type
<ide>
<ide> def call(self, inputs):
<del> # pylint: disable=invalid-unary-operand-type
<add>
<ide> if self.data_format == "channels_first":
<ide> if (
<ide> self.cropping[0][1]
<ide> def call(self, inputs):
<ide> self.cropping[1][0] : -self.cropping[1][1],
<ide> self.cropping[2][0] : -self.cropping[2][1],
<ide> :,
<del> ] # pylint: disable=invalid-unary-operand-type
<del> # pylint: enable=invalid-unary-operand-type
<add> ]
<ide>
<ide> def get_config(self):
<ide> config = {"cropping": self.cropping, "data_format": self.data_format}
<ide><path>keras/layers/reshaping/flatten.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the flatten layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import functools
<ide> import operator
<ide><path>keras/layers/reshaping/permute.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the Permute layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import copy
<ide>
<ide><path>keras/layers/reshaping/repeat_vector.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the RepeatVector layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/reshape.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Contains the Reshape layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide><path>keras/layers/reshaping/up_sampling1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras upsampling layer for 1D inputs."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/up_sampling2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras upsampling layer for 2D inputs."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/up_sampling3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras upsampling layer for 3D inputs."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/zero_padding1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras zero-padding layer for 1D input."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/zero_padding2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras zero-padding layer for 2D input."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/reshaping/zero_padding3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras zero-padding layer for 3D input."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/rnn/abstract_rnn_cell.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Base class for RNN cells."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras.engine import base_layer
<ide> from keras.layers.rnn import rnn_utils
<ide><path>keras/layers/rnn/base_conv_lstm.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Base class for N-D convolutional LSTM layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/rnn/base_conv_rnn.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Base class for convolutional-recurrent layers."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import numpy as np
<ide> import tensorflow.compat.v2 as tf
<ide> def build(self, input_shape):
<ide> # Note input_shape will be list of shapes of initial states and
<ide> # constants if these are passed in __call__.
<ide> if self._num_constants is not None:
<del> constants_shape = input_shape[
<del> -self._num_constants :
<del> ] # pylint: disable=invalid-unary-operand-type
<add> constants_shape = input_shape[-self._num_constants :]
<ide> else:
<ide> constants_shape = None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> def step(inputs, states):
<del> constants = states[
<del> -self._num_constants :
<del> ] # pylint: disable=invalid-unary-operand-type
<del> states = states[
<del> : -self._num_constants
<del> ] # pylint: disable=invalid-unary-operand-type
<add> constants = states[-self._num_constants :]
<add> states = states[: -self._num_constants]
<ide> return self.cell.call(
<ide> inputs, states, constants=constants, **kwargs
<ide> )
<ide><path>keras/layers/rnn/base_cudnn_rnn.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Base class for recurrent layers backed by cuDNN."""
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def __init__(
<ide> ):
<ide> # We invoke the base layer's initializer directly here because we do not
<ide> # want to create RNN cell instance.
<del> super(RNN, self).__init__(**kwargs) # pylint: disable=bad-super-call
<add> super(RNN, self).__init__(**kwargs)
<ide> self.return_sequences = return_sequences
<ide> self.return_state = return_state
<ide> self.go_backwards = go_backwards
<ide> def get_config(self):
<ide> "stateful": self.stateful,
<ide> "time_major": self.time_major,
<ide> }
<del> base_config = super( # pylint: disable=bad-super-call
<del> RNN, self
<del> ).get_config()
<add> base_config = super(RNN, self).get_config()
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<ide> @classmethod
<ide> def non_trainable_weights(self):
<ide>
<ide> @property
<ide> def losses(self):
<del> return super(RNN, self).losses # pylint: disable=bad-super-call
<add> return super(RNN, self).losses
<ide>
<ide> def get_losses_for(self, inputs=None):
<del> return super( # pylint: disable=bad-super-call
<del> RNN, self
<del> ).get_losses_for(inputs=inputs)
<add> return super(RNN, self).get_losses_for(inputs=inputs)
<ide><path>keras/layers/rnn/base_rnn.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Base class for recurrent layers."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import collections
<ide>
<ide> def call(
<ide> )
<ide>
<ide> def step(inputs, states):
<del> constants = states[
<del> -self._num_constants :
<del> ] # pylint: disable=invalid-unary-operand-type
<del> states = states[
<del> : -self._num_constants
<del> ] # pylint: disable=invalid-unary-operand-type
<add> constants = states[-self._num_constants :]
<add> states = states[: -self._num_constants]
<ide>
<ide> states = (
<ide> states[0] if len(states) == 1 and is_tf_rnn_cell else states
<ide> def from_config(cls, config, custom_objects=None):
<ide> )
<ide> num_constants = config.pop("num_constants", 0)
<ide> layer = cls(cell, **config)
<del> layer._num_constants = num_constants # pylint: disable=protected-access
<add> layer._num_constants = num_constants
<ide> return layer
<ide>
<ide> @property
<ide><path>keras/layers/rnn/base_wrapper.py
<ide>
<ide> Wrappers are layers that augment the functionality of another layer.
<ide> """
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import copy
<ide>
<ide><path>keras/layers/rnn/bidirectional.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Bidirectional wrapper for RNNs."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import copy
<ide>
<ide> def force_zero_output_for_mask(layer):
<ide>
<ide> @property
<ide> def _use_input_spec_as_call_signature(self):
<del> return (
<del> self.layer._use_input_spec_as_call_signature
<del> ) # pylint: disable=protected-access
<add> return self.layer._use_input_spec_as_call_signature
<ide>
<ide> def _verify_layer_config(self):
<ide> """Ensure the forward and backward layers have valid common property."""
<ide> def from_config(cls, config, custom_objects=None):
<ide> config["backward_layer"] = backward_layer
<ide> # Instantiate the wrapper, adjust it and return it.
<ide> layer = cls(**config)
<del> layer._num_constants = num_constants # pylint: disable=protected-access
<add> layer._num_constants = num_constants
<ide> return layer
<ide><path>keras/layers/rnn/bidirectional_test.py
<ide> def test_Bidirectional_ragged_input(self, merge_mode):
<ide> )
<ide> x = tf.cast(x, "float32")
<ide>
<del> # pylint: disable=g-long-lambda
<ide> with self.cached_session():
<ide> if merge_mode == "ave":
<ide> merge_func = lambda y, y_rev: (y + y_rev) / 2
<ide> elif merge_mode == "concat":
<ide> merge_func = lambda y, y_rev: tf.concat((y, y_rev), axis=-1)
<ide> elif merge_mode == "mul":
<ide> merge_func = lambda y, y_rev: (y * y_rev)
<del> # pylint: enable=g-long-lambda
<ide>
<ide> inputs = keras.Input(
<ide> shape=(None, 3), batch_size=4, dtype="float32", ragged=True
<ide><path>keras/layers/rnn/cell_wrappers.py
<ide> def get_config(self):
<ide> "input_size": self._input_size,
<ide> "seed": self._seed,
<ide> }
<del> if (
<del> self._dropout_state_filter != _default_dropout_state_filter_visitor
<del> ): # pylint: disable=comparison-with-callable
<add> if self._dropout_state_filter != _default_dropout_state_filter_visitor:
<ide> (
<ide> function,
<ide> function_type,
<ide><path>keras/layers/rnn/conv_lstm1d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """1D Convolutional LSTM layer."""
<del># pylint: disable=g-classes-have-attributes,disable=g-direct-tensorflow-import
<add>
<ide>
<ide> from keras.layers.rnn.base_conv_lstm import ConvLSTM
<ide>
<ide><path>keras/layers/rnn/conv_lstm2d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """2D Convolutional LSTM layer."""
<del># pylint: disable=g-classes-have-attributes,disable=g-direct-tensorflow-import
<add>
<ide>
<ide> from keras.layers.rnn.base_conv_lstm import ConvLSTM
<ide>
<ide><path>keras/layers/rnn/conv_lstm3d.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """3D Convolutional LSTM layer."""
<del># pylint: disable=g-classes-have-attributes,disable=g-direct-tensorflow-import
<add>
<ide>
<ide> from keras.layers.rnn.base_conv_lstm import ConvLSTM
<ide>
<ide><path>keras/layers/rnn/cudnn_gru.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Fast GRU layer backed by cuDNN."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import collections
<ide>
<ide><path>keras/layers/rnn/cudnn_lstm.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Fast LSTM layer backed by cuDNN."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import collections
<ide>
<ide><path>keras/layers/rnn/gru.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Gated Recurrent Unit layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import uuid
<ide>
<ide><path>keras/layers/rnn/gru_lstm_utils.py
<ide> def __init__(self, time_major, go_backwards, layer_name):
<ide> }
<ide> if self.layer_name == "lstm":
<ide> from keras.layers.rnn import (
<del> lstm, # pylint: disable=g-import-not-at-top
<add> lstm,
<ide> )
<ide>
<ide> layer_func = lstm.lstm_with_backend_selection
<ide> else:
<ide> from keras.layers.rnn import (
<del> gru, # pylint: disable=g-import-not-at-top
<add> gru,
<ide> )
<ide>
<ide> layer_func = gru.gru_with_backend_selection
<ide><path>keras/layers/rnn/gru_v1.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Gated Recurrent Unit V1 layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import activations
<ide> from keras import constraints
<ide><path>keras/layers/rnn/legacy_cell_wrappers.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Module implementing the V1 version of RNN cell wrappers."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from __future__ import absolute_import
<ide> from __future__ import division
<ide> def get_config(self):
<ide> "input_size": self._input_size,
<ide> "seed": self._seed,
<ide> }
<del> if (
<del> self._dropout_state_filter != _default_dropout_state_filter_visitor
<del> ): # pylint: disable=comparison-with-callable
<add> if self._dropout_state_filter != _default_dropout_state_filter_visitor:
<ide> (
<ide> function,
<ide> function_type,
<ide> def get_config(self):
<ide>
<ide> def _default_dropout_state_filter_visitor(substate):
<ide> from keras.layers.rnn.legacy_cells import (
<del> LSTMStateTuple, # pylint: disable=g-import-not-at-top
<add> LSTMStateTuple,
<ide> )
<ide>
<ide> if isinstance(substate, LSTMStateTuple):
<ide><path>keras/layers/rnn/legacy_cells.py
<ide> Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
<ide> calling the `rnn` ops several times.
<ide> """
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from __future__ import absolute_import
<ide> from __future__ import division
<ide> def zero_state(self, batch_size, dtype):
<ide> return output
<ide>
<ide> # TODO(b/134773139): Remove when contrib RNN cells implement `get_config`
<del> def get_config(self): # pylint: disable=useless-super-delegation
<add> def get_config(self):
<ide> return super().get_config()
<ide>
<ide> @property
<ide> def call(self, inputs, state):
<ide> ) * self._activation(j)
<ide>
<ide> if self._cell_clip is not None:
<del> # pylint: disable=invalid-unary-operand-type
<add>
<ide> c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)
<del> # pylint: enable=invalid-unary-operand-type
<add>
<ide> if self._use_peepholes:
<ide> m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
<ide> else:
<ide> def call(self, inputs, state):
<ide> m = tf.matmul(m, self._proj_kernel)
<ide>
<ide> if self._proj_clip is not None:
<del> # pylint: disable=invalid-unary-operand-type
<add>
<ide> m = tf.clip_by_value(m, -self._proj_clip, self._proj_clip)
<del> # pylint: enable=invalid-unary-operand-type
<ide>
<ide> new_state = (
<ide> LSTMStateTuple(c, m)
<ide><path>keras/layers/rnn/lstm.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Long Short-Term Memory layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import uuid
<ide>
<ide><path>keras/layers/rnn/lstm_v1.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Long Short-Term Memory V1 layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> from keras import activations
<ide> from keras import constraints
<ide><path>keras/layers/rnn/rnn_utils.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Utilities for RNN cells and layers."""
<del># pylint: disable=protected-access
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/rnn/simple_rnn.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Fully connected RNN layer."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide><path>keras/layers/rnn/stacked_rnn_cells.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Wrapper allowing a stack of RNN cells to behave as a single cell."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import functools
<ide>
<ide><path>keras/layers/rnn/time_distributed.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Wrapper layer to apply every temporal slice of an input."""
<del># pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> def step(x, _):
<ide> mask=mask,
<ide> unroll=False,
<ide> )
<del> # pylint: disable=g-long-lambda
<add>
<ide> y = tf.nest.map_structure(
<ide> lambda output: backend.maybe_convert_to_ragged(
<ide> is_ragged_input, output, row_lengths
<ide> def step(x, _):
<ide>
<ide> # Shape: (num_samples, timesteps, ...)
<ide> output_shape = self.compute_output_shape(input_shape)
<del> # pylint: disable=g-long-lambda
<add>
<ide> output_shape = tf.nest.map_structure(
<ide> lambda tensor, int_shape: self._get_shape_tuple(
<ide> (-1, input_length), tensor, 1, int_shape[2:]
<ide><path>keras/layers/serialization.py
<ide> def populate_deserializable_objects():
<ide> ] = batch_normalization.BatchNormalization
<ide>
<ide> # Prevent circular dependencies.
<del> from keras import models # pylint: disable=g-import-not-at-top
<add> from keras import models
<ide> from keras.feature_column.sequence_feature_column import (
<del> SequenceFeatures, # pylint: disable=g-import-not-at-top
<add> SequenceFeatures,
<ide> )
<ide> from keras.premade_models.linear import (
<del> LinearModel, # pylint: disable=g-import-not-at-top
<add> LinearModel,
<ide> )
<ide> from keras.premade_models.wide_deep import (
<del> WideDeepModel, # pylint: disable=g-import-not-at-top
<add> WideDeepModel,
<ide> )
<ide>
<ide> LOCAL.ALL_OBJECTS["Input"] = input_layer.Input
<ide> def populate_deserializable_objects():
<ide>
<ide> if tf.__internal__.tf2.enabled():
<ide> from keras.feature_column.dense_features_v2 import (
<del> DenseFeatures, # pylint: disable=g-import-not-at-top
<add> DenseFeatures,
<ide> )
<ide>
<ide> LOCAL.ALL_OBJECTS["DenseFeatures"] = DenseFeatures
<ide> else:
<ide> from keras.feature_column.dense_features import (
<del> DenseFeatures, # pylint: disable=g-import-not-at-top
<add> DenseFeatures,
<ide> )
<ide>
<ide> LOCAL.ALL_OBJECTS["DenseFeatures"] = DenseFeatures
<ide><path>keras/legacy_tf_layers/__init__.py
<ide> """Init file."""
<ide>
<del>from keras.legacy_tf_layers import (
<del> migration_utils, # pylint: disable=unused-import
<del>)
<add>from keras.legacy_tf_layers import migration_utils
<ide><path>keras/legacy_tf_layers/base.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # =============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Contains the base Layer class, from which all layers inherit."""
<ide> from __future__ import absolute_import
<ide> from __future__ import division
<ide> def add_loss(self, losses, inputs=None):
<ide> new_losses, tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
<ide> )
<ide>
<del> def _name_scope(self): # pylint: disable=method-hidden
<add> def _name_scope(self):
<ide> """Determines op naming for the Layer."""
<ide> if self._keras_style:
<ide> return super()._name_scope()
<ide> def _should_add_regularizer(variable, existing_variable_set):
<ide> self._scope, reuse=reuse, auxiliary_name_scope=False
<ide> ) as scope:
<ide> self._current_scope = scope
<del> with backend.name_scope(
<del> self._name_scope()
<del> ): # pylint: disable=not-callable
<add> with backend.name_scope(self._name_scope()):
<ide> use_resource = (
<ide> use_resource
<ide> or self._use_resource_variables
<ide> def _should_add_regularizer(variable, existing_variable_set):
<ide> self._handle_weight_regularization(
<ide> name, variable, regularizer
<ide> )
<del> var_store = (
<del> vs._get_default_variable_store()
<del> ) # pylint: disable=protected-access
<add> var_store = vs._get_default_variable_store()
<ide> # When the shim to get variable scope working in TF2 is
<ide> # used, We need to explicitly make the shim track the
<ide> # regularization losses as the collections will not be
<ide> def __call__(self, inputs, *args, **kwargs):
<ide> # Some classes which inherit from Layer do not use its
<ide> # constructor, so rather than initializing to None we check for
<ide> # an AttributeError.
<del> scope_context_manager = (
<del> self._always_reuse_variable_scope
<del> ) # pylint: disable=access-member-before-definition
<add> scope_context_manager = self._always_reuse_variable_scope
<ide> except AttributeError:
<ide> scope_context_manager = None
<ide>
<ide> def __deepcopy__(self, memo):
<ide> def __setattr__(self, value, name):
<ide> # By-pass the automatic dependency tracking performed by the parent
<ide> # Layer.
<del> super(tf.__internal__.tracking.Trackable, self).__setattr__(
<del> value, name
<del> ) # pylint: disable=bad-super-call
<add> super(tf.__internal__.tracking.Trackable, self).__setattr__(value, name)
<ide>
<ide> @property
<ide> def _is_legacy_layer(self):
<ide><path>keras/legacy_tf_layers/convolutional.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # =============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Contains the convolutional layer classes and their functional aliases."""
<ide> from __future__ import absolute_import
<ide> from __future__ import division
<ide><path>keras/legacy_tf_layers/core.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # =============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Contains the core layers: Dense, Dropout.
<ide>
<ide> Also contains their functional aliases.
<ide><path>keras/legacy_tf_layers/core_test.py
<ide> def testFunctionalDenseInScope(self):
<ide> def testComputeOutputShape(self):
<ide> dense = core_layers.Dense(2, activation=tf.nn.relu, name="dense1")
<ide> ts = tf.TensorShape
<del> # pylint: disable=protected-access
<add>
<ide> with self.assertRaises(ValueError):
<ide> dense.compute_output_shape(ts(None))
<ide> with self.assertRaises(ValueError):
<ide> def testComputeOutputShape(self):
<ide> self.assertEqual(
<ide> [None, 4, 2], dense.compute_output_shape(ts([None, 4, 3])).as_list()
<ide> )
<del> # pylint: enable=protected-access
<ide>
<ide> @test_combinations.generate(
<ide> test_combinations.combine(mode=["graph", "eager"])
<ide> def testConstraints(self):
<ide>
<ide>
<ide> def _get_variable_dict_from_varstore():
<del> var_dict = (
<del> variable_scope._get_default_variable_store()._vars
<del> ) # pylint: disable=protected-access
<add> var_dict = variable_scope._get_default_variable_store()._vars
<ide> sorted_var_dict = collections.OrderedDict(
<ide> sorted(var_dict.items(), key=lambda t: t[0])
<ide> )
<ide><path>keras/legacy_tf_layers/normalization.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # =============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Contains the normalization layer classes and their functional aliases."""
<ide> from __future__ import absolute_import
<ide> from __future__ import division
<ide><path>keras/legacy_tf_layers/pooling.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # =============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Contains the pooling layer classes and their functional aliases."""
<ide> from __future__ import absolute_import
<ide> from __future__ import division
<ide><path>keras/legacy_tf_layers/variable_scope_shim.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # =============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Contains a shim to allow using TF1 get_variable code in TF2."""
<ide> from __future__ import absolute_import
<ide> from __future__ import division
<ide> def custom_getter(getter, name, *args, **kwargs):
<ide> # it to custom_getter.
<ide> # Note: the parameters of _true_getter, and their documentation, match
<ide> # *exactly* item-for-item with the docstring of this method.
<del> def _true_getter( # pylint: disable=missing-docstring
<add> def _true_getter(
<ide> name,
<ide> shape=None,
<ide> dtype=tf.float32,
<ide> initializer=None,
<ide> regularizer=None,
<ide> reuse=None,
<ide> trainable=None,
<del> collections=None, # pylint: disable=unused-argument
<add> collections=None,
<ide> caching_device=None,
<ide> partitioner=None,
<ide> validate_shape=True,
<del> use_resource=None, # pylint: disable=unused-argument
<add> use_resource=None,
<ide> constraint=None,
<ide> synchronization=tf.VariableSynchronization.AUTO,
<ide> aggregation=tf.compat.v1.VariableAggregation.NONE,
<ide> def _get_single_variable(
<ide> return found_var
<ide>
<ide> # The code below handles only the case of creating a new variable.
<del> if reuse is True: # pylint: disable=g-bool-id-comparison
<add> if reuse is True:
<ide> raise ValueError(
<ide> "Variable %s does not exist, or was not created with "
<ide> "tf.get_variable(). Did you mean to set "
<ide> def _method_wrapper(self, *args, **kwargs):
<ide> "does not extend Module, Layer, or Model.".format(self)
<ide> )
<ide> var_store = _EagerVariableStore()
<del> self._tf1_style_var_store = (
<del> var_store # pylint: disable=protected-access
<del> )
<add> self._tf1_style_var_store = var_store
<ide>
<del> existing_regularized_variables = set(
<del> var_store._regularizers.keys()
<del> ) # pylint: disable=protected-access
<add> existing_regularized_variables = set(var_store._regularizers.keys())
<ide> with var_store.scope():
<ide> out = method(self, *args, **kwargs)
<ide>
<ide> def _method_wrapper(self, *args, **kwargs):
<ide> for (
<ide> var_name,
<ide> regularizer,
<del> ) in (
<del> var_store._regularizers.items()
<del> ): # pylint: disable=protected-access
<add> ) in var_store._regularizers.items():
<ide> if var_name not in existing_regularized_variables:
<ide> self.add_loss(regularizer)
<ide>
<ide> def call(self, inputs):
<ide> Returns:
<ide> The created layer.
<ide> """
<del> store = vs._get_default_variable_store() # pylint: disable=protected-access
<add> store = vs._get_default_variable_store()
<ide> if not isinstance(store, _EagerVariableStore):
<ide> if not tf.compat.v1.executing_eagerly_outside_functions():
<ide> # tf1 case; just create and return layer
<ide><path>keras/legacy_tf_layers/variable_scope_shim_test.py
<ide> def get_compat_v1_regularization_losses(self):
<ide> return {
<ide> name: regularizer()
<ide> for name, regularizer in self._tf1_style_var_store._regularizers.items() # noqa: E501
<del> } # pylint: disable=protected-access
<add> }
<ide>
<ide>
<ide> @test_combinations.generate(test_combinations.combine(mode=["eager"]))
<ide> def get_compat_v1_regularization_losses(self):
<ide> return {
<ide> name: regularizer()
<ide> for name, regularizer in self._variable_store._regularizers.items() # noqa: E501
<del> } # pylint: disable=protected-access
<add> }
<ide>
<ide> def __call__(self, inputs, training=None):
<ide> with self._variable_store.scope():
<ide><path>keras/losses.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes
<add>
<ide> """Built-in loss functions."""
<ide>
<ide>
<ide> def get_config(self):
<ide> backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
<ide> )
<ide>
<del> if saving_lib._ENABLED: # pylint: disable=protected-access
<add> if saving_lib._ENABLED:
<ide> config["fn"] = generic_utils.get_registered_name(self.fn)
<ide>
<ide> base_config = super().get_config()
<ide> def from_config(cls, config):
<ide> Returns:
<ide> A `keras.losses.Loss` instance.
<ide> """
<del> if saving_lib._ENABLED: # pylint: disable=protected-access
<add> if saving_lib._ENABLED:
<ide> fn_name = config.pop("fn", None)
<ide> if fn_name and cls is LossFunctionWrapper:
<ide> config["fn"] = get(fn_name)
<ide><path>keras/metrics/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """All Keras metrics."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> # Utilities
<ide> # Base classes
<ide><path>keras/metrics/base_metric.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes
<del># pylint: disable=g-doc-return-or-yield
<add>
<add>
<ide> """Base Metric classes."""
<ide>
<ide> import abc
<ide> def replica_local_fn(*args, **kwargs):
<ide> ):
<ide> update_op = None
<ide> else:
<del> update_op = self.update_state(
<del> *args, **kwargs
<del> ) # pylint: disable=not-callable
<add> update_op = self.update_state(*args, **kwargs)
<ide> update_ops = []
<ide> if update_op is not None:
<ide> update_ops.append(update_op)
<ide> with tf.control_dependencies(update_ops):
<del> result_t = self.result() # pylint: disable=not-callable
<add> result_t = self.result()
<ide>
<ide> # We are adding the metric object as metadata on the result
<ide> # tensor. This is required when we want to use a metric with
<ide> def replica_local_fn(*args, **kwargs):
<ide> # model = Model()
<ide> # mean = Mean()
<ide> # model.add_metric(mean(values), name='mean')
<del> result_t._metric_obj = self # pylint: disable=protected-access
<add> result_t._metric_obj = self
<ide> return result_t
<ide>
<ide> from keras.distribute import (
<del> distributed_training_utils, # pylint:disable=g-import-not-at-top
<add> distributed_training_utils,
<ide> )
<ide>
<ide> return distributed_training_utils.call_replica_local_fn(
<ide> def update_state(self, y_true, y_pred, sample_weight=None):
<ide> def get_config(self):
<ide> config = {}
<ide>
<del> if (
<del> type(self) is MeanMetricWrapper
<del> ): # pylint: disable=unidiomatic-typecheck
<add> if type(self) is MeanMetricWrapper:
<ide> # Only include function argument when the object is a
<ide> # MeanMetricWrapper and not a subclass.
<ide> config["fn"] = self._fn
<ide> def get_config(self):
<ide>
<ide> @classmethod
<ide> def from_config(cls, config):
<del> from keras.metrics import get # pylint: disable=g-import-not-at-top
<add> from keras.metrics import get
<ide>
<ide> # Note that while MeanMetricWrapper itself isn't public, objects of this
<ide> # class may be created and added to the model by calling model.compile.
<ide> def _build(self, shape):
<ide> )
<ide> with tf.init_scope():
<ide> if not tf.executing_eagerly():
<del> backend._initialize_variables(
<del> backend._get_session()
<del> ) # pylint: disable=protected-access
<add> backend._initialize_variables(backend._get_session())
<ide> self._built = True
<ide>
<ide> @property
<ide><path>keras/metrics/base_metric_test.py
<ide> def test_unweighted(self):
<ide> ]
<ide> )
<ide>
<del> update_op = btp_obj.update_state(
<del> y_true, y_pred
<del> ) # pylint: disable=assignment-from-no-return
<add> update_op = btp_obj.update_state(y_true, y_pred)
<ide> self.evaluate(update_op)
<ide> result = btp_obj.result()
<ide> self.assertEqual(7, self.evaluate(result))
<ide> def test_invalid_custom_metric_fn_error_msg(self):
<ide> y = layers.Dense(3)(x)
<ide> model = training_module.Model(x, y)
<ide>
<del> def bad_metric(
<del> y_true, y_pred, sample_weight=None
<del> ): # pylint: disable=unused-argument
<add> def bad_metric(y_true, y_pred, sample_weight=None):
<ide> return None
<ide>
<del> def dict_metric(
<del> y_true, y_pred, sample_weight=None
<del> ): # pylint: disable=unused-argument
<add> def dict_metric(y_true, y_pred, sample_weight=None):
<ide> return {"value": 0.0}
<ide>
<ide> with self.assertRaisesRegex(
<ide><path>keras/metrics/confusion_matrix_test.py
<ide> def test_invalid_summation_method(self):
<ide>
<ide> def test_extra_dims(self):
<ide> try:
<del> from scipy import special # pylint: disable=g-import-not-at-top
<add> from scipy import special
<ide>
<ide> self.setup()
<ide> logits = special.expit(
<ide><path>keras/metrics/metrics.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=g-classes-have-attributes
<del># pylint: disable=g-doc-return-or-yield
<add>
<add>
<ide> """Built-in metrics."""
<ide>
<ide> import abc
<ide> def _build(self, shape):
<ide> # AUC should be initialized outside of any tf.functions, and
<ide> # therefore in eager mode.
<ide> if not tf.executing_eagerly():
<del> backend._initialize_variables(
<del> backend._get_session()
<del> ) # pylint: disable=protected-access
<add> backend._initialize_variables(backend._get_session())
<ide>
<ide> self._built = True
<ide>
<ide><path>keras/mixed_precision/autocast_variable.py
<ide> def numpy_text(tensor, is_repr=False):
<ide> """Human readable representation of a tensor's numpy value."""
<ide> if tensor.dtype.is_numpy_compatible:
<del> # pylint: disable=protected-access
<add>
<ide> text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
<del> # pylint: enable=protected-access
<add>
<ide> else:
<ide> text = "<unprintable>"
<ide> if "\n" in text:
<ide> def _apply_assign_update(
<ide> # 'op' attribute is defined. This matches the behavior of
<ide> # tf.Variable.assign.
<ide> var = create_autocast_variable(self._variable)
<del> var._op = assign_op # pylint:disable=protected-access
<add> var._op = assign_op
<ide> return var
<ide> return assign_op
<ide>
<ide> def name(self):
<ide>
<ide> @property
<ide> def _shared_name(self):
<del> return self._variable._shared_name # pylint:disable=protected-access
<add> return self._variable._shared_name
<ide>
<ide> @property
<ide> def initializer(self):
<ide> def op(self):
<ide> return self._op
<ide>
<ide> def _as_graph_element(self):
<del> graph_element = (
<del> self._variable._as_graph_element()
<del> ) # pylint:disable=protected-access
<add> graph_element = self._variable._as_graph_element()
<ide> if graph_element is None:
<ide> return self._op
<ide> return graph_element
<ide> def _gather_saveables_for_checkpoint(self):
<ide> # AutoCastVariables are identical to checkpoints with normal variables.
<ide> # Therefore models checkpointed with AutoCastVariables can be restored
<ide> # on models with normal variables, and vice versa.
<del> return (
<del> self._variable._gather_saveables_for_checkpoint()
<del> ) # pylint:disable=protected-access
<add> return self._variable._gather_saveables_for_checkpoint()
<ide>
<ide> def _map_resources(self, save_options):
<ide> # By delegating this method to the wrapped variable, SavedModel with
<ide> # AutoCastVariables are identical to SavedModel with normal variables.
<del> obj_map, resource_map = self._variable._map_resources(
<del> save_options
<del> ) # pylint:disable=protected-access
<add> obj_map, resource_map = self._variable._map_resources(save_options)
<ide> obj_map[self] = obj_map[self._variable]
<ide> return obj_map, resource_map
<ide>
<ide> def from_proto(self, variable_def, import_scope=None):
<ide> # private attributes is hacky and difficult to maintain.
<ide> @property
<ide> def _handle_name(self):
<del> return self._variable._handle_name # pylint: disable=protected-access
<add> return self._variable._handle_name
<ide>
<ide> @_handle_name.setter
<ide> def _handle_name(self, handle_name):
<del> self._variable._handle_name = (
<del> handle_name # pylint: disable=protected-access
<del> )
<add> self._variable._handle_name = handle_name
<ide>
<ide> @property
<ide> def _initializer_op(self):
<del> return (
<del> self._variable._initializer_op
<del> ) # pylint: disable=protected-access
<add> return self._variable._initializer_op
<ide>
<ide> @_initializer_op.setter
<ide> def _initializer_op(self, initializer_op):
<del> self._variable._initializer_op = (
<del> initializer_op # pylint: disable=protected-access
<del> )
<add> self._variable._initializer_op = initializer_op
<ide>
<ide> # Operator overloads:
<ide> # Note we only overload operators that support floating-point types, as
<ide> def __rpow__(self, o):
<ide> return pow(o, self.read_value())
<ide>
<ide> def __neg__(self):
<del> return -self.read_value() # pylint: disable=invalid-unary-operand-type
<add> return -self.read_value()
<ide>
<ide> def __abs__(self):
<ide> return abs(self.read_value())
<ide> def __rmatmul__(self, o):
<ide> # https://docs.python.org/3/library/constants.html#NotImplemented
<ide> return NotImplemented
<ide>
<del> # pylint: enable=multiple-statements
<del>
<ide>
<ide> tf.register_tensor_conversion_function(
<ide> AutoCastVariable, AutoCastVariable._dense_var_to_tensor
<del>) # pylint:disable=protected-access
<add>)
<ide>
<ide>
<ide> def create_autocast_variable(variable):
<ide> class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):
<ide>
<ide> def __repr__(self):
<ide>
<del> # pylint: disable=missing-format-attribute
<ide> return (
<ide> "<AutoCastDistributedVariable dtype={v.dtype.name} "
<ide> "dtype_to_cast_to={v._cast_dtype.name} "
<ide> "inner_variable={v._variable}>"
<ide> ).format(v=self)
<del> # pylint: enable=missing-format-attribute
<ide>
<ide> return AutoCastDistributedVariable(variable)
<ide>
<ide>
<del>class enable_auto_cast_variables: # pylint:disable=invalid-name
<add>class enable_auto_cast_variables:
<ide> """Context manager which enables the autocasting of `AutoCastVariable`s.
<ide>
<ide> Under this context manager, `AutoCastVariable`s will be cast to `dtype` if
<ide><path>keras/mixed_precision/autocast_variable_test.py
<ide> def evaluate(var):
<ide> self.assertIsInstance(
<ide> var, autocast_variable.AutoCastVariable
<ide> )
<del> self.assertEqual(
<del> tf.identity(var).dtype, read_dtype
<del> ) # pylint: disable=cell-var-from-loop
<add> self.assertEqual(tf.identity(var).dtype, read_dtype)
<ide> return self.evaluate(var)
<ide>
<ide> x = get_var(7.0, tf.float32)
<ide> def test_op_attribute(self, distribution):
<ide> # AutoCastVariable.
<ide> if tf.executing_eagerly():
<ide> with self.assertRaises(AttributeError):
<del> x.op # pylint: disable=pointless-statement
<add> x.op
<ide> self.assertIsNone(x.assign(1.0).op)
<ide> self.assertIsNone(x.assign_add(1.0).op)
<ide> self.assertIsNone(x.assign_sub(1.0).op)
<ide><path>keras/mixed_precision/loss_scale_optimizer.py
<ide> def _add_weight(self, name, initial_value, dtype=None):
<ide> graph_key = None
<ide> else:
<ide> graph = tf.compat.v1.get_default_graph()
<del> graph_key = graph._graph_key # pylint: disable=protected-access
<add> graph_key = graph._graph_key
<ide>
<ide> key = (name, graph_key)
<ide> self._weights[key] = variable
<ide> def _trackable_children(self, save_type="checkpoint", **kwargs):
<ide> graph_key = None
<ide> else:
<ide> graph = tf.compat.v1.get_default_graph()
<del> graph_key = graph._graph_key # pylint: disable=protected-access
<add> graph_key = graph._graph_key
<ide> weights = {}
<ide> for (name, g), v in sorted(
<ide> self._weights.items(), key=lambda i: i[0][0]
<ide> def _lookup_dependency(self, name):
<ide> graph_key = None
<ide> else:
<ide> graph = tf.compat.v1.get_default_graph()
<del> graph_key = graph._graph_key # pylint: disable=protected-access
<add> graph_key = graph._graph_key
<ide> return self._weights.get((name, graph_key), None)
<ide>
<ide> @property
<ide> def __call__(cls, inner_optimizer, *args, **kwargs):
<ide>
<ide>
<ide> # TODO(b/215389169): Delete this class after `OptimizerV2` is deprecated.
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> @keras_export("keras.mixed_precision.LossScaleOptimizer")
<ide> class BaseLossScaleOptimizer(metaclass=LossScaleOptimizerMetaclass):
<ide> """An optimizer that applies loss scaling to prevent numeric underflow.
<ide> def get_unscaled_gradients(self, grads):
<ide> raise NotImplementedError
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> class LossScaleOptimizer(
<ide> tf.__internal__.tracking.DelegatingTrackableMixin,
<ide> optimizer_v2.OptimizerV2,
<ide> def get_gradients(self, loss, params):
<ide> return self.get_unscaled_gradients(grads)
<ide>
<ide> def _create_all_weights(self, var_list):
<del> self._optimizer._create_all_weights(
<del> var_list
<del> ) # pylint: disable=protected-access
<add> self._optimizer._create_all_weights(var_list)
<ide>
<ide> def apply_gradients(
<ide> self, grads_and_vars, name=None, experimental_aggregate_gradients=True
<ide> def apply_gradients(
<ide> grads_and_vars = self._optimizer._aggregate_gradients(
<ide> grads_and_vars
<ide> )
<del> # pylint: enable=protected-access
<ide>
<ide> grads_and_vars = tuple(grads_and_vars)
<ide> grads = [g for g, _ in grads_and_vars]
<ide> def from_config(cls, config, custom_objects=None):
<ide> loss_scale, tf.compat.v1.mixed_precision.FixedLossScale
<ide> ):
<ide> config["dynamic"] = False
<del> config[
<del> "initial_scale"
<del> ] = (
<del> loss_scale._loss_scale_value
<del> ) # pylint: disable=protected-access
<add> config["initial_scale"] = loss_scale._loss_scale_value
<ide> elif isinstance(
<ide> loss_scale, tf.compat.v1.mixed_precision.DynamicLossScale
<ide> ):
<ide> def clipvalue(self, val):
<ide> self._optimizer.clipvalue = val
<ide>
<ide> def _aggregate_gradients(self, grads_and_vars):
<del> return self._optimizer._aggregate_gradients(
<del> grads_and_vars
<del> ) # pylint: disable=protected-access
<add> return self._optimizer._aggregate_gradients(grads_and_vars)
<ide>
<ide> def _restore_slot_variable(self, slot_name, variable, slot_variable):
<ide> return self._optimizer._restore_slot_variable(
<ide> slot_name,
<del> variable, # pylint: disable=protected-access
<add> variable,
<ide> slot_variable,
<ide> )
<ide>
<ide> def _create_loss_scale_optimizer_from_v1_loss_scale(optimizer, loss_scale):
<ide> optimizer, dynamic=False, initial_scale=loss_scale
<ide> )
<ide> elif isinstance(loss_scale, tf.compat.v1.mixed_precision.FixedLossScale):
<del> ls_val = (
<del> loss_scale._loss_scale_value
<del> ) # pylint: disable=protected-access
<add> ls_val = loss_scale._loss_scale_value
<ide> return LossScaleOptimizer(
<ide> optimizer, dynamic=False, initial_scale=ls_val
<ide> )
<ide><path>keras/mixed_precision/loss_scale_optimizer_test.py
<ide> def testDynamicLossScaleDefaultValues(self, opt_cls):
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> self.assertEqual(self.evaluate(opt.loss_scale), 2**15)
<ide>
<del> # pylint: disable=cell-var-from-loop
<ide> @test_combinations.generate(opt_and_strategy_and_mode_combinations())
<ide> def testClipping(self, opt_cls, strategy_fn, use_tf_function):
<ide> strategy = strategy_fn()
<ide> def testClipping(self, opt_cls, strategy_fn, use_tf_function):
<ide> ) # Var does not change
<ide> self.assertEqual(self.evaluate(opt.loss_scale), 4)
<ide>
<del> # pylint: enable=cell-var-from-loop
<del>
<ide> @test_combinations.generate(opt_and_strategy_and_mode_combinations())
<ide> def testDynamicUpdate(self, opt_cls, strategy_fn, use_tf_function):
<ide> with strategy_fn().scope() as strategy:
<ide> def testHyperParametersExposed(self):
<ide> opt = adam.Adam(learning_rate=1.0, beta_1=0.5, beta_2=0.9)
<ide> lso = loss_scale_optimizer.LossScaleOptimizer(opt)
<ide> # Force hyperparameters to be created
<del> opt.lr # pylint: disable=pointless-statement
<add> opt.lr
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide>
<ide> self.assertEqual(self.evaluate(lso.beta_1), 0.5)
<ide> def testArbitraryAttributesNotExposed(self, opt_cls):
<ide> AttributeError,
<ide> "'LossScaleOptimizer(V3)?' object has no attribute 'nesterov'",
<ide> ):
<del> lso.nesterov # pylint: disable=pointless-statement
<add> lso.nesterov
<ide>
<ide> lso.nesterov = True
<ide> self.assertTrue(lso.nesterov)
<ide> def get_config(self):
<ide> opt = create_lso(opt)
<ide>
<ide> # Force hyperparameters to be created
<del> opt.learning_rate # pylint: disable=pointless-statement
<add> opt.learning_rate
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide>
<ide> self.assertEqual(self.evaluate(opt.learning_rate), 1.0)
<ide> def testGetConfigFixed(self, config_version):
<ide> opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
<ide>
<ide> # Force hyperparameters to be created
<del> opt.learning_rate # pylint: disable=pointless-statement
<add> opt.learning_rate
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide>
<ide> # Test attributes on the optimizer
<ide> def testGetConfigDynamic(self, config_version):
<ide> opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
<ide>
<ide> # Force hyperparameters to be created
<del> opt.learning_rate # pylint: disable=pointless-statement
<add> opt.learning_rate
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide>
<ide> # Test attributes on the optimizer
<ide> def testSerializationWithBuiltInOptimizer(self, lso_type):
<ide> config = optimizers.serialize(opt)
<ide> opt = optimizers.deserialize(config)
<ide> # Force hyperparameters to be created
<del> opt.learning_rate # pylint: disable=pointless-statement
<add> opt.learning_rate
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide>
<ide> self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
<ide> def __init__(self, *args, **kwargs):
<ide> custom_objects = {"MySGD": MySGD}
<ide> opt = optimizers.deserialize(config, custom_objects=custom_objects)
<ide> # Force hyperparameters to be created
<del> opt.learning_rate # pylint: disable=pointless-statement
<add> opt.learning_rate
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide>
<ide> self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
<ide><path>keras/mixed_precision/policy.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.mixed_precision.Policy", v1=[])
<ide> class Policy:
<ide> """A dtype policy for a Keras layer.
<ide> def _policy_equivalent_to_dtype(policy):
<ide> """
<ide> # We use type() instead of isinstance because a subclass of Policy is never
<ide> # equivalent to a dtype.
<del> return type(policy) == Policy and ( # pylint: disable=unidiomatic-typecheck
<add> return type(policy) == Policy and (
<ide> policy.name == "_infer" or _is_convertible_to_dtype(policy.name)
<ide> )
<ide>
<ide><path>keras/models/__init__.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Keras models API."""
<del># pylint: disable=g-bad-import-order
<add>
<ide>
<ide> from keras.engine.functional import Functional
<ide> from keras.engine.sequential import Sequential
<ide><path>keras/models/cloning.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<add>
<ide> """Code for model cloning, plus model-related API entries."""
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide> # API entries importable from `keras.models`:
<del>Model = training.Model # pylint: disable=invalid-name
<del>Sequential = sequential.Sequential # pylint: disable=invalid-name
<add>Model = training.Model
<add>Sequential = sequential.Sequential
<ide>
<ide>
<ide> # Callable used to clone a layer with weights preserved.
<ide> def _reset_build_compile_trackers(model):
<ide> model.inputs = None
<ide> model.outputs = None
<ide> # Reset compile state
<del> model._is_compiled = False # pylint:disable=protected-access
<add> model._is_compiled = False
<ide> if not tf.compat.v1.executing_eagerly_outside_functions():
<ide> model._v1_compile_was_called = False
<ide> model.optimizer = None
<ide> def clone_and_build_model(
<ide> )
<ide>
<ide> if compile_clone:
<del> compile_args = (
<del> model._get_compile_args()
<del> ) # pylint: disable=protected-access
<add> compile_args = model._get_compile_args()
<ide> # Allows this method to be robust to switching graph and eager classes.
<ide> model._get_compile_args = lambda: compile_args
<ide>
<ide><path>keras/models/sharpness_aware_minimization.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=g-classes-have-attributes
<del>
<ide>
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.models.experimental.SharpnessAwareMinimization", v1=[])
<ide><path>keras/optimizers/__init__.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=g-bad-import-order
<add>
<add>
<ide> """Built-in optimizer classes.
<ide>
<ide> For more examples see the base class `tf.keras.optimizers.Optimizer`.
<ide> def deserialize(config, custom_objects=None):
<ide> # loss_scale_optimizer has a direct dependency of optimizer, import here
<ide> # rather than top to avoid the cyclic dependency.
<ide> from keras.mixed_precision import (
<del> loss_scale_optimizer, # pylint: disable=g-import-not-at-top
<add> loss_scale_optimizer,
<ide> )
<ide>
<ide> all_classes = {
<ide><path>keras/optimizers/optimizer_experimental/adadelta.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.Adadelta", v1=[])
<ide> class Adadelta(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/adagrad.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.Adagrad", v1=[])
<ide> class Adagrad(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/adam.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.Adam", v1=[])
<ide> class Adam(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/adamax.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.Adamax", v1=[])
<ide> class Adamax(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/adamw.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.AdamW", v1=[])
<ide> class AdamW(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/ftrl.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.Ftrl", v1=[])
<ide> class Ftrl(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/nadam.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.Nadam", v1=[])
<ide> class Nadam(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/optimizer.py
<ide> def _var_key(self, variable):
<ide> # Get the distributed variable if it exists.
<ide> # TODO(b/199214315): replace _unique_id with ref() after fixing ref()
<ide> # issues on AggregatingVariable.
<del> return variable._unique_id # pylint: disable=protected-access
<add> return variable._unique_id
<ide>
<ide> @abc.abstractmethod
<ide> def update_step(self, gradient, variable):
<ide> def from_config(cls, config):
<ide> **kwargs: keyword arguments only used for backward compatibility."""
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.experimental.Optimizer", v1=[])
<ide> class Optimizer(_BaseOptimizer):
<ide> """Abstract optimizer base class.
<ide> def add_variable_from_reference(
<ide>
<ide> def _var_key(self, variable):
<ide> """Get a unique identifier of the given variable."""
<del> # pylint: disable=protected-access
<add>
<ide> # Get the distributed variable if it exists.
<ide> # TODO(b/197554203): replace _distributed_container() with a public api.
<ide> if hasattr(variable, "_distributed_container"):
<ide><path>keras/optimizers/optimizer_experimental/optimizer_pss_test.py
<ide>
<ide> adadelta_fn = tf.__internal__.test.combinations.NamedObject(
<ide> "adadelta",
<del> lambda: adadelta.Adadelta( # pylint: disable=g-long-lambda
<add> lambda: adadelta.Adadelta(
<ide> 0.002, use_ema=True, ema_overwrite_frequency=None
<ide> ),
<ide> )
<ide> )
<ide> sgd_fn = tf.__internal__.test.combinations.NamedObject(
<ide> "sgdaverage",
<del> lambda: sgd.SGD( # pylint: disable=g-long-lambda
<del> 0.002, use_ema=True, ema_overwrite_frequency=1
<del> ),
<add> lambda: sgd.SGD(0.002, use_ema=True, ema_overwrite_frequency=1),
<ide> )
<ide>
<ide> OPTIMIZER_FN = [
<ide><path>keras/optimizers/optimizer_experimental/optimizer_test.py
<ide>
<ide> adadelta_new_fn = tf.__internal__.test.combinations.NamedObject(
<ide> "experimentaladadelta",
<del> lambda: adadelta_new.Adadelta( # pylint: disable=g-long-lambda
<add> lambda: adadelta_new.Adadelta(
<ide> 0.002, use_ema=True, ema_overwrite_frequency=None
<ide> ),
<ide> )
<ide> )
<ide> sgd_new_fn = tf.__internal__.test.combinations.NamedObject(
<ide> "experimentalsgdaverage",
<del> lambda: sgd_new.SGD( # pylint: disable=g-long-lambda
<del> 0.002, use_ema=True, ema_overwrite_frequency=1
<del> ),
<add> lambda: sgd_new.SGD(0.002, use_ema=True, ema_overwrite_frequency=1),
<ide> )
<ide>
<ide> OPTIMIZER_FN = [
<ide><path>keras/optimizers/optimizer_experimental/rmsprop.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.RMSprop", v1=[])
<ide> class RMSprop(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_experimental/sgd.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @generic_utils.register_keras_serializable()
<ide> @keras_export("keras.optimizers.experimental.SGD", v1=[])
<ide> class SGD(optimizer.Optimizer):
<ide><path>keras/optimizers/optimizer_v1.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> """Legacy v1 optimizer classes.
<ide>
<ide> For more examples see the base class `tf.compat.v1.keras.optimizers.Optimizer`.
<ide> def get_config(self):
<ide> class TFOptimizer(Optimizer, tf.__internal__.tracking.Trackable):
<ide> """Wrapper class for native TensorFlow optimizers."""
<ide>
<del> def __init__(
<del> self, optimizer, iterations=None
<del> ): # pylint: disable=super-init-not-called
<add> def __init__(self, optimizer, iterations=None):
<ide> self.optimizer = optimizer
<ide> self._track_trackable(optimizer, name="optimizer")
<ide> if iterations is None:
<ide><path>keras/optimizers/optimizer_v2/adadelta.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide>
<del>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.Adadelta")
<ide> class Adadelta(optimizer_v2.OptimizerV2):
<ide> r"""Optimizer that implements the Adadelta algorithm.
<ide><path>keras/optimizers/optimizer_v2/adadelta_test.py
<ide> def doTestBasic(self, use_resource=False, use_callable_params=False):
<ide> learning_rate=lambda: lr,
<ide> rho=lambda: rho,
<ide> epsilon=epsilon,
<del> ) # pylint: disable=cell-var-from-loop
<add> )
<ide> else:
<ide> adadelta_opt = adadelta.Adadelta(
<ide> learning_rate=lr, rho=rho, epsilon=epsilon
<ide> def testMinimizeSparseResourceVariable(self):
<ide> def loss():
<ide> pred = tf.matmul(
<ide> tf.compat.v1.nn.embedding_lookup([var0], [0]), x
<del> ) # pylint: disable=cell-var-from-loop
<add> )
<ide> return pred * pred
<ide>
<ide> sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize(
<ide><path>keras/optimizers/optimizer_v2/adagrad.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide>
<del>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.Adagrad")
<ide> class Adagrad(optimizer_v2.OptimizerV2):
<ide> r"""Optimizer that implements the Adagrad algorithm.
<ide><path>keras/optimizers/optimizer_v2/adagrad_test.py
<ide> def testMinimizeSparseResourceVariable(self):
<ide> def loss():
<ide> pred = tf.matmul(
<ide> tf.compat.v1.nn.embedding_lookup([var0], [0]), x
<del> ) # pylint: disable=cell-var-from-loop
<add> )
<ide> return pred * pred
<ide>
<ide> sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
<ide> def testSparseRepeatedIndicesByEmbeddingLookUp(self):
<ide> with tf.Graph().as_default():
<ide> for dtype in _DATA_TYPES:
<ide> var_repeated = tf.Variable([1.0, 2.0], dtype=dtype)
<del> loss_repeated = (
<del> lambda: tf.reduce_sum( # pylint: disable=g-long-lambda
<del> tf.compat.v1.nn.embedding_lookup(var_repeated, [0, 0])
<del> )
<del> ) # pylint: disable=cell-var-from-loop
<add> loss_repeated = lambda: tf.reduce_sum(
<add> tf.compat.v1.nn.embedding_lookup(var_repeated, [0, 0])
<add> )
<ide> var_aggregated = tf.Variable([1.0, 2.0], dtype=dtype)
<del> loss_aggregated = (
<del> lambda: 2
<del> * tf.reduce_sum( # pylint: disable=g-long-lambda
<del> tf.compat.v1.nn.embedding_lookup(var_aggregated, [0])
<del> )
<del> ) # pylint: disable=cell-var-from-loop
<add> loss_aggregated = lambda: 2 * tf.reduce_sum(
<add> tf.compat.v1.nn.embedding_lookup(var_aggregated, [0])
<add> )
<ide> update_op_repeated = adagrad.Adagrad(2.0).minimize(
<ide> loss_repeated, var_list=[var_repeated]
<ide> )
<ide><path>keras/optimizers/optimizer_v2/adam.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.Adam")
<ide> class Adam(optimizer_v2.OptimizerV2):
<ide> r"""Optimizer that implements the Adam algorithm.
<ide><path>keras/optimizers/optimizer_v2/adam_test.py
<ide> def testSparseDevicePlacement(self):
<ide> # placed on it (i.e. they have GPU kernels).
<ide> var = tf.Variable([[1.0], [2.0]])
<ide> indices = tf.constant([0, 1], dtype=index_dtype)
<del> g_sum = lambda: tf.reduce_sum(
<del> tf.gather(var, indices)
<del> ) # pylint: disable=cell-var-from-loop
<add> g_sum = lambda: tf.reduce_sum(tf.gather(var, indices))
<ide> optimizer = adam.Adam(3.0)
<ide> minimize_op = optimizer.minimize(g_sum, var_list=[var])
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> def testSparseDevicePlacement(self):
<ide> # placed on it (i.e. they have GPU kernels).
<ide> var = tf.Variable([[1.0], [2.0]])
<ide> indices = tf.constant([0, 1], dtype=index_dtype)
<del> g_sum = lambda: tf.reduce_sum(
<del> tf.gather(var, indices)
<del> ) # pylint: disable=cell-var-from-loop
<add> g_sum = lambda: tf.reduce_sum(tf.gather(var, indices))
<ide> optimizer = adam.NonFusedAdam(3.0)
<ide> minimize_op = optimizer.minimize(g_sum, var_list=[var])
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide><path>keras/optimizers/optimizer_v2/adamax.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.Adamax")
<ide> class Adamax(optimizer_v2.OptimizerV2):
<ide> """Optimizer that implements the Adamax algorithm.
<ide><path>keras/optimizers/optimizer_v2/adamax_test.py
<ide> def testResourceSparse(self):
<ide> for dtype in [tf.half, tf.float32, tf.float64]:
<ide> with tf.Graph().as_default(), self.cached_session():
<ide> # Initialize variables for numpy implementation.
<del> zero_slots = lambda: np.zeros(
<del> (3), dtype=dtype.as_numpy_dtype
<del> ) # pylint: disable=cell-var-from-loop
<add> zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)
<ide> m0, v0, m1, v1 = (
<ide> zero_slots(),
<ide> zero_slots(),
<ide> def testSparseDevicePlacement(self):
<ide> # placed on it (i.e. they have GPU kernels).
<ide> var = tf.Variable([[1.0], [2.0]])
<ide> indices = tf.constant([0, 1], dtype=index_dtype)
<del> g_sum = lambda: tf.reduce_sum(
<del> tf.gather(var, indices)
<del> ) # pylint: disable=cell-var-from-loop
<add> g_sum = lambda: tf.reduce_sum(tf.gather(var, indices))
<ide> optimizer = adamax.Adamax(3.0)
<ide> minimize_op = optimizer.minimize(g_sum, var_list=[var])
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide><path>keras/optimizers/optimizer_v2/ftrl.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Ftrl-proximal optimizer implementation."""
<del># pylint: disable=g-bad-import-order
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.Ftrl")
<ide> class Ftrl(optimizer_v2.OptimizerV2):
<ide> r"""Optimizer that implements the FTRL algorithm.
<ide><path>keras/optimizers/optimizer_v2/ftrl_test.py
<ide> def testMinimizeSparseResourceVariable(self):
<ide> def loss():
<ide> pred = tf.matmul(
<ide> tf.compat.v1.nn.embedding_lookup([var0], [0]), x
<del> ) # pylint: disable=cell-var-from-loop
<add> )
<ide> return pred * pred
<ide>
<ide> sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
<ide><path>keras/optimizers/optimizer_v2/gradient_descent.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """SGD optimizer implementation."""
<del># pylint: disable=g-bad-import-order
<del># pylint: disable=g-classes-have-attributes
<add>
<add>
<ide> import tensorflow.compat.v2 as tf
<ide>
<ide> from keras.optimizers.optimizer_v2 import optimizer_v2
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.SGD")
<ide> class SGD(optimizer_v2.OptimizerV2):
<ide> r"""Gradient descent (with momentum) optimizer.
<ide><path>keras/optimizers/optimizer_v2/gradient_descent_test.py
<ide> def testMinimizeResourceVariable(self):
<ide> var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
<ide> var1 = tf.Variable([3.0], dtype=dtype)
<ide> x = tf.constant([[4.0], [5.0]], dtype=dtype)
<del> loss = (
<del> lambda: tf.matmul(var0, x) + var1
<del> ) # pylint: disable=cell-var-from-loop
<add> loss = lambda: tf.matmul(var0, x) + var1
<ide> sgd = gradient_descent.SGD(1.0)
<ide> sgd_op = sgd.minimize(loss, [var0, var1])
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> def testMinimizeSparseResourceVariable(self):
<ide> def loss():
<ide> pred = tf.matmul(
<ide> tf.compat.v1.nn.embedding_lookup([var0], [0]), x
<del> ) # pylint: disable=cell-var-from-loop
<del> pred += var1 # pylint: disable=cell-var-from-loop
<add> )
<add> pred += var1
<ide> return pred * pred
<ide>
<ide> sgd_op = gradient_descent.SGD(1.0).minimize(loss, [var0, var1])
<ide> def testGradWrtRef(self):
<ide> opt = gradient_descent.SGD(3.0)
<ide> values = [1.0, 3.0]
<ide> vars_ = [tf.Variable([v], dtype=dtype) for v in values]
<del> loss = (
<del> lambda: vars_[0] + vars_[1]
<del> ) # pylint: disable=cell-var-from-loop
<add> loss = lambda: vars_[0] + vars_[1]
<ide> grads_and_vars = opt._compute_gradients(loss, vars_)
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> for grad, _ in grads_and_vars:
<ide> def testNesterovMomentum(self):
<ide> var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
<ide> accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
<ide> accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
<del> loss = (
<del> lambda: 5 * var0 * var0 + 3 * var1
<del> ) # pylint: disable=cell-var-from-loop
<add> loss = lambda: 5 * var0 * var0 + 3 * var1
<ide> mom_op = gradient_descent.SGD(
<ide> learning_rate=2.0, momentum=0.9, nesterov=True
<ide> )
<ide> def testMinimizeSparseResourceVariable(self):
<ide> for dtype in [tf.half, tf.float32, tf.float64]:
<ide> var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
<ide>
<del> # pylint: disable=cell-var-from-loop
<ide> def loss():
<ide> x = tf.constant([[4.0], [5.0]], dtype=dtype)
<ide> pred = tf.matmul(
<ide> tf.compat.v1.nn.embedding_lookup([var0], [0]), x
<ide> )
<ide> return pred * pred
<ide>
<del> # pylint: enable=cell-var-from-loop
<del>
<ide> opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9)
<ide> sgd_op = opt.minimize(loss, [var0])
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide><path>keras/optimizers/optimizer_v2/nadam.py
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.Nadam")
<ide> class Nadam(optimizer_v2.OptimizerV2):
<ide> r"""Optimizer that implements the NAdam algorithm.
<ide> def _prepare_local(self, var_device, var_dtype, apply_state):
<ide>
<ide> apply_state[(var_device, var_dtype)] = dict(
<ide> lr_t=lr_t,
<del> neg_lr_t=-lr_t, # pylint: disable=invalid-unary-operand-type
<add> neg_lr_t=-lr_t,
<ide> epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
<ide> beta_1_t=beta_1_t,
<ide> beta_2_t=beta_2_t,
<ide><path>keras/optimizers/optimizer_v2/optimizer_v2.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Version 2 of class Optimizer."""
<del># pylint: disable=g-bad-name
<ide>
<ide>
<ide> import abc
<ide> def apply_grad_to_update_var(var, grad):
<ide> # If the current context is graph mode or any of the update ops
<ide> # are symbolic then the step update should be carried out under
<ide> # a graph context. (eager updates execute immediately)
<del> with backend._current_graph(
<del> update_ops
<del> ).as_default(): # pylint: disable=protected-access
<add> with backend._current_graph(update_ops).as_default():
<ide> with tf.control_dependencies([tf.group(update_ops)]):
<ide> return self.iterations.assign_add(1, read_value=False)
<ide>
<ide> def _create_slots_for_sharded_variables(self, var_list):
<ide> sharded_vars = set()
<ide> for var in var_list:
<ide> if getattr(var, "_sharded_container", False):
<del> sharded_vars.add(
<del> var._sharded_container()
<del> ) # pylint: disable=protected-access
<add> sharded_vars.add(var._sharded_container())
<ide>
<ide> for sharded_var in sharded_vars:
<ide> sharded_key = _var_key(sharded_var)
<ide> def add_slot(self, var, slot_name, initializer="zeros", shape=None):
<ide> % (
<ide> var._shared_name,
<ide> slot_name,
<del> ), # pylint: disable=protected-access
<add> ),
<ide> dtype=var.dtype,
<ide> trainable=False,
<ide> initial_value=initial_value,
<ide> def _prepare(self, var_list):
<ide> keys = set()
<ide> for var in var_list:
<ide> if isinstance(var, tf.distribute.DistributedValues):
<del> var_devices = var._devices # pylint: disable=protected-access
<add> var_devices = var._devices
<ide> else:
<ide> var_devices = [var.device]
<ide> var_dtype = var.dtype.base_dtype
<ide> def _var_key(var):
<ide> the unique name of the variable.
<ide> """
<ide>
<del> # pylint: disable=protected-access
<ide> # Get the distributed variable if it exists.
<ide> if hasattr(var, "_distributed_container"):
<ide> var = var._distributed_container()
<ide><path>keras/optimizers/optimizer_v2/optimizer_v2_test.py
<ide> def testBasic(self):
<ide> with test_utils.use_gpu():
<ide> var0 = tf.Variable([1.0, 2.0], dtype=dtype)
<ide> var1 = tf.Variable([3.0, 4.0], dtype=dtype)
<del> loss = (
<del> lambda: 5 * var0 + 3 * var1
<del> ) # pylint: disable=cell-var-from-loop
<add> loss = lambda: 5 * var0 + 3 * var1
<ide> sgd = gradient_descent.SGD(3.0)
<ide>
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> def testAdaptiveLearningRate(self):
<ide> var1 = tf.Variable([3.0, 4.0], dtype=dtype)
<ide>
<ide> def loss():
<del> return (
<del> 5 * var0 + 3 * var1
<del> ) # pylint: disable=cell-var-from-loop
<add> return 5 * var0 + 3 * var1
<ide>
<ide> sgd = gradient_descent.SGD(1.0)
<ide>
<ide> def testPrecomputedGradient(self):
<ide> with test_utils.use_gpu():
<ide> var0 = tf.Variable([1.0, 2.0], dtype=dtype)
<ide> var1 = tf.Variable([3.0, 4.0], dtype=dtype)
<del> loss = (
<del> lambda: 5 * var0 + 3 * var1
<del> ) # pylint: disable=cell-var-from-loop
<add> loss = lambda: 5 * var0 + 3 * var1
<ide> grad_loss = tf.constant([42, -42], dtype=dtype)
<ide> sgd = gradient_descent.SGD(3.0)
<ide>
<ide> def testNoGradients(self):
<ide> with test_utils.use_gpu():
<ide> var0 = tf.Variable([1.0, 2.0], dtype=dtype)
<ide> var1 = tf.Variable([3.0, 4.0], dtype=dtype)
<del> loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop
<add> loss = lambda: 5 * var0
<ide> sgd_op = gradient_descent.SGD(3.0)
<ide> with self.assertRaisesRegex(ValueError, "No gradients"):
<ide> # var1 has no gradient
<ide> def testGradientsAsVariables(self):
<ide> with test_utils.use_gpu():
<ide> var0 = tf.Variable([1.0, 2.0], dtype=dtype)
<ide> var1 = tf.Variable([3.0, 4.0], dtype=dtype)
<del> loss = (
<del> lambda: 5 * var0 + 3 * var1
<del> ) # pylint: disable=cell-var-from-loop
<add> loss = lambda: 5 * var0 + 3 * var1
<ide>
<ide> sgd = gradient_descent.SGD(3.0)
<ide> grads_and_vars = sgd._compute_gradients(loss, [var0, var1])
<ide> def gradient_aggregator(grads_and_vars):
<ide> # Simulate an all-reduce where the other replica has zeros for
<ide> # gradients, by dividing each gradient by 2.
<ide> grads = [g for g, _ in grads_and_vars]
<del> vars = [
<del> v for _, v in grads_and_vars
<del> ] # pylint: disable=redefined-builtin
<add> vars = [v for _, v in grads_and_vars]
<ide> all_reduced_grads = [g / 2 for g in grads]
<ide> return list(zip(all_reduced_grads, vars))
<ide>
<ide> def _aggregate_gradients(self, grads_and_vars):
<ide> # Simulate an all-reduce where the other replica has zeros for
<ide> # gradients, by dividing each gradient by 2.
<ide> grads = [g for g, _ in grads_and_vars]
<del> vars = [
<del> v for _, v in grads_and_vars
<del> ] # pylint: disable=redefined-builtin
<add> vars = [v for _, v in grads_and_vars]
<ide> all_reduced_grads = [g / 2 for g in grads]
<ide> return list(zip(all_reduced_grads, vars))
<ide>
<ide> def test_subclass_compat(self, optimizer_class, init_kwargs=None):
<ide> """Ensure that subclassed optimizers without apply_state still work."""
<ide>
<ide> class SubclassedOptimizer(optimizer_class):
<del> def _resource_apply_dense(
<del> self, grad, var
<del> ): # pylint: disable=useless-super-delegation
<add> def _resource_apply_dense(self, grad, var):
<ide> return super()._resource_apply_dense(grad, var)
<ide>
<del> def _resource_apply_sparse(
<del> self, grad, var, indices
<del> ): # pylint: disable=useless-super-delegation
<add> def _resource_apply_sparse(self, grad, var, indices):
<ide> return super()._resource_apply_sparse(grad, var, indices)
<ide>
<ide> init_kwargs = init_kwargs or {}
<ide><path>keras/optimizers/optimizer_v2/rmsprop.py
<ide> # isort: off
<ide> from tensorflow.python.util.tf_export import keras_export
<ide>
<del># pylint: disable=g-classes-have-attributes
<ide>
<del>
<del># pylint: disable=g-classes-have-attributes
<ide> @keras_export("keras.optimizers.RMSprop")
<ide> class RMSprop(optimizer_v2.OptimizerV2):
<ide> r"""Optimizer that implements the RMSprop algorithm.
<ide><path>keras/optimizers/optimizer_v2/rmsprop_test.py
<ide> def testMinimizeSparseResourceVariable(self):
<ide> def loss():
<ide> pred = tf.matmul(
<ide> tf.compat.v1.nn.embedding_lookup([var0], [0]), x
<del> ) # pylint: disable=cell-var-from-loop
<add> )
<ide> return pred * pred
<ide>
<ide> sgd_op = rmsprop.RMSprop(
<ide> def testMinimizeSparseResourceVariableCentered(self):
<ide> def loss():
<ide> pred = tf.matmul(
<ide> tf.compat.v1.nn.embedding_lookup([var0], [0]), x
<del> ) # pylint: disable=cell-var-from-loop
<add> )
<ide> return pred * pred
<ide>
<del> # loss = lambda: pred * pred # pylint:
<add> # loss = lambda: pred * pred
<ide> # disable=cell-var-from-loop
<ide> sgd_op = rmsprop.RMSprop(
<ide> learning_rate=1.0,
<ide><path>keras/premade_models/linear.py
<ide> def call(self, inputs):
<ide> if self.use_bias:
<ide> result = tf.nn.bias_add(result, self.bias)
<ide> if self.activation is not None:
<del> return self.activation(result) # pylint: disable=not-callable
<add> return self.activation(result)
<ide> return result
<ide>
<ide> def get_config(self):
<ide><path>keras/premade_models/wide_deep.py
<ide> def call(self, inputs, training=None):
<ide> else:
<ide> linear_inputs, dnn_inputs = inputs
<ide> linear_output = self.linear_model(linear_inputs)
<del> # pylint: disable=protected-access
<add>
<ide> if self.dnn_model._expects_training_arg:
<ide> if training is None:
<ide> training = backend.learning_phase()
<ide> def _make_train_function(self):
<ide> metrics_tensors = [
<ide> m._call_result
<ide> for m in metrics
<del> if hasattr(
<del> m, "_call_result"
<del> ) # pylint: disable=protected-access
<add> if hasattr(m, "_call_result")
<ide> ]
<ide>
<ide> with backend.name_scope("training"):
<ide><path>keras/preprocessing/image.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=invalid-name
<del># pylint: disable=g-import-not-at-top
<del># pylint: disable=g-classes-have-attributes
<add>
<ide>
<ide> """Utilies for image preprocessing and augmentation.
<ide>
<ide> def set_processing_attrs(
<ide> self.save_format = save_format
<ide> self.interpolation = interpolation
<ide> if subset is not None:
<del> validation_split = (
<del> self.image_data_generator._validation_split
<del> ) # pylint: disable=protected-access
<add> validation_split = self.image_data_generator._validation_split
<ide> if subset == "validation":
<ide> split = (0, validation_split)
<ide> elif subset == "training":
<ide><path>keras/preprocessing/image_test.py
<ide> from keras.utils import image_utils
<ide>
<ide> try:
<del> import PIL # pylint:disable=g-import-not-at-top
<add> import PIL
<ide> except ImportError:
<ide> PIL = None
<ide>
<ide><path>keras/preprocessing/sequence.py
<ide> with sequences. See the [tf.data guide](https://www.tensorflow.org/guide/data)
<ide> for more details.
<ide> """
<del># pylint: disable=invalid-name
<del># pylint: disable=g-classes-have-attributes
<ide>
<ide>
<ide> import json
<ide><path>keras/preprocessing/sequence_test.py
<ide> def test_TimeSeriesGenerator_doesnt_miss_any_sample(self):
<ide>
<ide> self.assertEqual(expected, actual)
<ide>
<del> if len(g) > 0: # pylint: disable=g-explicit-length-test
<add> if len(g) > 0:
<ide> # All elements in range(length, 10) should be used as current
<ide> # step
<ide> expected = np.arange(length, 10).reshape(-1, 1)
<ide><path>keras/preprocessing/text.py
<ide> and [preprocessing layer guide]
<ide> (https://www.tensorflow.org/guide/keras/preprocessing_layers).
<ide> """
<del># pylint: disable=invalid-name
<del># pylint: disable=g-classes-have-attributes
<ide>
<ide>
<ide> import collections
<ide><path>keras/regularizers.py
<ide> # limitations under the License.
<ide> # ==============================================================================
<ide> """Built-in regularizers."""
<del># pylint: disable=g-classes-have-attributes
<del># pylint: disable=invalid-name
<add>
<ide>
<ide> import math
<ide>
<ide> class Regularizer:
<ide>
<ide> >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
<ide> ... class L2Regularizer(tf.keras.regularizers.Regularizer):
<del> ... def __init__(self, l2=0.): # pylint: disable=redefined-outer-name
<add> ... def __init__(self, l2=0.):
<ide> ... self.l2 = l2
<ide> ...
<ide> ... def __call__(self, x):
<ide> class L1L2(Regularizer):
<ide> l2: Float; L2 regularization factor.
<ide> """
<ide>
<del> def __init__(self, l1=0.0, l2=0.0): # pylint: disable=redefined-outer-name
<add> def __init__(self, l1=0.0, l2=0.0):
<ide> # The default value for l1 and l2 are different from the value in l1_l2
<ide> # for backward compatibility reason. Eg, L1L2(l2=0.1) will only have l2
<ide> # and no l1 penalty.
<ide> class L1(Regularizer):
<ide> l1: Float; L1 regularization factor.
<ide> """
<ide>
<del> def __init__(
<del> self, l1=0.01, **kwargs
<del> ): # pylint: disable=redefined-outer-name
<add> def __init__(self, l1=0.01, **kwargs):
<ide> l1 = kwargs.pop("l", l1) # Backwards compatibility
<ide> if kwargs:
<ide> raise TypeError(f"Argument(s) not recognized: {kwargs}")
<ide> class L2(Regularizer):
<ide> l2: Float; L2 regularization factor.
<ide> """
<ide>
<del> def __init__(
<del> self, l2=0.01, **kwargs
<del> ): # pylint: disable=redefined-outer-name
<add> def __init__(self, l2=0.01, **kwargs):
<ide> l2 = kwargs.pop("l", l2) # Backwards compatibility
<ide> if kwargs:
<ide> raise TypeError(f"Argument(s) not recognized: {kwargs}")
<ide> def get_config(self):
<ide>
<ide>
<ide> @keras_export("keras.regularizers.l1_l2")
<del>def l1_l2(l1=0.01, l2=0.01): # pylint: disable=redefined-outer-name
<add>def l1_l2(l1=0.01, l2=0.01):
<ide> r"""Create a regularizer that applies both L1 and L2 penalties.
<ide>
<ide> The L1 regularization penalty is computed as:
<ide><path>keras/saving/experimental/saving_lib_test.py
<ide> def test_saving_after_compile_but_before_fit(self):
<ide> @keras.utils.generic_utils.register_keras_serializable(
<ide> package="my_custom_package"
<ide> )
<del> def my_mean_squared_error(
<del> y_true, y_pred
<del> ): # pylint: disable=redefined-outer-name
<add> def my_mean_squared_error(y_true, y_pred):
<ide> """Function-local `mean_squared_error`."""
<ide> return backend.mean(
<ide> tf.math.squared_difference(y_pred, y_true), axis=-1
<ide><path>keras/saving/hdf5_format.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<add>
<ide> """Functions for saving and loading a Keras Model from HDF5 format."""
<ide>
<ide> import json
<ide>
<ide> # TODO(b/134426265): Switch back to single-quotes to match the rest of the file
<ide> # once the issue with copybara is fixed.
<del># pylint:disable=g-inconsistent-quotes
<add>
<ide> sequential_lib = LazyLoader(
<ide> "sequential_lib", globals(), "keras.engine.sequential"
<ide> )
<del># pylint:enable=g-inconsistent-quotes
<ide>
<ide>
<ide> def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):
<ide> def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):
<ide> f.close()
<ide>
<ide>
<del>def load_model_from_hdf5(
<del> filepath, custom_objects=None, compile=True
<del>): # pylint: disable=redefined-builtin
<add>def load_model_from_hdf5(filepath, custom_objects=None, compile=True):
<ide> """Loads a model saved via `save_model_to_hdf5`.
<ide>
<ide> Args:
<ide><path>keras/saving/losses_serialization_test.py
<ide> from keras.utils import losses_utils
<ide>
<ide> try:
<del> import h5py # pylint:disable=g-import-not-at-top
<add> import h5py
<ide> except ImportError:
<ide> h5py = None
<ide>
<ide><path>keras/saving/metrics_serialization_test.py
<ide> from keras.utils import generic_utils
<ide>
<ide> try:
<del> import h5py # pylint:disable=g-import-not-at-top
<add> import h5py
<ide> except ImportError:
<ide> h5py = None
<ide>
<ide><path>keras/saving/model_config.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide> # ==============================================================================
<del># pylint: disable=protected-access
<add>
<ide> """Functions that save the model's config into different formats."""
<ide>
<ide> # isort: off
<ide> def model_from_config(config, custom_objects=None):
<ide> f"Received: config={config}. Did you meant to use "
<ide> "`Sequential.from_config(config)`?"
<ide> )
<del> from keras.layers import deserialize # pylint: disable=g-import-not-at-top
<add> from keras.layers import deserialize
<ide>
<ide> return deserialize(config, custom_objects=custom_objects)
<ide>
<ide> def model_from_json(json_string, custom_objects=None):
<ide> A Keras model instance (uncompiled).
<ide> """
<ide> from keras.layers import (
<del> deserialize_from_json, # pylint: disable=g-import-not-at-top
<add> deserialize_from_json,
<ide> )
<ide>
<ide> return deserialize_from_json(json_string, custom_objects=custom_objects) | 300 |
Mixed | Ruby | link official taps automatically | e7b369273a9b23f0674491fe33a8d3aaf197c292 | <ide><path>Library/Homebrew/cmd/update-report.rb
<ide> require "cleanup"
<ide> require "description_cache_store"
<ide> require "cli/parser"
<del>require "completions"
<ide>
<ide> module Homebrew
<ide> extend T::Sig
<ide> def update_report
<ide> puts "Already up-to-date." unless args.quiet?
<ide> end
<ide>
<del> if Completions.read_completions_option.empty?
<del> ohai "Homebrew completions are unlinked by default!"
<del> puts <<~EOS
<del> To opt-in to automatically linking Homebrew shell competion files, run:
<del> brew completions link
<del> Then, follow the directions at #{Formatter.url("https://docs.brew.sh/Shell-Completion")}
<del> EOS
<del> end
<del>
<ide> Commands.rebuild_commands_completion_list
<ide> link_completions_manpages_and_docs
<ide> Tap.each(&:link_completions_and_manpages)
<ide> def install_core_tap_if_necessary
<ide>
<ide> def link_completions_manpages_and_docs(repository = HOMEBREW_REPOSITORY)
<ide> command = "brew update"
<del>
<del> Completions.link_if_allowed! command: command
<add> Utils::Link.link_completions(repository, command)
<ide> Utils::Link.link_manpages(repository, command)
<ide> Utils::Link.link_docs(repository, command)
<ide> rescue => e
<ide><path>Library/Homebrew/completions.rb
<ide> module Completions
<ide>
<ide> module_function
<ide>
<del> sig { params(command: String).void }
<del> def link_if_allowed!(command: "brew completions link")
<del> if link_completions?
<del> link! command: command
<del> else
<del> unlink!
<del> end
<del> end
<del>
<del> sig { params(command: String).void }
<del> def link!(command: "brew completions link")
<add> sig { void }
<add> def link!
<ide> write_completions_option "yes"
<del> Utils::Link.link_completions HOMEBREW_REPOSITORY, command
<add> Tap.each do |tap|
<add> Utils::Link.link_completions tap.path, "brew completions link"
<add> end
<ide> end
<ide>
<ide> sig { void }
<ide> def unlink!
<ide> write_completions_option "no"
<del> Utils::Link.unlink_completions HOMEBREW_REPOSITORY
<add> Tap.each do |tap|
<add> next if tap.official?
<add>
<add> Utils::Link.unlink_completions tap.path
<add> end
<ide> end
<ide>
<ide> sig { returns(T::Boolean) }
<ide> def link_completions?
<ide> read_completions_option == "yes"
<ide> end
<ide>
<del> sig { returns(String) }
<del> def read_completions_option
<add> sig { returns(T::Boolean) }
<add> def completions_to_link?
<add> shells = %w[bash fish zsh]
<add> Tap.each do |tap|
<add> next if tap.official?
<add>
<add> shells.each do |shell|
<add> return true if (tap.path/"completions/#{shell}").exist?
<add> end
<add> end
<add>
<add> false
<add> end
<add>
<add> sig { params(option: String).returns(String) }
<add> def read_completions_option(option: "linkcompletions")
<ide> HOMEBREW_REPOSITORY.cd do
<del> Utils.popen_read("git", "config", "--get", "homebrew.linkcompletions").chomp
<add> Utils.popen_read("git", "config", "--get", "homebrew.#{option}").chomp
<ide> end
<ide> end
<ide>
<del> sig { params(state: String).void }
<del> def write_completions_option(state)
<add> sig { params(state: String, option: String).void }
<add> def write_completions_option(state, option: "linkcompletions")
<ide> HOMEBREW_REPOSITORY.cd do
<del> T.unsafe(self).safe_system "git", "config", "--replace-all", "homebrew.linkcompletions", state.to_s
<add> T.unsafe(self).safe_system "git", "config", "--replace-all", "homebrew.#{option}", state.to_s
<ide> end
<ide> end
<add>
<add> sig { void }
<add> def show_completions_message_if_needed
<add> return if read_completions_option(option: "completionsmessageshown") == "yes"
<add> return unless completions_to_link?
<add>
<add> T.unsafe(self).ohai "Homebrew completions for external commands are unlinked by default!"
<add> T.unsafe(self).puts <<~EOS
<add> To opt-in to automatically linking Homebrew shell competion files, run:
<add> brew completions link
<add> Then, follow the directions at #{Formatter.url("https://docs.brew.sh/Shell-Completion")}
<add> EOS
<add>
<add> write_completions_option("yes", option: "completionsmessageshown")
<add> end
<ide> end
<ide><path>Library/Homebrew/tap.rb
<ide> # frozen_string_literal: true
<ide>
<ide> require "commands"
<add>require "completions"
<ide> require "extend/cachable"
<ide> require "description_cache_store"
<ide>
<ide> def install(full_clone: true, quiet: false, clone_target: nil, force_auto_update
<ide> def link_completions_and_manpages
<ide> command = "brew tap --repair"
<ide> Utils::Link.link_manpages(path, command)
<del> Utils::Link.link_completions(path, command)
<add>
<add> Completions.show_completions_message_if_needed
<add> if official? || Completions.link_completions?
<add> Utils::Link.link_completions(path, command)
<add> else
<add> Utils::Link.unlink_completions(path)
<add> end
<ide> end
<ide>
<ide> # Uninstall this {Tap}.
<ide><path>Library/Homebrew/test/cmd/completions_spec.rb
<ide> .to output(/Completions are linked/).to_stdout
<ide> .and not_to_output.to_stderr
<ide> .and be_a_success
<del>
<del> brew "completions", "unlink"
<del> expect { brew "completions" }
<del> .to output(/Completions are not linked/).to_stdout
<del> .and not_to_output.to_stderr
<del> .and be_a_success
<ide> end
<ide> end
<ide><path>Library/Homebrew/test/tap_spec.rb
<ide> def setup_completion(link:)
<ide> HOMEBREW_REPOSITORY.cd do
<ide> system "git", "init"
<ide> system "git", "config", "--replace-all", "homebrew.linkcompletions", link
<add> system "git", "config", "--replace-all", "homebrew.completionsmessageshown", "yes"
<ide> end
<ide> end
<ide>
<ide> def setup_completion(link:)
<ide> (HOMEBREW_PREFIX/"share").rmtree if (HOMEBREW_PREFIX/"share").exist?
<ide> end
<ide>
<del> specify "#link_completions_and_manpages when completions are enabled" do
<add> specify "#link_completions_and_manpages when completions are enabled for non-official tap" do
<ide> setup_tap_files
<ide> setup_git_repo
<ide> setup_completion link: "yes"
<del> tap = described_class.new("Homebrew", "baz")
<add> tap = described_class.new("NotHomebrew", "baz")
<ide> tap.install clone_target: subject.path/".git"
<ide> (HOMEBREW_PREFIX/"share/man/man1/brew-tap-cmd.1").delete
<ide> (HOMEBREW_PREFIX/"etc/bash_completion.d/brew-tap-cmd").delete
<ide> def setup_completion(link:)
<ide> (HOMEBREW_PREFIX/"share").rmtree if (HOMEBREW_PREFIX/"share").exist?
<ide> end
<ide>
<del> specify "#link_completions_and_manpages when completions are disabled" do
<add> specify "#link_completions_and_manpages when completions are disabled for non-official tap" do
<ide> setup_tap_files
<ide> setup_git_repo
<ide> setup_completion link: "no"
<del> tap = described_class.new("Homebrew", "baz")
<add> tap = described_class.new("NotHomebrew", "baz")
<ide> tap.install clone_target: subject.path/".git"
<ide> (HOMEBREW_PREFIX/"share/man/man1/brew-tap-cmd.1").delete
<ide> tap.link_completions_and_manpages
<ide> def setup_completion(link:)
<ide> (HOMEBREW_PREFIX/"share").rmtree if (HOMEBREW_PREFIX/"share").exist?
<ide> end
<ide>
<add> specify "#link_completions_and_manpages when completions are enabled for official tap" do
<add> setup_tap_files
<add> setup_git_repo
<add> setup_completion link: "no"
<add> tap = described_class.new("Homebrew", "baz")
<add> tap.install clone_target: subject.path/".git"
<add> (HOMEBREW_PREFIX/"share/man/man1/brew-tap-cmd.1").delete
<add> (HOMEBREW_PREFIX/"etc/bash_completion.d/brew-tap-cmd").delete
<add> (HOMEBREW_PREFIX/"share/zsh/site-functions/_brew-tap-cmd").delete
<add> (HOMEBREW_PREFIX/"share/fish/vendor_completions.d/brew-tap-cmd.fish").delete
<add> tap.link_completions_and_manpages
<add> expect(HOMEBREW_PREFIX/"share/man/man1/brew-tap-cmd.1").to be_a_file
<add> expect(HOMEBREW_PREFIX/"etc/bash_completion.d/brew-tap-cmd").to be_a_file
<add> expect(HOMEBREW_PREFIX/"share/zsh/site-functions/_brew-tap-cmd").to be_a_file
<add> expect(HOMEBREW_PREFIX/"share/fish/vendor_completions.d/brew-tap-cmd.fish").to be_a_file
<add> tap.uninstall
<add> ensure
<add> (HOMEBREW_PREFIX/"etc").rmtree if (HOMEBREW_PREFIX/"etc").exist?
<add> (HOMEBREW_PREFIX/"share").rmtree if (HOMEBREW_PREFIX/"share").exist?
<add> end
<add>
<ide> specify "#config" do
<ide> setup_git_repo
<ide>
<ide><path>Library/Homebrew/utils/link.rb
<ide> # typed: true
<ide> # frozen_string_literal: true
<ide>
<del>require "completions"
<del>
<ide> module Utils
<ide> # Helper functions for creating symlinks.
<ide> #
<ide> def unlink_manpages(path)
<ide> end
<ide>
<ide> def link_completions(path, command)
<del> unless Completions.link_completions?
<del> unlink_completions path
<del> return
<del> end
<del>
<ide> link_src_dst_dirs(path/"completions/bash", HOMEBREW_PREFIX/"etc/bash_completion.d", command)
<ide> link_src_dst_dirs(path/"completions/zsh", HOMEBREW_PREFIX/"share/zsh/site-functions", command)
<ide> link_src_dst_dirs(path/"completions/fish", HOMEBREW_PREFIX/"share/fish/vendor_completions.d", command)
<ide><path>docs/Shell-Completion.md
<ide> Homebrew comes with completion definitions for the `brew` command. Some packages
<ide>
<ide> `zsh`, `bash` and `fish` are currently supported.
<ide>
<del>Shell completions for built-in Homebrew commands are not automatically installed. To opt-in to using our completions, they need to be linked to `HOMEBREW_PREFIX` by running `brew completions link`.
<del>
<ide> You must then configure your shell to enable its completion support. This is because the Homebrew-managed completions are stored under `HOMEBREW_PREFIX` which your system shell may not be aware of, and since it is difficult to automatically configure `bash` and `zsh` completions in a robust manner, the Homebrew installer does not do it for you.
<ide>
<add>Shell completions for external Homebrew commands are not automatically installed. To opt-in to using completions for external commands (if provided), they need to be linked to `HOMEBREW_PREFIX` by running `brew completions link`.
<add>
<ide> ## Configuring Completions in `bash`
<ide>
<ide> To make Homebrew's completions available in `bash`, you must source the definitions as part of your shell's startup. Add the following to your `~/.bash_profile` (or, if it doesn't exist, `~/.profile`): | 7 |
Javascript | Javascript | add modulegraph argument to comparators | 4dfe88edb02987d2c928983cc6d7f18ca3879985 | <ide><path>lib/Chunk.js
<ide> const { compareModulesById } = require("./util/comparators");
<ide> /** @typedef {import("webpack-sources").Source} Source */
<ide> /** @typedef {import("./ChunkGraph")} ChunkGraph */
<ide> /** @typedef {import("./ChunkGroup")} ChunkGroup */
<add>/** @typedef {import("./Compilation")} Compilation */
<ide> /** @typedef {import("./Module")} Module */
<add>/** @typedef {import("./ModuleGraph")} ModuleGraph */
<ide> /** @typedef {import("./ModuleReason")} ModuleReason */
<ide> /** @typedef {import("./util/createHash").Hash} Hash */
<ide>
<ide> class Chunk {
<ide>
<ide> /**
<ide> * @param {Hash} hash hash (will be modified)
<del> * @param {ChunkGraph} chunkGraph the chunk graph
<add> * @param {Compilation} compilation the compilation
<ide> * @returns {void}
<ide> */
<del> updateHash(hash, chunkGraph) {
<add> updateHash(hash, compilation) {
<ide> hash.update(`${this.id} `);
<ide> hash.update(this.ids ? this.ids.join(",") : "");
<ide> hash.update(`${this.name || ""} `);
<del> for (const m of chunkGraph.getOrderedChunkModulesIterable(
<add> for (const m of compilation.chunkGraph.getOrderedChunkModulesIterable(
<ide> this,
<del> compareModulesById
<add> compareModulesById(compilation.moduleGraph)
<ide> )) {
<ide> hash.update(m.hash);
<ide> }
<del> const entryModules = chunkGraph.getChunkEntryModulesWithChunkGroupIterable(
<add> const entryModules = compilation.chunkGraph.getChunkEntryModulesWithChunkGroupIterable(
<ide> this
<ide> );
<ide> for (const [m, chunkGroup] of entryModules) {
<ide> class Chunk {
<ide> }
<ide>
<ide> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<ide> * @param {ChunkGraph} chunkGraph the chunk graph
<ide> * @returns {Record<string, Set<TODO>[]>} a record object of names to lists of child ids(?)
<ide> */
<del> getChildIdsByOrders(chunkGraph) {
<add> getChildIdsByOrders(moduleGraph, chunkGraph) {
<ide> const lists = new Map();
<ide> for (const group of this.groupsIterable) {
<ide> if (group.chunks[group.chunks.length - 1] === this) {
<ide> class Chunk {
<ide> list.sort((a, b) => {
<ide> const cmp = b.order - a.order;
<ide> if (cmp !== 0) return cmp;
<del> return a.group.compareTo(chunkGraph, b.group);
<add> return a.group.compareTo(moduleGraph, chunkGraph, b.group);
<ide> });
<ide> result[name] = Array.from(
<ide> list.reduce((set, item) => {
<ide> class Chunk {
<ide> return result;
<ide> }
<ide>
<del> getChildIdsByOrdersMap(chunkGraph, includeDirectChildren) {
<add> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<add> * @param {ChunkGraph} chunkGraph the chunk graph
<add> * @param {boolean=} includeDirectChildren include direct children (by default only children of async children are included)
<add> * @returns {Record<string|number, Record<string, Set<TODO>[]>>} a record object of names to lists of child ids(?) by chunk id
<add> */
<add> getChildIdsByOrdersMap(moduleGraph, chunkGraph, includeDirectChildren) {
<ide> const chunkMaps = Object.create(null);
<ide>
<ide> const addChildIdsByOrdersToMap = chunk => {
<del> const data = chunk.getChildIdsByOrders(chunkGraph);
<add> const data = chunk.getChildIdsByOrders(moduleGraph, chunkGraph);
<ide> for (const key of Object.keys(data)) {
<ide> let chunkMap = chunkMaps[key];
<ide> if (chunkMap === undefined) {
<ide><path>lib/ChunkGraph.js
<ide> const { compareModulesById } = require("./util/comparators");
<ide> /** @typedef {import("./Chunk")} Chunk */
<ide> /** @typedef {import("./ChunkGroup")} ChunkGroup */
<ide> /** @typedef {import("./Module")} Module */
<add>/** @typedef {import("./ModuleGraph")} ModuleGraph */
<ide>
<ide> /** @typedef {(m: Module) => boolean} ModuleFilterPredicate */
<ide>
<ide> class ChunkGraph {
<ide> */
<ide>
<ide> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<ide> * @param {Chunk} chunk the chunk
<ide> * @param {ModuleFilterPredicate} filterFn function used to filter modules
<ide> * @returns {ChunkModuleMaps} module map information
<ide> */
<del> getChunkModuleMaps(chunk, filterFn) {
<add> getChunkModuleMaps(moduleGraph, chunk, filterFn) {
<ide> /** @type {Record<string|number, (string|number)[]>} */
<ide> const chunkModuleIdMap = Object.create(null);
<ide> /** @type {Record<string|number, string>} */
<ide> class ChunkGraph {
<ide> let array;
<ide> for (const module of this.getOrderedChunkModulesIterable(
<ide> asyncChunk,
<del> compareModulesById
<add> compareModulesById(moduleGraph)
<ide> )) {
<ide> if (filterFn(module)) {
<ide> if (array === undefined) {
<ide> class ChunkGraph {
<ide> }
<ide>
<ide> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<ide> * @param {Chunk} chunkA first chunk
<ide> * @param {Chunk} chunkB second chunk
<ide> * @returns {-1|0|1} this is a comparator function like sort and returns -1, 0, or 1 based on sort order
<ide> */
<del> compareChunks(chunkA, chunkB) {
<add> compareChunks(moduleGraph, chunkA, chunkB) {
<ide> const cgcA = this._getChunkGraphChunk(chunkA);
<ide> const cgcB = this._getChunkGraphChunk(chunkB);
<ide> if (cgcA.modules.size > cgcB.modules.size) return -1;
<ide> if (cgcA.modules.size < cgcB.modules.size) return 1;
<del> cgcA.modules.sortWith(compareModulesById);
<del> cgcB.modules.sortWith(compareModulesById);
<add> const cmpFn = compareModulesById(moduleGraph);
<add> cgcA.modules.sortWith(cmpFn);
<add> cgcB.modules.sortWith(cmpFn);
<ide> const a = cgcA.modules[Symbol.iterator]();
<ide> const b = cgcB.modules[Symbol.iterator]();
<ide> // eslint-disable-next-line no-constant-condition
<ide><path>lib/ChunkGroup.js
<ide> const SortableSet = require("./util/SortableSet");
<ide> /** @typedef {import("./ChunkGraph")} ChunkGraph */
<ide> /** @typedef {import("./Dependency").DependencyLocation} DependencyLocation */
<ide> /** @typedef {import("./Module")} Module */
<add>/** @typedef {import("./ModuleGraph")} ModuleGraph */
<ide> /** @typedef {import("./ModuleReason")} ModuleReason */
<ide>
<ide> /** @typedef {{id: number}} HasId */
<ide> class ChunkGroup {
<ide> * Sorting predicate which allows current ChunkGroup to be compared against another.
<ide> * Sorting values are based off of number of chunks in ChunkGroup.
<ide> *
<add> * @param {ModuleGraph} moduleGraph the module graph
<ide> * @param {ChunkGraph} chunkGraph the chunk graph
<ide> * @param {ChunkGroup} otherGroup the chunkGroup to compare this against
<ide> * @returns {-1|0|1} sort position for comparison
<ide> */
<del> compareTo(chunkGraph, otherGroup) {
<add> compareTo(moduleGraph, chunkGraph, otherGroup) {
<ide> if (this.chunks.length > otherGroup.chunks.length) return -1;
<ide> if (this.chunks.length < otherGroup.chunks.length) return 1;
<ide> const a = this.chunks[Symbol.iterator]();
<ide> class ChunkGroup {
<ide> const aItem = a.next();
<ide> const bItem = b.next();
<ide> if (aItem.done) return 0;
<del> const cmp = chunkGraph.compareChunks(aItem.value, bItem.value);
<add> const cmp = chunkGraph.compareChunks(
<add> moduleGraph,
<add> aItem.value,
<add> bItem.value
<add> );
<ide> if (cmp !== 0) return cmp;
<ide> }
<ide> }
<ide>
<del> getChildrenByOrders(chunkGraph) {
<add> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<add> * @param {ChunkGraph} chunkGraph the chunk graph
<add> * @returns {Record<string, ChunkGroup[]>} mapping from children type to ordered list of ChunkGroups
<add> */
<add> getChildrenByOrders(moduleGraph, chunkGraph) {
<add> /** @type {Map<string, {order: number, group: ChunkGroup}[]>} */
<ide> const lists = new Map();
<ide> for (const childGroup of this._children) {
<ide> for (const key of Object.keys(childGroup.options)) {
<ide> class ChunkGroup {
<ide> }
<ide> }
<ide> }
<add> /** @type {Record<string, ChunkGroup[]>} */
<ide> const result = Object.create(null);
<ide> for (const [name, list] of lists) {
<ide> list.sort((a, b) => {
<ide> const cmp = b.order - a.order;
<ide> if (cmp !== 0) return cmp;
<del> return a.group.compareTo(chunkGraph, b.group);
<add> return a.group.compareTo(moduleGraph, chunkGraph, b.group);
<ide> });
<ide> result[name] = list.map(i => i.group);
<ide> }
<ide><path>lib/Compilation.js
<ide> const compareLocations = require("./compareLocations");
<ide> const Queue = require("./util/Queue");
<ide> const Semaphore = require("./util/Semaphore");
<ide> const SortableSet = require("./util/SortableSet");
<add>const { compareModulesByIndexOrIdentifier } = require("./util/comparators");
<ide> const createHash = require("./util/createHash");
<ide>
<ide> /** @typedef {import("webpack-sources").Source} Source */
<ide> const byIdOrIdentifier = (a, b) => {
<ide> return 0;
<ide> };
<ide>
<del>/**
<del> * @param {Module} a first module to sort by
<del> * @param {Module} b second module to sort by
<del> * @returns {-1|0|1} sort value
<del> */
<del>const byIndexOrIdentifier = (a, b) => {
<del> if (a.index < b.index) return -1;
<del> if (a.index > b.index) return 1;
<del> const identA = a.identifier();
<del> const identB = b.identifier();
<del> if (identA < identB) return -1;
<del> if (identA > identB) return 1;
<del> return 0;
<del>};
<del>
<ide> /**
<ide> * @param {Compilation} a first compilation to sort by
<ide> * @param {Compilation} b second compilation to sort by
<ide> class Compilation {
<ide> // TODO webpack 5: this should only be enabled when `moduleIds: "natural"`
<ide> // TODO move it into a plugin (NaturalModuleIdsPlugin) and use this in WebpackOptionsApply
<ide> // TODO remove this method
<del> modules.sort(byIndexOrIdentifier);
<add> modules.sort(compareModulesByIndexOrIdentifier(this.moduleGraph));
<ide> }
<ide>
<ide> /**
<ide> class Compilation {
<ide> if (outputOptions.hashSalt) {
<ide> chunkHash.update(outputOptions.hashSalt);
<ide> }
<del> chunk.updateHash(chunkHash, this.chunkGraph);
<add> chunk.updateHash(chunkHash, this);
<ide> const template = chunk.hasRuntime()
<ide> ? this.mainTemplate
<ide> : this.chunkTemplate;
<ide><path>lib/HotModuleReplacementPlugin.js
<ide> module.exports = class HotModuleReplacementPlugin {
<ide> compiler.hooks.compilation.tap(
<ide> "HotModuleReplacementPlugin",
<ide> (compilation, { normalModuleFactory }) => {
<add> const moduleGraph = compilation.moduleGraph;
<ide> const hotUpdateChunkTemplate = compilation.hotUpdateChunkTemplate;
<ide> if (!hotUpdateChunkTemplate) return;
<ide>
<ide> module.exports = class HotModuleReplacementPlugin {
<ide> records.chunkModuleIds[chunk.id] = Array.from(
<ide> chunkGraph.getOrderedChunkModulesIterable(
<ide> chunk,
<del> compareModulesById
<add> compareModulesById(moduleGraph)
<ide> ),
<ide> m => m.id
<ide> );
<ide><path>lib/JavascriptModulesPlugin.js
<ide> class JavascriptModulesPlugin {
<ide> compiler.hooks.compilation.tap(
<ide> "JavascriptModulesPlugin",
<ide> (compilation, { normalModuleFactory }) => {
<add> const moduleGraph = compilation.moduleGraph;
<ide> const hooks = JavascriptModulesPlugin.getHooks(compilation);
<ide> hooks.shouldRender.tap("JavascriptModulesPlugin", module => {
<ide> if (module.type === "javascript/auto") return true;
<ide> class JavascriptModulesPlugin {
<ide> template.updateHashForChunk(hash, chunk);
<ide> for (const m of chunkGraph.getOrderedChunkModulesIterable(
<ide> chunk,
<del> compareModulesById
<add> compareModulesById(moduleGraph)
<ide> )) {
<ide> if (typeof m.source === "function") {
<ide> hash.update(m.hash);
<ide><path>lib/LibManifestPlugin.js
<ide> class LibManifestPlugin {
<ide> compiler.hooks.emit.tapAsync(
<ide> "LibManifestPlugin",
<ide> (compilation, callback) => {
<add> const moduleGraph = compilation.moduleGraph;
<ide> asyncLib.forEach(
<ide> compilation.chunks,
<ide> (chunk, callback) => {
<ide> class LibManifestPlugin {
<ide> content: Array.from(
<ide> chunkGraph.getOrderedChunkModulesIterable(
<ide> chunk,
<del> compareModulesById
<add> compareModulesById(moduleGraph)
<ide> ),
<ide> module => {
<ide> if (
<ide><path>lib/Stats.js
<ide> class Stats {
<ide> for (const keyValuePair of groupMap) {
<ide> const name = keyValuePair[0];
<ide> const cg = keyValuePair[1];
<del> const children = cg.getChildrenByOrders(chunkGraph);
<add> const children = cg.getChildrenByOrders(moduleGraph, chunkGraph);
<ide> obj[name] = {
<ide> chunks: cg.chunks.map(c => c.id),
<ide> assets: cg.chunks.reduce(
<ide> (array, c) => array.concat(c.files || []),
<del> []
<add> /** @type {string[]} */ ([])
<ide> ),
<ide> children: Object.keys(children).reduce((obj, key) => {
<ide> const groups = children[key];
<ide> class Stats {
<ide> chunks: group.chunks.map(c => c.id),
<ide> assets: group.chunks.reduce(
<ide> (array, c) => array.concat(c.files || []),
<del> []
<add> /** @type {string[]} */ ([])
<ide> )
<ide> }));
<ide> return obj;
<del> }, Object.create(null)),
<add> }, /** @type {Record<string, {name: string, chunks: (string|number)[], assets: string[]}[]>} */ Object.create(null)),
<ide> childAssets: Object.keys(children).reduce((obj, key) => {
<ide> const groups = children[key];
<ide> obj[key] = Array.from(
<ide> class Stats {
<ide> }
<ide> }
<ide> return set;
<del> }, new Set())
<add> }, /** @type {Set<string>} */ (new Set()))
<ide> );
<ide> return obj;
<ide> }, Object.create(null))
<ide> class Stats {
<ide> const parents = new Set();
<ide> const children = new Set();
<ide> const siblings = new Set();
<del> const childIdByOrder = chunk.getChildIdsByOrders(chunkGraph);
<add> const childIdByOrder = chunk.getChildIdsByOrders(
<add> moduleGraph,
<add> chunkGraph
<add> );
<ide> for (const chunkGroup of chunk.groupsIterable) {
<ide> for (const parentGroup of chunkGroup.parentsIterable) {
<ide> for (const chunk of parentGroup.chunks) {
<ide><path>lib/optimize/AggressiveSplittingPlugin.js
<ide> class AggressiveSplittingPlugin {
<ide> compiler.hooks.thisCompilation.tap(
<ide> "AggressiveSplittingPlugin",
<ide> compilation => {
<add> const moduleGraph = compilation.moduleGraph;
<ide> let needAdditionalSeal = false;
<ide> let newSplits;
<ide> let fromAggressiveSplittingSet;
<ide> class AggressiveSplittingPlugin {
<ide>
<ide> // for any chunk which isn't splitted yet, split it and create a new entry
<ide> // start with the biggest chunk
<add> const cmpFn = compareModulesById(moduleGraph);
<ide> const sortedChunks = chunks.slice().sort((a, b) => {
<ide> const diff1 =
<ide> chunkGraph.getChunkModulesSize(b) -
<ide> class AggressiveSplittingPlugin {
<ide> chunkGraph.getNumberOfChunkModules(b);
<ide> if (diff2) return diff2;
<ide> const modulesA = Array.from(
<del> chunkGraph.getOrderedChunkModulesIterable(a, compareModulesById)
<add> chunkGraph.getOrderedChunkModulesIterable(a, cmpFn)
<ide> );
<ide> const modulesB = Array.from(
<del> chunkGraph.getOrderedChunkModulesIterable(b, compareModulesById)
<add> chunkGraph.getOrderedChunkModulesIterable(b, cmpFn)
<ide> );
<ide> const aI = modulesA[Symbol.iterator]();
<ide> const bI = modulesB[Symbol.iterator]();
<ide><path>lib/optimize/ChunkModuleIdRangePlugin.js
<ide> class ChunkModuleIdRangePlugin {
<ide> apply(compiler) {
<ide> const options = this.options;
<ide> compiler.hooks.compilation.tap("ChunkModuleIdRangePlugin", compilation => {
<add> const moduleGraph = compilation.moduleGraph;
<ide> compilation.hooks.moduleIds.tap("ChunkModuleIdRangePlugin", modules => {
<ide> const chunkGraph = compilation.chunkGraph;
<ide> const chunk = compilation.chunks.find(
<ide> class ChunkModuleIdRangePlugin {
<ide> let cmpFn;
<ide> switch (options.order) {
<ide> case "index":
<del> cmpFn = compareModulesByIndex;
<add> cmpFn = compareModulesByIndex(moduleGraph);
<ide> break;
<ide> case "index2":
<del> cmpFn = compareModulesByIndex2;
<add> cmpFn = compareModulesByIndex2(moduleGraph);
<ide> break;
<ide> default:
<ide> throw new Error(
<ide><path>lib/optimize/NaturalChunkOrderPlugin.js
<ide> class NaturalChunkOrderPlugin {
<ide> */
<ide> apply(compiler) {
<ide> compiler.hooks.compilation.tap("NaturalChunkOrderPlugin", compilation => {
<add> const moduleGraph = compilation.moduleGraph;
<ide> compilation.hooks.optimizeChunkOrder.tap(
<ide> "NaturalChunkOrderPlugin",
<ide> chunks => {
<ide> const chunkGraph = compilation.chunkGraph;
<ide> chunks.sort((chunkA, chunkB) => {
<add> const cmpFn = compareModulesById(moduleGraph);
<ide> const a = chunkGraph
<del> .getOrderedChunkModulesIterable(chunkA, compareModulesById)
<add> .getOrderedChunkModulesIterable(chunkA, cmpFn)
<ide> [Symbol.iterator]();
<ide> const b = chunkGraph
<del> .getOrderedChunkModulesIterable(chunkB, compareModulesById)
<add> .getOrderedChunkModulesIterable(chunkB, cmpFn)
<ide> [Symbol.iterator]();
<ide> // eslint-disable-next-line no-constant-condition
<ide> while (true) {
<ide><path>lib/util/comparators.js
<ide>
<ide> /** @typedef {import("../Chunk")} Chunk */
<ide> /** @typedef {import("../Module")} Module */
<add>/** @typedef {import("../ModuleGraph")} ModuleGraph */
<add>
<add>/** @template T @typedef {function(T, T): -1|0|1} Comparator */
<add>/** @template TArg @template T @typedef {function(TArg, T, T): -1|0|1} RawParamizedComparator */
<add>/** @template TArg @template T @typedef {function(TArg): Comparator<T>} ParamizedComparator */
<add>
<add>/**
<add> * @template T
<add> * @param {RawParamizedComparator<any, T>} fn comparator with argument
<add> * @returns {ParamizedComparator<any, T>} comparator
<add> */
<add>const createCachedParamizedComparator = fn => {
<add> /** @type {WeakMap<object, Comparator<T>>} */
<add> const map = new WeakMap();
<add> return arg => {
<add> const cachedResult = map.get(arg);
<add> if (cachedResult !== undefined) return cachedResult;
<add> /**
<add> * @param {T} a first item
<add> * @param {T} b second item
<add> * @returns {-1|0|1} compare result
<add> */
<add> const result = (a, b) => {
<add> return fn(arg, a, b);
<add> };
<add> map.set(arg, result);
<add> return result;
<add> };
<add>};
<ide>
<ide> /**
<ide> * @param {Chunk} a chunk
<ide> exports.compareChunksById = (a, b) => {
<ide> };
<ide>
<ide> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<ide> * @param {Module} a module
<ide> * @param {Module} b module
<ide> * @returns {-1|0|1} compare result
<ide> */
<del>exports.compareModulesById = (a, b) => {
<add>const compareModulesById = (moduleGraph, a, b) => {
<ide> return compareIds(a.id, b.id);
<ide> };
<add>/** @type {ParamizedComparator<ModuleGraph, Module>} */
<add>exports.compareModulesById = createCachedParamizedComparator(
<add> compareModulesById
<add>);
<ide>
<ide> /**
<ide> * @param {number} a number
<ide> const compareNumbers = (a, b) => {
<ide> };
<ide>
<ide> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<ide> * @param {Module} a module
<ide> * @param {Module} b module
<ide> * @returns {-1|0|1} compare result
<ide> */
<del>exports.compareModulesByIndex = (a, b) => {
<add>const compareModulesByIndex = (moduleGraph, a, b) => {
<ide> return compareNumbers(a.index, b.index);
<ide> };
<add>/** @type {ParamizedComparator<ModuleGraph, Module>} */
<add>exports.compareModulesByIndex = createCachedParamizedComparator(
<add> compareModulesByIndex
<add>);
<ide>
<ide> /**
<add> * @param {ModuleGraph} moduleGraph the module graph
<ide> * @param {Module} a module
<ide> * @param {Module} b module
<ide> * @returns {-1|0|1} compare result
<ide> */
<del>exports.compareModulesByIndex2 = (a, b) => {
<add>const compareModulesByIndex2 = (moduleGraph, a, b) => {
<ide> return compareNumbers(a.index2, b.index2);
<ide> };
<add>/** @type {ParamizedComparator<ModuleGraph, Module>} */
<add>exports.compareModulesByIndex2 = createCachedParamizedComparator(
<add> compareModulesByIndex2
<add>);
<add>
<add>/**
<add> * @param {ModuleGraph} moduleGraph the module graph
<add> * @param {Module} a module
<add> * @param {Module} b module
<add> * @returns {-1|0|1} compare result
<add> */
<add>const compareModulesByIndexOrIdentifier = (moduleGraph, a, b) => {
<add> if (a.index < b.index) return -1;
<add> if (a.index > b.index) return 1;
<add> const identA = a.identifier();
<add> const identB = b.identifier();
<add> if (identA < identB) return -1;
<add> if (identA > identB) return 1;
<add> return 0;
<add>};
<add>/** @type {ParamizedComparator<ModuleGraph, Module>} */
<add>exports.compareModulesByIndexOrIdentifier = createCachedParamizedComparator(
<add> compareModulesByIndexOrIdentifier
<add>);
<ide>
<ide> /**
<ide> * @param {string|number} a first id
<ide><path>lib/wasm/WasmMainTemplatePlugin.js
<ide> const WebAssemblyUtils = require("./WebAssemblyUtils");
<ide> /** @typedef {import("../ModuleGraph")} ModuleGraph */
<ide>
<ide> // Get all wasm modules
<del>const getAllWasmModules = (chunkGraph, chunk) => {
<add>const getAllWasmModules = (moduleGraph, chunkGraph, chunk) => {
<ide> const wasmModules = chunk.getAllAsyncChunks();
<ide> const array = [];
<ide> for (const chunk of wasmModules) {
<ide> for (const m of chunkGraph.getOrderedChunkModulesIterable(
<ide> chunk,
<del> compareModulesById
<add> compareModulesById(moduleGraph)
<ide> )) {
<ide> if (m.type.startsWith("webassembly")) {
<ide> array.push(m);
<ide> class WasmMainTemplatePlugin {
<ide> * @returns {void}
<ide> */
<ide> apply(mainTemplate) {
<add> const moduleGraph = this.compilation.moduleGraph;
<ide> mainTemplate.hooks.localVars.tap(
<ide> "WasmMainTemplatePlugin",
<ide> (source, chunk) => {
<ide> const wasmModules = getAllWasmModules(
<add> this.compilation.moduleGraph,
<ide> this.compilation.chunkGraph,
<ide> chunk
<ide> );
<ide> class WasmMainTemplatePlugin {
<ide> mainTemplate.outputOptions.webassemblyModuleFilename;
<ide>
<ide> const chunkModuleMaps = this.compilation.chunkGraph.getChunkModuleMaps(
<add> moduleGraph,
<ide> chunk,
<ide> m => m.type.startsWith("webassembly")
<ide> );
<ide> class WasmMainTemplatePlugin {
<ide> "WasmMainTemplatePlugin",
<ide> (hash, chunk) => {
<ide> const chunkModuleMaps = this.compilation.chunkGraph.getChunkModuleMaps(
<add> moduleGraph,
<ide> chunk,
<ide> m => m.type.startsWith("webassembly")
<ide> );
<ide> hash.update(JSON.stringify(chunkModuleMaps.id));
<ide> const wasmModules = getAllWasmModules(
<add> this.compilation.moduleGraph,
<ide> this.compilation.chunkGraph,
<ide> chunk
<ide> );
<ide><path>lib/wasm/WebAssemblyModulesPlugin.js
<ide> class WebAssemblyModulesPlugin {
<ide> compiler.hooks.compilation.tap(
<ide> "WebAssemblyModulesPlugin",
<ide> (compilation, { normalModuleFactory }) => {
<add> const moduleGraph = compilation.moduleGraph;
<ide> compilation.dependencyFactories.set(
<ide> WebAssemblyImportDependency,
<ide> normalModuleFactory
<ide> class WebAssemblyModulesPlugin {
<ide>
<ide> for (const module of chunkGraph.getOrderedChunkModulesIterable(
<ide> chunk,
<del> compareModulesById
<add> compareModulesById(moduleGraph)
<ide> )) {
<ide> if (module.type && module.type.startsWith("webassembly")) {
<ide> const filenameTemplate =
<ide><path>lib/web/JsonpChunkTemplatePlugin.js
<ide> class JsonpChunkTemplatePlugin {
<ide> * @returns {void}
<ide> */
<ide> apply(chunkTemplate) {
<add> const moduleGraph = this.compilation.moduleGraph;
<ide> chunkTemplate.hooks.render.tap(
<ide> "JsonpChunkTemplatePlugin",
<ide> (modules, moduleTemplate, { chunk, chunkGraph }) => {
<ide> const jsonpFunction = chunkTemplate.outputOptions.jsonpFunction;
<ide> const globalObject = chunkTemplate.outputOptions.globalObject;
<ide> const source = new ConcatSource();
<del> const prefetchChunks = chunk.getChildIdsByOrders(chunkGraph).prefetch;
<add> const prefetchChunks = chunk.getChildIdsByOrders(
<add> moduleGraph,
<add> chunkGraph
<add> ).prefetch;
<ide> source.add(
<ide> `(${globalObject}[${JSON.stringify(
<ide> jsonpFunction
<ide> class JsonpChunkTemplatePlugin {
<ide> const chunkGraph = this.compilation.chunkGraph;
<ide> hash.update(JSON.stringify(getEntryInfo(chunkGraph, chunk)));
<ide> hash.update(
<del> JSON.stringify(chunk.getChildIdsByOrders(chunkGraph).prefetch) || ""
<add> JSON.stringify(
<add> chunk.getChildIdsByOrders(moduleGraph, chunkGraph).prefetch
<add> ) || ""
<ide> );
<ide> }
<ide> );
<ide><path>lib/web/JsonpMainTemplatePlugin.js
<ide> class JsonpMainTemplatePlugin {
<ide> * @returns {void}
<ide> */
<ide> apply(mainTemplate) {
<add> const moduleGraph = this.compilation.moduleGraph;
<ide> const needChunkOnDemandLoadingCode = chunk => {
<ide> for (const chunkGroup of chunk.groupsIterable) {
<ide> if (chunkGroup.getNumberOfChildren() > 0) return true;
<ide> class JsonpMainTemplatePlugin {
<ide> };
<ide> const needPrefetchingCode = chunk => {
<ide> const allPrefetchChunks = chunk.getChildIdsByOrdersMap(
<add> moduleGraph,
<ide> this.compilation.chunkGraph,
<ide> true
<ide> ).prefetch;
<ide> class JsonpMainTemplatePlugin {
<ide> }),
<ide> (source, chunkIdExpression, { chunk, hash }) => {
<ide> const chunkMap = chunk.getChildIdsByOrdersMap(
<add> moduleGraph,
<ide> this.compilation.chunkGraph
<ide> ).preload;
<ide> if (!chunkMap || Object.keys(chunkMap).length === 0) return source;
<ide> class JsonpMainTemplatePlugin {
<ide> "JsonpMainTemplatePlugin",
<ide> (source, chunk, hash) => {
<ide> const chunkGraph = this.compilation.chunkGraph;
<del> const prefetchChunks = chunk.getChildIdsByOrders(chunkGraph).prefetch;
<add> const prefetchChunks = chunk.getChildIdsByOrders(
<add> moduleGraph,
<add> chunkGraph
<add> ).prefetch;
<ide> if (
<ide> needChunkLoadingCode(chunk) &&
<ide> prefetchChunks &&
<ide><path>test/statsCases/named-chunks-plugin-async/webpack.config.js
<ide> module.exports = {
<ide> entry: "./entry"
<ide> },
<ide> plugins: [
<del> new NamedChunksPlugin(function(chunk, { chunkGraph }) {
<add> new NamedChunksPlugin(function(chunk, { chunkGraph, moduleGraph }) {
<ide> if (chunk.name) {
<ide> return chunk.name;
<ide> }
<ide> const chunkModulesToName = chunk =>
<ide> Array.from(
<del> chunkGraph.getOrderedChunkModulesIterable(chunk, compareModulesById),
<add> chunkGraph.getOrderedChunkModulesIterable(
<add> chunk,
<add> compareModulesById(moduleGraph)
<add> ),
<ide> mod => {
<ide> const rs = new RequestShortener(mod.context);
<ide> return rs.shorten(mod.request).replace(/[./\\]/g, "_"); | 17 |
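The `createCachedParamizedComparator` helper introduced above is the core of this change: it lifts a `(arg, a, b)` comparison function into an `arg → comparator` factory and memoizes one comparator per argument in a `WeakMap`, so repeated calls with the same `ModuleGraph` return the same function object — useful when sorted collections cache their order keyed by comparator identity. A minimal standalone sketch of the pattern, with illustrative names rather than webpack's actual API:

```js
// Memoize a parameterized comparator: one comparator instance per argument.
// A WeakMap keys the cache on the argument object, so cache entries are
// garbage-collected together with the argument itself.
function createCachedComparator(fn) {
  const cache = new WeakMap();
  return (arg) => {
    const cached = cache.get(arg);
    if (cached !== undefined) return cached;
    const comparator = (a, b) => fn(arg, a, b);
    cache.set(arg, comparator);
    return comparator;
  };
}

const compareById = createCachedComparator((graph, a, b) =>
  a.id < b.id ? -1 : a.id > b.id ? 1 : 0
);

// The same graph always yields the same comparator reference:
const graph = {};
console.log(compareById(graph) === compareById(graph)); // true
```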
Python | Python | fix typo in signals.py | c33db1a61023a058b8aa8e517e75244d759bd5cb | <ide><path>celery/signals.py
<ide> },
<ide> )
<ide>
<del># - Prorgam: `celery worker`
<add># - Program: `celery worker`
<ide> celeryd_init = Signal(
<ide> name='celeryd_init',
<ide> providing_args={'instance', 'conf', 'options'}, | 1 |
Ruby | Ruby | limit some heuristics to strict mode | 56ccf10efaac5f97bae6b2e5aef9ef87d7529328 | <ide><path>Library/Homebrew/dev-cmd/audit.rb
<ide> def initialize(formula, options = {})
<ide> @specs = %w[stable devel head].map { |s| formula.send(s) }.compact
<ide> end
<ide>
<del> def self.check_http_content(url, name, user_agents: [:default], check_content: false)
<add> def self.check_http_content(url, name, user_agents: [:default], check_content: false, strict: false)
<ide> return unless url.start_with? "http"
<ide>
<ide> details = nil
<ide> def self.check_http_content(url, name, user_agents: [:default], check_content: f
<ide> return "The URL #{url} should use HTTPS rather than HTTP"
<ide> end
<ide>
<add> return unless strict
<add>
<ide> # Same size, different content after normalization
<ide> # (typical causes: Generated ID, Timestamp, Unix time)
<ide> if details[:file].length == secure_details[:file].length
<ide> def audit_homepage
<ide> if http_content_problem = FormulaAuditor.check_http_content(homepage,
<ide> formula.name,
<ide> user_agents: [:browser, :default],
<del> check_content: true)
<add> check_content: true,
<add> strict: @strict)
<ide> problem http_content_problem
<ide> end
<ide> end | 1 |
Ruby | Ruby | ignore dependencies when fetching | a333014c55c1edc2555cd2dfbd8dc07463cc9b76 | <ide><path>Library/Homebrew/formula_installer.rb
<ide> def fetch_dependency(dep)
<ide> fi.verbose = verbose?
<ide> fi.quiet = quiet?
<ide> fi.debug = debug?
<add> # When fetching we don't need to recurse the dependency tree as it's already
<add> # been done for us in `compute_dependencies` and there's no requirement to
<add> # fetch in a particular order.
<add> fi.ignore_deps = true
<ide> fi.fetch
<ide> end
<ide> | 1 |
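The comment added to `fetch_dependency` records a small but useful invariant: once `compute_dependencies` has flattened the tree into an ordered list, per-dependency fetches need not recurse again, and download order is irrelevant. A hedged JavaScript sketch of the same idea — `computeDependencies` and `fetchOne` are placeholder names, not Homebrew's API:

```js
// Assumes computeDependencies() already returns the full, de-duplicated
// dependency list for a package, so each fetch below is non-recursive.
async function fetchAll(pkg, { computeDependencies, fetchOne }) {
  for (const dep of computeDependencies(pkg)) {
    // recurse: false — the tree was expanded once above, and fetching
    // has no ordering requirement, so each call only grabs its artifact.
    await fetchOne(dep, { recurse: false });
  }
  await fetchOne(pkg, { recurse: false });
}

// Example wiring with stub implementations:
fetchAll('wget', {
  computeDependencies: () => ['openssl', 'libidn2'],
  fetchOne: async (name) => console.log('fetching', name),
});
```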
Javascript | Javascript | update sendgrid email address | 2c351ea6ebd4d25fccea1a4d7ecbab1e9449535a | <ide><path>controllers/contact.js
<ide> exports.postContact = function(req, res) {
<ide> var from = req.body.email;
<ide> var name = req.body.name;
<ide> var body = req.body.message;
<del> var to = '[email protected]';
<add> var to = '[email protected]';
<ide> var subject = 'API Example | Contact Form';
<ide>
<ide> var email = new sendgrid.Email({
<ide> exports.postContact = function(req, res) {
<ide>
<ide> sendgrid.send(email, function(err) {
<ide> if (err) {
<del> req.flash('error', err.message);
<add> req.flash('errors', err.message);
<ide> return res.redirect('/contact');
<ide> }
<ide> req.flash('success', 'Email has been sent successfully!'); | 1 |
Javascript | Javascript | fix missed case in "observe" type check | f81ef8daacc86c3ecd15aed4a93e760d3fd532a4 | <ide><path>src/createStore.js
<ide> export default function createStore(reducer, preloadedState, enhancer) {
<ide> * emission of values from the observable.
<ide> */
<ide> subscribe(observer) {
<del> if (typeof observer !== 'object') {
<add> if (typeof observer !== 'object' || observer === null) {
<ide> throw new TypeError('Expected the observer to be an object.')
<ide> }
<ide>
<ide><path>test/createStore.spec.js
<ide> describe('createStore', () => {
<ide>
<ide> expect(function() {
<ide> obs.subscribe()
<del> }).toThrow()
<add> }).toThrowError(new TypeError('Expected the observer to be an object.'))
<add>
<add> expect(function() {
<add> obs.subscribe(null)
<add> }).toThrowError(new TypeError('Expected the observer to be an object.'))
<ide>
<ide> expect(function() {
<ide> obs.subscribe(() => {})
<del> }).toThrow()
<add> }).toThrowError(new TypeError('Expected the observer to be an object.'))
<ide>
<ide> expect(function() {
<ide> obs.subscribe({}) | 2 |
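The missed case is a classic JavaScript quirk: `typeof null` evaluates to `'object'`, so the original guard let `null` through and the failure surfaced later as a confusing property access. A quick demonstration of why both conditions are needed:

```js
console.log(typeof null); // 'object' — the typeof test alone accepts null

function subscribe(observer) {
  // Reject primitives *and* null before touching observer properties.
  if (typeof observer !== 'object' || observer === null) {
    throw new TypeError('Expected the observer to be an object.');
  }
  // ...safe to read observer.next from here on
}

try {
  subscribe(null);
} catch (e) {
  console.log(e.message); // 'Expected the observer to be an object.'
}
```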
Go | Go | remove extra conditional | 1d5698936a12ce8fb4578273d9fdf92c36c09128 | <ide><path>libnetwork/store.go
<ide> import (
<ide> )
<ide>
<ide> func (c *controller) validateDatastoreConfig() bool {
<del> if c.cfg == nil || c.cfg.Datastore.Client.Provider == "" || c.cfg.Datastore.Client.Address == "" {
<del> return false
<del> }
<del> return true
<add> return c.cfg != nil && c.cfg.Datastore.Client.Provider != "" && c.cfg.Datastore.Client.Address != ""
<ide> }
<ide>
<ide> func (c *controller) initDataStore() error { | 1 |
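The refactor collapses an `if … return false; return true` ladder into a single boolean expression — a cleanup that translates to any language. The JavaScript equivalent:

```js
// Before: a conditional whose only job is to produce a boolean.
function validateConfigVerbose(cfg) {
  if (cfg == null || cfg.provider === '' || cfg.address === '') {
    return false;
  }
  return true;
}

// After: return the condition itself (negated to match the positive sense).
function validateConfig(cfg) {
  return cfg != null && cfg.provider !== '' && cfg.address !== '';
}

console.log(validateConfig({ provider: 'consul', address: 'localhost:8500' })); // true
console.log(validateConfig(null)); // false
```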
PHP | PHP | deprecate a few methods | 7f1cfa5c62d98a32638a5c389c39f9e8143bd954 | <ide><path>src/Illuminate/Database/Schema/Blueprint.php
<ide> public function dropColumn($columns)
<ide> */
<ide> public function renameColumn($from, $to)
<ide> {
<del> return $this->addCommand('renameColumn', compact('from', 'to'));
<add> throw new \BadMethodCallException("Column renaming has been deprecated.");
<ide> }
<ide>
<ide> /**
<ide><path>src/Illuminate/Database/Schema/Grammars/SQLiteGrammar.php
<ide> public function compileDropIfExists(Blueprint $blueprint, Fluent $command)
<ide> */
<ide> public function compileDropColumn(Blueprint $blueprint, Fluent $command, Connection $connection)
<ide> {
<del> throw new \RuntimeException("Dropping columns not supported on SQLite");
<add> throw new \BadMethodCallException("SQLite column dropping has been deprecated.");
<ide> }
<ide>
<ide> /** | 2 |
PHP | PHP | add test for url generation with multiple prefixes | 7a859105bb6ff106e0d440c804a4e5e502653676
<ide> public function testUrlGenerationWithRegexQualifiedParams() {
<ide> * @return void
<ide> */
<ide> public function testUrlGenerationWithPrefix() {
<del> Configure::write('Routing.prefixes', array('admin'));
<ide> Router::reload();
<ide>
<ide> Router::connect('/pages/*', array('controller' => 'pages', 'action' => 'display'));
<ide> public function testUrlGenerationPrefixedPlugin() {
<ide> $this->assertEquals($expected, $result);
<ide> }
<ide>
<add>/**
<add> * Test URL generation with multiple prefixes.
<add> *
<add> * @return void
<add> */
<add> public function testUrlGenerationMultiplePrefixes() {
<add> Router::prefix('admin', function ($routes) {
<add> $routes->prefix('backoffice', function ($routes) {
<add> $routes->fallbacks();
<add> });
<add> });
<add> $result = Router::url([
<add> 'prefix' => 'admin/backoffice',
<add> 'controller' => 'Dashboards',
<add> 'action' => 'home'
<add> ]);
<add> $expected = '/admin/backoffice/dashboards/home';
<add> $this->assertEquals($expected, $result);
<add> }
<add>
<ide> /**
<ide> * testUrlGenerationWithExtensions method
<ide> * | 1 |
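The new test pins down how nested route prefixes flow into generated URLs: `'prefix' => 'admin/backoffice'` must expand to `/admin/backoffice/dashboards/home`. The joining logic it exercises is easy to picture in a hedged JavaScript sketch (a toy URL builder, not CakePHP's router):

```js
// Build a URL path from a (possibly nested) prefix plus controller/action.
function urlFor({ prefix = '', controller, action }) {
  const segments = [
    ...prefix.split('/').filter(Boolean), // 'admin/backoffice' → two segments
    controller.toLowerCase(),             // matches the lowercased controller in the URL
    action,
  ];
  return '/' + segments.join('/');
}

console.log(
  urlFor({ prefix: 'admin/backoffice', controller: 'Dashboards', action: 'home' })
); // '/admin/backoffice/dashboards/home'
```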
Ruby | Ruby | add empty line after guard clause | 8244a869f63f7d10e8533df7705a7fec5d596315 | <ide><path>Library/Homebrew/cmd/info.rb
<ide> def output_formula_analytics(f)
<ide> if full_analytics
<ide> next if args.days.present? && args.days&.to_i != days
<ide> next if args.category.present? && args.category != category
<add>
<ide> analytics_table(category, days, results)
<ide> else
<ide> total_count = results.values.inject("+") | 1 |
Python | Python | fix parser creation in language class | 1b651db9c577fc09fb4ca2be7e5858985de5d207 | <ide><path>spacy/language.py
<ide> def __init__(self, path=True, **overrides):
<ide> self.tagger = self.Defaults.create_tagger(self) \
<ide> if 'tagger' not in overrides \
<ide> else overrides['tagger']
<del> self.parser = self.Defaults.create_tagger(self) \
<add> self.parser = self.Defaults.create_parser(self) \
<ide> if 'parser' not in overrides \
<ide> else overrides['parser']
<ide> self.entity = self.Defaults.create_entity(self) \ | 1 |
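The bug fixed here — `create_tagger` pasted where `create_parser` belonged — is the classic hazard of repeating near-identical wiring by hand. One defensive pattern is to drive the wiring from a table so each component name appears exactly once; a hedged JavaScript sketch with illustrative component names:

```js
// Each pipeline slot maps to its factory exactly once, so the loop below
// can never pair the 'parser' slot with the tagger factory by mistake.
const componentFactories = {
  tagger: (lang) => ({ kind: 'tagger', lang }),
  parser: (lang) => ({ kind: 'parser', lang }),
  entity: (lang) => ({ kind: 'entity', lang }),
};

function buildPipeline(lang, overrides = {}) {
  const pipeline = {};
  for (const [name, create] of Object.entries(componentFactories)) {
    pipeline[name] = name in overrides ? overrides[name] : create(lang);
  }
  return pipeline;
}

console.log(buildPipeline('en').parser.kind); // 'parser', not 'tagger'
```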
Javascript | Javascript | fix tests on master | c79eda2425ace1667c9b541ec6a4bcd1e8ef2fd5 | <ide><path>packages/ember-application/tests/system/engine_initializers_test.js
<ide> QUnit.test('initializers are per-engine', function() {
<ide> initialize(engine) {}
<ide> });
<ide>
<del> throws(function() {
<add> expectAssertion(function() {
<ide> FirstEngine.initializer({
<ide> name: 'abc',
<ide> initialize(engine) {}
<ide> });
<del> }, Error, /Assertion Failed: The initializer 'abc' has already been registered'/);
<add> });
<ide>
<ide> let SecondEngine = Engine.extend();
<ide> SecondEngine.instanceInitializer({
<ide><path>packages/ember-application/tests/system/engine_instance_initializers_test.js
<ide> QUnit.test('initializers are per-engine', function() {
<ide> initialize(engine) {}
<ide> });
<ide>
<del> throws(function() {
<add> expectAssertion(function() {
<ide> FirstEngine.instanceInitializer({
<ide> name: 'abc',
<ide> initialize(engine) {}
<ide> });
<del> }, Error, /Assertion Failed: The instance initializer 'abc' has already been registered'/);
<add> });
<ide>
<ide> let SecondEngine = Engine.extend();
<ide> SecondEngine.instanceInitializer({
<ide><path>packages/ember-application/tests/system/initializers_test.js
<ide> QUnit.test('initializers are per-app', function() {
<ide> initialize(app) {}
<ide> });
<ide>
<del> throws(function() {
<add> expectAssertion(function() {
<ide> FirstApp.initializer({
<ide> name: 'abc',
<ide> initialize(app) {}
<ide> });
<del> }, Error, /Assertion Failed: The initializer 'abc' has already been registered'/);
<add> });
<ide>
<ide> let SecondApp = Application.extend();
<ide> SecondApp.instanceInitializer({
<ide><path>packages/ember-application/tests/system/instance_initializers_test.js
<ide> QUnit.test('initializers are per-app', function() {
<ide> initialize(app) {}
<ide> });
<ide>
<del> throws(function() {
<add> expectAssertion(function() {
<ide> FirstApp.instanceInitializer({
<ide> name: 'abc',
<ide> initialize(app) {}
<ide> });
<del> }, Error, /Assertion Failed: The instance initializer 'abc' has already been registered'/);
<add> });
<ide>
<ide> let SecondApp = Application.extend();
<ide> SecondApp.instanceInitializer({ | 4 |
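These tests swap QUnit's generic `throws(fn, Error, /…/)` for the framework's `expectAssertion` helper, which passes only for development-mode assertions and owns the expected message format. A rough sketch of what such a helper does — simplified, and not Ember's actual implementation (the real one also copes with production builds where assertions are stripped):

```js
// Minimal stand-in for an assertion-expecting test helper.
function expectAssertion(fn, matcher) {
  let caught = null;
  try {
    fn();
  } catch (e) {
    caught = e;
  }
  if (caught === null) {
    throw new Error('Expected a failing assertion, but nothing was thrown');
  }
  // Only framework assertions count as a pass, not arbitrary errors.
  if (!/^Assertion Failed:/.test(caught.message)) {
    throw new Error(`Expected an assertion, got: ${caught.message}`);
  }
  if (matcher && !matcher.test(caught.message)) {
    throw new Error(`Assertion message did not match ${matcher}`);
  }
}

// Usage:
expectAssertion(() => {
  throw new Error(
    "Assertion Failed: The initializer 'abc' has already been registered"
  );
}, /already been registered/);
```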
PHP | PHP | allow configuration of token guard keys | ad027d845fe3eec20cecdbe6f00d971a065d30c3 | <ide><path>src/Illuminate/Auth/AuthManager.php
<ide> public function createTokenDriver($name, $config)
<ide> // user in the database or another persistence layer where users are.
<ide> $guard = new TokenGuard(
<ide> $this->createUserProvider($config['provider'] ?? null),
<del> $this->app['request']
<add> $this->app['request'],
<add> $config['input_key'] ?? 'api_token',
<add> $config['storage_key'] ?? 'api_token'
<ide> );
<ide>
<ide> $this->app->refresh('request', $guard, 'setRequest'); | 1 |
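The two new config keys decouple the guard from the literal name `api_token`: `input_key` names the request field the token arrives in, and `storage_key` names the column it is matched against. The same shape in a hedged Express-style middleware sketch — every name below is an assumption for illustration, not Laravel's API:

```js
// Token guard with a configurable request field and storage column.
function tokenGuard(findUser, { inputKey = 'api_token', storageKey = 'api_token' } = {}) {
  return async (req, res, next) => {
    // Accept the token from the query string, body, or a bearer header.
    const token =
      (req.query && req.query[inputKey]) ||
      (req.body && req.body[inputKey]) ||
      (req.headers.authorization || '').replace(/^Bearer /, '');
    const user = token ? await findUser({ [storageKey]: token }) : null;
    if (!user) return res.status(401).end();
    req.user = user;
    next();
  };
}

// Usage with a stubbed lookup, matching on a custom column name:
const guard = tokenGuard(
  async (where) => (where.access_token === 'secret' ? { id: 1 } : null),
  { inputKey: 'token', storageKey: 'access_token' }
);
```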
Ruby | Ruby | pass sdk_path in std_cmake_args" | a508f9f94b55089d2e37e4f0b0869bd0e425d40c | <ide><path>Library/Homebrew/formula.rb
<ide> def std_cmake_args
<ide> -DCMAKE_BUILD_TYPE=Release
<ide> -DCMAKE_FIND_FRAMEWORK=LAST
<ide> -DCMAKE_VERBOSE_MAKEFILE=ON
<del> -DCMAKE_OSX_SYSROOT=#{MacOS.sdk_path}
<ide> -Wno-dev
<ide> ]
<ide> end | 1 |
PHP | PHP | remove unneeded parameter | 1f62d23c65c92a7ece0ceec2a99965adceb42d66 | <ide><path>src/Illuminate/Routing/Middleware/ValidateSignature.php
<ide> class ValidateSignature
<ide> */
<ide> public function handle($request, Closure $next)
<ide> {
<del> if ($request->hasValidSignature($request)) {
<add> if ($request->hasValidSignature()) {
<ide> return $next($request);
<ide> }
<ide> | 1 |
Java | Java | make view hierarchy optimizer smarter | e7af72b29a8f675c14c6d4c131bb9f2fd9890f46 | <ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/BaseViewManager.java
<ide> import android.os.Build;
<ide> import android.view.View;
<ide> import android.view.ViewParent;
<del>
<ide> import com.facebook.react.R;
<ide> import com.facebook.react.bridge.ReadableArray;
<ide> import com.facebook.react.uimanager.annotations.ReactProp;
<ide>
<ide> private static final String PROP_BACKGROUND_COLOR = ViewProps.BACKGROUND_COLOR;
<ide> private static final String PROP_TRANSFORM = "transform";
<del> private static final String PROP_OPACITY = "opacity";
<ide> private static final String PROP_ELEVATION = "elevation";
<ide> private static final String PROP_Z_INDEX = "zIndex";
<ide> private static final String PROP_RENDER_TO_HARDWARE_TEXTURE = "renderToHardwareTextureAndroid";
<ide> public void setTransform(T view, ReadableArray matrix) {
<ide> }
<ide> }
<ide>
<del> @ReactProp(name = PROP_OPACITY, defaultFloat = 1.f)
<add> @ReactProp(name = ViewProps.OPACITY, defaultFloat = 1.f)
<ide> public void setOpacity(T view, float opacity) {
<ide> view.setAlpha(opacity);
<ide> }
<ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/NativeViewHierarchyOptimizer.java
<ide>
<ide> package com.facebook.react.uimanager;
<ide>
<del>import javax.annotation.Nullable;
<del>
<ide> import android.util.SparseBooleanArray;
<del>
<ide> import com.facebook.infer.annotation.Assertions;
<ide> import com.facebook.react.bridge.ReadableArray;
<ide> import com.facebook.react.bridge.ReadableMapKeySetIterator;
<add>import javax.annotation.Nullable;
<ide>
<ide> /**
<ide> * Class responsible for optimizing the native view hierarchy while still respecting the final UI
<ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/ViewProps.java
<ide>
<ide> package com.facebook.react.uimanager;
<ide>
<add>import android.graphics.Color;
<add>import com.facebook.react.bridge.ReadableMap;
<ide> import java.util.Arrays;
<ide> import java.util.HashSet;
<ide>
<del>import com.facebook.react.bridge.ReadableMap;
<del>
<ide> /**
<ide> * Keys for props that need to be shared across multiple classes.
<ide> */
<ide> public class ViewProps {
<ide> public static final String TEXT_ALIGN_VERTICAL = "textAlignVertical";
<ide> public static final String TEXT_DECORATION_LINE = "textDecorationLine";
<ide> public static final String TEXT_BREAK_STRATEGY = "textBreakStrategy";
<add> public static final String OPACITY = "opacity";
<ide>
<ide> public static final String ALLOW_FONT_SCALING = "allowFontScaling";
<ide> public static final String INCLUDE_FONT_PADDING = "includeFontPadding";
<ide> public class ViewProps {
<ide> public static final String BORDER_TOP_RIGHT_RADIUS = "borderTopRightRadius";
<ide> public static final String BORDER_BOTTOM_LEFT_RADIUS = "borderBottomLeftRadius";
<ide> public static final String BORDER_BOTTOM_RIGHT_RADIUS = "borderBottomRightRadius";
<add> public static final String BORDER_COLOR = "borderColor";
<add> public static final String BORDER_LEFT_COLOR = "borderLeftColor";
<add> public static final String BORDER_RIGHT_COLOR = "borderRightColor";
<add> public static final String BORDER_TOP_COLOR = "borderTopColor";
<add> public static final String BORDER_BOTTOM_COLOR = "borderBottomColor";
<ide> public static final int[] BORDER_SPACING_TYPES = {
<ide> Spacing.ALL, Spacing.START, Spacing.END, Spacing.TOP, Spacing.BOTTOM
<ide> };
<ide> public class ViewProps {
<ide> PADDING_TOP,
<ide> PADDING_BOTTOM));
<ide>
<add> public static boolean sIsOptimizationsEnabled;
<add>
<ide> public static boolean isLayoutOnly(ReadableMap map, String prop) {
<ide> if (LAYOUT_ONLY_PROPS.contains(prop)) {
<ide> return true;
<ide> } else if (POINTER_EVENTS.equals(prop)) {
<ide> String value = map.getString(prop);
<del> return "auto".equals(value) || "box-none".equals(value);
<del> } else {
<del> return false;
<add> return "auto".equals(value);
<ide> }
<add>
<add> if (sIsOptimizationsEnabled) {
<add> switch (prop) {
<add> case OPACITY:
<add> return map.getDouble(OPACITY) == 1d; // Ignore if explicitly set to default opacity.
<add> case BACKGROUND_COLOR:
<add> return map.getInt(BACKGROUND_COLOR) == Color.TRANSPARENT;
<add> case BORDER_RADIUS: // Without a background color or border width set, a border won't show.
<add> if (map.hasKey(BACKGROUND_COLOR) && map.getInt(BACKGROUND_COLOR) != Color.TRANSPARENT) {
<add> return false;
<add> }
<add> if (map.hasKey(BORDER_WIDTH) && map.getDouble(BORDER_WIDTH) != 0d) {
<add> return false;
<add> }
<add> return true;
<add> case BORDER_COLOR:
<add> return map.getInt(BORDER_COLOR) == Color.TRANSPARENT;
<add> case BORDER_LEFT_COLOR:
<add> return map.getInt(BORDER_LEFT_COLOR) == Color.TRANSPARENT;
<add> case BORDER_RIGHT_COLOR:
<add> return map.getInt(BORDER_RIGHT_COLOR) == Color.TRANSPARENT;
<add> case BORDER_TOP_COLOR:
<add> return map.getInt(BORDER_TOP_COLOR) == Color.TRANSPARENT;
<add> case BORDER_BOTTOM_COLOR:
<add> return map.getInt(BORDER_BOTTOM_COLOR) == Color.TRANSPARENT;
<add> case BORDER_WIDTH:
<add> return map.getDouble(BORDER_WIDTH) == 0d;
<add> case BORDER_LEFT_WIDTH:
<add> return map.getDouble(BORDER_LEFT_WIDTH) == 0d;
<add> case BORDER_TOP_WIDTH:
<add> return map.getDouble(BORDER_TOP_WIDTH) == 0d;
<add> case BORDER_RIGHT_WIDTH:
<add> return map.getDouble(BORDER_RIGHT_WIDTH) == 0d;
<add> case BORDER_BOTTOM_WIDTH:
<add> return map.getDouble(BORDER_BOTTOM_WIDTH) == 0d;
<add> case "onLayout":
<add> return true;
<add> case "overflow": // We do nothing with this right now.
<add> return true;
<add> default:
<add> return false;
<add> }
<add> }
<add>
<add> return false;
<ide> }
<ide> }
<ide><path>ReactAndroid/src/main/java/com/facebook/react/views/view/ReactViewManager.java
<ide>
<ide> package com.facebook.react.views.view;
<ide>
<del>import javax.annotation.Nullable;
<del>
<del>import java.util.Locale;
<del>import java.util.Map;
<del>
<ide> import android.annotation.TargetApi;
<ide> import android.graphics.Rect;
<ide> import android.os.Build;
<ide> import android.view.View;
<del>
<del>import com.facebook.yoga.YogaConstants;
<ide> import com.facebook.react.bridge.JSApplicationIllegalArgumentException;
<ide> import com.facebook.react.bridge.ReadableArray;
<ide> import com.facebook.react.bridge.ReadableMap;
<ide> import com.facebook.react.uimanager.ViewProps;
<ide> import com.facebook.react.uimanager.annotations.ReactProp;
<ide> import com.facebook.react.uimanager.annotations.ReactPropGroup;
<add>import com.facebook.yoga.YogaConstants;
<add>import java.util.Locale;
<add>import java.util.Map;
<add>import javax.annotation.Nullable;
<ide>
<ide> /**
<ide> * View manager for AndroidViews (plain React Views).
<ide> public void setBorderWidth(ReactViewGroup view, int index, float width) {
<ide> view.setBorderWidth(SPACING_TYPES[index], width);
<ide> }
<ide>
<del> @ReactPropGroup(names = {
<del> "borderColor", "borderLeftColor", "borderRightColor", "borderTopColor", "borderBottomColor"
<del> }, customType = "Color")
<add> @ReactPropGroup(
<add> names = {
<add> ViewProps.BORDER_COLOR,
<add> ViewProps.BORDER_LEFT_COLOR,
<add> ViewProps.BORDER_RIGHT_COLOR,
<add> ViewProps.BORDER_TOP_COLOR,
<add> ViewProps.BORDER_BOTTOM_COLOR
<add> },
<add> customType = "Color"
<add> )
<ide> public void setBorderColor(ReactViewGroup view, int index, Integer color) {
<ide> float rgbComponent = color == null ? YogaConstants.UNDEFINED : (float) ((int)color & 0x00FFFFFF);
<ide> float alphaComponent = color == null ? YogaConstants.UNDEFINED : (float) ((int)color >>> 24); | 4 |
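The heart of the optimizer change is the new rule set in `ViewProps.isLayoutOnly`: a prop only forces a real native view if it would change what gets drawn, so default-valued props (opacity 1, transparent colors, zero border widths) can be flattened away. The same heuristics are easy to mirror on the JavaScript side; a hedged sketch with illustrative constants:

```js
// Decide whether a style prop is "layout only" (safe to flatten away).
// Loosely mirrors the heuristics in ViewProps.isLayoutOnly above.
const TRANSPARENT = 0x00000000;

function isLayoutOnlyProp(props, name) {
  switch (name) {
    case 'opacity':
      return props.opacity === 1; // explicit default draws nothing special
    case 'backgroundColor':
      return props.backgroundColor === TRANSPARENT;
    case 'borderWidth':
      return props.borderWidth === 0;
    case 'borderRadius':
      // Without a visible background or border, a radius has no effect.
      return (
        (props.backgroundColor ?? TRANSPARENT) === TRANSPARENT &&
        (props.borderWidth ?? 0) === 0
      );
    default:
      return false;
  }
}

console.log(isLayoutOnlyProp({ opacity: 1 }, 'opacity'));   // true
console.log(isLayoutOnlyProp({ opacity: 0.5 }, 'opacity')); // false
```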
Python | Python | replace nielsr with google namespace in tests | a28da4c4901c775be724ca1cec79ace32e6e80ee
<ide> def test_model_from_pretrained(self):
<ide> class CanineModelIntegrationTest(unittest.TestCase):
<ide> @slow
<ide> def test_inference_no_head(self):
<del> # TODO replace nielsr by google
<del> model = CanineModel.from_pretrained("nielsr/canine-s")
<add> model = CanineModel.from_pretrained("google/canine-s")
<ide> # this one corresponds to the first example of the TydiQA dev set (in Swahili)
<ide> # fmt: off
<ide> input_ids = [57344, 57349, 85, 107, 117, 98, 119, 97, 32, 119, 97, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 111, 114, 105, 32, 110, 105, 32, 107, 105, 97, 115, 105, 32, 103, 97, 110, 105, 63, 57345, 57350, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 111, 114, 105, 32, 44, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 97, 117, 32, 105, 110, 103, 46, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 40, 112, 105, 97, 58, 32, 84, 111, 108, 105, 109, 97, 110, 32, 97, 117, 32, 82, 105, 103, 105, 108, 32, 75, 101, 110, 116, 97, 117, 114, 117, 115, 41, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 105, 110, 97, 121, 111, 110, 103, 39, 97, 97, 32, 115, 97, 110, 97, 32, 107, 97, 116, 105, 107, 97, 32, 97, 110, 103, 97, 32, 121, 97, 32, 107, 117, 115, 105, 110, 105, 32, 107, 119, 101, 110, 121, 101, 32, 107, 117, 110, 100, 105, 110, 121, 111, 116, 97, 32, 121, 97, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 40, 112, 105, 97, 58, 32, 105, 110, 103, 46, 32, 67, 101, 110, 116, 97, 117, 114, 117, 115, 41, 46, 32, 78, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 107, 117, 110, 103, 97, 97, 32, 115, 97, 110, 97, 32, 121, 97, 32, 110, 110, 101, 32, 97, 110, 103, 97, 110, 105, 32, 108, 97, 107, 105, 110, 105, 32, 104, 97, 105, 111, 110, 101, 107, 97, 110, 105, 32, 107, 119, 101, 110, 121, 101, 32, 110, 117, 115, 117, 100, 117, 110, 105, 97, 32, 121, 97, 32, 107, 97, 115, 107, 97, 122, 105, 110, 105, 46, 32, 57351, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 112, 101, 107, 101, 101, 32, 107, 119, 97, 32, 115, 97, 98, 97, 98, 117, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 101, 116, 117, 32, 106, 105, 114, 97, 110, 105, 32, 107, 97, 116, 105, 107, 97, 32, 97, 110, 103, 97, 32, 105, 110, 97, 32, 117, 109, 98, 97, 108, 105, 32, 119, 97, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 50, 46, 32, 73, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 97, 110, 103, 97, 110, 105, 32, 107, 97, 114, 105, 98, 117, 32, 110, 97, 32, 107, 117, 110, 100, 105, 110, 121, 111, 116, 97, 32, 121, 97, 32, 83, 97, 108, 105, 98, 117, 32, 40, 67, 114, 117, 120, 41, 46, 32, 57352, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 40, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 41, 32, 105, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 107, 97, 109, 97, 32, 110, 121, 111, 116, 97, 32, 109, 111, 106, 97, 32, 108, 97, 107, 105, 110, 105, 32, 107, 119, 97, 32, 100, 97, 114, 117, 98, 105, 110, 105, 32, 107, 117, 98, 119, 97, 32, 105, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 107, 117, 119, 97, 32, 109, 102, 117, 109, 111, 32, 119, 97, 32, 110, 121, 111, 116, 97, 32, 116, 97, 116, 117, 32, 122, 105, 110, 97, 122, 111, 107, 97, 97, 32, 107, 97, 114, 105, 98, 117, 32, 110, 97, 32, 107, 117, 115, 104, 105, 107, 97, 109, 97, 110, 97, 32, 107, 97, 116, 105, 32, 121, 97, 111, 46, 32, 78, 121, 111, 116, 97, 32, 109, 97, 112, 97, 99, 104, 97, 32, 122, 97, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 65, 32, 110, 97, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 66, 32, 122, 105, 107, 111, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 51, 54, 32, 107, 117, 116, 111, 107, 97, 32, 107, 119, 101, 116, 117, 32, 110, 97, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 116, 97, 116, 117, 32, 65, 108, 
112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 67, 32, 97, 117, 32, 80, 114, 111, 120, 105, 109, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 105, 110, 97, 32, 117, 109, 98, 97, 108, 105, 32, 119, 97, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 50, 50, 46, 32, 57353, 32, 80, 114, 111, 120, 105, 109, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 40, 121, 97, 97, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 105, 108, 105, 121, 111, 32, 107, 97, 114, 105, 98, 117, 32, 122, 97, 105, 100, 105, 32, 110, 97, 115, 105, 41, 32, 105, 109, 101, 103, 117, 110, 100, 117, 108, 105, 119, 97, 32, 107, 117, 119, 97, 32, 110, 97, 32, 115, 97, 121, 97, 114, 105, 32, 109, 111, 106, 97, 46, 32, 86, 105, 112, 105, 109, 111, 32, 118, 105, 110, 97, 118, 121, 111, 112, 97, 116, 105, 107, 97, 110, 97, 32, 104, 97, 100, 105, 32, 115, 97, 115, 97, 32, 122, 105, 110, 97, 111, 110, 121, 101, 115, 104, 97, 32, 117, 119, 101, 122, 101, 107, 97, 110, 111, 32, 109, 107, 117, 98, 119, 97, 32, 121, 97, 32, 107, 119, 97, 109, 98, 97, 32, 115, 97, 121, 97, 114, 105, 32, 104, 105, 105, 32, 110, 105, 32, 121, 97, 32, 109, 119, 97, 109, 98, 97, 32, 40, 107, 97, 109, 97, 32, 100, 117, 110, 105, 97, 32, 121, 101, 116, 117, 44, 32, 77, 105, 114, 105, 104, 105, 32, 97, 117, 32, 90, 117, 104, 117, 114, 97, 41, 32, 110, 97, 32, 105, 110, 97, 119, 101, 122, 97, 32, 107, 117, 119, 97, 32, 110, 97, 32, 97, 110, 103, 97, 104, 101, 119, 97, 44, 32, 116, 101, 110, 97, 32, 107, 97, 116, 105, 107, 97, 32, 117, 112, 101, 111, 32, 119, 97, 32, 106, 111, 116, 111, 32, 117, 110, 97, 111, 114, 117, 104, 117, 115, 117, 32, 107, 117, 119, 101, 112, 111, 32, 107, 119, 97, 32, 117, 104, 97, 105, 46, 32, 91, 49, 93, 57345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] | 1 |
Ruby | Ruby | move comparepdf to boneyard | 8a2464bf25592fea926f7bf77611dd4d5c8759f9 | <ide><path>Library/Homebrew/tap_migrations.rb
<ide> 'jscoverage' => 'homebrew/boneyard',
<ide> 'jsl' => 'homebrew/binary',
<ide> 'nlopt' => 'homebrew/science',
<add> 'comparepdf' => 'homebrew/boneyard',
<ide> } | 1 |
PHP | PHP | support union types on event discovery | 8c65b3d8edf245ea0dbb13ed203eb23209b2b8fe | <ide><path>src/Illuminate/Foundation/Events/DiscoverEvents.php
<ide> class DiscoverEvents
<ide> */
<ide> public static function within($listenerPath, $basePath)
<ide> {
<del> return collect(static::getListenerEvents(
<add> $listeners = collect(static::getListenerEvents(
<ide> (new Finder)->files()->in($listenerPath), $basePath
<del> ))->mapToDictionary(function ($event, $listener) {
<del> return [$event => $listener];
<del> })->all();
<add> ));
<add>
<add> $discoveredEvents = [];
<add>
<add> foreach ($listeners as $listener => $events) {
<add> foreach ($events as $event) {
<add> if (! isset($discoveredEvents[$event])) {
<add> $discoveredEvents[$event] = [];
<add> }
<add>
<add> $discoveredEvents[$event][] = $listener;
<add> }
<add> }
<add>
<add> return $discoveredEvents;
<ide> }
<ide>
<ide> /**
<ide> protected static function getListenerEvents($listeners, $basePath)
<ide> }
<ide>
<ide> $listenerEvents[$listener->name.'@'.$method->name] =
<del> Reflector::getParameterClassName($method->getParameters()[0]);
<add> Reflector::getParameterClassNames($method->getParameters()[0]);
<ide> }
<ide> }
<ide>
<ide><path>src/Illuminate/Support/Reflector.php
<ide> use ReflectionClass;
<ide> use ReflectionMethod;
<ide> use ReflectionNamedType;
<add>use ReflectionUnionType;
<ide>
<ide> class Reflector
<ide> {
<ide> public static function getParameterClassName($parameter)
<ide> return;
<ide> }
<ide>
<add> return static::getTypeName($parameter, $type);
<add> }
<add>
<add> /**
<add> * Get the class names of the given parameter's type, including union types.
<add> *
<add> * @param \ReflectionParameter $parameter
<add> * @return array
<add> */
<add> public static function getParameterClassNames($parameter)
<add> {
<add> $type = $parameter->getType();
<add>
<add> if (! $type instanceof ReflectionUnionType) {
<add> return [static::getParameterClassName($parameter)];
<add> }
<add>
<add> $unionTypes = [];
<add>
<add> foreach ($type->getTypes() as $listedType) {
<add> if (! $listedType instanceof ReflectionNamedType || $listedType->isBuiltin()) {
<add> continue;
<add> }
<add>
<add> $unionTypes[] = static::getTypeName($parameter, $listedType);
<add> }
<add>
<add> return $unionTypes;
<add> }
<add>
<add> /**
<add> * Get the given type's class name.
<add> *
<add> * @param \ReflectionParameter $parameter
<add> * @param \ReflectionNamedType $type
<add> * @return string
<add> */
<add> protected static function getTypeName($parameter, $type)
<add> {
<ide> $name = $type->getName();
<ide>
<ide> if (! is_null($class = $parameter->getDeclaringClass())) {
<ide><path>tests/Integration/Foundation/DiscoverEventsTest.php
<ide> use Illuminate\Tests\Integration\Foundation\Fixtures\EventDiscovery\Listeners\AbstractListener;
<ide> use Illuminate\Tests\Integration\Foundation\Fixtures\EventDiscovery\Listeners\Listener;
<ide> use Illuminate\Tests\Integration\Foundation\Fixtures\EventDiscovery\Listeners\ListenerInterface;
<add>use Illuminate\Tests\Integration\Foundation\Fixtures\EventDiscovery\UnionListeners\UnionListener;
<ide> use Orchestra\Testbench\TestCase;
<ide>
<ide> class DiscoverEventsTest extends TestCase
<ide> class_alias(ListenerInterface::class, 'Tests\Integration\Foundation\Fixtures\Eve
<ide> ],
<ide> ], $events);
<ide> }
<add>
<add> public function testUnionEventsCanBeDiscovered()
<add> {
<add> if (version_compare(phpversion(), '8.0.0', '<')) {
<add> $this->markTestSkipped('Test uses union types.');
<add> }
<add>
<add> class_alias(UnionListener::class, 'Tests\Integration\Foundation\Fixtures\EventDiscovery\UnionListeners\UnionListener');
<add>
<add> $events = DiscoverEvents::within(__DIR__.'/Fixtures/EventDiscovery/UnionListeners', getcwd());
<add>
<add> $this->assertEquals([
<add> EventOne::class => [
<add> UnionListener::class.'@handle',
<add> ],
<add> EventTwo::class => [
<add> UnionListener::class.'@handle',
<add> ],
<add> ], $events);
<add> }
<ide> }
<ide><path>tests/Integration/Foundation/Fixtures/EventDiscovery/UnionListeners/UnionListener.php
<add><?php
<add>
<add>namespace Illuminate\Tests\Integration\Foundation\Fixtures\EventDiscovery\UnionListeners;
<add>
<add>use Illuminate\Tests\Integration\Foundation\Fixtures\EventDiscovery\Events\EventOne;
<add>use Illuminate\Tests\Integration\Foundation\Fixtures\EventDiscovery\Events\EventTwo;
<add>
<add>class UnionListener
<add>{
<add> public function handle(EventOne|EventTwo $event)
<add> {
<add> //
<add> }
<add>} | 4 |
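Because a listener's `handle()` can now name several event classes in a union type, discovery yields a list of events per listener, and the old one-to-one `mapToDictionary` inversion gives way to explicit nested loops. The inversion itself is easy to picture in JavaScript:

```js
// Invert { listener: [events...] } into { event: [listeners...] }.
function groupByEvent(listenerEvents) {
  const discovered = {};
  for (const [listener, events] of Object.entries(listenerEvents)) {
    for (const event of events) {
      (discovered[event] = discovered[event] || []).push(listener);
    }
  }
  return discovered;
}

console.log(
  groupByEvent({
    'UnionListener@handle': ['EventOne', 'EventTwo'],
    'Listener@handle': ['EventOne'],
  })
);
// => { EventOne: ['UnionListener@handle', 'Listener@handle'],
//      EventTwo: ['UnionListener@handle'] }
```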
Text | Text | fix broken link in react-18 streaming docs. | 281ef22ebabeb0a77155e68269926fe15207d27a | <ide><path>docs/advanced-features/react-18/streaming.md
<ide> # Streaming SSR (Alpha)
<ide>
<ide> React 18 will include architectural improvements to React server-side rendering (SSR) performance. This means you can use `Suspense` in your React components in streaming SSR mode and React will render them on the server and send them through HTTP streams.
<del>It's worth noting that another experimental feature, React Server Components, is based on streaming. You can read more about server components related streaming APIs in [`next/streaming`](docs/api-reference/next/streaming.md). However, this guide focuses on basic React 18 streaming.
<add>It's worth noting that another experimental feature, React Server Components, is based on streaming. You can read more about server components related streaming APIs in [`next/streaming`](/docs/api-reference/next/streaming.md). However, this guide focuses on basic React 18 streaming.
<ide>
<ide> ## Enable Streaming SSR
<ide> | 1 |
PHP | PHP | fix docblock errors | 9b60b7d7e3a4a3d75a527b074274afdbd1837806 | <ide><path>src/Database/Expression/UnaryExpression.php
<ide> class UnaryExpression implements ExpressionInterface {
<ide> *
<ide> * @param string $operator The operator to used for the expression
<ide> * @param mixed $value the value to use as the operand for the expression
<del> * @param int $mode either UnaryExpression::PREFIX or UnaryExpression::POSTFIX
<add> * @param int $mode either UnaryExpression::PREFIX or UnaryExpression::POSTFIX
<ide> */
<ide> public function __construct($operator, $value, $mode = self::PREFIX) {
<ide> $this->_operator = $operator;
<ide><path>tests/TestCase/Database/QueryTest.php
<ide> public function testIsNullWithExpressions() {
<ide> ->where(function($exp) use ($subquery) {
<ide> return $exp->isNotNull($subquery);
<ide> })
<del> ->execute();
<add> ->execute();
<ide> $this->assertNotEmpty($result->fetchAll('assoc'));
<ide>
<ide> $result = (new Query($this->connection))
<ide> public function testIsNullWithExpressions() {
<ide> }
<ide>
<ide> /**
<del> * Tests that strings passed to isNull and isNotNull will be treaded as identifiers
<add> * Tests that strings passed to isNull and isNotNull will be treated as identifiers
<ide> * when using autoQuoting
<ide> *
<ide> * @return void | 2 |
Ruby | Ruby | remove skip on tests that have been fixed | 682d624a85e2c604ba29eb7ac91ab32e8b7864be | <ide><path>actionpack/test/controller/parameters/mutators_test.rb
<ide> class ParametersMutatorsTest < ActiveSupport::TestCase
<ide> end
<ide>
<ide> test "select! retains permitted status" do
<del> jruby_skip "https://github.com/jruby/jruby/issues/3137"
<del>
<ide> @params.permit!
<ide> assert @params.select! { |k| k != "person" }.permitted?
<ide> end
<ide>
<ide> test "select! retains unpermitted status" do
<del> jruby_skip "https://github.com/jruby/jruby/issues/3137"
<del>
<ide> assert_not @params.select! { |k| k != "person" }.permitted?
<ide> end
<ide> | 1 |
Javascript | Javascript | increase readline coverage | b2ab41e5ae6213b17de8031771585030aea046e2 | <ide><path>test/parallel/test-readline.js
<add>'use strict';
<add>const common = require('../common');
<add>const { PassThrough } = require('stream');
<add>const readline = require('readline');
<add>const assert = require('assert');
<add>
<add>{
<add> const input = new PassThrough();
<add> const rl = readline.createInterface({
<add> terminal: true,
<add> input: input
<add> });
<add>
<add> rl.on('line', common.mustCall((data) => {
<add> assert.strictEqual(data, 'abc');
<add> }));
<add>
<add> input.end('abc');
<add>}
<add>
<add>{
<add> const input = new PassThrough();
<add> const rl = readline.createInterface({
<add> terminal: true,
<add> input: input
<add> });
<add>
<add> rl.on('line', common.mustNotCall('must not be called before newline'));
<add>
<add> input.write('abc');
<add>}
<add>
<add>{
<add> const input = new PassThrough();
<add> const rl = readline.createInterface({
<add> terminal: true,
<add> input: input
<add> });
<add>
<add> rl.on('line', common.mustCall((data) => {
<add> assert.strictEqual(data, 'abc');
<add> }));
<add>
<add> input.write('abc\n');
<add>} | 1 |
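The new tests above boil down to this terminal-mode contract — 'line' fires on a newline (or stream end), never on a partial write. A minimal sketch:

'use strict';
const { PassThrough } = require('stream');
const readline = require('readline');

const input = new PassThrough();
const rl = readline.createInterface({ terminal: true, input });
rl.on('line', (line) => console.log('got line:', line));

input.write('abc'); // no 'line' event yet
input.write('\n');  // prints: got line: abc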
PHP | PHP | extract duplicate logic out of serverrequest | 7b9c2b5527cd0636431f97b3fda6413a3faaed14 | <ide><path>src/Http/ContentTypeNegotiation.php
<ide> public function prefersChoice(RequestInterface $request, array $types): ?string
<ide> foreach ($parsed as $acceptTypes) {
<ide> $common = array_intersect($acceptTypes, $types);
<ide> if ($common) {
<del> return $common[0];
<add> return array_shift($common);
<ide> }
<ide> }
<ide>
<ide><path>src/Http/ServerRequest.php
<ide> public function subdomains(int $tldLength = 1): array
<ide> */
<ide> public function accepts(?string $type = null)
<ide> {
<del> $raw = $this->parseAccept();
<add> $content = new ContentTypeNegotiation();
<add> if ($type) {
<add> return $content->prefersChoice($this, [$type]) !== null;
<add> }
<add>
<ide> $accept = [];
<del> foreach ($raw as $types) {
<add> foreach ($content->parseAccept($this) as $types) {
<ide> $accept = array_merge($accept, $types);
<ide> }
<del> if ($type === null) {
<del> return $accept;
<del> }
<ide>
<del> return in_array($type, $accept, true);
<add> return $accept;
<ide> }
<ide>
<ide> /**
<ide> public function accepts(?string $type = null)
<ide> * of the accepted content types.
<ide> *
<ide> * @return array An array of `prefValue => [content/types]`
<add> * @deprecated 4.4.0 Use accepts() or ContentTypeNegotiation instead.
<ide> */
<ide> public function parseAccept(): array
<ide> {
<del> return $this->_parseAcceptWithQualifier($this->getHeaderLine('Accept'));
<add> return (new ContentTypeNegotiation())->parseAccept($this);
<ide> }
<ide>
<ide> /**
<ide> public function parseAccept(): array
<ide> */
<ide> public function acceptLanguage(?string $language = null)
<ide> {
<del> $raw = $this->_parseAcceptWithQualifier($this->getHeaderLine('Accept-Language'));
<add> $raw = (new ContentTypeNegotiation())->parseAccept($this, 'Accept-Language');
<ide> $accept = [];
<ide> foreach ($raw as $languages) {
<ide> foreach ($languages as &$lang) {
<ide> public function acceptLanguage(?string $language = null)
<ide> return in_array(strtolower($language), $accept, true);
<ide> }
<ide>
<del> /**
<del> * Parse Accept* headers with qualifier options.
<del> *
<del> * Only qualifiers will be extracted, any other accept extensions will be
<del> * discarded as they are not frequently used.
<del> *
<del> * @param string $header Header to parse.
<del> * @return array
<del> */
<del> protected function _parseAcceptWithQualifier(string $header): array
<del> {
<del> $accept = [];
<del> $headers = explode(',', $header);
<del> foreach (array_filter($headers) as $value) {
<del> $prefValue = '1.0';
<del> $value = trim($value);
<del>
<del> $semiPos = strpos($value, ';');
<del> if ($semiPos !== false) {
<del> $params = explode(';', $value);
<del> $value = trim($params[0]);
<del> foreach ($params as $param) {
<del> $qPos = strpos($param, 'q=');
<del> if ($qPos !== false) {
<del> $prefValue = substr($param, $qPos + 2);
<del> }
<del> }
<del> }
<del>
<del> if (!isset($accept[$prefValue])) {
<del> $accept[$prefValue] = [];
<del> }
<del> if ($prefValue) {
<del> $accept[$prefValue][] = $value;
<del> }
<del> }
<del> krsort($accept);
<del>
<del> return $accept;
<del> }
<del>
<ide> /**
<ide> * Read a specific query value or dotted path.
<ide> * | 2 |
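A sketch of the refactored negotiation path from the caller's side (assuming the usual config-array ServerRequest constructor; header values are illustrative):

<?php

use Cake\Http\ServerRequest;

$request = new ServerRequest([
    'environment' => ['HTTP_ACCEPT' => 'application/json;q=0.8,text/html'],
]);

// Parsing now happens in ContentTypeNegotiation; accepts() only flattens the
// qualifier-sorted map, so the q=1.0 type sorts first:
$request->accepts();                   // ['text/html', 'application/json']

// With an argument it defers to prefersChoice():
$request->accepts('application/json'); // true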
Javascript | Javascript | simplify duration arguments to benchmarks | 4b80f217cd91a5b29089d94398059b85b1ef8a93 | <ide><path>benchmark/fs/readfile.js
<ide> var filename = path.resolve(__dirname, '.removeme-benchmark-garbage');
<ide> var fs = require('fs');
<ide>
<ide> var bench = common.createBenchmark(main, {
<del> dur: [1, 3],
<add> dur: [5],
<ide> len: [1024, 16 * 1024 * 1024],
<ide> concurrent: [1, 10]
<ide> });
<ide><path>benchmark/fs/write-stream-throughput.js
<ide> var filename = path.resolve(__dirname, '.removeme-benchmark-garbage');
<ide> var fs = require('fs');
<ide>
<ide> var bench = common.createBenchmark(main, {
<del> dur: [1, 3],
<add> dur: [5],
<ide> type: ['buf', 'asc', 'utf'],
<ide> size: [2, 1024, 65535, 1024 * 1024]
<ide> });
<ide><path>benchmark/misc/startup.js
<ide> var i = 0;
<ide> var start;
<ide>
<ide> var bench = common.createBenchmark(startNode, {
<del> dur: [1, 3]
<add> dur: [1]
<ide> });
<ide>
<ide> function startNode(conf) {
<ide><path>benchmark/net/net-c2s.js
<ide> var PORT = common.PORT;
<ide> var bench = common.createBenchmark(main, {
<ide> len: [102400, 1024 * 1024 * 16],
<ide> type: ['utf', 'asc', 'buf'],
<del> dur: [1, 3],
<add> dur: [5],
<ide> });
<ide>
<ide> var dur;
<ide><path>benchmark/net/net-pipe.js
<ide> var PORT = common.PORT;
<ide> var bench = common.createBenchmark(main, {
<ide> len: [102400, 1024 * 1024 * 16],
<ide> type: ['utf', 'asc', 'buf'],
<del> dur: [1, 3],
<add> dur: [5],
<ide> });
<ide>
<ide> var dur;
<ide><path>benchmark/net/net-s2c.js
<ide> var PORT = common.PORT;
<ide> var bench = common.createBenchmark(main, {
<ide> len: [102400, 1024 * 1024 * 16],
<ide> type: ['utf', 'asc', 'buf'],
<del> dur: [1, 3],
<add> dur: [5]
<ide> });
<ide>
<ide> var dur;
<ide><path>benchmark/net/tcp-raw-c2s.js
<ide> var common = require('../common.js');
<ide> var bench = common.createBenchmark(main, {
<ide> len: [102400, 1024 * 1024 * 16],
<ide> type: ['utf', 'asc', 'buf'],
<del> dur: [1, 3],
<add> dur: [5]
<ide> });
<ide>
<ide> var TCP = process.binding('tcp_wrap').TCP;
<ide><path>benchmark/net/tcp-raw-pipe.js
<ide> var common = require('../common.js');
<ide> var bench = common.createBenchmark(main, {
<ide> len: [102400, 1024 * 1024 * 16],
<ide> type: ['utf', 'asc', 'buf'],
<del> dur: [1, 3],
<add> dur: [5]
<ide> });
<ide>
<ide> var TCP = process.binding('tcp_wrap').TCP;
<ide><path>benchmark/net/tcp-raw-s2c.js
<ide> var common = require('../common.js');
<ide> var bench = common.createBenchmark(main, {
<ide> len: [102400, 1024 * 1024 * 16],
<ide> type: ['utf', 'asc', 'buf'],
<del> dur: [1, 3],
<add> dur: [5]
<ide> });
<ide>
<ide> var TCP = process.binding('tcp_wrap').TCP;
<ide><path>benchmark/tls/throughput.js
<ide> var common = require('../common.js');
<ide> var bench = common.createBenchmark(main, {
<del> dur: [1, 3],
<add> dur: [5],
<ide> type: ['buf', 'asc', 'utf'],
<ide> size: [2, 1024, 1024 * 1024]
<ide> });
<ide><path>benchmark/tls/tls-connect.js
<ide> var assert = require('assert'),
<ide> var common = require('../common.js');
<ide> var bench = common.createBenchmark(main, {
<ide> concurrency: [1, 10],
<del> dur: [1, 3]
<add> dur: [5]
<ide> });
<ide>
<ide> var clientConn = 0; | 11 |
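For context, `dur` is the run time in seconds, so collapsing [1, 3] into [5] trades benchmark-matrix size for one longer, steadier measurement per configuration. The loop shape is roughly this (names approximate the benchmark/common.js conventions of the era; not a literal copy):

'use strict';
const common = require('../common.js');
const bench = common.createBenchmark(main, { dur: [5], len: [1024] });

function main(conf) {
  const chunk = Buffer.alloc(conf.len); // era-appropriate code used new Buffer(len)
  const end = Date.now() + conf.dur * 1000;
  let bytes = 0;
  bench.start();
  (function tick() {
    bytes += chunk.length;
    if (Date.now() < end) return setImmediate(tick);
    bench.end(bytes / (1024 * 1024)); // report MB processed over `dur` seconds
  })();
}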
PHP | PHP | remove superfluous docblock notes | ca864c883c69459671ca85cdf8837ab3fcc84495 | <ide><path>src/Console/ConsoleInput.php
<ide> <?php
<ide> /**
<del> * ConsoleInput file.
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>src/View/Helper/RssHelper.php
<ide> <?php
<ide> /**
<del> * RSS Helper class file.
<del> *
<del> * Simplifies the output of RSS feeds.
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/TestCase/Configure/Engine/IniConfigTest.php
<ide> <?php
<ide> /**
<del> * IniConfigTest
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/TestCase/Configure/Engine/PhpConfigTest.php
<ide> <?php
<ide> /**
<del> * PhpConfigReaderTest
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/TestCase/Controller/Component/Acl/IniAclTest.php
<ide> <?php
<ide> /**
<del> * IniAclTest file.
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/TestCase/Controller/Component/Acl/PhpAclTest.php
<ide> <?php
<ide> /**
<del> * PhpAclTest file.
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/TestCase/Routing/Route/PluginShortRouteTest.php
<ide> <?php
<ide> /**
<del> * Request Test case file.
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/TestCase/Routing/Route/RedirectRouteTest.php
<ide> <?php
<ide> /**
<del> * Request Test case file.
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Config/load.php
<ide> <?php
<ide> /**
<del> * Test Suite TestPlugin config file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Config/more.load.php
<ide> <?php
<ide> /**
<del> * Test Suite TestPlugin config file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Console/Command/ExampleShell.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Controller/Component/PluginsComponent.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Controller/Component/TestPluginComponent.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Controller/Component/TestPluginOtherComponent.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Controller/TestsController.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Vendor/sample/sample_plugin.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/Vendor/welcome.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPlugin/View/Helper/PluggedHelperHelper.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPluginTwo/Console/Command/ExampleShell.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/Plugin/TestPluginTwo/Console/Command/WelcomeShell.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/vendor/Test/MyTest.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/vendor/Test/hello.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/vendor/sample/configure_test_vendor_sample.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/vendor/somename/some.name.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide><path>tests/test_app/vendor/welcome.php
<ide> <?php
<ide> /**
<del> * Short description for file.
<del> *
<ide> * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
<ide> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> * | 25 |
PHP | PHP | trim comment in config class | 9c6d4871b2718e7ad5e749ac50feaaa3a16dadd4 | <ide><path>laravel/config.php
<ide> class Config {
<ide> * // Determine if the "session" configuration file exists
<ide> * $exists = Config::has('session');
<ide> *
<del> * // Determine if the "timezone" option exists in the "application" configuration array
<add> * // Determine if the "timezone" option exists in the "application" configuration
<ide> * $exists = Config::has('application.timezone');
<ide> * </code>
<ide> * | 1 |
Text | Text | fix typo s/prefered/preferred/ [ci skip] | b96990da114daea41f171c565eb0cc617594d043 | <ide><path>guides/source/api_app.md
<ide> when added (including the session key), so you can't just add a `session_store.r
<ide> may work, but your session options will be ignored - i.e the session key will default to `_session_id`)
<ide>
<ide> Instead of the initializer, you'll have to set the relevant options somewhere before your middleware is
<del>built (like `config/application.rb`) and pass them to your prefered middleware, like this:
<add>built (like `config/application.rb`) and pass them to your preferred middleware, like this:
<ide>
<ide> ```ruby
<ide> config.session_store :cookie_store, key: '_interslice_session' # <-- this also configures session_options for use below | 1 |
Python | Python | make output from users cli command more consistent | f38ebaf438e8176c4de814090fa5b773735bc9c0 | <ide><path>airflow/cli/commands/user_command.py
<ide> def users_create(args):
<ide> return
<ide> user = appbuilder.sm.add_user(args.username, args.firstname, args.lastname, args.email, role, password)
<ide> if user:
<del> print(f'{args.role} user {args.username} created')
<add> print(f'User "{args.username}" created with role "{args.role}"')
<ide> else:
<ide> raise SystemExit('Failed to create user')
<ide>
<ide> def users_delete(args):
<ide> appbuilder = cached_app().appbuilder
<ide>
<ide> if appbuilder.sm.del_register_user(user):
<del> print(f'User {args.username} deleted')
<add> print(f'User "{user.username}" deleted')
<ide> else:
<ide> raise SystemExit('Failed to delete user')
<ide>
<ide> def users_manage_role(args, remove=False):
<ide> role = appbuilder.sm.find_role(args.role)
<ide> if not role:
<ide> valid_roles = appbuilder.sm.get_all_roles()
<del> raise SystemExit(f'{args.role} is not a valid role. Valid roles are: {valid_roles}')
<add> raise SystemExit(f'"{args.role}" is not a valid role. Valid roles are: {valid_roles}')
<ide>
<ide> if remove:
<del> if role in user.roles:
<del> user.roles = [r for r in user.roles if r != role]
<del> appbuilder.sm.update_user(user)
<del> print(f'User "{user}" removed from role "{args.role}"')
<del> else:
<del> raise SystemExit(f'User "{user}" is not a member of role "{args.role}"')
<add> if role not in user.roles:
<add> raise SystemExit(f'User "{user.username}" is not a member of role "{args.role}"')
<add>
<add> user.roles = [r for r in user.roles if r != role]
<add> appbuilder.sm.update_user(user)
<add> print(f'User "{user.username}" removed from role "{args.role}"')
<ide> else:
<ide> if role in user.roles:
<del> raise SystemExit(f'User "{user}" is already a member of role "{args.role}"')
<del> else:
<del> user.roles.append(role)
<del> appbuilder.sm.update_user(user)
<del> print(f'User "{user}" added to role "{args.role}"')
<add> raise SystemExit(f'User "{user.username}" is already a member of role "{args.role}"')
<add>
<add> user.roles.append(role)
<add> appbuilder.sm.update_user(user)
<add> print(f'User "{user.username}" added to role "{args.role}"')
<ide>
<ide>
<ide> def users_export(args):
<ide> def _import_users(users_list):
<ide> role = appbuilder.sm.find_role(rolename)
<ide> if not role:
<ide> valid_roles = appbuilder.sm.get_all_roles()
<del> raise SystemExit(f"Error: '{rolename}' is not a valid role. Valid roles are: {valid_roles}")
<del> else:
<del> roles.append(role)
<add> raise SystemExit(f'Error: "{rolename}" is not a valid role. Valid roles are: {valid_roles}')
<add>
<add> roles.append(role)
<ide>
<ide> required_fields = ['username', 'firstname', 'lastname', 'email', 'roles']
<ide> for field in required_fields:
<ide> def _import_users(users_list):
<ide> existing_user = appbuilder.sm.find_user(email=user['email'])
<ide> if existing_user:
<ide> print(f"Found existing user with email '{user['email']}'")
<del> existing_user.roles = roles
<del> existing_user.first_name = user['firstname']
<del> existing_user.last_name = user['lastname']
<del>
<ide> if existing_user.username != user['username']:
<ide> raise SystemExit(
<ide> "Error: Changing the username is not allowed - "
<ide> "please delete and recreate the user with "
<ide> "email '{}'".format(user['email'])
<ide> )
<ide>
<add> existing_user.roles = roles
<add> existing_user.first_name = user['firstname']
<add> existing_user.last_name = user['lastname']
<ide> appbuilder.sm.update_user(existing_user)
<ide> users_updated.append(user['email'])
<ide> else:
<ide><path>tests/cli/commands/test_user_command.py
<ide> def test_cli_delete_user(self):
<ide> 'test3',
<ide> ]
<ide> )
<del> user_command.users_delete(args)
<add> with redirect_stdout(io.StringIO()) as stdout:
<add> user_command.users_delete(args)
<add> assert 'User "test3" deleted' in stdout.getvalue()
<ide>
<ide> def test_cli_delete_user_by_email(self):
<ide> args = self.parser.parse_args(
<ide> def test_cli_delete_user_by_email(self):
<ide> '[email protected]',
<ide> ]
<ide> )
<del> user_command.users_delete(args)
<add> with redirect_stdout(io.StringIO()) as stdout:
<add> user_command.users_delete(args)
<add> assert 'User "test4" deleted' in stdout.getvalue()
<ide>
<ide> @pytest.mark.parametrize(
<ide> 'args,raise_match',
<ide> def test_cli_delete_user_by_email(self):
<ide> ),
<ide> ],
<ide> )
<del> def test_find_user(self, args, raise_match):
<add> def test_find_user_exceptions(self, args, raise_match):
<ide> args = self.parser.parse_args(args)
<ide> with pytest.raises(
<ide> SystemExit,
<ide> def _export_users_to_file(self):
<ide> user_command.users_export(args)
<ide> return f.name
<ide>
<del> def test_cli_add_user_role(self):
<add> @pytest.fixture()
<add> def create_user_test4(self):
<ide> args = self.parser.parse_args(
<ide> [
<ide> 'users',
<ide> def test_cli_add_user_role(self):
<ide> )
<ide> user_command.users_create(args)
<ide>
<add> def test_cli_add_user_role(self, create_user_test4):
<ide> assert not _does_user_belong_to_role(
<ide> appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'
<ide> ), "User should not yet be a member of role 'Op'"
<ide>
<ide> args = self.parser.parse_args(['users', 'add-role', '--username', 'test4', '--role', 'Op'])
<del> user_command.users_manage_role(args, remove=False)
<add> with redirect_stdout(io.StringIO()) as stdout:
<add> user_command.users_manage_role(args, remove=False)
<add> assert 'User "test4" added to role "Op"' in stdout.getvalue()
<ide>
<ide> assert _does_user_belong_to_role(
<ide> appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'
<ide> ), "User should have been added to role 'Op'"
<ide>
<del> def test_cli_remove_user_role(self):
<del> args = self.parser.parse_args(
<del> [
<del> 'users',
<del> 'create',
<del> '--username',
<del> 'test4',
<del> '--lastname',
<del> 'doe',
<del> '--firstname',
<del> 'jon',
<del> '--email',
<del> TEST_USER1_EMAIL,
<del> '--role',
<del> 'Viewer',
<del> '--use-random-password',
<del> ]
<del> )
<del> user_command.users_create(args)
<del>
<add> def test_cli_remove_user_role(self, create_user_test4):
<ide> assert _does_user_belong_to_role(
<ide> appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'
<ide> ), "User should have been created with role 'Viewer'"
<ide>
<ide> args = self.parser.parse_args(['users', 'remove-role', '--username', 'test4', '--role', 'Viewer'])
<del> user_command.users_manage_role(args, remove=True)
<add> with redirect_stdout(io.StringIO()) as stdout:
<add> user_command.users_manage_role(args, remove=True)
<add> assert 'User "test4" removed from role "Viewer"' in stdout.getvalue()
<ide>
<ide> assert not _does_user_belong_to_role(
<ide> appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'
<ide> ), "User should have been removed from role 'Viewer'"
<add>
<add> @pytest.mark.parametrize(
<add> "action, role, message",
<add> [
<add> ["add-role", "Viewer", 'User "test4" is already a member of role "Viewer"'],
<add> ["add-role", "Foo", '"Foo" is not a valid role. Valid roles are'],
<add> ["remove-role", "Admin", 'User "test4" is not a member of role "Admin"'],
<add> ["remove-role", "Foo", '"Foo" is not a valid role. Valid roles are'],
<add> ],
<add> )
<add> def test_cli_manage_roles_exceptions(self, create_user_test4, action, role, message):
<add> args = self.parser.parse_args(['users', action, '--username', 'test4', '--role', role])
<add> with pytest.raises(SystemExit, match=message):
<add> if action == 'add-role':
<add> user_command.add_role(args)
<add> else:
<add> user_command.remove_role(args) | 2 |
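The add/remove branches above share one shape; condensed as a sketch (not the literal Airflow code — `user`, `role`, and `appbuilder` follow the Flask-AppBuilder interfaces the command uses):

def manage_role(appbuilder, user, role, remove=False):
    if remove:
        if role not in user.roles:
            raise SystemExit(f'User "{user.username}" is not a member of role "{role.name}"')
        user.roles = [r for r in user.roles if r != role]
        verb = 'removed from'
    else:
        if role in user.roles:
            raise SystemExit(f'User "{user.username}" is already a member of role "{role.name}"')
        user.roles.append(role)
        verb = 'added to'
    appbuilder.sm.update_user(user)
    print(f'User "{user.username}" {verb} role "{role.name}"')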
Ruby | Ruby | add collectionproxy#include? documentation | 5111ec446bedbc8d0ff6ef11de1000047e0edff5 | <ide><path>activerecord/lib/active_record/associations/collection_proxy.rb
<ide> class CollectionProxy < Relation
<ide> # pet.group == 'cats'
<ide> # end
<ide> # # => true
<add>
<add> ##
<add> # :method: include?
<add> # Returns true if the given object is present in the collection.
<add> #
<add> # class Person < ActiveRecord::Base
<add> # has_many :pets
<add> # end
<add> #
<add> # person.pets # => [#<Pet id: 20, name: "Snoop">]
<add> #
<add> # person.pets.include?(Pet.find(20)) # => true
<add> # person.pets.include?(Pet.find(21)) # => false
<ide> delegate :select, :find, :first, :last,
<ide> :build, :create, :create!,
<ide> :concat, :replace, :delete_all, :destroy_all, :delete, :destroy, :uniq, | 1 |
Javascript | Javascript | fix the second half of the bug in suspendlisteners | d034d11591eb68be88021004e5f3de5c4cc0f4ba | <ide><path>packages/ember-metal/lib/events.js
<ide> function suspendListeners(obj, eventNames, target, method, callback) {
<ide> }
<ide>
<ide> var suspendedActions = [],
<add> actionsList = [],
<ide> eventName, actions, i, l;
<ide>
<ide> for (i=0, l=eventNames.length; i<l; i++) {
<ide> function suspendListeners(obj, eventNames, target, method, callback) {
<ide> if (actionIndex !== -1) {
<ide> actions[actionIndex+2] |= SUSPENDED;
<ide> suspendedActions.push(actionIndex);
<add> actionsList.push(actions);
<ide> }
<ide> }
<ide>
<ide> function suspendListeners(obj, eventNames, target, method, callback) {
<ide> function finalizer() {
<ide> for (var i = 0, l = suspendedActions.length; i < l; i++) {
<ide> var actionIndex = suspendedActions[i];
<del> actions[actionIndex+2] &= ~SUSPENDED;
<add> actionsList[i][actionIndex+2] &= ~SUSPENDED;
<ide> }
<ide> }
<ide> | 1 |
Ruby | Ruby | avoid should in test names | 0435d65000712bbde2734ddf6e24dcc39c8dda59 | <ide><path>actionmailer/test/parameterized_test.rb
<ide> class ParameterizedTest < ActiveSupport::TestCase
<ide> assert_equal("So says [email protected]", @mail.body.encoded)
<ide> end
<ide>
<del> test "should enqueue the email with params" do
<add> test "enqueue the email with params" do
<ide> assert_performed_with(job: ActionMailer::Parameterized::DeliveryJob, args: ["ParamsMailer", "invitation", "deliver_now", { inviter: "[email protected]", invitee: "[email protected]" } ]) do
<ide> @mail.deliver_later
<ide> end | 1 |
PHP | PHP | fix coding standards | b1aa75bec07094407eb2569ea80d71142ead220b | <ide><path>lib/Cake/Test/Case/View/MediaViewTest.php
<ide> public function testRenderUpperExtension() {
<ide> $this->MediaView->render();
<ide> }
<ide>
<del>
<ide> } | 1 |
Python | Python | allow overriding meta from spacy.blank | 7dfc4bc062e75dcf6dcc6c1f3d01a8ea1b5e014c | <ide><path>spacy/__init__.py
<ide> def load(
<ide>
<ide>
<ide> def blank(
<del> name: str, *, config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict()
<add> name: str,
<add> *,
<add> config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
<add> meta: Dict[str, Any] = util.SimpleFrozenDict()
<ide> ) -> Language:
<ide> """Create a blank nlp object for a given language code.
<ide>
<ide> name (str): The language code, e.g. "en".
<ide> config (Dict[str, Any] / Config): Optional config overrides.
<add> meta (Dict[str, Any]): Overrides for nlp.meta.
<ide> RETURNS (Language): The nlp object.
<ide> """
<ide> LangClass = util.get_lang_class(name)
<del> return LangClass.from_config(config)
<add> return LangClass.from_config(config, meta=meta)
<ide><path>spacy/language.py
<ide> def from_config(
<ide> vocab: Union[Vocab, bool] = True,
<ide> disable: Iterable[str] = SimpleFrozenList(),
<ide> exclude: Iterable[str] = SimpleFrozenList(),
<add> meta: Dict[str, Any] = SimpleFrozenDict(),
<ide> auto_fill: bool = True,
<ide> validate: bool = True,
<ide> ) -> "Language":
<ide> def from_config(
<ide> explicitly enable them by calling nlp.enable_pipe.
<ide> exclude (Iterable[str]): Names of pipeline components to exclude.
<ide> Excluded components won't be loaded.
<add> meta (Dict[str, Any]): Meta overrides for nlp.meta.
<ide> auto_fill (bool): Automatically fill in missing values in config based
<ide> on defaults and function argument annotations.
<ide> validate (bool): Validate the component config and arguments against
<ide> def from_config(
<ide> # inside stuff like the spacy train function. If we loaded them here,
<ide> # then we would load them twice at runtime: once when we make from config,
<ide> # and then again when we load from disk.
<del> nlp = lang_cls(vocab=vocab, create_tokenizer=create_tokenizer)
<add> nlp = lang_cls(vocab=vocab, create_tokenizer=create_tokenizer, meta=meta)
<ide> if after_creation is not None:
<ide> nlp = after_creation(nlp)
<ide> if not isinstance(nlp, cls):
<ide><path>spacy/tests/test_language.py
<ide> from spacy.training import Example
<ide> from spacy.lang.en import English
<ide> from spacy.util import registry
<add>import spacy
<ide>
<ide> from .util import add_vecs_to_vocab, assert_docs_equal
<ide>
<ide> def create_tokenizer(nlp):
<ide> assert [t.text for t in doc] == ["_hello", "_world"]
<ide> doc = list(nlp.pipe(["hello world"]))[0]
<ide> assert [t.text for t in doc] == ["_hello", "_world"]
<add>
<add>
<add>def test_spacy_blank():
<add> nlp = spacy.blank("en")
<add> assert nlp.config["training"]["dropout"] == 0.1
<add> config = {"training": {"dropout": 0.2}}
<add> meta = {"name": "my_custom_model"}
<add> nlp = spacy.blank("en", config=config, meta=meta)
<add> assert nlp.config["training"]["dropout"] == 0.2
<add> assert nlp.meta["name"] == "my_custom_model" | 3 |
Javascript | Javascript | move wkwebview into webview.ios.js | 95801f1eda2d723d9b87760d88fa9f1a1bb33ab1 | <ide><path>Libraries/Components/WKWebView/WKWebView.android.js
<del>/**
<del> * Copyright (c) 2015-present, Facebook, Inc.
<del> *
<del> * This source code is licensed under the MIT license found in the
<del> * LICENSE file in the root directory of this source tree.
<del> *
<del> * @format
<del> * @flow
<del> * @providesModule WKWebView
<del> */
<del>
<del>const React = require('React');
<del>const View = require('View');
<del>const Text = require('Text');
<del>
<del>module.exports = () => {
<del> return (
<del> <View>
<del> <Text>Android version not implemented.</Text>
<del> </View>
<del> );
<del>};
<ide><path>Libraries/Components/WebView/WebView.android.js
<ide> class WebView extends React.Component {
<ide> PropTypes.number,
<ide> ]),
<ide>
<add> /**
<add> * If true, use WKWebView instead of UIWebView.
<add> * @platform ios
<add> */
<add> useWebKit: PropTypes.bool,
<add>
<ide> /**
<ide> * Used on Android only, JS is enabled by default for WebView on iOS
<ide> * @platform android
<ide><path>Libraries/Components/WebView/WebView.ios.js
<ide> const requireNativeComponent = require('requireNativeComponent');
<ide> const resolveAssetSource = require('resolveAssetSource');
<ide>
<ide> const RCTWebViewManager = require('NativeModules').WebViewManager;
<add>const RCTWKWebViewManager = require('NativeModules').WKWebViewManager;
<ide>
<ide> const BGWASH = 'rgba(255,255,255,0.8)';
<ide> const RCT_WEBVIEW_REF = 'webview';
<ide> const DataDetectorTypes = [
<ide> 'link',
<ide> 'address',
<ide> 'calendarEvent',
<add> 'trackingNumber',
<add> 'flightNumber',
<add> 'lookupSuggestion',
<ide> 'none',
<ide> 'all',
<ide> ];
<ide> class WebView extends React.Component {
<ide> PropTypes.number,
<ide> ]),
<ide>
<add> /**
<add> * If true, use WKWebView instead of UIWebView.
<add> * @platform ios
<add> */
<add> useWebKit: PropTypes.bool,
<add>
<ide> /**
<ide> * Function that returns a view to show if there's an error.
<ide> */
<ide> class WebView extends React.Component {
<ide> * - `'none'`
<ide> * - `'all'`
<ide> *
<add> * With the new WebKit implementation, we have three new values:
<add> * - `'trackingNumber'`,
<add> * - `'flightNumber'`,
<add> * - `'lookupSuggestion'`,
<add> *
<ide> * @platform ios
<ide> */
<ide> dataDetectorTypes: PropTypes.oneOfType([
<ide> class WebView extends React.Component {
<ide>
<ide> const nativeConfig = this.props.nativeConfig || {};
<ide>
<del> const viewManager = nativeConfig.viewManager || RCTWebViewManager;
<add> let viewManager = nativeConfig.viewManager;
<add>
<add> if (this.props.useWebKit) {
<add> viewManager = viewManager || RCTWKWebViewManager;
<add> } else {
<add> viewManager = viewManager || RCTWebViewManager;
<add> }
<ide>
<ide> const compiledWhitelist = [
<ide> 'about:blank',
<ide> class WebView extends React.Component {
<ide>
<ide> const messagingEnabled = typeof this.props.onMessage === 'function';
<ide>
<del> const NativeWebView = nativeConfig.component || RCTWebView;
<add> let NativeWebView = nativeConfig.component;
<add>
<add> if (this.props.useWebKit) {
<add> NativeWebView = NativeWebView || RCTWKWebView;
<add> } else {
<add> NativeWebView = NativeWebView || RCTWebView;
<add> }
<ide>
<ide> const webView = (
<ide> <NativeWebView
<ide> class WebView extends React.Component {
<ide> );
<ide> }
<ide>
<add> _getCommands() {
<add> if (!this.props.useWebKit) {
<add> return UIManager.RCTWebView.Commands;
<add> }
<add>
<add> return UIManager.RCTWKWebView.Commands;
<add> }
<add>
<ide> /**
<ide> * Go forward one page in the web view's history.
<ide> */
<ide> goForward = () => {
<ide> UIManager.dispatchViewManagerCommand(
<ide> this.getWebViewHandle(),
<del> UIManager.RCTWebView.Commands.goForward,
<add> this._getCommands().goForward,
<ide> null,
<ide> );
<ide> };
<ide> class WebView extends React.Component {
<ide> goBack = () => {
<ide> UIManager.dispatchViewManagerCommand(
<ide> this.getWebViewHandle(),
<del> UIManager.RCTWebView.Commands.goBack,
<add> this._getCommands().goBack,
<ide> null,
<ide> );
<ide> };
<ide> class WebView extends React.Component {
<ide> this.setState({viewState: WebViewState.LOADING});
<ide> UIManager.dispatchViewManagerCommand(
<ide> this.getWebViewHandle(),
<del> UIManager.RCTWebView.Commands.reload,
<add> this._getCommands().reload,
<ide> null,
<ide> );
<ide> };
<ide> class WebView extends React.Component {
<ide> stopLoading = () => {
<ide> UIManager.dispatchViewManagerCommand(
<ide> this.getWebViewHandle(),
<del> UIManager.RCTWebView.Commands.stopLoading,
<add> this._getCommands().stopLoading,
<ide> null,
<ide> );
<ide> };
<ide> class WebView extends React.Component {
<ide> postMessage = data => {
<ide> UIManager.dispatchViewManagerCommand(
<ide> this.getWebViewHandle(),
<del> UIManager.RCTWebView.Commands.postMessage,
<add> this._getCommands().postMessage,
<ide> [String(data)],
<ide> );
<ide> };
<ide> class WebView extends React.Component {
<ide> injectJavaScript = data => {
<ide> UIManager.dispatchViewManagerCommand(
<ide> this.getWebViewHandle(),
<del> UIManager.RCTWebView.Commands.injectJavaScript,
<add> this._getCommands().injectJavaScript,
<ide> [data],
<ide> );
<ide> };
<ide> class WebView extends React.Component {
<ide> const {onMessage} = this.props;
<ide> onMessage && onMessage(event);
<ide> };
<add>
<add> componentDidUpdate(prevProps) {
<add> if (!(prevProps.useWebKit && this.props.useWebKit)) {
<add> return;
<add> }
<add>
<add> this._showRedboxOnPropChanges(prevProps, 'allowsInlineMediaPlayback');
<add> this._showRedboxOnPropChanges(prevProps, 'mediaPlaybackRequiresUserAction');
<add> this._showRedboxOnPropChanges(prevProps, 'dataDetectorTypes');
<add> }
<add>
<add> _showRedboxOnPropChanges(prevProps, propName: string) {
<add> if (this.props[propName] !== prevProps[propName]) {
<add> console.error(
<add> `Changes to property ${propName} do nothing after the initial render.`,
<add> );
<add> }
<add> }
<ide> }
<ide>
<del>const RCTWebView = requireNativeComponent('RCTWebView');
<add>const RCTWebView = requireNativeComponent(
<add> 'RCTWebView',
<add> WebView,
<add> WebView.extraNativeComponentConfig,
<add>);
<add>const RCTWKWebView = requireNativeComponent(
<add> 'RCTWKWebView',
<add> WebView,
<add> WebView.extraNativeComponentConfig,
<add>);
<ide>
<ide> const styles = StyleSheet.create({
<ide> container: { | 3 |
PHP | PHP | add methods for supported and available drivers | 52b3b29aff760e93ef85017fc51e0e6db0419a2b | <ide><path>src/Illuminate/Database/DatabaseManager.php
<ide>
<ide> namespace Illuminate\Database;
<ide>
<add>use PDO;
<ide> use Illuminate\Support\Arr;
<ide> use Illuminate\Support\Str;
<ide> use InvalidArgumentException;
<ide> public function setDefaultConnection($name)
<ide> $this->app['config']['database.default'] = $name;
<ide> }
<ide>
<add> /**
<add> * Get all of the support drivers.
<add> *
<add> * @return array[string]
<add> */
<add> public function supportedDrivers()
<add> {
<add> return ['mysql', 'pgsql', 'sqlite', 'dblib', 'sqlsrv'];
<add> }
<add>
<add> /**
<add> * Get all of the drivers that are actually available.
<add> *
<add> * @return array
<add> */
<add> public function availableDrivers()
<add> {
<add> return array_intersect($this->supportedDrivers(), PDO::getAvailableDrivers());
<add> }
<add>
<ide> /**
<ide> * Register an extension connection resolver.
<ide> * | 1 |
Ruby | Ruby | fix rubocop warnings | 5cba530eef15ece440be63adb4d625018c97badc | <ide><path>Library/Homebrew/requirements/x11_requirement.rb
<ide> class X11Requirement < Requirement
<ide>
<ide> def initialize(name = "x11", tags = [])
<ide> @name = name
<del> if /(\d\.)+\d/ === tags.first
<add> if /(\d\.)+\d/ =~ tags.first
<ide> @min_version = Version.create(tags.shift)
<ide> @min_version_string = " #{@min_version}"
<ide> else
<ide> def message
<ide> end
<ide>
<ide> def <=>(other)
<del> return unless X11Requirement === other
<add> return unless other.is_a? X11Requirement
<ide> min_version <=> other.min_version
<ide> end
<ide> | 1 |
Javascript | Javascript | fix multiple_line problem in match | 613654e882d9b3eda43680de0811d5f52caed994 | <ide><path>test/simple/test-repl.js
<ide> function error_test() {
<ide> if (read_buffer.indexOf(prompt_unix) !== -1) {
<ide> // if it's an exact match, then don't do the regexp
<ide> if (read_buffer !== client_unix.expect) {
<del> assert.ok(read_buffer.match(client_unix.expect));
<add> var expect = client_unix.expect;
<add> if (expect === prompt_multiline)
<add> expect = /[\.]{3} /;
<add> assert.ok(read_buffer.match(expect));
<ide> console.error('match');
<ide> }
<ide> read_buffer = ''; | 1 |
Go | Go | remove unused containerconfig.endpoint() | 56e690f340e030027ed1b5503bbde06e5a879518 | <ide><path>daemon/cluster/executor/container/container.go
<ide> func (c *containerConfig) taskID() string {
<ide> return c.task.ID
<ide> }
<ide>
<del>func (c *containerConfig) endpoint() *api.Endpoint {
<del> return c.task.Endpoint
<del>}
<del>
<ide> func (c *containerConfig) spec() *api.ContainerSpec {
<ide> return c.task.Spec.GetContainer()
<ide> } | 1 |
PHP | PHP | fix error output line highlighting off by one | eceac7dfbeafec507263dcbabac957e86436d73b | <ide><path>src/Error/Debugger.php
<ide> public function outputError($data)
<ide> $file = $files[1];
<ide> }
<ide> if ($file) {
<del> $code = static::excerpt($file['file'], $file['line'] - 1, 1);
<add> $code = static::excerpt($file['file'], $file['line'], 1);
<ide> }
<ide> $trace = static::trace(['start' => $data['start'], 'depth' => '20']);
<ide> $insertOpts = ['before' => '{:', 'after' => '}'];
<ide><path>tests/TestCase/Error/DebuggerTest.php
<ide> public function testOutputErrorDescriptionEncoding()
<ide> $this->assertNotContains('<script>', $result);
<ide> }
<ide>
<add> /**
<add> * Tests that the correct line is being highlighted.
<add> *
<add> * @return void
<add> */
<add> public function testOutputErrorLineHighlight()
<add> {
<add> Debugger::outputAs('js');
<add>
<add> ob_start();
<add> $debugger = Debugger::getInstance();
<add> $data = [
<add> 'level' => E_NOTICE,
<add> 'code' => E_NOTICE,
<add> 'file' => __FILE__,
<add> 'line' => __LINE__,
<add> 'description' => 'Error description',
<add> 'start' => 1
<add> ];
<add> $debugger->outputError($data);
<add> $result = ob_get_clean();
<add>
<add> $this->assertRegExp('#^\<span class\="code\-highlight"\>.*outputError.*\</span\>$#m', $result);
<add> }
<add>
<ide> /**
<ide> * Tests that changes in output formats using Debugger::output() change the templates used.
<ide> * | 2 |
Go | Go | remove package daemonbuilder | 9c332b164f1aefa2407706adf59d50495d6e02cb | <ide><path>api/server/router/build/backend.go
<ide> package build
<ide>
<add>import (
<add> "github.com/docker/docker/builder"
<add> "github.com/docker/engine-api/types"
<add> "io"
<add>)
<add>
<ide> // Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID.
<ide> type Backend interface {
<ide> // Build builds a Docker image referenced by an imageID string.
<ide> type Backend interface {
<ide> // by the caller.
<ide> //
<ide> // TODO: make this return a reference instead of string
<del> Build() (imageID string)
<add> Build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error)
<ide> }
<ide><path>api/server/router/build/build.go
<ide> package build
<ide> import (
<ide> "github.com/docker/docker/api/server/router"
<ide> "github.com/docker/docker/api/server/router/local"
<del> "github.com/docker/docker/daemon"
<ide> )
<ide>
<ide> // buildRouter is a router to talk with the build controller
<ide> type buildRouter struct {
<del> backend *daemon.Daemon
<add> backend Backend
<ide> routes []router.Route
<ide> }
<ide>
<ide> // NewRouter initializes a new build router
<del>func NewRouter(b *daemon.Daemon) router.Router {
<add>func NewRouter(b Backend) router.Router {
<ide> r := &buildRouter{
<ide> backend: b,
<ide> }
<ide><path>api/server/router/build/build_routes.go
<ide> import (
<ide> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/docker/api/server/httputils"
<ide> "github.com/docker/docker/builder"
<del> "github.com/docker/docker/builder/dockerfile"
<del> "github.com/docker/docker/daemon/daemonbuilder"
<ide> "github.com/docker/docker/pkg/ioutils"
<ide> "github.com/docker/docker/pkg/progress"
<ide> "github.com/docker/docker/pkg/streamformatter"
<del> "github.com/docker/docker/reference"
<ide> "github.com/docker/docker/utils"
<ide> "github.com/docker/engine-api/types"
<ide> "github.com/docker/engine-api/types/container"
<ide> "github.com/docker/go-units"
<ide> "golang.org/x/net/context"
<ide> )
<ide>
<del>// sanitizeRepoAndTags parses the raw "t" parameter received from the client
<del>// to a slice of repoAndTag.
<del>// It also validates each repoName and tag.
<del>func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
<del> var (
<del> repoAndTags []reference.Named
<del> // This map is used for deduplicating the "-t" parameter.
<del> uniqNames = make(map[string]struct{})
<del> )
<del> for _, repo := range names {
<del> if repo == "" {
<del> continue
<del> }
<del>
<del> ref, err := reference.ParseNamed(repo)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> ref = reference.WithDefaultTag(ref)
<del>
<del> if _, isCanonical := ref.(reference.Canonical); isCanonical {
<del> return nil, errors.New("build tag cannot contain a digest")
<del> }
<del>
<del> if _, isTagged := ref.(reference.NamedTagged); !isTagged {
<del> ref, err = reference.WithTag(ref, reference.DefaultTag)
<del> }
<del>
<del> nameWithTag := ref.String()
<del>
<del> if _, exists := uniqNames[nameWithTag]; !exists {
<del> uniqNames[nameWithTag] = struct{}{}
<del> repoAndTags = append(repoAndTags, ref)
<del> }
<del> }
<del> return repoAndTags, nil
<del>}
<del>
<ide> func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
<ide> version := httputils.VersionFromContext(ctx)
<ide> options := &types.ImageBuildOptions{}
<ide> func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
<ide> options.CPUSetCPUs = r.FormValue("cpusetcpus")
<ide> options.CPUSetMems = r.FormValue("cpusetmems")
<ide> options.CgroupParent = r.FormValue("cgroupparent")
<add> options.Tags = r.Form["t"]
<ide>
<ide> if r.Form.Get("shmsize") != "" {
<ide> shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
<ide> func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
<ide> return errf(err)
<ide> }
<ide>
<del> repoAndTags, err := sanitizeRepoAndTags(r.Form["t"])
<del> if err != nil {
<del> return errf(err)
<del> }
<del>
<ide> remoteURL := r.FormValue("remote")
<ide>
<ide> // Currently, only used if context is from a remote url.
<ide> func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
<ide> var (
<ide> context builder.ModifiableContext
<ide> dockerfileName string
<add> out io.Writer
<ide> )
<del> context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader)
<add> context, dockerfileName, err = builder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader)
<ide> if err != nil {
<ide> return errf(err)
<ide> }
<ide> func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
<ide> buildOptions.Dockerfile = dockerfileName
<ide> }
<ide>
<del> b, err := dockerfile.NewBuilder(
<del> buildOptions, // result of newBuildConfig
<del> &daemonbuilder.Docker{br.backend},
<del> builder.DockerIgnoreContext{ModifiableContext: context},
<del> nil)
<del> if err != nil {
<del> return errf(err)
<del> }
<del> if buildOptions.SuppressOutput {
<del> b.Output = notVerboseBuffer
<del> } else {
<del> b.Output = output
<del> }
<del> b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
<del> b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf}
<add> out = output
<ide> if buildOptions.SuppressOutput {
<del> b.Stdout = &streamformatter.StdoutFormatter{Writer: notVerboseBuffer, StreamFormatter: sf}
<del> b.Stderr = &streamformatter.StderrFormatter{Writer: notVerboseBuffer, StreamFormatter: sf}
<add> out = notVerboseBuffer
<ide> }
<add> stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf}
<add> stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf}
<ide>
<del> if closeNotifier, ok := w.(http.CloseNotifier); ok {
<del> finished := make(chan struct{})
<del> defer close(finished)
<del> clientGone := closeNotifier.CloseNotify()
<del> go func() {
<del> select {
<del> case <-finished:
<del> case <-clientGone:
<del> logrus.Infof("Client disconnected, cancelling job: build")
<del> b.Cancel()
<del> }
<del> }()
<add> closeNotifier := make(<-chan bool)
<add> if notifier, ok := w.(http.CloseNotifier); ok {
<add> closeNotifier = notifier.CloseNotify()
<ide> }
<ide>
<del> imgID, err := b.Build()
<add> imgID, err := br.backend.Build(buildOptions,
<add> builder.DockerIgnoreContext{ModifiableContext: context},
<add> stdout, stderr, out,
<add> closeNotifier)
<ide> if err != nil {
<ide> return errf(err)
<ide> }
<ide>
<del> for _, rt := range repoAndTags {
<del> if err := br.backend.TagImage(rt, imgID); err != nil {
<del> return errf(err)
<del> }
<del> }
<del>
<ide> // Everything worked so if -q was provided the output from the daemon
<ide> // should be just the image ID and we'll print that to stdout.
<ide> if buildOptions.SuppressOutput {
<ide><path>api/server/server.go
<ide> import (
<ide> "github.com/docker/docker/api/server/router/network"
<ide> "github.com/docker/docker/api/server/router/system"
<ide> "github.com/docker/docker/api/server/router/volume"
<add> "github.com/docker/docker/builder/dockerfile"
<ide> "github.com/docker/docker/daemon"
<ide> "github.com/docker/docker/pkg/authorization"
<ide> "github.com/docker/docker/utils"
<ide> func (s *Server) InitRouters(d *daemon.Daemon) {
<ide> s.addRouter(network.NewRouter(d))
<ide> s.addRouter(system.NewRouter(d))
<ide> s.addRouter(volume.NewRouter(d))
<del> s.addRouter(build.NewRouter(d))
<add> s.addRouter(build.NewRouter(dockerfile.NewBuildManager(d)))
<ide> }
<ide>
<ide> // addRouter adds a new router to the server.
<ide><path>builder/builder.go
<ide> import (
<ide> "os"
<ide> "time"
<ide>
<add> "github.com/docker/docker/reference"
<ide> "github.com/docker/engine-api/types"
<ide> "github.com/docker/engine-api/types/container"
<ide> )
<ide> type Backend interface {
<ide> // TODO: use digest reference instead of name
<ide>
<ide> // GetImage looks up a Docker image referenced by `name`.
<del> GetImage(name string) (Image, error)
<add> GetImageOnBuild(name string) (Image, error)
<add> // Tag an image with newTag
<add> TagImage(newTag reference.Named, imageName string) error
<ide> // Pull tells Docker to pull image referenced by `name`.
<del> Pull(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error)
<add> PullOnBuild(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error)
<ide> // ContainerAttach attaches to container.
<del> ContainerAttach(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
<add> ContainerAttachOnBuild(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
<ide> // ContainerCreate creates a new Docker container and returns potential warnings
<ide> ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
<ide> // ContainerRm removes a container specified by `id`.
<ide> type Backend interface {
<ide> ContainerStart(containerID string, hostConfig *container.HostConfig) error
<ide> // ContainerWait stops processing until the given container is stopped.
<ide> ContainerWait(containerID string, timeout time.Duration) (int, error)
<del>
<ide> // ContainerUpdateCmd updates container.Path and container.Args
<del> ContainerUpdateCmd(containerID string, cmd []string) error
<add> ContainerUpdateCmdOnBuild(containerID string, cmd []string) error
<ide>
<ide> // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container
<ide> // specified by a container object.
<ide> type Backend interface {
<ide> // with Context.Walk
<ide> //ContainerCopy(name string, res string) (io.ReadCloser, error)
<ide> // TODO: use copyBackend api
<del> BuilderCopy(containerID string, destPath string, src FileInfo, decompress bool) error
<add> CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error
<add>}
<add>
<add>// Image represents a Docker image used by the builder.
<add>type Image interface {
<add> ImageID() string
<add> RunConfig() *container.Config
<ide> }
<ide>
<ide> // ImageCache abstracts an image cache store.
<ide> // (parent image, child runconfig) -> child image
<ide> type ImageCache interface {
<ide> // GetCachedImage returns a reference to a cached image whose parent equals `parent`
<ide> // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
<del> GetCachedImage(parentID string, cfg *container.Config) (imageID string, err error)
<add> GetCachedImageOnBuild(parentID string, cfg *container.Config) (imageID string, err error)
<ide> }
<ide><path>builder/dockerfile/builder.go
<ide> package dockerfile
<ide>
<ide> import (
<ide> "bytes"
<add> "errors"
<ide> "fmt"
<ide> "io"
<ide> "io/ioutil"
<ide> import (
<ide> "github.com/docker/docker/builder"
<ide> "github.com/docker/docker/builder/dockerfile/parser"
<ide> "github.com/docker/docker/pkg/stringid"
<add> "github.com/docker/docker/reference"
<ide> "github.com/docker/engine-api/types"
<ide> "github.com/docker/engine-api/types/container"
<ide> )
<ide> type Builder struct {
<ide>
<ide> Stdout io.Writer
<ide> Stderr io.Writer
<add> Output io.Writer
<ide>
<ide> docker builder.Backend
<ide> context builder.Context
<ide> type Builder struct {
<ide> allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'.
<ide>
<ide> // TODO: remove once docker.Commit can receive a tag
<del> id string
<del> Output io.Writer
<add> id string
<add>}
<add>
<add>// BuildManager implements builder.Backend and is shared across all Builder objects.
<add>type BuildManager struct {
<add> backend builder.Backend
<add>}
<add>
<add>// NewBuildManager creates a BuildManager.
<add>func NewBuildManager(b builder.Backend) (bm *BuildManager) {
<add> return &BuildManager{backend: b}
<ide> }
<ide>
<ide> // NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config.
<ide> func NewBuilder(config *types.ImageBuildOptions, backend builder.Backend, contex
<ide> return b, nil
<ide> }
<ide>
<del>// Build runs the Dockerfile builder from a context and a docker object that allows to make calls
<add>// sanitizeRepoAndTags parses the raw "t" parameter received from the client
<add>// to a slice of repoAndTag.
<add>// It also validates each repoName and tag.
<add>func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
<add> var (
<add> repoAndTags []reference.Named
<add> // This map is used for deduplicating the "-t" parameter.
<add> uniqNames = make(map[string]struct{})
<add> )
<add> for _, repo := range names {
<add> if repo == "" {
<add> continue
<add> }
<add>
<add> ref, err := reference.ParseNamed(repo)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> ref = reference.WithDefaultTag(ref)
<add>
<add> if _, isCanonical := ref.(reference.Canonical); isCanonical {
<add> return nil, errors.New("build tag cannot contain a digest")
<add> }
<add>
<add> if _, isTagged := ref.(reference.NamedTagged); !isTagged {
<add> ref, err = reference.WithTag(ref, reference.DefaultTag)
<add> }
<add>
<add> nameWithTag := ref.String()
<add>
<add> if _, exists := uniqNames[nameWithTag]; !exists {
<add> uniqNames[nameWithTag] = struct{}{}
<add> repoAndTags = append(repoAndTags, ref)
<add> }
<add> }
<add> return repoAndTags, nil
<add>}
<add>
<add>// Build creates a NewBuilder, which builds the image.
<add>func (bm *BuildManager) Build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) {
<add> b, err := NewBuilder(config, bm.backend, context, nil)
<add> if err != nil {
<add> return "", err
<add> }
<add> img, err := b.build(config, context, stdout, stderr, out, clientGone)
<add> return img, err
<add>
<add>}
<add>
<add>// build runs the Dockerfile builder from a context and a docker object that allows to make calls
<ide> // to Docker.
<ide> //
<ide> // This will (barring errors):
<ide> func NewBuilder(config *types.ImageBuildOptions, backend builder.Backend, contex
<ide> // * walk the AST and execute it by dispatching to handlers. If Remove
<ide> // or ForceRemove is set, additional cleanup around containers happens after
<ide> // processing.
<add>// * Tag image, if applicable.
<ide> // * Print a happy message and return the image ID.
<del>// * NOT tag the image, that is responsibility of the caller.
<ide> //
<del>func (b *Builder) Build() (string, error) {
<add>func (b *Builder) build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) {
<add> b.options = config
<add> b.context = context
<add> b.Stdout = stdout
<add> b.Stderr = stderr
<add> b.Output = out
<add>
<ide> // If Dockerfile was not parsed yet, extract it from the Context
<ide> if b.dockerfile == nil {
<ide> if err := b.readDockerfile(); err != nil {
<ide> return "", err
<ide> }
<ide> }
<ide>
<add> finished := make(chan struct{})
<add> defer close(finished)
<add> go func() {
<add> select {
<add> case <-finished:
<add> case <-clientGone:
<add> b.cancelOnce.Do(func() {
<add> close(b.cancelled)
<add> })
<add> }
<add>
<add> }()
<add>
<add> repoAndTags, err := sanitizeRepoAndTags(config.Tags)
<add> if err != nil {
<add> return "", err
<add> }
<add>
<ide> var shortImgID string
<ide> for i, n := range b.dockerfile.Children {
<ide> select {
<ide> func (b *Builder) Build() (string, error) {
<ide> return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
<ide> }
<ide>
<add> for _, rt := range repoAndTags {
<add> if err := b.docker.TagImage(rt, b.image); err != nil {
<add> return "", err
<add> }
<add> }
<add>
<ide> fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID)
<ide> return b.image, nil
<ide> }
<ide><path>builder/dockerfile/dispatchers.go
<ide> func from(b *Builder, args []string, attributes map[string]bool, original string
<ide> } else {
<ide> // TODO: don't use `name`, instead resolve it to a digest
<ide> if !b.options.PullParent {
<del> image, err = b.docker.GetImage(name)
<add> image, err = b.docker.GetImageOnBuild(name)
<ide> // TODO: shouldn't we error out if error is different from "not found" ?
<ide> }
<ide> if image == nil {
<del> image, err = b.docker.Pull(name, b.options.AuthConfigs, b.Output)
<add> image, err = b.docker.PullOnBuild(name, b.options.AuthConfigs, b.Output)
<ide> if err != nil {
<ide> return err
<ide> }
<ide><path>builder/dockerfile/internals.go
<ide> func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalD
<ide> }
<ide>
<ide> for _, info := range infos {
<del> if err := b.docker.BuilderCopy(container.ID, dest, info.FileInfo, info.decompress); err != nil {
<add> if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
<ide> return err
<ide> }
<ide> }
<ide> func containsWildcards(name string) bool {
<ide>
<ide> func (b *Builder) processImageFrom(img builder.Image) error {
<ide> if img != nil {
<del> b.image = img.ID()
<add> b.image = img.ImageID()
<ide>
<del> if img.Config() != nil {
<del> b.runConfig = img.Config()
<add> if img.RunConfig() != nil {
<add> b.runConfig = img.RunConfig()
<ide> }
<ide> }
<ide>
<ide> func (b *Builder) probeCache() (bool, error) {
<ide> if !ok || b.options.NoCache || b.cacheBusted {
<ide> return false, nil
<ide> }
<del> cache, err := c.GetCachedImage(b.image, b.runConfig)
<add> cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig)
<ide> if err != nil {
<ide> return false, err
<ide> }
<ide> func (b *Builder) create() (string, error) {
<ide>
<ide> if config.Cmd.Len() > 0 {
<ide> // override the entry point that may have been picked up from the base image
<del> if err := b.docker.ContainerUpdateCmd(c.ID, config.Cmd.Slice()); err != nil {
<add> if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd.Slice()); err != nil {
<ide> return "", err
<ide> }
<ide> }
<ide> func (b *Builder) create() (string, error) {
<ide> func (b *Builder) run(cID string) (err error) {
<ide> errCh := make(chan error)
<ide> go func() {
<del> errCh <- b.docker.ContainerAttach(cID, nil, b.Stdout, b.Stderr, true)
<add> errCh <- b.docker.ContainerAttachOnBuild(cID, nil, b.Stdout, b.Stderr, true)
<ide> }()
<ide>
<ide> finished := make(chan struct{})
<ide><path>builder/image.go
<del>package builder
<del>
<del>import "github.com/docker/engine-api/types/container"
<del>
<del>// Image represents a Docker image used by the builder.
<del>type Image interface {
<del> ID() string
<del> Config() *container.Config
<del>}
<ide><path>builder/remote.go
<ide> import (
<ide> "io/ioutil"
<ide> "regexp"
<ide>
<add> "github.com/docker/docker/api"
<add> "github.com/docker/docker/pkg/archive"
<ide> "github.com/docker/docker/pkg/httputils"
<add> "github.com/docker/docker/pkg/urlutil"
<ide> )
<ide>
<ide> // When downloading remote contexts, limit the amount (in bytes)
<ide> func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.
<ide> return MakeTarSumContext(contextReader)
<ide> }
<ide>
<add>// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
<add>// irrespective of user input.
<add>// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
<add>func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) {
<add> switch {
<add> case remoteURL == "":
<add> context, err = MakeTarSumContext(r)
<add> case urlutil.IsGitURL(remoteURL):
<add> context, err = MakeGitContext(remoteURL)
<add> case urlutil.IsURL(remoteURL):
<add> context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
<add> httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
<add> dockerfile, err := ioutil.ReadAll(rc)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
<add> // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
<add> dockerfileName = api.DefaultDockerfileName
<add>
<add> // TODO: return a context without tarsum
<add> return archive.Generate(dockerfileName, string(dockerfile))
<add> },
<add> // fallback handler (tar context)
<add> "": func(rc io.ReadCloser) (io.ReadCloser, error) {
<add> return createProgressReader(rc), nil
<add> },
<add> })
<add> default:
<add> err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
<add> }
<add> return
<add>}
<add>
<ide> // inspectResponse looks into the http response data at r to determine whether its
<ide> // content-type is on the list of acceptable content types for remote build contexts.
<ide> // This function returns:
<ide><path>daemon/archive.go
<ide> import (
<ide> "path/filepath"
<ide> "strings"
<ide>
<add> "github.com/docker/docker/builder"
<ide> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/pkg/archive"
<ide> "github.com/docker/docker/pkg/chrootarchive"
<add> "github.com/docker/docker/pkg/idtools"
<ide> "github.com/docker/docker/pkg/ioutils"
<ide> "github.com/docker/engine-api/types"
<ide> )
<ide> func (daemon *Daemon) containerCopy(container *container.Container, resource str
<ide> daemon.LogContainerEvent(container, "copy")
<ide> return reader, nil
<ide> }
<add>
<add>// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container
<add>// specified by a container object.
<add>// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already).
<add>// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths.
<add>func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error {
<add> srcPath := src.Path()
<add> destExists := true
<add> destDir := false
<add> rootUID, rootGID := daemon.GetRemappedUIDGID()
<add>
<add> // Work in daemon-local OS specific file paths
<add> destPath = filepath.FromSlash(destPath)
<add>
<add> c, err := daemon.GetContainer(cID)
<add> if err != nil {
<add> return err
<add> }
<add> err = daemon.Mount(c)
<add> if err != nil {
<add> return err
<add> }
<add> defer daemon.Unmount(c)
<add>
<add> dest, err := c.GetResourcePath(destPath)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> // Preserve the trailing slash
<add> // TODO: why are we appending another path separator if there was already one?
<add> if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." {
<add> destDir = true
<add> dest += string(os.PathSeparator)
<add> }
<add>
<add> destPath = dest
<add>
<add> destStat, err := os.Stat(destPath)
<add> if err != nil {
<add> if !os.IsNotExist(err) {
<add> //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
<add> return err
<add> }
<add> destExists = false
<add> }
<add>
<add> uidMaps, gidMaps := daemon.GetUIDGIDMaps()
<add> archiver := &archive.Archiver{
<add> Untar: chrootarchive.Untar,
<add> UIDMaps: uidMaps,
<add> GIDMaps: gidMaps,
<add> }
<add>
<add> if src.IsDir() {
<add> // copy as directory
<add> if err := archiver.CopyWithTar(srcPath, destPath); err != nil {
<add> return err
<add> }
<add> return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
<add> }
<add> if decompress && archive.IsArchivePath(srcPath) {
<add> // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file)
<add>
<add> // First try to unpack the source as an archive
<add> // to support the untar feature we need to clean up the path a little bit
<add> // because tar is very forgiving. First we need to strip off the archive's
<add> // filename from the path but this is only added if it does not end in slash
<add> tarDest := destPath
<add> if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
<add> tarDest = filepath.Dir(destPath)
<add> }
<add>
<add> // try to successfully untar the orig
<add> err := archiver.UntarPath(srcPath, tarDest)
<add> /*
<add> if err != nil {
<add> logrus.Errorf("Couldn't untar to %s: %v", tarDest, err)
<add> }
<add> */
<add> return err
<add> }
<add>
<add> // only needed for fixPermissions, but might as well put it before CopyFileWithTar
<add> if destDir || (destExists && destStat.IsDir()) {
<add> destPath = filepath.Join(destPath, src.Name())
<add> }
<add>
<add> if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil {
<add> return err
<add> }
<add> if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil {
<add> return err
<add> }
<add>
<add> return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
<add>}
<ide><path>daemon/archive_unix.go
<ide>
<ide> package daemon
<ide>
<del>import "github.com/docker/docker/container"
<add>import (
<add> "github.com/docker/docker/container"
<add> "os"
<add> "path/filepath"
<add>)
<ide>
<ide> // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
<ide> // cannot be in a read-only volume. If it is not in a volume, the container
<ide> func checkIfPathIsInAVolume(container *container.Container, absPath string) (boo
<ide> }
<ide> return toVolume, nil
<ide> }
<add>
<add>func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
<add> // If the destination didn't already exist, or the destination isn't a
<add> // directory, then we should Lchown the destination. Otherwise, we shouldn't
<add> // Lchown the destination.
<add> destStat, err := os.Stat(destination)
<add> if err != nil {
<add> // This should *never* be reached, because the destination must've already
<add> // been created while untar-ing the context.
<add> return err
<add> }
<add> doChownDestination := !destExisted || !destStat.IsDir()
<add>
<add> // We Walk on the source rather than on the destination because we don't
<add> // want to change permissions on things we haven't created or modified.
<add> return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
<add> // Do not alter the walk root iff. it existed before, as it doesn't fall under
<add> // the domain of "things we should chown".
<add> if !doChownDestination && (source == fullpath) {
<add> return nil
<add> }
<add>
<add> // Path is prefixed by source: substitute with destination instead.
<add> cleaned, err := filepath.Rel(source, fullpath)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> fullpath = filepath.Join(destination, cleaned)
<add> return os.Lchown(fullpath, uid, gid)
<add> })
<add>}
<ide><path>daemon/archive_windows.go
<ide> import "github.com/docker/docker/container"
<ide> func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
<ide> return false, nil
<ide> }
<add>
<add>func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
<add> // chown is not supported on Windows
<add> return nil
<add>}
<ide><path>daemon/attach.go
<ide> func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *Containe
<ide> return daemon.attachWithLogs(container, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream, c.DetachKeys)
<ide> }
<ide>
<add>// ContainerAttachOnBuild attaches streams to the container cID. If stream is true, it streams the output.
<add>func (daemon *Daemon) ContainerAttachOnBuild(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
<add> return daemon.ContainerWsAttachWithLogs(cID, &ContainerWsAttachWithLogsConfig{
<add> InStream: stdin,
<add> OutStream: stdout,
<add> ErrStream: stderr,
<add> Stream: stream,
<add> })
<add>}
<add>
<ide> func (daemon *Daemon) attachWithLogs(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error {
<ide> if logs {
<ide> logDriver, err := daemon.getLogger(container)
<ide><path>daemon/daemon.go
<ide> import (
<ide> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/distribution/digest"
<ide> "github.com/docker/docker/api"
<add> "github.com/docker/docker/builder"
<ide> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/events"
<ide> "github.com/docker/docker/daemon/exec"
<ide> func (daemon *Daemon) PullImage(ref reference.Named, metaHeaders map[string][]st
<ide> return err
<ide> }
<ide>
<add>// PullOnBuild tells Docker to pull image referenced by `name`.
<add>func (daemon *Daemon) PullOnBuild(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) {
<add> ref, err := reference.ParseNamed(name)
<add> if err != nil {
<add> return nil, err
<add> }
<add> ref = reference.WithDefaultTag(ref)
<add>
<add> pullRegistryAuth := &types.AuthConfig{}
<add> if len(authConfigs) > 0 {
<add> // The request came with a full auth config file, we prefer to use that
<add> repoInfo, err := daemon.RegistryService.ResolveRepository(ref)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> resolvedConfig := registry.ResolveAuthConfig(
<add> authConfigs,
<add> repoInfo.Index,
<add> )
<add> pullRegistryAuth = &resolvedConfig
<add> }
<add>
<add> if err := daemon.PullImage(ref, nil, pullRegistryAuth, output); err != nil {
<add> return nil, err
<add> }
<add> return daemon.GetImage(name)
<add>}
<add>
<ide> // ExportImage exports a list of images to the given output stream. The
<ide> // exported images are archived into a tar when written to the output
<ide> // stream. All images with the given tag and all versions containing
<ide> func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
<ide> return daemon.imageStore.Get(imgID)
<ide> }
<ide>
<add>// GetImageOnBuild looks up a Docker image referenced by `name`.
<add>func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) {
<add> img, err := daemon.GetImage(name)
<add> if err != nil {
<add> return nil, err
<add> }
<add> return img, nil
<add>}
<add>
<ide> // GraphDriverName returns the name of the graph driver used by the layer.Store
<ide> func (daemon *Daemon) GraphDriverName() string {
<ide> return daemon.layerStore.DriverName()
<ide> func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
<ide> return uid, gid
<ide> }
<ide>
<del>// ImageGetCached returns the most recent created image that is a child
<add>// GetCachedImage returns the most recent created image that is a child
<ide> // of the image with imgID, that had the same config when it was
<ide> // created. nil is returned if a child cannot be found. An error is
<ide> // returned if the parent image cannot be found.
<del>func (daemon *Daemon) ImageGetCached(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
<add>func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
<ide> // Loop on the children of the given image and check the config
<ide> getMatch := func(siblings []image.ID) (*image.Image, error) {
<ide> var match *image.Image
<ide> func (daemon *Daemon) ImageGetCached(imgID image.ID, config *containertypes.Conf
<ide> return getMatch(siblings)
<ide> }
<ide>
<add>// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
<add>// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
<add>func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) {
<add> cache, err := daemon.GetCachedImage(image.ID(imgID), cfg)
<add> if cache == nil || err != nil {
<add> return "", err
<add> }
<add> return cache.ID().String(), nil
<add>}
<add>
<ide> // tempDir returns the default directory to use for temporary files.
<ide> func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
<ide> var tmpDir string
<ide><path>daemon/daemonbuilder/builder.go
<del>package daemonbuilder
<del>
<del>import (
<del> "fmt"
<del> "io"
<del> "io/ioutil"
<del> "os"
<del> "path/filepath"
<del> "strings"
<del>
<del> "github.com/Sirupsen/logrus"
<del> "github.com/docker/docker/api"
<del> "github.com/docker/docker/builder"
<del> "github.com/docker/docker/daemon"
<del> "github.com/docker/docker/image"
<del> "github.com/docker/docker/pkg/archive"
<del> "github.com/docker/docker/pkg/chrootarchive"
<del> "github.com/docker/docker/pkg/httputils"
<del> "github.com/docker/docker/pkg/idtools"
<del> "github.com/docker/docker/pkg/ioutils"
<del> "github.com/docker/docker/pkg/urlutil"
<del> "github.com/docker/docker/reference"
<del> "github.com/docker/docker/registry"
<del> "github.com/docker/engine-api/types"
<del> "github.com/docker/engine-api/types/container"
<del>)
<del>
<del>// Docker implements builder.Backend for the docker Daemon object.
<del>type Docker struct {
<del> *daemon.Daemon
<del>}
<del>
<del>// ensure Docker implements builder.Backend
<del>var _ builder.Backend = Docker{}
<del>
<del>// Pull tells Docker to pull image referenced by `name`.
<del>func (d Docker) Pull(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) {
<del> ref, err := reference.ParseNamed(name)
<del> if err != nil {
<del> return nil, err
<del> }
<del> ref = reference.WithDefaultTag(ref)
<del>
<del> pullRegistryAuth := &types.AuthConfig{}
<del> if len(authConfigs) > 0 {
<del> // The request came with a full auth config file, we prefer to use that
<del> repoInfo, err := d.Daemon.RegistryService.ResolveRepository(ref)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> resolvedConfig := registry.ResolveAuthConfig(
<del> authConfigs,
<del> repoInfo.Index,
<del> )
<del> pullRegistryAuth = &resolvedConfig
<del> }
<del>
<del> if err := d.Daemon.PullImage(ref, nil, pullRegistryAuth, ioutils.NopWriteCloser(output)); err != nil {
<del> return nil, err
<del> }
<del> return d.GetImage(name)
<del>}
<del>
<del>// GetImage looks up a Docker image referenced by `name`.
<del>func (d Docker) GetImage(name string) (builder.Image, error) {
<del> img, err := d.Daemon.GetImage(name)
<del> if err != nil {
<del> return nil, err
<del> }
<del> return imgWrap{img}, nil
<del>}
<del>
<del>// ContainerUpdateCmd updates Path and Args for the container with ID cID.
<del>func (d Docker) ContainerUpdateCmd(cID string, cmd []string) error {
<del> c, err := d.Daemon.GetContainer(cID)
<del> if err != nil {
<del> return err
<del> }
<del> c.Path = cmd[0]
<del> c.Args = cmd[1:]
<del> return nil
<del>}
<del>
<del>// ContainerAttach attaches streams to the container cID. If stream is true, it streams the output.
<del>func (d Docker) ContainerAttach(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
<del> return d.Daemon.ContainerWsAttachWithLogs(cID, &daemon.ContainerWsAttachWithLogsConfig{
<del> InStream: stdin,
<del> OutStream: stdout,
<del> ErrStream: stderr,
<del> Stream: stream,
<del> })
<del>}
<del>
<del>// BuilderCopy copies/extracts a source FileInfo to a destination path inside a container
<del>// specified by a container object.
<del>// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already).
<del>// BuilderCopy should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths.
<del>func (d Docker) BuilderCopy(cID string, destPath string, src builder.FileInfo, decompress bool) error {
<del> srcPath := src.Path()
<del> destExists := true
<del> destDir := false
<del> rootUID, rootGID := d.Daemon.GetRemappedUIDGID()
<del>
<del> // Work in daemon-local OS specific file paths
<del> destPath = filepath.FromSlash(destPath)
<del>
<del> c, err := d.Daemon.GetContainer(cID)
<del> if err != nil {
<del> return err
<del> }
<del> err = d.Daemon.Mount(c)
<del> if err != nil {
<del> return err
<del> }
<del> defer d.Daemon.Unmount(c)
<del>
<del> dest, err := c.GetResourcePath(destPath)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> // Preserve the trailing slash
<del> // TODO: why are we appending another path separator if there was already one?
<del> if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." {
<del> destDir = true
<del> dest += string(os.PathSeparator)
<del> }
<del>
<del> destPath = dest
<del>
<del> destStat, err := os.Stat(destPath)
<del> if err != nil {
<del> if !os.IsNotExist(err) {
<del> logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
<del> return err
<del> }
<del> destExists = false
<del> }
<del>
<del> uidMaps, gidMaps := d.Daemon.GetUIDGIDMaps()
<del> archiver := &archive.Archiver{
<del> Untar: chrootarchive.Untar,
<del> UIDMaps: uidMaps,
<del> GIDMaps: gidMaps,
<del> }
<del>
<del> if src.IsDir() {
<del> // copy as directory
<del> if err := archiver.CopyWithTar(srcPath, destPath); err != nil {
<del> return err
<del> }
<del> return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
<del> }
<del> if decompress && archive.IsArchivePath(srcPath) {
<del> // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file)
<del>
<del> // First try to unpack the source as an archive
<del> // to support the untar feature we need to clean up the path a little bit
<del> // because tar is very forgiving. First we need to strip off the archive's
<del> // filename from the path but this is only added if it does not end in slash
<del> tarDest := destPath
<del> if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
<del> tarDest = filepath.Dir(destPath)
<del> }
<del>
<del> // try to successfully untar the orig
<del> err := archiver.UntarPath(srcPath, tarDest)
<del> if err != nil {
<del> logrus.Errorf("Couldn't untar to %s: %v", tarDest, err)
<del> }
<del> return err
<del> }
<del>
<del> // only needed for fixPermissions, but might as well put it before CopyFileWithTar
<del> if destDir || (destExists && destStat.IsDir()) {
<del> destPath = filepath.Join(destPath, src.Name())
<del> }
<del>
<del> if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil {
<del> return err
<del> }
<del> if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil {
<del> return err
<del> }
<del>
<del> return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
<del>}
<del>
<del>// GetCachedImage returns a reference to a cached image whose parent equals `parent`
<del>// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
<del>func (d Docker) GetCachedImage(imgID string, cfg *container.Config) (string, error) {
<del> cache, err := d.Daemon.ImageGetCached(image.ID(imgID), cfg)
<del> if cache == nil || err != nil {
<del> return "", err
<del> }
<del> return cache.ID().String(), nil
<del>}
<del>
<del>// Following is specific to builder contexts
<del>
<del>// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
<del>// irrespective of user input.
<del>// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
<del>func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context builder.ModifiableContext, dockerfileName string, err error) {
<del> switch {
<del> case remoteURL == "":
<del> context, err = builder.MakeTarSumContext(r)
<del> case urlutil.IsGitURL(remoteURL):
<del> context, err = builder.MakeGitContext(remoteURL)
<del> case urlutil.IsURL(remoteURL):
<del> context, err = builder.MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
<del> httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
<del> dockerfile, err := ioutil.ReadAll(rc)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
<del> // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
<del> dockerfileName = api.DefaultDockerfileName
<del>
<del> // TODO: return a context without tarsum
<del> return archive.Generate(dockerfileName, string(dockerfile))
<del> },
<del> // fallback handler (tar context)
<del> "": func(rc io.ReadCloser) (io.ReadCloser, error) {
<del> return createProgressReader(rc), nil
<del> },
<del> })
<del> default:
<del> err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
<del> }
<del> return
<del>}
<ide><path>daemon/daemonbuilder/builder_unix.go
<del>// +build freebsd linux
<del>
<del>package daemonbuilder
<del>
<del>import (
<del> "os"
<del> "path/filepath"
<del>)
<del>
<del>func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
<del> // If the destination didn't already exist, or the destination isn't a
<del> // directory, then we should Lchown the destination. Otherwise, we shouldn't
<del> // Lchown the destination.
<del> destStat, err := os.Stat(destination)
<del> if err != nil {
<del> // This should *never* be reached, because the destination must've already
<del> // been created while untar-ing the context.
<del> return err
<del> }
<del> doChownDestination := !destExisted || !destStat.IsDir()
<del>
<del> // We Walk on the source rather than on the destination because we don't
<del> // want to change permissions on things we haven't created or modified.
<del> return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
<del> // Do not alter the walk root iff. it existed before, as it doesn't fall under
<del> // the domain of "things we should chown".
<del> if !doChownDestination && (source == fullpath) {
<del> return nil
<del> }
<del>
<del> // Path is prefixed by source: substitute with destination instead.
<del> cleaned, err := filepath.Rel(source, fullpath)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> fullpath = filepath.Join(destination, cleaned)
<del> return os.Lchown(fullpath, uid, gid)
<del> })
<del>}
<ide><path>daemon/daemonbuilder/builder_windows.go
<del>// +build windows
<del>
<del>package daemonbuilder
<del>
<del>func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
<del> // chown is not supported on Windows
<del> return nil
<del>}
<ide><path>daemon/daemonbuilder/image.go
<del>package daemonbuilder
<del>
<del>import (
<del> "github.com/docker/docker/image"
<del> "github.com/docker/engine-api/types/container"
<del>)
<del>
<del>type imgWrap struct {
<del> inner *image.Image
<del>}
<del>
<del>func (img imgWrap) ID() string {
<del> return string(img.inner.ID())
<del>}
<del>
<del>func (img imgWrap) Config() *container.Config {
<del> return img.inner.Config
<del>}
<ide><path>daemon/update.go
<ide> func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostCon
<ide> return warnings, nil
<ide> }
<ide>
<add>// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID.
<add>func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error {
<add> c, err := daemon.GetContainer(cID)
<add> if err != nil {
<add> return err
<add> }
<add> c.Path = cmd[0]
<add> c.Args = cmd[1:]
<add> return nil
<add>}
<add>
<ide> func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
<ide> if hostConfig == nil {
<ide> return nil
<ide><path>image/image.go
<ide> func (img *Image) ID() ID {
<ide> return img.computedID
<ide> }
<ide>
<add>// ImageID stringizes ID.
<add>func (img *Image) ImageID() string {
<add> return string(img.ID())
<add>}
<add>
<add>// RunConfig returns the image's container config.
<add>func (img *Image) RunConfig() *container.Config {
<add> return img.Config
<add>}
<add>
<ide> // MarshalJSON serializes the image to JSON. It sorts the top-level keys so
<ide> // that JSON that's been manipulated by a push/pull cycle with a legacy
<ide> // registry won't end up with a different key order. | 21 |
Python | Python | allow avoidance of the Apache mod_rewrite undo | 99bbaa0090a605cfb80c9f1d7b1f86cb6b9e06f8
<ide> def get_script_name(environ):
<ide> Note: this isn't used by the mod_python handler, since the equivalent of
<ide> SCRIPT_NAME isn't available there.
<ide> """
<del> # If mod_rewrite had a whack at the URL, Apache set SCRIPT_URL to
<del> # SCRIPT_NAME before applying any rewrites.
<del> script_url = force_unicode(environ.get('SCRIPT_URL', ''))
<del> if script_url:
<del> return script_url
<add> if not environ.get('DJANGO_USE_POST_REWRITE'):
<add> # If mod_rewrite had a whack at the URL, Apache set SCRIPT_URL to
<add> # SCRIPT_NAME before applying any rewrites.
<add> script_url = force_unicode(environ.get('SCRIPT_URL', ''))
<add> if script_url:
<add> return script_url
<ide> return force_unicode(environ.get('SCRIPT_NAME', ''))
<ide> | 1 |
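A quick sketch of the new escape hatch, grounded in the patched function (the environ values are illustrative; in practice the flag would be injected by the server, e.g. via Apache's `SetEnv`):

```python
from django.core.handlers.base import get_script_name

# mod_rewrite preserves the pre-rewrite path in SCRIPT_URL, which
# get_script_name normally prefers:
environ = {'SCRIPT_URL': '/before-rewrite/app', 'SCRIPT_NAME': '/app'}
print(get_script_name(environ))   # u'/before-rewrite/app'

# With the new flag present in the WSGI environ, the rewrite "undo"
# is skipped and the post-rewrite SCRIPT_NAME wins:
environ['DJANGO_USE_POST_REWRITE'] = '1'
print(get_script_name(environ))   # u'/app'
```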
Python | Python | handle bytestrings in json. closes . | 9bffd354327ffc99a0c50ad140a86ede94f9dfba | <ide><path>rest_framework/utils/encoders.py
<ide> def default(self, obj):
<ide> return six.text_type(obj)
<ide> elif isinstance(obj, QuerySet):
<ide> return tuple(obj)
<add> elif isinstance(obj, six.binary_type):
<add> # Best-effort for binary blobs. See #4187.
<add> return obj.decode('utf-8')
<ide> elif hasattr(obj, 'tolist'):
<ide> # Numpy arrays and array scalars.
<ide> return obj.tolist() | 1 |
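A minimal sketch of the new fallback (encoder path taken from the patch): byte strings are now best-effort decoded as UTF-8 instead of bubbling up a `TypeError`:

```python
import json
from rest_framework.utils.encoders import JSONEncoder

payload = {'blob': b'caf\xc3\xa9'}           # bytes, not str
print(json.dumps(payload, cls=JSONEncoder))  # {"blob": "caf\u00e9"}
```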
Python | Python | add timeout when waiting for not_empty | dbc104e60e32f215a06015c2f78b633950986ec2 | <ide><path>celery/utils/timer2.py
<ide> def next(self):
<ide> try:
<ide> delay = self.scheduler.next()
<ide> if delay is None:
<del> print("WAITING FOR ENTRY")
<del> self.not_empty.wait()
<add> self.not_empty.wait(1.0)
<ide> return delay
<ide> finally:
<ide> self.not_empty.release() | 1 |
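The fix is simply `Condition.wait` with a timeout, so the timer thread re-checks the (possibly still empty) schedule at least once a second instead of blocking forever; a standalone sketch of the pattern:

```python
import threading

not_empty = threading.Condition()

def next_delay(scheduler_next):
    not_empty.acquire()
    try:
        delay = scheduler_next()
        if delay is None:
            # pre-fix: wait() with no timeout could sleep indefinitely
            not_empty.wait(1.0)
        return delay
    finally:
        not_empty.release()
```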
Python | Python | add tests for utility kubernetes functions | abd30bf17a74c7b3876653813a67bd842ac9d37f | <ide><path>libcloud/container/drivers/kubernetes.py
<ide> def to_n_cpus(cpu_str: str) -> Union[int, float]:
<ide> return 0
<ide>
<ide>
<del>def sum_resources(self, *resource_dicts):
<add>def sum_resources(*resource_dicts):
<ide> total_cpu = 0
<ide> total_memory = 0
<ide> for rd in resource_dicts:
<ide><path>libcloud/test/container/test_kubernetes.py
<ide>
<ide> from libcloud.container.base import ContainerImage
<ide> from libcloud.container.drivers.kubernetes import KubernetesContainerDriver
<add>from libcloud.container.drivers.kubernetes import to_n_bytes
<add>from libcloud.container.drivers.kubernetes import to_cpu_str
<add>from libcloud.container.drivers.kubernetes import to_n_cpus
<add>from libcloud.container.drivers.kubernetes import to_memory_str
<add>from libcloud.container.drivers.kubernetes import sum_resources
<ide>
<ide> from libcloud.test.secrets import CONTAINER_PARAMS_KUBERNETES
<ide> from libcloud.test.common.test_kubernetes import KubernetesAuthTestCaseMixin
<ide> def test_list_deployments(self):
<ide> self.assertIsInstance(deployment.replicas, int)
<ide> self.assertIsInstance(deployment.selector, dict)
<ide>
<add> def test_to_n_bytes(self):
<add> memory = "0"
<add> self.assertEqual(to_n_bytes(memory), 0)
<add> memory = "1000Ki"
<add> self.assertEqual(to_n_bytes(memory), 1_024_000)
<add> memory = "100K"
<add> self.assertEqual(to_n_bytes(memory), 100_000)
<add> memory = "512Mi"
<add> self.assertEqual(to_n_bytes(memory), 536_870_912)
<add> memory = "900M"
<add> self.assertEqual(to_n_bytes(memory), 900_000_000)
<add> memory = "10Gi"
<add> self.assertEqual(to_n_bytes(memory), 10_737_418_240)
<add> memory = "10G"
<add> self.assertEqual(to_n_bytes(memory), 10_000_000_000)
<add>
<add> def test_to_memory_str(self):
<add> memory = 0
<add> self.assertEqual(to_memory_str(memory), "0K")
<add> memory = 1_024_000
<add> self.assertEqual(to_memory_str(memory), "1000Ki")
<add> memory = 100_000
<add> self.assertEqual(to_memory_str(memory), "100K")
<add> memory = 536_870_912
<add> self.assertEqual(to_memory_str(memory), "512Mi")
<add> memory = 900_000_000
<add> self.assertEqual(to_memory_str(memory), "900M")
<add> memory = 10_737_418_240
<add> self.assertEqual(to_memory_str(memory), "10Gi")
<add> memory = 10_000_000_000
<add> self.assertEqual(to_memory_str(memory), "10G")
<add>
<add> def test_to_cpu_str(self):
<add> cpu = 0
<add> self.assertEqual(to_cpu_str(cpu), "0")
<add> cpu = 0.5
<add> self.assertEqual(to_cpu_str(cpu), "500m")
<add> cpu = 2
<add> self.assertEqual(to_cpu_str(cpu), "2000m")
<add> cpu = 0.000001
<add> self.assertEqual(to_cpu_str(cpu), "1u")
<add> cpu = 0.0005
<add> self.assertEqual(to_cpu_str(cpu), "500u")
<add> cpu = 0.000000001
<add> self.assertEqual(to_cpu_str(cpu), "1n")
<add> cpu = 0.0000005
<add> self.assertEqual(to_cpu_str(cpu), "500n")
<add>
<add> def test_to_n_cpus(self):
<add> cpu = "0m"
<add> self.assertEqual(to_n_cpus(cpu), 0)
<add> cpu = "2"
<add> self.assertEqual(to_n_cpus(cpu), 2)
<add> cpu = "500m"
<add> self.assertEqual(to_n_cpus(cpu), 0.5)
<add> cpu = "500m"
<add> self.assertEqual(to_n_cpus(cpu), 0.5)
<add> cpu = "2000m"
<add> self.assertEqual(to_n_cpus(cpu), 2)
<add> cpu = "1u"
<add> self.assertEqual(to_n_cpus(cpu), 0.000001)
<add> cpu = "500u"
<add> self.assertEqual(to_n_cpus(cpu), 0.0005)
<add> cpu = "1n"
<add> self.assertEqual(to_n_cpus(cpu), 0.000000001)
<add> cpu = "500n"
<add> self.assertEqual(to_n_cpus(cpu), 0.0000005)
<add>
<add> def test_sum_resources(self):
<add> resource_1 = {"cpu": "1", "memory": "1000Mi"}
<add> resource_2 = {"cpu": "2", "memory": "2000Mi"}
<add> self.assertDictEqual(
<add> sum_resources(resource_1, resource_2),
<add> {"cpu": "3000m", "memory": "3000Mi"},
<add> )
<add> resource_3 = {"cpu": "1500m", "memory": "1Gi"}
<add> self.assertDictEqual(
<add> sum_resources(resource_1, resource_2, resource_3),
<add> {"cpu": "4500m", "memory": "4024Mi"},
<add> )
<add>
<ide>
<ide> class KubernetesMockHttp(MockHttp):
<ide> fixtures = ContainerFileFixtures("kubernetes") | 2 |
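The expected values in these tests double as a usage reference for the utilities; condensed (imports as in the test module):

```python
from libcloud.container.drivers.kubernetes import (
    to_n_bytes, to_memory_str, to_cpu_str, to_n_cpus, sum_resources)

to_n_bytes('512Mi')         # 536870912
to_memory_str(536870912)    # '512Mi'
to_cpu_str(0.5)             # '500m'
to_n_cpus('1500m')          # 1.5
sum_resources({'cpu': '1', 'memory': '1000Mi'},
              {'cpu': '2', 'memory': '2000Mi'})
# {'cpu': '3000m', 'memory': '3000Mi'}
```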
Python | Python | fix dec_attn_mask in tftransfoxlmainlayer | d4692ad16162d1f45e57a074f887188ad9779c22 | <ide><path>src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py
<ide> def call(
<ide> mlen = shape_list(inputs["mems"][0])[0] if inputs["mems"] is not None else 0
<ide> klen = mlen + qlen
<ide>
<del> attn_mask = tf.ones([qlen, qlen])
<del> mask_u = tf.linalg.band_part(attn_mask, 0, -1)
<del> mask_dia = tf.linalg.band_part(attn_mask, 0, 0)
<del> attn_mask_pad = tf.zeros([qlen, mlen])
<del> dec_attn_mask = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
<del> if self.same_length:
<del> mask_l = tf.linalg.band_part(attn_mask, -1, 0)
<del> dec_attn_mask = tf.concat([dec_attn_mask[:, :qlen] + mask_l - mask_dia, dec_attn_mask[:, qlen:]], 1)
<add> # Compute decoder attention mask
<add>
<ide> # ::: PyTorch masking code for reference :::
<ide> # if self.same_length:
<ide> # all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)
<ide> def call(
<ide> # dec_attn_mask = torch.triu(
<ide> # word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1+mlen)[:,:,None]
<ide>
<add> # TensorFlow version
<add> dec_attn_mask = 1 - tf.linalg.band_part(
<add> tf.ones([qlen, klen], dtype=tf.int32), -1, mlen
<add> ) # (q, q): diagonal with 1's
<add> if self.same_length:
<add> mask_len = klen - self.mem_len
<add> if mask_len > 0:
<add> mask_shift_len = qlen - mask_len
<add> else:
<add> mask_shift_len = qlen
<add> if mask_shift_len >= 1:
<add> dec_attn_mask += 1 - tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), mask_shift_len - 1, -1)
<add> else:
<add> dec_attn_mask += tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, -mask_shift_len)
<add>
<ide> hids = []
<ide> attentions = [] if inputs["output_attentions"] else None
<ide> if self.attn_type == 0: # default | 1 |
Ruby | Ruby | add apple silicon | 6a3f18b0ae65806710c8d7d7a3b95bef81b05b11 | <ide><path>Library/Homebrew/extend/os/mac/hardware/cpu.rb
<ide> def type
<ide> case sysctl_int("hw.cputype")
<ide> when 7
<ide> :intel
<add> when MachO::Headers::CPU_TYPE_ARM64
<add> :arm
<ide> else
<ide> :dunno
<ide> end
<ide> end
<ide>
<ide> def family
<add> return :dunno if arm?
<add>
<ide> case sysctl_int("hw.cpufamily")
<ide> when 0x73d67300 # Yonah: Core Solo/Duo
<ide> :core | 1 |
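A hedged sketch of what the mapping change means for callers (the surrounding code is hypothetical): `hw.cputype` of `CPU_TYPE_ARM64` now reports `:arm` instead of `:dunno`, while the CPU *family* still returns `:dunno` on ARM:

```ruby
case Hardware::CPU.type
when :arm
  # Apple Silicon: recognized as of this change; family stays :dunno
  puts "arm64 Mac"
when :intel
  puts "Intel Mac"
end
```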
Javascript | Javascript | replace anonymous closure with arrow functions | 0bf743a8a57ef59e5ca0105d1697486aa20d3c05 | <ide><path>test/pummel/test-net-pause.js
<ide> const N = 200;
<ide> let recv = '';
<ide> let chars_recved = 0;
<ide>
<del>const server = net.createServer(function(connection) {
<add>const server = net.createServer((connection) => {
<ide> function write(j) {
<ide> if (j >= N) {
<ide> connection.end();
<ide> return;
<ide> }
<del> setTimeout(function() {
<add> setTimeout(() => {
<ide> connection.write('C');
<ide> write(j + 1);
<ide> }, 10);
<ide> }
<ide> write(0);
<ide> });
<ide>
<del>server.on('listening', function() {
<add>server.on('listening', () => {
<ide> const client = net.createConnection(common.PORT);
<ide> client.setEncoding('ascii');
<ide> client.on('data', function(d) {
<ide> console.log(d);
<ide> recv += d;
<ide> });
<ide>
<del> setTimeout(function() {
<add> setTimeout(() => {
<ide> chars_recved = recv.length;
<ide> console.log(`pause at: ${chars_recved}`);
<ide> assert.strictEqual(chars_recved > 1, true);
<ide> client.pause();
<del> setTimeout(function() {
<add> setTimeout(() => {
<ide> console.log(`resume at: ${chars_recved}`);
<ide> assert.strictEqual(chars_recved, recv.length);
<ide> client.resume();
<ide>
<del> setTimeout(function() {
<add> setTimeout(() => {
<ide> chars_recved = recv.length;
<ide> console.log(`pause at: ${chars_recved}`);
<ide> client.pause();
<ide>
<del> setTimeout(function() {
<add> setTimeout(() => {
<ide> console.log(`resume at: ${chars_recved}`);
<ide> assert.strictEqual(chars_recved, recv.length);
<ide> client.resume();
<ide> server.on('listening', function() {
<ide>
<ide> }, 500);
<ide>
<del> client.on('end', function() {
<add> client.on('end', () => {
<ide> server.close();
<ide> client.end();
<ide> });
<ide> });
<ide> server.listen(common.PORT);
<ide>
<del>process.on('exit', function() {
<add>process.on('exit', () => {
<ide> assert.strictEqual(recv.length, N);
<ide> console.error('Exit');
<ide> }); | 1 |
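Worth noting why the conversion is safe here: arrow functions bind `this` lexically, and none of the converted callbacks read the emitter's dynamic `this`. A small contrast sketch:

```js
const net = require('net');
const server = net.createServer();

// Fine as an arrow: the callback never touches `this`.
server.on('listening', () => { /* ... */ });

// A callback that needed `this === socket` would have to stay a
// function expression, e.g.:
// socket.on('data', function(d) { this.pause(); });
```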
Python | Python | fix minor docstring typos | a6146e10e1cf92506a926fdccea1cab7b0c07096 | <ide><path>numpy/core/defchararray.py
<ide> def split(a, sep=None, maxsplit=None):
<ide> For each element in `a`, return a list of the words in the
<ide> string, using `sep` as the delimiter string.
<ide>
<del> Calls `str.rsplit` element-wise.
<add> Calls `str.split` element-wise.
<ide>
<ide> Parameters
<ide> ----------
<ide> def strip(a, chars=None):
<ide> For each element in `a`, return a copy with the leading and
<ide> trailing characters removed.
<ide>
<del> Calls `str.rstrip` element-wise.
<add> Calls `str.strip` element-wise.
<ide>
<ide> Parameters
<ide> ---------- | 1 |
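For reference, the corrected docstrings describe element-wise wrappers over the plain `str` methods:

```python
import numpy as np

a = np.array(['  hello world  '])
np.char.split(a)   # str.split per element -> [list(['hello', 'world'])]
np.char.strip(a)   # str.strip per element -> ['hello world']
```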
Javascript | Javascript | add support for required property in checkbox | 15d5a9e305c828baf3eac36d61acd6f03f8db02f | <ide><path>packages_es6/ember-handlebars/lib/controls/checkbox.js
<ide> var Checkbox = View.extend({
<ide> tagName: 'input',
<ide>
<ide> attributeBindings: ['type', 'checked', 'indeterminate', 'disabled', 'tabindex', 'name',
<del> 'autofocus', 'form'],
<add> 'autofocus', 'required', 'form'],
<ide>
<ide> type: "checkbox",
<ide> checked: false, | 1 |
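A minimal sketch of what the new binding enables (the usage is assumed, not from the patch): with `required` added to `attributeBindings`, the property is forwarded to the DOM element:

```javascript
var checkbox = Ember.Checkbox.create({ required: true });
// renders roughly as: <input type="checkbox" required>
```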
Ruby | Ruby | replace snowman with utf8=✓ | c6160898c83107ba63017ad2a8b3878733267136 | <ide><path>actionpack/lib/action_view/helpers/form_tag_helper.rb
<ide> def html_options_for_form(url_for_options, options, *parameters_for_url)
<ide>
<ide> def extra_tags_for_form(html_options)
<ide> snowman_tag = tag(:input, :type => "hidden",
<del> :name => "_e", :value => "☃".html_safe)
<add> :name => "utf8", :value => "✓".html_safe)
<ide>
<ide> method = html_options.delete("method").to_s
<ide>
<ide><path>actionpack/test/template/form_helper_test.rb
<ide> def test_form_for_with_labelled_builder
<ide>
<ide> def snowman(method = nil)
<ide> txt = %{<div style="margin:0;padding:0;display:inline">}
<del> txt << %{<input name="_e" type="hidden" value="☃" />}
<add> txt << %{<input name="utf8" type="hidden" value="✓" />}
<ide> txt << %{<input name="_method" type="hidden" value="#{method}" />} if method
<ide> txt << %{</div>}
<ide> end
<ide><path>actionpack/test/template/form_tag_helper_test.rb
<ide> def snowman(options = {})
<ide> method = options[:method]
<ide>
<ide> txt = %{<div style="margin:0;padding:0;display:inline">}
<del> txt << %{<input name="_e" type="hidden" value="☃" />}
<add> txt << %{<input name="utf8" type="hidden" value="✓" />}
<ide> txt << %{<input name="_method" type="hidden" value="#{method}" />} if method
<ide> txt << %{</div>}
<ide> end | 3 |
Javascript | Javascript | fix variable shadowing in blog-starter example | aed8b3752999ad08935c6e38880091da3b69cc2b | <ide><path>examples/blog-starter/pages/posts/[slug].js
<ide> export async function getStaticPaths() {
<ide> const posts = getAllPosts(['slug'])
<ide>
<ide> return {
<del> paths: posts.map((posts) => {
<add> paths: posts.map((post) => {
<ide> return {
<ide> params: {
<del> slug: posts.slug,
<add> slug: post.slug,
<ide> },
<ide> }
<ide> }), | 1 |
Go | Go | add tests related to hcsshim recycle bin skipping | 72192f5052667118c2f83282f8f8c3df8cbf514b | <ide><path>integration-cli/docker_api_build_windows_test.go
<add>// +build windows
<add>
<add>package main
<add>
<add>import (
<add> "net/http"
<add>
<add> "github.com/docker/docker/integration-cli/checker"
<add> "github.com/docker/docker/internal/test/fakecontext"
<add> "github.com/docker/docker/internal/test/request"
<add> "github.com/go-check/check"
<add> "github.com/gotestyourself/gotestyourself/assert"
<add> is "github.com/gotestyourself/gotestyourself/assert/cmp"
<add>)
<add>
<add>func (s *DockerSuite) TestBuildWithRecycleBin(c *check.C) {
<add> testRequires(c, DaemonIsWindows)
<add>
<add> dockerfile := "" +
<add> "FROM " + testEnv.PlatformDefaults.BaseImage + "\n" +
<add> "RUN md $REcycLE.biN && md missing\n" +
<add> "RUN dir $Recycle.Bin && exit 1 || exit 0\n" +
<add> "RUN dir missing\n"
<add>
<add> ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
<add> defer ctx.Close()
<add>
<add> res, body, err := request.Post(
<add> "/build",
<add> request.RawContent(ctx.AsTarReader(c)),
<add> request.ContentType("application/x-tar"))
<add>
<add> c.Assert(err, checker.IsNil)
<add> c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
<add>
<add> out, err := request.ReadBody(body)
<add> assert.NilError(c, err)
<add> assert.Check(c, is.Contains(string(out), "Successfully built"))
<add>} | 1 |
Go | Go | use correct lstat, fix archive check | a5aed699cfaa4d84b1b134033fb468b3a7a874f0 | <ide><path>builder/remotecontext/lazycontext.go
<ide> func (c *lazySource) Hash(path string) (string, error) {
<ide> return "", errors.WithStack(convertPathError(err, cleanPath))
<ide> }
<ide>
<del> fi, err := os.Lstat(fullPath)
<add> fi, err := c.root.Lstat(fullPath)
<ide> if err != nil {
<ide> // Backwards compatibility: a missing file returns a path as hash.
<ide> // This is reached in the case of a broken symlink.
<ide><path>pkg/archive/archive.go
<ide> func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
<ide> hdr.AccessTime = time.Time{}
<ide> hdr.ChangeTime = time.Time{}
<ide> hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
<del> name, err = canonicalTarName(name, fi.IsDir())
<del> if err != nil {
<del> return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
<del> }
<del> hdr.Name = name
<add> hdr.Name = canonicalTarName(name, fi.IsDir())
<ide> if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
<ide> return nil, err
<ide> }
<ide> func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *
<ide>
<ide> // canonicalTarName provides a platform-independent and consistent posix-style
<ide> //path for files and directories to be archived regardless of the platform.
<del>func canonicalTarName(name string, isDir bool) (string, error) {
<del> name, err := CanonicalTarNameForPath(name)
<del> if err != nil {
<del> return "", err
<del> }
<add>func canonicalTarName(name string, isDir bool) string {
<add> name = CanonicalTarNameForPath(name)
<ide>
<ide> // suffix with '/' for directories
<ide> if isDir && !strings.HasSuffix(name, "/") {
<ide> name += "/"
<ide> }
<del> return name, nil
<add> return name
<ide> }
<ide>
<ide> // addTarFile adds to the tar archive a file from `path` as `name`
<ide><path>pkg/archive/archive_unix.go
<ide> func getWalkRoot(srcPath string, include string) string {
<ide> // CanonicalTarNameForPath returns platform-specific filepath
<ide> // to canonical posix-style path for tar archival. p is relative
<ide> // path.
<del>func CanonicalTarNameForPath(p string) (string, error) {
<del> return p, nil // already unix-style
<add>func CanonicalTarNameForPath(p string) string {
<add> return p // already unix-style
<ide> }
<ide>
<ide> // chmodTarEntry is used to adjust the file permissions used in tar header based
<ide><path>pkg/archive/archive_unix_test.go
<ide> func TestCanonicalTarNameForPath(t *testing.T) {
<ide> {"foo/dir/", "foo/dir/"},
<ide> }
<ide> for _, v := range cases {
<del> if out, err := CanonicalTarNameForPath(v.in); err != nil {
<del> t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
<del> } else if out != v.expected {
<del> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
<add> if CanonicalTarNameForPath(v.in) != v.expected {
<add> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, CanonicalTarNameForPath(v.in))
<ide> }
<ide> }
<ide> }
<ide> func TestCanonicalTarName(t *testing.T) {
<ide> {"foo/bar", true, "foo/bar/"},
<ide> }
<ide> for _, v := range cases {
<del> if out, err := canonicalTarName(v.in, v.isDir); err != nil {
<del> t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
<del> } else if out != v.expected {
<del> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
<add> if canonicalTarName(v.in, v.isDir) != v.expected {
<add> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, canonicalTarName(v.in, v.isDir))
<ide> }
<ide> }
<ide> }
<ide><path>pkg/archive/archive_windows.go
<ide> package archive // import "github.com/docker/docker/pkg/archive"
<ide>
<ide> import (
<ide> "archive/tar"
<del> "fmt"
<ide> "os"
<ide> "path/filepath"
<del> "strings"
<ide>
<ide> "github.com/docker/docker/pkg/idtools"
<ide> "github.com/docker/docker/pkg/longpath"
<ide> func getWalkRoot(srcPath string, include string) string {
<ide> // CanonicalTarNameForPath returns platform-specific filepath
<ide> // to canonical posix-style path for tar archival. p is relative
<ide> // path.
<del>func CanonicalTarNameForPath(p string) (string, error) {
<del> // windows: convert windows style relative path with backslashes
<del> // into forward slashes. Since windows does not allow '/' or '\'
<del> // in file names, it is mostly safe to replace however we must
<del> // check just in case
<del> if strings.Contains(p, "/") {
<del> return "", fmt.Errorf("Windows path contains forward slash: %s", p)
<del> }
<del> return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
<del>
<add>func CanonicalTarNameForPath(p string) string {
<add> return filepath.ToSlash(p)
<ide> }
<ide>
<ide> // chmodTarEntry is used to adjust the file permissions used in tar header based
<ide><path>pkg/archive/archive_windows_test.go
<ide> func TestCopyFileWithInvalidDest(t *testing.T) {
<ide> func TestCanonicalTarNameForPath(t *testing.T) {
<ide> cases := []struct {
<ide> in, expected string
<del> shouldFail bool
<ide> }{
<del> {"foo", "foo", false},
<del> {"foo/bar", "___", true}, // unix-styled windows path must fail
<del> {`foo\bar`, "foo/bar", false},
<add> {"foo", "foo"},
<add> {"foo/bar", "foo/bar"},
<add> {`foo\bar`, "foo/bar"},
<ide> }
<ide> for _, v := range cases {
<del> if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail {
<del> t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
<del> } else if v.shouldFail && err == nil {
<del> t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out)
<del> } else if !v.shouldFail && out != v.expected {
<del> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
<add> if CanonicalTarNameForPath(v.in) != v.expected {
<add> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, CanonicalTarNameForPath(v.in))
<ide> }
<ide> }
<ide> }
<ide> func TestCanonicalTarName(t *testing.T) {
<ide> {`foo\bar`, true, "foo/bar/"},
<ide> }
<ide> for _, v := range cases {
<del> if out, err := canonicalTarName(v.in, v.isDir); err != nil {
<del> t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
<del> } else if out != v.expected {
<del> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
<add> if canonicalTarName(v.in, v.isDir) != v.expected {
<add> t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, canonicalTarName(v.in, v.isDir))
<ide> }
<ide> }
<ide> } | 6 |
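The Windows normalization collapses to `filepath.ToSlash`, with no error return and no rejection of forward slashes; a runnable mirror of the new behaviour (expected values taken from the updated tests):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// mirrors the new CanonicalTarNameForPath on Windows
func canonicalTarNameForPath(p string) string {
	return filepath.ToSlash(p)
}

func main() {
	fmt.Println(canonicalTarNameForPath(`foo\bar`)) // "foo/bar" on Windows
	fmt.Println(canonicalTarNameForPath("foo/bar")) // "foo/bar", no error now
}
```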
Javascript | Javascript | add manifest to HTMLDOMPropertyConfig | 3de80ec4ba932a486c1294c539a5da974fc289db
<ide> var HTMLDOMPropertyConfig = {
<ide> lang: null,
<ide> list: null,
<ide> loop: MUST_USE_PROPERTY | HAS_BOOLEAN_VALUE,
<add> manifest: MUST_USE_ATTRIBUTE,
<ide> max: null,
<ide> maxLength: MUST_USE_ATTRIBUTE,
<ide> media: MUST_USE_ATTRIBUTE, | 1 |
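A small JSX sketch (the filename is invented): with `manifest` registered as `MUST_USE_ATTRIBUTE`, the appcache attribute is rendered instead of being silently dropped:

```jsx
var page = <html manifest="offline.appcache"><body /></html>;
// -> <html manifest="offline.appcache">...
```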
Javascript | Javascript | update documentation for EmberArray.any | 2ee24e149401b3907ea96513a88d1b8516f8835e
<ide> const ArrayMixin = Mixin.create(Enumerable, {
<ide> },
<ide>
<ide> /**
<del> Returns `true` if the passed function returns true for any item in the
<del> enumeration.
<del>
<del> The callback method you provide should have the following signature (all
<del> parameters are optional):
<add> The any() method executes the callback function once for each element
<add> present in the array until it finds the one where callback returns a truthy
<add> value (i.e. `true`). If such an element is found, any() immediately returns
<add> true. Otherwise, any() returns false.
<ide>
<ide> ```javascript
<ide> function(item, index, array);
<ide> const ArrayMixin = Mixin.create(Enumerable, {
<ide> - `index` is the current index in the iteration.
<ide> - `array` is the array object itself.
<ide>
<del> It must return a truthy value (i.e. `true`) to include an item in the
<del> results. Any non-truthy return value will discard the item from the
<del> results.
<del>
<ide> Note that in addition to a callback, you can also pass an optional target
<del> object that will be set as `this` on the context. This is a good way
<del> to give your iterator function access to the current object.
<add> object that will be set as `this` on the context. It can be a good way
<add> to give your iterator function access to an object in cases where an ES6
<add> arrow function would not be appropriate.
<ide>
<ide> Usage Example:
<ide>
<ide> ```javascript
<del> if (people.any(isManager)) {
<add> let includesManager = people.any(this.findPersonInManagersList, this);
<add>
<add> let includesStockHolder = people.any(person => {
<add> return this.findPersonInStockHoldersList(person)
<add> });
<add>
<add> if (includesManager || includesStockHolder) {
<ide> Paychecks.addBiggerBonus();
<ide> }
<ide> ``` | 1 |
Python | Python | TimeDistributedDense speed up | 089fa1175260e06f18c249c9ab12a1df3a586795
<ide> def output_shape(self):
<ide>
<ide> def get_output(self, train=False):
<ide> X = self.get_input(train)
<del>
<add> input_length = self.input_shape[1]
<add> if input_length and K._BACKEND == 'theano':
<add> import theano.tensor as T
<add> #X: (nb_samples, timesteps, input_dim)
<add> X = K.permute_dimensions(X, (1, 0, 2))
<add> #X: (timesteps, nb_samples, input_dim)
<add> W = [self.W] * input_length
<add> W = T.stack(*W)
<add> #W: (timesteps, input_dim, output_dim)
<add> z = T.batched_tensordot(X, W, axes=[(2), (1)])
<add> #z: (timesteps, nb_samples, output_dim)
<add> z = K.permute_dimensions(z, (1, 0, 2))
<add> #z: (nb_samples, timesteps, output_dim)
<add> b = [self.b] * input_length
<add> b = T.stack(*b)
<add> #b: (timesteps, output_dim)
<add> Y = self.activation(z + b)
<add> return Y
<add>
<ide> def step(x, states):
<ide> output = K.dot(x, self.W) + self.b
<ide> return output, [] | 1 |
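A worked shape trace (dimensions invented) shows where the speed-up comes from when `input_length` is known and the backend is Theano — one `batched_tensordot` replaces a per-timestep scan:

```python
# nb_samples=32, timesteps=10, input_dim=64, output_dim=8
#
# X  (32, 10, 64) --permute(1,0,2)--> (10, 32, 64)
# W  stacked 10x:                     (10, 64,  8)
# batched_tensordot over input_dim:   (10, 32,  8)
# permute back:                       (32, 10,  8)
# b  stacked 10x: (10, 8), broadcast across the 32 samples in z + b
#
# Net effect: one batched matmul instead of 10 sequential step() calls.
```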
Ruby | Ruby | return failing exit code on circular dependencies | a5f7fc814e8e6452625fde42f958ee3804cb17e7 | <ide><path>Library/Homebrew/cmd/deps.rb
<ide> def recursive_deps_tree(f, dep_stack:, prefix:, recursive:, args:)
<ide> end
<ide>
<ide> display_s = "#{tree_lines} #{dep_display_name(dep, args: args)}"
<add>
<add> # Detect circular dependencies and consider them a failure if present.
<ide> is_circular = dep_stack.include?(dep.name)
<del> display_s = "#{display_s} (CIRCULAR DEPENDENCY)" if is_circular
<add> if is_circular
<add> display_s = "#{display_s} (CIRCULAR DEPENDENCY)"
<add> Homebrew.failed = true
<add> end
<add>
<ide> puts "#{prefix}#{display_s}"
<ide>
<ide> next if !recursive || is_circular | 1 |
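A hypothetical session (formula names invented) showing the effect of setting `Homebrew.failed` when a cycle is detected:

```console
$ brew deps --tree alpha    # alpha -> beta -> alpha
alpha
└── beta
    └── alpha (CIRCULAR DEPENDENCY)
$ echo $?
1
```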
Ruby | Ruby | use mutex_m rather than a delegate system | 6d71080530f8127b1a029f4314891c26e59446ce
<ide> require "active_support/core_ext/class/attribute_accessors"
<ide> require "action_view/template"
<ide> require "thread"
<add>require "mutex_m"
<ide>
<ide> module ActionView
<ide> # = Action View Resolver
<ide> def to_str
<ide> # Threadsafe template cache
<ide> class Cache #:nodoc:
<ide> class CacheEntry
<del> attr_accessor :templates
<del>
<del> delegate :synchronize, :to => "@mutex"
<add> include Mutex_m
<ide>
<del> def initialize
<del> @mutex = Mutex.new
<del> end
<add> attr_accessor :templates
<ide> end
<ide>
<ide> def initialize | 1 |
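The gist of the swap: `Mutex_m` mixes the locking API directly into the object, so the hand-rolled `@mutex` plus `delegate :synchronize` pair goes away. A standalone sketch:

```ruby
require "mutex_m"

class CacheEntry
  include Mutex_m       # brings in #synchronize (and #lock/#unlock)
  attr_accessor :templates
end

entry = CacheEntry.new
entry.synchronize do    # no delegate, no explicit Mutex.new needed
  entry.templates ||= []
end
```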
Javascript | Javascript | use Buffer.alloc(0) for zero-size buffers | 0f944ab3cf4435c299471e90515742eb99bac15e
<ide> Socket.prototype.send = function(buffer,
<ide> self.bind({port: 0, exclusive: true}, null);
<ide>
<ide> if (list.length === 0)
<del> list.push(Buffer.allocUnsafe(0));
<add> list.push(Buffer.alloc(0));
<ide>
<ide> // If the socket hasn't been bound yet, push the outbound packet onto the
<ide> // send queue and send after binding is complete. | 1 |
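For context: at size 0 the two calls behave identically, but `alloc` signals intent and stays off the uninitialized-memory fast path:

```js
const safe = Buffer.alloc(0);        // zero-length, zero-filled
const fast = Buffer.allocUnsafe(0);  // zero-length, pooled/unsafe path

console.log(safe.equals(fast));      // true -- indistinguishable at size 0
```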
Ruby | Ruby | add example label to activesupport/configurable | 664afe37dd254199346b837286395325cf046188 | <ide><path>activesupport/lib/active_support/configurable.rb
<ide> def #{name}=(value); config.#{name} = value; end
<ide>
<ide> # Reads and writes attributes from a configuration <tt>OrderedHash</tt>.
<ide> #
<add> # Example:
<add> #
<ide> # require 'active_support/configurable'
<ide> #
<ide> # class User | 1 |
Text | Text | improve guide text for ci runs | 0a78f7d622534344888013f93de5d9fed6305e6b | <ide><path>COLLABORATOR_GUIDE.md
<ide> status indicator.
<ide>
<ide> Do not land any Pull Requests without passing (green or yellow) CI runs. If you
<ide> believe any failed (red or grey) CI sub-tasks are unrelated to the change in the
<del>Pull Request, you may re-run the sub-task to try to see if it passes (just open
<del>the failed sub-task page and press the "Rebuild" button; be sure you are still
<del>logged in for this action). If re-runs of all failed sub-tasks pass (do not
<del>forget to provide the links for successfully rerun sub-tasks), it is permissible
<del>to land the Pull Request but only if the initial failures are believed in good
<del>faith to be unrelated to the changes in the Pull Request. Otherwise, reasonable
<del>steps must be taken to confirm that the changes are not resulting in an
<del>unreliable test.
<add>Pull Request, use "Resume Build" in the left navigation of the relevant
<add>`node-test-pull-request` job. It will create a new `node-test-pull-request` run
<add>that preserves all the green results from the current job but re-runs everything
<add>else.
<ide>
<ide> #### Useful CI Jobs
<ide> | 1 |
Javascript | Javascript | improve angular.scope.$eval docs | 8d91ec4173a652da9fe984d12a50d6b1b4ef935f | <ide><path>src/Scope.js
<ide> function createScope(parent, providers, instanceCache) {
<ide> * @function
<ide> *
<ide> * @description
<del> * Without the `exp` parameter triggers an eval cycle, for this scope and it's child scopes.
<add> * Without the `exp` parameter triggers an eval cycle for this scope and its child scopes.
<ide> *
<ide> * With the `exp` parameter, compiles the expression to a function and calls it with `this` set
<del> * to the current scope and returns the result.
<add> * to the current scope and returns the result. In other words, evaluates `exp` as an
<add> * angular expression in the context of the current scope.
<ide> *
<ide> * # Example
<ide> <pre> | 1 |
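
A short sketch of the two documented modes; the scope factory is assumed from this era's public AngularJS API:

```js
var scope = angular.scope();
scope.a = 1;
scope.b = 2;
scope.$eval('a + b'); // => 3 — evaluates the expression against the scope
scope.$eval();        // no expression: runs an eval cycle on scope + children
```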
PHP | PHP | add test for booting callbacks | aa8f695cb18edb950938591593f0e0e5bfc3d7b1 | <ide><path>tests/Foundation/FoundationApplicationTest.php
<ide> public function testAfterBootstrappingAddsClosure()
<ide> $app->afterBootstrapping(RegisterFacades::class, $closure);
<ide> $this->assertArrayHasKey(0, $app['events']->getListeners('bootstrapped: Illuminate\Foundation\Bootstrap\RegisterFacades'));
<ide> }
<add>
<add> public function testBootingCallbacks()
<add> {
<add> $app = new Application;
<add>
<add> $counter = 0;
<add> $closure = function ($app) use (&$counter) {
<add> $counter++;
<add> $this->assertInstanceOf(Application::class, $app);
<add> };
<add>
<add> $closure2 = function ($app) use (&$counter) {
<add> $counter++;
<add> $this->assertInstanceOf(Application::class, $app);
<add> };
<add>
<add> $app->booting($closure);
<add> $app->booting($closure2);
<add> $app->boot();
<add> $this->assertEquals(2, $counter);
<add> }
<ide> }
<ide>
<ide> class ApplicationBasicServiceProviderStub extends ServiceProvider | 1 |
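
Outside of tests, the same hook is usually registered from a service provider rather than by calling `booting()` directly; a hedged sketch with a hypothetical provider class:

```php
class AppServiceProvider extends ServiceProvider
{
    public function register()
    {
        $this->app->booting(function ($app) {
            // runs right before the application finishes booting
        });
    }
}
```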
Python | Python | add ex_list_nodes method | 17715cdd67cab500f00f0071dd974c081048e6bd | <ide><path>libcloud/container/drivers/kubernetes.py
<ide>
<ide> import datetime
<ide> import json
<add>import hashlib
<ide>
<ide> from libcloud.container.base import (
<ide> Container,
<ide> from libcloud.container.providers import Provider
<ide> from libcloud.container.types import ContainerState
<ide>
<del>__all__ = ["KubernetesContainerDriver"]
<add>from libcloud.compute.types import NodeState
<add>from libcloud.compute.base import Node
<add>from libcloud.compute.base import NodeDriver
<add>from libcloud.compute.base import NodeSize
<add>from libcloud.compute.base import NodeImage
<add>from libcloud.compute.base import NodeLocation
<add>
<add>__all__ = [
<add> 'KubernetesContainerDriver'
<add>]
<ide>
<ide>
<ide> ROOT_URL = "/api/"
<ide> def destroy_container(self, container):
<ide> """
<ide> return self.ex_destroy_pod(container.extra["namespace"], container.extra["pod"])
<ide>
<add> def ex_list_nodes(self):
<add> """
<add> List available Nodes
<add>
<add> :rtype: ``list`` of :class:`.Node`
<add> """
<add> result = self.connection.request(ROOT_URL + "v1/nodes").object
<add> return [self._to_node(node) for node in result['items']]
<add>
<add> def _to_node(self, node):
<add> """
<add> Convert an API node to a `Node` object
<add> """
<add> ID = node['metadata']['uid']
<add> name = node['metadata']['name']
<add> driver = self.connection.driver
<add> namespace = 'undefined'
<add> memory = node['status'].get('capacity', {}).get('memory', 0)
<add> if not isinstance(memory, int):
<add> if 'Ki' in memory:
<add> memory = memory.rstrip('Ki')
<add> memory = int(memory) * 1024
<add> elif 'K' in memory:
<add> memory = memory.rstrip('K')
<add> memory = int(memory) * 1000
<add> elif 'M' in memory or 'Mi' in memory:
<add> memory = memory.rstrip('M')
<add> memory = memory.rstrip('Mi')
<add> memory = int(memory)
<add> elif 'Gi' in memory:
<add> memory = memory.rstrip('Gi')
<add> memory = int(memory) // 1024
<add> elif 'G' in memory:
<add> memory = memory.rstrip('G')
<add> memory = int(memory) // 1000
<add> cpu = node['status'].get('capacity', {}).get('cpu', 1)
<add> if not isinstance(cpu, int):
<add> cpu = int(cpu.rstrip('m'))
<add> extra_size = {'cpus': cpu}
<add> size_name = f'{cpu} vCPUs, {memory}MB Ram'
<add> size_id = hashlib.md5(size_name.encode("utf-8")).hexdigest()
<add> size = NodeSize(id=size_id, name=size_name, ram=memory,
<add> disk=0, bandwidth=0, price=0,
<add> driver=driver, extra=extra_size)
<add> extra = {'memory': memory, 'cpu': cpu}
<add> # TODO: Find state
<add> state = NodeState.UNKNOWN
<add> public_ips, private_ips = [], []
<add> for address in node['status']['addresses']:
<add> if address['type'] == 'InternalIP':
<add> private_ips.append(address['address'])
<add> elif address['type'] == 'ExternalIP':
<add> public_ips.append(address['address'])
<add> created_at = datetime.datetime.strptime(
<add> node['metadata']['creationTimestamp'],
<add> '%Y-%m-%dT%H:%M:%SZ')
<add> return Node(id=ID, name=name, state=state,
<add> public_ips=public_ips,
<add> private_ips=private_ips,
<add> driver=driver, size=size,
<add> extra=extra, created_at=created_at)
<add>
<ide> def ex_list_pods(self):
<ide> """
<ide> List available Pods | 1 |
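
A hedged usage sketch of the new method; the host and credentials are placeholders, and the constructor arguments are assumed from libcloud's usual connection conventions:

```python
from libcloud.container.providers import get_driver
from libcloud.container.types import Provider

cls = get_driver(Provider.KUBERNETES)
driver = cls(key='user', secret='secret', host='kubernetes.example.com')

for node in driver.ex_list_nodes():
    # ram is normalized to MB and cpu to a count by _to_node above
    print(node.name, node.size.ram, node.extra['cpu'])
```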
Text | Text | use hub_module_url in bert readme file | 6a76ce5b0c33e542f9ec8645dfd340b26f561f17 | <ide><path>official/nlp/bert/README.md
<ide> This repository contains TensorFlow 2.x implementation for BERT.
<ide>
<ide> ## Pre-trained Models
<ide>
<del>Our current released checkpoints are exactly the same as TF 1.x official BERT
<del>repository, thus inside `BertConfig`, there is `backward_compatible=True`. We
<del>are going to release new pre-trained checkpoints soon.
<add>We released both checkpoints and tf.hub modules as the pretrained models for
<add>fine-tuning. They are TF 2.x compatible and are converted from the checkpoints
<add>released in TF 1.x official BERT repository
<add>[google-research/bert](https://github.com/google-research/bert)
<add>in order to keep consistent with the BERT paper.
<add>
<ide>
<ide> ### Access to Pretrained Checkpoints
<ide>
<del>We provide checkpoints that are converted from [google-research/bert](https://github.com/google-research/bert),
<del>in order to keep consistent with BERT paper.
<add>Pretrained checkpoints can be found in the following links:
<ide>
<ide> **Note: We have switched BERT implementation
<ide> to use Keras functional-style networks in [nlp/modeling](../modeling).
<ide> The new checkpoints are:**
<ide> * **[`BERT-Large, Cased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-24_H-1024_A-16.tar.gz)**:
<ide> 24-layer, 1024-hidden, 16-heads, 340M parameters
<ide>
<del>Here are the stable model checkpoints work with [v2.0 release](https://github.com/tensorflow/models/releases/tag/v2.0).
<del>
<del>**Note: these checkpoints are not compatible with the current master examples.**
<del>
<del>* **[`BERT-Large, Uncased (Whole Word Masking)`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/tf_20/wwm_uncased_L-24_H-1024_A-16.tar.gz)**:
<del> 24-layer, 1024-hidden, 16-heads, 340M parameters
<del>* **[`BERT-Large, Cased (Whole Word Masking)`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/tf_20/wwm_cased_L-24_H-1024_A-16.tar.gz)**:
<del> 24-layer, 1024-hidden, 16-heads, 340M parameters
<del>* **[`BERT-Base, Uncased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/tf_20/uncased_L-12_H-768_A-12.tar.gz)**:
<del> 12-layer, 768-hidden, 12-heads, 110M parameters
<del>* **[`BERT-Large, Uncased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/tf_20/uncased_L-24_H-1024_A-16.tar.gz)**:
<del> 24-layer, 1024-hidden, 16-heads, 340M parameters
<del>* **[`BERT-Base, Cased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/tf_20/cased_L-12_H-768_A-12.tar.gz)**:
<del> 12-layer, 768-hidden, 12-heads , 110M parameters
<del>* **[`BERT-Large, Cased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/tf_20/cased_L-24_H-1024_A-16.tar.gz)**:
<del> 24-layer, 1024-hidden, 16-heads, 340M parameters
<del>
<ide> We recommend to host checkpoints on Google Cloud storage buckets when you use
<ide> Cloud GPU/TPU.
<ide>
<ide> checkpoint.restore(init_checkpoint)
<ide> Checkpoints featuring native serialized Keras models
<ide> (i.e. model.load()/load_weights()) will be available soon.
<ide>
<add>### Access to Pretrained hub modules.
<add>
<add>Pretrained tf.hub modules in TF 2.x SavedModel format can be found in the
<add>following links:
<add>
<add>* **[`BERT-Large, Uncased (Whole Word Masking)`](https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/1)**:
<add> 24-layer, 1024-hidden, 16-heads, 340M parameters
<add>* **[`BERT-Large, Cased (Whole Word Masking)`](https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/1)**:
<add> 24-layer, 1024-hidden, 16-heads, 340M parameters
<add>* **[`BERT-Base, Uncased`](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1)**:
<add> 12-layer, 768-hidden, 12-heads, 110M parameters
<add>* **[`BERT-Large, Uncased`](https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1)**:
<add> 24-layer, 1024-hidden, 16-heads, 340M parameters
<add>* **[`BERT-Base, Cased`](https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/1)**:
<add> 12-layer, 768-hidden, 12-heads , 110M parameters
<add>* **[`BERT-Large, Cased`](https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/1)**:
<add> 24-layer, 1024-hidden, 16-heads, 340M parameters
<add>* **[`BERT-Base, Multilingual Cased`](https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/1)**:
<add> 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
<add>* **[`BERT-Base, Chinese`](https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/1)**:
<add> Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads,
<add> 110M parameters
<add>
<ide> ## Set Up
<ide>
<ide> ```shell
<ide> and unpack it to some directory `$GLUE_DIR`.
<ide>
<ide> ```shell
<ide> export GLUE_DIR=~/glue
<del>export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<add>export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<ide>
<ide> export TASK_NAME=MNLI
<ide> export OUTPUT_DIR=gs://some_bucket/datasets
<ide> python ../data/create_finetuning_data.py \
<ide> --input_data_dir=${GLUE_DIR}/${TASK_NAME}/ \
<del> --vocab_file=${BERT_BASE_DIR}/vocab.txt \
<add> --vocab_file=${BERT_DIR}/vocab.txt \
<ide> --train_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_train.tf_record \
<ide> --eval_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_eval.tf_record \
<ide> --meta_data_file_path=${OUTPUT_DIR}/${TASK_NAME}_meta_data \
<ide> The necessary files can be found here:
<ide> ```shell
<ide> export SQUAD_DIR=~/squad
<ide> export SQUAD_VERSION=v1.1
<del>export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<add>export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<ide> export OUTPUT_DIR=gs://some_bucket/datasets
<ide>
<ide> python ../data/create_finetuning_data.py \
<ide> --squad_data_file=${SQUAD_DIR}/train-${SQUAD_VERSION}.json \
<del> --vocab_file=${BERT_BASE_DIR}/vocab.txt \
<add> --vocab_file=${BERT_DIR}/vocab.txt \
<ide> --train_data_output_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
<ide> --meta_data_file_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_meta_data \
<ide> --fine_tuning_task_type=squad --max_seq_length=384
<ide> The unzipped pre-trained model files can also be found in the Google Cloud
<ide> Storage folder `gs://cloud-tpu-checkpoints/bert/keras_bert`. For example:
<ide>
<ide> ```shell
<del>export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<add>export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<ide> export MODEL_DIR=gs://some_bucket/my_output_dir
<ide> ```
<ide>
<ide> For GPU memory of 16GB or smaller, you may try to use `BERT-Base`
<ide> (uncased_L-12_H-768_A-12).
<ide>
<ide> ```shell
<del>export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<add>export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<ide> export MODEL_DIR=gs://some_bucket/my_output_dir
<ide> export GLUE_DIR=gs://some_bucket/datasets
<ide> export TASK=MRPC
<ide> python run_classifier.py \
<ide> --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
<ide> --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \
<ide> --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
<del> --bert_config_file=${BERT_BASE_DIR}/bert_config.json \
<del> --init_checkpoint=${BERT_BASE_DIR}/bert_model.ckpt \
<add> --bert_config_file=${BERT_DIR}/bert_config.json \
<add> --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
<ide> --train_batch_size=4 \
<ide> --eval_batch_size=4 \
<ide> --steps_per_loop=1 \
<ide> python run_classifier.py \
<ide> --distribution_strategy=mirrored
<ide> ```
<ide>
<add>Alternatively, instead of specifying `init_checkpoint`, you can specify
<add>`hub_module_url` to employ a pretrained BERT hub module, e.g.,
<add>` --hub_module_url=https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1`.
<add>
<ide> To use TPU, you only need to switch distribution strategy type to `tpu` with TPU
<ide> information and use remote storage for model checkpoints.
<ide>
<ide> ```shell
<del>export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<add>export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<ide> export TPU_IP_ADDRESS='???'
<ide> export MODEL_DIR=gs://some_bucket/my_output_dir
<ide> export GLUE_DIR=gs://some_bucket/datasets
<add>export TASK=MRPC
<ide>
<ide> python run_classifier.py \
<ide> --mode='train_and_eval' \
<ide> --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
<ide> --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \
<ide> --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
<del> --bert_config_file=$BERT_BASE_DIR/bert_config.json \
<del> --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
<add> --bert_config_file=${BERT_DIR}/bert_config.json \
<add> --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
<ide> --train_batch_size=32 \
<ide> --eval_batch_size=32 \
<ide> --learning_rate=2e-5 \
<ide> For GPU memory of 16GB or smaller, you may try to use `BERT-Base`
<ide> (uncased_L-12_H-768_A-12).
<ide>
<ide> ```shell
<del>export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<add>export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<ide> export SQUAD_DIR=gs://some_bucket/datasets
<ide> export MODEL_DIR=gs://some_bucket/my_output_dir
<ide> export SQUAD_VERSION=v1.1
<ide> python run_squad.py \
<ide> --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
<ide> --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
<ide> --predict_file=${SQUAD_DIR}/dev-v1.1.json \
<del> --vocab_file=${BERT_BASE_DIR}/vocab.txt \
<del> --bert_config_file=$BERT_BASE_DIR/bert_config.json \
<del> --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
<add> --vocab_file=${BERT_DIR}/vocab.txt \
<add> --bert_config_file=${BERT_DIR}/bert_config.json \
<add> --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
<ide> --train_batch_size=4 \
<ide> --predict_batch_size=4 \
<ide> --learning_rate=8e-5 \
<ide> python run_squad.py \
<ide> --distribution_strategy=mirrored
<ide> ```
<ide>
<add>Similarly, you can replace the `init_checkpoint` FLAG with `hub_module_url` to
<add>specify a hub module path.
<add>
<ide> To use TPU, you need switch distribution strategy type to `tpu` with TPU
<ide> information.
<ide>
<ide> ```shell
<del>export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<add>export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
<ide> export TPU_IP_ADDRESS='???'
<ide> export MODEL_DIR=gs://some_bucket/my_output_dir
<ide> export SQUAD_DIR=gs://some_bucket/datasets
<ide> python run_squad.py \
<ide> --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
<ide> --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
<ide> --predict_file=${SQUAD_DIR}/dev-v1.1.json \
<del> --vocab_file=${BERT_BASE_DIR}/vocab.txt \
<del> --bert_config_file=$BERT_BASE_DIR/bert_config.json \
<del> --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
<add> --vocab_file=${BERT_DIR}/vocab.txt \
<add> --bert_config_file=${BERT_DIR}/bert_config.json \
<add> --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
<ide> --train_batch_size=32 \
<ide> --learning_rate=8e-5 \
<ide> --num_train_epochs=2 \ | 1 |
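
For the hub modules listed above, a minimal TF 2.x consumption sketch (assumes `tensorflow_hub` is installed; the input/output signature follows the BERT SavedModel convention):

```python
import tensorflow_hub as hub

bert_layer = hub.KerasLayer(
    "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1",
    trainable=True)
# This family of modules expects [input_word_ids, input_mask, segment_ids]
# and returns (pooled_output, sequence_output).
```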
Javascript | Javascript | add convenience methods for date comparisons | 7f006b836917ed5c021872ac4e1e684bd1a824ce | <ide><path>src/lib/moment/compare.js
<ide> export function isSame (input, units) {
<ide> return +(this.clone().startOf(units)) <= inputMs && inputMs <= +(this.clone().endOf(units));
<ide> }
<ide> }
<add>
<add>export function isSameOrAfter (input, units) {
<add> return this.isSame(input, units) || this.isAfter(input,units);
<add>}
<add>
<add>export function isSameOrBefore (input, units) {
<add> return this.isSame(input, units) || this.isBefore(input,units);
<add>}
<ide><path>src/lib/moment/prototype.js
<ide> var proto = Moment.prototype;
<ide> import { add, subtract } from './add-subtract';
<ide> import { calendar } from './calendar';
<ide> import { clone } from './clone';
<del>import { isBefore, isBetween, isSame, isAfter } from './compare';
<add>import { isBefore, isBetween, isSame, isAfter, isSameOrAfter, isSameOrBefore } from './compare';
<ide> import { diff } from './diff';
<ide> import { format, toString, toISOString } from './format';
<ide> import { from, fromNow } from './from';
<ide> import { valueOf, toDate, toArray, toObject, toJSON, unix } from './to-type';
<ide> import { isValid, parsingFlags, invalidAt } from './valid';
<ide> import { creationData } from './creation-data';
<ide>
<del>proto.add = add;
<del>proto.calendar = calendar;
<del>proto.clone = clone;
<del>proto.diff = diff;
<del>proto.endOf = endOf;
<del>proto.format = format;
<del>proto.from = from;
<del>proto.fromNow = fromNow;
<del>proto.to = to;
<del>proto.toNow = toNow;
<del>proto.get = getSet;
<del>proto.invalidAt = invalidAt;
<del>proto.isAfter = isAfter;
<del>proto.isBefore = isBefore;
<del>proto.isBetween = isBetween;
<del>proto.isSame = isSame;
<del>proto.isValid = isValid;
<del>proto.lang = lang;
<del>proto.locale = locale;
<del>proto.localeData = localeData;
<del>proto.max = prototypeMax;
<del>proto.min = prototypeMin;
<del>proto.parsingFlags = parsingFlags;
<del>proto.set = getSet;
<del>proto.startOf = startOf;
<del>proto.subtract = subtract;
<del>proto.toArray = toArray;
<del>proto.toObject = toObject;
<del>proto.toDate = toDate;
<del>proto.toISOString = toISOString;
<del>proto.toJSON = toJSON;
<del>proto.toString = toString;
<del>proto.unix = unix;
<del>proto.valueOf = valueOf;
<del>proto.creationData = creationData;
<add>proto.add = add;
<add>proto.calendar = calendar;
<add>proto.clone = clone;
<add>proto.diff = diff;
<add>proto.endOf = endOf;
<add>proto.format = format;
<add>proto.from = from;
<add>proto.fromNow = fromNow;
<add>proto.to = to;
<add>proto.toNow = toNow;
<add>proto.get = getSet;
<add>proto.invalidAt = invalidAt;
<add>proto.isAfter = isAfter;
<add>proto.isBefore = isBefore;
<add>proto.isBetween = isBetween;
<add>proto.isSame = isSame;
<add>proto.isSameOrAfter = isSameOrAfter;
<add>proto.isSameOrBefore = isSameOrBefore;
<add>proto.isValid = isValid;
<add>proto.lang = lang;
<add>proto.locale = locale;
<add>proto.localeData = localeData;
<add>proto.max = prototypeMax;
<add>proto.min = prototypeMin;
<add>proto.parsingFlags = parsingFlags;
<add>proto.set = getSet;
<add>proto.startOf = startOf;
<add>proto.subtract = subtract;
<add>proto.toArray = toArray;
<add>proto.toObject = toObject;
<add>proto.toDate = toDate;
<add>proto.toISOString = toISOString;
<add>proto.toJSON = toJSON;
<add>proto.toString = toString;
<add>proto.unix = unix;
<add>proto.valueOf = valueOf;
<add>proto.creationData = creationData;
<ide>
<ide> // Year
<ide> import { getSetYear, getIsLeapYear } from '../units/year';
<ide><path>src/test/moment/is_same_or_after.js
<add>import { module, test } from '../qunit';
<add>import moment from '../../moment';
<add>
<add>module('is same or after');
<add>
<add>test('is same or after without units', function (assert) {
<add> var m = moment(new Date(2011, 3, 2, 3, 4, 5, 10)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 3, 2, 3, 5, 5, 10))), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 3, 2, 3, 3, 5, 10))), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 4, 2, 3, 4, 5, 10))), false, 'month is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 2, 3, 4, 5, 10))), true, 'month is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 3, 3, 4, 5, 10))), false, 'day is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 1, 3, 4, 5, 10))), true, 'day is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 4, 4, 5, 10))), false, 'hour is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 2, 4, 5, 10))), true, 'hour is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 5, 5, 10))), false, 'minute is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 3, 5, 10))), true, 'minute is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 6, 10))), false, 'second is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 4, 11))), true, 'second is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 5, 10))), true, 'millisecond match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 5, 11))), false, 'millisecond is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 5, 9))), true, 'millisecond is earlier');
<add> assert.equal(m.isSameOrAfter(m), true, 'moments are the same as themselves');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter second should not change moment');
<add>});
<add>
<add>test('is same or after year', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 5, 6, 7, 8, 9, 10)), 'year'), true, 'year match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 5, 6, 7, 8, 9, 10)), 'years'), true, 'plural should work');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 5, 6, 7, 8, 9, 10)), 'year'), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 5, 6, 7, 8, 9, 10)), 'year'), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 0, 1, 0, 0, 0, 0)), 'year'), true, 'exact start of year');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 11, 31, 23, 59, 59, 999)), 'year'), true, 'exact end of year');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 0, 1, 0, 0, 0, 0)), 'year'), false, 'start of next year');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 11, 31, 23, 59, 59, 999)), 'year'), true, 'end of previous year');
<add> assert.equal(m.isSameOrAfter(m, 'year'), true, 'same moments are in the same year');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter year should not change moment');
<add>});
<add>
<add>test('is same or after month', function (assert) {
<add> var m = moment(new Date(2011, 2, 3, 4, 5, 6, 7)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 6, 7, 8, 9, 10)), 'month'), true, 'month match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 6, 7, 8, 9, 10)), 'months'), true, 'plural should work');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 2, 6, 7, 8, 9, 10)), 'month'), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 2, 6, 7, 8, 9, 10)), 'month'), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 5, 6, 7, 8, 9, 10)), 'month'), false, 'month is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 6, 7, 8, 9, 10)), 'month'), true, 'month is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 1, 0, 0, 0, 0)), 'month'), true, 'exact start of month');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 31, 23, 59, 59, 999)), 'month'), true, 'exact end of month');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 1, 0, 0, 0, 0)), 'month'), false, 'start of next month');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 27, 23, 59, 59, 999)), 'month'), true, 'end of previous month');
<add> assert.equal(m.isSameOrAfter(m, 'month'), true, 'same moments are in the same month');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter month should not change moment');
<add>});
<add>
<add>test('is same or after day', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 7, 8, 9, 10)), 'day'), true, 'day match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 7, 8, 9, 10)), 'days'), true, 'plural should work');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 1, 2, 7, 8, 9, 10)), 'day'), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 1, 2, 7, 8, 9, 10)), 'day'), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 2, 7, 8, 9, 10)), 'day'), false, 'month is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 12, 2, 7, 8, 9, 10)), 'day'), true, 'month is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 3, 7, 8, 9, 10)), 'day'), false, 'day is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 1, 7, 8, 9, 10)), 'day'), true, 'day is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 0, 0, 0, 0)), 'day'), true, 'exact start of day');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 23, 59, 59, 999)), 'day'), true, 'exact end of day');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 3, 0, 0, 0, 0)), 'day'), false, 'start of next day');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 1, 23, 59, 59, 999)), 'day'), true, 'end of previous day');
<add> assert.equal(m.isSameOrAfter(m, 'day'), true, 'same moments are in the same day');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter day should not change moment');
<add>});
<add>
<add>test('is same or after hour', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 8, 9, 10)), 'hour'), true, 'hour match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 8, 9, 10)), 'hours'), true, 'plural should work');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 1, 2, 3, 8, 9, 10)), 'hour'), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 1, 2, 3, 8, 9, 10)), 'hour'), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 2, 3, 8, 9, 10)), 'hour'), false, 'month is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 12, 2, 3, 8, 9, 10)), 'hour'), true, 'month is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 3, 3, 8, 9, 10)), 'hour'), false, 'day is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 1, 3, 8, 9, 10)), 'hour'), true, 'day is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 4, 8, 9, 10)), 'hour'), false, 'hour is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 2, 8, 9, 10)), 'hour'), true, 'hour is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 0, 0, 0)), 'hour'), true, 'exact start of hour');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 59, 59, 999)), 'hour'), true, 'exact end of hour');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 4, 0, 0, 0)), 'hour'), false, 'start of next hour');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 2, 59, 59, 999)), 'hour'), true, 'end of previous hour');
<add> assert.equal(m.isSameOrAfter(m, 'hour'), true, 'same moments are in the same hour');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter hour should not change moment');
<add>});
<add>
<add>test('is same or after minute', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 9, 10)), 'minute'), true, 'minute match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 9, 10)), 'minutes'), true, 'plural should work');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 1, 2, 3, 4, 9, 10)), 'minute'), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 1, 2, 3, 4, 9, 10)), 'minute'), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 2, 3, 4, 9, 10)), 'minute'), false, 'month is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 12, 2, 3, 4, 9, 10)), 'minute'), true, 'month is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 3, 3, 4, 9, 10)), 'minute'), false, 'day is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 1, 3, 4, 9, 10)), 'minute'), true, 'day is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 4, 4, 9, 10)), 'minute'), false, 'hour is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 2, 4, 9, 10)), 'minute'), true, 'hour is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 5, 9, 10)), 'minute'), false, 'minute is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 3, 9, 10)), 'minute'), true, 'minute is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 0, 0)), 'minute'), true, 'exact start of minute');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 59, 999)), 'minute'), true, 'exact end of minute');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 5, 0, 0)), 'minute'), false, 'start of next minute');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 3, 59, 999)), 'minute'), true, 'end of previous minute');
<add> assert.equal(m.isSameOrAfter(m, 'minute'), true, 'same moments are in the same minute');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter minute should not change moment');
<add>});
<add>
<add>test('is same or after second', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 5, 10)), 'second'), true, 'second match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 5, 10)), 'seconds'), true, 'plural should work');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 1, 2, 3, 4, 5, 10)), 'second'), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 1, 2, 3, 4, 5, 10)), 'second'), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 2, 3, 4, 5, 10)), 'second'), false, 'month is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 12, 2, 3, 4, 5, 10)), 'second'), true, 'month is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 3, 3, 4, 5, 10)), 'second'), false, 'day is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 1, 3, 4, 5, 10)), 'second'), true, 'day is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 4, 4, 5, 10)), 'second'), false, 'hour is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 2, 4, 5, 10)), 'second'), true, 'hour is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 5, 5, 10)), 'second'), false, 'minute is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 3, 5, 10)), 'second'), true, 'minute is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 6, 10)), 'second'), false, 'second is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 4, 10)), 'second'), true, 'second is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 5, 0)), 'second'), true, 'exact start of second');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 5, 999)), 'second'), true, 'exact end of second');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 6, 0)), 'second'), false, 'start of next second');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 1, 2, 3, 4, 4, 999)), 'second'), true, 'end of previous second');
<add> assert.equal(m.isSameOrAfter(m, 'second'), true, 'same moments are in the same second');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter second should not change moment');
<add>});
<add>
<add>test('is same or after millisecond', function (assert) {
<add> var m = moment(new Date(2011, 3, 2, 3, 4, 5, 10)), mCopy = moment(m);
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 5, 10)), 'millisecond'), true, 'millisecond match');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 5, 10)), 'milliseconds'), true, 'plural should work');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2012, 3, 2, 3, 4, 5, 10)), 'millisecond'), false, 'year is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2010, 3, 2, 3, 4, 5, 10)), 'millisecond'), true, 'year is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 4, 2, 3, 4, 5, 10)), 'millisecond'), false, 'month is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 2, 2, 3, 4, 5, 10)), 'millisecond'), true, 'month is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 3, 3, 4, 5, 10)), 'millisecond'), false, 'day is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 1, 1, 4, 5, 10)), 'millisecond'), true, 'day is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 4, 4, 5, 10)), 'millisecond'), false, 'hour is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 1, 4, 1, 5, 10)), 'millisecond'), true, 'hour is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 5, 5, 10)), 'millisecond'), false, 'minute is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 3, 5, 10)), 'millisecond'), true, 'minute is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 6, 10)), 'millisecond'), false, 'second is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 4, 5)), 'millisecond'), true, 'second is earlier');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 6, 11)), 'millisecond'), false, 'millisecond is later');
<add> assert.equal(m.isSameOrAfter(moment(new Date(2011, 3, 2, 3, 4, 4, 9)), 'millisecond'), true, 'millisecond is earlier');
<add> assert.equal(m.isSameOrAfter(m, 'millisecond'), true, 'same moments are in the same millisecond');
<add> assert.equal(+m, +mCopy, 'isSameOrAfter millisecond should not change moment');
<add>});
<add>
<add>test('is same or after with utc offset moments', function (assert) {
<add> assert.ok(moment.parseZone('2013-02-01T-05:00').isSameOrAfter(moment('2013-02-01'), 'year'), 'zoned vs local moment');
<add> assert.ok(moment('2013-02-01').isSameOrAfter(moment('2013-02-01').utcOffset('-05:00'), 'year'), 'local vs zoned moment');
<add> assert.ok(moment.parseZone('2013-02-01T-05:00').isSameOrAfter(moment.parseZone('2013-02-01T-06:30'), 'year'),
<add> 'zoned vs (differently) zoned moment');
<add>});
<add>
<add>test('is same or after with invalid moments', function (assert) {
<add> var m = moment(), invalid = moment.invalid();
<add> assert.equal(invalid.isSameOrAfter(invalid), false, 'invalid moments are not considered equal');
<add> assert.equal(m.isSameOrAfter(invalid), false, 'valid moment is not after invalid moment');
<add> assert.equal(invalid.isSameOrAfter(m), false, 'invalid moment is not after valid moment');
<add> assert.equal(m.isSameOrAfter(invalid, 'year'), false, 'invalid moment year');
<add> assert.equal(m.isSameOrAfter(invalid, 'month'), false, 'invalid moment month');
<add> assert.equal(m.isSameOrAfter(invalid, 'day'), false, 'invalid moment day');
<add> assert.equal(m.isSameOrAfter(invalid, 'hour'), false, 'invalid moment hour');
<add> assert.equal(m.isSameOrAfter(invalid, 'minute'), false, 'invalid moment minute');
<add> assert.equal(m.isSameOrAfter(invalid, 'second'), false, 'invalid moment second');
<add> assert.equal(m.isSameOrAfter(invalid, 'milliseconds'), false, 'invalid moment milliseconds');
<add>});
<ide><path>src/test/moment/is_same_or_before.js
<add>import { module, test } from '../qunit';
<add>import moment from '../../moment';
<add>
<add>module('is same or before');
<add>
<add>test('is same or before without units', function (assert) {
<add> var m = moment(new Date(2011, 3, 2, 3, 4, 5, 10)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 3, 2, 3, 5, 5, 10))), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 3, 2, 3, 3, 5, 10))), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 4, 2, 3, 4, 5, 10))), true, 'month is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 2, 3, 4, 5, 10))), false, 'month is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 3, 3, 4, 5, 10))), true, 'day is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 1, 3, 4, 5, 10))), false, 'day is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 4, 4, 5, 10))), true, 'hour is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 2, 4, 5, 10))), false, 'hour is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 5, 5, 10))), true, 'minute is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 3, 5, 10))), false, 'minute is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 6, 10))), true, 'second is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 4, 11))), false, 'second is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 5, 10))), true, 'millisecond match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 5, 11))), true, 'millisecond is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 5, 9))), false, 'millisecond is earlier');
<add> assert.equal(m.isSameOrBefore(m), true, 'moments are the same as themselves');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore second should not change moment');
<add>});
<add>
<add>test('is same or before year', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 5, 6, 7, 8, 9, 10)), 'year'), true, 'year match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 5, 6, 7, 8, 9, 10)), 'years'), true, 'plural should work');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 5, 6, 7, 8, 9, 10)), 'year'), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 5, 6, 7, 8, 9, 10)), 'year'), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 0, 1, 0, 0, 0, 0)), 'year'), true, 'exact start of year');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 11, 31, 23, 59, 59, 999)), 'year'), true, 'exact end of year');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 0, 1, 0, 0, 0, 0)), 'year'), true, 'start of next year');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 11, 31, 23, 59, 59, 999)), 'year'), false, 'end of previous year');
<add> assert.equal(m.isSameOrBefore(m, 'year'), true, 'same moments are in the same year');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore year should not change moment');
<add>});
<add>
<add>test('is same or before month', function (assert) {
<add> var m = moment(new Date(2011, 2, 3, 4, 5, 6, 7)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 6, 7, 8, 9, 10)), 'month'), true, 'month match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 6, 7, 8, 9, 10)), 'months'), true, 'plural should work');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 2, 6, 7, 8, 9, 10)), 'month'), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 2, 6, 7, 8, 9, 10)), 'month'), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 5, 6, 7, 8, 9, 10)), 'month'), true, 'month is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 6, 7, 8, 9, 10)), 'month'), false, 'month is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 1, 0, 0, 0, 0)), 'month'), true, 'exact start of month');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 31, 23, 59, 59, 999)), 'month'), true, 'exact end of month');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 1, 0, 0, 0, 0)), 'month'), true, 'start of next month');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 27, 23, 59, 59, 999)), 'month'), false, 'end of previous month');
<add> assert.equal(m.isSameOrBefore(m, 'month'), true, 'same moments are in the same month');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore month should not change moment');
<add>});
<add>
<add>test('is same or before day', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 7, 8, 9, 10)), 'day'), true, 'day match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 7, 8, 9, 10)), 'days'), true, 'plural should work');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 1, 2, 7, 8, 9, 10)), 'day'), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 1, 2, 7, 8, 9, 10)), 'day'), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 2, 7, 8, 9, 10)), 'day'), true, 'month is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 12, 2, 7, 8, 9, 10)), 'day'), false, 'month is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 3, 7, 8, 9, 10)), 'day'), true, 'day is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 1, 7, 8, 9, 10)), 'day'), false, 'day is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 0, 0, 0, 0)), 'day'), true, 'exact start of day');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 23, 59, 59, 999)), 'day'), true, 'exact end of day');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 3, 0, 0, 0, 0)), 'day'), true, 'start of next day');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 1, 23, 59, 59, 999)), 'day'), false, 'end of previous day');
<add> assert.equal(m.isSameOrBefore(m, 'day'), true, 'same moments are in the same day');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore day should not change moment');
<add>});
<add>
<add>test('is same or before hour', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 8, 9, 10)), 'hour'), true, 'hour match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 8, 9, 10)), 'hours'), true, 'plural should work');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 1, 2, 3, 8, 9, 10)), 'hour'), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 1, 2, 3, 8, 9, 10)), 'hour'), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 2, 3, 8, 9, 10)), 'hour'), true, 'month is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 12, 2, 3, 8, 9, 10)), 'hour'), false, 'month is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 3, 3, 8, 9, 10)), 'hour'), true, 'day is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 1, 3, 8, 9, 10)), 'hour'), false, 'day is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 4, 8, 9, 10)), 'hour'), true, 'hour is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 2, 8, 9, 10)), 'hour'), false, 'hour is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 0, 0, 0)), 'hour'), true, 'exact start of hour');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 59, 59, 999)), 'hour'), true, 'exact end of hour');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 4, 0, 0, 0)), 'hour'), true, 'start of next hour');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 2, 59, 59, 999)), 'hour'), false, 'end of previous hour');
<add> assert.equal(m.isSameOrBefore(m, 'hour'), true, 'same moments are in the same hour');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore hour should not change moment');
<add>});
<add>
<add>test('is same or before minute', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 9, 10)), 'minute'), true, 'minute match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 9, 10)), 'minutes'), true, 'plural should work');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 1, 2, 3, 4, 9, 10)), 'minute'), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 1, 2, 3, 4, 9, 10)), 'minute'), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 2, 3, 4, 9, 10)), 'minute'), true, 'month is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 12, 2, 3, 4, 9, 10)), 'minute'), false, 'month is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 3, 3, 4, 9, 10)), 'minute'), true, 'day is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 1, 3, 4, 9, 10)), 'minute'), false, 'day is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 4, 4, 9, 10)), 'minute'), true, 'hour is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 2, 4, 9, 10)), 'minute'), false, 'hour is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 5, 9, 10)), 'minute'), true, 'minute is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 3, 9, 10)), 'minute'), false, 'minute is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 0, 0)), 'minute'), true, 'exact start of minute');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 59, 999)), 'minute'), true, 'exact end of minute');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 5, 0, 0)), 'minute'), true, 'start of next minute');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 3, 59, 999)), 'minute'), false, 'end of previous minute');
<add> assert.equal(m.isSameOrBefore(m, 'minute'), true, 'same moments are in the same minute');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore minute should not change moment');
<add>});
<add>
<add>test('is same or before second', function (assert) {
<add> var m = moment(new Date(2011, 1, 2, 3, 4, 5, 6)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 5, 10)), 'second'), true, 'second match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 5, 10)), 'seconds'), true, 'plural should work');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 1, 2, 3, 4, 5, 10)), 'second'), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 1, 2, 3, 4, 5, 10)), 'second'), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 2, 3, 4, 5, 10)), 'second'), true, 'month is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 12, 2, 3, 4, 5, 10)), 'second'), false, 'month is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 3, 3, 4, 5, 10)), 'second'), true, 'day is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 1, 3, 4, 5, 10)), 'second'), false, 'day is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 4, 4, 5, 10)), 'second'), true, 'hour is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 2, 4, 5, 10)), 'second'), false, 'hour is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 5, 5, 10)), 'second'), true, 'minute is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 3, 5, 10)), 'second'), false, 'minute is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 6, 10)), 'second'), true, 'second is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 4, 10)), 'second'), false, 'second is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 5, 0)), 'second'), true, 'exact start of second');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 5, 999)), 'second'), true, 'exact end of second');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 6, 0)), 'second'), true, 'start of next second');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 1, 2, 3, 4, 4, 999)), 'second'), false, 'end of previous second');
<add> assert.equal(m.isSameOrBefore(m, 'second'), true, 'same moments are in the same second');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore second should not change moment');
<add>});
<add>
<add>test('is same or before millisecond', function (assert) {
<add> var m = moment(new Date(2011, 3, 2, 3, 4, 5, 10)), mCopy = moment(m);
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 5, 10)), 'millisecond'), true, 'millisecond match');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 5, 10)), 'milliseconds'), true, 'plural should work');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2012, 3, 2, 3, 4, 5, 10)), 'millisecond'), true, 'year is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2010, 3, 2, 3, 4, 5, 10)), 'millisecond'), false, 'year is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 4, 2, 3, 4, 5, 10)), 'millisecond'), true, 'month is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 2, 2, 3, 4, 5, 10)), 'millisecond'), false, 'month is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 3, 3, 4, 5, 10)), 'millisecond'), true, 'day is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 1, 1, 4, 5, 10)), 'millisecond'), false, 'day is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 4, 4, 5, 10)), 'millisecond'), true, 'hour is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 1, 4, 1, 5, 10)), 'millisecond'), false, 'hour is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 5, 5, 10)), 'millisecond'), true, 'minute is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 3, 5, 10)), 'millisecond'), false, 'minute is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 6, 10)), 'millisecond'), true, 'second is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 4, 5)), 'millisecond'), false, 'second is earlier');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 6, 11)), 'millisecond'), true, 'millisecond is later');
<add> assert.equal(m.isSameOrBefore(moment(new Date(2011, 3, 2, 3, 4, 4, 9)), 'millisecond'), false, 'millisecond is earlier');
<add> assert.equal(m.isSameOrBefore(m, 'millisecond'), true, 'same moments are in the same millisecond');
<add> assert.equal(+m, +mCopy, 'isSameOrBefore millisecond should not change moment');
<add>});
<add>
<add>test('is same with utc offset moments', function (assert) {
<add> assert.ok(moment.parseZone('2013-02-01T-05:00').isSameOrBefore(moment('2013-02-01'), 'year'), 'zoned vs local moment');
<add> assert.ok(moment('2013-02-01').isSameOrBefore(moment('2013-02-01').utcOffset('-05:00'), 'year'), 'local vs zoned moment');
<add> assert.ok(moment.parseZone('2013-02-01T-05:00').isSameOrBefore(moment.parseZone('2013-02-01T-06:30'), 'year'),
<add> 'zoned vs (differently) zoned moment');
<add>});
<add>
<add>test('is same with invalid moments', function (assert) {
<add> var m = moment(), invalid = moment.invalid();
<add> assert.equal(invalid.isSameOrBefore(invalid), false, 'invalid moments are not considered equal');
<add> assert.equal(m.isSameOrBefore(invalid), false, 'valid moment is not before invalid moment');
<add> assert.equal(invalid.isSameOrBefore(m), false, 'invalid moment is not before valid moment');
<add> assert.equal(m.isSameOrBefore(invalid, 'year'), false, 'invalid moment year');
<add> assert.equal(m.isSameOrBefore(invalid, 'month'), false, 'invalid moment month');
<add> assert.equal(m.isSameOrBefore(invalid, 'day'), false, 'invalid moment day');
<add> assert.equal(m.isSameOrBefore(invalid, 'hour'), false, 'invalid moment hour');
<add> assert.equal(m.isSameOrBefore(invalid, 'minute'), false, 'invalid moment minute');
<add> assert.equal(m.isSameOrBefore(invalid, 'second'), false, 'invalid moment second');
<add> assert.equal(m.isSameOrBefore(invalid, 'milliseconds'), false, 'invalid moment milliseconds');
<add>}); | 4 |
Ruby | Ruby | move some actioncable logs to debug level | 963572b2a4bf252d6c860eb1c5586809f8f4936e | <ide><path>actioncable/lib/action_cable/channel/base.rb
<ide> def unsubscribed # :doc:
<ide> # Transmit a hash of data to the subscriber. The hash will automatically be wrapped in a JSON envelope with
<ide> # the proper channel identifier marked as the recipient.
<ide> def transmit(data, via: nil) # :doc:
<del> logger.info "#{self.class.name} transmitting #{data.inspect.truncate(300)}".tap { |m| m << " (via #{via})" if via }
<add> logger.debug "#{self.class.name} transmitting #{data.inspect.truncate(300)}".tap { |m| m << " (via #{via})" if via }
<ide>
<ide> payload = { channel_class: self.class.name, data: data, via: via }
<ide> ActiveSupport::Notifications.instrument("transmit.action_cable", payload) do
<ide><path>actioncable/lib/action_cable/server/broadcasting.rb
<ide> def initialize(server, broadcasting, coder:)
<ide> end
<ide>
<ide> def broadcast(message)
<del> server.logger.info "[ActionCable] Broadcasting to #{broadcasting}: #{message.inspect}"
<add> server.logger.debug "[ActionCable] Broadcasting to #{broadcasting}: #{message.inspect}"
<ide>
<ide> payload = { broadcasting: broadcasting, message: message, coder: coder }
<ide> ActiveSupport::Notifications.instrument("broadcast.action_cable", payload) do | 2 |
Javascript | Javascript | upgrade entrypoint to es6 | 083b9a7975f7fef30a19374fef9a4f9a348aa391 | <ide><path>lib/Entrypoint.js
<ide> MIT License http://www.opensource.org/licenses/mit-license.php
<ide> Author Tobias Koppers @sokra
<ide> */
<del>function Entrypoint(name) {
<del> this.name = name;
<del> this.chunks = [];
<del>}
<del>module.exports = Entrypoint;
<add>"use strict";
<ide>
<del>Entrypoint.prototype.unshiftChunk = function(chunk) {
<del> this.chunks.unshift(chunk);
<del> chunk.entrypoints.push(this);
<del>};
<add>class Entrypoint {
<add> constructor(name) {
<add> this.name = name;
<add> this.chunks = [];
<add> }
<add>
<add> unshiftChunk(chunk) {
<add> this.chunks.unshift(chunk);
<add> chunk.entrypoints.push(this);
<add> }
<ide>
<del>Entrypoint.prototype.insertChunk = function(chunk, before) {
<del> var idx = this.chunks.indexOf(before);
<del> if(idx >= 0) {
<del> this.chunks.splice(idx, 0, chunk);
<del> } else {
<del> throw new Error("before chunk not found");
<add> insertChunk(chunk, before) {
<add> const idx = this.chunks.indexOf(before);
<add> if(idx >= 0) {
<add> this.chunks.splice(idx, 0, chunk);
<add> } else {
<add> throw new Error("before chunk not found");
<add> }
<add> chunk.entrypoints.push(this);
<ide> }
<del> chunk.entrypoints.push(this);
<del>};
<ide>
<del>Entrypoint.prototype.getFiles = function() {
<del> var files = [];
<add> getFiles() {
<add> let files = [];
<ide>
<del> for(var chunkIdx = 0; chunkIdx < this.chunks.length; chunkIdx++) {
<del> for(var fileIdx = 0; fileIdx < this.chunks[chunkIdx].files.length; fileIdx++) {
<del> if(files.indexOf(this.chunks[chunkIdx].files[fileIdx]) === -1) {
<del> files.push(this.chunks[chunkIdx].files[fileIdx]);
<add> for(let chunkIdx = 0; chunkIdx < this.chunks.length; chunkIdx++) {
<add> for(let fileIdx = 0; fileIdx < this.chunks[chunkIdx].files.length; fileIdx++) {
<add> if(files.indexOf(this.chunks[chunkIdx].files[fileIdx]) === -1) {
<add> files.push(this.chunks[chunkIdx].files[fileIdx]);
<add> }
<ide> }
<ide> }
<del> }
<ide>
<del> return files;
<add> return files;
<add> }
<ide> }
<add>
<add>module.exports = Entrypoint; | 1 |
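To see the refactored class in action, here is a minimal, hypothetical driver. Real webpack Chunk objects carry far more state, so the stand-ins below only model the `files` and `entrypoints` properties these methods touch, and the require path is assumed:

const Entrypoint = require('./lib/Entrypoint'); // path assumed

const makeChunk = files => ({ files, entrypoints: [] }); // minimal Chunk stand-in

const entry = new Entrypoint('main');
const app = makeChunk(['app.js', 'app.js.map']);
const runtime = makeChunk(['runtime.js']);

entry.unshiftChunk(app);
entry.unshiftChunk(runtime); // prepends, so runtime now comes first

console.log(entry.getFiles()); // ['runtime.js', 'app.js', 'app.js.map'], deduplicated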
Text | Text | add a changelog for elements having the same key | 7b2101e3528782b2ddf445b21460af7ff1ff5398 | <ide><path>CHANGELOG.md
<ide> * Previously, changing the `ref` to a component would always detach the ref before that component's render is called. Now, we change the `ref` later, when applying the changes to the DOM.
<ide> * It is not safe to re-render into a container that was modified by something other than React. This worked previously in some cases but was never supported. We now emit a warning in this case. Instead you should clean up your component trees using `ReactDOM.unmountComponentAtNode`. [See this example.](https://github.com/facebook/react/issues/10294#issuecomment-318820987)
<ide> * `componentDidUpdate` lifecycle no longer receives `prevContext` param. ([@bvaughn](https://github.com/bvaughn) in [#8631](https://github.com/facebook/react/pull/8631))
<add> * Non-unique keys may now cause children to be duplicated and/or omitted. Using non-unique keys is not (and has never been) supported, but previously it was a hard error.
<ide> * Shallow renderer no longer calls `componentDidUpdate()` because DOM refs are not available. This also makes it consistent with `componentDidMount()` (which does not get called in previous versions either).
<ide> * Shallow renderer does not implement `unstable_batchedUpdates()` anymore.
<ide> - The names and paths to the single-file browser builds have changed to emphasize the difference between development and production builds. For example: | 1 |
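The behavior the new changelog entry documents is easy to reproduce. A hypothetical sketch (component and key names invented; `React.createElement` is used so no JSX tooling is assumed): under React 16 the tree below renders with a key-collision warning and the mis-keyed children may be duplicated and/or omitted, whereas per the entry above React 15 raised a hard error:

const React = require('react');

function List() {
  return React.createElement(
    'ul',
    null,
    React.createElement('li', { key: 'dup' }, 'first'),
    React.createElement('li', { key: 'dup' }, 'second') // same key: unsupported
  );
}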
PHP | PHP | improve method naming | c175fbea2fcabb2eab05c9cc6aa1b85c19cbbd9a | <ide><path>src/View/View.php
<ide> public function layoutPath($path = null)
<ide> }
<ide>
<ide> /**
<del> * Get the current state of auto layout.
<add> * Returns if CakePHP's conventional mode of applying layout files is enabled.
<add> * Disabled means that layouts will not be automatically applied to rendered views.
<ide> *
<ide> * @return bool
<ide> */
<del> public function getAutoLayout()
<add> public function isAutoLayoutEnabled()
<ide> {
<ide> return $this->autoLayout;
<ide> }
<ide>
<ide> /**
<ide> * Turns on or off CakePHP's conventional mode of applying layout files.
<ide> * On by default. Setting to off means that layouts will not be
<del> * automatically applied to rendered templates.
<add> * automatically applied to rendered views.
<ide> *
<del> * @param bool $autoLayout Boolean to turn on/off.
<add> * @param bool $enable Boolean to turn on/off.
<ide> * @return void
<ide> */
<del> public function setAutoLayout($autoLayout)
<add> public function enableAutoLayout($enable = true)
<ide> {
<del> $this->autoLayout = $autoLayout;
<add> $this->autoLayout = (bool)$enable;
<ide> }
<ide>
<ide> /**
<ide> * Turns on or off CakePHP's conventional mode of applying layout files.
<ide> * On by default. Setting to off means that layouts will not be
<ide> * automatically applied to rendered templates.
<ide> *
<del> * @deprecated 3.5.0 Use getAutoLayout()/setAutoLayout() instead.
<add> * @deprecated 3.5.0 Use isAutoLayoutEnabled()/enableAutoLayout() instead.
<ide> * @param bool|null $autoLayout Boolean to turn on/off. If null returns current value.
<ide> * @return bool|null
<ide> */
<ide><path>tests/TestCase/View/ViewTest.php
<ide> public function testGetSetLayoutPath()
<ide> }
<ide>
<ide> /**
<del> * Test getAutoLayout() and setAutoLayout().
<add> * Test isAutoLayoutEnabled() and enableAutoLayout().
<ide> *
<ide> * @return void
<ide> */
<del> public function testGetSetAutoLayout()
<add> public function testAutoLayout()
<ide> {
<del> $this->View->setAutoLayout(false);
<del> $autoLayout = $this->View->getAutoLayout();
<add> $this->View->enableAutoLayout(false);
<add> $autoLayout = $this->View->isAutoLayoutEnabled();
<ide> $this->assertSame($autoLayout, false);
<ide>
<del> $this->View->setAutoLayout(true);
<del> $autoLayout = $this->View->getAutoLayout();
<add> $this->View->enableAutoLayout();
<add> $autoLayout = $this->View->isAutoLayoutEnabled();
<ide> $this->assertSame($autoLayout, true);
<ide> }
<ide> | 2 |
Java | Java | fix typo in urlpathhelper | 81eb911c09378058d185c2def8273cbbdc2665b2 | <ide><path>spring-web/src/main/java/org/springframework/web/util/UrlPathHelper.java
<ide> public String getPathWithinApplication(HttpServletRequest request) {
<ide> * Match the given "mapping" to the start of the "requestUri" and if there
<ide> * is a match return the extra part. This method is needed because the
<ide> * context path and the servlet path returned by the HttpServletRequest are
<del> * stripped of semicolon content unlike the requesUri.
<add> * stripped of semicolon content unlike the requestUri.
<ide> */
<ide> @Nullable
<ide> private String getRemainingPath(String requestUri, String mapping, boolean ignoreCase) { | 1 |
Javascript | Javascript | use smaller keys for a faster keygen test | 561e30d9ef581e86b36318fe22ebd1e82ab88754 | <ide><path>test/parallel/test-crypto-keygen.js
<ide> function convertDERToPEM(label, der) {
<ide> // with a relatively small key.
<ide> const ret = generateKeyPairSync('rsa', {
<ide> publicExponent: 0x10001,
<del> modulusLength: 1024,
<add> modulusLength: 512,
<ide> publicKeyEncoding: {
<ide> type: 'pkcs1',
<ide> format: 'pem'
<ide> function convertDERToPEM(label, der) {
<ide>
<ide> assert.strictEqual(typeof publicKey, 'string');
<ide> assert(pkcs1PubExp.test(publicKey));
<del> assertApproximateSize(publicKey, 272);
<add> assertApproximateSize(publicKey, 162);
<ide> assert.strictEqual(typeof privateKey, 'string');
<ide> assert(pkcs8Exp.test(privateKey));
<del> assertApproximateSize(privateKey, 912);
<add> assertApproximateSize(privateKey, 512);
<ide>
<ide> testEncryptDecrypt(publicKey, privateKey);
<ide> testSignVerify(publicKey, privateKey);
<ide> function convertDERToPEM(label, der) {
<ide> // Test async RSA key generation.
<ide> generateKeyPair('rsa', {
<ide> publicExponent: 0x10001,
<del> modulusLength: 4096,
<add> modulusLength: 512,
<ide> publicKeyEncoding: {
<ide> type: 'pkcs1',
<ide> format: 'der'
<ide> function convertDERToPEM(label, der) {
<ide> // will still need to convert it to PEM for testing.
<ide> assert(Buffer.isBuffer(publicKeyDER));
<ide> const publicKey = convertDERToPEM('RSA PUBLIC KEY', publicKeyDER);
<del> assertApproximateSize(publicKey, 720);
<add> assertApproximateSize(publicKey, 180);
<ide>
<ide> assert.strictEqual(typeof privateKey, 'string');
<ide> assert(pkcs1PrivExp.test(privateKey));
<del> assertApproximateSize(privateKey, 3272);
<add> assertApproximateSize(privateKey, 512);
<ide>
<ide> testEncryptDecrypt(publicKey, privateKey);
<ide> testSignVerify(publicKey, privateKey);
<ide> function convertDERToPEM(label, der) {
<ide> // Now do the same with an encrypted private key.
<ide> generateKeyPair('rsa', {
<ide> publicExponent: 0x10001,
<del> modulusLength: 4096,
<add> modulusLength: 512,
<ide> publicKeyEncoding: {
<ide> type: 'pkcs1',
<ide> format: 'der'
<ide> function convertDERToPEM(label, der) {
<ide> // will still need to convert it to PEM for testing.
<ide> assert(Buffer.isBuffer(publicKeyDER));
<ide> const publicKey = convertDERToPEM('RSA PUBLIC KEY', publicKeyDER);
<del> assertApproximateSize(publicKey, 720);
<add> assertApproximateSize(publicKey, 180);
<ide>
<ide> assert.strictEqual(typeof privateKey, 'string');
<ide> assert(pkcs1EncExp('AES-256-CBC').test(privateKey));
<ide> function convertDERToPEM(label, der) {
<ide> {
<ide> // Test async DSA key generation.
<ide> generateKeyPair('dsa', {
<del> modulusLength: 2048,
<add> modulusLength: 256,
<ide> divisorLength: 256,
<ide> publicKeyEncoding: {
<ide> type: 'spki',
<ide> function convertDERToPEM(label, der) {
<ide> assert(Buffer.isBuffer(privateKeyDER));
<ide> const privateKey = convertDERToPEM('ENCRYPTED PRIVATE KEY', privateKeyDER);
<ide>
<del> assertApproximateSize(publicKey, 1194);
<del> assertApproximateSize(privateKey, 1054);
<add> assertApproximateSize(publicKey, 440);
<add> assertApproximateSize(privateKey, 512);
<ide>
<ide> // Since the private key is encrypted, signing shouldn't work anymore.
<ide> assert.throws(() => {
<ide> function convertDERToPEM(label, der) {
<ide> // Test async elliptic curve key generation, e.g. for ECDSA, with an encrypted
<ide> // private key.
<ide> generateKeyPair('ec', {
<del> namedCurve: 'P-256',
<add> namedCurve: 'P-192',
<ide> paramEncoding: 'named',
<ide> publicKeyEncoding: {
<ide> type: 'spki',
<ide> function convertDERToPEM(label, der) {
<ide> // Test the util.promisified API with async RSA key generation.
<ide> promisify(generateKeyPair)('rsa', {
<ide> publicExponent: 0x10001,
<del> modulusLength: 3072,
<add> modulusLength: 512,
<ide> publicKeyEncoding: {
<ide> type: 'pkcs1',
<ide> format: 'pem'
<ide> function convertDERToPEM(label, der) {
<ide> const { publicKey, privateKey } = keys;
<ide> assert.strictEqual(typeof publicKey, 'string');
<ide> assert(pkcs1PubExp.test(publicKey));
<del> assertApproximateSize(publicKey, 600);
<add> assertApproximateSize(publicKey, 180);
<ide>
<ide> assert.strictEqual(typeof privateKey, 'string');
<ide> assert(pkcs1PrivExp.test(privateKey));
<del> assertApproximateSize(privateKey, 2455);
<add> assertApproximateSize(privateKey, 512);
<ide>
<ide> testEncryptDecrypt(publicKey, privateKey);
<ide> testSignVerify(publicKey, privateKey);
<del> })).catch(common.mustNotCall());
<add> }));
<ide> }
<ide>
<ide> {
<ide> function convertDERToPEM(label, der) {
<ide> // Test invalid callbacks.
<ide> for (const cb of [undefined, null, 0, {}]) {
<ide> common.expectsError(() => generateKeyPair('rsa', {
<del> modulusLength: 4096,
<add> modulusLength: 512,
<ide> publicKeyEncoding: { type: 'pkcs1', format: 'pem' },
<ide> privateKeyEncoding: { type: 'pkcs1', format: 'pem' }
<ide> }, cb), {
<ide> function convertDERToPEM(label, der) {
<ide>
<ide> // It should recognize both NIST and standard curve names.
<ide> generateKeyPair('ec', {
<del> namedCurve: 'P-256',
<add> namedCurve: 'P-192',
<ide> publicKeyEncoding: { type: 'spki', format: 'pem' },
<ide> privateKeyEncoding: { type: 'pkcs8', format: 'pem' }
<ide> }, common.mustCall((err, publicKey, privateKey) => { | 1 |
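The change above is purely about test runtime: RSA/DSA/EC keygen cost grows steeply with key size, so the suite drops to much smaller keys. A standalone sketch of the same pattern, independent of the test file (a 512-bit RSA key is insecure and only defensible inside a test):

const { generateKeyPairSync } = require('crypto');

const { publicKey, privateKey } = generateKeyPairSync('rsa', {
  modulusLength: 512, // deliberately tiny: fast to generate, NOT secure
  publicKeyEncoding: { type: 'pkcs1', format: 'pem' },
  privateKeyEncoding: { type: 'pkcs8', format: 'pem' }
});

console.log(publicKey.split('\n', 1)[0]);  // -----BEGIN RSA PUBLIC KEY-----
console.log(privateKey.split('\n', 1)[0]); // -----BEGIN PRIVATE KEY-----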
Ruby | Ruby | fix example of setting defaults in fixtures | 69d395cbb244d63feae78d7dad33fa8b9e6ed17c | <ide><path>activerecord/lib/active_record/fixtures.rb
<ide> class FixtureClassNotFound < ActiveRecord::ActiveRecordError #:nodoc:
<ide> #
<ide> # first:
<ide> # name: Smurf
<del> # *DEFAULTS
<add> # <<: *DEFAULTS
<ide> #
<ide> # second:
<ide> # name: Fraggle
<del> # *DEFAULTS
<add> # <<: *DEFAULTS
<ide> #
<ide> # Any fixture labeled "DEFAULTS" is safely ignored.
<ide> class FixtureSet | 1 |