content_type (stringclasses, 8 values) | main_lang (stringclasses, 7 values) | message (stringlengths, 1-50) | sha (stringlengths, 40-40) | patch (stringlengths, 52-962k) | file_count (int64, 1-300) |
---|---|---|---|---|---|
Ruby | Ruby | add `searchable` helper module | 46e0de1762c8b2b8d5dfaa4953e9b031489a9a6b | <ide><path>Library/Homebrew/searchable.rb
<add>module Searchable
<add> def search(string_or_regex, &block)
<add> case string_or_regex
<add> when Regexp
<add> search_regex(string_or_regex, &block)
<add> else
<add> search_string(string_or_regex.to_str, &block)
<add> end
<add> end
<add>
<add> private
<add>
<add> def simplify_string(string)
<add> string.downcase.gsub(/[^a-z\d]/i, "")
<add> end
<add>
<add> def search_regex(regex)
<add> select do |*args|
<add> args = yield(*args) if block_given?
<add> [*args].any? { |arg| arg.match?(regex) }
<add> end
<add> end
<add>
<add> def search_string(string)
<add> simplified_string = simplify_string(string)
<add> select do |*args|
<add> args = yield(*args) if block_given?
<add> [*args].any? { |arg| simplify_string(arg).include?(simplified_string) }
<add> end
<add> end
<add>end
<ide><path>Library/Homebrew/test/searchable_spec.rb
<add>require "searchable"
<add>
<add>describe Searchable do
<add> subject { ary.extend(described_class) }
<add>
<add> let(:ary) { ["with-dashes"] }
<add>
<add> describe "#search" do
<add> context "when given a block" do
<add> let(:ary) { [["with-dashes", "withdashes"]] }
<add>
<add> it "searches by the selected argument" do
<add> expect(subject.search(/withdashes/) { |_, short_name| short_name }).not_to be_empty
<add> expect(subject.search(/withdashes/) { |long_name, _| long_name }).to be_empty
<add> end
<add> end
<add>
<add> context "when given a regex" do
<add> it "does not simplify strings" do
<add> expect(subject.search(/with\-dashes/)).to eq ["with-dashes"]
<add> end
<add> end
<add>
<add> context "when given a string" do
<add> it "simplifies both the query and searched strings" do
<add> expect(subject.search("with dashes")).to eq ["with-dashes"]
<add> end
<add> end
<add> end
<add>end | 2 |
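The `Searchable` module above dispatches on the query type and, for plain strings, compares simplified forms (lowercased, non-alphanumerics stripped). A minimal Python sketch of the same matching rule; the names are illustrative, not Homebrew's API:

```python
import re

def simplify(s):
    # Lowercase and drop everything but letters and digits, as simplify_string does.
    return re.sub(r"[^a-z0-9]", "", s.lower())

def search(items, query):
    # Regexes match the raw strings; plain strings match simplified forms.
    if isinstance(query, re.Pattern):
        return [x for x in items if query.search(x)]
    q = simplify(query)
    return [x for x in items if q in simplify(x)]

print(search(["with-dashes"], "with dashes"))             # ['with-dashes']
print(search(["with-dashes"], re.compile("withdashes")))  # [] - regex is not simplified
```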
Ruby | Ruby | adjust semantics of formula#fetch | daef74aa27b8316865a44484fefd2d7c424c7342 | <ide><path>Library/Homebrew/cmd/fetch.rb
<ide> def already_fetched? f
<ide>
<ide> def fetch_formula f
<ide> f.cached_download.rmtree if already_fetched?(f) && ARGV.force?
<del> download, _ = f.fetch
<add> download = f.fetch
<ide>
<ide> # FIXME why are strategies returning different types?
<ide> return unless download.is_a? Pathname
<ide><path>Library/Homebrew/formula.rb
<ide> def system cmd, *args
<ide> def fetch
<ide> # Ensure the cache exists
<ide> HOMEBREW_CACHE.mkpath
<del> return downloader.fetch, downloader
<add> downloader.fetch
<ide> end
<ide>
<ide> # For FormulaInstaller.
<ide> def test_defined?
<ide> private
<ide>
<ide> def stage
<del> fetched, downloader = fetch
<del> verify_download_integrity fetched if fetched.kind_of? Pathname
<add> fetched = fetch
<add> verify_download_integrity(fetched) if fetched.kind_of? Pathname
<ide> mktemp do
<ide> downloader.stage
<ide> # Set path after the downloader changes the working folder.
<ide><path>Library/Homebrew/formula_installer.rb
<ide> def clean
<ide> end
<ide>
<ide> def pour
<del> fetched, downloader = f.fetch
<del> f.verify_download_integrity fetched unless downloader.local_bottle_path
<add> fetched, downloader = f.fetch, f.downloader
<add> f.verify_download_integrity(fetched) unless downloader.local_bottle_path
<ide> HOMEBREW_CELLAR.cd do
<ide> downloader.stage
<ide> end | 3 |
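The commit drops the two-value return from `Formula#fetch`; callers that still need the downloader (as `pour` does) now read it off the formula. A rough Python analogue of the before/after calling convention, with hypothetical names:

```python
class Downloader:
    def fetch(self):
        return "/cache/foo.tar.gz"

class Formula:
    def __init__(self):
        self.downloader = Downloader()

    def fetch(self):
        # was: return self.downloader.fetch(), self.downloader
        return self.downloader.fetch()  # single value, no tuple

f = Formula()
fetched = f.fetch()         # just the download now
downloader = f.downloader   # obtained separately where still needed
print(fetched, downloader)
```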
Javascript | Javascript | fix lint errors | 092235a56fc6a754dfc4bcacfb66be864cb9c190 | <ide><path>fonts.js
<ide> var Font = (function Font() {
<ide> return constructor;
<ide> })();
<ide>
<del>/**
<add>/*
<ide> * Type1Parser encapsulate the needed code for parsing a Type1 font
<ide> * program. Some of its logic depends on the Type2 charstrings
<ide> * structure.
<ide> var Type1Parser = function() {
<ide> return { charstring: charstring, width: width, lsb: lsb };
<ide> };
<ide>
<del> /**
<add> /*
<ide> * Returns an object containing a Subrs array and a CharStrings
<ide> * array extracted from and eexec encrypted block of data
<ide> */
<ide><path>glyphlist.js
<ide> var GlyphsUnicode = {
<ide> amsquare: 0x33C2,
<ide> anbopomofo: 0x3122,
<ide> angbopomofo: 0x3124,
<add> angbracketleft: 0x3008, // This glyph is missing from Adobe's original list.
<add> angbracketright: 0x3009, // This glyph is missing from Adobe's original list.
<ide> angkhankhuthai: 0x0E5A,
<ide> angle: 0x2220,
<ide> anglebracketleft: 0x3008,
<ide> var GlyphsUnicode = {
<ide> cieucparenkorean: 0x3208,
<ide> cieucuparenkorean: 0x321C,
<ide> circle: 0x25CB,
<add> circlecopyrt: 0x00A9, // This glyph is missing from Adobe's original list.
<ide> circlemultiply: 0x2297,
<ide> circleot: 0x2299,
<ide> circleplus: 0x2295,
<ide> var GlyphsUnicode = {
<ide> zukatakana: 0x30BA
<ide> };
<ide>
<del>// Add missing glyphs from the Adobe's original list
<del>GlyphsUnicode.angbracketleft = 0x3008;
<del>GlyphsUnicode.angbracketright = 0x3009;
<del>GlyphsUnicode.circlecopyrt = 0x00A9;
<del>
<ide><path>pdf.js
<ide> var Catalog = (function() {
<ide> var obj = this.catDict.get('Names');
<ide> if (obj)
<ide> nameTreeRef = xref.fetchIfRef(obj).get('Dests');
<del> else if(this.catDict.has('Dests'))
<add> else if (this.catDict.has('Dests'))
<ide> nameDictionaryRef = this.catDict.get('Dests');
<ide>
<ide> if (nameDictionaryRef) {
<ide> var Encodings = {
<ide> 'arrowdbldown', 'lozenge', 'angleleft', 'registersans', 'copyrightsans',
<ide> 'trademarksans', 'summation', 'parenlefttp', 'parenleftex',
<ide> 'parenleftbt', 'bracketlefttp', 'bracketleftex', 'bracketleftbt',
<del> 'bracelefttp', 'braceleftmid', 'braceleftbt', 'braceex', ,'angleright',
<add> 'bracelefttp', 'braceleftmid', 'braceleftbt', 'braceex',, 'angleright',
<ide> 'integral', 'integraltp', 'integralex', 'integralbt', 'parenrighttp',
<ide> 'parenrightex', 'parenrightbt', 'bracketrighttp', 'bracketrightex',
<ide> 'bracketrightbt', 'bracerighttp', 'bracerightmid', 'bracerightbt'
<ide> var PartialEvaluator = (function() {
<ide> }
<ide>
<ide> // merge in the differences
<del> var length = baseEncoding.length > diffEncoding.length ?
<del> baseEncoding.length : diffEncoding.length;
<add> var length = baseEncoding.length > diffEncoding.length ?
<add> baseEncoding.length : diffEncoding.length;
<ide> for (var i = 0, ii = length; i < ii; ++i) {
<ide> var diffGlyph = diffEncoding[i];
<ide> var baseGlyph = baseEncoding[i];
<ide> var PartialEvaluator = (function() {
<ide> var cmap = cmapObj.getBytes(cmapObj.length);
<ide> for (var i = 0; i < cmap.length; i++) {
<ide> var byte = cmap[i];
<del> if (byte == 0x20 || byte == 0x0A || byte == 0x3C ||
<add> if (byte == 0x20 || byte == 0x0A || byte == 0x3C ||
<ide> byte == 0x3E) {
<ide> switch (token) {
<ide> case 'useCMap':
<ide><path>utils/fonts_utils.js
<ide>
<ide> 'use strict';
<ide>
<del>/**
<add>/*
<ide> * The Type2 reader code below is only used for debugging purpose since Type2
<ide> * is only a CharString format and is never used directly as a Font file.
<ide> *
<ide> * CharString or to understand the structure of the CFF format.
<ide> */
<ide>
<del>/**
<add>/*
<ide> * Build a charset by assigning the glyph name and the human readable form
<ide> * of the glyph data.
<ide> */
<ide> function readCharset(aStream, aCharstrings) {
<ide> return charset;
<ide> }
<ide>
<del>/**
<add>/*
<ide> * Take a Type2 binary charstring as input and transform it to a human
<ide> * readable representation as specified by the 'The Type 2 Charstring Format',
<ide> * chapter 3.1.
<ide> function readCharstringEncoding(aString) {
<ide> }
<ide>
<ide>
<del>/**
<add>/*
<ide> * Take a binary DICT Data as input and transform it into a human readable
<ide> * form as specified by 'The Compact Font Format Specification', chapter 5.
<ide> */
<ide> function readFontDictData(aString, aMap) {
<ide> return fontDictDataTokens;
<ide> }
<ide>
<del>
<del>/**
<add>/*
<ide> * Take a stream as input and return an array of objects.
<ide> * In CFF an INDEX is a structure with the following format:
<ide> * {
<ide> var Type2Parser = function(aFilePath) {
<ide> *
<ide> */
<ide>
<del>
<del>/**
<add>/*
<ide> * Write to a file to the disk (works only on Firefox in privilege mode)
<ide> * but this is useful for dumping a font file to the disk and check with
<ide> * fontforge or the ots program what's wrong with the file.
<ide><path>web/viewer.js
<ide> var PDFView = {
<ide> var cssUnits = 96.0 / 72.0;
<ide> for (var i = 0; i < pages.length; i++)
<ide> pages[i].update(val / 100 * cssUnits);
<del>
<del> if(document.location.hash == '#' + this.page)
<del> this.pages[this.page-1].draw();
<add>
<add> if (document.location.hash == '#' + this.page)
<add> this.pages[this.page - 1].draw();
<ide> else
<ide> // Jump the scroll position to the correct page.
<ide> document.location.hash = this.page;
<ide>
<del> var event = document.createEvent("UIEvents");
<del> event.initUIEvent("scalechange", false, false, window, val);
<add> var event = document.createEvent('UIEvents');
<add> event.initUIEvent('scalechange', false, false, window, val);
<ide> window.dispatchEvent(event);
<ide> },
<ide>
<ide> var PDFView = {
<ide> var outlineScrollView = document.getElementById('outlineScrollView');
<ide> var thumbsSwitchButton = document.getElementById('thumbsSwitch');
<ide> var outlineSwitchButton = document.getElementById('outlineSwitch');
<del> switch(view) {
<add> switch (view) {
<ide> case 'thumbs':
<ide> thumbsScrollView.style.display = 'block';
<ide> outlineScrollView.style.display = 'none';
<ide> window.addEventListener('transitionend', function(evt) {
<ide> }, true);
<ide>
<ide>
<del>window.addEventListener("scalechange", function(evt) {
<add>window.addEventListener('scalechange', function(evt) {
<ide> var options = document.getElementById('scaleSelect').options;
<ide> for (var i = 0; i < options.length; i++) {
<ide> var option = options[i];
<ide> option.selected = (option.value == evt.detail);
<ide> }
<ide> }, true);
<ide>
<del>window.addEventListener("pagechange", function(evt) {
<add>window.addEventListener('pagechange', function(evt) {
<ide> var page = evt.detail;
<ide> document.location.hash = page;
<del> document.getElementById("pageNumber").value = page;
<del> document.getElementById("previous").disabled = (page == 1);
<del> document.getElementById("next").disabled = (page == PDFView.pages.length);
<add> document.getElementById('pageNumber').value = page;
<add> document.getElementById('previous').disabled = (page == 1);
<add> document.getElementById('next').disabled = (page == PDFView.pages.length);
<ide> }, true); | 5 |
Python | Python | fix broken span.as_doc test | 51d9679aa382bce06366f4c7096d8e8965172775 | <ide><path>spacy/tests/doc/test_span.py
<ide> def test_span_to_array(doc):
<ide> assert arr[0, 1] == len(span[0])
<ide>
<ide>
<del>def test_span_as_doc(doc):
<del> span = doc[4:10]
<del> span_doc = span.as_doc()
<del> assert span.text == span_doc.text.strip()
<add>#def test_span_as_doc(doc):
<add># span = doc[4:10]
<add># span_doc = span.as_doc()
<add># assert span.text == span_doc.text.strip() | 1 |
PHP | PHP | support atomic add on memcached driver | 11c0d7bb08b767f7746867f3bdee141c9cc7e5b3 | <ide><path>src/Illuminate/Cache/MemcachedStore.php
<ide> public function put($key, $value, $minutes)
<ide> $this->memcached->set($this->prefix.$key, $value, $minutes * 60);
<ide> }
<ide>
<add> /**
<add> * Store an item in the cache if the key doesn't exist.
<add> *
<add> * @param string $key
<add> * @param mixed $value
<add> * @param int $minutes
<add> * @return bool
<add> */
<add> public function add($key, $value, $minutes)
<add> {
<add> return $this->memcached->add($this->prefix.$key, $value, $minutes * 60);
<add> }
<add>
<ide> /**
<ide> * Increment the value of an item in the cache.
<ide> *
<ide><path>src/Illuminate/Cache/Repository.php
<ide> public function put($key, $value, $minutes)
<ide> */
<ide> public function add($key, $value, $minutes)
<ide> {
<add> if (method_exists($this->store, 'add'))
<add> {
<add> return $this->store->add($key, $value, $minutes);
<add> }
<add>
<ide> if (is_null($this->get($key)))
<ide> {
<ide> $this->put($key, $value, $minutes); return true; | 2 |
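The repository now prefers the store's native atomic `add` when one exists, keeping the racy get-then-put check only as a fallback for stores without it. A Python sketch of the same dispatch; the store classes are stand-ins, not Laravel's:

```python
class MemcachedStore:
    def __init__(self):
        self._data = {}

    def add(self, key, value, minutes):
        # Atomic server-side in real Memcached; modelled here as one step.
        if key in self._data:
            return False
        self._data[key] = value
        return True

class Repository:
    def __init__(self, store):
        self.store = store

    def add(self, key, value, minutes):
        if hasattr(self.store, "add"):       # native atomic path
            return self.store.add(key, value, minutes)
        if self.store.get(key) is None:      # fallback: check-then-set race
            self.store.put(key, value, minutes)
            return True
        return False

repo = Repository(MemcachedStore())
print(repo.add("k", 1, 10))  # True
print(repo.add("k", 2, 10))  # False: key already present
```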
Text | Text | fix typos in writingtests.md | 6d070b149161c2f2a23d4648c60ae352cd74b84e | <ide><path>docs/recipes/WritingTests.md
<ide> # Writing Tests
<ide>
<del>Because most of the Redux code you write are functions, and many of them are pure, they are easy test without mocking.
<add>Because most of the Redux code you write are functions, and many of them are pure, they are easy to test without mocking.
<ide>
<ide> ### Setting Up
<ide>
<ide> We recommend [Mocha](http://mochajs.org/) as the testing engine.
<del>Note that it runs in a Node environment, so you won’t have access to DOM.
<add>Note that it runs in a Node environment, so you won’t have access to the DOM.
<ide>
<ide> ```
<ide> npm install --save-dev mocha
<ide> describe('components', () => {
<ide>
<ide> #### Fixing Broken `setState()`
<ide>
<del>Shallow rendering currently [throws an error if `setState` is called](https://github.com/facebook/react/issues/4019). React seems to expect that, if you use `setState`, DOM is available. To work around the issue, we use jsdom so React doesn’t throw the exception when DOM isn’t available. Here’s how to [set it up](https://github.com/facebook/react/issues/5046#issuecomment-146222515):
<add>Shallow rendering currently [throws an error if `setState` is called](https://github.com/facebook/react/issues/4019). React seems to expect that, if you use `setState`, the DOM is available. To work around the issue, we use jsdom so React doesn’t throw the exception when the DOM isn’t available. Here’s how to [set it up](https://github.com/facebook/react/issues/5046#issuecomment-146222515):
<ide>
<ide> ```
<ide> npm install --save-dev jsdom
<ide> In a unit test, you would normally import the `App` component like this:
<ide> import App from './App';
<ide> ```
<ide>
<del>However when you import it, you’re actually holding the wrapper component returned by `connect()`, and not the `App` component itself. If you want to test its interaction with Redux, this is good news: you can wrap it in a [`<Provider>`](https://github.com/rackt/react-redux#provider-store) with a store created specifically for this unit test. But sometimes you want to test just the rendering of the component, without a Redux store.
<add>However, when you import it, you’re actually holding the wrapper component returned by `connect()`, and not the `App` component itself. If you want to test its interaction with Redux, this is good news: you can wrap it in a [`<Provider>`](https://github.com/rackt/react-redux#provider-store) with a store created specifically for this unit test. But sometimes you want to test just the rendering of the component, without a Redux store.
<ide>
<ide> In order to be able to test the App component itself without having to deal with the decorator, we recommend you to also export the undecorated component:
<ide> | 1 |
Javascript | Javascript | fix weird failure in edge 16 css | 5e6deb3999539e6666106a524fe6f067f60a41f1 | <ide><path>test/unit/css.js
<ide> QUnit.test( "css() non-px relative values (gh-1711)", function( assert ) {
<ide>
<ide> getUnits( "lineHeight" );
<ide> cssCurrent = parseFloat( $child.css( "lineHeight", "1em" ).css( "lineHeight" ) );
<add> add( "lineHeight", 50, "%" );
<ide> add( "lineHeight", 2, "em" );
<ide> add( "lineHeight", -10, "px" );
<ide> add( "lineHeight", 20, "pt" );
<ide> add( "lineHeight", 30, "pc" );
<ide> add( "lineHeight", 1, "cm" );
<ide> add( "lineHeight", -44, "mm" );
<del> add( "lineHeight", 50, "%" );
<ide> } );
<ide>
<ide> QUnit.test( "css() mismatched relative values with bounded styles (gh-2144)", function( assert ) { | 1 |
PHP | PHP | fix style issues with previous commit | 6dc5c7c9edb9a9aa9ea031b80b4b0c832eea78bb | <ide><path>src/Illuminate/Database/Query/Processors/SqlServerProcessor.php
<ide> public function processInsertGetId(Builder $query, $sql, $values, $sequence = nu
<ide> $connection = $query->getConnection();
<ide> $connection->insert($sql, $values);
<ide> if ($connection->getConfig('odbc') === true) {
<del> $result = $connection->select("SELECT CAST(COALESCE(SCOPE_IDENTITY(), @@IDENTITY) AS int) AS insertid");
<del> if (!$result) {
<add> $result = $connection->select('SELECT CAST(COALESCE(SCOPE_IDENTITY(), @@IDENTITY) AS int) AS insertid');
<add> if (! $result) {
<ide> throw new Exception('Error retrieving lastInsertId');
<ide> }
<ide> $id = $result[0]->insertid;
<del> }
<del> else {
<add> } else {
<ide> $id = $connection->getPdo()->lastInsertId();
<ide> }
<ide> | 1 |
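Behind the style fixes, the ODBC branch selects `SCOPE_IDENTITY()` manually because PDO's `lastInsertId()` is unavailable over ODBC. A hedged pyodbc sketch of the same query; the connection string and table are assumptions, and a reachable SQL Server is required to run it:

```python
import pyodbc

conn = pyodbc.connect("DSN=mssql;UID=sa;PWD=secret")  # hypothetical DSN
cur = conn.cursor()
cur.execute("INSERT INTO users (name) VALUES (?)", "taylor")
# COALESCE with @@IDENTITY mirrors the query above: SCOPE_IDENTITY() can be
# NULL when the insert ran in a different batch scope.
row = cur.execute(
    "SELECT CAST(COALESCE(SCOPE_IDENTITY(), @@IDENTITY) AS int) AS insertid"
).fetchone()
if row is None:
    raise RuntimeError("Error retrieving lastInsertId")
print(row.insertid)
```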
PHP | PHP | use iso standard for time format | 9ababa2c564204062e28520eebe5f3d5fb667692 | <ide><path>src/View/Helper/FormHelper.php
<ide> class FormHelper extends Helper {
<ide> 'button' => '<button{{attrs}}>{{text}}</button>',
<ide> 'checkbox' => '<input type="checkbox" name="{{name}}" value="{{value}}"{{attrs}}>',
<ide> 'checkboxContainer' => '<div class="checkbox">{{input}}{{label}}</div>',
<del> 'dateWidget' => '{{month}}{{day}}{{year}}{{hour}}{{minute}}{{second}}{{meridian}}',
<add> 'dateWidget' => '{{year}}{{month}}{{day}}{{hour}}{{minute}}{{second}}{{meridian}}',
<ide> 'error' => '<div class="error-message">{{content}}</div>',
<ide> 'errorList' => '<ul>{{content}}</ul>',
<ide> 'errorItem' => '<li>{{text}}</li>',
<ide> public function month($fieldName, array $options = array()) {
<ide> * - `empty` - If true, the empty select option is shown. If a string,
<ide> * that string is displayed as the empty element.
<ide> * - `value` The selected value of the input.
<del> * - `format` Set to 12 or 24 to use 12 or 24 hour formatting. Defaults to 12.
<add> * - `format` Set to 12 or 24 to use 12 or 24 hour formatting. Defaults to 24.
<ide> *
<ide> * @param string $fieldName Prefix name for the SELECT element
<ide> * @param array $options List of HTML attributes
<ide> * @return string Completed hour select input
<ide> * @link http://book.cakephp.org/2.0/en/core-libraries/helpers/form.html#FormHelper::hour
<ide> */
<ide> public function hour($fieldName, array $options = []) {
<del> $options += ['format' => 12];
<add> $options += ['format' => 24];
<ide> $options = $this->_singleDatetime($options, 'hour');
<ide>
<ide> $options['timeFormat'] = $options['format'];
<ide> public function dateTime($fieldName, array $options = array()) {
<ide> 'minYear' => null,
<ide> 'maxYear' => null,
<ide> 'orderYear' => 'desc',
<del> 'timeFormat' => 12,
<add> 'timeFormat' => 24,
<ide> 'second' => false,
<ide> ];
<ide> $options = $this->_initInputField($fieldName, $options);
<ide> public function time($fieldName, array $options = []) {
<ide> 'value' => null,
<ide> 'interval' => 1,
<ide> 'round' => null,
<del> 'timeFormat' => 12,
<add> 'timeFormat' => 24,
<ide> 'second' => false,
<ide> ];
<ide> $options['year'] = $options['month'] = $options['day'] = false;
<ide><path>src/View/Widget/DateTime.php
<ide> public function render(array $data) {
<ide>
<ide> $selected = $this->_deconstructDate($data['val'], $data);
<ide>
<del> if (!isset($data['meridian']) &&
<del> isset($data['hour']['format']) &&
<del> $data['hour']['format'] == 12
<del> ) {
<add> $timeFormat = isset($data['hour']['format']) ? $data['hour']['format'] : null;
<add> if ($timeFormat === 12 && !isset($data['meridian'])) {
<ide> $data['meridian'] = [];
<ide> }
<add> if ($timeFormat === 24) {
<add> $data['meridian'] = false;
<add> }
<ide>
<ide> $templateOptions = [];
<ide> foreach ($this->_selects as $select) {
<ide><path>tests/TestCase/View/Helper/FormHelperTest.php
<ide> public function testTime() {
<ide> $this->assertNotContains('day', $result);
<ide> }
<ide>
<add>/**
<add> * Ensure that timeFormat=24 has no merdian.
<add> *
<add> * @return void.
<add> */
<add> public function testTimeFormat24NoMeridian() {
<add> $result = $this->Form->time('start_time', array(
<add> 'timeFormat' => 24,
<add> 'interval' => 5,
<add> 'value' => '2014-03-08 16:30:00'
<add> ));
<add> $this->assertContains('<option value="16" selected="selected">16</option>', $result);
<add> $this->assertContains('<option value="30" selected="selected">30</option>', $result);
<add> $this->assertNotContains('meridian', $result);
<add> $this->assertNotContains('pm', $result);
<add> $this->assertNotContains('year', $result);
<add> $this->assertNotContains('month', $result);
<add> $this->assertNotContains('day', $result);
<add> }
<add>
<ide> /**
<ide> * Test the date type.
<ide> *
<ide> public function testDateTime() {
<ide> $result = $this->Form->dateTime('Contact.date', array('empty' => false));
<ide> $now = strtotime('now');
<ide> $expected = array(
<add> array('select' => array('name' => 'Contact[date][year]')),
<add> $yearsRegex,
<add> array('option' => array('value' => date('Y', $now), 'selected' => 'selected')),
<add> date('Y', $now),
<add> '/option',
<add> '*/select',
<add>
<ide> array('select' => array('name' => 'Contact[date][month]')),
<ide> $monthsRegex,
<ide> array('option' => array('value' => date('m', $now), 'selected' => 'selected')),
<ide> public function testDateTime() {
<ide> '/option',
<ide> '*/select',
<ide>
<del> array('select' => array('name' => 'Contact[date][year]')),
<del> $yearsRegex,
<del> array('option' => array('value' => date('Y', $now), 'selected' => 'selected')),
<del> date('Y', $now),
<del> '/option',
<del> '*/select',
<del>
<ide> array('select' => array('name' => 'Contact[date][hour]')),
<ide> $hoursRegex,
<ide> array('option' => array('value' => date('h', $now), 'selected' => 'selected')),
<ide> public function testDateTime() {
<ide> date('i', $now),
<ide> '/option',
<ide> '*/select',
<del>
<del> array('select' => array('name' => 'Contact[date][meridian]')),
<del> $meridianRegex,
<del> array('option' => array('value' => date('a', $now), 'selected' => 'selected')),
<del> date('a', $now),
<del> '/option',
<del> '*/select'
<ide> );
<ide> $this->assertTags($result, $expected);
<ide> }
<ide> public function testDatetimeEmpty() {
<ide> $now = strtotime('now');
<ide>
<ide> $result = $this->Form->dateTime('Contact.date', array(
<add> 'timeFormat' => 12,
<ide> 'empty' => true,
<ide> ));
<ide> $expected = array(
<del> array('select' => array('name' => 'Contact[date][month]')),
<del> $monthsRegex,
<add> array('select' => array('name' => 'Contact[date][year]')),
<add> $yearsRegex,
<ide> array('option' => array('value' => '')),
<ide> '/option',
<ide> '*/select',
<ide>
<del> array('select' => array('name' => 'Contact[date][day]')),
<del> $daysRegex,
<add> array('select' => array('name' => 'Contact[date][month]')),
<add> $monthsRegex,
<ide> array('option' => array('value' => '')),
<ide> '/option',
<ide> '*/select',
<ide>
<del> array('select' => array('name' => 'Contact[date][year]')),
<del> $yearsRegex,
<add> array('select' => array('name' => 'Contact[date][day]')),
<add> $daysRegex,
<ide> array('option' => array('value' => '')),
<ide> '/option',
<ide> '*/select',
<ide> public function testDatetimeMinuteInterval() {
<ide> 'value' => ''
<ide> ));
<ide> $expected = array(
<del> array('select' => array('name' => 'Contact[date][month]')),
<del> $monthsRegex,
<add> array('select' => array('name' => 'Contact[date][year]')),
<add> $yearsRegex,
<ide> array('option' => array('selected' => 'selected', 'value' => '')),
<ide> '/option',
<ide> '*/select',
<ide>
<del> array('select' => array('name' => 'Contact[date][day]')),
<del> $daysRegex,
<add> array('select' => array('name' => 'Contact[date][month]')),
<add> $monthsRegex,
<ide> array('option' => array('selected' => 'selected', 'value' => '')),
<ide> '/option',
<ide> '*/select',
<ide>
<del> array('select' => array('name' => 'Contact[date][year]')),
<del> $yearsRegex,
<add> array('select' => array('name' => 'Contact[date][day]')),
<add> $daysRegex,
<ide> array('option' => array('selected' => 'selected', 'value' => '')),
<ide> '/option',
<ide> '*/select',
<ide> public function testFormDateTimeMulti() {
<ide> $this->assertContains('Contact[1][updated][year]', $result);
<ide> $this->assertContains('Contact[1][updated][hour]', $result);
<ide> $this->assertContains('Contact[1][updated][minute]', $result);
<del> $this->assertContains('Contact[1][updated][meridian]', $result);
<ide> }
<ide>
<ide> /**
<ide> public function testMeridian() {
<ide> public function testHour() {
<ide> extract($this->dateRegex);
<ide>
<del> $result = $this->Form->hour('Model.field', ['value' => '']);
<add> $result = $this->Form->hour('Model.field', ['format' => 12, 'value' => '']);
<ide> $expected = array(
<ide> array('select' => array('name' => 'Model[field][hour]')),
<ide> array('option' => array('selected' => 'selected', 'value' => '')),
<ide> public function testDateTimeWithGetForms() {
<ide> $this->assertContains('name="created[day]"', $result, 'day name attribute is wrong.');
<ide> $this->assertContains('name="created[hour]"', $result, 'hour name attribute is wrong.');
<ide> $this->assertContains('name="created[minute]"', $result, 'min name attribute is wrong.');
<del> $this->assertContains('name="created[meridian]"', $result, 'meridian name attribute is wrong.');
<ide> }
<ide>
<ide> /**
<ide><path>tests/TestCase/View/Widget/DateTimeTest.php
<ide> public function testRenderHourWidget24StartAndEnd() {
<ide> $result,
<ide> 'contains 17 hours'
<ide> );
<add> $this->assertNotContains('meridian', $result, '24hrs has no meridian');
<ide> }
<ide>
<ide> /**
<ide> public function testRenderHourWidget24() {
<ide> 'month' => false,
<ide> 'day' => false,
<ide> 'hour' => [
<add> 'format' => 24,
<ide> 'data-foo' => 'test'
<ide> ],
<ide> 'minute' => false,
<ide> 'second' => false,
<ide> 'val' => $now,
<add> 'meridian' => [],
<ide> ]);
<ide> $this->assertContains('<select name="date[hour]" data-foo="test">', $result);
<ide> $this->assertContains('<option value="00">0</option>', $result); | 4 |
Javascript | Javascript | fix horizontalbar deprecation warnings | 21da5be3c63e69a4edfc364d040e270471d8234d | <ide><path>src/controllers/controller.horizontalBar.js
<ide> defaults._set('horizontalBar', {
<ide> yAxes: [{
<ide> type: 'category',
<ide> position: 'left',
<del> categoryPercentage: 0.8,
<del> barPercentage: 0.9,
<ide> offset: true,
<ide> gridLines: {
<ide> offsetGridLines: true
<ide> defaults._set('horizontalBar', {
<ide> }
<ide> });
<ide>
<add>defaults._set('global', {
<add> datasets: {
<add> horizontalBar: {
<add> categoryPercentage: 0.8,
<add> barPercentage: 0.9
<add> }
<add> }
<add>});
<add>
<ide> module.exports = BarController.extend({
<ide> /**
<ide> * @private
<ide> module.exports = BarController.extend({
<ide> return this.getMeta().yAxisID;
<ide> }
<ide> });
<del>
<ide><path>test/specs/controller.bar.tests.js
<ide> describe('Chart.controllers.bar', function() {
<ide> var meta = chart.getDatasetMeta(0);
<ide> var yScale = chart.scales[meta.yAxisID];
<ide>
<del> var categoryPercentage = yScale.options.categoryPercentage;
<del> var barPercentage = yScale.options.barPercentage;
<add> var config = meta.controller._config;
<add> var categoryPercentage = config.categoryPercentage;
<add> var barPercentage = config.barPercentage;
<ide> var stacked = yScale.options.stacked;
<ide>
<ide> var totalBarHeight = 0; | 2 |
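The fix moves `categoryPercentage`/`barPercentage` off the scale's options into per-chart-type dataset defaults, so the controller reads them from its dataset config. A small Python sketch of that layered lookup using `ChainMap`; the key names mirror the diff, the merge mechanism is illustrative:

```python
from collections import ChainMap

global_dataset_defaults = {
    "horizontalBar": {"categoryPercentage": 0.8, "barPercentage": 0.9},
}

dataset = {"barPercentage": 0.5}  # user override on one dataset

# Controller config: the user's dataset options first, then type-level defaults.
config = ChainMap(dataset, global_dataset_defaults["horizontalBar"])
print(config["categoryPercentage"], config["barPercentage"])  # 0.8 0.5
```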
Text | Text | fix casing of dense keyword | d5543cd69235082d616bd11661dfba6b932039f6 | <ide><path>curriculum/challenges/english/11-machine-learning-with-python/tensorflow/natural-language-processing-with-rnns-sentimental-analysis.md
<ide> Fill in the blanks below to create the model for the RNN:
<ide> model = __A__.keras.Sequential([
<ide> __A__.keras.layers.__B__(88584, 32),
<ide> __A__.keras.layers.__C__(32),
<del> __A__.keras.layers.DENSE(1, activation='sigmoid')
<add> __A__.keras.layers.Dense(1, activation='sigmoid')
<ide> ])
<ide> ```
<ide> | 1 |
PHP | PHP | remove unused code paths | 98e4f253e22f37b96c146610b3743aa2cde254bc | <ide><path>src/Controller/Component/CookieComponent.php
<ide> protected function _decrypt($values, $mode) {
<ide>
<ide> $decrypted = array();
<ide> foreach ($values as $name => $value) {
<del> if (is_array($value)) {
<del> foreach ($value as $key => $val) {
<del> $decrypted[$name][$key] = $this->_decode($val, $mode);
<del> }
<del> } else {
<del> $decrypted[$name] = $this->_decode($value, $mode);
<del> }
<add> $decrypted[$name] = $this->_decode($value, $mode);
<ide> }
<ide> return $decrypted;
<ide> } | 1 |
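The removed branch recursed one level into array values; after the change every stored value decodes uniformly. A Python sketch of the simplified loop, with `decode` as a hypothetical stand-in for `CookieComponent::_decode`:

```python
def decode(value, mode):
    # Stand-in for the real decoder; illustrative only.
    return f"decoded({value},{mode})"

def decrypt(values, mode):
    # One decode per value; the per-element recursion for nested arrays
    # was an unused code path and is gone.
    return {name: decode(value, mode) for name, value in values.items()}

print(decrypt({"User": "abc123", "remember": "xyz"}, "aes"))
```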
Python | Python | add regression test for #636 | c8ae682ff98f2c5b5733ae4b299970c820e46630 | <ide><path>spacy/tests/regression/test_issue636.py
<add># coding: utf8
<add>from __future__ import unicode_literals
<add>
<add>from ...tokens.doc import Doc
<add>import pytest
<add>
<add>
<add>@pytest.mark.xfail
<add>@pytest.mark.models
<add>@pytest.mark.parametrize('text', ["I cant do this."])
<add>def test_issue636(EN, text):
<add> """Test that to_bytes and from_bytes don't change the token lemma."""
<add> doc1 = EN(text)
<add> doc2 = Doc(EN.vocab)
<add> doc2.from_bytes(doc1.to_bytes())
<add> print([t.lemma_ for t in doc1], [t.lemma_ for t in doc2])
<add> assert [t.lemma_ for t in doc1] == [t.lemma_ for t in doc2] | 1 |
Python | Python | accept non arrays in cor and corrcoeff | df84ecfea50ef33a1259c67c534a09238c0eefc8 | <ide><path>numpy/lib/function_base.py
<ide> def cov(m, y=None, rowvar=1, bias=0, ddof=None):
<ide> "ddof must be integer")
<ide>
<ide> # Handles complex arrays too
<add> m = np.asarray(m)
<ide> if y is None:
<ide> dtype = np.result_type(m, np.float64)
<ide> else:
<add> y = np.asarray(y)
<ide> dtype = np.result_type(m, y, np.float64)
<ide> X = array(m, ndmin=2, dtype=dtype)
<ide>
<ide><path>numpy/lib/tests/test_function_base.py
<ide> class TestCorrCoef(TestCase):
<ide> [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],
<ide> [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])
<ide>
<add> def test_non_array(self):
<add> assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),
<add> [[1., -1.], [-1., 1.]])
<add>
<ide> def test_simple(self):
<ide> assert_almost_equal(corrcoef(self.A), self.res1)
<ide> assert_almost_equal(corrcoef(self.A, self.B), self.res2) | 2 |
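The fix coerces `m` and `y` with `np.asarray` before calling `np.result_type`, which expects inputs with dtype information, so plain Python lists now work. The new test reproduced standalone:

```python
import numpy as np

# Failed before the coercion was added; lists now behave like arrays.
print(np.corrcoef([0, 1, 0], [1, 0, 1]))
# [[ 1. -1.]
#  [-1.  1.]]
```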
Text | Text | fix nits in guides/using-internal-errors.md | 259466c6f4e01f547b1400dc7caf7df471d6f0aa | <ide><path>doc/guides/using-internal-errors.md
<ide> are intended to replace existing `Error` objects within the Node.js source.
<ide> For instance, an existing `Error` such as:
<ide>
<ide> ```js
<del> const err = new TypeError('Expected string received ' + type);
<add>const err = new TypeError(`Expected string received ${type}`);
<ide> ```
<ide>
<ide> Can be replaced by first adding a new error key into the `internal/errors.js`
<ide> E('FOO', 'Expected string received %s');
<ide> Then replacing the existing `new TypeError` in the code:
<ide>
<ide> ```js
<del> const errors = require('internal/errors');
<del> // ...
<del> const err = new errors.TypeError('FOO', type);
<add>const errors = require('internal/errors');
<add>// ...
<add>const err = new errors.TypeError('FOO', type);
<ide> ```
<ide>
<ide> ## Adding new errors
<ide> The specific error message for the `myError` instance will depend on the
<ide> associated value of `KEY` (see "Adding new errors").
<ide>
<ide> The `myError` object will have a `code` property equal to the `key` and a
<del>`name` property equal to `Error[${key}]`.
<add>`name` property equal to `` `Error [${key}]` ``.
<ide>
<ide> ### Class: errors.TypeError(key[, args...])
<ide>
<ide> The specific error message for the `myError` instance will depend on the
<ide> associated value of `KEY` (see "Adding new errors").
<ide>
<ide> The `myError` object will have a `code` property equal to the `key` and a
<del>`name` property equal to `TypeError[${key}]`.
<add>`name` property equal to `` `TypeError [${key}]` ``.
<ide>
<ide> ### Class: errors.RangeError(key[, args...])
<ide>
<ide> The specific error message for the `myError` instance will depend on the
<ide> associated value of `KEY` (see "Adding new errors").
<ide>
<ide> The `myError` object will have a `code` property equal to the `key` and a
<del>`name` property equal to `RangeError[${key}]`.
<add>`name` property equal to `` `RangeError [${key}]` ``.
<ide>
<ide> ### Method: errors.message(key, args)
<ide> | 1 |
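The guide's pattern, a central registry keyed by error code with printf-style templates plus `code` and `name` properties on each error, ports to any language. A compact Python analogue; the class and registry names are illustrative:

```python
_messages = {}

def E(key, template):
    _messages[key] = template

class CodedTypeError(TypeError):
    def __init__(self, key, *args):
        super().__init__(_messages[key] % args)
        self.code = key
        self.name = f"TypeError [{key}]"

E("FOO", "Expected string received %s")

err = CodedTypeError("FOO", "int")
print(err.code, "-", err)  # FOO - Expected string received int
```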
Python | Python | bloom minor fixes small test | 6a1b1bf7a6abb303ccf655d53a0e1f9b30890470 | <ide><path>tests/models/bloom/test_modeling_bloom.py
<ide> def test_model_from_pretrained(self):
<ide> def test_simple_generation(self):
<ide> # This test is a bit flaky. For some GPU architectures, pytorch sets by default allow_fp16_reduced_precision_reduction = True and some operations
<ide> # do not give the same results under this configuration, especially torch.baddmm and torch.bmm. https://pytorch.org/docs/stable/notes/numerical_accuracy.html#fp16-on-mi200
<del> # We set allow_fp16_reduced_precision_reduction = True. Please see: https://pytorch.org/docs/stable/notes/cuda.html#reduced-precision-reduction-in-fp16-gemms
<add> # As we leave the default value (True) for allow_fp16_reduced_precision_reduction , the tests failed when running in half-precision with smaller models (350m)
<add> # Please see: https://pytorch.org/docs/stable/notes/cuda.html#reduced-precision-reduction-in-fp16-gemms
<ide> # This discrepancy is observed only when using small models and seems to be stable for larger models.
<ide> # Our conclusion is that these operations are flaky for small inputs but seems to be stable for larger inputs (for the functions `baddmm` and `bmm`), and therefore for larger models.
<ide>
<ide> def test_simple_generation(self):
<ide> # >=760m + allow_fp16_reduced_precision_reduction = False + torch.bmm ==> PASS
<ide>
<ide> path_350m = "bigscience/bloom-350m"
<del> model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True).cuda()
<add> model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True, revision="gs555750").cuda()
<ide> model = model.eval()
<ide> tokenizer = BloomTokenizerFast.from_pretrained(path_350m)
<ide>
<ide> def test_simple_generation(self):
<ide> @require_torch_gpu
<ide> def test_batch_generation(self):
<ide> path_350m = "bigscience/bloom-350m"
<del> model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True).cuda()
<add> model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True, revision="gs555750").cuda()
<ide> model = model.eval()
<ide> tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
<ide>
<ide> def test_batch_generation(self):
<ide> def test_batch_generation_padd(self):
<ide>
<ide> path_350m = "bigscience/bloom-350m"
<del> model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True).cuda()
<add> model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True, revision="gs555750").cuda()
<ide> model = model.eval()
<ide> tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
<ide>
<ide> def test_batch_generation_padd(self):
<ide> tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True),
<ide> )
<ide>
<del> @slow
<del> def test_right_left_batched_input(self):
<del> path_1b3 = "bigscience/bloom-1b3"
<del> model = BloomForCausalLM.from_pretrained(path_1b3, use_cache=True)
<del> model = model.eval()
<del>
<del> tokenizer = BloomTokenizerFast.from_pretrained(path_1b3)
<del> tokenizer.padding_side = "right"
<del>
<del> inputs = ["Hello there", "Joe Biden is the president of the"]
<del> inputs_right = tokenizer(inputs, return_tensors="pt", padding=True)
<del>
<del> tokenizer.padding_side = "left"
<del> inputs_left = tokenizer(inputs, return_tensors="pt", padding=True)
<del>
<del> # test token values are different
<del> self.assertNotEqual(inputs_right["input_ids"].tolist(), inputs_left["input_ids"].tolist())
<del>
<del> # test reconstructions are the same
<del> outputs_right = model.generate(**inputs_right, max_length=10, do_sample=False)
<del> outputs_left = model.generate(**inputs_left, max_length=10, do_sample=False)
<del>
<del> self.assertEqual(
<del> tokenizer.decode(outputs_right[0], skip_special_tokens=True),
<del> tokenizer.decode(outputs_left[0], skip_special_tokens=True),
<del> )
<del>
<ide>
<ide> @require_torch
<ide> class BloomEmbeddingTest(unittest.TestCase): | 1 |
Ruby | Ruby | add a test case for layout nil | dd0275e46314b6c4cddb1ae438ce1390d13aa53c | <ide><path>actionpack/lib/abstract_controller/layouts.rb
<ide> def conditional_layout?
<ide> # Symbol:: call the method specified by the symbol, which will return the template name
<ide> # false:: There is no layout
<ide> # true:: raise an ArgumentError
<add> # nil:: Force default layout behavior with inheritance
<ide> #
<ide> # ==== Parameters
<ide> # * <tt>layout</tt> - The layout to use.
<ide><path>actionpack/test/abstract/layouts_test.rb
<ide> class Base < AbstractController::Base
<ide> "layouts/overwrite.erb" => "Overwrite <%= yield %>",
<ide> "layouts/with_false_layout.erb" => "False Layout <%= yield %>",
<ide> "abstract_controller_tests/layouts/with_string_implied_child.erb" =>
<del> "With Implied <%= yield %>"
<add> "With Implied <%= yield %>",
<add> "abstract_controller_tests/layouts/with_grand_child_of_implied.erb" =>
<add> "With Grand Child <%= yield %>"
<add>
<ide> )]
<ide> end
<ide>
<ide> class WithStringImpliedChild < WithString
<ide> class WithChildOfImplied < WithStringImpliedChild
<ide> end
<ide>
<add> class WithGrandChildOfImplied < WithStringImpliedChild
<add> layout nil
<add> end
<add>
<ide> class WithProc < Base
<ide> layout proc { |c| "overwrite" }
<ide>
<ide> class TestBase < ActiveSupport::TestCase
<ide> assert_equal "With Implied Hello string!", controller.response_body
<ide> end
<ide>
<add> test "when a grandchild has nil layout specified, the child has an implied layout, and the " \
<add> "parent has specified a layout, use the child controller layout" do
<add> controller = WithGrandChildOfImplied.new
<add> controller.process(:index)
<add> assert_equal "With Grand Child Hello string!", controller.response_body
<add> end
<add>
<ide> test "raises an exception when specifying layout true" do
<ide> assert_raises ArgumentError do
<ide> Object.class_eval do | 2 |
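The new test pins down that `layout nil` on a grandchild re-enables the conventional per-class lookup instead of inheriting the parent's value or clearing the layout. A Python sketch of just that resolution rule; the class names mirror the test, but the mechanism is illustrative, not Rails internals:

```python
import re

TEMPLATES = {
    "layouts/with_string_implied_child",
    "layouts/with_grand_child_of_implied",
}

def snake(name):
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

class WithStringImpliedChild:
    layout = None  # never declared: conventional lookup applies

class WithGrandChildOfImplied(WithStringImpliedChild):
    layout = None  # `layout nil`: explicitly requests the conventional lookup

def resolve(cls):
    if cls.layout is not None:      # an explicit string layout wins
        return cls.layout
    for klass in cls.__mro__[:-1]:  # convention: template named after the class
        candidate = f"layouts/{snake(klass.__name__)}"
        if candidate in TEMPLATES:
            return candidate
    return None

print(resolve(WithGrandChildOfImplied))  # layouts/with_grand_child_of_implied
```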
PHP | PHP | add missing import | 777efcdf458edaffbe759cb633ab847788a336fa | <ide><path>src/Illuminate/Database/Eloquent/Model.php
<ide>
<ide> use DateTime;
<ide> use Exception;
<add>use Throwable;
<ide> use ArrayAccess;
<ide> use Carbon\Carbon;
<ide> use LogicException; | 1 |
Javascript | Javascript | fix tests in amd mode | 6051609df35ef5e478c79c76534c03e4b46100bf | <ide><path>build/tasks/build.js
<ide> module.exports = function( grunt ) {
<ide> skipSemiColonInsertion: true,
<ide> wrap: {
<ide> startFile: "src/intro.js",
<del> endFile: "src/outro.js"
<add> endFile: [ "src/exports/global.js", "src/outro.js" ]
<ide> },
<ide> paths: {
<ide> sizzle: "../external/sizzle/dist/sizzle"
<ide> module.exports = function( grunt ) {
<ide>
<ide> } else {
<ide>
<del> // Ignore jQuery's exports (the only necessary one)
<del> if ( name !== "jquery" ) {
<del> contents = contents
<del> .replace( /\s*return\s+[^\}]+(\}\);[^\w\}]*)$/, "$1" )
<del> // Multiple exports
<del> .replace( /\s*exports\.\w+\s*=\s*\w+;/g, "" );
<del> }
<add> contents = contents
<add> .replace( /\s*return\s+[^\}]+(\}\);[^\w\}]*)$/, "$1" )
<add> // Multiple exports
<add> .replace( /\s*exports\.\w+\s*=\s*\w+;/g, "" );
<ide>
<ide> // Remove define wrappers, closure ends, and empty declarations
<ide> contents = contents
<ide><path>src/core/parseHTML.js
<ide> define([
<ide> "../core",
<ide> "./var/rsingleTag",
<add> "./support",
<add>
<ide> "../manipulation" // buildFragment
<ide> ], function( jQuery, rsingleTag, support ) {
<ide>
<ide><path>src/core/support.js
<ide> define([
<ide> "../var/support"
<del>], function( jQuery, support ) {
<add>], function( support ) {
<ide> // window.document is used here as it's before the sandboxed document
<ide> support.createHTMLDocument = !!window.document.implementation.createHTMLDocument;
<add>
<add> return support;
<ide> });
<ide><path>src/exports/global.js
<del>define([
<del> "../core"
<del>], function( jQuery ) {
<del>
<ide> var
<ide> // Map over jQuery in case of overwrite
<ide> _jQuery = window.jQuery,
<ide> jQuery.noConflict = function( deep ) {
<ide> if ( !noGlobal ) {
<ide> window.jQuery = window.$ = jQuery;
<ide> }
<del>
<del>});
<ide><path>src/jquery.js
<ide> define([
<ide> "./offset",
<ide> "./dimensions",
<ide> "./deprecated",
<del> "./exports/amd",
<del> "./exports/global"
<add> "./exports/amd"
<ide> ], function( jQuery ) {
<ide>
<del>return jQuery;
<add>return (window.jQuery = window.$ = jQuery);
<ide>
<ide> });
<ide><path>src/outro.js
<add>return jQuery;
<ide> }));
<ide><path>test/data/testinit.js
<ide> this.loadTests = function() {
<ide> /**
<ide> * Run in noConflict mode
<ide> */
<del> jQuery.noConflict();
<add> if (jQuery.noConflict) {
<add> jQuery.noConflict();
<add> }
<ide>
<ide> // Load the TestSwarm listener if swarmURL is in the address.
<ide> if ( loadSwarm ) {
<ide><path>test/unit/core.js
<ide> test( "globalEval execution after script injection (#7862)", 1, function() {
<ide> ok( window.strictEvalTest - now < 500, "Code executed synchronously" );
<ide> });
<ide>
<del>test("noConflict", function() {
<del> expect(7);
<add>// This is not run in AMD mode
<add>if (jQuery.noConflict) {
<add> test("noConflict", function() {
<add> expect(7);
<ide>
<del> var $$ = jQuery;
<add> var $$ = jQuery;
<ide>
<del> strictEqual( jQuery, jQuery.noConflict(), "noConflict returned the jQuery object" );
<del> strictEqual( window["jQuery"], $$, "Make sure jQuery wasn't touched." );
<del> strictEqual( window["$"], original$, "Make sure $ was reverted." );
<add> strictEqual( jQuery, jQuery.noConflict(), "noConflict returned the jQuery object" );
<add> strictEqual( window["jQuery"], $$, "Make sure jQuery wasn't touched." );
<add> strictEqual( window["$"], original$, "Make sure $ was reverted." );
<ide>
<del> jQuery = $ = $$;
<add> jQuery = $ = $$;
<ide>
<del> strictEqual( jQuery.noConflict(true), $$, "noConflict returned the jQuery object" );
<del> strictEqual( window["jQuery"], originaljQuery, "Make sure jQuery was reverted." );
<del> strictEqual( window["$"], original$, "Make sure $ was reverted." );
<del> ok( $$().pushStack([]), "Make sure that jQuery still works." );
<add> strictEqual( jQuery.noConflict(true), $$, "noConflict returned the jQuery object" );
<add> strictEqual( window["jQuery"], originaljQuery, "Make sure jQuery was reverted." );
<add> strictEqual( window["$"], original$, "Make sure $ was reverted." );
<add> ok( $$().pushStack([]), "Make sure that jQuery still works." );
<ide>
<del> window["jQuery"] = jQuery = $$;
<del>});
<add> window["jQuery"] = jQuery = $$;
<add> });
<add>}
<ide>
<ide> test("trim", function() {
<ide> expect(13); | 8 |
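The build step's `convert` hook strips each module's trailing `return` and any `exports.*` assignments with regexes before concatenation. The same two substitutions in Python, run on a toy module string (the module text is made up):

```python
import re

contents = """define([], function() {
  var util = {};
  exports.util = util;
  return util;
});"""

# Mirror build.js: drop the `return ...` before the closing `});`,
# then remove standalone `exports.foo = bar;` lines.
contents = re.sub(r"\s*return\s+[^\}]+(\}\);[^\w\}]*)$", r"\1", contents)
contents = re.sub(r"\s*exports\.\w+\s*=\s*\w+;", "", contents)
print(contents)
```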
Python | Python | add xla test | fdcde144d848c338f9a540e8e0ba97ab05a74fff | <ide><path>tests/test_modeling_tf_bart.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make BART float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make BART XLA compliant
<add> pass
<add>
<ide>
<ide> def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
<ide> """If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
<ide><path>tests/test_modeling_tf_blenderbot.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make Blenderbot float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make Blenderbot XLA compliant
<add> pass
<add>
<ide> def test_resize_token_embeddings(self):
<ide> config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
<ide>
<ide><path>tests/test_modeling_tf_blenderbot_small.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make Blenderbot Small float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make Blenderbot Small XLA compliant
<add> pass
<add>
<ide>
<ide> def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
<ide> """If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
<ide><path>tests/test_modeling_tf_common.py
<ide> def run_in_graph_mode():
<ide> outputs = run_in_graph_mode()
<ide> self.assertIsNotNone(outputs)
<ide>
<add> def test_xla_mode(self):
<add> config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
<add> for model_class in self.all_model_classes:
<add> inputs = self._prepare_for_class(inputs_dict, model_class)
<add> model = model_class(config)
<add>
<add> @tf.function(experimental_compile=True)
<add> def run_in_graph_mode():
<add> return model(inputs)
<add>
<add> outputs = run_in_graph_mode()
<add> self.assertIsNotNone(outputs)
<add>
<ide> def test_forward_signature(self):
<ide> config, _ = self.model_tester.prepare_config_and_inputs_for_common()
<ide>
<ide><path>tests/test_modeling_tf_convbert.py
<ide> def test_saved_model_with_attentions_output(self):
<ide> [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
<ide> )
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make ConvBert XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
<ide><path>tests/test_modeling_tf_ctrl.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make CTRL float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make CTRL XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> for model_name in TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
<ide><path>tests/test_modeling_tf_flaubert.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make Flaubert float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make Flaubert XLA compliant
<add> pass
<add>
<ide>
<ide> @require_tf
<ide> @require_sentencepiece
<ide><path>tests/test_modeling_tf_gpt2.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make GPT2 float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make GPT2 XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
<ide><path>tests/test_modeling_tf_led.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make LED float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make LED XLA compliant
<add> pass
<add>
<ide> def test_saved_model_with_attentions_output(self):
<ide> # This test don't pass because of the error:
<ide> # condition [13,8,4,5], then [13,8,4,5], and else [13,8,4,6] must be broadcastable
<ide><path>tests/test_modeling_tf_longformer.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make Longformer float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make Blenderbot XLA compliant
<add> pass
<add>
<ide>
<ide> @require_tf
<ide> @require_sentencepiece
<ide><path>tests/test_modeling_tf_marian.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make Marian float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make Marian XLA compliant
<add> pass
<add>
<ide> def test_resize_token_embeddings(self):
<ide> config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
<ide>
<ide><path>tests/test_modeling_tf_mbart.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make MBart float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make MBart XLA compliant
<add> pass
<add>
<ide> def test_resize_token_embeddings(self):
<ide> config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
<ide>
<ide><path>tests/test_modeling_tf_mpnet.py
<ide> def test_for_token_classification(self):
<ide> config_and_inputs = self.model_tester.prepare_config_and_inputs()
<ide> self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make MPNet XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> for model_name in ["microsoft/mpnet-base"]:
<ide><path>tests/test_modeling_tf_openai.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make OpenAIGPT float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make OpenAIGPT XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
<ide><path>tests/test_modeling_tf_pegasus.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make Pegasus float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make Pegasus XLA compliant
<add> pass
<add>
<ide> def test_resize_token_embeddings(self):
<ide> config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
<ide>
<ide><path>tests/test_modeling_tf_t5.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make T5 float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make T5 XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> model = TFT5Model.from_pretrained("t5-small")
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make T5 float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make T5 XLA compliant
<add> pass
<add>
<ide>
<ide> @require_tf
<ide> @require_sentencepiece
<ide><path>tests/test_modeling_tf_transfo_xl.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make TransfoXL float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make TransfoXL XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
<ide><path>tests/test_modeling_tf_xlm.py
<ide> def test_mixed_precision(self):
<ide> # TODO JP: Make XLM float16 compliant
<ide> pass
<ide>
<add> def test_xla_mode(self):
<add> # TODO JP: Make XLM XLA compliant
<add> pass
<add>
<ide> @slow
<ide> def test_model_from_pretrained(self):
<ide> for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: | 18 |
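The new common test wraps the forward pass in `tf.function(experimental_compile=True)` so every architecture is exercised under XLA. The same pattern standalone; note the flag was the TF 2.x spelling and is `jit_compile` in newer releases:

```python
import tensorflow as tf

@tf.function(experimental_compile=True)  # XLA-compile the traced graph
def forward(x):
    return tf.nn.relu(x) * 2.0

print(forward(tf.constant([-1.0, 3.0])))  # [0. 6.]
```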
Python | Python | batch validation in fit | 03e512a3f4dfbd3c241c5f7824c3f052005d4e6f | <ide><path>keras/models.py
<ide> def _fit(self, f, ins, out_labels=[], batch_size=128, nb_epoch=100, verbose=1, c
<ide> epoch_logs = {}
<ide> if do_validation:
<ide> # replace with self._evaluate
<del> val_outs = val_f(*val_ins)
<add> val_outs = self._test_loop(val_f, val_ins, batch_size=batch_size, verbose=0)
<ide> if type(val_outs) != list:
<ide> val_outs = [val_outs]
<ide> # same labels assumed | 1 |
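The fix swaps a single full-validation-set call of `val_f` for `self._test_loop`, which walks the data in `batch_size` chunks, so validation no longer has to fit in one batch. A minimal sketch of such a loop with a length-weighted average; `f` and `ins` are stand-ins, not Keras internals:

```python
import numpy as np

def test_loop(f, ins, batch_size=128):
    n = len(ins[0])
    total, seen = 0.0, 0
    for start in range(0, n, batch_size):
        batch = [x[start:start + batch_size] for x in ins]
        out = f(*batch)                # per-batch scalar metric
        total += out * len(batch[0])   # weight by the actual batch length
        seen += len(batch[0])
    return total / seen

print(test_loop(lambda x: float(np.mean(x)), [np.arange(10.0)], batch_size=4))
# 4.5
```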
Python | Python | add regression test for [ci skip] | 96bb8f2187569b4f4f6a01bc34b17cabfa8a9f6e | <ide><path>spacy/tests/regression/test_issue4528.py
<add># coding: utf8
<add>from __future__ import unicode_literals
<add>
<add>import pytest
<add>from spacy.tokens import Doc, DocBin
<add>
<add>
<add>@pytest.mark.xfail
<add>def test_issue4528(en_vocab):
<add> """Test that user_data is correctly serialized in DocBin."""
<add> doc = Doc(en_vocab, words=["hello", "world"])
<add> doc.user_data["foo"] = "bar"
<add> # This is how extension attribute values are stored in the user data
<add> doc.user_data[("._.", "foo", None, None)] = "bar"
<add> doc_bin = DocBin(store_user_data=True)
<add> doc_bin.add(doc)
<add> doc_bin_bytes = doc_bin.to_bytes()
<add> new_doc_bin = DocBin(store_user_data=True).from_bytes(doc_bin_bytes)
<add> new_doc = list(new_doc_bin.get_docs(en_vocab))[0]
<add> assert new_doc.user_data["foo"] == "bar"
<add> assert new_doc.user_data[("._.", "foo", None, None)] == "bar" | 1 |
Javascript | Javascript | fix tests in browsers | 2034c1db7eaac61fabd4319af2e9b4478d1d6b52 | <ide><path>test/specs/core/mergeConfig.spec.js
<ide> describe('core::mergeConfig', function() {
<ide> expect(merged.data).toEqual(undefined);
<ide> });
<ide>
<del> ['auth', 'headers', 'params', 'proxy'].forEach(key => {
<del> it(`should set new config for ${key} without default`, function() {
<del> expect(mergeConfig({ [key]: undefined }, { [key]: { user: 'foo', pass: 'test' } })).toEqual({
<del> [key]: { user: 'foo', pass: 'test' }
<del> });
<add> ['auth', 'headers', 'params', 'proxy'].forEach(function(key) {
<add> it('should set new config for' + key + ' without default', function() {
<add> var a = {}, b = {}, c = {}
<add> a[key] = undefined
<add> b[key] = { user: 'foo', pass: 'test' }
<add> c[key] = { user: 'foo', pass: 'test' }
<add>
<add> expect(mergeConfig(a, b)).toEqual(c);
<ide> });
<ide>
<del> it(`should merge ${key} with defaults`, function() {
<del> expect(mergeConfig({ [key]: { user: 'foo', pass: 'bar' } }, { [key]: { pass: 'test' } })).toEqual({
<del> [key]: { user: 'foo', pass: 'test' }
<del> });
<add> it('should merge ' + key + ' with defaults', function() {
<add> var a = {}, b = {}, c = {};
<add> a[key] = { user: 'foo', pass: 'bar' };
<add> b[key] = { pass: 'test' };
<add> c[key] = { user: 'foo', pass: 'test' };
<add>
<add> expect(mergeConfig(a, b)).toEqual(c);
<ide> });
<ide>
<del> it(`should overwrite default ${key} with a non-object value`, function() {
<del> [false, null, 123].forEach(value => {
<del> expect(mergeConfig({ [key]: { user: 'foo', pass: 'test' } }, { [key]: value })).toEqual({
<del> [key]: value
<del> });
<add> it('should overwrite default ' + key + ' with a non-object value', function() {
<add> [false, null, 123].forEach(function(value) {
<add> var a = {}, b = {}, c = {};
<add> a[key] = { user: 'foo', pass: 'test' };
<add> b[key] = value;
<add> c[key] = value;
<add>
<add> expect(mergeConfig(a, b)).toEqual(c);
<ide> });
<ide> });
<ide> }); | 1 |
PHP | PHP | extract container extend tests | cca9bfffc1f0a33bbb42d843208383c8fd3f83ec | <ide><path>tests/Container/ContainerExtendTest.php
<add><?php
<add>
<add>namespace Illuminate\Tests\Container;
<add>
<add>use stdClass;
<add>use PHPUnit\Framework\TestCase;
<add>use Illuminate\Container\Container;
<add>
<add>class ContainerExtendTest extends TestCase
<add>{
<add> public function testExtendedBindings()
<add> {
<add> $container = new Container;
<add> $container['foo'] = 'foo';
<add> $container->extend('foo', function ($old, $container) {
<add> return $old.'bar';
<add> });
<add>
<add> $this->assertEquals('foobar', $container->make('foo'));
<add>
<add> $container = new Container;
<add>
<add> $container->singleton('foo', function () {
<add> return (object) ['name' => 'taylor'];
<add> });
<add> $container->extend('foo', function ($old, $container) {
<add> $old->age = 26;
<add>
<add> return $old;
<add> });
<add>
<add> $result = $container->make('foo');
<add>
<add> $this->assertEquals('taylor', $result->name);
<add> $this->assertEquals(26, $result->age);
<add> $this->assertSame($result, $container->make('foo'));
<add> }
<add>
<add> public function testExtendInstancesArePreserved()
<add> {
<add> $container = new Container;
<add> $container->bind('foo', function () {
<add> $obj = new stdClass;
<add> $obj->foo = 'bar';
<add>
<add> return $obj;
<add> });
<add>
<add> $obj = new stdClass;
<add> $obj->foo = 'foo';
<add> $container->instance('foo', $obj);
<add> $container->extend('foo', function ($obj, $container) {
<add> $obj->bar = 'baz';
<add>
<add> return $obj;
<add> });
<add> $container->extend('foo', function ($obj, $container) {
<add> $obj->baz = 'foo';
<add>
<add> return $obj;
<add> });
<add>
<add> $this->assertEquals('foo', $container->make('foo')->foo);
<add> $this->assertEquals('baz', $container->make('foo')->bar);
<add> $this->assertEquals('foo', $container->make('foo')->baz);
<add> }
<add>
<add> public function testExtendIsLazyInitialized()
<add> {
<add> ContainerLazyExtendStub::$initialized = false;
<add>
<add> $container = new Container;
<add> $container->bind(ContainerLazyExtendStub::class);
<add> $container->extend(ContainerLazyExtendStub::class, function ($obj, $container) {
<add> $obj->init();
<add>
<add> return $obj;
<add> });
<add> $this->assertFalse(ContainerLazyExtendStub::$initialized);
<add> $container->make(ContainerLazyExtendStub::class);
<add> $this->assertTrue(ContainerLazyExtendStub::$initialized);
<add> }
<add>
<add> public function testExtendCanBeCalledBeforeBind()
<add> {
<add> $container = new Container;
<add> $container->extend('foo', function ($old, $container) {
<add> return $old.'bar';
<add> });
<add> $container['foo'] = 'foo';
<add>
<add> $this->assertEquals('foobar', $container->make('foo'));
<add> }
<add>
<add> public function testExtendInstanceRebindingCallback()
<add> {
<add> $_SERVER['_test_rebind'] = false;
<add>
<add> $container = new Container;
<add> $container->rebinding('foo', function () {
<add> $_SERVER['_test_rebind'] = true;
<add> });
<add>
<add> $obj = new stdClass;
<add> $container->instance('foo', $obj);
<add>
<add> $container->extend('foo', function ($obj, $container) {
<add> return $obj;
<add> });
<add>
<add> $this->assertTrue($_SERVER['_test_rebind']);
<add> }
<add>
<add> public function testExtendBindRebindingCallback()
<add> {
<add> $_SERVER['_test_rebind'] = false;
<add>
<add> $container = new Container;
<add> $container->rebinding('foo', function () {
<add> $_SERVER['_test_rebind'] = true;
<add> });
<add> $container->bind('foo', function () {
<add> return new stdClass;
<add> });
<add>
<add> $this->assertFalse($_SERVER['_test_rebind']);
<add>
<add> $container->make('foo');
<add>
<add> $container->extend('foo', function ($obj, $container) {
<add> return $obj;
<add> });
<add>
<add> $this->assertTrue($_SERVER['_test_rebind']);
<add> }
<add>
<add> public function testExtensionWorksOnAliasedBindings()
<add> {
<add> $container = new Container;
<add> $container->singleton('something', function () {
<add> return 'some value';
<add> });
<add> $container->alias('something', 'something-alias');
<add> $container->extend('something-alias', function ($value) {
<add> return $value.' extended';
<add> });
<add>
<add> $this->assertEquals('some value extended', $container->make('something'));
<add> }
<add>
<add> public function testMultipleExtends()
<add> {
<add> $container = new Container;
<add> $container['foo'] = 'foo';
<add> $container->extend('foo', function ($old, $container) {
<add> return $old.'bar';
<add> });
<add> $container->extend('foo', function ($old, $container) {
<add> return $old.'baz';
<add> });
<add>
<add> $this->assertEquals('foobarbaz', $container->make('foo'));
<add> }
<add>
<add> public function testUnsetExtend()
<add> {
<add> $container = new Container;
<add> $container->bind('foo', function () {
<add> $obj = new stdClass;
<add> $obj->foo = 'bar';
<add>
<add> return $obj;
<add> });
<add>
<add> $container->extend('foo', function ($obj, $container) {
<add> $obj->bar = 'baz';
<add>
<add> return $obj;
<add> });
<add>
<add> unset($container['foo']);
<add> $container->forgetExtenders('foo');
<add>
<add> $container->bind('foo', function () {
<add> return 'foo';
<add> });
<add>
<add> $this->assertEquals('foo', $container->make('foo'));
<add> }
<add>}
<add>
<add>class ContainerLazyExtendStub
<add>{
<add> public static $initialized = false;
<add>
<add> public function init()
<add> {
<add> static::$initialized = true;
<add> }
<add>}
<ide><path>tests/Container/ContainerTest.php
<ide> public function testBindingsCanBeOverridden()
<ide> $this->assertEquals('baz', $container['foo']);
<ide> }
<ide>
<del> public function testExtendedBindings()
<del> {
<del> $container = new Container;
<del> $container['foo'] = 'foo';
<del> $container->extend('foo', function ($old, $container) {
<del> return $old.'bar';
<del> });
<del>
<del> $this->assertEquals('foobar', $container->make('foo'));
<del>
<del> $container = new Container;
<del>
<del> $container->singleton('foo', function () {
<del> return (object) ['name' => 'taylor'];
<del> });
<del> $container->extend('foo', function ($old, $container) {
<del> $old->age = 26;
<del>
<del> return $old;
<del> });
<del>
<del> $result = $container->make('foo');
<del>
<del> $this->assertEquals('taylor', $result->name);
<del> $this->assertEquals(26, $result->age);
<del> $this->assertSame($result, $container->make('foo'));
<del> }
<del>
<del> public function testMultipleExtends()
<del> {
<del> $container = new Container;
<del> $container['foo'] = 'foo';
<del> $container->extend('foo', function ($old, $container) {
<del> return $old.'bar';
<del> });
<del> $container->extend('foo', function ($old, $container) {
<del> return $old.'baz';
<del> });
<del>
<del> $this->assertEquals('foobarbaz', $container->make('foo'));
<del> }
<del>
<ide> public function testBindingAnInstanceReturnsTheInstance()
<ide> {
<ide> $container = new Container;
<ide> public function testBindingAnInstanceReturnsTheInstance()
<ide> $this->assertSame($bound, $resolved);
<ide> }
<ide>
<del> public function testExtendInstancesArePreserved()
<del> {
<del> $container = new Container;
<del> $container->bind('foo', function () {
<del> $obj = new stdClass;
<del> $obj->foo = 'bar';
<del>
<del> return $obj;
<del> });
<del> $obj = new stdClass;
<del> $obj->foo = 'foo';
<del> $container->instance('foo', $obj);
<del> $container->extend('foo', function ($obj, $container) {
<del> $obj->bar = 'baz';
<del>
<del> return $obj;
<del> });
<del> $container->extend('foo', function ($obj, $container) {
<del> $obj->baz = 'foo';
<del>
<del> return $obj;
<del> });
<del>
<del> $this->assertEquals('foo', $container->make('foo')->foo);
<del> $this->assertEquals('baz', $container->make('foo')->bar);
<del> $this->assertEquals('foo', $container->make('foo')->baz);
<del> }
<del>
<del> public function testExtendIsLazyInitialized()
<del> {
<del> ContainerLazyExtendStub::$initialized = false;
<del>
<del> $container = new Container;
<del> $container->bind(ContainerLazyExtendStub::class);
<del> $container->extend(ContainerLazyExtendStub::class, function ($obj, $container) {
<del> $obj->init();
<del>
<del> return $obj;
<del> });
<del> $this->assertFalse(ContainerLazyExtendStub::$initialized);
<del> $container->make(ContainerLazyExtendStub::class);
<del> $this->assertTrue(ContainerLazyExtendStub::$initialized);
<del> }
<del>
<del> public function testExtendCanBeCalledBeforeBind()
<del> {
<del> $container = new Container;
<del> $container->extend('foo', function ($old, $container) {
<del> return $old.'bar';
<del> });
<del> $container['foo'] = 'foo';
<del>
<del> $this->assertEquals('foobar', $container->make('foo'));
<del> }
<del>
<del> public function testExtendInstanceRebindingCallback()
<del> {
<del> $_SERVER['_test_rebind'] = false;
<del>
<del> $container = new Container;
<del> $container->rebinding('foo', function () {
<del> $_SERVER['_test_rebind'] = true;
<del> });
<del>
<del> $obj = new stdClass;
<del> $container->instance('foo', $obj);
<del>
<del> $container->extend('foo', function ($obj, $container) {
<del> return $obj;
<del> });
<del>
<del> $this->assertTrue($_SERVER['_test_rebind']);
<del> }
<del>
<del> public function testExtendBindRebindingCallback()
<del> {
<del> $_SERVER['_test_rebind'] = false;
<del>
<del> $container = new Container;
<del> $container->rebinding('foo', function () {
<del> $_SERVER['_test_rebind'] = true;
<del> });
<del> $container->bind('foo', function () {
<del> return new stdClass;
<del> });
<del>
<del> $this->assertFalse($_SERVER['_test_rebind']);
<del>
<del> $container->make('foo');
<del>
<del> $container->extend('foo', function ($obj, $container) {
<del> return $obj;
<del> });
<del>
<del> $this->assertTrue($_SERVER['_test_rebind']);
<del> }
<del>
<del> public function testUnsetExtend()
<del> {
<del> $container = new Container;
<del> $container->bind('foo', function () {
<del> $obj = new stdClass;
<del> $obj->foo = 'bar';
<del>
<del> return $obj;
<del> });
<del>
<del> $container->extend('foo', function ($obj, $container) {
<del> $obj->bar = 'baz';
<del>
<del> return $obj;
<del> });
<del>
<del> unset($container['foo']);
<del> $container->forgetExtenders('foo');
<del>
<del> $container->bind('foo', function () {
<del> return 'foo';
<del> });
<del>
<del> $this->assertEquals('foo', $container->make('foo'));
<del> }
<del>
<ide> public function testResolutionOfDefaultParameters()
<ide> {
<ide> $container = new Container;
<ide> public function testContainerGetFactory()
<ide> $this->assertEquals($container->make('name'), $factory());
<ide> }
<ide>
<del> public function testExtensionWorksOnAliasedBindings()
<del> {
<del> $container = new Container;
<del> $container->singleton('something', function () {
<del> return 'some value';
<del> });
<del> $container->alias('something', 'something-alias');
<del> $container->extend('something-alias', function ($value) {
<del> return $value.' extended';
<del> });
<del>
<del> $this->assertEquals('some value extended', $container->make('something'));
<del> }
<del>
<ide> public function testMakeWithMethodIsAnAliasForMakeMethod()
<ide> {
<ide> $mock = $this->getMockBuilder(Container::class)
<ide> public function __construct($first, ContainerConcreteStub $stub, $last)
<ide> }
<ide> }
<ide>
<del>class ContainerLazyExtendStub
<del>{
<del> public static $initialized = false;
<del>
<del> public function init()
<del> {
<del> static::$initialized = true;
<del> }
<del>}
<del>
<ide> class ContainerInjectVariableStub
<ide> {
<ide> public $something; | 2 |
Python | Python | add condition around mask transformation | cd286c2145221f3d1372aef103d0bc3ed03879da | <ide><path>transformers/modeling_bert.py
<ide> def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, posi
<ide>
<ide> if attention_mask is None:
<ide> attention_mask = torch.ones(input_shape, device=device)
<del> if self.config.is_decoder and encoder_attention_mask is None:
<del> encoder_attention_mask = torch.ones(input_shape, device=device)
<ide> if token_type_ids is None:
<ide> token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
<ide>
<ide> def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, posi
<ide>
<ide> # If a 2D ou 3D attention mask is provided for the cross-attention
<ide> # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length]
<del> if encoder_attention_mask.dim() == 3:
<del> encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
<del> if encoder_attention_mask.dim() == 2:
<del> encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
<add> if self.config.is_decoder:
<add> if encoder_attention_mask is None:
<add> encoder_attention_mask = torch.ones(input_shape, device=device)
<ide>
<del> encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
<del> encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
<add> if encoder_attention_mask.dim() == 3:
<add> encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
<add> if encoder_attention_mask.dim() == 2:
<add> encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
<add>
<add> encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
<add> encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
<add> else:
<add> encoder_extended_attention_mask = None
<ide>
<ide> # Prepare head mask if needed
<ide> # 1.0 in head_mask indicate we keep the head | 1 |
Ruby | Ruby | fix circular import warning on build | 179b9f6a25705a456ae5544948a914adba60421e | <ide><path>activerecord/lib/active_record/schema_migration.rb
<ide> require 'active_record/scoping/default'
<ide> require 'active_record/scoping/named'
<del>require 'active_record/base'
<ide>
<ide> module ActiveRecord
<ide> class SchemaMigration < ActiveRecord::Base | 1 |
Ruby | Ruby | encourage usage of the new clt4xcode | f37e22838cb0df651f50dd063135d975f7d30e3f | <ide><path>install_homebrew.rb
<ide> def macos_version
<ide> end
<ide>
<ide> warn "/usr/local/bin is not in your PATH." unless ENV['PATH'].split(':').include? '/usr/local/bin'
<del>warn "Now install Xcode: http://developer.apple.com/technologies/xcode.html" unless Kernel.system "/usr/bin/which -s gcc"
<add>
<add>if macos_version < 10.7
<add> warn "Now install Xcode: http://developer.apple.com/technologies/xcode.html" unless Kernel.system "/usr/bin/which -s gcc"
<add>else
<add> warn "Install \"Command Line Tools for Xcode\": http://developer.apple.com/downloads" unless File.file? "/usr/bin/xcrun"
<add>end
<ide>
<ide> unless badlibs.empty?
<ide> warn "The following *evil* dylibs exist in /usr/local/lib" | 1 |
Text | Text | fix typo on pull request template | d2b4cfb3873134d04f1e89995475f4c6b38dbac4 | <ide><path>.github/PULL_REQUEST_TEMPLATE.md
<ide> <!--
<del>Please only send a pull request to branches which are currently supported: https://laravel.com/docs/releases#support-policy
<add>Please only send a pull request to branches that are currently supported: https://laravel.com/docs/releases#support-policy
<ide>
<ide> If you are unsure which branch your pull request should be sent to, please read: https://laravel.com/docs/contributions#which-branch
<ide> | 1 |
Javascript | Javascript | cache new dataview to increase performance | 552d0ed3ffcb75d639ac334d95b19da4058babc1 | <ide><path>examples/js/loaders/EXRLoader.js
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide> return true;
<ide> }
<ide>
<del> function getChar(c, lc, inBuffer, inOffset) {
<del> c = (c << 8) | parseUint8(inBuffer, inOffset);
<add> function getChar(c, lc, inDataView, inOffset) {
<add> c = (c << 8) | parseUint8DataView(inDataView, inOffset);
<ide> lc += 8;
<ide>
<ide> return { c: c, lc: lc };
<ide> }
<ide>
<del> function getCode(po, rlc, c, lc, inBuffer, inOffset, outBuffer, outBufferOffset, outBufferEndOffset) {
<add> function getCode(po, rlc, c, lc, inDataView, inOffset, outBuffer, outBufferOffset, outBufferEndOffset) {
<ide> if (po == rlc) {
<ide> if (lc < 8) {
<del> var temp = getChar(c, lc, inBuffer, inOffset);
<add> var temp = getChar(c, lc, inDataView, inOffset);
<ide> c = temp.c;
<ide> lc = temp.lc;
<ide> }
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide> var M_OFFSET = 1 << (NBITS - 1);
<ide> var MOD_MASK = (1 << NBITS) - 1;
<ide>
<add> function UInt16(value) {
<add> return (value & 0xFFFF);
<add> };
<add>
<add> function Int16(value) {
<add> var ref = UInt16(value);
<add> return (ref > 0x7FFF) ? ref - 0x10000 : ref;
<add> };
<add>
<ide> function wdec14(l, h) {
<del> var ls = (new Int16Array([l]))[0];
<del> var hs = (new Int16Array([h]))[0];
<add> var ls = Int16(l);
<add> var hs = Int16(h);
<ide>
<ide> var hi = hs;
<ide> var ai = ls + (hi & 1) + (hi >> 1);
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide> var outBufferEndOffset = no;
<ide> var inOffsetEnd = parseInt(inOffset.value + (ni + 7) / 8);
<ide>
<add> var dataView = new DataView(inBuffer);
<add>
<ide> while (inOffset.value < inOffsetEnd) {
<del> var temp = getChar(c, lc, inBuffer, inOffset);
<add> var temp = getChar(c, lc, dataView, inOffset);
<ide> c = temp.c;
<ide> lc = temp.lc;
<ide>
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide>
<ide> if (pl.len) {
<ide> lc -= pl.len;
<del> var temp = getCode(pl.lit, rlc, c, lc, inBuffer, inOffset, outBuffer, outOffset, outBufferEndOffset);
<add> var temp = getCode(pl.lit, rlc, c, lc, dataView, inOffset, outBuffer, outOffset, outBufferEndOffset);
<ide> c = temp.c;
<ide> lc = temp.lc;
<ide> } else {
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide> var l = hufLength(encodingTable[pl.p[j]]);
<ide>
<ide> while (lc < l && inOffset.value < inOffsetEnd) {
<del> var temp = getChar(c, lc, inBuffer, inOffset);
<add> var temp = getChar(c, lc, dataView, inOffset);
<ide> c = temp.c;
<ide> lc = temp.lc;
<ide> }
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide> ((c >> (lc - l)) & ((1 << l) - 1))) {
<ide>
<ide> lc -= l;
<del> var temp = getCode(pl.p[j], rlc, c, lc, inBuffer, inOffset, outBuffer, outOffset, outBufferEndOffset);
<add> var temp = getCode(pl.p[j], rlc, c, lc, dataView, inOffset, outBuffer, outOffset, outBufferEndOffset);
<ide> c = temp.c;
<ide> lc = temp.lc;
<ide> break;
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide>
<ide> if (pl.len) {
<ide> lc -= pl.len;
<del> var temp = getCode(pl.lit, rlc, c, lc, inBuffer, inOffset, outBuffer, outOffset, outBufferEndOffset);
<add> var temp = getCode(pl.lit, rlc, c, lc, dataView, inOffset, outBuffer, outOffset, outBufferEndOffset);
<ide> c = temp.c;
<ide> lc = temp.lc;
<ide> } else {
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide> hufBuildDecTable(freq, im, iM, hdec);
<ide>
<ide> hufDecode(freq, hdec, inBuffer, inOffset, nBits, iM, nRaw, outBuffer, outOffset);
<del> }
<add> }
<ide>
<del> function applyLut(lut, data, nData) {
<del> for (var i = 0; i < nData; ++i) {
<del> data[i] = lut[data[i]];
<del> }
<add> function applyLut(lut, data, nData) {
<add> for (var i = 0; i < nData; ++i) {
<add> data[i] = lut[data[i]];
<ide> }
<add> }
<ide>
<ide> function decompressPIZ(outBuffer, outOffset, inBuffer, inOffset, tmpBufSize, num_channels, exrChannelInfos, dataWidth, num_lines) {
<ide> var bitmap = new Uint8Array(BITMAP_SIZE);
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide>
<ide> }
<ide>
<add> function parseUint8DataView( dataView, offset ) {
<add>
<add> var Uint8 = dataView.getUint8(offset.value, true);
<add>
<add> offset.value = offset.value + 1;
<add>
<add> return Uint8;
<add> }
<add>
<ide> function parseUint8( buffer, offset ) {
<ide>
<ide> var Uint8 = new DataView( buffer.slice( offset.value, offset.value + 1 ) ).getUint8( 0, true );
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide>
<ide> for ( var channelID = 0; channelID < EXRHeader.channels.length; channelID ++ ) {
<ide>
<add> var cOff = channelOffsets[ EXRHeader.channels[ channelID ].name ];
<add>
<ide> if ( EXRHeader.channels[ channelID ].pixelType == 1 ) {
<ide>
<ide> // HALF
<ide> for ( var x = 0; x < width; x ++ ) {
<ide>
<ide> var val = parseFloat16( buffer, offset );
<del> var cOff = channelOffsets[ EXRHeader.channels[ channelID ].name ];
<ide>
<ide> byteArray[ ( ( ( width - y_scanline ) * ( height * numChannels ) ) + ( x * numChannels ) ) + cOff ] = val;
<ide>
<ide> THREE.EXRLoader.prototype._parser = function ( buffer ) {
<ide>
<ide> for ( var channelID = 0; channelID < EXRHeader.channels.length; channelID ++ ) {
<ide>
<add> var cOff = channelOffsets[ EXRHeader.channels[ channelID ].name ];
<add>
<ide> if ( EXRHeader.channels[ channelID ].pixelType == 1 ) {
<ide>
<ide> // HALF
<ide> for ( var x = 0; x < width; x ++ ) {
<ide>
<del> var cOff = channelOffsets[ EXRHeader.channels[ channelID ].name ];
<ide> var val = decodeFloat16(tmpBuffer[ (channelID * (scanlineBlockSize * width)) + (line_y * width) + x ]);
<ide>
<ide> var true_y = line_y + (scanlineBlockIdx * scanlineBlockSize); | 1 |
Javascript | Javascript | add some tests to improve coverage | 774493e23d1e3ee99f7d865e7852baf73bd9fb44 | <ide><path>test/matchers.js
<ide> function toBeCloseToPixel() {
<ide> };
<ide> }
<ide>
<add>function toBeCloseToPoint() {
<add> function rnd(v) {
<add> return Math.round(v * 100) / 100;
<add> }
<add> return {
<add> compare: function(actual, expected) {
<add> return {
<add> pass: rnd(actual.x) === rnd(expected.x) && rnd(actual.y) === rnd(expected.y)
<add> };
<add> }
<add> };
<add>}
<add>
<ide> function toEqualOneOf() {
<ide> return {
<ide> compare: function(actual, expecteds) {
<ide> function toEqualImageData() {
<ide> }
<ide>
<ide> export default {
<del> toBeCloseToPixel: toBeCloseToPixel,
<del> toEqualOneOf: toEqualOneOf,
<del> toBeValidChart: toBeValidChart,
<del> toBeChartOfSize: toBeChartOfSize,
<del> toEqualImageData: toEqualImageData
<add> toBeCloseToPixel,
<add> toBeCloseToPoint,
<add> toEqualOneOf,
<add> toBeValidChart,
<add> toBeChartOfSize,
<add> toEqualImageData
<ide> };
<ide><path>test/specs/core.helpers.tests.js
<ide> describe('Core helper tests', function() {
<ide> expect(helpers.uid()).toBe(uid + 2);
<ide> expect(helpers.uid()).toBe(uid + 3);
<ide> });
<del>
<del> describe('Color helper', function() {
<del> function isColorInstance(obj) {
<del> return typeof obj === 'object' && obj.valid;
<del> }
<del>
<del> it('should return a color when called with a color', function() {
<del> expect(isColorInstance(helpers.color('rgb(1, 2, 3)'))).toBe(true);
<del> });
<del> });
<del>
<del> describe('Background hover color helper', function() {
<del> it('should return a CanvasPattern when called with a CanvasPattern', function(done) {
<del> var dots = new Image();
<del> dots.src = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAMAAAAolt3jAAAAD1BMVEUAAAD///////////////+PQt5oAAAABXRSTlMAHlFhZsfk/BEAAAAqSURBVHgBY2BgZGJmYmSAAUYWEIDzmcBcJhiXGcxlRpPFrhdmMiqgvX0AcGIBEUAo6UAAAAAASUVORK5CYII=';
<del> dots.onload = function() {
<del> var chartContext = document.createElement('canvas').getContext('2d');
<del> var patternCanvas = document.createElement('canvas');
<del> var patternContext = patternCanvas.getContext('2d');
<del> var pattern = patternContext.createPattern(dots, 'repeat');
<del> patternContext.fillStyle = pattern;
<del>
<del> var backgroundColor = helpers.getHoverColor(chartContext.createPattern(patternCanvas, 'repeat'));
<del>
<del> expect(backgroundColor instanceof CanvasPattern).toBe(true);
<del>
<del> done();
<del> };
<del> });
<del>
<del> it('should return a CanvasGradient when called with a CanvasGradient', function() {
<del> var context = document.createElement('canvas').getContext('2d');
<del> var gradient = context.createLinearGradient(0, 1, 2, 3);
<del>
<del> expect(helpers.getHoverColor(gradient) instanceof CanvasGradient).toBe(true);
<del> });
<del>
<del> it('should return a modified version of color when called with a color', function() {
<del> var originalColorRGB = 'rgb(70, 191, 189)';
<del>
<del> expect(helpers.getHoverColor('#46BFBD')).not.toEqual(originalColorRGB);
<del> });
<del> });
<ide> });
<ide><path>test/specs/helpers.collection.tests.js
<add>import {_lookup, _lookupByKey, _rlookupByKey} from '../../src/helpers/helpers.collection';
<add>
<add>describe('helpers.interpolation', function() {
<add> it('Should do binary search', function() {
<add> const data = [0, 2, 6, 9];
<add> expect(_lookup(data, 0)).toEqual({lo: 0, hi: 1});
<add> expect(_lookup(data, 1)).toEqual({lo: 0, hi: 1});
<add> expect(_lookup(data, 3)).toEqual({lo: 1, hi: 2});
<add> expect(_lookup(data, 6)).toEqual({lo: 1, hi: 2});
<add> expect(_lookup(data, 9)).toEqual({lo: 2, hi: 3});
<add> });
<add>
<add> it('Should do binary search by key', function() {
<add> const data = [{x: 0}, {x: 2}, {x: 6}, {x: 9}];
<add> expect(_lookupByKey(data, 'x', 0)).toEqual({lo: 0, hi: 1});
<add> expect(_lookupByKey(data, 'x', 1)).toEqual({lo: 0, hi: 1});
<add> expect(_lookupByKey(data, 'x', 3)).toEqual({lo: 1, hi: 2});
<add> expect(_lookupByKey(data, 'x', 6)).toEqual({lo: 1, hi: 2});
<add> expect(_lookupByKey(data, 'x', 9)).toEqual({lo: 2, hi: 3});
<add> });
<add>
<add> it('Should do reverse binary search by key', function() {
<add> const data = [{x: 10}, {x: 7}, {x: 3}, {x: 0}];
<add> expect(_rlookupByKey(data, 'x', 0)).toEqual({lo: 2, hi: 3});
<add> expect(_rlookupByKey(data, 'x', 3)).toEqual({lo: 2, hi: 3});
<add> expect(_rlookupByKey(data, 'x', 5)).toEqual({lo: 1, hi: 2});
<add> expect(_rlookupByKey(data, 'x', 8)).toEqual({lo: 0, hi: 1});
<add> expect(_rlookupByKey(data, 'x', 10)).toEqual({lo: 0, hi: 1});
<add> });
<add>});
<ide><path>test/specs/helpers.color.tests.js
<add>import {color, getHoverColor} from '../../src/helpers/helpers.color';
<add>
<add>describe('Color helper', function() {
<add> function isColorInstance(obj) {
<add> return typeof obj === 'object' && obj.valid;
<add> }
<add>
<add> it('should return a color when called with a color', function() {
<add> expect(isColorInstance(color('rgb(1, 2, 3)'))).toBe(true);
<add> });
<add>});
<add>
<add>describe('Background hover color helper', function() {
<add> it('should return a CanvasPattern when called with a CanvasPattern', function(done) {
<add> var dots = new Image();
<add> dots.src = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAMAAAAolt3jAAAAD1BMVEUAAAD///////////////+PQt5oAAAABXRSTlMAHlFhZsfk/BEAAAAqSURBVHgBY2BgZGJmYmSAAUYWEIDzmcBcJhiXGcxlRpPFrhdmMiqgvX0AcGIBEUAo6UAAAAAASUVORK5CYII=';
<add> dots.onload = function() {
<add> var chartContext = document.createElement('canvas').getContext('2d');
<add> var patternCanvas = document.createElement('canvas');
<add> var patternContext = patternCanvas.getContext('2d');
<add> var pattern = patternContext.createPattern(dots, 'repeat');
<add> patternContext.fillStyle = pattern;
<add>
<add> var backgroundColor = getHoverColor(chartContext.createPattern(patternCanvas, 'repeat'));
<add>
<add> expect(backgroundColor instanceof CanvasPattern).toBe(true);
<add>
<add> done();
<add> };
<add> });
<add>
<add> it('should return a CanvasGradient when called with a CanvasGradient', function() {
<add> var context = document.createElement('canvas').getContext('2d');
<add> var gradient = context.createLinearGradient(0, 1, 2, 3);
<add>
<add> expect(getHoverColor(gradient) instanceof CanvasGradient).toBe(true);
<add> });
<add>
<add> it('should return a modified version of color when called with a color', function() {
<add> var originalColorRGB = 'rgb(70, 191, 189)';
<add>
<add> expect(getHoverColor('#46BFBD')).not.toEqual(originalColorRGB);
<add> });
<add>});
<ide><path>test/specs/helpers.interpolation.tests.js
<add>import {_pointInLine, _steppedInterpolation, _bezierInterpolation} from '../../src/helpers/helpers.interpolation';
<add>
<add>describe('helpers.interpolation', function() {
<add> it('Should interpolate a point in line', function() {
<add> expect(_pointInLine({x: 10, y: 10}, {x: 20, y: 20}, 0)).toEqual({x: 10, y: 10});
<add> expect(_pointInLine({x: 10, y: 10}, {x: 20, y: 20}, 0.5)).toEqual({x: 15, y: 15});
<add> expect(_pointInLine({x: 10, y: 10}, {x: 20, y: 20}, 1)).toEqual({x: 20, y: 20});
<add> });
<add>
<add> it('Should intepolate a point in stepped line', function() {
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0, 'before')).toEqual({x: 10, y: 10});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0.4, 'before')).toEqual({x: 14, y: 20});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0.5, 'before')).toEqual({x: 15, y: 20});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 1, 'before')).toEqual({x: 20, y: 20});
<add>
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0, 'middle')).toEqual({x: 10, y: 10});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0.4, 'middle')).toEqual({x: 14, y: 10});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0.5, 'middle')).toEqual({x: 15, y: 20});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 1, 'middle')).toEqual({x: 20, y: 20});
<add>
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0, 'after')).toEqual({x: 10, y: 10});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0.4, 'after')).toEqual({x: 14, y: 10});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 0.5, 'after')).toEqual({x: 15, y: 10});
<add> expect(_steppedInterpolation({x: 10, y: 10}, {x: 20, y: 20}, 1, 'after')).toEqual({x: 20, y: 20});
<add> });
<add>
<add> it('Should interpolate a point in curve', function() {
<add> const pt1 = {x: 10, y: 10, controlPointNextX: 12, controlPointNextY: 12};
<add> const pt2 = {x: 20, y: 30, controlPointPreviousX: 18, controlPointPreviousY: 28};
<add>
<add> expect(_bezierInterpolation(pt1, pt2, 0)).toEqual({x: 10, y: 10});
<add> expect(_bezierInterpolation(pt1, pt2, 0.2)).toBeCloseToPoint({x: 11.616, y: 12.656});
<add> expect(_bezierInterpolation(pt1, pt2, 1)).toEqual({x: 20, y: 30});
<add> });
<add>}); | 5 |
PHP | PHP | fix redirectroute by stopping execution | 841e7aa560d27f14c6f381a3548c8d8c74e4a463 | <ide><path>lib/Cake/Network/CakeResponse.php
<ide> public function send() {
<ide> foreach ($this->_headers as $header => $value) {
<ide> $this->_sendHeader($header, $value);
<ide> }
<add>
<add> if (isset($this->_headers['Location'])) {
<add> $this->_stop();
<add> }
<ide> $this->_sendContent($this->_body);
<ide> }
<ide>
<ide> public function download($filename) {
<ide> public function __toString() {
<ide> return (string)$this->_body;
<ide> }
<add>
<add>/**
<add> * Stop execution of the current script. Wraps exit() making
<add> * testing easier.
<add> *
<add> * @param integer|string $status see http://php.net/exit for values
<add> * @return void
<add> */
<add> protected function _stop($status = 0) {
<add> exit($status);
<add> }
<ide> }
<ide><path>lib/Cake/Test/Case/Network/CakeResponseTest.php
<ide> public function testSendChangingContentType() {
<ide> *
<ide> */
<ide> public function testSendWithLocation() {
<del> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent'));
<add> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent', '_stop'));
<ide> $response->header('Location', 'http://www.example.com');
<ide> $response->expects($this->at(0))
<ide> ->method('_sendHeader')->with('HTTP/1.1 302 Found');
<ide><path>lib/Cake/Test/Case/Routing/Route/RedirectRouteTest.php
<ide> public function setUp() {
<ide> */
<ide> public function testParsing() {
<ide> $route = new RedirectRoute('/home', array('controller' => 'posts'));
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/home');
<ide> $this->assertEqual($route->response->header(), array('Location' => Router::url('/posts', true)));
<ide>
<ide> $route = new RedirectRoute('/home', array('controller' => 'posts', 'action' => 'index'));
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/home');
<ide> $this->assertEqual($route->response->header(), array('Location' => Router::url('/posts', true)));
<ide> $this->assertEqual($route->response->statusCode(), 301);
<ide>
<ide> $route = new RedirectRoute('/google', 'http://google.com');
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/google');
<ide> $this->assertEqual($route->response->header(), array('Location' => 'http://google.com'));
<ide>
<ide> $route = new RedirectRoute('/posts/*', array('controller' => 'posts', 'action' => 'view'), array('status' => 302));
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/posts/2');
<ide> $this->assertEqual($route->response->header(), array('Location' => Router::url('/posts/view', true)));
<ide> $this->assertEqual($route->response->statusCode(), 302);
<ide>
<ide> $route = new RedirectRoute('/posts/*', array('controller' => 'posts', 'action' => 'view'), array('persist' => true));
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/posts/2');
<ide> $this->assertEqual($route->response->header(), array('Location' => Router::url('/posts/view/2', true)));
<ide>
<ide> $route = new RedirectRoute('/posts/*', '/test', array('persist' => true));
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/posts/2');
<ide> $this->assertEqual($route->response->header(), array('Location' => Router::url('/test', true)));
<ide>
<ide> $route = new RedirectRoute('/my_controllers/:action/*', array('controller' => 'tags', 'action' => 'add'), array('persist' => true));
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/my_controllers/do_something/passme/named:param');
<ide> $this->assertEqual($route->response->header(), array('Location' => Router::url('/tags/add/passme/named:param', true)));
<ide>
<ide> $route = new RedirectRoute('/my_controllers/:action/*', array('controller' => 'tags', 'action' => 'add'));
<del> $route->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> $route->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $result = $route->parse('/my_controllers/do_something/passme/named:param');
<ide> $this->assertEqual($route->response->header(), array('Location' => Router::url('/tags/add', true)));
<ide> }
<ide><path>lib/Cake/Test/Case/Routing/RouterTest.php
<ide> public function testPatternOnAction() {
<ide> public function testRouteRedirection() {
<ide> Router::redirect('/blog', array('controller' => 'posts'), array('status' => 302));
<ide> $this->assertEqual(count(Router::$routes), 1);
<del> Router::$routes[0]->response = $this->getMock('CakeResponse', array('_sendHeader'));
<add> Router::$routes[0]->response = $this->getMock('CakeResponse', array('_sendHeader', '_stop'));
<ide> $this->assertEqual(Router::$routes[0]->options['status'], 302);
<ide>
<ide> Router::parse('/blog'); | 4 |
PHP | PHP | remove non primary key indexes | dccf9aca465630842804523e6104a4da8ceb132a | <ide><path>lib/Cake/Console/Command/UpgradeShell.php
<ide> public function fixtures() {
<ide> $this->out(__d('cake_console', 'Updating %s...', $file), 1, Shell::VERBOSE);
<ide> $content = $this->_processFixture(file_get_contents($file));
<ide> if (empty($this->params['dryRun'])) {
<del> file_put_contents($file, $content);
<add> // file_put_contents($file, $content);
<ide> }
<ide> }
<ide> }
<ide> protected function _processFixture($content) {
<ide> $constraints = [];
<ide> $out = [];
<ide> foreach ($data as $field => $properties) {
<del> // Move 'key' into a constraint
<add> // Move primary key into a constraint
<ide> if (isset($properties['key']) && $properties['key'] === 'primary') {
<ide> $constraints['primary'] = [
<ide> 'type' => 'primary',
<ide> 'columns' => [$field]
<ide> ];
<del> unset($properties['key']);
<ide> }
<add> unset($properties['key']);
<ide> if ($field !== 'indexes') {
<ide> $out[$field] = $properties;
<ide> } | 1 |
Javascript | Javascript | add dummy modules when running basic tests | f9af896bb8f4cb37b22d508443174c8edf40fc54 | <ide><path>test/data/testinit.js
<ide> this.loadTests = function() {
<ide>
<ide> // Get testSubproject from testrunner first
<ide> require( [ "data/testrunner.js" ], function() {
<del> var tests = []
<del> .concat( [
<del>
<add> var i = 0,
<add> tests = [
<ide> // A special module with basic tests, meant for
<ide> // not fully supported environments like Android 2.3,
<ide> // jsdom or PhantomJS. We run it everywhere, though,
<ide> // to make sure tests are not broken.
<del> //
<del> // Support: Android 2.3 only
<del> // When loading basic tests don't load any others to not
<del> // overload Android 2.3.
<del> "unit/basic.js"
<del> ] )
<del> .concat( basicTests ? [] : [
<add> "unit/basic.js",
<add>
<ide> "unit/core.js",
<ide> "unit/callbacks.js",
<ide> "unit/deferred.js",
<ide> this.loadTests = function() {
<ide> "unit/dimensions.js",
<ide> "unit/animation.js",
<ide> "unit/tween.js"
<del> ] );
<add> ];
<ide>
<ide> // Ensure load order (to preserve test numbers)
<ide> ( function loadDep() {
<del> var dep = tests.shift();
<add> var dep = tests[ i++ ];
<ide>
<ide> if ( dep ) {
<del> require( [ dep ], loadDep );
<add> if ( !basicTests || i === 1 ) {
<add> require( [ dep ], loadDep );
<add>
<add> // Support: Android 2.3 only
<add> // When running basic tests, replace other modules with dummies to avoid overloading
<add> // impaired clients.
<add> } else {
<add> QUnit.module( dep.replace( /^.*\/|\.js$/g, "" ) );
<add> loadDep();
<add> }
<ide>
<ide> } else {
<ide> QUnit.load(); | 1 |
Python | Python | change doc string for rpn_box_predictor_features | e186a9404587781310c2602c0677e912bffb5ad4 | <ide><path>research/object_detection/meta_architectures/faster_rcnn_meta_arch.py
<ide> def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
<ide> Returns:
<ide> prediction_dict: a dictionary holding "raw" prediction tensors:
<ide> 1) rpn_box_predictor_features: A list of 4-D float32 tensor with shape
<del> [batch_size, height, width, depth] to be used for predicting proposal
<add> [batch_size, height_i, width_j, depth] to be used for predicting proposal
<ide> boxes and corresponding objectness scores.
<ide> 2) rpn_features_to_crop: A list of 4-D float32 tensor with shape
<ide> [batch_size, height, width, depth] representing image features to crop
<ide> def _predict_first_stage(self, preprocessed_inputs):
<ide> Returns:
<ide> prediction_dict: a dictionary holding "raw" prediction tensors:
<ide> 1) rpn_box_predictor_features: A 4-D float32/bfloat16 tensor with shape
<del> [batch_size, height, width, depth] to be used for predicting proposal
<add> [batch_size, height__i, width_j, depth] to be used for predicting proposal
<ide> boxes and corresponding objectness scores.
<ide> 2) rpn_features_to_crop: A list of 4-D float32/bfloat16 tensor with shape
<ide> [batch_size, height, width, depth] representing image features to crop | 1 |
Javascript | Javascript | add ngswiperight/left directives to ngmobile | 5e0f876c39099adb6a0300c429b8df1f6b544846 | <ide><path>Gruntfile.js
<ide> module.exports = function(grunt) {
<ide> dest: 'build/angular-mobile.js',
<ide> src: util.wrap([
<ide> 'src/ngMobile/mobile.js',
<del> 'src/ngMobile/directive/ngClick.js'
<add> 'src/ngMobile/directive/ngClick.js',
<add> 'src/ngMobile/directive/ngSwipe.js'
<ide> ], 'module')
<ide> },
<ide> mocks: {
<ide><path>angularFiles.js
<ide> angularFiles = {
<ide> 'src/ngMock/angular-mocks.js',
<ide> 'src/ngMobile/mobile.js',
<ide> 'src/ngMobile/directive/ngClick.js',
<add> 'src/ngMobile/directive/ngSwipe.js',
<add>
<ide> 'src/bootstrap/bootstrap.js'
<ide> ],
<ide>
<ide> angularFiles = {
<ide> 'src/ngResource/resource.js',
<ide> 'src/ngMobile/mobile.js',
<ide> 'src/ngMobile/directive/ngClick.js',
<add> 'src/ngMobile/directive/ngSwipe.js',
<ide> 'src/ngSanitize/sanitize.js',
<ide> 'src/ngSanitize/directive/ngBindHtml.js',
<ide> 'src/ngSanitize/filter/linky.js',
<ide><path>src/ngMobile/directive/ngClick.js
<ide> ngMobile.directive('ngClick', ['$parse', '$timeout', '$rootElement',
<ide>
<ide> // Actual linking function.
<ide> return function(scope, element, attr) {
<del> var expressionFn = $parse(attr.ngClick),
<add> var clickHandler = $parse(attr.ngClick),
<ide> tapping = false,
<ide> tapElement, // Used to blur the element after a tap.
<ide> startTime, // Used to check if the tap was held too long.
<ide> ngMobile.directive('ngClick', ['$parse', '$timeout', '$rootElement',
<ide>
<ide> scope.$apply(function() {
<ide> // TODO(braden): This is sending the touchend, not a tap or click. Is that kosher?
<del> expressionFn(scope, {$event: event});
<add> clickHandler(scope, {$event: event});
<ide> });
<ide> }
<ide> tapping = false;
<ide> ngMobile.directive('ngClick', ['$parse', '$timeout', '$rootElement',
<ide> // desktop as well, to allow more portable sites.
<ide> element.bind('click', function(event) {
<ide> scope.$apply(function() {
<del> expressionFn(scope, {$event: event});
<add> clickHandler(scope, {$event: event});
<ide> });
<ide> });
<ide> };
<ide><path>src/ngMobile/directive/ngSwipe.js
<add>'use strict';
<add>
<add>/**
<add> * @ngdoc directive
<add> * @name ngMobile.directive:ngSwipeLeft
<add> *
<add> * @description
<add> * Specify custom behavior when an element is swiped to the left on a touchscreen device.
<add> * A leftward swipe is a quick, right-to-left slide of the finger.
<add> * Though ngSwipeLeft is designed for touch-based devices, it will work with a mouse click and drag too.
<add> *
<add> * @element ANY
<add> * @param {expression} ngSwipeLeft {@link guide/expression Expression} to evaluate
<add> * upon left swipe. (Event object is available as `$event`)
<add> *
<add> * @example
<add> <doc:example>
<add> <doc:source>
<add> <div ng-show="!showActions" ng-swipe-left="showActions = true">
<add> Some list content, like an email in the inbox
<add> </div>
<add> <div ng-show="showActions" ng-swipe-right="showActions = false">
<add> <button ng-click="reply()">Reply</button>
<add> <button ng-click="delete()">Delete</button>
<add> </div>
<add> </doc:source>
<add> </doc:example>
<add> */
<add>
<add>/**
<add> * @ngdoc directive
<add> * @name ngMobile.directive:ngSwipeRight
<add> *
<add> * @description
<add> * Specify custom behavior when an element is swiped to the right on a touchscreen device.
<add> * A rightward swipe is a quick, left-to-right slide of the finger.
<add> * Though ngSwipeRight is designed for touch-based devices, it will work with a mouse click and drag too.
<add> *
<add> * @element ANY
<add> * @param {expression} ngSwipeRight {@link guide/expression Expression} to evaluate
<add> * upon right swipe. (Event object is available as `$event`)
<add> *
<add> * @example
<add> <doc:example>
<add> <doc:source>
<add> <div ng-show="!showActions" ng-swipe-left="showActions = true">
<add> Some list content, like an email in the inbox
<add> </div>
<add> <div ng-show="showActions" ng-swipe-right="showActions = false">
<add> <button ng-click="reply()">Reply</button>
<add> <button ng-click="delete()">Delete</button>
<add> </div>
<add> </doc:source>
<add> </doc:example>
<add> */
<add>
<add>function makeSwipeDirective(directiveName, direction) {
<add> ngMobile.directive(directiveName, ['$parse', function($parse) {
<add> // The maximum vertical delta for a swipe should be less than 75px.
<add> var MAX_VERTICAL_DISTANCE = 75;
<add> // Vertical distance should not be more than a fraction of the horizontal distance.
<add> var MAX_VERTICAL_RATIO = 0.3;
<add> // At least a 30px lateral motion is necessary for a swipe.
<add> var MIN_HORIZONTAL_DISTANCE = 30;
<add> // The total distance in any direction before we make the call on swipe vs. scroll.
<add> var MOVE_BUFFER_RADIUS = 10;
<add>
<add> function getCoordinates(event) {
<add> var touches = event.touches && event.touches.length ? event.touches : [event];
<add> var e = (event.changedTouches && event.changedTouches[0]) ||
<add> (event.originalEvent && event.originalEvent.changedTouches &&
<add> event.originalEvent.changedTouches[0]) ||
<add> touches[0].originalEvent || touches[0];
<add>
<add> return {
<add> x: e.clientX,
<add> y: e.clientY
<add> };
<add> }
<add>
<add> return function(scope, element, attr) {
<add> var swipeHandler = $parse(attr[directiveName]);
<add> var startCoords, valid;
<add> var totalX, totalY;
<add> var lastX, lastY;
<add>
<add> function validSwipe(event) {
<add> // Check that it's within the coordinates.
<add> // Absolute vertical distance must be within tolerances.
<add> // Horizontal distance, we take the current X - the starting X.
<add> // This is negative for leftward swipes and positive for rightward swipes.
<add> // After multiplying by the direction (-1 for left, +1 for right), legal swipes
<add> // (ie. same direction as the directive wants) will have a positive delta and
<add> // illegal ones a negative delta.
<add> // Therefore this delta must be positive, and larger than the minimum.
<add> if (!startCoords) return false;
<add> var coords = getCoordinates(event);
<add> var deltaY = Math.abs(coords.y - startCoords.y);
<add> var deltaX = (coords.x - startCoords.x) * direction;
<add> return valid && // Short circuit for already-invalidated swipes.
<add> deltaY < MAX_VERTICAL_DISTANCE &&
<add> deltaX > 0 &&
<add> deltaX > MIN_HORIZONTAL_DISTANCE &&
<add> deltaY / deltaX < MAX_VERTICAL_RATIO;
<add> }
<add>
<add> element.bind('touchstart mousedown', function(event) {
<add> startCoords = getCoordinates(event);
<add> valid = true;
<add> totalX = 0;
<add> totalY = 0;
<add> lastX = startCoords.x;
<add> lastY = startCoords.y;
<add> });
<add>
<add> element.bind('touchcancel', function(event) {
<add> valid = false;
<add> });
<add>
<add> element.bind('touchmove mousemove', function(event) {
<add> if (!valid) return;
<add>
<add> // Android will send a touchcancel if it thinks we're starting to scroll.
<add> // So when the total distance (+ or - or both) exceeds 10px in either direction,
<add> // we either:
<add> // - On totalX > totalY, we send preventDefault() and treat this as a swipe.
<add> // - On totalY > totalX, we let the browser handle it as a scroll.
<add>
<add> // Invalidate a touch while it's in progress if it strays too far away vertically.
<add> // We don't want a scroll down and back up while drifting sideways to be a swipe just
<add> // because you happened to end up vertically close in the end.
<add> if (!startCoords) return;
<add> var coords = getCoordinates(event);
<add>
<add> if (Math.abs(coords.y - startCoords.y) > MAX_VERTICAL_DISTANCE) {
<add> valid = false;
<add> return;
<add> }
<add>
<add> totalX += Math.abs(coords.x - lastX);
<add> totalY += Math.abs(coords.y - lastY);
<add>
<add> lastX = coords.x;
<add> lastY = coords.y;
<add>
<add> if (totalX < MOVE_BUFFER_RADIUS && totalY < MOVE_BUFFER_RADIUS) {
<add> return;
<add> }
<add>
<add> // One of totalX or totalY has exceeded the buffer, so decide on swipe vs. scroll.
<add> if (totalY > totalX) {
<add> valid = false;
<add> return;
<add> } else {
<add> event.preventDefault();
<add> }
<add> });
<add>
<add> element.bind('touchend mouseup', function(event) {
<add> if (validSwipe(event)) {
<add> // Prevent this swipe from bubbling up to any other elements with ngSwipes.
<add> event.stopPropagation();
<add> scope.$apply(function() {
<add> swipeHandler(scope, {$event:event});
<add> });
<add> }
<add> });
<add> };
<add> }]);
<add>}
<add>
<add>// Left is negative X-coordinate, right is positive.
<add>makeSwipeDirective('ngSwipeLeft', -1);
<add>makeSwipeDirective('ngSwipeRight', 1);
<add>
<ide><path>src/ngMobile/mobile.js
<ide> * @ngdoc overview
<ide> * @name ngMobile
<ide> * @description
<del> */
<del>
<del>/*
<del> * Touch events and other mobile helpers by Braden Shepherdson ([email protected])
<add> * Touch events and other mobile helpers.
<ide> * Based on jQuery Mobile touch event handling (jquerymobile.com)
<ide> */
<ide>
<del>// define ngSanitize module and register $sanitize service
<add>// define ngMobile module
<ide> var ngMobile = angular.module('ngMobile', []);
<ide>
<ide><path>test/ngMobile/directive/ngSwipeSpec.js
<add>'use strict';
<add>
<add>// Wrapper to abstract over using touch events or mouse events.
<add>var swipeTests = function(description, restrictBrowsers, startEvent, moveEvent, endEvent) {
<add> describe('ngSwipe with ' + description + ' events', function() {
<add> var element;
<add>
<add> if (restrictBrowsers) {
<add> // TODO(braden): Once we have other touch-friendly browsers on CI, allow them here.
<add> // Currently Firefox and IE refuse to fire touch events.
<add> var chrome = /chrome/.test(navigator.userAgent.toLowerCase());
<add> if (!chrome) {
<add> return;
<add> }
<add> }
<add>
<add> // Skip tests on IE < 9. These versions of IE don't support createEvent(), and so
<add> // we cannot control the (x,y) position of events.
<add> // It works fine in IE 8 under manual testing.
<add> var msie = +((/msie (\d+)/.exec(navigator.userAgent.toLowerCase()) || [])[1]);
<add> if (msie < 9) {
<add> return;
<add> }
<add>
<add> beforeEach(function() {
<add> module('ngMobile');
<add> });
<add>
<add> afterEach(function() {
<add> dealoc(element);
<add> });
<add>
<add> it('should swipe to the left', inject(function($rootScope, $compile) {
<add> element = $compile('<div ng-swipe-left="swiped = true"></div>')($rootScope);
<add> $rootScope.$digest();
<add> expect($rootScope.swiped).toBeUndefined();
<add>
<add> browserTrigger(element, startEvent, [], 100, 20);
<add> browserTrigger(element, endEvent, [], 20, 20);
<add> expect($rootScope.swiped).toBe(true);
<add> }));
<add>
<add> it('should swipe to the right', inject(function($rootScope, $compile) {
<add> element = $compile('<div ng-swipe-right="swiped = true"></div>')($rootScope);
<add> $rootScope.$digest();
<add> expect($rootScope.swiped).toBeUndefined();
<add>
<add> browserTrigger(element, startEvent, [], 20, 20);
<add> browserTrigger(element, endEvent, [], 90, 20);
<add> expect($rootScope.swiped).toBe(true);
<add> }));
<add>
<add> it('should not swipe if you move too far vertically', inject(function($rootScope, $compile, $rootElement) {
<add> element = $compile('<div ng-swipe-left="swiped = true"></div>')($rootScope);
<add> $rootElement.append(element);
<add> $rootScope.$digest();
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add>
<add> browserTrigger(element, startEvent, [], 90, 20);
<add> browserTrigger(element, moveEvent, [], 70, 200);
<add> browserTrigger(element, endEvent, [], 20, 20);
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add> }));
<add>
<add> it('should not swipe if you slide only a short distance', inject(function($rootScope, $compile, $rootElement) {
<add> element = $compile('<div ng-swipe-left="swiped = true"></div>')($rootScope);
<add> $rootElement.append(element);
<add> $rootScope.$digest();
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add>
<add> browserTrigger(element, startEvent, [], 90, 20);
<add> browserTrigger(element, endEvent, [], 80, 20);
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add> }));
<add>
<add> it('should not swipe if the swipe leaves the element', inject(function($rootScope, $compile, $rootElement) {
<add> element = $compile('<div ng-swipe-right="swiped = true"></div>')($rootScope);
<add> $rootElement.append(element);
<add> $rootScope.$digest();
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add>
<add> browserTrigger(element, startEvent, [], 20, 20);
<add> browserTrigger(element, moveEvent, [], 40, 20);
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add> }));
<add>
<add> it('should not swipe if the swipe starts outside the element', inject(function($rootScope, $compile, $rootElement) {
<add> element = $compile('<div ng-swipe-right="swiped = true"></div>')($rootScope);
<add> $rootElement.append(element);
<add> $rootScope.$digest();
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add>
<add> browserTrigger(element, moveEvent, [], 10, 20);
<add> browserTrigger(element, endEvent, [], 90, 20);
<add>
<add> expect($rootScope.swiped).toBeUndefined();
<add> }));
<add> });
<add>}
<add>
<add>swipeTests('touch', true /* restrictBrowers */, 'touchstart', 'touchmove', 'touchend');
<add>swipeTests('mouse', false /* restrictBrowers */, 'mousedown', 'mousemove', 'mouseup');
<add> | 6 |
Javascript | Javascript | add prefixes for literals | 43f1946c7a57b42cf1306c5857859cec9209edbc | <ide><path>src/evaluator.js
<ide> var PartialEvaluator = (function PartialEvaluatorClosure() {
<ide>
<ide> // (reserved partial commands for the lexer)
<ide> BM: null,
<del> BD: null
<add> BD: null,
<add> 'true': null,
<add> fa: null,
<add> fal: null,
<add> fals: null,
<add> 'false': null,
<add> nu: null,
<add> nul: null,
<add> 'null': null
<ide> };
<ide>
<ide> PartialEvaluator.prototype = {
<ide><path>test/unit/evaluator_spec.js
<ide> describe('evaluator', function() {
<ide> expect(result.argsArray[1].length).toEqual(1);
<ide> expect(result.argsArray[1][0]).toEqual(5);
<ide> });
<add>
<add> it('should handle glued operations and literals', function() {
<add> var evaluator = new PartialEvaluator(new XrefMock(), new HandlerMock(),
<add> 'prefix');
<add> var stream = new StringStream('trueifalserinulli');
<add> var result = evaluator.getOperatorList(stream, new ResourcesMock(), []);
<add>
<add> expect(!!result.fnArray && !!result.argsArray).toEqual(true);
<add> expect(result.fnArray.length).toEqual(3);
<add> expect(result.fnArray[0]).toEqual('setFlatness');
<add> expect(result.fnArray[1]).toEqual('setRenderingIntent');
<add> expect(result.fnArray[2]).toEqual('setFlatness');
<add> expect(result.argsArray.length).toEqual(3);
<add> expect(result.argsArray[0].length).toEqual(1);
<add> expect(result.argsArray[0][0]).toEqual(true);
<add> expect(result.argsArray[1].length).toEqual(1);
<add> expect(result.argsArray[1][0]).toEqual(false);
<add> });
<ide> });
<ide> });
<ide> | 2 |
Javascript | Javascript | add metro to paths to be babel-transformed | af661e4a6f6583b4c2b372a22d71f41470760d93 | <ide><path>setupBabel.js
<ide> const babelRegisterOnly = require('metro/src/babelRegisterOnly');
<ide> const escapeRegExp = require('lodash/escapeRegExp');
<ide> const path = require('path');
<ide>
<del>const BABEL_ENABLED_PATHS = ['local-cli'];
<add>const BABEL_ENABLED_PATHS = ['local-cli', 'metro'];
<ide>
<ide> /**
<ide> * We use absolute paths for matching only the top-level folders reliably. For
<ide> const BABEL_ENABLED_PATHS = ['local-cli'];
<ide> function buildRegExps(basePath, dirPaths) {
<ide> return dirPaths.map(
<ide> folderPath =>
<del> // Babel `only` option works with forward slashes in the RegExp so replace
<del> // backslashes for Windows.
<del> folderPath instanceof RegExp
<del> ? new RegExp(
<del> `^${escapeRegExp(
<del> path.resolve(basePath, '.').replace(/\\/g, '/')
<del> )}/${folderPath.source}`,
<del> folderPath.flags
<del> )
<del> : new RegExp(
<del> `^${escapeRegExp(
<del> path.resolve(basePath, folderPath).replace(/\\/g, '/')
<del> )}`
<del> )
<add> folderPath === 'metro'
<add> // metro uses flow (for example) which needs to be stripped out w/babel.
<add> // it'll resolve to .../metro/packages/metro/src/index.js we want root
<add> ? path.resolve(require.resolve('metro'), '..', '..', '..', '..')
<add> // Babel `only` option works with forward slashes in the RegExp so replace
<add> // backslashes for Windows.
<add> : folderPath instanceof RegExp
<add> ? new RegExp(
<add> `^${escapeRegExp(
<add> path.resolve(basePath, '.').replace(/\\/g, '/')
<add> )}/${folderPath.source}`,
<add> folderPath.flags
<add> )
<add> : new RegExp(
<add> `^${escapeRegExp(
<add> path.resolve(basePath, folderPath).replace(/\\/g, '/')
<add> )}`
<add> )
<ide> );
<ide> }
<ide> | 1 |
Ruby | Ruby | fix use of issues_url | 53b387987b54515c2eef7270dbceccb6361f4fc0 | <ide><path>Library/Homebrew/exceptions.rb
<ide> def dump
<ide> puts
<ide> onoe "#{formula.name} did not build"
<ide> puts "Logs: #{logs}" unless Dir["#{logs}/*"].empty?
<del> puts "Help: #{Tty.em}https://github.com/mxcl/homebrew/wiki/troubleshooting#{Tty.reset}"
<add> puts "Help: #{Tty.em}#{ISSUES_URL}#{Tty.reset}"
<ide> issues = GitHub.issues_for_formula(formula.name)
<ide> puts *issues.map{ |s| " #{Tty.em}#{s}#{Tty.reset}" } unless issues.empty?
<ide> end
<ide><path>Library/Homebrew/global.rb
<ide> module Homebrew extend self
<ide> end
<ide>
<ide> FORMULA_META_FILES = %w[README README.md ChangeLog CHANGES COPYING LICENSE LICENCE COPYRIGHT AUTHORS]
<del>ISSUES_URL = "https://github.com/mxcl/homebrew/wiki/bug-fixing-checklist"
<add>ISSUES_URL = "https://github.com/mxcl/homebrew/wiki/troubleshooting"
<ide>
<ide> unless ARGV.include? "--no-compat" or ENV['HOMEBREW_NO_COMPAT']
<ide> $:.unshift(File.expand_path("#{__FILE__}/../compat")) | 2 |
Ruby | Ruby | fix interplay of humanize and html_escape | 26698fb91d88dca0f860adcb80528d8d3f0f6285 | <ide><path>activesupport/lib/active_support/inflector/methods.rb
<ide> def humanize(lower_case_and_underscored_word, options = {})
<ide> result.gsub!(/([a-z\d]*)/i) { |match|
<ide> "#{inflections.acronyms[match] || match.downcase}"
<ide> }
<del> result.gsub!(/^\w/) { $&.upcase } if options.fetch(:capitalize, true)
<add> result.gsub!(/^\w/) { |match| match.upcase } if options.fetch(:capitalize, true)
<ide> result
<ide> end
<ide>
<ide><path>activesupport/test/core_ext/string_ext_test.rb
<ide> def test_humanize_without_capitalize
<ide> end
<ide> end
<ide>
<add> def test_humanize_with_html_escape
<add> assert_equal 'Hello', ERB::Util.html_escape("hello").humanize
<add> end
<add>
<ide> def test_ord
<ide> assert_equal 97, 'a'.ord
<ide> assert_equal 97, 'abc'.ord | 2 |
Python | Python | remove unused imports | 41037f0f077c033b77b23fd211ee78e3495005c1 | <ide><path>spacy/cli/convert.py
<ide> # coding: utf8
<del>from __future__ import unicode_literals, division, print_function
<add>from __future__ import unicode_literals
<ide>
<ide> import io
<del>from pathlib import Path, PurePosixPath
<add>from pathlib import Path
<ide>
<ide> from .converters import conllu2json
<ide> from .. import util
<ide><path>spacy/cli/converters/conllu2json.py
<ide> # coding: utf8
<del>from __future__ import unicode_literals, division, print_function
<add>from __future__ import unicode_literals
<ide>
<ide> import json
<ide> from ...gold import read_json_file, merge_sents | 2 |
Python | Python | clean tf bert | 4adbdce5ee4f784a56b9bf9252c3f6a007f9daca | <ide><path>src/transformers/modeling_tf_utils.py
<ide> logger = logging.get_logger(__name__)
<ide> tf_logger = tf.get_logger()
<ide>
<add>TFModelInputType = Union[
<add> List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
<add>]
<add>
<ide>
<ide> class TFModelUtilsMixin:
<ide> """
<ide><path>src/transformers/models/albert/modeling_tf_albert.py
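
A quick sketch of how the `TFModelInputType` alias added above can be consumed in a signature; `encode` and the "input_ids" key are illustrative assumptions, not part of this commit:

from typing import Dict, List, Optional, Union

import numpy as np
import tensorflow as tf

TFModelInputType = Union[
    List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]

def encode(inputs: Optional[TFModelInputType] = None) -> tf.Tensor:
    # Dict inputs carry named model arguments; any other form is converted directly.
    if inputs is None:
        raise ValueError("no inputs given")
    if isinstance(inputs, dict):
        inputs = inputs["input_ids"]  # assumed key, for illustration only
    return tf.convert_to_tensor(inputs)
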
<ide>
<ide>
<ide> from dataclasses import dataclass
<del>from typing import Optional, Tuple
<add>from typing import Any, Dict, Optional, Tuple
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
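A standalone sketch (not part of the diff) of why the one-hot + matmul lookup above is safe: for a tiny vocabulary such as the token types it is numerically identical to a gather.

import tensorflow as tf

table = tf.random.normal((2, 4))                      # (type_vocab_size, hidden)
ids = tf.constant([0, 1, 1])
one_hot = tf.one_hot(ids, depth=2, dtype=table.dtype)
tf.debugging.assert_near(tf.matmul(one_hot, table), tf.gather(table, ids))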
<ide>
<ide> def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_r
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "max_position_embeddings": self.max_position_embeddings,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, position_ids):
<del> input_shape = shape_list(tensor=position_ids)
<add> def call(self, position_ids: tf.Tensor) -> tf.Tensor:
<add> input_shape = shape_list(position_ids)
<ide> position_embeddings = self.position_embeddings[: input_shape[1], :]
<ide>
<ide> return tf.broadcast_to(input=position_embeddings, shape=input_shape)
<ide> def __init__(self, config, **kwargs):
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
<del> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
<add> def call(
<add> self,
<add> input_ids: tf.Tensor,
<add> position_ids: tf.Tensor,
<add> token_type_ids: tf.Tensor,
<add> inputs_embeds: tf.Tensor,
<add> training: bool = False,
<add> ) -> tf.Tensor:
<ide> """
<ide> Applies embedding based on inputs tensor.
<ide>
<ide> def call(
<ide> return outputs
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> }
<ide> ]
<ide> )
<del> def serving(self, inputs):
<del> output = self.call(inputs)
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
<add> def serving(self, inputs: Dict[str, tf.Tensor]):
<add> output = self.call(input_ids=inputs)
<ide>
<ide> return self.serving_output(output)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/bert/modeling_tf_bert.py
<ide>
<ide> import warnings
<ide> from dataclasses import dataclass
<del>from typing import Optional, Tuple
<add>from typing import Any, Dict, Optional, Tuple, Union
<ide>
<add>import numpy as np
<ide> import tensorflow as tf
<ide>
<ide> from ...activations_tf import get_tf_activation
<ide> from ...modeling_tf_utils import (
<ide> TFCausalLanguageModelingLoss,
<ide> TFMaskedLanguageModelingLoss,
<add> TFModelInputType,
<ide> TFMultipleChoiceLoss,
<ide> TFNextSentencePredictionLoss,
<ide> TFPreTrainedModel,
<ide> class TFBertPreTrainingLoss:
<ide> computation.
<ide> """
<ide>
<del> def compute_loss(self, labels, logits):
<add> def compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
<ide> loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
<ide> from_logits=True, reduction=tf.keras.losses.Reduction.NONE
<ide> )
<ide> # make sure only labels that are not equal to -100
<ide> # are taken into account as loss
<del> masked_lm_active_loss = tf.not_equal(tf.reshape(labels["labels"], (-1,)), -100)
<add> masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100)
<ide> masked_lm_reduced_logits = tf.boolean_mask(
<del> tf.reshape(logits[0], (-1, shape_list(logits[0])[2])),
<del> masked_lm_active_loss,
<add> tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])),
<add> mask=masked_lm_active_loss,
<add> )
<add> masked_lm_labels = tf.boolean_mask(
<add> tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss
<add> )
<add> next_sentence_active_loss = tf.not_equal(tf.reshape(tensor=labels["next_sentence_label"], shape=(-1,)), -100)
<add> next_sentence_reduced_logits = tf.boolean_mask(
<add> tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=next_sentence_active_loss
<ide> )
<del> masked_lm_labels = tf.boolean_mask(tf.reshape(labels["labels"], (-1,)), masked_lm_active_loss)
<del> next_sentence_active_loss = tf.not_equal(tf.reshape(labels["next_sentence_label"], (-1,)), -100)
<del> next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits[1], (-1, 2)), next_sentence_active_loss)
<ide> next_sentence_label = tf.boolean_mask(
<del> tf.reshape(labels["next_sentence_label"], (-1,)), mask=next_sentence_active_loss
<add> tensor=tf.reshape(tensor=labels["next_sentence_label"], shape=(-1,)), mask=next_sentence_active_loss
<ide> )
<del> masked_lm_loss = loss_fn(masked_lm_labels, masked_lm_reduced_logits)
<del> next_sentence_loss = loss_fn(next_sentence_label, next_sentence_reduced_logits)
<del> masked_lm_loss = tf.reshape(masked_lm_loss, (-1, shape_list(next_sentence_loss)[0]))
<del> masked_lm_loss = tf.reduce_mean(masked_lm_loss, 0)
<add> masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits)
<add> next_sentence_loss = loss_fn(y_true=next_sentence_label, y_pred=next_sentence_reduced_logits)
<add> masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(next_sentence_loss)[0]))
<add> masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0)
<ide>
<ide> return masked_lm_loss + next_sentence_loss
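A standalone sketch of the -100 convention implemented above: positions whose label is -100 are dropped before the loss is computed, rather than being scored as a class.

import tensorflow as tf

labels = tf.constant([[5, -100, 7]])                  # -100 = ignore this position
logits = tf.random.normal((1, 3, 10))                 # (batch, seq, vocab)
active = tf.not_equal(tf.reshape(labels, (-1,)), -100)
kept_logits = tf.boolean_mask(tf.reshape(logits, (-1, 10)), active)
kept_labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
per_token_loss = loss_fn(kept_labels, kept_logits)    # shape (2,): masked slot skipped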
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_r
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "max_position_embeddings": self.max_position_embeddings,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, position_ids):
<del> input_shape = shape_list(tensor=position_ids)
<add> def call(self, position_ids: tf.Tensor) -> tf.Tensor:
<add> input_shape = shape_list(position_ids)
<ide> position_embeddings = self.position_embeddings[: input_shape[1], :]
<ide>
<ide> return tf.broadcast_to(input=position_embeddings, shape=input_shape)
<ide> def call(self, position_ids):
<ide> class TFBertEmbeddings(tf.keras.layers.Layer):
<ide> """Construct the embeddings from word, position and token_type embeddings."""
<ide>
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.word_embeddings = TFBertWordEmbeddings(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
<add> def call(
<add> self,
<add> input_ids: tf.Tensor,
<add> position_ids: tf.Tensor,
<add> token_type_ids: tf.Tensor,
<add> inputs_embeds: tf.Tensor,
<add> training: bool = False,
<add> ) -> tf.Tensor:
<ide> """
<ide> Applies embedding based on inputs tensor.
<ide>
<ide> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em
<ide> assert not (input_ids is None and inputs_embeds is None)
<ide>
<ide> if input_ids is not None:
<del> inputs_embeds = self.word_embeddings(input_ids=input_ids)
<add> inputs_embeds = self.word_embeddings(input_ids)
<ide>
<ide> if token_type_ids is None:
<del> input_shape = shape_list(tensor=inputs_embeds)[:-1]
<add> input_shape = shape_list(inputs_embeds)[:-1]
<ide> token_type_ids = tf.fill(dims=input_shape, value=0)
<ide>
<ide> if position_ids is None:
<del> position_embeds = self.position_embeddings(position_ids=inputs_embeds)
<add> position_embeds = self.position_embeddings(inputs_embeds)
<ide> else:
<del> position_embeds = self.position_embeddings(position_ids=position_ids)
<add> position_embeds = self.position_embeddings(position_ids)
<ide>
<del> token_type_embeds = self.token_type_embeddings(token_type_ids=token_type_ids)
<add> token_type_embeds = self.token_type_embeddings(token_type_ids)
<ide> final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds])
<ide> final_embeddings = self.LayerNorm(inputs=final_embeddings)
<ide> final_embeddings = self.dropout(inputs=final_embeddings, training=training)
<ide> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em
<ide>
<ide>
<ide> class TFBertSelfAttention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="query",
<ide> )
<ide> self.key = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="key",
<ide> )
<ide> self.value = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="value",
<ide> )
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
<ide>
<del> def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> query_layer = self.query(inputs=hidden_states)
<ide> key_layer = self.key(inputs=hidden_states)
<ide> value_layer = self.value(inputs=hidden_states)
<ide>
<ide> # Take the dot product between "query" and "key" to get the raw
<ide> # attention scores.
<del> dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype)
<del> query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk))
<add> dk = tf.cast(self.attention_head_size, dtype=query_layer.dtype)
<add> query_layer = tf.multiply(query_layer, tf.math.rsqrt(dk))
<ide> attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer)
<ide>
<ide> if attention_mask is not None:
<ide>             # Apply the attention mask (precomputed for all layers in TFBertModel call() function)
<del> attention_scores = attention_scores + attention_mask
<add> attention_scores = tf.add(attention_scores, attention_mask)
<ide>
<ide> # Normalize the attention scores to probabilities.
<ide> attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
<ide>
<ide> # This is actually dropping out entire tokens to attend to, which might
<ide> # seem a bit unusual, but is taken from the original Transformer paper.
<del> attention_probs = self.dropout(attention_probs, training=training)
<add> attention_probs = self.dropout(inputs=attention_probs, training=training)
<ide>
<ide> # Mask heads if we want to
<ide> if head_mask is not None:
<del> attention_scores = attention_scores * head_mask
<add> attention_scores = tf.multiply(attention_scores, head_mask)
<ide>
<ide> attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer)
<ide> outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
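A standalone sketch of the scaling refactored above: multiplying the query by rsqrt(d_k) before the dot product gives the same scores as dividing q·k by sqrt(d_k) afterwards.

import tensorflow as tf

q = tf.random.normal((2, 8))                          # (tokens, d_k)
k = tf.random.normal((2, 8))
dk = tf.cast(8, q.dtype)
pre = tf.matmul(q * tf.math.rsqrt(dk), k, transpose_b=True)
post = tf.matmul(q, k, transpose_b=True) / tf.sqrt(dk)
tf.debugging.assert_near(pre, post)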
<ide> def call(self, hidden_states, attention_mask=None, head_mask=None, output_attent
<ide>
<ide>
<ide> class TFBertSelfOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abcd,cde->abe",
<ide> output_shape=(None, self.all_head_size),
<ide> bias_axes="e",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide>
<ide> class TFBertAttention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.self_attention = TFBertSelfAttention(config, name="self")
<ide> def __init__(self, config, **kwargs):
<ide> def prune_heads(self, heads):
<ide> raise NotImplementedError
<ide>
<del> def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> input_tensor: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> self_outputs = self.self_attention(
<del> input_tensor, attention_mask, head_mask, output_attentions, training=training
<add> hidden_states=input_tensor,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<add> )
<add> attention_output = self.dense_output(
<add> hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
<ide> )
<del> attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
<ide> outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide>
<ide> class TFBertIntermediate(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cd->abd",
<ide> output_shape=(None, config.intermediate_size),
<ide> bias_axes="d",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<del> self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act)
<add> self.intermediate_act_fn = get_tf_activation(config.hidden_act)
<ide> else:
<ide> self.intermediate_act_fn = config.hidden_act
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.intermediate_act_fn(hidden_states)
<ide>
<ide> return hidden_states
<ide>
<ide>
<ide> class TFBertOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide>
<ide> class TFBertLayer(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.attention = TFBertAttention(config, name="attention")
<ide> self.intermediate = TFBertIntermediate(config, name="intermediate")
<ide> self.bert_output = TFBertOutput(config, name="output")
<ide>
<del> def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> attention_outputs = self.attention(
<del> hidden_states, attention_mask, head_mask, output_attentions, training=training
<add> input_tensor=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> attention_output = attention_outputs[0]
<del> intermediate_output = self.intermediate(attention_output)
<del> layer_output = self.bert_output(intermediate_output, attention_output, training=training)
<add> intermediate_output = self.intermediate(hidden_states=attention_output)
<add> layer_output = self.bert_output(
<add> hidden_states=intermediate_output, input_tensor=attention_output, training=training
<add> )
<ide> outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide>
<ide> class TFBertEncoder(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.layer = [TFBertLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
<ide>
<ide> def call(
<ide> self,
<del> hidden_states,
<del> attention_mask,
<del> head_mask,
<del> output_attentions,
<del> output_hidden_states,
<del> return_dict,
<del> training=False,
<del> ):
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> output_hidden_states: bool,
<add> return_dict: bool,
<add> training: bool = False,
<add> ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
<ide> all_hidden_states = () if output_hidden_states else None
<ide> all_attentions = () if output_attentions else None
<ide>
<ide> def call(
<ide> all_hidden_states = all_hidden_states + (hidden_states,)
<ide>
<ide> layer_outputs = layer_module(
<del> hidden_states, attention_mask, head_mask[i], output_attentions, training=training
<add> hidden_states=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask[i],
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> hidden_states = layer_outputs[0]
<ide>
<ide> def call(
<ide>
<ide>
<ide> class TFBertPooler(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size,
<add> units=config.hidden_size,
<ide> kernel_initializer=get_initializer(config.initializer_range),
<ide> activation="tanh",
<ide> name="dense",
<ide> )
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> # We "pool" the model by simply taking the hidden state corresponding
<ide> # to the first token.
<ide> first_token_tensor = hidden_states[:, 0]
<del> pooled_output = self.dense(first_token_tensor)
<add> pooled_output = self.dense(inputs=first_token_tensor)
<ide>
<ide> return pooled_output
<ide>
<ide>
<ide> class TFBertPredictionHeadTransform(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
<add> units=config.hidden_size,
<add> kernel_initializer=get_initializer(config.initializer_range),
<add> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<ide> def __init__(self, config, **kwargs):
<ide>
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide>
<del> def call(self, hidden_states):
<del> hidden_states = self.dense(hidden_states)
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<add> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.transform_act_fn(hidden_states)
<del> hidden_states = self.LayerNorm(hidden_states)
<add> hidden_states = self.LayerNorm(inputs=hidden_states)
<ide>
<ide> return hidden_states
<ide>
<ide>
<ide> class TFBertLMPredictionHead(tf.keras.layers.Layer):
<del> def __init__(self, config, input_embeddings, **kwargs):
<add> def __init__(self, config: BertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.vocab_size = config.vocab_size
<ide> def __init__(self, config, input_embeddings, **kwargs):
<ide> # an output-only bias for each token.
<ide> self.input_embeddings = input_embeddings
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_output_embeddings(self):
<add> def get_output_embeddings(self) -> tf.keras.layers.Layer:
<ide> return self.input_embeddings
<ide>
<del> def set_output_embeddings(self, value):
<add> def set_output_embeddings(self, value: tf.Variable):
<ide> self.input_embeddings.weight = value
<ide> self.input_embeddings.vocab_size = shape_list(value)[0]
<ide>
<del> def get_bias(self):
<add> def get_bias(self) -> Dict[str, tf.Variable]:
<ide> return {"bias": self.bias}
<ide>
<del> def set_bias(self, value):
<add> def set_bias(self, value: tf.Variable):
<ide> self.bias = value["bias"]
<ide> self.vocab_size = shape_list(value["bias"])[0]
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.transform(hidden_states=hidden_states)
<del> seq_length = shape_list(tensor=hidden_states)[1]
<add> seq_length = shape_list(hidden_states)[1]
<ide> hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
<ide> hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
<ide> hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
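The matmul above with transpose_b=True is weight tying: the (vocab, hidden) input-embedding matrix doubles as the output projection, so no separate decoder kernel exists. A standalone sketch:

import tensorflow as tf

embedding_matrix = tf.random.normal((100, 16))        # (vocab_size, hidden_size)
hidden = tf.random.normal((3, 16))                    # (tokens, hidden_size)
logits = tf.matmul(hidden, embedding_matrix, transpose_b=True)  # (3, 100)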
<ide> def call(self, hidden_states):
<ide>
<ide>
<ide> class TFBertMLMHead(tf.keras.layers.Layer):
<del> def __init__(self, config, input_embeddings, **kwargs):
<add> def __init__(self, config: BertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.predictions = TFBertLMPredictionHead(config, input_embeddings, name="predictions")
<ide>
<del> def call(self, sequence_output):
<del> prediction_scores = self.predictions(sequence_output)
<add> def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
<add> prediction_scores = self.predictions(hidden_states=sequence_output)
<ide>
<ide> return prediction_scores
<ide>
<ide>
<ide> class TFBertNSPHead(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: BertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.seq_relationship = tf.keras.layers.Dense(
<del> 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship"
<add> units=2,
<add> kernel_initializer=get_initializer(config.initializer_range),
<add> name="seq_relationship",
<ide> )
<ide>
<del> def call(self, pooled_output):
<del> seq_relationship_score = self.seq_relationship(pooled_output)
<add> def call(self, pooled_output: tf.Tensor) -> tf.Tensor:
<add> seq_relationship_score = self.seq_relationship(inputs=pooled_output)
<ide>
<ide> return seq_relationship_score
<ide>
<ide> def call(self, pooled_output):
<ide> class TFBertMainLayer(tf.keras.layers.Layer):
<ide> config_class = BertConfig
<ide>
<del> def __init__(self, config, add_pooling_layer=True, **kwargs):
<add> def __init__(self, config: BertConfig, add_pooling_layer: bool = True, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.config = config
<ide> def __init__(self, config, add_pooling_layer=True, **kwargs):
<ide> self.encoder = TFBertEncoder(config, name="encoder")
<ide> self.pooler = TFBertPooler(config, name="pooler") if add_pooling_layer else None
<ide>
<del> def get_input_embeddings(self):
<add> def get_input_embeddings(self) -> tf.keras.layers.Layer:
<ide> return self.embeddings.word_embeddings
<ide>
<del> def set_input_embeddings(self, value):
<add> def set_input_embeddings(self, value: tf.Variable):
<ide> self.embeddings.word_embeddings.weight = value
<ide> self.embeddings.word_embeddings.vocab_size = shape_list(value)[0]
<ide>
<ide> class PreTrainedModel
<ide>
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> training: bool = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> config=self.config,
<ide> def call(
<ide> raise ValueError("You have to specify either input_ids or inputs_embeds")
<ide>
<ide> if inputs["attention_mask"] is None:
<del> inputs["attention_mask"] = tf.fill(input_shape, 1)
<add> inputs["attention_mask"] = tf.fill(dims=input_shape, value=1)
<ide>
<ide> if inputs["token_type_ids"] is None:
<del> inputs["token_type_ids"] = tf.fill(input_shape, 0)
<add> inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
<ide>
<ide> embedding_output = self.embeddings(
<del> inputs["input_ids"],
<del> inputs["position_ids"],
<del> inputs["token_type_ids"],
<del> inputs["inputs_embeds"],
<add> input_ids=inputs["input_ids"],
<add> position_ids=inputs["position_ids"],
<add> token_type_ids=inputs["token_type_ids"],
<add> inputs_embeds=inputs["inputs_embeds"],
<ide> training=inputs["training"],
<ide> )
<ide>
<ide> def call(
<ide> # positions we want to attend and -10000.0 for masked positions.
<ide> # Since we are adding it to the raw scores before the softmax, this is
<ide> # effectively the same as removing these entirely.
<del> extended_attention_mask = tf.cast(extended_attention_mask, embedding_output.dtype)
<del> extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
<add> extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
<add> extended_attention_mask = tf.multiply(tf.subtract(1.0, extended_attention_mask), -10000.0)
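A standalone sketch of the additive mask built above: 1s (real tokens) map to 0 and 0s (padding) map to -10000, which softmax then turns into near-zero attention weight.

import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 0.0]])
additive = (1.0 - attention_mask) * -10000.0          # [[0., 0., -10000.]]
scores = tf.constant([[2.0, 1.0, 3.0]]) + additive
print(tf.nn.softmax(scores))                          # last position gets ~0 weight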
<ide>
<ide> # Prepare head mask if needed
<ide>         # 1.0 in head_mask indicates we keep the head
<ide> def call(
<ide> inputs["head_mask"] = [None] * self.config.num_hidden_layers
<ide>
<ide> encoder_outputs = self.encoder(
<del> embedding_output,
<del> extended_attention_mask,
<del> inputs["head_mask"],
<del> inputs["output_attentions"],
<del> inputs["output_hidden_states"],
<del> inputs["return_dict"],
<add> hidden_states=embedding_output,
<add> attention_mask=extended_attention_mask,
<add> head_mask=inputs["head_mask"],
<add> output_attentions=inputs["output_attentions"],
<add> output_hidden_states=inputs["output_hidden_states"],
<add> return_dict=inputs["return_dict"],
<ide> training=inputs["training"],
<ide> )
<ide>
<ide> sequence_output = encoder_outputs[0]
<del> pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
<add> pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
<ide>
<ide> if not inputs["return_dict"]:
<ide> return (
<ide> class TFBertForPreTrainingOutput(ModelOutput):
<ide> loss: Optional[tf.Tensor] = None
<ide> prediction_logits: tf.Tensor = None
<ide> seq_relationship_logits: tf.Tensor = None
<del> hidden_states: Optional[Tuple[tf.Tensor]] = None
<del> attentions: Optional[Tuple[tf.Tensor]] = None
<add> hidden_states: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
<add> attentions: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
<ide>
<ide>
<ide> BERT_START_DOCSTRING = r"""
<ide> class TFBertForPreTrainingOutput(ModelOutput):
<ide>
<ide> BERT_INPUTS_DOCSTRING = r"""
<ide> Args:
<del> input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
<add> input_ids (:obj:`np.ndarray`, :obj:`tf.Tensor`, :obj:`List[tf.Tensor]` :obj:`Dict[str, tf.Tensor]` or :obj:`Dict[str, np.ndarray]` and each example must have the shape :obj:`({0})`):
<ide> Indices of input sequence tokens in the vocabulary.
<ide>
<ide> Indices can be obtained using :class:`~transformers.BertTokenizer`. See
<ide> :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
<ide> details.
<ide>
<ide> `What are input IDs? <../glossary.html#input-ids>`__
<del> attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<add> attention_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<ide> Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
<ide>
<ide> - 1 for tokens that are **not masked**,
<ide> - 0 for tokens that are **masked**.
<ide>
<ide> `What are attention masks? <../glossary.html#attention-mask>`__
<del> token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<add> token_type_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<ide> Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
<ide> 1]``:
<ide>
<ide> - 0 corresponds to a `sentence A` token,
<ide> - 1 corresponds to a `sentence B` token.
<ide>
<ide> `What are token type IDs? <../glossary.html#token-type-ids>`__
<del> position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<add> position_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<ide> Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
<ide> config.max_position_embeddings - 1]``.
<ide>
<ide> `What are position IDs? <../glossary.html#position-ids>`__
<del> head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
<add> head_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
<ide> Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
<ide>
<ide> - 1 indicates the head is **not masked**,
<ide> - 0 indicates the head is **masked**.
<ide>
<del> inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
<add> inputs_embeds (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
<ide> Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
<ide> This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
<ide> vectors than the model's internal embedding lookup matrix.
<ide> class TFBertForPreTrainingOutput(ModelOutput):
<ide> BERT_START_DOCSTRING,
<ide> )
<ide> class TFBertModel(TFBertPreTrainedModel):
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.bert = TFBertMainLayer(config, name="bert")
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> config=self.config,
<ide> def call(
<ide>
<ide> return outputs
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertForPreTraining(TFBertPreTrainedModel, TFBertPreTrainingLoss):
<ide> # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
<ide> _keys_to_ignore_on_load_unexpected = [r"cls.predictions.decoder.weight"]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.bert = TFBertMainLayer(config, name="bert")
<ide> self.nsp = TFBertNSPHead(config, name="nsp___cls")
<del> self.mlm = TFBertMLMHead(config, self.bert.embeddings.word_embeddings, name="mlm___cls")
<add> self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings.word_embeddings, name="mlm___cls")
<ide>
<del> def get_lm_head(self):
<add> def get_lm_head(self) -> tf.keras.layers.Layer:
<ide> return self.mlm.predictions
<ide>
<del> def get_prefix_bias_name(self):
<add> def get_prefix_bias_name(self) -> str:
<ide> warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
<ide> return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
<ide>
<ide> @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
<ide> @replace_return_docstrings(output_type=TFBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> next_sentence_label=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> next_sentence_label: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFBertForPreTrainingOutput, Tuple[tf.Tensor]]:
<ide> r"""
<ide> Return:
<ide>
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.bert(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output, pooled_output = outputs[:2]
<del> prediction_scores = self.mlm(sequence_output, training=inputs["training"])
<del> seq_relationship_score = self.nsp(pooled_output)
<add> prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"])
<add> seq_relationship_score = self.nsp(pooled_output=pooled_output)
<ide> total_loss = None
<ide>
<ide> if inputs["labels"] is not None and inputs["next_sentence_label"] is not None:
<ide> def call(
<ide> attentions=outputs.attentions,
<ide> )
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFBertForPreTrainingOutput) -> TFBertForPreTrainingOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss):
<ide> r"nsp___cls",
<ide> ]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> if config.is_decoder:
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide>
<ide> self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
<del> self.mlm = TFBertMLMHead(config, self.bert.embeddings.word_embeddings, name="mlm___cls")
<add> self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings.word_embeddings, name="mlm___cls")
<ide>
<del> def get_lm_head(self):
<add> def get_lm_head(self) -> tf.keras.layers.Layer:
<ide> return self.mlm.predictions
<ide>
<del> def get_prefix_bias_name(self):
<add> def get_prefix_bias_name(self) -> str:
<ide> warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
<ide> return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
<ide>
<ide> def get_prefix_bias_name(self):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<ide> Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
<ide> config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
<ide> (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.bert(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output = outputs[0]
<del> prediction_scores = self.mlm(sequence_output, training=inputs["training"])
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
<add> prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"])
<add> loss = (
<add> None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=prediction_scores)
<add> )
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (prediction_scores,) + outputs[2:]
<ide> def call(
<ide> attentions=outputs.attentions,
<ide> )
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss):
<ide> r"nsp___cls",
<ide> ]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> if not config.is_decoder:
<ide> logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`")
<ide>
<ide> self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
<del> self.mlm = TFBertMLMHead(config, self.bert.embeddings.word_embeddings, name="mlm___cls")
<add> self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings.word_embeddings, name="mlm___cls")
<ide>
<del> def get_lm_head(self):
<add> def get_lm_head(self) -> tf.keras.layers.Layer:
<ide> return self.mlm.predictions
<ide>
<del> def get_prefix_bias_name(self):
<add> def get_prefix_bias_name(self) -> str:
<ide> warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
<ide> return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
<ide>
<ide> def get_prefix_bias_name(self):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<ide> Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
<ide> config.vocab_size - 1]``.
<ide> """
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.bert(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output = outputs[0]
<del> logits = self.mlm(sequence_output, training=inputs["training"])
<add> logits = self.mlm(sequence_output=sequence_output, training=inputs["training"])
<ide> loss = None
<ide>
<ide> if inputs["labels"] is not None:
<ide> # shift labels to the left and cut last logit token
<ide> logits = logits[:, :-1]
<ide> labels = inputs["labels"][:, 1:]
<del> loss = self.compute_loss(labels, logits)
<add> loss = self.compute_loss(labels=labels, logits=logits)
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (logits,) + outputs[2:]
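A standalone sketch of the shift performed above: position i's logits are paired with token i+1, which is the next-token objective of a causal LM.

import tensorflow as tf

input_ids = tf.constant([[10, 11, 12, 13]])
logits = tf.random.normal((1, 4, 100))                # one logit row per position
shifted_logits = logits[:, :-1]                       # rows predicting tokens 1..3
shifted_labels = input_ids[:, 1:]                     # targets 11, 12, 13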
<ide> def call(
<ide> attentions=outputs.attentions,
<ide> )
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertForNextSentencePrediction(TFBertPreTrainedModel, TFNextSentencePredi
<ide> # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
<ide> _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"cls.predictions"]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.bert = TFBertMainLayer(config, name="bert")
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> next_sentence_label=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> next_sentence_label: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFNextSentencePredictorOutput, Tuple[tf.Tensor]]:
<ide> r"""
<ide> Return:
<ide>
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.bert(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> pooled_output = outputs[1]
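<add> # The NSP head scores, from the pooled [CLS] representation, whether sentence B follows sentence A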
<del> seq_relationship_scores = self.nsp(pooled_output)
<add> seq_relationship_scores = self.nsp(pooled_output=pooled_output)
<ide> next_sentence_loss = (
<ide> None
<ide> if inputs["next_sentence_label"] is None
<ide> def call(
<ide> attentions=outputs.attentions,
<ide> )
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFNextSentencePredictorOutput) -> TFNextSentencePredictorOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassific
<ide> _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
<ide> _keys_to_ignore_on_load_missing = [r"dropout"]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.num_labels = config.num_labels
<add>
<ide> self.bert = TFBertMainLayer(config, name="bert")
<del> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
<add> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide> self.classifier = tf.keras.layers.Dense(
<del> config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<add> units=config.num_labels,
<add> kernel_initializer=get_initializer(config.initializer_range),
<add> name="classifier",
<ide> )
<ide>
<ide> @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<ide> Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
<ide> config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
<ide> If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.bert(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> pooled_output = outputs[1]
<del> pooled_output = self.dropout(pooled_output, training=inputs["training"])
<del> logits = self.classifier(pooled_output)
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
<add> pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
<add> logits = self.classifier(inputs=pooled_output)
<add> loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (logits,) + outputs[2:]
<ide> def call(
<ide> attentions=outputs.attentions,
<ide> )
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss):
<ide> _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
<ide> _keys_to_ignore_on_load_missing = [r"dropout"]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.bert = TFBertMainLayer(config, name="bert")
<del> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
<add> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide> self.classifier = tf.keras.layers.Dense(
<del> 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<add> units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<ide> )
<ide>
<ide> @property
<del> def dummy_inputs(self):
<add> def dummy_inputs(self) -> Dict[str, tf.Tensor]:
<ide> """
<ide> Dummy inputs to build the network.
<ide>
<ide> def dummy_inputs(self):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<ide> Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
<ide> num_choices - 1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
<ide> :obj:`input_ids` above)
<ide> def call(
<ide> num_choices = shape_list(inputs["inputs_embeds"])[1]
<ide> seq_length = shape_list(inputs["inputs_embeds"])[2]
<ide>
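<add> # Fold the choices dimension into the batch: (batch_size, num_choices, seq_length) -> (batch_size * num_choices, seq_length)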
<del> flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
<add> flat_input_ids = (
<add> tf.reshape(tensor=inputs["input_ids"], shape=(-1, seq_length)) if inputs["input_ids"] is not None else None
<add> )
<ide> flat_attention_mask = (
<del> tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
<add> tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length))
<add> if inputs["attention_mask"] is not None
<add> else None
<ide> )
<ide> flat_token_type_ids = (
<del> tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
<add> tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length))
<add> if inputs["token_type_ids"] is not None
<add> else None
<ide> )
<ide> flat_position_ids = (
<del> tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
<add> tf.reshape(tensor=inputs["position_ids"], shape=(-1, seq_length))
<add> if inputs["position_ids"] is not None
<add> else None
<ide> )
<ide> flat_inputs_embeds = (
<del> tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
<add> tf.reshape(tensor=inputs["inputs_embeds"], shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
<ide> if inputs["inputs_embeds"] is not None
<ide> else None
<ide> )
<ide> outputs = self.bert(
<del> flat_input_ids,
<del> flat_attention_mask,
<del> flat_token_type_ids,
<del> flat_position_ids,
<del> inputs["head_mask"],
<del> flat_inputs_embeds,
<del> inputs["output_attentions"],
<del> inputs["output_hidden_states"],
<add> input_ids=flat_input_ids,
<add> attention_mask=flat_attention_mask,
<add> token_type_ids=flat_token_type_ids,
<add> position_ids=flat_position_ids,
<add> head_mask=inputs["head_mask"],
<add> inputs_embeds=flat_inputs_embeds,
<add> output_attentions=inputs["output_attentions"],
<add> output_hidden_states=inputs["output_hidden_states"],
<ide> return_dict=inputs["return_dict"],
<ide> training=inputs["training"],
<ide> )
<ide> pooled_output = outputs[1]
<del> pooled_output = self.dropout(pooled_output, training=inputs["training"])
<del> logits = self.classifier(pooled_output)
<del> reshaped_logits = tf.reshape(logits, (-1, num_choices))
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
<add> pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
<add> logits = self.classifier(inputs=pooled_output)
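<add> # Un-flatten back to (batch_size, num_choices) so each choice receives a single logit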
<add> reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
<add> loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits)
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (reshaped_logits,) + outputs[2:]
<ide> def call(
<ide> }
<ide> ]
<ide> )
<del> def serving(self, inputs):
<del> output = self.call(inputs)
<add> def serving(self, inputs: Dict[str, tf.Tensor]):
<add> output = self.call(input_ids=inputs)
<ide>
<ide> return self.serving_output(output)
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationL
<ide> ]
<ide> _keys_to_ignore_on_load_missing = [r"dropout"]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.num_labels = config.num_labels
<add>
<ide> self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
<del> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
<add> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide> self.classifier = tf.keras.layers.Dense(
<del> config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<add> units=config.num_labels,
<add> kernel_initializer=get_initializer(config.initializer_range),
<add> name="classifier",
<ide> )
<ide>
<ide> @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<ide> Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
<ide> 1]``.
<ide> """
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.bert(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output = outputs[0]
<del> sequence_output = self.dropout(sequence_output, training=inputs["training"])
<del> logits = self.classifier(sequence_output)
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
<add> sequence_output = self.dropout(inputs=sequence_output, training=inputs["training"])
<add> logits = self.classifier(inputs=sequence_output)
<add> loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (logits,) + outputs[2:]
<ide> def call(
<ide> attentions=outputs.attentions,
<ide> )
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss)
<ide> r"cls.seq_relationship",
<ide> ]
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: BertConfig, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.num_labels = config.num_labels
<add>
<ide> self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
<ide> self.qa_outputs = tf.keras.layers.Dense(
<del> config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
<add> units=config.num_labels,
<add> kernel_initializer=get_initializer(config.initializer_range),
<add> name="qa_outputs",
<ide> )
<ide>
<ide> @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> start_positions=None,
<del> end_positions=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<add> start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<ide> Labels for position (index) of the start of the labelled span for computing the token classification loss.
<ide> Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
<ide> sequence are not taken into account for computing the loss.
<del> end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<add> end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<ide> Labels for position (index) of the end of the labelled span for computing the token classification loss.
<ide> Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
<ide> sequence are not taken into account for computing the loss.
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.bert(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output = outputs[0]
<del> logits = self.qa_outputs(sequence_output)
<del> start_logits, end_logits = tf.split(logits, 2, axis=-1)
<del> start_logits = tf.squeeze(start_logits, axis=-1)
<del> end_logits = tf.squeeze(end_logits, axis=-1)
<add> logits = self.qa_outputs(inputs=sequence_output)
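<add> # The QA head emits two values per token; split them into start and end span logits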
<add> start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
<add> start_logits = tf.squeeze(input=start_logits, axis=-1)
<add> end_logits = tf.squeeze(input=end_logits, axis=-1)
<ide> loss = None
<ide>
<ide> if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
<ide> labels = {"start_position": inputs["start_positions"]}
<ide> labels["end_position"] = inputs["end_positions"]
<del> loss = self.compute_loss(labels, (start_logits, end_logits))
<add> loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (start_logits, end_logits) + outputs[2:]
<ide> def call(
<ide> attentions=outputs.attentions,
<ide> )
<ide>
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/ctrl/modeling_tf_ctrl.py
<ide> def get_seq_element(sequence_position, input_batch):
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/distilbert/modeling_tf_distilbert.py
<ide> """
<ide>
<ide> import warnings
<add>from typing import Any, Dict
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_r
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "max_position_embeddings": self.max_position_embeddings,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, position_ids):
<del> input_shape = shape_list(tensor=position_ids)
<add> def call(self, position_ids: tf.Tensor) -> tf.Tensor:
<add> input_shape = shape_list(position_ids)
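<add> # Take the first seq_length rows of the learned table and broadcast them to the target shape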
<ide> position_embeddings = self.position_embeddings[: input_shape[1], :]
<ide>
<ide> return tf.broadcast_to(input=position_embeddings, shape=input_shape)
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def serving(self, inputs):
<ide> return self.serving_output(output)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/electra/modeling_tf_electra.py
<ide>
<ide> import warnings
<ide> from dataclasses import dataclass
<del>from typing import Optional, Tuple
<add>from typing import Any, Dict, Optional, Tuple, Union
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
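<add> # Embedding lookup written as a one-hot x embedding-matrix product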
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_r
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "max_position_embeddings": self.max_position_embeddings,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, position_ids):
<del> input_shape = shape_list(tensor=position_ids)
<add> def call(self, position_ids: tf.Tensor) -> tf.Tensor:
<add> input_shape = shape_list(position_ids)
<ide> position_embeddings = self.position_embeddings[: input_shape[1], :]
<ide>
<ide> return tf.broadcast_to(input=position_embeddings, shape=input_shape)
<ide>
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Electra
<ide> class TFElectraSelfAttention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="query",
<ide> )
<ide> self.key = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="key",
<ide> )
<ide> self.value = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="value",
<ide> )
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
<ide>
<del> def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> query_layer = self.query(inputs=hidden_states)
<ide> key_layer = self.key(inputs=hidden_states)
<ide> value_layer = self.value(inputs=hidden_states)
<ide>
<ide> # Take the dot product between "query" and "key" to get the raw
<ide> # attention scores.
<del> dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype)
<del> query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk))
<add> dk = tf.cast(self.attention_head_size, dtype=query_layer.dtype)
<add> query_layer = tf.multiply(query_layer, tf.math.rsqrt(dk))
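<add> # Scaling the queries by 1/sqrt(head_size) up front is equivalent to scaling the attention scores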
<ide> attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer)
<ide>
<ide> if attention_mask is not None:
<ide> # Apply the attention mask is (precomputed for all layers in TFElectraModel call() function)
<del> attention_scores = attention_scores + attention_mask
<add> attention_scores = tf.add(attention_scores, attention_mask)
<ide>
<ide> # Normalize the attention scores to probabilities.
<ide> attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
<ide>
<ide> # This is actually dropping out entire tokens to attend to, which might
<ide> # seem a bit unusual, but is taken from the original Transformer paper.
<del> attention_probs = self.dropout(attention_probs, training=training)
<add> attention_probs = self.dropout(inputs=attention_probs, training=training)
<ide>
<ide> # Mask heads if we want to
<ide> if head_mask is not None:
<del> attention_scores = attention_scores * head_mask
<add> attention_scores = tf.multiply(attention_scores, head_mask)
<ide>
<ide> attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer)
<ide> outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
<ide>
<ide> return outputs
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Electra
<ide> class TFElectraSelfOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abcd,cde->abe",
<ide> output_shape=(None, self.all_head_size),
<ide> bias_axes="e",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
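<add> # Dense projection and dropout, then a residual connection followed by LayerNorm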
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Electra
<ide> class TFElectraAttention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.self_attention = TFElectraSelfAttention(config, name="self")
<ide> def __init__(self, config, **kwargs):
<ide> def prune_heads(self, heads):
<ide> raise NotImplementedError
<ide>
<del> def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> input_tensor: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> self_outputs = self.self_attention(
<del> input_tensor, attention_mask, head_mask, output_attentions, training=training
<add> hidden_states=input_tensor,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<add> )
<add> attention_output = self.dense_output(
<add> hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
<ide> )
<del> attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
<ide> outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Electra
<ide> class TFElectraIntermediate(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cd->abd",
<ide> output_shape=(None, config.intermediate_size),
<ide> bias_axes="d",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<del> self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act)
<add> self.intermediate_act_fn = get_tf_activation(config.hidden_act)
<ide> else:
<ide> self.intermediate_act_fn = config.hidden_act
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.intermediate_act_fn(hidden_states)
<ide>
<ide> return hidden_states
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Electra
<ide> class TFElectraOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Electra
<ide> class TFElectraLayer(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.attention = TFElectraAttention(config, name="attention")
<ide> self.intermediate = TFElectraIntermediate(config, name="intermediate")
<ide> self.bert_output = TFElectraOutput(config, name="output")
<ide>
<del> def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> attention_outputs = self.attention(
<del> hidden_states, attention_mask, head_mask, output_attentions, training=training
<add> input_tensor=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> attention_output = attention_outputs[0]
<del> intermediate_output = self.intermediate(attention_output)
<del> layer_output = self.bert_output(intermediate_output, attention_output, training=training)
<add> intermediate_output = self.intermediate(hidden_states=attention_output)
<add> layer_output = self.bert_output(
<add> hidden_states=intermediate_output, input_tensor=attention_output, training=training
<add> )
<ide> outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Electra
<ide> class TFElectraEncoder(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.layer = [TFElectraLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
<ide>
<ide> def call(
<ide> self,
<del> hidden_states,
<del> attention_mask,
<del> head_mask,
<del> output_attentions,
<del> output_hidden_states,
<del> return_dict,
<del> training=False,
<del> ):
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> output_hidden_states: bool,
<add> return_dict: bool,
<add> training: bool = False,
<add> ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
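<add> # Accumulate per-layer hidden states and attention maps only when the caller requests them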
<ide> all_hidden_states = () if output_hidden_states else None
<ide> all_attentions = () if output_attentions else None
<ide>
<ide> def call(
<ide> all_hidden_states = all_hidden_states + (hidden_states,)
<ide>
<ide> layer_outputs = layer_module(
<del> hidden_states, attention_mask, head_mask[i], output_attentions, training=training
<add> hidden_states=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask[i],
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> hidden_states = layer_outputs[0]
<ide>
<ide> def call(
<ide> )
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Electra
<ide> class TFElectraPooler(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: ElectraConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size,
<add> units=config.hidden_size,
<ide> kernel_initializer=get_initializer(config.initializer_range),
<ide> activation="tanh",
<ide> name="dense",
<ide> )
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> # We "pool" the model by simply taking the hidden state corresponding
<ide> # to the first token.
<ide> first_token_tensor = hidden_states[:, 0]
<del> pooled_output = self.dense(first_token_tensor)
<add> pooled_output = self.dense(inputs=first_token_tensor)
<ide>
<ide> return pooled_output
<ide>
<ide>
<add># Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->Electra
<ide> class TFElectraEmbeddings(tf.keras.layers.Layer):
<ide> """Construct the embeddings from word, position and token_type embeddings."""
<ide>
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> # Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings.call with Albert->Electra
<del> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
<add> def call(
<add> self,
<add> input_ids: tf.Tensor,
<add> position_ids: tf.Tensor,
<add> token_type_ids: tf.Tensor,
<add> inputs_embeds: tf.Tensor,
<add> training: bool = False,
<add> ) -> tf.Tensor:
<ide> """
<ide> Applies embedding based on inputs tensor.
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> }
<ide> ]
<ide> )
<del> def serving(self, inputs):
<del> output = self.call(inputs)
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
<add> def serving(self, inputs: Dict[str, tf.Tensor]):
<add> output = self.call(input_ids=inputs)
<ide>
<ide> return self.serving_output(output)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/funnel/modeling_tf_funnel.py
<ide>
<ide> import warnings
<ide> from dataclasses import dataclass
<del>from typing import Optional, Tuple
<add>from typing import Any, Dict, Optional, Tuple
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> }
<ide> ]
<ide> )
<del> def serving(self, inputs):
<del> output = self.call(inputs)
<add> def serving(self, inputs: Dict[str, tf.Tensor]):
<add> output = self.call(input_ids=inputs)
<ide>
<del> return self.serving_output(output)
<add> return self.serving_output(output=output)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/longformer/modeling_tf_longformer.py
<ide>
<ide> import warnings
<ide> from dataclasses import dataclass
<del>from typing import Optional, Tuple
<add>from typing import Any, Dict, Optional, Tuple
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def build(self, input_shape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide> def call(self, position_ids):
<ide> flat_position_ids = tf.reshape(tensor=position_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.position_embeddings, indices=flat_position_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=position_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(position_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=position_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(position_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def create_position_ids_from_input_ids(self, input_ids):
<ide> tensor=input_ids, shape=(input_ids_shape[0] * input_ids_shape[1], input_ids_shape[2])
<ide> )
<ide>
<del> mask = tf.cast(x=tf.math.not_equal(x=input_ids, y=self.padding_idx), dtype=input_ids.dtype)
<del> incremental_indices = tf.math.cumsum(x=mask, axis=1) * mask
<add> mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
<add> incremental_indices = tf.math.cumsum(mask, axis=1) * mask
<ide>
<ide> return incremental_indices + self.padding_idx
<ide>
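`create_position_ids_from_input_ids` keeps padding positions pinned at `padding_idx` while real tokens count up from `padding_idx + 1`. A quick check of the cumsum trick with toy values:

```python
import tensorflow as tf

padding_idx = 1
input_ids = tf.constant([[5, 7, 9, 1, 1]])  # 1 == pad token

mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), input_ids.dtype)
incremental = tf.math.cumsum(mask, axis=1) * mask  # padding stays at 0
position_ids = incremental + padding_idx
print(position_ids.numpy())  # [[2 3 4 1 1]]
```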
<ide> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em
<ide> return final_embeddings
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Longformer
<ide> class TFLongformerIntermediate(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: LongformerConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cd->abd",
<ide> output_shape=(None, config.intermediate_size),
<ide> bias_axes="d",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<del> self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act)
<add> self.intermediate_act_fn = get_tf_activation(config.hidden_act)
<ide> else:
<ide> self.intermediate_act_fn = config.hidden_act
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.intermediate_act_fn(hidden_states)
<ide>
<ide> return hidden_states
<ide>
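The intermediate layers project with `EinsumDense` instead of `Dense`; the equation `"abc,cd->abd"` maps `(batch, seq, hidden)` to `(batch, seq, intermediate)` in a single einsum. A minimal sketch with toy sizes:

```python
import tensorflow as tf

# output_shape omits the batch dimension: (seq, intermediate_size).
dense = tf.keras.layers.experimental.EinsumDense(
    equation="abc,cd->abd",
    output_shape=(None, 16),
    bias_axes="d",
)
x = tf.random.normal([2, 5, 8])  # (batch, seq, hidden)
print(dense(x).shape)            # (2, 5, 16)
```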
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Longformer
<ide> class TFLongformerOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: LongformerConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide>
<ide> return hidden_states
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Longformer
<ide> class TFLongformerPooler(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: LongformerConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size,
<add> units=config.hidden_size,
<ide> kernel_initializer=get_initializer(config.initializer_range),
<ide> activation="tanh",
<ide> name="dense",
<ide> )
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> # We "pool" the model by simply taking the hidden state corresponding
<ide> # to the first token.
<ide> first_token_tensor = hidden_states[:, 0]
<del> pooled_output = self.dense(first_token_tensor)
<add> pooled_output = self.dense(inputs=first_token_tensor)
<ide>
<ide> return pooled_output
<ide>
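The pooler reduces the sequence to a single vector by passing the first token's hidden state through a tanh-activated `Dense`. A standalone sketch:

```python
import tensorflow as tf

hidden_size = 8
dense = tf.keras.layers.Dense(units=hidden_size, activation="tanh")
hidden_states = tf.random.normal([2, 5, hidden_size])  # (batch, seq, hidden)

first_token = hidden_states[:, 0]  # hidden state of the first ([CLS]) token
pooled = dense(first_token)
print(pooled.shape)  # (2, 8)
```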
<ide><path>src/transformers/models/lxmert/modeling_tf_lxmert.py
<ide>
<ide> import warnings
<ide> from dataclasses import dataclass
<del>from typing import Dict, Optional, Tuple
<add>from typing import Any, Dict, Optional, Tuple
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_r
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "max_position_embeddings": self.max_position_embeddings,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, position_ids):
<del> input_shape = shape_list(tensor=position_ids)
<add> def call(self, position_ids: tf.Tensor) -> tf.Tensor:
<add> input_shape = shape_list(position_ids)
<ide> position_embeddings = self.position_embeddings[: input_shape[1], :]
<ide>
<ide> return tf.broadcast_to(input=position_embeddings, shape=input_shape)
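Here the learned position table is sliced to the current sequence length and broadcast to `input_shape`. Note the broadcast only succeeds when `input_shape` ends in `hidden_size`, i.e. the layer is effectively handed a 3-D tensor (such as the embeddings) rather than 2-D ids — a quirk worth flagging. A toy version of the slice-and-broadcast:

```python
import tensorflow as tf

max_positions, hidden_size = 16, 4
table = tf.random.normal([max_positions, hidden_size])
batch, seq_len = 2, 5

# Slice the table to the current length, then broadcast over the batch.
pos_emb = table[:seq_len, :]  # (seq_len, hidden)
pos_emb = tf.broadcast_to(pos_emb, [batch, seq_len, hidden_size])
print(pos_emb.shape)  # (2, 5, 4)
```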
<ide> def call(self, hidden_states):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert
<ide> class TFLxmertPredictionHeadTransform(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: LxmertConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
<add> units=config.hidden_size,
<add> kernel_initializer=get_initializer(config.initializer_range),
<add> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<ide> def __init__(self, config, **kwargs):
<ide>
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide>
<del> def call(self, hidden_states):
<del> hidden_states = self.dense(hidden_states)
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<add> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.transform_act_fn(hidden_states)
<del> hidden_states = self.LayerNorm(hidden_states)
<add> hidden_states = self.LayerNorm(inputs=hidden_states)
<ide>
<ide> return hidden_states
<ide>
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert
<ide> class TFLxmertLMPredictionHead(tf.keras.layers.Layer):
<del> def __init__(self, config, input_embeddings, **kwargs):
<add> def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.vocab_size = config.vocab_size
<ide> def __init__(self, config, input_embeddings, **kwargs):
<ide> # an output-only bias for each token.
<ide> self.input_embeddings = input_embeddings
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_output_embeddings(self):
<add> def get_output_embeddings(self) -> tf.keras.layers.Layer:
<ide> return self.input_embeddings
<ide>
<del> def set_output_embeddings(self, value):
<add> def set_output_embeddings(self, value: tf.Variable):
<ide> self.input_embeddings.weight = value
<ide> self.input_embeddings.vocab_size = shape_list(value)[0]
<ide>
<del> def get_bias(self):
<add> def get_bias(self) -> Dict[str, tf.Variable]:
<ide> return {"bias": self.bias}
<ide>
<del> def set_bias(self, value):
<add> def set_bias(self, value: tf.Variable):
<ide> self.bias = value["bias"]
<ide> self.vocab_size = shape_list(value["bias"])[0]
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.transform(hidden_states=hidden_states)
<del> seq_length = shape_list(tensor=hidden_states)[1]
<add> seq_length = shape_list(hidden_states)[1]
<ide> hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
<ide> hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
<ide> hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
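The LM head above reuses the input embedding matrix as the output projection (weight tying): flatten the hidden states, matmul against the transposed table, then reshape back to per-token vocab logits. A toy sketch:

```python
import tensorflow as tf

vocab_size, hidden_size, seq_len = 10, 4, 5
embedding_weight = tf.random.normal([vocab_size, hidden_size])  # tied table
hidden = tf.random.normal([2, seq_len, hidden_size])

flat = tf.reshape(hidden, [-1, hidden_size])
logits = tf.matmul(flat, embedding_weight, transpose_b=True)
logits = tf.reshape(logits, [-1, seq_len, vocab_size])
print(logits.shape)  # (2, 5, 10)
```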
<ide> def call(self, hidden_states):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert
<ide> class TFLxmertMLMHead(tf.keras.layers.Layer):
<del> def __init__(self, config, input_embeddings, **kwargs):
<add> def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
<ide>
<del> def call(self, sequence_output):
<del> prediction_scores = self.predictions(sequence_output)
<add> def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
<add> prediction_scores = self.predictions(hidden_states=sequence_output)
<ide>
<ide> return prediction_scores
<ide>
<ide><path>src/transformers/models/mobilebert/modeling_tf_mobilebert.py
<ide>
<ide> import warnings
<ide> from dataclasses import dataclass
<del>from typing import Optional, Tuple
<add>from typing import Any, Dict, Optional, Tuple
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_r
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "max_position_embeddings": self.max_position_embeddings,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, position_ids):
<del> input_shape = shape_list(tensor=position_ids)
<add> def call(self, position_ids: tf.Tensor) -> tf.Tensor:
<add> input_shape = shape_list(position_ids)
<ide> position_embeddings = self.position_embeddings[: input_shape[1], :]
<ide>
<ide> return tf.broadcast_to(input=position_embeddings, shape=input_shape)
<ide> def call(
<ide> return outputs
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
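These `serving_output` overrides convert the Python tuples of per-layer `hidden_states`/`attentions` into single stacked tensors, keeping the SavedModel signature tensor-only. A quick illustration:

```python
import tensorflow as tf

# Three layers' worth of (batch, seq, hidden) states, as a Python tuple.
hidden_states = tuple(tf.random.normal([2, 5, 8]) for _ in range(3))
hs = tf.convert_to_tensor(hidden_states)  # stacked: (num_layers, batch, seq, hidden)
print(hs.shape)  # (3, 2, 5, 8)
```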
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForNextSentencePrediction.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFNextSentencePredictorOutput) -> TFNextSentencePredictorOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> }
<ide> ]
<ide> )
<del> def serving(self, inputs):
<del> output = self.call(inputs)
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
<add> def serving(self, inputs: Dict[str, tf.Tensor]):
<add> output = self.call(input_ids=inputs)
<ide>
<ide> return self.serving_output(output)
<ide>
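`serving` is the `tf.function` entry point with a dict-of-tensors input signature; the whole dict is forwarded through `call`, which unpacks it. A self-contained sketch of the pattern with a toy model (not the library's class):

```python
import tensorflow as tf

class Toy(tf.keras.Model):
    def call(self, input_ids):
        # Mimic input_processing: accept either a tensor or a dict of tensors.
        if isinstance(input_ids, dict):
            input_ids = input_ids["input_ids"]
        return tf.reduce_sum(input_ids, axis=-1)

    @tf.function(
        input_signature=[{"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids")}]
    )
    def serving(self, inputs):
        return self.call(input_ids=inputs)

print(Toy().serving({"input_ids": tf.constant([[1, 2, 3]])}).numpy())  # [6]
```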
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/mpnet/modeling_tf_mpnet.py
<ide>
<ide> import math
<ide> import warnings
<add>from typing import Any, Dict
<ide>
<ide> import tensorflow as tf
<ide>
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def build(self, input_shape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide> def call(self, position_ids):
<ide> flat_position_ids = tf.reshape(tensor=position_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.position_embeddings, indices=flat_position_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=position_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(position_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=position_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(position_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def create_position_ids_from_input_ids(self, input_ids):
<ide> tensor=input_ids, shape=(input_ids_shape[0] * input_ids_shape[1], input_ids_shape[2])
<ide> )
<ide>
<del> mask = tf.cast(x=tf.math.not_equal(x=input_ids, y=self.padding_idx), dtype=input_ids.dtype)
<del> incremental_indices = tf.math.cumsum(x=mask, axis=1) * mask
<add> mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
<add> incremental_indices = tf.math.cumsum(mask, axis=1) * mask
<ide>
<ide> return incremental_indices + self.padding_idx
<ide>
<ide> def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=F
<ide> return final_embeddings
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->MPNet
<ide> class TFMPNetPooler(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: MPNetConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size,
<add> units=config.hidden_size,
<ide> kernel_initializer=get_initializer(config.initializer_range),
<ide> activation="tanh",
<ide> name="dense",
<ide> )
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> # We "pool" the model by simply taking the hidden state corresponding
<ide> # to the first token.
<ide> first_token_tensor = hidden_states[:, 0]
<del> pooled_output = self.dense(first_token_tensor)
<add> pooled_output = self.dense(inputs=first_token_tensor)
<ide>
<ide> return pooled_output
<ide>
<ide> def __init__(self, config, **kwargs):
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="q",
<ide> )
<ide> self.k = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="k",
<ide> )
<ide> self.v = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="v",
<ide> )
<ide> self.o = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abcd,cde->abe",
<ide> output_shape=(None, self.all_head_size),
<ide> bias_axes="e",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="o",
<ide> )
<ide> self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
<ide> def call(self, hidden_states, attention_mask, head_mask, output_attentions, posi
<ide> k = self.k(hidden_states)
<ide> v = self.v(hidden_states)
<ide>
<del> dk = tf.cast(x=self.attention_head_size, dtype=q.dtype)
<del> q = tf.multiply(x=q, y=tf.math.rsqrt(x=dk))
<add> dk = tf.cast(self.attention_head_size, dtype=q.dtype)
<add> q = tf.multiply(q, tf.math.rsqrt(dk))
<ide> attention_scores = tf.einsum("aecd,abcd->acbe", k, q)
<ide>
<ide> # Apply relative position embedding (precomputed in MPNetEncoder) if provided.
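For reference, the scaled dot-product these lines implement: multiply the query by `1/sqrt(head_size)`, then einsum against the keys to form the scores. A toy check of the shapes:

```python
import tensorflow as tf

batch, seq, heads, head_size = 2, 5, 2, 4
q = tf.random.normal([batch, seq, heads, head_size])
k = tf.random.normal([batch, seq, heads, head_size])

dk = tf.cast(head_size, q.dtype)
q = tf.multiply(q, tf.math.rsqrt(dk))        # scale queries by 1/sqrt(dk)
scores = tf.einsum("aecd,abcd->acbe", k, q)  # (batch, heads, seq_q, seq_k)
print(scores.shape)  # (2, 2, 5, 5)
```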
<ide> def call(self, input_tensor, attention_mask, head_mask, output_attentions, posit
<ide> return outputs
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->MPNet
<ide> class TFMPNetIntermediate(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: MPNetConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cd->abd",
<ide> output_shape=(None, config.intermediate_size),
<ide> bias_axes="d",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<del> self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act)
<add> self.intermediate_act_fn = get_tf_activation(config.hidden_act)
<ide> else:
<ide> self.intermediate_act_fn = config.hidden_act
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.intermediate_act_fn(hidden_states)
<ide>
<ide> return hidden_states
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->MPNet
<ide> class TFMPNetOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: MPNetConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def __init__(self, config, **kwargs):
<ide> self.embeddings = TFMPNetEmbeddings(config, name="embeddings")
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
<del> def get_input_embeddings(self):
<add> def get_input_embeddings(self) -> tf.keras.layers.Layer:
<ide> return self.embeddings.word_embeddings
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
<del> def set_input_embeddings(self, value):
<add> def set_input_embeddings(self, value: tf.Variable):
<ide> self.embeddings.word_embeddings.weight = value
<ide> self.embeddings.word_embeddings.vocab_size = shape_list(value)[0]
<ide>
<ide> def call(
<ide> return outputs
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def serving(self, inputs):
<ide> return self.serving_output(output)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/openai/modeling_tf_openai.py
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def get_seq_element(sequence_position, input_batch):
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/roberta/modeling_tf_roberta.py
<ide> """ TF 2.0 RoBERTa model. """
<ide>
<ide> import warnings
<add>from typing import Any, Dict, Optional, Tuple, Union
<ide>
<add>import numpy as np
<ide> import tensorflow as tf
<ide>
<ide> from ...activations_tf import get_tf_activation
<ide> )
<ide> from ...modeling_tf_utils import (
<ide> TFMaskedLanguageModelingLoss,
<add> TFModelInputType,
<ide> TFMultipleChoiceLoss,
<ide> TFPreTrainedModel,
<ide> TFQuestionAnsweringLoss,
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def build(self, input_shape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide> def call(self, position_ids):
<ide> flat_position_ids = tf.reshape(tensor=position_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.position_embeddings, indices=flat_position_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=position_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(position_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=position_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(position_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
<ide> def create_position_ids_from_input_ids(self, input_ids):
<ide> tensor=input_ids, shape=(input_ids_shape[0] * input_ids_shape[1], input_ids_shape[2])
<ide> )
<ide>
<del> mask = tf.cast(x=tf.math.not_equal(x=input_ids, y=self.padding_idx), dtype=input_ids.dtype)
<del> incremental_indices = tf.math.cumsum(x=mask, axis=1) * mask
<add> mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
<add> incremental_indices = tf.math.cumsum(mask, axis=1) * mask
<ide>
<ide> return incremental_indices + self.padding_idx
<ide>
<ide> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em
<ide> return final_embeddings
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Roberta
<ide> class TFRobertaPooler(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size,
<add> units=config.hidden_size,
<ide> kernel_initializer=get_initializer(config.initializer_range),
<ide> activation="tanh",
<ide> name="dense",
<ide> )
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> # We "pool" the model by simply taking the hidden state corresponding
<ide> # to the first token.
<ide> first_token_tensor = hidden_states[:, 0]
<del> pooled_output = self.dense(first_token_tensor)
<add> pooled_output = self.dense(inputs=first_token_tensor)
<ide>
<ide> return pooled_output
<ide>
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Roberta
<ide> class TFRobertaSelfAttention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="query",
<ide> )
<ide> self.key = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="key",
<ide> )
<ide> self.value = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="value",
<ide> )
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
<ide>
<del> def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> query_layer = self.query(inputs=hidden_states)
<ide> key_layer = self.key(inputs=hidden_states)
<ide> value_layer = self.value(inputs=hidden_states)
<ide>
<ide> # Take the dot product between "query" and "key" to get the raw
<ide> # attention scores.
<del> dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype)
<del> query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk))
<add> dk = tf.cast(self.attention_head_size, dtype=query_layer.dtype)
<add> query_layer = tf.multiply(query_layer, tf.math.rsqrt(dk))
<ide> attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer)
<ide>
<ide> if attention_mask is not None:
<ide> # Apply the attention mask (precomputed for all layers in TFRobertaModel call() function)
<del> attention_scores = attention_scores + attention_mask
<add> attention_scores = tf.add(attention_scores, attention_mask)
<ide>
<ide> # Normalize the attention scores to probabilities.
<ide> attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
<ide>
<ide> # This is actually dropping out entire tokens to attend to, which might
<ide> # seem a bit unusual, but is taken from the original Transformer paper.
<del> attention_probs = self.dropout(attention_probs, training=training)
<add> attention_probs = self.dropout(inputs=attention_probs, training=training)
<ide>
<ide> # Mask heads if we want to
<ide> if head_mask is not None:
<del> attention_scores = attention_scores * head_mask
<add> attention_probs = tf.multiply(attention_probs, head_mask)
<ide>
<ide> attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer)
<ide> outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
<ide>
<ide> return outputs
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Roberta
<ide> class TFRobertaSelfOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abcd,cde->abe",
<ide> output_shape=(None, self.all_head_size),
<ide> bias_axes="e",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Roberta
<ide> class TFRobertaAttention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.self_attention = TFRobertaSelfAttention(config, name="self")
<ide> def __init__(self, config, **kwargs):
<ide> def prune_heads(self, heads):
<ide> raise NotImplementedError
<ide>
<del> def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> input_tensor: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> self_outputs = self.self_attention(
<del> input_tensor, attention_mask, head_mask, output_attentions, training=training
<add> hidden_states=input_tensor,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<add> )
<add> attention_output = self.dense_output(
<add> hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
<ide> )
<del> attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
<ide> outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Roberta
<ide> class TFRobertaIntermediate(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cd->abd",
<ide> output_shape=(None, config.intermediate_size),
<ide> bias_axes="d",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<del> self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act)
<add> self.intermediate_act_fn = get_tf_activation(config.hidden_act)
<ide> else:
<ide> self.intermediate_act_fn = config.hidden_act
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.intermediate_act_fn(hidden_states)
<ide>
<ide> return hidden_states
<ide>
<ide>
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Roberta
<ide> class TFRobertaOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Roberta
<ide> class TFRobertaLayer(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.attention = TFRobertaAttention(config, name="attention")
<ide> self.intermediate = TFRobertaIntermediate(config, name="intermediate")
<ide> self.bert_output = TFRobertaOutput(config, name="output")
<ide>
<del> def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> attention_outputs = self.attention(
<del> hidden_states, attention_mask, head_mask, output_attentions, training=training
<add> input_tensor=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> attention_output = attention_outputs[0]
<del> intermediate_output = self.intermediate(attention_output)
<del> layer_output = self.bert_output(intermediate_output, attention_output, training=training)
<add> intermediate_output = self.intermediate(hidden_states=attention_output)
<add> layer_output = self.bert_output(
<add> hidden_states=intermediate_output, input_tensor=attention_output, training=training
<add> )
<ide> outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Roberta
<ide> class TFRobertaEncoder(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: RobertaConfig, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.layer = [TFRobertaLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
<ide>
<ide> def call(
<ide> self,
<del> hidden_states,
<del> attention_mask,
<del> head_mask,
<del> output_attentions,
<del> output_hidden_states,
<del> return_dict,
<del> training=False,
<del> ):
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> output_hidden_states: bool,
<add> return_dict: bool,
<add> training: bool = False,
<add> ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
<ide> all_hidden_states = () if output_hidden_states else None
<ide> all_attentions = () if output_attentions else None
<ide>
<ide> def call(
<ide> all_hidden_states = all_hidden_states + (hidden_states,)
<ide>
<ide> layer_outputs = layer_module(
<del> hidden_states, attention_mask, head_mask[i], output_attentions, training=training
<add> hidden_states=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask[i],
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> hidden_states = layer_outputs[0]
<ide>
<ide> def __init__(self, config, add_pooling_layer=True, **kwargs):
<ide> self.embeddings = TFRobertaEmbeddings(config, name="embeddings")
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
<del> def get_input_embeddings(self):
<add> def get_input_embeddings(self) -> tf.keras.layers.Layer:
<ide> return self.embeddings.word_embeddings
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
<del> def set_input_embeddings(self, value):
<add> def set_input_embeddings(self, value: tf.Variable):
<ide> self.embeddings.word_embeddings.weight = value
<ide> self.embeddings.word_embeddings.vocab_size = shape_list(value)[0]
<ide>
<ide> class PreTrainedModel
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> training: bool = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> config=self.config,
<ide> def call(
<ide> if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
<ide> raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
<ide> elif inputs["input_ids"] is not None:
<del> input_shape = shape_list(inputs["input_ids"])
<add> input_shape = shape_list(tensor=inputs["input_ids"])
<ide> elif inputs["inputs_embeds"] is not None:
<del> input_shape = shape_list(inputs["inputs_embeds"])[:-1]
<add> input_shape = shape_list(tensor=inputs["inputs_embeds"])[:-1]
<ide> else:
<ide> raise ValueError("You have to specify either input_ids or inputs_embeds")
<ide>
<ide> if inputs["attention_mask"] is None:
<del> inputs["attention_mask"] = tf.fill(input_shape, 1)
<add> inputs["attention_mask"] = tf.fill(dims=input_shape, value=1)
<ide>
<ide> if inputs["token_type_ids"] is None:
<del> inputs["token_type_ids"] = tf.fill(input_shape, 0)
<add> inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
<ide>
<ide> embedding_output = self.embeddings(
<del> inputs["input_ids"],
<del> inputs["position_ids"],
<del> inputs["token_type_ids"],
<del> inputs["inputs_embeds"],
<add> input_ids=inputs["input_ids"],
<add> position_ids=inputs["position_ids"],
<add> token_type_ids=inputs["token_type_ids"],
<add> inputs_embeds=inputs["inputs_embeds"],
<ide> training=inputs["training"],
<ide> )
<ide>
<ide> def call(
<ide> # positions we want to attend and -10000.0 for masked positions.
<ide> # Since we are adding it to the raw scores before the softmax, this is
<ide> # effectively the same as removing these entirely.
<del> extended_attention_mask = tf.cast(extended_attention_mask, embedding_output.dtype)
<del> extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
<add> extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
<add> extended_attention_mask = tf.multiply(tf.subtract(1.0, extended_attention_mask), -10000.0)
<ide>
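The rewritten line above computes the standard additive attention mask: `tf.multiply(tf.subtract(1.0, mask), -10000.0)` is algebraically identical to the old `(1.0 - mask) * -10000.0`, just spelled with explicit ops. A minimal standalone sketch of the effect (mask values are illustrative):

```python
import tensorflow as tf

# 1 = real token, 0 = padding (illustrative batch of one, three positions).
attention_mask = tf.constant([[1.0, 1.0, 0.0]])

# Same arithmetic as the hunk above: keep-positions map to 0.0,
# masked positions to -10000.0, which softmax turns into ~0 probability.
additive_mask = tf.multiply(tf.subtract(1.0, attention_mask), -10000.0)

scores = tf.constant([[0.5, 0.1, 0.9]])
print(tf.nn.softmax(scores + additive_mask, axis=-1).numpy())
# -> roughly [0.60, 0.40, 0.00]: the padded position is effectively never attended
```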
<ide> # Prepare head mask if needed
<ide> # 1.0 in head_mask indicate we keep the head
<ide> def call(
<ide> if inputs["head_mask"] is not None:
<ide> raise NotImplementedError
<ide> else:
<del> inputs["head_mask"] = [None] * self.num_hidden_layers
<del> # head_mask = tf.constant([0] * self.num_hidden_layers)
<add> inputs["head_mask"] = [None] * self.config.num_hidden_layers
<ide>
<ide> encoder_outputs = self.encoder(
<del> embedding_output,
<del> extended_attention_mask,
<del> inputs["head_mask"],
<del> inputs["output_attentions"],
<del> inputs["output_hidden_states"],
<del> inputs["return_dict"],
<add> hidden_states=embedding_output,
<add> attention_mask=extended_attention_mask,
<add> head_mask=inputs["head_mask"],
<add> output_attentions=inputs["output_attentions"],
<add> output_hidden_states=inputs["output_hidden_states"],
<add> return_dict=inputs["return_dict"],
<ide> training=inputs["training"],
<ide> )
<ide>
<ide> sequence_output = encoder_outputs[0]
<del> pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
<add> pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
<ide>
<ide> if not inputs["return_dict"]:
<ide> return (
<ide> def call(
<ide> return outputs
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
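`serving_output` converts the optional tuples of per-layer tensors into single stacked tensors, since a SavedModel serving signature cannot return a variable-length Python tuple. A small sketch of what `tf.convert_to_tensor` does to such a tuple (shapes are illustrative):

```python
import tensorflow as tf

# Pretend hidden states from a 3-layer model: a tuple of (batch, seq, hidden) tensors.
hidden_states = tuple(tf.random.normal((2, 4, 8)) for _ in range(3))

stacked = tf.convert_to_tensor(hidden_states)
print(stacked.shape)  # (3, 2, 4, 8): one tensor with the layer axis in front
```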
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def serving(self, inputs):
<ide> return self.serving_output(output)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>src/transformers/models/xlm/modeling_tf_xlm.py
<ide> import itertools
<ide> import warnings
<ide> from dataclasses import dataclass
<del>from typing import Optional, Tuple
<add>from typing import Dict, Optional, Tuple
<ide>
<ide> import numpy as np
<ide> import tensorflow as tf
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> }
<ide> ]
<ide> )
<del> def serving(self, inputs):
<del> output = self.call(inputs)
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
<add> def serving(self, inputs: Dict[str, tf.Tensor]):
<add> output = self.call(input_ids=inputs)
<ide>
<ide> return self.serving_output(output)
<ide>
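For context, `serving` methods like the one above are `tf.function`s with a fixed `input_signature`, so they can be exported as SavedModel signatures; `call` then unpacks the dict via `input_processing`. A hedged toy-module sketch of that export pattern (not the transformers class itself; names, shapes, and the export path are illustrative):

```python
import tensorflow as tf

class ToyServable(tf.Module):
    @tf.function(
        input_signature=[{"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids")}]
    )
    def serving(self, inputs):
        # Stand-in for model.call(input_ids=inputs): echo the ids as float "logits".
        return {"logits": tf.cast(inputs["input_ids"], tf.float32)}

toy = ToyServable()
tf.saved_model.save(toy, "/tmp/toy_servable", signatures={"serving_default": toy.serving})
```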
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide><path>templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py
<ide>
<ide> {% if cookiecutter.is_encoder_decoder_model == "False" %}
<ide>
<add>from typing import Any, Dict, Optional, Tuple, Union
<ide>
<add>import numpy as np
<ide> import tensorflow as tf
<del>from tensorflow.keras import layers
<del>
<del>from transformers.modeling_tf_outputs import TFCausalLMOutput
<ide>
<ide> from ...activations_tf import get_tf_activation
<ide> from ...file_utils import (
<ide> from ...modeling_tf_outputs import (
<ide> TFBaseModelOutput,
<ide> TFBaseModelOutputWithPooling,
<add> TFCausalLMOutput,
<ide> TFMaskedLMOutput,
<ide> TFMultipleChoiceModelOutput,
<ide> TFQuestionAnsweringModelOutput,
<ide> from ...modeling_tf_utils import (
<ide> TFCausalLanguageModelingLoss,
<ide> TFMaskedLanguageModelingLoss,
<add> TFModelInputType,
<ide> TFMultipleChoiceLoss,
<ide> TFPreTrainedModel,
<ide> TFQuestionAnsweringLoss,
<ide> def __init__(self, vocab_size: int, hidden_size: int, initializer_range: float,
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.weight = self.add_weight(
<ide> name="weight",
<ide> shape=[self.vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "vocab_size": self.vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
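The `get_config` pattern above (the layer's own constructor arguments merged over `super().get_config()`) is what makes these custom layers serializable by Keras. A minimal round-trip sketch on a toy layer:

```python
import tensorflow as tf

class ToyEmbeddings(tf.keras.layers.Layer):
    def __init__(self, vocab_size: int = 10, hidden_size: int = 4, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    def get_config(self):
        config = {"vocab_size": self.vocab_size, "hidden_size": self.hidden_size}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

layer = ToyEmbeddings(vocab_size=100, hidden_size=16, name="toy")
clone = ToyEmbeddings.from_config(layer.get_config())  # rebuilds an equivalent layer
assert clone.vocab_size == 100 and clone.name == "toy"
```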
<del> def call(self, input_ids):
<add> def call(self, input_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_input_ids = tf.reshape(tensor=input_ids, shape=[-1])
<ide> embeddings = tf.gather(params=self.weight, indices=flat_input_ids)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=input_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(input_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=input_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(input_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
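The flatten, `tf.gather`, reshape sequence above is an embedding lookup written so it works for ids with any leading dimensions. Sketch with concrete (illustrative) shapes:

```python
import tensorflow as tf

vocab_size, hidden_size = 6, 3
weight = tf.random.normal((vocab_size, hidden_size))
input_ids = tf.constant([[1, 4], [2, 0]])  # (batch=2, seq=2)

flat = tf.reshape(input_ids, [-1])                                   # (4,)
emb = tf.gather(params=weight, indices=flat)                         # (4, hidden)
emb = tf.reshape(emb, tf.concat([tf.shape(input_ids), [hidden_size]], axis=0))
print(emb.shape)  # (2, 2, 3)

# Same result as gathering with the 2-D ids directly:
assert tf.reduce_all(emb == tf.gather(weight, input_ids))
```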
<ide> def __init__(self, type_vocab_size: int, hidden_size: int, initializer_range: fl
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.token_type_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.type_vocab_size, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<del> super().build(input_shape=input_shape)
<add> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "type_vocab_size": self.type_vocab_size,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, token_type_ids):
<add> def call(self, token_type_ids: tf.Tensor) -> tf.Tensor:
<ide> flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=[-1])
<ide> one_hot_data = tf.one_hot(indices=flat_token_type_ids, depth=self.type_vocab_size, dtype=self._compute_dtype)
<ide> embeddings = tf.matmul(a=one_hot_data, b=self.token_type_embeddings)
<ide> embeddings = tf.reshape(
<del> tensor=embeddings, shape=tf.concat(values=[shape_list(tensor=token_type_ids), [self.hidden_size]], axis=0)
<add> tensor=embeddings, shape=tf.concat(values=[shape_list(token_type_ids), [self.hidden_size]], axis=0)
<ide> )
<ide>
<del> embeddings.set_shape(shape=token_type_ids.shape.as_list() + [self.hidden_size])
<add> embeddings.set_shape(token_type_ids.shape.as_list() + [self.hidden_size])
<ide>
<ide> return embeddings
<ide>
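The token-type table is looked up with one-hot times matmul instead of `tf.gather`; for a table this small (usually two segment types) the two are equivalent. A standalone sketch:

```python
import tensorflow as tf

type_vocab_size, hidden_size = 2, 4
table = tf.random.normal((type_vocab_size, hidden_size))
token_type_ids = tf.constant([[0, 0, 1]])  # (batch=1, seq=3)

one_hot = tf.one_hot(tf.reshape(token_type_ids, [-1]), depth=type_vocab_size)  # (3, 2)
embeds = tf.matmul(one_hot, table)                                             # (3, hidden)
embeds = tf.reshape(embeds, tf.concat([tf.shape(token_type_ids), [hidden_size]], axis=0))

tf.debugging.assert_near(embeds, tf.gather(table, token_type_ids))  # same rows either way
```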
<ide> def __init__(self, max_position_embeddings: int, hidden_size: int, initializer_r
<ide> self.hidden_size = hidden_size
<ide> self.initializer_range = initializer_range
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.position_embeddings = self.add_weight(
<ide> name="embeddings",
<ide> shape=[self.max_position_embeddings, self.hidden_size],
<del> initializer=get_initializer(initializer_range=self.initializer_range),
<add> initializer=get_initializer(self.initializer_range),
<ide> )
<ide>
<ide> super().build(input_shape)
<ide>
<del> def get_config(self):
<add> def get_config(self) -> Dict[str, Any]:
<ide> config = {
<ide> "max_position_embeddings": self.max_position_embeddings,
<ide> "hidden_size": self.hidden_size,
<ide> def get_config(self):
<ide>
<ide> return dict(list(base_config.items()) + list(config.items()))
<ide>
<del> def call(self, position_ids):
<del> input_shape = shape_list(tensor=position_ids)
<add> def call(self, position_ids: tf.Tensor) -> tf.Tensor:
<add> input_shape = shape_list(position_ids)
<ide> position_embeddings = self.position_embeddings[: input_shape[1], :]
<ide>
<ide> return tf.broadcast_to(input=position_embeddings, shape=input_shape)
<ide> def call(self, position_ids):
<ide> class TF{{cookiecutter.camelcase_modelname}}Embeddings(tf.keras.layers.Layer):
<ide> """Construct the embeddings from word, position and token_type embeddings."""
<ide>
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.word_embeddings = TF{{cookiecutter.camelcase_modelname}}WordEmbeddings(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
<add> def call(
<add> self,
<add> input_ids: tf.Tensor,
<add> position_ids: tf.Tensor,
<add> token_type_ids: tf.Tensor,
<add> inputs_embeds: tf.Tensor,
<add> training: bool = False,
<add> ) -> tf.Tensor:
<ide> """
<ide> Applies embedding based on inputs tensor.
<ide>
<ide> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em
<ide> assert not (input_ids is None and inputs_embeds is None)
<ide>
<ide> if input_ids is not None:
<del> inputs_embeds = self.word_embeddings(input_ids=input_ids)
<add> inputs_embeds = self.word_embeddings(input_ids)
<ide>
<ide> if token_type_ids is None:
<del> input_shape = shape_list(tensor=inputs_embeds)[:-1]
<add> input_shape = shape_list(inputs_embeds)[:-1]
<ide> token_type_ids = tf.fill(dims=input_shape, value=0)
<ide>
<ide> if position_ids is None:
<del> position_embeds = self.position_embeddings(position_ids=inputs_embeds)
<add> position_embeds = self.position_embeddings(inputs_embeds)
<ide> else:
<del> position_embeds = self.position_embeddings(position_ids=position_ids)
<add> position_embeds = self.position_embeddings(position_ids)
<ide>
<del> token_type_embeds = self.token_type_embeddings(token_type_ids=token_type_ids)
<add> token_type_embeds = self.token_type_embeddings(token_type_ids)
<ide> final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds])
<ide> final_embeddings = self.LayerNorm(inputs=final_embeddings)
<ide> final_embeddings = self.dropout(inputs=final_embeddings, training=training)
<ide> def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}SelfAttention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="query",
<ide> )
<ide> self.key = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="key",
<ide> )
<ide> self.value = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cde->abde",
<ide> output_shape=(None, config.num_attention_heads, self.attention_head_size),
<ide> bias_axes="de",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="value",
<ide> )
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
<ide>
<del> def call(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> query_layer = self.query(inputs=hidden_states)
<ide> key_layer = self.key(inputs=hidden_states)
<ide> value_layer = self.value(inputs=hidden_states)
<ide>
<ide> # Take the dot product between "query" and "key" to get the raw
<ide> # attention scores.
<del> dk = tf.cast(x=self.attention_head_size, dtype=query_layer.dtype)
<del> query_layer = tf.multiply(x=query_layer, y=tf.math.rsqrt(x=dk))
<add> dk = tf.cast(self.attention_head_size, dtype=query_layer.dtype)
<add> query_layer = tf.multiply(query_layer, tf.math.rsqrt(dk))
<ide> attention_scores = tf.einsum("aecd,abcd->acbe", key_layer, query_layer)
<ide>
<ide> if attention_mask is not None:
<ide> # Apply the attention mask is (precomputed for all layers in TF{{cookiecutter.camelcase_modelname}}Model call() function)
<del> attention_scores = attention_scores + attention_mask
<add> attention_scores = tf.add(attention_scores, attention_mask)
<ide>
<ide> # Normalize the attention scores to probabilities.
<ide> attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
<ide>
<ide> # This is actually dropping out entire tokens to attend to, which might
<ide> # seem a bit unusual, but is taken from the original Transformer paper.
<del> attention_probs = self.dropout(attention_probs, training=training)
<add> attention_probs = self.dropout(inputs=attention_probs, training=training)
<ide>
<ide> # Mask heads if we want to
<ide> if head_mask is not None:
<del> attention_scores = attention_scores * head_mask
<add> attention_probs = tf.multiply(attention_probs, head_mask)
<ide>
<ide> attention_output = tf.einsum("acbe,aecd->abcd", attention_probs, value_layer)
<ide> outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
<ide>
<ide> return outputs
<ide>
<ide>
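In this attention the head axis is kept explicit end to end: the `EinsumDense` projections emit `(batch, seq, heads, head_size)` directly, and the two `tf.einsum` equations above compute scores and context without any transpose/reshape pair. A shape-checked sketch using the same equation strings (sizes are illustrative):

```python
import tensorflow as tf

batch, seq, heads, head_size = 2, 5, 3, 4

q = tf.random.normal((batch, seq, heads, head_size))  # what the "query" EinsumDense emits
k = tf.random.normal((batch, seq, heads, head_size))
v = tf.random.normal((batch, seq, heads, head_size))

q = q * tf.math.rsqrt(tf.cast(head_size, q.dtype))    # scale queries, as in the hunk
scores = tf.einsum("aecd,abcd->acbe", k, q)           # (batch, heads, q_seq, k_seq)
probs = tf.nn.softmax(scores, axis=-1)
context = tf.einsum("acbe,aecd->abcd", probs, v)      # (batch, seq, heads, head_size)
print(context.shape)
```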
<del># Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput
<add># Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}SelfOutput(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> if config.hidden_size % config.num_attention_heads != 0:
<ide> def __init__(self, config, **kwargs):
<ide> equation="abcd,cde->abe",
<ide> output_shape=(None, self.all_head_size),
<ide> bias_axes="e",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}Attention(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.self_attention = TF{{cookiecutter.camelcase_modelname}}SelfAttention(config, name="self")
<ide> def __init__(self, config, **kwargs):
<ide> def prune_heads(self, heads):
<ide> raise NotImplementedError
<ide>
<del> def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> input_tensor: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> self_outputs = self.self_attention(
<del> input_tensor, attention_mask, head_mask, output_attentions, training=training
<add> hidden_states=input_tensor,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<add> )
<add> attention_output = self.dense_output(
<add> hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
<ide> )
<del> attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
<ide> outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}Intermediate(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> equation="abc,cd->abd",
<ide> output_shape=(None, config.intermediate_size),
<ide> bias_axes="d",
<del> kernel_initializer=get_initializer(initializer_range=config.initializer_range),
<add> kernel_initializer=get_initializer(config.initializer_range),
<ide> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<del> self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act)
<add> self.intermediate_act_fn = get_tf_activation(config.hidden_act)
<ide> else:
<ide> self.intermediate_act_fn = config.hidden_act
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.intermediate_act_fn(hidden_states)
<ide>
<ide> def call(self, hidden_states):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}Output(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.experimental.EinsumDense(
<ide> def __init__(self, config, **kwargs):
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide>
<del> def call(self, hidden_states, input_tensor, training=False):
<add> def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
<ide> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.dropout(inputs=hidden_states, training=training)
<ide> hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
<ide> def call(self, hidden_states, input_tensor, training=False):
<ide>
<ide>
<ide> class TF{{cookiecutter.camelcase_modelname}}Layer(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.attention = TF{{cookiecutter.camelcase_modelname}}Attention(config, name="attention")
<ide> self.intermediate = TF{{cookiecutter.camelcase_modelname}}Intermediate(config, name="intermediate")
<del> self.{{cookiecutter.lowercase_modelname}}_output = TF{{cookiecutter.camelcase_modelname}}Output(config, name="output")
<add> self.bert_output = TF{{cookiecutter.camelcase_modelname}}Output(config, name="output")
<ide>
<del> # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer.call with bert->{{cookiecutter.lowercase_modelname}}
<del> def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
<add> def call(
<add> self,
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> training: bool = False,
<add> ) -> Tuple[tf.Tensor]:
<ide> attention_outputs = self.attention(
<del> hidden_states, attention_mask, head_mask, output_attentions, training=training
<add> input_tensor=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask,
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> attention_output = attention_outputs[0]
<del> intermediate_output = self.intermediate(attention_output)
<del> layer_output = self.{{cookiecutter.lowercase_modelname}}_output(intermediate_output, attention_output, training=training)
<add> intermediate_output = self.intermediate(hidden_states=attention_output)
<add> layer_output = self.bert_output(hidden_states=intermediate_output, input_tensor=attention_output, training=training)
<ide> outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
<ide>
<ide> return outputs
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}Encoder(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.layer = [TF{{cookiecutter.camelcase_modelname}}Layer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
<ide>
<ide> def call(
<ide> self,
<del> hidden_states,
<del> attention_mask,
<del> head_mask,
<del> output_attentions,
<del> output_hidden_states,
<del> return_dict,
<del> training=False,
<del> ):
<add> hidden_states: tf.Tensor,
<add> attention_mask: tf.Tensor,
<add> head_mask: tf.Tensor,
<add> output_attentions: bool,
<add> output_hidden_states: bool,
<add> return_dict: bool,
<add> training: bool = False,
<add> ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
<ide> all_hidden_states = () if output_hidden_states else None
<ide> all_attentions = () if output_attentions else None
<ide>
<ide> def call(
<ide> all_hidden_states = all_hidden_states + (hidden_states,)
<ide>
<ide> layer_outputs = layer_module(
<del> hidden_states, attention_mask, head_mask[i], output_attentions, training=training
<add> hidden_states=hidden_states,
<add> attention_mask=attention_mask,
<add> head_mask=head_mask[i],
<add> output_attentions=output_attentions,
<add> training=training,
<ide> )
<ide> hidden_states = layer_outputs[0]
<ide>
<ide> def call(
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(tf.keras.layers.Layer):
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
<add> units=config.hidden_size,
<add> kernel_initializer=get_initializer(config.initializer_range),
<add> name="dense",
<ide> )
<ide>
<ide> if isinstance(config.hidden_act, str):
<ide> def __init__(self, config, **kwargs):
<ide>
<ide> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
<ide>
<del> def call(self, hidden_states):
<del> hidden_states = self.dense(hidden_states)
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<add> hidden_states = self.dense(inputs=hidden_states)
<ide> hidden_states = self.transform_act_fn(hidden_states)
<del> hidden_states = self.LayerNorm(hidden_states)
<add> hidden_states = self.LayerNorm(inputs=hidden_states)
<ide>
<ide> return hidden_states
<ide>
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}LMPredictionHead(tf.keras.layers.Layer):
<del> def __init__(self, config, input_embeddings, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.vocab_size = config.vocab_size
<ide> self.hidden_size = config.hidden_size
<del>
<add>
<ide> self.transform = TF{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(config, name="transform")
<ide>
<ide> # The output weights are the same as the input embeddings, but there is
<ide> # an output-only bias for each token.
<ide> self.input_embeddings = input_embeddings
<ide>
<del> def build(self, input_shape):
<add> def build(self, input_shape: tf.TensorShape):
<ide> self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
<ide>
<del> super().build(input_shape)
<del>
<del> def get_output_embeddings(self):
<add> super().build(input_shape=input_shape)
<add>
<add> def get_output_embeddings(self) -> tf.keras.layers.Layer:
<ide> return self.input_embeddings
<ide>
<del> def set_output_embeddings(self, value):
<add> def set_output_embeddings(self, value: tf.Variable):
<ide> self.input_embeddings.weight = value
<ide> self.input_embeddings.vocab_size = shape_list(value)[0]
<ide>
<del> def get_bias(self):
<add> def get_bias(self) -> Dict[str, tf.Variable]:
<ide> return {"bias": self.bias}
<ide>
<del> def set_bias(self, value):
<add> def set_bias(self, value: tf.Variable):
<ide> self.bias = value["bias"]
<ide> self.vocab_size = shape_list(value["bias"])[0]
<ide>
<del> def call(self, hidden_states):
<add> def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
<ide> hidden_states = self.transform(hidden_states=hidden_states)
<del> seq_length = shape_list(tensor=hidden_states)[1]
<add> seq_length = shape_list(hidden_states)[1]
<ide> hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
<ide> hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
<ide> hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
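The LM head reuses the input embedding matrix as its output projection (`transpose_b=True`), i.e. weight tying; only the bias (added just after this hunk) is head-specific. Toy-shape sketch:

```python
import tensorflow as tf

vocab_size, hidden_size, batch, seq = 10, 4, 2, 3
embedding_weight = tf.random.normal((vocab_size, hidden_size))  # shared with the input embeddings
hidden_states = tf.random.normal((batch, seq, hidden_size))
bias = tf.zeros((vocab_size,))  # the per-vocab bias the head adds afterwards

flat = tf.reshape(hidden_states, [-1, hidden_size])              # (batch*seq, hidden)
logits = tf.matmul(flat, embedding_weight, transpose_b=True)     # (batch*seq, vocab)
logits = tf.reshape(logits, [-1, seq, vocab_size]) + bias
print(logits.shape)  # (2, 3, 10)
```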
<ide> def call(self, hidden_states):
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->{{cookiecutter.camelcase_modelname}}
<ide> class TF{{cookiecutter.camelcase_modelname}}MLMHead(tf.keras.layers.Layer):
<del> def __init__(self, config, input_embeddings, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.predictions = TF{{cookiecutter.camelcase_modelname}}LMPredictionHead(config, input_embeddings, name="predictions")
<ide>
<del> def call(self, sequence_output):
<del> prediction_scores = self.predictions(sequence_output)
<add> def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
<add> prediction_scores = self.predictions(hidden_states=sequence_output)
<ide>
<ide> return prediction_scores
<ide>
<ide> def call(self, sequence_output):
<ide> class TF{{cookiecutter.camelcase_modelname}}MainLayer(tf.keras.layers.Layer):
<ide> config_class = {{cookiecutter.camelcase_modelname}}Config
<ide>
<del> def __init__(self, config, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, add_pooling_layer: bool = True, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> self.config = config
<del> self.num_hidden_layers = config.num_hidden_layers
<del> self.initializer_range = config.initializer_range
<del> self.output_attentions = config.output_attentions
<del> self.output_hidden_states = config.output_hidden_states
<del> self.return_dict = config.use_return_dict
<add>
<ide> self.embeddings = TF{{cookiecutter.camelcase_modelname}}Embeddings(config, name="embeddings")
<ide> self.encoder = TF{{cookiecutter.camelcase_modelname}}Encoder(config, name="encoder")
<del> self.config = config
<ide>
<del> def get_input_embeddings(self):
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
<add> def get_input_embeddings(self) -> tf.keras.layers.Layer:
<ide> return self.embeddings.word_embeddings
<ide>
<del> def set_input_embeddings(self, value):
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
<add> def set_input_embeddings(self, value: tf.Variable):
<ide> self.embeddings.word_embeddings.weight = value
<ide> self.embeddings.word_embeddings.vocab_size = shape_list(value)[0]
<ide>
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
<ide> def _prune_heads(self, heads_to_prune):
<del> """Prunes heads of the model.
<del> heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
<del> See base class PreTrainedModel
<add> """
<add> Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
<add> class PreTrainedModel
<ide> """
<ide> raise NotImplementedError
<ide>
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> training: bool = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> config=self.config,
<ide> def call(
<ide> training=training,
<ide> kwargs_call=kwargs,
<ide> )
<del>
<add>
<ide> if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
<ide> raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
<ide> elif inputs["input_ids"] is not None:
<ide> def call(
<ide> raise ValueError("You have to specify either input_ids or inputs_embeds")
<ide>
<ide> if inputs["attention_mask"] is None:
<del> inputs["attention_mask"] = tf.fill(input_shape, 1)
<add> inputs["attention_mask"] = tf.fill(dims=input_shape, value=1)
<ide>
<ide> if inputs["token_type_ids"] is None:
<del> inputs["token_type_ids"] = tf.fill(input_shape, 0)
<add> inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
<ide>
<ide> embedding_output = self.embeddings(
<del> inputs["input_ids"],
<del> inputs["position_ids"],
<del> inputs["token_type_ids"],
<del> inputs["inputs_embeds"],
<add> input_ids=inputs["input_ids"],
<add> position_ids=inputs["position_ids"],
<add> token_type_ids=inputs["token_type_ids"],
<add> inputs_embeds=inputs["inputs_embeds"],
<ide> training=inputs["training"],
<ide> )
<ide>
<ide> def call(
<ide> # positions we want to attend and -10000.0 for masked positions.
<ide> # Since we are adding it to the raw scores before the softmax, this is
<ide> # effectively the same as removing these entirely.
<del> extended_attention_mask = tf.cast(extended_attention_mask, embedding_output.dtype)
<del> extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
<add> extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
<add> extended_attention_mask = tf.multiply(tf.subtract(1.0, extended_attention_mask), -10000.0)
<ide>
<ide> # Prepare head mask if needed
<ide> # 1.0 in head_mask indicate we keep the head
<ide> def call(
<ide> if inputs["head_mask"] is not None:
<ide> raise NotImplementedError
<ide> else:
<del> inputs["head_mask"] = [None] * self.num_hidden_layers
<add> inputs["head_mask"] = [None] * self.config.num_hidden_layers
<ide>
<ide> encoder_outputs = self.encoder(
<del> embedding_output,
<del> extended_attention_mask,
<del> inputs["head_mask"],
<del> inputs["output_attentions"],
<del> inputs["output_hidden_states"],
<del> inputs["return_dict"],
<add> hidden_states=embedding_output,
<add> attention_mask=extended_attention_mask,
<add> head_mask=inputs["head_mask"],
<add> output_attentions=inputs["output_attentions"],
<add> output_hidden_states=inputs["output_hidden_states"],
<add> return_dict=inputs["return_dict"],
<ide> training=inputs["training"],
<ide> )
<ide>
<ide> sequence_output = encoder_outputs[0]
<ide>
<del> if not return_dict:
<add> if not inputs["return_dict"]:
<ide> return (
<ide> sequence_output,
<ide> ) + encoder_outputs[1:]
<ide> class TF{{cookiecutter.camelcase_modelname}}PreTrainedModel(TFPreTrainedModel):
<ide>
<ide> {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
<ide> Args:
<del> input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
<add> input_ids (:obj:`np.ndarray`, :obj:`tf.Tensor`, :obj:`List[tf.Tensor]` :obj:`Dict[str, tf.Tensor]` or :obj:`Dict[str, np.ndarray]` and each example must have the shape :obj:`({0})`):
<ide> Indices of input sequence tokens in the vocabulary.
<ide>
<del> Indices can be obtained using :class:`~transformers.{{cookiecutter.camelcase_modelname}}Tokenizer`.
<del> See :func:`transformers.PreTrainedTokenizer.__call__` and
<del> :func:`transformers.PreTrainedTokenizer.encode` for details.
<add> Indices can be obtained using :class:`~transformers.{{cookiecutter.camelcase_modelname}}Tokenizer`. See
<add> :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
<add> details.
<ide>
<ide> `What are input IDs? <../glossary.html#input-ids>`__
<del> attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<del> Mask to avoid performing attention on padding token indices.
<del> Mask values selected in ``[0, 1]``:
<add> attention_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<add> Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
<ide>
<ide> - 1 for tokens that are **not masked**,
<del> - 0 for tokens that are **maked**.
<add> - 0 for tokens that are **masked**.
<ide>
<ide> `What are attention masks? <../glossary.html#attention-mask>`__
<del> token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<del> Segment token indices to indicate first and second portions of the inputs.
<del> Indices are selected in ``[0, 1]``:
<add> token_type_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<add> Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
<add> 1]``:
<ide>
<ide> - 0 corresponds to a `sentence A` token,
<ide> - 1 corresponds to a `sentence B` token.
<ide>
<ide> `What are token type IDs? <../glossary.html#token-type-ids>`__
<del> position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<del> Indices of positions of each input sequence tokens in the position embeddings.
<del> Selected in the range ``[0, config.max_position_embeddings - 1]``.
<add> position_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
<add> Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
<add> config.max_position_embeddings - 1]``.
<ide>
<ide> `What are position IDs? <../glossary.html#position-ids>`__
<del> head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
<del> Mask to nullify selected heads of the self-attention modules.
<del> Mask values selected in ``[0, 1]``:
<add> head_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
<add> Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
<ide>
<ide> - 1 indicates the head is **not masked**,
<ide> - 0 indicates the head is **masked**.
<ide>
<del> inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
<add> inputs_embeds (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
<ide> Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
<ide> This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
<ide> vectors than the model's internal embedding lookup matrix.
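A hedged sketch of what the tensors described by this docstring look like for a padded two-example batch (ids hand-built here for illustration; in practice a tokenizer produces them):

```python
import numpy as np

# Toy ids; 0 plays the role of the padding id in this sketch.
input_ids = np.array([[101, 7, 8, 102, 0, 0],
                      [101, 5, 102, 9, 10, 102]], dtype=np.int32)
attention_mask = (input_ids != 0).astype(np.int32)   # 1 = attend, 0 = padding
token_type_ids = np.array([[0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 1, 1]], dtype=np.int32)  # sentence A vs. B
position_ids = np.broadcast_to(np.arange(6, dtype=np.int32), input_ids.shape)

# outputs = model(input_ids=input_ids, attention_mask=attention_mask,
#                 token_type_ids=token_type_ids, position_ids=position_ids)
```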
<ide> class TF{{cookiecutter.camelcase_modelname}}PreTrainedModel(TFPreTrainedModel):
<ide> {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
<ide> )
<ide> class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel):
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}")
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> config=self.config,
<ide> def call(
<ide> return outputs
<ide>
<ide> # Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<del> return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
<add> return TFBaseModelOutput(
<add> last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns,
<add> )
<ide>
<ide>
<ide> @add_start_docstrings("""{{cookiecutter.modelname}} Model with a `language modeling` head on top. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING)
<ide> class TF{{cookiecutter.camelcase_modelname}}ForMaskedLM(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFMaskedLanguageModelingLoss):
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> if config.is_decoder:
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide>
<ide> self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}")
<del> self.mlm = TF{{cookiecutter.camelcase_modelname}}MLMHead(config, self.{{cookiecutter.lowercase_modelname}}.embeddings.word_embeddings, name="mlm___cls")
<add> self.mlm = TF{{cookiecutter.camelcase_modelname}}MLMHead(config, input_embeddings=self.{{cookiecutter.lowercase_modelname}}.embeddings.word_embeddings, name="mlm___cls")
<ide>
<del> def get_lm_head(self):
<add> def get_lm_head(self) -> tf.keras.layers.Layer:
<ide> return self.mlm.predictions
<ide>
<ide> @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
<ide> def get_lm_head(self):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<del> Labels for computing the masked language modeling loss.
<del> Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
<del> Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
<del> in ``[0, ..., config.vocab_size]``
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<add> Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
<add> config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
<add> (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
<ide> """
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.{{cookiecutter.lowercase_modelname}}(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> return_dict=inputs["return_dict"],
<ide> training=inputs["training"],
<ide> )
<del>
<ide> sequence_output = outputs[0]
<del> prediction_scores = self.mlm(sequence_output, training=inputs["training"])
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
<add> prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"])
<add> loss = (
<add> None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=prediction_scores)
<add> )
<ide>
<ide> if not inputs["return_dict"]:
<del> output = (prediction_scores,) + outputs[1:]
<add> output = (prediction_scores,) + outputs[2:]
<ide> return ((loss,) + output) if loss is not None else output
<ide>
<ide> return TFMaskedLMOutput(
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def serving_output(self, output):
<ide> )
<ide> class TF{{cookiecutter.camelcase_modelname}}ForCausalLM(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFCausalLanguageModelingLoss):
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> if not config.is_decoder:
<ide> logger.warning("If you want to use `TF{{cookiecutter.camelcase_modelname}}ForCausalLM` as a standalone, add `is_decoder=True.`")
<ide>
<ide> self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}")
<del> self.mlm = TF{{cookiecutter.camelcase_modelname}}MLMHead(config, self.{{cookiecutter.lowercase_modelname}}.embeddings.word_embeddings, name="mlm___cls")
<add> self.mlm = TF{{cookiecutter.camelcase_modelname}}MLMHead(config, input_embeddings=self.{{cookiecutter.lowercase_modelname}}.embeddings.word_embeddings, name="mlm___cls")
<ide>
<del> def get_lm_head(self):
<add> def get_lm_head(self) -> tf.keras.layers.Layer:
<ide> return self.mlm.predictions
<ide>
<ide> @add_code_sample_docstrings(
<ide> def get_lm_head(self):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<ide> Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
<ide> config.vocab_size - 1]``.
<ide> """
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.{{cookiecutter.lowercase_modelname}}(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output = outputs[0]
<del> logits = self.mlm(sequence_output, training=inputs["training"])
<add> logits = self.mlm(sequence_output=sequence_output, training=inputs["training"])
<ide> loss = None
<ide>
<ide> if inputs["labels"] is not None:
<ide> # shift labels to the left and cut last logit token
<ide> logits = logits[:, :-1]
<ide> labels = inputs["labels"][:, 1:]
<del> loss = self.compute_loss(labels, logits)
<add> loss = self.compute_loss(labels=labels, logits=logits)
<ide>
<ide> if not inputs["return_dict"]:
<del> output = (logits,) + outputs[1:]
<add> output = (logits,) + outputs[2:]
<ide> return ((loss,) + output) if loss is not None else output
<ide>
<ide> return TFCausalLMOutput(
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
<ide>
<add>
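The causal-LM loss above drops the last logit and the first label so that position t is scored against token t+1. A tiny sketch of that alignment:

```python
import tensorflow as tf

labels = tf.constant([[11, 12, 13, 14]])   # (batch, seq)
logits = tf.random.normal((1, 4, 50))      # (batch, seq, vocab)

shifted_logits = logits[:, :-1]            # predictions made at positions 0..2
shifted_labels = labels[:, 1:]             # their targets: the *next* tokens 12, 13, 14

loss = tf.keras.losses.sparse_categorical_crossentropy(
    shifted_labels, shifted_logits, from_logits=True
)
print(loss.shape)  # (1, 3): one term per predicted next token
```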
<ide> class TF{{cookiecutter.camelcase_modelname}}ClassificationHead(tf.keras.layers.Layer):
<ide> """Head for sentence-level classification tasks."""
<ide>
<del> def __init__(self, config, **kwargs):
<del> super().__init__(**kwargs)
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
<add> super().__init__(**kwargs)
<ide>
<ide> self.dense = tf.keras.layers.Dense(
<del> config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
<add> units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
<ide> )
<del> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
<add> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide> self.out_proj = tf.keras.layers.Dense(
<del> config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
<add> units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
<ide> )
<ide>
<del> self.config = config
<add> if isinstance(config.hidden_act, str):
<add> self.classifier_act_fn = get_tf_activation(config.hidden_act)
<add> else:
<add> self.classifier_act_fn = config.hidden_act
<ide>
<del> def call(self, inputs, **kwargs):
<del> x = inputs[:, 0, :] # take <s> token (equiv. to [CLS])
<del> x = self.dropout(x)
<del> x = self.dense(x)
<del> x = get_tf_activation(self.config.hidden_act)(x)
<del> x = self.dropout(x)
<del> x = self.out_proj(x)
<add> def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
<add> hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
<add> hidden_states = self.dropout(inputs=hidden_states, training=training)
<add> hidden_states = self.dense(inputs=hidden_states)
<add> hidden_states = self.classifier_act_fn(hidden_states)
<add> hidden_states = self.dropout(inputs=hidden_states, training=training)
<add> hidden_states = self.out_proj(hidden_states)
<ide>
<del> return x
<add> return hidden_states
<ide>
<ide>
<ide> @add_start_docstrings(
<ide> def call(self, inputs, **kwargs):
<ide> {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
<ide> )
<ide> class TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFSequenceClassificationLoss):
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<add>
<ide> self.num_labels = config.num_labels
<add>
<ide> self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}")
<ide> self.classifier = TF{{cookiecutter.camelcase_modelname}}ClassificationHead(config, name="classifier")
<ide>
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<del> Labels for computing the sequence classification/regression loss.
<del> Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
<del> If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<add> Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
<add> config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
<ide> If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
<ide> """
<ide> inputs = input_processing(
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.{{cookiecutter.lowercase_modelname}}(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> return_dict=inputs["return_dict"],
<ide> training=inputs["training"],
<ide> )
<del> logits = self.classifier(outputs[0], training=inputs["training"])
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
<add> logits = self.classifier(hidden_states=outputs[0], training=inputs["training"])
<add> loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (logits,) + outputs[1:]
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def serving_output(self, output):
<ide> {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
<ide> )
<ide> class TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFMultipleChoiceLoss):
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}")
<ide> self.sequence_summary = TFSequenceSummary(
<del> config, initializer_range=config.initializer_range, name="sequence_summary"
<add> config, config.initializer_range, name="sequence_summary"
<ide> )
<ide> self.classifier = tf.keras.layers.Dense(
<del> 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<add> units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<ide> )
<ide>
<ide> @property
<del> def dummy_inputs(self):
<add> def dummy_inputs(self) -> Dict[str, tf.Tensor]:
<ide> """
<ide> Dummy inputs to build the network.
<ide>
<ide> Returns:
<ide> tf.Tensor with dummy inputs
<ide> """
<del> return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
<add> return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
<ide>
<ide> @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
<ide> @add_code_sample_docstrings(
<ide> def dummy_inputs(self):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<del> Labels for computing the multiple choice classification loss.
<del> Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension
<del> of the input tensors. (See :obj:`input_ids` above)
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<add> Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
<add> num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
<add> :obj:`input_ids` above)
<ide> """
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> def call(
<ide> training=training,
<ide> kwargs_call=kwargs,
<ide> )
<del>
<add>
<ide> if inputs["input_ids"] is not None:
<ide> num_choices = shape_list(inputs["input_ids"])[1]
<ide> seq_length = shape_list(inputs["input_ids"])[2]
<ide> else:
<ide> num_choices = shape_list(inputs["inputs_embeds"])[1]
<ide> seq_length = shape_list(inputs["inputs_embeds"])[2]
<ide>
<del> flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
<add> flat_input_ids = (
<add> tf.reshape(tensor=inputs["input_ids"], shape=(-1, seq_length)) if inputs["input_ids"] is not None else None
<add> )
<ide> flat_attention_mask = (
<del> tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
<add> tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length))
<add> if inputs["attention_mask"] is not None
<add> else None
<ide> )
<ide> flat_token_type_ids = (
<del> tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
<add> tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length))
<add> if inputs["token_type_ids"] is not None
<add> else None
<ide> )
<ide> flat_position_ids = (
<del> tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
<add> tf.reshape(tensor=inputs["position_ids"], shape=(-1, seq_length))
<add> if inputs["position_ids"] is not None
<add> else None
<ide> )
<ide> flat_inputs_embeds = (
<del> tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
<add> tf.reshape(
<add> tensor=inputs["inputs_embeds"], shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3])
<add> )
<ide> if inputs["inputs_embeds"] is not None
<ide> else None
<ide> )
<ide> outputs = self.{{cookiecutter.lowercase_modelname}}(
<del> flat_input_ids,
<del> flat_attention_mask,
<del> flat_token_type_ids,
<del> flat_position_ids,
<del> inputs["head_mask"],
<del> flat_inputs_embeds,
<del> inputs["output_attentions"],
<del> inputs["output_hidden_states"],
<add> input_ids=flat_input_ids,
<add> attention_mask=flat_attention_mask,
<add> token_type_ids=flat_token_type_ids,
<add> position_ids=flat_position_ids,
<add> head_mask=inputs["head_mask"],
<add> inputs_embeds=flat_inputs_embeds,
<add> output_attentions=inputs["output_attentions"],
<add> output_hidden_states=inputs["output_hidden_states"],
<ide> return_dict=inputs["return_dict"],
<ide> training=inputs["training"],
<ide> )
<del> logits = self.sequence_summary(outputs[0], training=inputs["training"])
<del> logits = self.classifier(logits)
<del> reshaped_logits = tf.reshape(logits, (-1, num_choices))
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
<add> logits = self.sequence_summary(inputs=outputs[0], training=inputs["training"])
<add> logits = self.classifier(inputs=logits)
<add> reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
<add> loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits)
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (reshaped_logits,) + outputs[1:]
<ide> def call(
<ide> "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
<ide> "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
<ide> }])
<del> def serving(self, inputs):
<del> output = self.call(inputs)
<del>
<add> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
<add> def serving(self, inputs: Dict[str, tf.Tensor]):
<add> output = self.call(input_ids=inputs)
<add>
<ide> return self.serving_output(output)
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def serving_output(self, output):
<ide> )
<ide> class TF{{cookiecutter.camelcase_modelname}}ForTokenClassification(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFTokenClassificationLoss):
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.num_labels = config.num_labels
<add>
<ide> self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}")
<del> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
<add> self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
<ide> self.classifier = tf.keras.layers.Dense(
<del> config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<add> units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
<ide> )
<ide>
<ide> @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> labels=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<del> Labels for computing the token classification loss.
<del> Indices should be in ``[0, ..., config.num_labels - 1]``.
<add> labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
<add> Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
<add> 1]``.
<ide> """
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.{{cookiecutter.lowercase_modelname}}(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output = outputs[0]
<del> sequence_output = self.dropout(sequence_output, training=inputs["training"])
<del> logits = self.classifier(sequence_output)
<del> loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
<add> sequence_output = self.dropout(inputs=sequence_output, training=inputs["training"])
<add> logits = self.classifier(inputs=sequence_output)
<add> loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
<ide>
<ide> if not inputs["return_dict"]:
<ide> output = (logits,) + outputs[1:]
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<ide>
<ide> def serving_output(self, output):
<ide> )
<ide> class TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFQuestionAnsweringLoss):
<ide>
<del> def __init__(self, config, *inputs, **kwargs):
<add> def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs):
<ide> super().__init__(config, *inputs, **kwargs)
<ide>
<ide> self.num_labels = config.num_labels
<add>
<ide> self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}")
<ide> self.qa_outputs = tf.keras.layers.Dense(
<del> config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
<add> units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
<ide> )
<ide>
<ide> @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> )
<ide> def call(
<ide> self,
<del> input_ids=None,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> output_attentions=None,
<del> output_hidden_states=None,
<del> return_dict=None,
<del> start_positions=None,
<del> end_positions=None,
<del> training=False,
<add> input_ids: Optional[TFModelInputType] = None,
<add> attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> output_attentions: Optional[bool] = None,
<add> output_hidden_states: Optional[bool] = None,
<add> return_dict: Optional[bool] = None,
<add> start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
<add> training: Optional[bool] = False,
<ide> **kwargs,
<del> ):
<add> ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
<ide> r"""
<del> start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<add> start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<ide> Labels for position (index) of the start of the labelled span for computing the token classification loss.
<del> Positions are clamped to the length of the sequence (:obj:`sequence_length`).
<del> Position outside of the sequence are not taken into account for computing the loss.
<del> end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
<add> Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
<add> sequence are not taken into account for computing the loss.
<add> end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
<ide> Labels for position (index) of the end of the labelled span for computing the token classification loss.
<del> Positions are clamped to the length of the sequence (:obj:`sequence_length`).
<del> Position outside of the sequence are not taken into account for computing the loss.
<add> Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
<add> sequence are not taken into account for computing the loss.
<ide> """
<ide> inputs = input_processing(
<ide> func=self.call,
<ide> def call(
<ide> kwargs_call=kwargs,
<ide> )
<ide> outputs = self.{{cookiecutter.lowercase_modelname}}(
<del> inputs["input_ids"],
<add> input_ids=inputs["input_ids"],
<ide> attention_mask=inputs["attention_mask"],
<ide> token_type_ids=inputs["token_type_ids"],
<ide> position_ids=inputs["position_ids"],
<ide> def call(
<ide> training=inputs["training"],
<ide> )
<ide> sequence_output = outputs[0]
<del> logits = self.qa_outputs(sequence_output)
<del> start_logits, end_logits = tf.split(logits, 2, axis=-1)
<del> start_logits = tf.squeeze(start_logits, axis=-1)
<del> end_logits = tf.squeeze(end_logits, axis=-1)
<add> logits = self.qa_outputs(inputs=sequence_output)
<add> start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
<add> start_logits = tf.squeeze(input=start_logits, axis=-1)
<add> end_logits = tf.squeeze(input=end_logits, axis=-1)
<ide> loss = None
<ide>
<ide> if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
<ide> labels = {"start_position": inputs["start_positions"]}
<ide> labels["end_position"] = inputs["end_positions"]
<del> loss = self.compute_loss(labels, (start_logits, end_logits))
<add> loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))
<ide>
<ide> if not inputs["return_dict"]:
<del> output = (start_logits, end_logits) + outputs[1:]
<add> output = (start_logits, end_logits) + outputs[2:]
<ide> return ((loss,) + output) if loss is not None else output
<ide>
<ide> return TFQuestionAnsweringModelOutput(
<ide> def call(
<ide> )
<ide>
<ide> # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
<del> def serving_output(self, output):
<add> def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
<ide> hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
<ide> attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
<del>
<add>
<ide> return TFQuestionAnsweringModelOutput(
<ide> start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
<ide> ) | 15 |
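
A minimal standalone sketch of the style the rewrite above enforces — explicit keyword arguments, type annotations, and the classifier activation resolved once in `__init__` instead of on every forward pass. The layer name, sizes, and the `gelu` choice are illustrative, not part of the patch:

```python
import tensorflow as tf


class ToyClassificationHead(tf.keras.layers.Layer):
    def __init__(self, hidden_size: int, num_labels: int, dropout_rate: float, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(units=hidden_size, name="dense")
        self.dropout = tf.keras.layers.Dropout(rate=dropout_rate)
        self.out_proj = tf.keras.layers.Dense(units=num_labels, name="out_proj")
        # Resolve the activation once, not on every call.
        self.act_fn = tf.keras.activations.get("gelu")

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = hidden_states[:, 0, :]  # take the first token
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        hidden_states = self.act_fn(self.dense(inputs=hidden_states))
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        return self.out_proj(inputs=hidden_states)
```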
Javascript | Javascript | linkify error messages on minerr docs pages | 6aaae062171bfc8e5046c3eae99bc9d63037120a | <ide><path>docs/component-spec/errorLinkFilterSpec.js
<add>describe("errorLinkFilter", function () {
<add>
<add> var errorLinkFilter;
<add>
<add> beforeEach(module('docsApp'));
<add>
<add> beforeEach(inject(function ($filter) {
<add> errorLinkFilter = $filter('errorLink');
<add> }));
<add>
<add> it('should not change text that does not contain links', function () {
<add> expect(errorLinkFilter('This is a test')).toBe('This is a test');
<add> });
<add>
<add> it('should find links in text and linkify them', function () {
<add> expect(errorLinkFilter("http://ab/ (http://a/) http://1.2/v:~-123. c")).
<add> toBe('<a href="http://ab/">http://ab/</a> ' +
<add> '(<a href="http://a/">http://a/</a>) ' +
<add> '<a href="http://1.2/v:~-123">http://1.2/v:~-123</a>. c');
<add> expect(errorLinkFilter(undefined)).not.toBeDefined();
<add> });
<add>
<add> it('should handle mailto', function () {
<add> expect(errorLinkFilter("mailto:[email protected]")).
<add> toBe('<a href="mailto:[email protected]">[email protected]</a>');
<add> expect(errorLinkFilter("[email protected]")).
<add> toBe('<a href="mailto:[email protected]">[email protected]</a>');
<add> expect(errorLinkFilter("send email to [email protected], but")).
<add> toBe('send email to <a href="mailto:[email protected]">[email protected]</a>, but');
<add> });
<add>
<add> it('should handle target', function () {
<add> expect(errorLinkFilter("http://example.com", "_blank")).
<add> toBe('<a target="_blank" href="http://example.com">http://example.com</a>')
<add> expect(errorLinkFilter("http://example.com", "someNamedIFrame")).
<add> toBe('<a target="someNamedIFrame" href="http://example.com">http://example.com</a>')
<add> });
<add>
<add> it('should not linkify stack trace URLs', function () {
<add> expect(errorLinkFilter("http://example.com/angular.min.js:42:1337")).
<add> toBe("http://example.com/angular.min.js:42:1337");
<add> });
<add>
<add> it('should truncate linked URLs at 60 characters', function () {
<add> expect(errorLinkFilter("http://errors.angularjs.org/very-long-version-string/$injector/nomod?p0=myApp")).
<add> toBe('<a href="http://errors.angularjs.org/very-long-version-string/$injector/nomod?p0=myApp">' +
<add> 'http://errors.angularjs.org/very-long-version-string/$inj...</a>');
<add> });
<add>});
<ide>\ No newline at end of file
<ide><path>docs/src/templates/js/docs.js
<ide> var docsApp = {
<ide> controller: {},
<ide> directive: {},
<del> serviceFactory: {}
<add> serviceFactory: {},
<add> filter: {}
<ide> };
<ide>
<ide> docsApp.controller.DocsVersionsCtrl = ['$scope', '$window', 'NG_VERSIONS', 'NG_VERSION', function($scope, $window, NG_VERSIONS, NG_VERSION) {
<ide> docsApp.directive.docTutorialReset = function() {
<ide> };
<ide>
<ide>
<del>docsApp.directive.errorDisplay = ['$location', function ($location) {
<add>docsApp.filter.errorLink = ['$sanitize', function ($sanitize) {
<add> var LINKY_URL_REGEXP = /((ftp|https?):\/\/|(mailto:)?[A-Za-z0-9._%+-]+@)\S*[^\s\.\;\,\(\)\{\}\<\>]/g,
<add> MAILTO_REGEXP = /^mailto:/,
<add> STACK_TRACE_REGEXP = /:\d+:\d+$/;
<add>
<add> var truncate = function (text, nchars) {
<add> if (text.length > nchars) {
<add> return text.substr(0, nchars - 3) + '...';
<add> }
<add> return text;
<add> };
<add>
<add> return function (text, target) {
<add> var targetHtml = target ? ' target="' + target + '"' : '';
<add>
<add> if (!text) return text;
<add>
<add> return $sanitize(text.replace(LINKY_URL_REGEXP, function (url) {
<add> if (STACK_TRACE_REGEXP.test(url)) {
<add> return url;
<add> }
<add>
<add> // if we did not match ftp/http/mailto then assume mailto
<add> if (!/^((ftp|https?):\/\/|mailto:)/.test(url)) url = 'mailto:' + url;
<add>
<add> return '<a' + targetHtml + ' href="' + url +'">' +
<add> truncate(url.replace(MAILTO_REGEXP, ''), 60) +
<add> '</a>';
<add> }));
<add> };
<add>}];
<add>
<add>
<add>docsApp.directive.errorDisplay = ['$location', 'errorLinkFilter', function ($location, errorLinkFilter) {
<ide> var interpolate = function (formatString) {
<ide> var formatArgs = arguments;
<ide> return formatString.replace(/\{\d+\}/g, function (match) {
<ide> docsApp.directive.errorDisplay = ['$location', function ($location) {
<ide> for (i = 0; angular.isDefined(search['p'+i]); i++) {
<ide> formatArgs.push(search['p'+i]);
<ide> }
<del> element.text(interpolate.apply(null, formatArgs));
<add> element.html(errorLinkFilter(interpolate.apply(null, formatArgs), '_blank'));
<ide> }
<ide> };
<ide> }];
<ide> angular.module('docsApp', ['ngResource', 'ngRoute', 'ngCookies', 'ngSanitize', '
<ide> factory(docsApp.serviceFactory).
<ide> directive(docsApp.directive).
<ide> controller(docsApp.controller);
<add>
<add>angular.forEach(docsApp.filter, function (docsAppFilter, filterName) {
<add> angular.module('docsApp').filter(filterName, docsAppFilter);
<add>}); | 2 |
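
The `truncate` helper added to `docs.js` above is self-contained; run standalone, it reproduces the 60-character cut-off the spec asserts:

```js
function truncate(text, nchars) {
  if (text.length > nchars) {
    return text.substr(0, nchars - 3) + '...';
  }
  return text;
}

console.log(truncate('http://errors.angularjs.org/very-long-version-string/$injector/nomod?p0=myApp', 60));
// -> http://errors.angularjs.org/very-long-version-string/$inj...
```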
Java | Java | simplify width calculation in rcttext | a5413846216aec2d03ca8c71a0493e1b4a06da3c | <ide><path>ReactAndroid/src/main/java/com/facebook/react/flat/RCTText.java
<ide> protected void collectState(
<ide> mDrawCommand = new DrawTextLayout(new BoringLayout(
<ide> mText,
<ide> PAINT,
<del> (int) getLayoutWidth(),
<add> (int) (right - left),
<ide> mAlignment,
<ide> mSpacingMult,
<ide> mSpacingAdd, | 1 |
PHP | PHP | remove empty files after bake | fa495a6fece4ab858a198c715170c66c8bc6c36e | <ide><path>src/Shell/Task/ModelTask.php
<ide> use Cake\Console\Shell;
<ide> use Cake\Core\Configure;
<ide> use Cake\Datasource\ConnectionManager;
<add>use Cake\Filesystem\File;
<ide> use Cake\ORM\Table;
<ide> use Cake\ORM\TableRegistry;
<ide> use Cake\Utility\Inflector;
<ide> public function bakeEntity($model, array $data = []) {
<ide> $filename = $path . 'Entity' . DS . $name . '.php';
<ide> $this->out("\n" . sprintf('Baking entity class for %s...', $name), 1, Shell::QUIET);
<ide> $this->createFile($filename, $out);
<add> $emptyFile = new File($path . 'Entity' . DS . 'empty');
<add> if ($emptyFile->exists()) {
<add> $emptyFile->delete();
<add> }
<ide> return $out;
<ide> }
<ide>
<ide> public function bakeTable($model, array $data = []) {
<ide> $filename = $path . 'Table' . DS . $name . 'Table.php';
<ide> $this->out("\n" . sprintf('Baking table class for %s...', $name), 1, Shell::QUIET);
<ide> $this->createFile($filename, $out);
<add> $emptyFile = new File($path . 'Table' . DS . 'empty');
<add> if ($emptyFile->exists()) {
<add> $emptyFile->delete();
<add> }
<ide> return $out;
<ide> }
<ide>
<ide><path>src/Shell/Task/SimpleBakeTask.php
<ide>
<ide> use Cake\Shell\Task\BakeTask;
<ide> use Cake\Core\Configure;
<add>use Cake\Core\Plugin;
<add>use Cake\Filesystem\File;
<ide> use Cake\Utility\Inflector;
<ide>
<ide> /**
<ide> public function bake($name) {
<ide>
<ide> $filename = $this->getPath() . $this->fileName($name);
<ide> $this->createFile($filename, $contents);
<add> $emptyFile = new File($this->getPath() . DS . 'empty');
<add> if ($emptyFile->exists()) {
<add> $emptyFile->delete();
<add> }
<ide> return $contents;
<ide> }
<ide> | 2 |
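
Background, briefly: the bake skeleton ships placeholder files named `empty` so that otherwise-empty directories survive version control; once a real class has been generated beside one, the placeholder is dead weight. The guard both tasks now apply, as a standalone sketch (`$path` and the `DS` constant assumed in scope):

```php
<?php
use Cake\Filesystem\File;

// Delete the `empty` placeholder once a real file has been baked beside it.
$emptyFile = new File($path . 'Entity' . DS . 'empty');
if ($emptyFile->exists()) {
    $emptyFile->delete();
}
```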
Text | Text | add examples for gender neutral pronouns | 98af6596a9316f134c92ace8debdfc3b6a2c50de | <ide><path>guides/source/api_documentation_guidelines.md
<ide> Spell names correctly: Arel, Test::Unit, RSpec, HTML, MySQL, JavaScript, ERB. Wh
<ide>
<ide> Use the article "an" for "SQL", as in "an SQL statement". Also "an SQLite database".
<ide>
<del>When using pronouns in reference to a hypothetical person, such as "a user with a session cookie", gender neutral pronouns (they/their/them) should be used.
<add>When using pronouns in reference to a hypothetical person, such as "a user with a session cookie", gender neutral pronouns (they/their/them) should be used. Instead of:
<add>
<add>* he or she... use they.
<add>* him or her... use them.
<add>* his or her... use their.
<add>* his or hers... use theirs.
<add>* himself or herself... use themselves.
<ide>
<ide> English
<ide> ------- | 1 |
Mixed | Go | fix description of 'docker swarm init' | 3abee1bf8aad5e24df635d70bceb39c69a1c2407 | <ide><path>cli/command/swarm/opts.go
<ide> func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) {
<ide>
<ide> func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
<ide> flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit")
<del> flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s)")
<del> flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s)")
<add> flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period (ns|us|ms|s|m|h)")
<add> flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates (ns|us|ms|s|m|h)")
<ide> flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints")
<ide> flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain")
<ide> flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots")
<ide><path>docs/reference/commandline/swarm_init.md
<ide> Usage: docker swarm init [OPTIONS]
<ide> Initialize a swarm
<ide>
<ide> Options:
<del> --advertise-addr value Advertised address (format: <ip|interface>[:port])
<add> --advertise-addr string Advertised address (format: <ip|interface>[:port])
<ide> --autolock Enable manager autolocking (requiring an unlock key to start a stopped manager)
<ide> --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s)
<ide> --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) | 2 |
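
The hand-written `(default 5s)` / `(default 2160h0m0s)` suffixes were redundant because pflag appends a flag's default to its usage line on its own, so the help output showed the default twice. A minimal sketch:

```go
package main

import (
	"time"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("swarm-init", flag.ContinueOnError)
	fs.Duration("dispatcher-heartbeat", 5*time.Second,
		"Dispatcher heartbeat period (ns|us|ms|s|m|h)")
	fs.PrintDefaults() // renders: ... (ns|us|ms|s|m|h) (default 5s)
}
```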
Ruby | Ruby | clarify ar dependency | 6eee1dd62c85e23d84f16e8f300d8aba77bd5c64 | <ide><path>actionpack/test/controller/caching_test.rb
<ide> def page_cached?(action)
<ide>
<ide> class ActionCachingTestController < ActionController::Base
<ide> rescue_from(Exception) { head 500 }
<del> rescue_from(ActiveRecord::RecordNotFound) { head :not_found }
<add> if defined? ActiveRecord
<add> rescue_from(ActiveRecord::RecordNotFound) { head :not_found }
<add> end
<ide>
<ide> caches_action :index, :redirected, :forbidden, :if => Proc.new { |c| !c.request.format.json? }, :expires_in => 1.hour
<ide> caches_action :show, :cache_path => 'http://test.host/custom/show'
<ide> def test_file_extensions
<ide> assert_response :success
<ide> end
<ide>
<del> def test_record_not_found_returns_404_for_multiple_requests
<del> get :record_not_found
<del> assert_response 404
<del> get :record_not_found
<del> assert_response 404
<add> if defined? ActiveRecord
<add> def test_record_not_found_returns_404_for_multiple_requests
<add> get :record_not_found
<add> assert_response 404
<add> get :record_not_found
<add> assert_response 404
<add> end
<ide> end
<ide>
<ide> def test_four_oh_four_returns_404_for_multiple_requests | 1 |
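
The guard works because Ruby's `defined?` yields `nil` for an unresolved constant instead of raising `NameError`, so the Active Record-specific `rescue_from` is simply skipped when Action Pack's tests run without AR loaded:

```ruby
p defined?(String)       # => "constant"
p defined?(ActiveRecord) # => nil, unless active_record has been required
```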
Javascript | Javascript | fix compiler.hooks.make callbacks | f860c870acb1d6e9ed9d8f4506c08463a75914e3 | <ide><path>lib/Chunk.js
<ide> class Chunk {
<ide>
<ide> /**
<ide> * @param {ChunkGraph} chunkGraph the chunk graph
<del> * @returns {Record<string, Set<TODO>[]>} a record object of names to lists of child ids(?)
<add> * @returns {Record<string, Set<string | number>[]>} a record object of names to lists of child ids(?)
<ide> */
<ide> getChildIdsByOrders(chunkGraph) {
<add> /** @type {Map<string, {order: number, group: ChunkGroup}[]>} */
<ide> const lists = new Map();
<ide> for (const group of this.groupsIterable) {
<ide> if (group.chunks[group.chunks.length - 1] === this) {
<ide> class Chunk {
<ide> if (key.endsWith("Order")) {
<ide> const name = key.substr(0, key.length - "Order".length);
<ide> let list = lists.get(name);
<del> if (list === undefined) lists.set(name, (list = []));
<add> if (list === undefined) {
<add> list = [];
<add> lists.set(name, list);
<add> }
<ide> list.push({
<ide> order: childGroup.options[key],
<ide> group: childGroup
<ide> class Chunk {
<ide> /**
<ide> * @param {ChunkGraph} chunkGraph the chunk graph
<ide> * @param {boolean=} includeDirectChildren include direct children (by default only children of async children are included)
<del> * @returns {Record<string|number, Record<string, Set<TODO>[]>>} a record object of names to lists of child ids(?) by chunk id
<add> * @returns {Record<string|number, Record<string, Set<string | number>[]>>} a record object of names to lists of child ids(?) by chunk id
<ide> */
<ide> getChildIdsByOrdersMap(chunkGraph, includeDirectChildren) {
<ide> const chunkMaps = Object.create(null);
<ide><path>lib/Compilation.js
<ide> const createHash = require("./util/createHash");
<ide> const { arrayToSetDeprecation } = require("./util/deprecation");
<ide>
<ide> /** @typedef {import("webpack-sources").Source} Source */
<add>/** @typedef {import("../declarations/WebpackOptions").OutputOptions} OutputOptions */
<ide> /** @typedef {import("./AsyncDependenciesBlock")} AsyncDependenciesBlock */
<ide> /** @typedef {import("./Compiler")} Compiler */
<ide> /** @typedef {import("./DependenciesBlock")} DependenciesBlock */
<ide> class Compilation {
<ide> * from parent (or top level compiler) and creates a child Compilation
<ide> *
<ide> * @param {string} name name of the child compiler
<del> * @param {TODO} outputOptions // Need to convert config schema to types for this
<add> * @param {OutputOptions} outputOptions // Need to convert config schema to types for this
<ide> * @param {Plugin[]} plugins webpack plugins that will be applied
<ide> * @returns {Compiler} creates a child Compiler instance
<ide> */
<ide><path>lib/EntryPlugin.js
<ide> class EntryPlugin {
<ide> const { entry, name, context } = this;
<ide>
<ide> const dep = EntryPlugin.createDependency(entry, name);
<del> compilation.addEntry(context, dep, name, callback);
<add> compilation.addEntry(context, dep, name, err => {
<add> callback(err);
<add> });
<ide> });
<ide> }
<ide>
<ide><path>lib/PrefetchPlugin.js
<ide> class PrefetchPlugin {
<ide> compilation.addModuleChain(
<ide> this.context || compiler.context,
<ide> new PrefetchDependency(this.request),
<del> callback
<add> err => {
<add> callback(err);
<add> }
<ide> );
<ide> });
<ide> }
<ide> }
<add>
<ide> module.exports = PrefetchPlugin; | 4 |
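
The wrappers matter because `addEntry`/`addModuleChain` call back with `(err, module)`, while a tap on the async `make` hook must complete with at most an error. A hypothetical plugin sketch of the corrected pattern (require path assumed):

```js
const EntryPlugin = require("webpack/lib/EntryPlugin"); // path assumed

class ExtraEntryPlugin {
	apply(compiler) {
		compiler.hooks.make.tapAsync("ExtraEntryPlugin", (compilation, callback) => {
			const dep = EntryPlugin.createDependency("./extra.js", "extra");
			compilation.addEntry(compiler.context, dep, "extra", err => {
				callback(err); // forward the error only, never the module
			});
		});
	}
}

module.exports = ExtraEntryPlugin;
```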
Java | Java | remove extraneous dimension conversion | dd8ee4ad751309b3adf5f2295c7565c47cd343fb | <ide><path>ReactAndroid/src/main/java/com/facebook/react/views/toolbar/DrawableWithIntrinsicSize.java
<ide> public DrawableWithIntrinsicSize(Drawable drawable, ImageInfo imageInfo) {
<ide>
<ide> @Override
<ide> public int getIntrinsicWidth() {
<del> return (int) PixelUtil.toDIPFromPixel(mImageInfo.getWidth());
<add> return mImageInfo.getWidth();
<ide> }
<ide>
<ide> @Override
<ide> public int getIntrinsicHeight() {
<del> return (int) PixelUtil.toDIPFromPixel(mImageInfo.getHeight());
<add> return mImageInfo.getHeight();
<ide> }
<ide>
<ide> } | 1 |
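
For context: `Drawable#getIntrinsicWidth`/`Height` are consumed in raw pixels, and the `ImageInfo` here already reports the decoded bitmap size in pixels, so dividing by the screen density shrank the drawable on high-density screens. A toy illustration (the density value is assumed):

```java
final class IntrinsicSizeNote {
  public static void main(String[] args) {
    float density = 3.0f;                    // e.g. an xxhdpi screen (assumed)
    int decodedPx = 300;                     // what ImageInfo.getWidth() reports
    int asDip = (int) (decodedPx / density); // 100 -> drawable drawn too small
    int intrinsicPx = decodedPx;             // 300 -> matches the decoded bitmap
    System.out.println(asDip + " vs " + intrinsicPx);
  }
}
```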
PHP | PHP | add caches shell | 4ce2da2e486d97c62e0ab7c5268fcc955f506d67 | <ide><path>src/Shell/CachesShell.php
<add><?php
<add>/**
<add> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<add> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> *
<add> * Licensed under The MIT License
<add> * For full copyright and license information, please see the LICENSE.txt
<add> * Redistributions of files must retain the above copyright notice.
<add> *
<add> * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> * @link http://cakephp.org CakePHP(tm) Project
<add> * @since 3.3.0
<add> * @license http://www.opensource.org/licenses/mit-license.php MIT License
<add> */
<add>namespace Cake\Shell;
<add>
<add>use Cake\Cache\Cache;
<add>use Cake\Console\Shell;
<add>use Cake\Core\Configure;
<add>
<add>/**
<add> * Caches Shell.
<add> *
<add> * Provides a CLI interface to clear caches.
<add> * This tool can be used in development or by deployment scripts when changes
<add> * are made that require cached data to be removed.
<add> */
<add>class CachesShell extends Shell
<add>{
<add>
<add> /**
<add> * Get the option parser for this shell.
<add> *
<add> * @return \Cake\Console\ConsoleOptionParser
<add> */
<add> public function getOptionParser()
<add> {
<add> $parser = parent::getOptionParser();
<add> $parser->addSubcommand('list_prefixes', [
<add> 'help' => 'Show a list of all defined cache prefixes.',
<add> ]);
<add> $parser->addSubcommand('clear_all', [
<add> 'help' => 'Clear all caches.',
<add> ]);
<add> $parser->addSubcommand('clear', [
<add> 'help' => 'Clear the cache for a specified prefix.',
<add> 'parser' => [
<add> 'description' => [
<add> 'Clear the cache for a particular prefix.',
<add> 'For example, `cake caches clear _cake_model_` will clear the model cache',
<add> 'Use `cake caches list_prefixes` to list available prefixes'
<add> ],
<add> 'arguments' => [
<add> 'prefix' => [
<add> 'help' => 'The cache prefix to be cleared.',
<add> 'required' => true
<add> ]
<add> ]
<add> ]
<add> ]);
<add>
<add> return $parser;
<add> }
<add>
<add> /**
<add> * Clear the cache for a specified prefix.
<add> *
<add> * @param string|null $prefix The cache prefix to be cleared.
<add> * @throws \Cake\Console\Exception\StopException
<add> * @return void
<add> */
<add> public function clear($prefix = null)
<add> {
<add> try {
<add> Cache::clear(false, $prefix);
<add> } catch (\InvalidArgumentException $e) {
<add> $this->abort($e->getMessage());
<add> }
<add>
<add> $this->out("<success>Cleared $prefix cache</success>");
<add> }
<add>
<add> /**
<add> * Clear all caches.
<add> *
<add> * @return void
<add> */
<add> public function clearAll()
<add> {
<add> if (version_compare(Configure::version(), '3.2.0', '>=')) {
<add> Cache::clearAll(false);
<add> $this->out("<success>Cleared all caches</success>");
<add> } else {
<add> $prefixes = Cache::configured();
<add> foreach ($prefixes as $prefix) {
<add> $this->clear($prefix);
<add> }
<add> }
<add> }
<add>
<add> /**
<add> * Show a list of all defined cache prefixes.
<add> *
<add> * @return void
<add> */
<add> public function listPrefixes()
<add> {
<add> $prefixes = Cache::configured();
<add> foreach ($prefixes as $prefix) {
<add> $this->out($prefix);
<add> }
<add> }
<add>}
<ide><path>tests/TestCase/Shell/CachesShellTest.php
<add><?php
<add>/**
<add> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<add> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> *
<add> * Licensed under The MIT License
<add> * For full copyright and license information, please see the LICENSE.txt
<add> * Redistributions of files must retain the above copyright notice.
<add> *
<add> * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> * @link http://cakephp.org CakePHP(tm) Project
<add> * @since 3.3.0
<add> * @license http://www.opensource.org/licenses/mit-license.php MIT License
<add> */
<add>namespace Cake\Test\TestCase\Shell;
<add>
<add>use Cake\Cache\Cache;
<add>use Cake\Shell\CachesShell;
<add>use Cake\TestSuite\TestCase;
<add>
<add>/**
<add> * CachesShell tests.
<add> */
<add>class CachesShellTest extends TestCase
<add>{
<add>
<add> /**
<add> * setup method
<add> *
<add> * @return void
<add> */
<add> public function setUp()
<add> {
<add> parent::setUp();
<add> $this->io = $this->getMock('Cake\Console\ConsoleIo');
<add> $this->shell = new CachesShell($this->io);
<add> Cache::config('test', ['engine' => 'File', 'path' => TMP]);
<add> }
<add>
<add> /**
<add> * Teardown
<add> *
<add> * @return void
<add> */
<add> public function tearDown()
<add> {
<add> parent::tearDown();
<add> unset($this->io);
<add> unset($this->shell);
<add> Cache::drop('test');
<add> }
<add>
<add> /**
<add> * Test that getOptionParser() returns an instance of \Cake\Console\ConsoleOptionParser
<add> *
<add> * @return void
<add> */
<add> public function testGetOptionParser()
<add> {
<add> $this->assertInstanceOf('Cake\Console\ConsoleOptionParser', $this->shell->getOptionParser());
<add> }
<add>
<add> /**
<add> * Test that clear() throws \Cake\Console\Exception\StopException if cache prefix is invalid
<add> *
<add> * @return void
<add> */
<add> public function testClearInvalidPrefix()
<add> {
<add> $this->setExpectedException('Cake\Console\Exception\StopException');
<add> $this->shell->clear('foo');
<add> }
<add>
<add> /**
<add> * Test that clear() clears the specified cache when a valid prefix is used
<add> *
<add> * @return void
<add> */
<add> public function testClearValidPrefix()
<add> {
<add> Cache::add('key', 'value', 'test');
<add> $this->shell->clear('test');
<add> $this->assertFalse(Cache::read('key', 'test'));
<add> }
<add>
<add> /**
<add> * Test that clear() only clears the specified cache
<add> *
<add> * @return void
<add> */
<add> public function testClearIgnoresOtherCaches()
<add> {
<add> Cache::add('key', 'value', 'test');
<add> $this->shell->clear('_cake_core_');
<add> $this->assertEquals('value', Cache::read('key', 'test'));
<add> }
<add>
<add> /**
<add> * Test that clearAll() clears values from all defined caches
<add> *
<add> * @return void
<add> */
<add> public function testClearAll()
<add> {
<add> Cache::add('key', 'value1', 'test');
<add> Cache::add('key', 'value3', '_cake_core_');
<add> $this->shell->clearAll();
<add> $this->assertFalse(Cache::read('key', 'test'));
<add> $this->assertFalse(Cache::read('key', '_cake_core_'));
<add> }
<add>}
<ide><path>tests/TestCase/Shell/CommandListShellTest.php
<ide> public function testMain()
<ide> $expected = "/\[.*TestPluginTwo.*\] example, unique, welcome/";
<ide> $this->assertRegExp($expected, $output);
<ide>
<del> $expected = "/\[.*CORE.*\] i18n, orm_cache, plugin, routes, server/";
<add> $expected = "/\[.*CORE.*\] caches, i18n, orm_cache, plugin, routes, server/";
<ide> $this->assertRegExp($expected, $output);
<ide>
<ide> $expected = "/\[.*app.*\] i18m, sample/";
<ide> public function testMainAppPriority()
<ide> $output = implode("\n", $output);
<ide> rename(APP . 'Shell' . DS . 'I18nShell.php', APP . 'Shell' . DS . 'I18mShell.php');
<ide>
<del> $expected = "/\[.*CORE.*\] orm_cache, plugin, routes, server/";
<add> $expected = "/\[.*CORE.*\] caches, orm_cache, plugin, routes, server/";
<ide> $this->assertRegExp($expected, $output);
<ide>
<ide> $expected = "/\[.*app.*\] i18n, sample/";
<ide><path>tests/TestCase/Shell/CompletionShellTest.php
<ide> public function testCommands()
<ide> $output = $this->out->output;
<ide>
<ide> $expected = "TestPlugin.example TestPlugin.sample TestPluginTwo.example unique welcome " .
<del> "i18n orm_cache plugin routes server i18m sample testing_dispatch\n";
<add> "caches i18n orm_cache plugin routes server i18m sample testing_dispatch\n";
<ide> $this->assertTextEquals($expected, $output);
<ide> }
<ide> | 4 |
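
Typical invocations of the new shell, matching the subcommands registered in `getOptionParser()` (binary path assumed):

```
bin/cake caches list_prefixes
bin/cake caches clear _cake_model_
bin/cake caches clear_all
```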
Javascript | Javascript | fix typo in error message | 973c223429279286b8441f20807a770920d58bcb | <ide><path>lib/internal/process/report.js
<ide> exports.setup = function() {
<ide> if (err == null) {
<ide> return nr.getReport(new ERR_SYNTHETIC().stack);
<ide> } else if (typeof err !== 'object') {
<del> throw new ERR_INVALID_ARG_TYPE('err', 'Objct', err);
<add> throw new ERR_INVALID_ARG_TYPE('err', 'Object', err);
<ide> } else {
<ide> return nr.getReport(err.stack);
<ide> } | 1 |
Text | Text | add dependency check to read me | b626dabaa85910a5f4d5a3a8ccc068d17ff2d0f3 | <ide><path>readme.md
<ide> ## Laravel Framework (Core)
<ide>
<del>[](https://packagist.org/packages/laravel/framework) [](https://packagist.org/packages/laravel/framework) [](https://travis-ci.org/laravel/framework)
<add>[](https://packagist.org/packages/laravel/framework) [](https://packagist.org/packages/laravel/framework) [](https://travis-ci.org/laravel/framework) [](https://www.versioneye.com/php/laravel:framework)
<ide>
<ide> This repository contains the core code of the Laravel framework. If you want to build an application using Laravel 4, visit the main [Laravel repository](https://github.com/laravel/laravel).
<ide>\ No newline at end of file | 1 |
Python | Python | reorganize core/setup.py a bit | 39a420ca5a42b7450f2ff242cd7f6c78f3832b72 | <ide><path>numpy/core/setup.py
<ide> def generate_api(ext, build_dir):
<ide> config.add_include_dirs(join(local_dir, "src"))
<ide> config.add_include_dirs(join(local_dir))
<ide>
<del> # Multiarray version: this function is needed to build foo.c from foo.c.src
<del> # when foo.c is included in another file and as such not in the src
<del> # argument of build_ext command
<del> def generate_multiarray_templated_sources(ext, build_dir):
<del> from numpy.distutils.misc_util import get_cmd
<del>
<del> subpath = join('src', 'multiarray')
<del> sources = [join(local_dir, subpath, 'scalartypes.c.src'),
<del> join(local_dir, subpath, 'arraytypes.c.src'),
<del> join(local_dir, subpath, 'nditer.c.src'),
<del> join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
<del> join(local_dir, subpath, 'einsum.c.src')]
<del>
<del> # numpy.distutils generate .c from .c.src in weird directories, we have
<del> # to add them there as they depend on the build_dir
<del> config.add_include_dirs(join(build_dir, subpath))
<del> cmd = get_cmd('build_src')
<del> cmd.ensure_finalized()
<del> cmd.template_sources(sources, ext)
<del>
<del> # umath version: this function is needed to build foo.c from foo.c.src
<del> # when foo.c is included in another file and as such not in the src
<del> # argument of build_ext command
<del> def generate_umath_templated_sources(ext, build_dir):
<del> from numpy.distutils.misc_util import get_cmd
<del>
<del> subpath = join('src', 'umath')
<del> # NOTE: For manual template conversion of loops.h.src, read the note
<del> # in that file.
<del> sources = [join(local_dir, subpath, 'loops.c.src'),
<del> join(local_dir, subpath, 'umathmodule.c.src')]
<del>
<del> # numpy.distutils generate .c from .c.src in weird directories, we have
<del> # to add them there as they depend on the build_dir
<del> config.add_include_dirs(join(build_dir, subpath))
<del> cmd = get_cmd('build_src')
<del> cmd.ensure_finalized()
<del> cmd.template_sources(sources, ext)
<del>
<del>
<del> def generate_umath_c(ext, build_dir):
<del> target = join(build_dir,header_dir,'__umath_generated.c')
<del> dir = os.path.dirname(target)
<del> if not os.path.exists(dir):
<del> os.makedirs(dir)
<del> script = generate_umath_py
<del> if newer(script,target):
<del> f = open(target,'w')
<del> f.write(generate_umath.make_code(generate_umath.defdict,
<del> generate_umath.__file__))
<del> f.close()
<del> return []
<del>
<ide> config.add_data_files('include/numpy/*.h')
<ide> config.add_include_dirs(join('src', 'npymath'))
<ide> config.add_include_dirs(join('src', 'multiarray'))
<ide> def generate_umath_c(ext, build_dir):
<ide> if sys.platform == 'cygwin':
<ide> config.add_data_dir('include/numpy/fenv')
<ide>
<add> #######################################################################
<add> # npymath library #
<add> #######################################################################
<add>
<ide> # npymath needs the config.h and numpyconfig.h files to be generated, but
<ide> # build_clib cannot handle generate_config_h and generate_numpyconfig_h
<ide> # (don't ask). Because clib are generated before extensions, we have to
<ide> def get_mathlib_info(*args):
<ide> config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
<ide> subst_dict)
<ide>
<add> #######################################################################
<add> # sort module #
<add> #######################################################################
<add>
<add> # _sort version: this function is needed to build foo.c from foo.c.src
<add> # when foo.c is included in another file and as such not in the src
<add> # argument of build_ext command
<add> def generate_sort_templated_sources(ext, build_dir):
<add> from numpy.distutils.misc_util import get_cmd
<add>
<add> subpath = join('src', 'npysort')
<add> sources = [join(local_dir, subpath, 'sort.c.src')]
<add>
<add> # numpy.distutils generate .c from .c.src in weird directories, we have
<add> # to add them there as they depend on the build_dir
<add> config.add_include_dirs(join(build_dir, subpath))
<add> cmd = get_cmd('build_src')
<add> cmd.ensure_finalized()
<add> cmd.template_sources(sources, ext)
<add>
<add> sort_deps = [
<add> join('src', 'npysort', 'npy_sort_common.h'),
<add> join('src', 'npysort', 'sort.c.src'),
<add> join('src', 'npysort', 'sortmodule.c')]
<add>
<add> sort_src = [
<add> join('src', 'npysort', 'sortmodule.c'),
<add> generate_sort_templated_sources,
<add> generate_config_h,
<add> generate_numpyconfig_h,
<add> generate_numpy_api]
<add>
<add> config.add_extension('_sort',
<add> sources = sort_src,
<add> depends = deps + sort_deps,
<add> libraries = ['npymath'])
<add>
<add> #######################################################################
<add> # multiarray module #
<add> #######################################################################
<add>
<add> # Multiarray version: this function is needed to build foo.c from foo.c.src
<add> # when foo.c is included in another file and as such not in the src
<add> # argument of build_ext command
<add> def generate_multiarray_templated_sources(ext, build_dir):
<add> from numpy.distutils.misc_util import get_cmd
<add>
<add> subpath = join('src', 'multiarray')
<add> sources = [join(local_dir, subpath, 'scalartypes.c.src'),
<add> join(local_dir, subpath, 'arraytypes.c.src'),
<add> join(local_dir, subpath, 'nditer.c.src'),
<add> join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
<add> join(local_dir, subpath, 'einsum.c.src')]
<add>
<add> # numpy.distutils generate .c from .c.src in weird directories, we have
<add> # to add them there as they depend on the build_dir
<add> config.add_include_dirs(join(build_dir, subpath))
<add> cmd = get_cmd('build_src')
<add> cmd.ensure_finalized()
<add> cmd.template_sources(sources, ext)
<add>
<ide> multiarray_deps = [
<ide> join('src', 'multiarray', 'arrayobject.h'),
<ide> join('src', 'multiarray', 'arraytypes.h'),
<ide> def get_mathlib_info(*args):
<ide> if PYTHON_HAS_UNICODE_WIDE:
<ide> multiarray_src.append(join('src', 'multiarray', 'ucsnarrow.c'))
<ide>
<del> umath_src = [
<del> join('src', 'umath', 'umathmodule.c.src'),
<del> join('src', 'umath', 'funcs.inc.src'),
<del> join('src', 'umath', 'loops.c.src'),
<del> join('src', 'umath', 'ufunc_object.c')]
<add> if not ENABLE_SEPARATE_COMPILATION:
<add> multiarray_deps.extend(multiarray_src)
<add> multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
<add> multiarray_src.append(generate_multiarray_templated_sources)
<ide>
<del> umath_deps = [
<del> generate_umath_py,
<del> join(codegen_dir,'generate_ufunc_api.py')]
<add> config.add_extension('multiarray',
<add> sources = multiarray_src +
<add> [generate_config_h,
<add> generate_numpyconfig_h,
<add> generate_numpy_api,
<add> join(codegen_dir,'generate_numpy_api.py'),
<add> join('*.py')],
<add> depends = deps + multiarray_deps,
<add> libraries = ['npymath'])
<ide>
<ide> #######################################################################
<del> # Sort Module #
<add> # umath module #
<ide> #######################################################################
<ide>
<del> # _sort version: this function is needed to build foo.c from foo.c.src
<add> # umath version: this function is needed to build foo.c from foo.c.src
<ide> # when foo.c is included in another file and as such not in the src
<ide> # argument of build_ext command
<del> def generate_sort_templated_sources(ext, build_dir):
<add> def generate_umath_templated_sources(ext, build_dir):
<ide> from numpy.distutils.misc_util import get_cmd
<ide>
<del> subpath = join('src', 'npysort')
<del> sources = [join(local_dir, subpath, 'sort.c.src')]
<add> subpath = join('src', 'umath')
<add> # NOTE: For manual template conversion of loops.h.src, read the note
<add> # in that file.
<add> sources = [join(local_dir, subpath, 'loops.c.src'),
<add> join(local_dir, subpath, 'umathmodule.c.src')]
<ide>
<ide> # numpy.distutils generate .c from .c.src in weird directories, we have
<ide> # to add them there as they depend on the build_dir
<ide> def generate_sort_templated_sources(ext, build_dir):
<ide> cmd.ensure_finalized()
<ide> cmd.template_sources(sources, ext)
<ide>
<del> sort_deps = [
<del> join('src', 'npysort', 'npy_sort_common.h'),
<del> join('src', 'npysort', 'sort.c.src'),
<del> join('src', 'npysort', 'sortmodule.c')]
<ide>
<del> sort_src = [
<del> join('src', 'npysort', 'sortmodule.c'),
<del> generate_sort_templated_sources,
<del> generate_config_h,
<del> generate_numpyconfig_h,
<del> generate_numpy_api]
<add> def generate_umath_c(ext, build_dir):
<add> target = join(build_dir,header_dir,'__umath_generated.c')
<add> dir = os.path.dirname(target)
<add> if not os.path.exists(dir):
<add> os.makedirs(dir)
<add> script = generate_umath_py
<add> if newer(script,target):
<add> f = open(target,'w')
<add> f.write(generate_umath.make_code(generate_umath.defdict,
<add> generate_umath.__file__))
<add> f.close()
<add> return []
<ide>
<del> config.add_extension('_sort',
<del> sources = sort_src,
<del> depends = deps + sort_deps,
<del> libraries=['npymath'])
<del>#
<del> if not ENABLE_SEPARATE_COMPILATION:
<del> multiarray_deps.extend(multiarray_src)
<del> multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
<del> multiarray_src.append(generate_multiarray_templated_sources)
<add> umath_src = [
<add> join('src', 'umath', 'umathmodule.c.src'),
<add> join('src', 'umath', 'funcs.inc.src'),
<add> join('src', 'umath', 'loops.c.src'),
<add> join('src', 'umath', 'ufunc_object.c')]
<ide>
<add> umath_deps = [
<add> generate_umath_py,
<add> join(codegen_dir,'generate_ufunc_api.py')]
<add>
<add> if not ENABLE_SEPARATE_COMPILATION:
<ide> umath_deps.extend(umath_src)
<ide> umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
<ide> umath_src.append(generate_umath_templated_sources)
<ide> umath_src.append(join('src', 'umath', 'funcs.inc.src'))
<ide>
<del> config.add_extension('multiarray',
<del> sources = multiarray_src +
<del> [generate_config_h,
<del> generate_numpyconfig_h,
<del> generate_numpy_api,
<del> join(codegen_dir,'generate_numpy_api.py'),
<del> join('*.py')],
<del> depends = deps + multiarray_deps,
<del> libraries=['npymath'])
<del>
<ide> config.add_extension('umath',
<ide> sources = umath_src +
<ide> [generate_config_h,
<ide> generate_numpyconfig_h,
<ide> generate_umath_c,
<ide> generate_ufunc_api],
<ide> depends = deps + umath_deps,
<del> libraries=['npymath'],
<add> libraries = ['npymath'],
<ide> )
<ide>
<add> #######################################################################
<add> # scalarmath module #
<add> #######################################################################
<add>
<ide> config.add_extension('scalarmath',
<del> sources=[join('src','scalarmathmodule.c.src'),
<add> sources = [join('src','scalarmathmodule.c.src'),
<ide> generate_config_h,
<ide> generate_numpyconfig_h,
<ide> generate_numpy_api,
<ide> generate_ufunc_api],
<del> libraries=['npymath'],
<add> depends = deps,
<add> libraries = ['npymath'],
<ide> )
<ide>
<add> #######################################################################
<add> # _dotblas module #
<add> #######################################################################
<add>
<ide> # Configure blasdot
<ide> blas_info = get_info('blas_opt',0)
<ide> #blas_info = {}
<ide> def get_dotblas_sources(ext, build_dir):
<ide>
<ide> config.add_extension('_dotblas',
<ide> sources = [get_dotblas_sources],
<del> depends=[join('blasdot','_dotblas.c'),
<add> depends = [join('blasdot','_dotblas.c'),
<ide> join('blasdot','cblas.h'),
<ide> ],
<ide> include_dirs = ['blasdot'],
<ide> extra_info = blas_info
<ide> )
<ide>
<add> #######################################################################
<add> # umath_tests module #
<add> #######################################################################
<add>
<ide> config.add_extension('umath_tests',
<ide> sources = [join('src','umath', 'umath_tests.c.src')])
<ide>
<add> #######################################################################
<add> # multiarray_tests module #
<add> #######################################################################
<add>
<ide> config.add_extension('multiarray_tests',
<ide> sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
<ide> | 1 |
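
Much of the reshuffled file leans on a numpy.distutils convention worth spelling out: entries in an extension's `sources` list may be callables of `(ext, build_dir)` that either return generated file paths or return `[]` after registering side effects, exactly like `generate_umath_c` above. A minimal sketch with illustrative names:

```python
import os

def generate_example_c(ext, build_dir):
    """Build-time source generator; numpy.distutils invokes it with (ext, build_dir)."""
    target = os.path.join(build_dir, 'example_generated.c')
    d = os.path.dirname(target)
    if d and not os.path.exists(d):
        os.makedirs(d)
    with open(target, 'w') as f:
        f.write('/* generated at build time */\n')
    return [target]

# config.add_extension('example', sources=[generate_example_c, 'src/example.c'])
```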
Go | Go | modify the minor typo in deviceset.go | 5c1559a754f7b1fba45be01a6f1a4b3c7c6c4c68 | <ide><path>daemon/graphdriver/devmapper/deviceset.go
<ide> func (devices *DeviceSet) unregisterDevice(id int, hash string) error {
<ide> devices.devicesLock.Unlock()
<ide>
<ide> if err := devices.removeMetadata(info); err != nil {
<del> log.Debugf("Error removing meta data: %s", err)
<add> log.Debugf("Error removing metadata: %s", err)
<ide> return err
<ide> }
<ide>
<ide> func (devices *DeviceSet) rollbackTransaction() error {
<ide>
<ide> dinfo := &DevInfo{Hash: devices.DeviceIdHash}
<ide> if err := devices.removeMetadata(dinfo); err != nil {
<del> log.Errorf("Warning: Unable to remove meta data: %s", err)
<add> log.Errorf("Warning: Unable to remove metadata: %s", err)
<ide> } else {
<ide> devices.markDeviceIdFree(devices.DeviceId)
<ide> }
<ide> func (devices *DeviceSet) openTransaction(hash string, DeviceId int) error {
<ide> devices.DeviceIdHash = hash
<ide> devices.DeviceId = DeviceId
<ide> if err := devices.saveTransactionMetaData(); err != nil {
<del> return fmt.Errorf("Error saving transaction meta data: %s", err)
<add> return fmt.Errorf("Error saving transaction metadata: %s", err)
<ide> }
<ide> return nil
<ide> }
<ide>
<ide> func (devices *DeviceSet) refreshTransaction(DeviceId int) error {
<ide> devices.DeviceId = DeviceId
<ide> if err := devices.saveTransactionMetaData(); err != nil {
<del> return fmt.Errorf("Error saving transaction meta data: %s", err)
<add> return fmt.Errorf("Error saving transaction metadata: %s", err)
<ide> }
<ide> return nil
<ide> } | 1 |
Text | Text | remove duplicate the from onboarding.md | b7c0de5eb83f70c44ed19f3320b360eeec64ce64 | <ide><path>doc/onboarding.md
<ide> onboarding session.
<ide> (especially if it just has nits left).
<ide> * Approving a change
<ide> * Collaborators indicate that they have reviewed and approve of the
<del> the changes in a pull request using Github’s approval interface
<add> changes in a pull request using Github’s approval interface
<ide> * Some people like to comment `LGTM` (“Looks Good To Me”)
<ide> * You have the authority to approve any other collaborator’s work.
<ide> * You cannot approve your own pull requests. | 1 |
Ruby | Ruby | omit empty caveats | fca07369ab5c81d3abaa77841b9403a73efa2f78 | <ide><path>Library/Homebrew/cmd/info.rb
<ide> def info_formula f
<ide> end
<ide> end
<ide>
<del> if f.caveats
<add> the_caveats = (f.caveats || "").strip
<add> unless the_caveats.empty?
<ide> puts
<ide> puts f.caveats
<ide> puts
<ide><path>Library/Homebrew/formula_installer.rb
<ide> def install_dependency dep
<ide> end
<ide>
<ide> def caveats
<del> if f.caveats
<add> the_caveats = (f.caveats || "").strip
<add> unless the_caveats.empty?
<ide> ohai "Caveats", f.caveats
<ide> @show_summary_heading = true
<ide> end
<add>
<ide> if f.keg_only?
<ide> ohai 'Caveats', f.keg_only_text
<ide> @show_summary_heading = true | 2 |
Text | Text | move usage and example to same header level | fe4d53df512edcd046f002b1b78c47a687f0fad1 | <ide><path>doc/api/synopsis.md
<del># Usage
<add># Usage & Example
<add>
<add>## Usage
<ide>
<ide> <!--introduced_in=v0.10.0-->
<ide> <!--type=misc--> | 1 |
Javascript | Javascript | fix a bunch of problems with implicit autobinding | a61f4df0b9b23250a5a59285aad82cf7d82a19b8 | <ide><path>src/core/ReactCompositeComponent.js
<ide> var keyMirror = require('keyMirror');
<ide> var merge = require('merge');
<ide> var mixInto = require('mixInto');
<ide>
<del>var invokedBeforeMount = function() {
<del> invariant(
<del> false,
<del> 'You have invoked a method that is automatically bound, before the ' +
<del> 'instance has been mounted. There is nothing conceptually wrong with ' +
<del> 'this, but since this method will be replaced with a new version once ' +
<del> 'the component is mounted - you should be aware of the fact that this ' +
<del> 'method will soon be replaced.'
<del> );
<del>};
<add>function invokeWithWarning(func) {
<add> return function() {
<add> console && console.warn && console.warn(
<add> 'You have invoked a method that is automatically bound, before the ' +
<add> 'instance has been mounted. There is nothing conceptually wrong with ' +
<add> 'this, but since this method will be replaced with a new version once' +
<add> ' the component is mounted - you should be aware of the fact that ' +
<add> 'this method will soon be replaced.'
<add> );
<add> return func.apply(this, arguments);
<add> };
<add>}
<ide>
<ide> /**
<ide> * Policies that describe methods in `ReactCompositeComponentInterface`.
<ide> function mixSpecIntoComponent(Constructor, spec) {
<ide> proto.__reactAutoBindMap = {};
<ide> }
<ide> proto.__reactAutoBindMap[name] = property;
<del> proto[name] = invokedBeforeMount;
<add> proto[name] = property;
<add> if (__DEV__) {
<add> proto[name] = invokeWithWarning(property);
<add> }
<ide> } else {
<ide> if (isInherited) {
<ide> // For methods which are defined more than once, call the existing
<ide> function mixSpecIntoComponent(Constructor, spec) {
<ide> * @private
<ide> */
<ide> function createChainedFunction(one, two) {
<del> return function chainedFunction(a, b, c, d, e, tooMany) {
<add> return function chainedFunction(a, b, c, d, e, f, g, tooMany) {
<ide> invariant(
<ide> typeof tooMany === 'undefined',
<del> 'Chained function can only take a maximum of 5 arguments.'
<add> 'Chained function can only take a maximum of 7 arguments.'
<ide> );
<del> one.call(this, a, b, c, d, e);
<del> two.call(this, a, b, c, d, e);
<add> one.call(this, a, b, c, d, e, f, g);
<add> two.call(this, a, b, c, d, e, f, g);
<ide> };
<ide> }
<ide>
<ide> var ReactCompositeComponentMixin = {
<ide> */
<ide> _bindAutoBindMethod: function(method) {
<ide> var component = this;
<del> function autoBound(a, b, c, d, e, tooMany) {
<add> function autoBound(a, b, c, d, e, f, g, tooMany) {
<ide> invariant(
<ide> typeof tooMany === 'undefined',
<del> 'React.autoBind(...): Methods can only take a maximum of 5 arguments.'
<add> 'React.autoBind(...): Methods can only take a maximum of 7 arguments.'
<ide> );
<del> return method.call(component, a, b, c, d, e);
<add> return method.call(component, a, b, c, d, e, f, g);
<ide> }
<ide> return autoBound;
<ide> }
<ide><path>src/core/__tests__/ReactCompositeComponent-test.js
<ide> describe('ReactCompositeComponent', function() {
<ide> });
<ide> var instance = <ComponentClass />;
<ide>
<del> // Autobound methods will throw before mounting.
<del> // TODO: We should actually allow component instance methods to be invoked
<del> // before mounting for read-only operations. We would then update this test.
<del> expect(function() {
<del> instance.methodToBeExplicitlyBound.bind(instance)();
<del> }).toThrow();
<del> expect(function() {
<del> instance.methodAutoBound();
<del> }).toThrow();
<ide> expect(function() {
<ide> instance.methodExplicitlyNotBound();
<ide> }).not.toThrow(); | 2 |
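A note on the arity caps in this patch: both `chainedFunction` and `autoBound` forward a fixed list of seven positional parameters and assert that nothing extra was passed, rather than forwarding variadically. As a rough, language-neutral illustration of the chaining idea only — not React's code — here is a Python sketch, where `*args` makes an explicit argument cap unnecessary:

```python
def create_chained_function(one, two):
    """Call `one` then `two` with the same receiver and arguments."""
    def chained(self, *args):
        # *args forwards any arity, so no "maximum of 7 arguments"
        # guard is needed, unlike the fixed (a, b, ..., g) list above.
        one(self, *args)
        two(self, *args)
    return chained

calls = []
chained = create_chained_function(
    lambda self, *a: calls.append(("one", a)),
    lambda self, *a: calls.append(("two", a)),
)
chained(object(), 1, 2, 3)
assert [name for name, _ in calls] == ["one", "two"]
```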
Java | Java | fix checkstyle violation | e124cbb310721041731421c41030e6a1d2d06286 | <ide><path>spring-expression/src/test/java/org/springframework/expression/spel/MethodInvocationTests.java
<ide>
<ide> import org.springframework.core.convert.TypeDescriptor;
<ide> import org.springframework.expression.AccessException;
<del>import org.springframework.expression.BeanResolver;
<ide> import org.springframework.expression.EvaluationContext;
<ide> import org.springframework.expression.Expression;
<ide> import org.springframework.expression.ExpressionInvocationTargetException; | 1 |
PHP | PHP | apply fixes from styleci | 88c93eb59568d228736fa96b52bac51f1c2116cc | <ide><path>tests/Mail/MailManagerTest.php
<ide> public function emptyTransportConfigDataProvider()
<ide> public function testForgetMailer()
<ide> {
<ide> $this->app['config']->set('mail.mailers.custom_smtp', [
<del> 'transport' => "smtp",
<del> 'host' => "example.com",
<del> 'port' => "25",
<del> 'encryption' => "tls",
<del> 'username' => "username",
<del> 'password' => "password",
<add> 'transport' => 'smtp',
<add> 'host' => 'example.com',
<add> 'port' => '25',
<add> 'encryption' => 'tls',
<add> 'username' => 'username',
<add> 'password' => 'password',
<ide> 'timeout' => 10,
<ide> ]);
<ide>
<ide> /** @var MailManager $mailManager */
<ide> $mailManager = $this->app['mail.manager'];
<del> $mailManager->mailer("custom_smtp");
<add> $mailManager->mailer('custom_smtp');
<ide>
<ide> $mailersProperty = new \ReflectionProperty($mailManager, 'mailers');
<ide> $mailersProperty->setAccessible(true);
<ide>
<del> $this->assertArrayHasKey("custom_smtp", $mailersProperty->getValue($mailManager), "Mailer must exist in the \$mailers-property");
<add> $this->assertArrayHasKey('custom_smtp', $mailersProperty->getValue($mailManager), 'Mailer must exist in the $mailers-property');
<ide>
<del> $mailManager->forgetMailer("custom_smtp");
<add> $mailManager->forgetMailer('custom_smtp');
<ide>
<del> $this->assertArrayNotHasKey("custom_smtp", $mailersProperty->getValue($mailManager), "Mailer must not exist in the \$mailers-property as it must have been removed with MailManager::forgetMailer()");
<add> $this->assertArrayNotHasKey('custom_smtp', $mailersProperty->getValue($mailManager), 'Mailer must not exist in the $mailers-property as it must have been removed with MailManager::forgetMailer()');
<ide> }
<ide> } | 1 |
Mixed | Text | fix a minor mistake in the generator and docs | 336ecb8999d9c8206e242abcd90dd9465f20b21f | <ide><path>docs/Operator-Matrix.md
<ide> Operator | `Flowable` | `Observable` | `Maybe` | `Single` | `Completable` |
<ide> `zip`||||||
<ide> `zipArray`||||||
<ide> `zipWith`||||||
<del>esent](https://raw.github.com/wiki/ReactiveX/RxJava/images/checkmark_on.png)|||||
<del>`zipWith`||||||
<ide><path>docs/_Sidebar.md.md
<ide> * [Transformation](https://github.com/ReactiveX/RxJava/wiki/Transforming-Observables)
<ide> * [Utility](https://github.com/ReactiveX/RxJava/wiki/Observable-Utility-Operators)
<ide> * [Notable 3rd party Operators (Alphabetical List)](https://github.com/ReactiveX/RxJava/wiki/Alphabetical-List-of-3rd-party-Operators)
<add> * [Operator matrix](https://github.com/ReactiveX/RxJava/wiki/Operator-Matrix)
<ide> * [Plugins](https://github.com/ReactiveX/RxJava/wiki/Plugins)
<ide> * [How to Contribute](https://github.com/ReactiveX/RxJava/wiki/How-to-Contribute)
<ide> * [Writing operators](https://github.com/ReactiveX/RxJava/wiki/Writing-operators-for-2.0)
<ide><path>src/test/java/io/reactivex/rxjava3/internal/util/OperatorMatrixGenerator.java
<ide> public static void main(String[] args) throws IOException {
<ide> List<String> sortedOperators = new ArrayList<>(operatorSet);
<ide> sortedOperators.sort(Comparator.naturalOrder());
<ide>
<del> try (PrintWriter out = new PrintWriter(Files.newBufferedWriter(Paths.get("docs", "Operator-Matrix.md"), StandardOpenOption.CREATE))) {
<add> try (PrintWriter out = new PrintWriter(Files.newBufferedWriter(Paths.get("docs", "Operator-Matrix.md"), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING))) {
<ide> out.print("Operator |");
<ide> for (Class<?> clazz : CLASSES) {
<ide> out.print(" `"); | 3 |
Javascript | Javascript | expand http2 frameerror test case | 15879adf1dab3d67839fd90cfee51c7181b01e30 | <ide><path>test/parallel/test-http2-options-max-headers-block-length.js
<ide> server.listen(0);
<ide> server.on('listening', common.mustCall(() => {
<ide>
<ide> // Setting the maxSendHeaderBlockLength, then attempting to send a
<del> // headers block that is too big should cause a 'meError' to
<add> // headers block that is too big should cause a 'frameError' to
<ide> // be emitted, and will cause the stream to be shutdown.
<ide> const options = {
<ide> maxSendHeaderBlockLength: 10
<ide> server.on('listening', common.mustCall(() => {
<ide>
<ide> req.resume();
<ide> req.on('end', common.mustCall(() => {
<del> server.close();
<ide> client.destroy();
<ide> }));
<ide>
<ide> req.on('frameError', common.mustCall((type, code) => {
<ide> assert.strictEqual(code, h2.constants.NGHTTP2_ERR_FRAME_SIZE_ERROR);
<ide> }));
<ide>
<del> req.on('error', common.mustCall(common.expectsError({
<add> req.on('error', common.expectsError({
<ide> code: 'ERR_HTTP2_STREAM_ERROR',
<ide> type: Error,
<ide> message: 'Stream closed with error code 7'
<del> })));
<add> }));
<ide>
<ide> req.end();
<ide>
<add> // if no frameError listener, should emit 'error' with
<add> // code ERR_HTTP2_FRAME_ERROR
<add> const req2 = client.request({ ':path': '/' });
<add>
<add> req2.on('response', common.mustNotCall());
<add>
<add> req2.resume();
<add> req2.on('end', common.mustCall(() => {
<add> server.close();
<add> client.destroy();
<add> }));
<add>
<add> req2.once('error', common.mustCall((err) => {
<add> common.expectsError({
<add> code: 'ERR_HTTP2_FRAME_ERROR',
<add> type: Error
<add> })(err);
<add> req2.on('error', common.expectsError({
<add> code: 'ERR_HTTP2_STREAM_ERROR',
<add> type: Error,
<add> message: 'Stream closed with error code 7'
<add> }));
<add> }));
<add>
<add> req2.end();
<add>
<ide> })); | 1 |
Text | Text | fix example threejs-prerequisites.md | cdb5482850b4dc7dcbfffc3c541a046959d566bd | <ide><path>threejs/lessons/threejs-prerequisites.md
<ide> Destructuring works with arrays too. Assume an array `const position = [1, 2, 3,
<ide> old code
<ide>
<ide> ```js
<del>const x = position[2];
<del>const y = position[1];
<add>const x = position[2]; // 3
<add>const y = position[1]; // 2
<ide> ```
<ide>
<ide> new code
<ide>
<ide> ```js
<del>const [x, y] = position;
<add>const [, y, x] = position; // x = 3, y = 2
<ide> ```
<ide>
<ide> Destructuring also works in function arguments | 1 |
Ruby | Ruby | detect download strategy from url | edd1c76d409e224e329131bbd635d944cdf6fe1f | <ide><path>Library/Homebrew/dev-cmd/bump-formula-pr.rb
<ide> def bump_formula_pr
<ide> new_url.sub "mirrors.ocf.berkeley.edu/debian", "mirrorservice.org/sites/ftp.debian.org/debian"
<ide> end
<ide> resource = Resource.new { @url = new_url }
<del> resource.download_strategy = CurlDownloadStrategy
<add> resource.download_strategy = DownloadStrategyDetector.detect_from_url(new_url)
<ide> resource.owner = Resource.new(formula.name)
<ide> resource.version = forced_version if forced_version
<ide> odie "No --version= argument specified!" unless resource.version | 1 |
Text | Text | add description for the listener argument | 8850ef276ad215b7449cee713bc6096d067c33f7 | <ide><path>doc/api/http2.md
<ide> changes:
<ide> instance passed to `connect` and the `options` object, and returns any
<ide> [`Duplex`][] stream that is to be used as the connection for this session.
<ide> * ...: Any [`net.connect()`][] or [`tls.connect()`][] options can be provided.
<del>* `listener` {Function}
<add>* `listener` {Function} Will be registered as a one-time listener of the
<add> [`'connect'`][] event.
<ide> * Returns: {ClientHttp2Session}
<ide>
<ide> Returns a `ClientHttp2Session` instance.
<ide> following additional properties:
<ide> [Stream]: stream.html#stream_stream
<ide> [Using `options.selectPadding()`]: #http2_using_options_selectpadding
<ide> [`'checkContinue'`]: #http2_event_checkcontinue
<add>[`'connect'`]: #http2_event_connect
<ide> [`'request'`]: #http2_event_request
<ide> [`'unknownProtocol'`]: #http2_event_unknownprotocol
<ide> [`ClientHttp2Stream`]: #http2_class_clienthttp2stream | 1 |
Java | Java | avoid stacktrace for invalid origin header values | 9c66dfa7b5f610ddeb69b9dc10baa227fbddcbf1 | <ide><path>spring-web/src/main/java/org/springframework/web/util/UriComponentsBuilder.java
<ide> public static UriComponentsBuilder fromHttpRequest(HttpRequest request) {
<ide>
<ide> /**
<ide> * Create an instance by parsing the "origin" header of an HTTP request.
<add> * @see <a href="https://tools.ietf.org/html/rfc6454">RFC 6454</a>
<ide> */
<ide> public static UriComponentsBuilder fromOriginHeader(String origin) {
<ide> UriComponentsBuilder builder = UriComponentsBuilder.newInstance();
<ide> public static UriComponentsBuilder fromOriginHeader(String origin) {
<ide> String schema = (schemaIdx != -1 ? origin.substring(0, schemaIdx) : "http");
<ide> builder.scheme(schema);
<ide> String hostString = (schemaIdx != -1 ? origin.substring(schemaIdx + 3) : origin);
<add> // Handling of invalid origins as described in SPR-13478
<add> int firstSlashIdx = hostString.indexOf("/");
<add> if (firstSlashIdx != -1) {
<add> hostString = hostString.substring(0, firstSlashIdx);
<add> }
<ide> if (hostString.contains(":")) {
<ide> String[] hostAndPort = StringUtils.split(hostString, ":");
<ide> builder.host(hostAndPort[0]);
<ide><path>spring-web/src/test/java/org/springframework/web/util/WebUtilsTests.java
<ide> public void isSameOrigin() {
<ide> assertFalse(checkSameOrigin("mydomain1.com", -1, "http://mydomain2.com"));
<ide> assertFalse(checkSameOrigin("mydomain1.com", -1, "https://mydomain1.com"));
<ide> assertFalse(checkSameOrigin("mydomain1.com", -1, "invalid-origin"));
<add>
<add> // Handling of invalid origins as described in SPR-13478
<add> assertTrue(checkSameOrigin("mydomain1.com", -1, "http://mydomain1.com/"));
<add> assertTrue(checkSameOrigin("mydomain1.com", -1, "http://mydomain1.com:80/"));
<add> assertTrue(checkSameOrigin("mydomain1.com", -1, "http://mydomain1.com/path"));
<add> assertTrue(checkSameOrigin("mydomain1.com", -1, "http://mydomain1.com:80/path"));
<add> assertFalse(checkSameOrigin("mydomain2.com", -1, "http://mydomain1.com/"));
<add> assertFalse(checkSameOrigin("mydomain2.com", -1, "http://mydomain1.com:80/"));
<add> assertFalse(checkSameOrigin("mydomain2.com", -1, "http://mydomain1.com/path"));
<add> assertFalse(checkSameOrigin("mydomain2.com", -1, "http://mydomain1.com:80/path"));
<ide> }
<ide>
<ide> | 2 |
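The parsing rules the new tests pin down are compact enough to restate on their own. A minimal Python sketch of the same tolerant origin parsing (default scheme `http`, anything after the first `/` ignored, optional `:port`) — an illustration of the fix, not Spring code:

```python
def parse_origin(origin: str):
    scheme, sep, rest = origin.partition("://")
    if not sep:
        scheme, rest = "http", origin
    # SPR-13478: tolerate invalid origins that carry a path, e.g.
    # "http://mydomain1.com/path", by cutting at the first slash.
    rest = rest.split("/", 1)[0]
    host, _, port = rest.partition(":")
    return scheme, host, int(port) if port else -1  # -1 = default port

assert parse_origin("http://mydomain1.com:80/path") == ("http", "mydomain1.com", 80)
assert parse_origin("http://mydomain1.com/") == ("http", "mydomain1.com", -1)
assert parse_origin("mydomain1.com") == ("http", "mydomain1.com", -1)
```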
Mixed | Ruby | add days_in_year method | 2f4f4d2cf1e4c5a442459fc250daf66186d110fa | <ide><path>activesupport/CHANGELOG.md
<ide>
<ide> To opt-in load the [listen](https://github.com/guard/listen) gem in `Gemfile`:
<ide>
<del> group :development do
<del> gem 'listen', '~> 3.0.4'
<del> end
<add> group :development do
<add> gem 'listen', '~> 3.0.4'
<add> end
<ide>
<ide> *Puneet Agarwal* and *Xavier Noria*
<ide>
<add>* Added `Time#days_in_year` to return the number of days in the given year, or the
<add> current year if no argument is provided.
<add>
<add> *Jon Pascoe*
<add>
<ide> * Updated `parameterize` to preserve the case of a string, optionally.
<ide>
<ide> Example:
<ide><path>activesupport/lib/active_support/core_ext/time/calculations.rb
<ide> def days_in_month(month, year = current.year)
<ide> end
<ide> end
<ide>
<add> # Returns the number of days in the given year.
<add> # If no year is specified, it will use the current year.
<add> def days_in_year(year = current.year)
<add> days_in_month(2, year) + 337
<add> end
<add>
<ide> # Returns <tt>Time.zone.now</tt> when <tt>Time.zone</tt> or <tt>config.time_zone</tt> are set, otherwise just returns <tt>Time.now</tt>.
<ide> def current
<ide> ::Time.zone ? ::Time.zone.now : ::Time.now
<ide><path>activesupport/test/core_ext/time_ext_test.rb
<ide> def test_days_in_month_feb_in_leap_year_without_year_arg
<ide> end
<ide> end
<ide>
<add> def test_days_in_year_with_year
<add> assert_equal 365, Time.days_in_year(2005)
<add> assert_equal 366, Time.days_in_year(2004)
<add> assert_equal 366, Time.days_in_year(2000)
<add> assert_equal 365, Time.days_in_year(1900)
<add> end
<add>
<add> def test_days_in_year_in_common_year_without_year_arg
<add> Time.stub(:now, Time.utc(2007)) do
<add> assert_equal 365, Time.days_in_year
<add> end
<add> end
<add>
<add> def test_days_in_year_in_leap_year_without_year_arg
<add> Time.stub(:now, Time.utc(2008)) do
<add> assert_equal 366, Time.days_in_year
<add> end
<add> end
<add>
<ide> def test_last_month_on_31st
<ide> assert_equal Time.local(2004, 2, 29), Time.local(2004, 3, 31).last_month
<ide> end | 3 |
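The `days_in_month(2, year) + 337` trick works because only February's length varies: the eleven other months always total 337 days (7×31 + 4×30), so adding February's 28 or 29 yields 365 or 366. The same one-liner in Python, shown purely as a cross-check:

```python
import calendar

def days_in_year(year: int) -> int:
    # monthrange(year, 2)[1] is the number of days in February.
    return calendar.monthrange(year, 2)[1] + 337

assert days_in_year(2005) == 365
assert days_in_year(2004) == 366
assert days_in_year(2000) == 366
assert days_in_year(1900) == 365  # centuries not divisible by 400 are common years
```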
Javascript | Javascript | add support for finished after .destroy() | 2da36112d177efc19201f1610dcf51647378131c | <ide><path>lib/_http_incoming.js
<ide> IncomingMessage.prototype._destroy = function _destroy(err, cb) {
<ide> this.emit('aborted');
<ide> }
<ide>
<del> // If aborted and the underlying socket is not already destroyed,
<del> // destroy it.
<del> // We have to check if the socket is already destroyed because finished
<del> // does not call the callback when this methdod is invoked from `_http_client`
<del> // in `test/parallel/test-http-client-spurious-aborted.js`
<del> if (this.socket && !this.socket.destroyed && this.aborted) {
<add> // If aborted destroy socket.
<add> if (this.socket && this.aborted) {
<ide> this.socket.destroy(err);
<ide> const cleanup = finished(this.socket, (e) => {
<ide> cleanup();
<ide><path>lib/internal/streams/end-of-stream.js
<ide> const {
<ide> } = require('internal/errors').codes;
<ide> const { once } = require('internal/util');
<ide>
<add>function isSocket(stream) {
<add> return (
<add> typeof stream.connect === 'function' &&
<add> typeof stream.setNoDelay === 'function' &&
<add> typeof stream.setKeepAlive === 'function'
<add> );
<add>}
<add>
<ide> function isRequest(stream) {
<ide> return stream.setHeader && typeof stream.abort === 'function';
<ide> }
<ide> function eos(stream, options, callback) {
<ide> let willEmitClose = (
<ide> state &&
<ide> state.autoDestroy &&
<del> state.emitClose &&
<add> (state.emitClose || isSocket(stream)) &&
<ide> state.closed === false &&
<ide> isReadable(stream) === readable &&
<ide> isWritable(stream) === writable
<ide><path>lib/net.js
<ide> Socket.prototype._destroy = function(exception, cb) {
<ide>
<ide> this._handle.close(() => {
<ide> debug('emit close');
<add> if (this._writableState) {
<add> this._writableState.closed = true;
<add> }
<add> if (this._readableState) {
<add> this._readableState.closed = true;
<add> }
<ide> this.emit('close', isException);
<ide> });
<ide> this._handle.onread = noop;
<ide> Server.prototype._emitCloseIfDrained = function() {
<ide>
<ide> function emitCloseNT(self) {
<ide> debug('SERVER: emit close');
<add> if (self._writableState) {
<add> self._writableState.closed = true;
<add> }
<add> if (self._readableState) {
<add> self._readableState.closed = true;
<add> }
<ide> self.emit('close');
<ide> }
<ide>
<ide><path>test/parallel/test-net-finished.js
<add>'use strict';
<add>const common = require('../common');
<add>const net = require('net');
<add>const { finished } = require('stream');
<add>
<add>const server = net.createServer(function(con) {
<add> con.on('close', common.mustCall());
<add>});
<add>
<add>server.listen(0, common.mustCall(function() {
<add> const con = net.connect({
<add> port: this.address().port
<add> })
<add> .on('connect', () => {
<add> finished(con, common.mustCall());
<add> con.destroy();
<add> finished(con, common.mustCall());
<add> con.on('close', common.mustCall(() => {
<add> finished(con, common.mustCall(() => {
<add> server.close();
<add> }));
<add> }));
<add> })
<add> .end();
<add>})); | 4 |
Go | Go | improve godoc, and minor touch-ups | c2c7e9d4492a52c926a860cfabf7f1289f168985 | <ide><path>client/client.go
<ide> package client // import "github.com/docker/docker/client"
<ide>
<ide> import (
<ide> "context"
<del> "fmt"
<ide> "net"
<ide> "net/http"
<ide> "net/url"
<ide> type Client struct {
<ide> }
<ide>
<ide> // CheckRedirect specifies the policy for dealing with redirect responses:
<del>// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
<add>// If the request is non-GET return ErrRedirect, otherwise use the last response.
<ide> //
<del>// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client .
<del>// The Docker client (and by extension docker API client) can be made to send a request
<del>// like POST /containers//start where what would normally be in the name section of the URL is empty.
<del>// This triggers an HTTP 301 from the daemon.
<del>// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon.
<del>// This behavior change manifests in the client in that before the 301 was not followed and
<del>// the client did not generate an error, but now results in a message like Error response from daemon: page not found.
<add>// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308)
<add>// in the client. The Docker client (and by extension docker API client) can be
<add>// made to send a request like POST /containers//start where what would normally
<add>// be in the name section of the URL is empty. This triggers an HTTP 301 from
<add>// the daemon.
<add>//
<add>// In go 1.8 this 301 will be converted to a GET request, and ends up getting
<add>// a 404 from the daemon. This behavior change manifests in the client in that
<add>// before, the 301 was not followed and the client did not generate an error,
<add>// but now results in a message like Error response from daemon: page not found.
<ide> func CheckRedirect(req *http.Request, via []*http.Request) error {
<ide> if via[0].Method == http.MethodGet {
<ide> return http.ErrUseLastResponse
<ide> }
<ide> return ErrRedirect
<ide> }
<ide>
<del>// NewClientWithOpts initializes a new API client with default values. It takes functors
<del>// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))`
<del>// It also initializes the custom http headers to add to each request.
<add>// NewClientWithOpts initializes a new API client with a default HTTPClient, and
<add>// default API host and version. It also initializes the custom HTTP headers to
<add>// add to each request.
<add>//
<add>// It takes an optional list of Opt functional arguments, which are applied in
<add>// the order they're provided, which allows modifying the defaults when creating
<add>// the client. For example, the following initializes a client that configures
<add>// itself with values from environment variables (client.FromEnv), and has
<add>// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()).
<add>//
<add>//
<add>// cli, err := client.NewClientWithOpts(
<add>// client.FromEnv,
<add>// client.WithAPIVersionNegotiation(),
<add>// )
<ide> //
<del>// It won't send any version information if the version number is empty. It is
<del>// highly recommended that you set a version or your client may break if the
<del>// server is upgraded.
<ide> func NewClientWithOpts(ops ...Opt) (*Client, error) {
<ide> client, err := defaultHTTPClient(DefaultDockerHost)
<ide> if err != nil {
<ide> func NewClientWithOpts(ops ...Opt) (*Client, error) {
<ide> }
<ide>
<ide> func defaultHTTPClient(host string) (*http.Client, error) {
<del> url, err := ParseHostURL(host)
<add> hostURL, err := ParseHostURL(host)
<ide> if err != nil {
<ide> return nil, err
<ide> }
<del> transport := new(http.Transport)
<del> sockets.ConfigureTransport(transport, url.Scheme, url.Host)
<add> transport := &http.Transport{}
<add> _ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
<ide> return &http.Client{
<ide> Transport: transport,
<ide> CheckRedirect: CheckRedirect,
<ide> func (cli *Client) ClientVersion() string {
<ide> return cli.version
<ide> }
<ide>
<del>// NegotiateAPIVersion queries the API and updates the version to match the
<del>// API version. Any errors are silently ignored. If a manual override is in place,
<del>// either through the `DOCKER_API_VERSION` environment variable, or if the client
<del>// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation
<del>// will be performed.
<add>// NegotiateAPIVersion queries the API and updates the version to match the API
<add>// version. NegotiateAPIVersion downgrades the client's API version to match the
<add>// APIVersion if the ping version is lower than the default version. If the API
<add>// version reported by the server is higher than the maximum version supported
<add>// by the client, it uses the client's maximum version.
<add>//
<add>// If a manual override is in place, either through the "DOCKER_API_VERSION"
<add>// environment variable, or if the client is initialized
<add>// with a fixed version (WithVersion(xx)), no negotiation is performed.
<add>//
<add>// If the API server's ping response does not contain an API version, or if the
<add>// client did not get a successful ping response, it assumes it is connected with
<add>// an old daemon that does not support API version negotiation, in which case it
<add>// downgrades to the latest version of the API before version negotiation was
<add>// added (1.24).
<ide> func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
<ide> if !cli.manualOverride {
<ide> ping, _ := cli.Ping(ctx)
<ide> cli.negotiateAPIVersionPing(ping)
<ide> }
<ide> }
<ide>
<del>// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
<del>// if the ping version is less than the default version. If a manual override is
<del>// in place, either through the `DOCKER_API_VERSION` environment variable, or if
<del>// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no
<del>// negotiation is performed.
<del>func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
<add>// NegotiateAPIVersionPing downgrades the client's API version to match the
<add>// APIVersion in the ping response. If the API version in pingResponse is higher
<add>// than the maximum version supported by the client, it uses the client's maximum
<add>// version.
<add>//
<add>// If a manual override is in place, either through the "DOCKER_API_VERSION"
<add>// environment variable, or if the client is initialized
<add>// with a fixed version (WithVersion(xx)), no negotiation is performed.
<add>//
<add>// If the API server's ping response does not contain an API version, we assume
<add>// we are connected with an old daemon without API version negotiation support,
<add>// and downgrade to the latest version of the API before version negotiation was
<add>// added (1.24).
<add>func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) {
<ide> if !cli.manualOverride {
<del> cli.negotiateAPIVersionPing(p)
<add> cli.negotiateAPIVersionPing(pingResponse)
<ide> }
<ide> }
<ide>
<ide> // negotiateAPIVersionPing queries the API and updates the version to match the
<del>// API version. Any errors are silently ignored.
<del>func (cli *Client) negotiateAPIVersionPing(p types.Ping) {
<del> // try the latest version before versioning headers existed
<del> if p.APIVersion == "" {
<del> p.APIVersion = "1.24"
<add>// API version from the ping response.
<add>func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) {
<add> // default to the latest version before versioning headers existed
<add> if pingResponse.APIVersion == "" {
<add> pingResponse.APIVersion = "1.24"
<ide> }
<ide>
<ide> // if the client is not initialized with a version, start with the latest supported version
<ide> func (cli *Client) negotiateAPIVersionPing(p types.Ping) {
<ide> }
<ide>
<ide> // if server version is lower than the client version, downgrade
<del> if versions.LessThan(p.APIVersion, cli.version) {
<del> cli.version = p.APIVersion
<add> if versions.LessThan(pingResponse.APIVersion, cli.version) {
<add> cli.version = pingResponse.APIVersion
<ide> }
<ide>
<ide> // Store the results, so that automatic API version negotiation (if enabled)
<ide> func (cli *Client) HTTPClient() *http.Client {
<ide> func ParseHostURL(host string) (*url.URL, error) {
<ide> protoAddrParts := strings.SplitN(host, "://", 2)
<ide> if len(protoAddrParts) == 1 {
<del> return nil, fmt.Errorf("unable to parse docker host `%s`", host)
<add> return nil, errors.Errorf("unable to parse docker host `%s`", host)
<ide> }
<ide>
<ide> var basePath string
<ide> func ParseHostURL(host string) (*url.URL, error) {
<ide> }, nil
<ide> }
<ide>
<del>// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection.
<add>// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header,
<add>// that can be used for proxying the daemon connection.
<add>//
<ide> // Used by `docker dial-stdio` (docker/cli#889).
<ide> func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
<ide> return func(ctx context.Context) (net.Conn, error) {
<ide><path>client/client_unix.go
<ide>
<ide> package client // import "github.com/docker/docker/client"
<ide>
<del>// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
<add>// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
<add>// environment variable is unset or empty.
<ide> const DefaultDockerHost = "unix:///var/run/docker.sock"
<ide>
<ide> const defaultProto = "unix"
<ide><path>client/client_windows.go
<ide> package client // import "github.com/docker/docker/client"
<ide>
<del>// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
<add>// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
<add>// environment variable is unset or empty.
<ide> const DefaultDockerHost = "npipe:////./pipe/docker_engine"
<ide>
<ide> const defaultProto = "npipe"
<ide><path>client/options.go
<ide> type Opt func(*Client) error
<ide>
<ide> // FromEnv configures the client with values from environment variables.
<ide> //
<del>// Supported environment variables:
<del>// DOCKER_HOST to set the url to the docker server.
<del>// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
<del>// DOCKER_CERT_PATH to load the TLS certificates from.
<del>// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
<add>// FromEnv uses the following environment variables:
<add>//
<add>// DOCKER_HOST to set the URL to the docker server.
<add>//
<add>// DOCKER_API_VERSION to set the version of the API to
<add>// use, leave empty for latest.
<add>//
<add>// DOCKER_CERT_PATH to specify the directory from which to
<add>// load the TLS certificates (ca.pem, cert.pem, key.pem).
<add>//
<add>// DOCKER_TLS_VERIFY to enable or disable TLS verification (off by
<add>// default).
<ide> func FromEnv(c *Client) error {
<ide> ops := []Opt{
<ide> WithTLSClientConfigFromEnv(),
<ide> func WithHost(host string) Opt {
<ide> }
<ide>
<ide> // WithHostFromEnv overrides the client host with the host specified in the
<del>// DOCKER_HOST environment variable. If DOCKER_HOST is not set, the host is
<del>// not modified.
<add>// DOCKER_HOST environment variable. If DOCKER_HOST is not set,
<add>// or set to an empty value, the host is not modified.
<ide> func WithHostFromEnv() Opt {
<ide> return func(c *Client) error {
<ide> if host := os.Getenv("DOCKER_HOST"); host != "" {
<ide> func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
<ide> // settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables.
<ide> // If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified.
<ide> //
<del>// Supported environment variables:
<del>// DOCKER_CERT_PATH directory to load the TLS certificates (ca.pem, cert.pem, key.pem) from.
<del>// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
<add>// WithTLSClientConfigFromEnv uses the following environment variables:
<add>//
<add>// DOCKER_CERT_PATH to specify the directory from which to
<add>// load the TLS certificates (ca.pem, cert.pem, key.pem).
<add>//
<add>// DOCKER_TLS_VERIFY to enable or disable TLS verification (off by
<add>// default).
<ide> func WithTLSClientConfigFromEnv() Opt {
<ide> return func(c *Client) error {
<ide> dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
<ide> func WithVersion(version string) Opt {
<ide> // the version is not modified.
<ide> func WithVersionFromEnv() Opt {
<ide> return func(c *Client) error {
<del> if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
<del> return WithVersion(version)(c)
<del> }
<del> return nil
<add> return WithVersion(os.Getenv("DOCKER_API_VERSION"))(c)
<ide> }
<ide> }
<ide> | 4 |
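The negotiation behaviour these doc comments describe can be restated compactly. A language-neutral Python sketch of the downgrade logic in `negotiateAPIVersionPing` — an illustration of the documented behaviour, not the Docker SDK; the `client_version` argument stands in for the client's configured maximum:

```python
def version_less_than(a: str, b: str) -> bool:
    return [int(p) for p in a.split(".")] < [int(p) for p in b.split(".")]

def negotiate(client_version: str, ping_api_version: str) -> str:
    # Daemons that predate negotiation send no version; assume the last
    # API release before negotiation existed (1.24).
    server = ping_api_version or "1.24"
    # Downgrade when the server is older; otherwise keep the client's
    # maximum supported version.
    return server if version_less_than(server, client_version) else client_version

assert negotiate("1.40", "") == "1.24"
assert negotiate("1.40", "1.38") == "1.38"
assert negotiate("1.38", "1.41") == "1.38"
```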
Javascript | Javascript | add note about potential future bugs | 48b7b2c67c96aec21b6c4150c6c3f80d481f709b | <ide><path>src/renderers/shared/fiber/ReactFiberCompleteWork.js
<ide> module.exports = function<T, P, I, C>(config : HostConfig<T, P, I, C>) {
<ide> }
<ide>
<ide> /*
<add> // TODO: It's possible this will create layout thrash issues because mutations
<add> // of the DOM and life-cycles are interleaved. E.g. if a componentDidMount
<add> // of a sibling reads, then the next sibling updates and reads etc.
<ide> function markForPostEffect(workInProgress : Fiber) {
<ide> // Schedule a side-effect on this fiber, AFTER the children's side-effects.
<ide> if (workInProgress.lastEffect) { | 1 |
Text | Text | fix markdown style for better readability | 1e093b26a5c5200856f90ad8ad9c794f2dd721fc | <ide><path>object_detection/g3doc/preparing_inputs.md
<ide> The raw 2012 PASCAL VOC data set can be downloaded
<ide> [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar).
<ide> Extract the tar file and run the `create_pascal_tf_record` script:
<ide>
<del>```
<add>```bash
<ide> # From tensorflow/models/object_detection
<ide> tar -xvf VOCtrainval_11-May-2012.tar
<ide> python create_pascal_tf_record.py --data_dir=VOCdevkit \
<ide> python create_pascal_tf_record.py --data_dir=VOCdevkit \
<ide> --year=VOC2012 --set=val --output_path=pascal_val.record
<ide> ```
<ide>
<del>You should end up with two TFRecord files named pascal_train.record and
<del>pascal_val.record in the tensorflow/models/object_detection directory.
<add>You should end up with two TFRecord files named `pascal_train.record` and
<add>`pascal_val.record` in the `tensorflow/models/object_detection` directory.
<ide>
<ide> The label map for the PASCAL VOC data set can be found at
<del>data/pascal_label_map.pbtxt.
<add>`data/pascal_label_map.pbtxt`.
<ide>
<ide> ## Generation the Oxford-IIIT Pet TFRecord files.
<ide>
<ide> The Oxford-IIIT Pet data set can be downloaded from
<ide> [their website](http://www.robots.ox.ac.uk/~vgg/data/pets/). Extract the tar
<ide> file and run the `create_pet_tf_record` script to generate TFRecords.
<ide>
<del>```
<add>```bash
<ide> # From tensorflow/models/object_detection
<ide> tar -xvf annotations.tar.gz
<ide> tar -xvf images.tar.gz
<ide> python create_pet_tf_record.py --data_dir=`pwd` --output_dir=`pwd`
<ide> ```
<ide>
<del>You should end up with two TFRecord files named pet_train.record and
<del>pet_val.record in the tensorflow/models/object_detection directory.
<add>You should end up with two TFRecord files named `pet_train.record` and
<add>`pet_val.record` in the `tensorflow/models/object_detection` directory.
<ide>
<del>The label map for the Pet dataset can be found at data/pet_label_map.pbtxt.
<add>The label map for the Pet dataset can be found at `data/pet_label_map.pbtxt`. | 1 |
Ruby | Ruby | allow travel blocks after standalone travel_to | a829bb76396b0f6dd00dbb4631de0d6ed6fb9fc7
<ide> def travel(duration, &block)
<ide> # end
<ide> # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00
<ide> def travel_to(date_or_time)
<del> if block_given? && simple_stubs.stubbing(Time, :now)
<add> if block_given? && in_block
<ide> travel_to_nested_block_call = <<~MSG
<ide>
<ide> Calling `travel_to` with a block, when we have previously already made a call to `travel_to`, can lead to confusing time stubbing.
<ide> def travel_to(date_or_time)
<ide>
<ide> if block_given?
<ide> begin
<add> self.in_block = true
<ide> yield
<ide> ensure
<ide> travel_back
<add> self.in_block = false
<ide> end
<ide> end
<ide> end
<ide> def freeze_time(&block)
<ide> def simple_stubs
<ide> @simple_stubs ||= SimpleStubs.new
<ide> end
<add>
<add> attr_accessor :in_block
<ide> end
<ide> end
<ide> end
<ide><path>activesupport/test/time_travel_test.rb
<ide> def test_time_helper_travel_to_with_subsequent_calls
<ide> end
<ide> end
<ide>
<add> def test_time_helper_travel_with_subsequent_block
<add> Time.stub(:now, Time.now) do
<add> outer_expected_time = Time.new(2004, 11, 24, 1, 4, 44)
<add> inner_expected_time = Time.new(2004, 10, 24, 1, 4, 44)
<add> travel_to outer_expected_time
<add>
<add> assert_nothing_raised do
<add> travel_to(inner_expected_time) do
<add> assert_equal inner_expected_time, Time.now
<add> end
<add> end
<add> end
<add> end
<add>
<ide> def test_travel_to_will_reset_the_usec_to_avoid_mysql_rounding
<ide> Time.stub(:now, Time.now) do
<ide> travel_to Time.utc(2014, 10, 10, 10, 10, 50, 999999) do | 2 |
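The fix hinges on tracking "am I inside a travel block?" separately from "is the clock currently stubbed?". A compressed Python sketch of that distinction — the stub plumbing (`_stubbed_now`, `now()`) is invented for the sketch and is not ActiveSupport:

```python
import datetime

_stubbed_now = None   # stand-in for SimpleStubs
_in_block = False

def now():
    return _stubbed_now or datetime.datetime.now()

def travel_to(moment, block=None):
    global _stubbed_now, _in_block
    # Only a *block-form* call marks us as nested; a standalone call
    # merely stubs the clock, so a later block-form call stays legal.
    if block is not None and _in_block:
        raise RuntimeError("nested travel_to blocks are not allowed")
    _stubbed_now = moment
    if block is not None:
        _in_block = True
        try:
            block()
        finally:
            _stubbed_now = None  # travel_back removes all stubs
            _in_block = False

travel_to(datetime.datetime(2004, 11, 24))           # standalone call
travel_to(datetime.datetime(2004, 10, 24),           # block call afterwards: OK
          block=lambda: print(now()))
```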
Javascript | Javascript | add quarter diff support | d16828e7469eabb43b500298b766e234fe8a459c | <ide><path>moment.js
<ide>
<ide> units = normalizeUnits(units);
<ide>
<del> if (units === 'year' || units === 'month') {
<add> if (units === 'year' || units === 'month' || units === 'quarter') {
<ide> // average number of days in the months in the given dates
<ide> diff = (this.daysInMonth() + that.daysInMonth()) * 432e5; // 24 * 60 * 60 * 1000 / 2
<ide> // difference in months
<ide> daysAdjust += ((this.utcOffset() - moment(this).startOf('month').utcOffset()) -
<ide> (that.utcOffset() - moment(that).startOf('month').utcOffset())) * 6e4;
<ide> output += daysAdjust / diff;
<del> if (units === 'year') {
<add> if (units === 'quarter') {
<add> output = output / 3;
<add> } else if (units === 'year') {
<ide> output = output / 12;
<ide> }
<ide> } else {
<ide><path>test/moment/quarter.js
<ide> exports.quarter = {
<ide> test.done();
<ide> },
<ide>
<add> 'quarter diff' : function (test) {
<add> test.equal(moment('2014-01-01').diff(moment('2014-04-01'), 'quarter'),
<add> -1, 'diff -1 quarter');
<add> test.equal(moment('2014-04-01').diff(moment('2014-01-01'), 'quarter'),
<add> 1, 'diff 1 quarter');
<add> test.equal(moment('2014-05-01').diff(moment('2014-01-01'), 'quarter'),
<add> 1, 'diff 1 quarter');
<add> test.ok(Math.abs((4 / 3) - moment('2014-05-01').diff(
<add> moment('2014-01-01'), 'quarter', true)) < 0.00001,
<add> 'diff 1 1/3 quarter');
<add> test.equal(moment('2015-01-01').diff(moment('2014-01-01'), 'quarter'),
<add> 4, 'diff 4 quarters');
<add> test.done();
<add> },
<add>
<ide> 'quarter setter bubble to previous year' : function (test) {
<ide> var m;
<ide> test.expect(7); | 2 |
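Stripped of moment's day-level and UTC-offset adjustments, the new behaviour is simply the month difference divided by 3 (quarters) or 12 (years) and truncated toward zero. A bare-bones Python sketch over `(year, month)` pairs, for intuition only:

```python
import math

def diff(a, b, unit="month"):
    # a and b are (year, month) tuples; fractional-month adjustment omitted.
    months = (a[0] - b[0]) * 12 + (a[1] - b[1])
    if unit == "quarter":
        return math.trunc(months / 3)
    if unit == "year":
        return math.trunc(months / 12)
    return months

assert diff((2014, 4), (2014, 1), "quarter") == 1
assert diff((2014, 1), (2014, 4), "quarter") == -1
assert diff((2014, 5), (2014, 1), "quarter") == 1   # 4 months ≈ 1 1/3 quarters
assert diff((2015, 1), (2014, 1), "quarter") == 4
```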
PHP | PHP | allow multiple parameters for required_if | 4bc806c6fc5759d5f7227c3aa632af82282a6fa5 | <ide><path>src/Illuminate/Validation/Validator.php
<ide> protected function validateRequiredIf($attribute, $value, $parameters)
<ide> {
<ide> $this->requireParameterCount(2, $parameters, 'required_if');
<ide>
<del> if ($parameters[1] == array_get($this->data, $parameters[0]))
<add> $data = array_get($this->data, $parameters[0]);
<add> $values = array_slice($parameters, 1);
<add>
<add> if (in_array($data, $values))
<ide> {
<ide> return $this->validateRequired($attribute, $value);
<ide> }
<ide><path>tests/Validation/ValidationValidatorTest.php
<ide> public function testRequiredIf()
<ide> $trans = $this->getRealTranslator();
<ide> $v = new Validator($trans, array('first' => 'taylor', 'last' => 'otwell'), array('last' => 'required_if:first,taylor'));
<ide> $this->assertTrue($v->passes());
<add>
<add> $trans = $this->getRealTranslator();
<add> $v = new Validator($trans, array('first' => 'taylor', 'last' => 'otwell'), array('last' => 'required_if:first,taylor,dayle'));
<add> $this->assertTrue($v->passes());
<add>
<add> $trans = $this->getRealTranslator();
<add> $v = new Validator($trans, array('first' => 'dayle', 'last' => 'rees'), array('last' => 'required_if:first,taylor,dayle'));
<add> $this->assertTrue($v->passes());
<ide> }
<ide>
<ide> | 2 |
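The rule change — one trigger value before, any of several now — is easy to see in isolation. A schematic Python version of the new semantics (not Laravel code; `bool(...)` stands in for `validateRequired`):

```python
def required_if(data, attribute, parameters):
    # parameters[0] names the other field; every remaining parameter is a
    # value that triggers the requirement (the new multi-value behaviour).
    other, *values = parameters
    if data.get(other) in values:
        return bool(data.get(attribute))
    return True

assert required_if({"first": "taylor", "last": "otwell"}, "last",
                   ["first", "taylor", "dayle"])
assert required_if({"first": "dayle", "last": "rees"}, "last",
                   ["first", "taylor", "dayle"])
assert not required_if({"first": "taylor", "last": ""}, "last",
                       ["first", "taylor"])
```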
Javascript | Javascript | fix comment typo | 0b329511b9aaff7e27990fc16354db8ea0a16de8 | <ide><path>packages/react-reconciler/src/__tests__/ReactCache-test.js
<ide> describe('ReactCache', () => {
<ide>
<ide> // Unmount children: the first text cache instance is created only after the root
<ide> // commits, so both fresh cache instances are released by their cache boundaries,
<del> // cleaning up v1 (used for the first two children which render togeether) and
<add> // cleaning up v1 (used for the first two children which render together) and
<ide> // v2 (used for the third boundary added later).
<ide> await act(async () => {
<ide> root.render('Bye!'); | 1 |
Java | Java | use concurrenthashmaps in defaultcontextcache | d66d1605430c73444082d219214edb74327d3c13 | <ide><path>spring-test/src/main/java/org/springframework/test/context/support/DefaultContextCache.java
<ide> import java.util.List;
<ide> import java.util.Map;
<ide> import java.util.Set;
<add>import java.util.concurrent.ConcurrentHashMap;
<ide> import java.util.concurrent.atomic.AtomicInteger;
<ide>
<ide> import org.apache.commons.logging.Log;
<ide> import org.apache.commons.logging.LogFactory;
<add>
<ide> import org.springframework.context.ApplicationContext;
<ide> import org.springframework.context.ConfigurableApplicationContext;
<ide> import org.springframework.core.style.ToStringCreator;
<ide> import org.springframework.test.annotation.DirtiesContext.HierarchyMode;
<ide> import org.springframework.test.context.ContextCache;
<ide> import org.springframework.test.context.MergedContextConfiguration;
<ide> import org.springframework.util.Assert;
<del>import org.springframework.util.ConcurrentReferenceHashMap;
<ide>
<ide> /**
<ide> * Default implementation of the {@link ContextCache} API.
<ide> *
<del> * <p>Uses Spring's {@link ConcurrentReferenceHashMap} to store
<del> * {@linkplain java.lang.ref.SoftReference soft references} to cached
<del> * contexts and {@code MergedContextConfiguration} instances.
<add> * <p>Uses {@link ConcurrentHashMap ConcurrentHashMaps} to cache
<add> * {@link ApplicationContext} and {@link MergedContextConfiguration} instances.
<ide> *
<ide> * @author Sam Brannen
<ide> * @author Juergen Hoeller
<ide> * @since 2.5
<del> * @see ConcurrentReferenceHashMap
<ide> */
<ide> public class DefaultContextCache implements ContextCache {
<ide>
<ide> public class DefaultContextCache implements ContextCache {
<ide> * Map of context keys to Spring {@code ApplicationContext} instances.
<ide> */
<ide> private final Map<MergedContextConfiguration, ApplicationContext> contextMap =
<del> new ConcurrentReferenceHashMap<MergedContextConfiguration, ApplicationContext>(64);
<add> new ConcurrentHashMap<MergedContextConfiguration, ApplicationContext>(64);
<ide>
<ide> /**
<ide> * Map of parent keys to sets of children keys, representing a top-down <em>tree</em>
<ide> public class DefaultContextCache implements ContextCache {
<ide> * of other contexts.
<ide> */
<ide> private final Map<MergedContextConfiguration, Set<MergedContextConfiguration>> hierarchyMap =
<del> new ConcurrentReferenceHashMap<MergedContextConfiguration, Set<MergedContextConfiguration>>(64);
<add> new ConcurrentHashMap<MergedContextConfiguration, Set<MergedContextConfiguration>>(64);
<ide>
<ide> private final AtomicInteger hitCount = new AtomicInteger();
<ide> | 1 |
Python | Python | add type hints and default args for problem 20 | 11a5afd8a10b21dfebddcf3c6022402ac463b634 | <ide><path>project_euler/problem_20/sol1.py
<ide> """
<add>Problem 20: https://projecteuler.net/problem=20
<add>
<ide> n! means n × (n − 1) × ... × 3 × 2 × 1
<ide>
<ide> For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
<ide> """
<ide>
<ide>
<del>def factorial(n):
<add>def factorial(num: int) -> int:
<add> """Find the factorial of a given number n"""
<ide> fact = 1
<del> for i in range(1, n + 1):
<add> for i in range(1, num + 1):
<ide> fact *= i
<ide> return fact
<ide>
<ide>
<del>def split_and_add(number):
<add>def split_and_add(number: int) -> int:
<ide> """Split number digits and add them."""
<ide> sum_of_digits = 0
<ide> while number > 0:
<ide> def split_and_add(number):
<ide> return sum_of_digits
<ide>
<ide>
<del>def solution(n):
<del> """Returns the sum of the digits in the number 100!
<add>def solution(num: int = 100) -> int:
<add> """Returns the sum of the digits in the factorial of num
<ide> >>> solution(100)
<ide> 648
<ide> >>> solution(50)
<ide> def solution(n):
<ide> >>> solution(1)
<ide> 1
<ide> """
<del> f = factorial(n)
<del> result = split_and_add(f)
<add> nfact = factorial(num)
<add> result = split_and_add(nfact)
<ide> return result
<ide>
<ide>
<ide><path>project_euler/problem_20/sol2.py
<ide> """
<add>Problem 20: https://projecteuler.net/problem=20
<add>
<ide> n! means n × (n − 1) × ... × 3 × 2 × 1
<ide>
<ide> For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
<ide> from math import factorial
<ide>
<ide>
<del>def solution(n):
<del> """Returns the sum of the digits in the number 100!
<add>def solution(num: int = 100) -> int:
<add> """Returns the sum of the digits in the factorial of num
<ide> >>> solution(100)
<ide> 648
<ide> >>> solution(50)
<ide> def solution(n):
<ide> >>> solution(1)
<ide> 1
<ide> """
<del> return sum([int(x) for x in str(factorial(n))])
<add> return sum([int(x) for x in str(factorial(num))])
<ide>
<ide>
<ide> if __name__ == "__main__":
<ide><path>project_euler/problem_20/sol3.py
<ide> """
<add>Problem 20: https://projecteuler.net/problem=20
<add>
<ide> n! means n × (n − 1) × ... × 3 × 2 × 1
<ide>
<ide> For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
<ide> from math import factorial
<ide>
<ide>
<del>def solution(n):
<del> """Returns the sum of the digits in the number 100!
<add>def solution(num: int = 100) -> int:
<add> """Returns the sum of the digits in the factorial of num
<ide> >>> solution(1000)
<ide> 10539
<ide> >>> solution(200)
<ide> def solution(n):
<ide> >>> solution(0)
<ide> 1
<ide> """
<del> return sum(map(int, str(factorial(n))))
<add> return sum(map(int, str(factorial(num))))
<ide>
<ide>
<ide> if __name__ == "__main__":
<ide><path>project_euler/problem_20/sol4.py
<ide> """
<add>Problem 20: https://projecteuler.net/problem=20
<add>
<ide> n! means n × (n − 1) × ... × 3 × 2 × 1
<ide>
<ide> For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
<ide> """
<ide>
<ide>
<del>def solution(n):
<del> """Returns the sum of the digits in the number 100!
<add>def solution(num: int = 100) -> int:
<add> """Returns the sum of the digits in the factorial of num
<ide> >>> solution(100)
<ide> 648
<ide> >>> solution(50)
<ide> def solution(n):
<ide> """
<ide> fact = 1
<ide> result = 0
<del> for i in range(1, n + 1):
<add> for i in range(1, num + 1):
<ide> fact *= i
<ide>
<ide> for j in str(fact): | 4 |
Text | Text | update the big table of tasks | 201d23f2854c7a13d3c32df4947af9fd7365c2cd | <ide><path>examples/README.md
<ide> This is still a work-in-progress – in particular documentation is still sparse
<ide> | [**`text-classification`**](https://github.com/huggingface/transformers/tree/master/examples/text-classification) | GLUE, XNLI | ✅ | ✅ | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/trainer/01_text_classification.ipynb)
<ide> | [**`token-classification`**](https://github.com/huggingface/transformers/tree/master/examples/token-classification) | CoNLL NER | ✅ | ✅ | ✅ | -
<ide> | [**`multiple-choice`**](https://github.com/huggingface/transformers/tree/master/examples/multiple-choice) | SWAG, RACE, ARC | ✅ | ✅ | - | [Open In Colab](https://colab.research.google.com/github/ViktorAlm/notebooks/blob/master/MPC_GPU_Demo_for_TF_and_PT.ipynb)
<del>| [**`question-answering`**](https://github.com/huggingface/transformers/tree/master/examples/question-answering) | SQuAD | - | ✅ | - | -
<add>| [**`question-answering`**](https://github.com/huggingface/transformers/tree/master/examples/question-answering) | SQuAD | ✅ | ✅ | - | -
<ide> | [**`text-generation`**](https://github.com/huggingface/transformers/tree/master/examples/text-generation) | - | n/a | n/a | n/a | [Open In Colab](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb)
<ide> | [**`distillation`**](https://github.com/huggingface/transformers/tree/master/examples/distillation) | All | - | - | - | -
<ide> | [**`summarization`**](https://github.com/huggingface/transformers/tree/master/examples/seq2seq) | CNN/Daily Mail | - | - | ✅ | - | 1 |
Go | Go | increase the coverage of pkg/platform | bdc87676bfee0f9a26fe12323dbe6875af783645 | <ide><path>pkg/platform/utsname_int8_test.go
<add>// +build linux,386 linux,amd64 linux,arm64
<add>
<add>package platform
<add>
<add>import (
<add> "testing"
<add>
<add> "github.com/stretchr/testify/assert"
<add>)
<add>
<add>func TestCharToString(t *testing.T) {
<add> machineInBytes := [65]int8{120, 56, 54, 95, 54, 52}
<add> machineInString := charsToString(machineInBytes)
<add> assert.NotNil(t, machineInString, "Unable to convert char into string.")
<add> assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.")
<add>}
<ide><path>pkg/platform/utsname_uint8_test.go
<add>// +build linux,arm linux,ppc64 linux,ppc64le s390x
<add>
<add>package platform
<add>
<add>import (
<add> "testing"
<add>
<add> "github.com/stretchr/testify/assert"
<add>)
<add>
<add>func TestTestCharToString(t *testing.T) {
<add> machineInBytes := [65]uint8{120, 56, 54, 95, 54, 52}
<add> machineInString := charsToString(machineInBytes)
<add> assert.NotNil(t, machineInString, "Unable to convert char into string.")
<add> assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.")
<add>} | 2 |
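The fixture decodes to `x86_64` because the int8 values are plain ASCII codes (120 = `x`, 56 = `8`, …). A small Python re-implementation of the conversion these tests exercise — assuming, as the 65-byte utsname buffers suggest, that the helper stops at the first NUL:

```python
def chars_to_string(buf) -> str:
    out = bytearray()
    for c in buf:
        if c == 0:            # utsname fields are NUL-terminated
            break
        out.append(c & 0xFF)  # int8 variants may hold negative values
    return out.decode("ascii")

machine = [120, 56, 54, 95, 54, 52] + [0] * 59   # the [65]int8 fixture
assert chars_to_string(machine) == "x86_64"
```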
Python | Python | add nO attribute to textcategorizer model | bd8e84998a7bf4706c18f9b3099a84d4a2ba6a52
<ide> def build_text_classifier(nr_class, width=64, **cfg):
<ide> >> zero_init(Affine(nr_class, nr_class*2, drop_factor=0.0))
<ide> >> logistic
<ide> )
<del>
<add> model.nO = nr_class
<ide> model.lsuv = False
<ide> return model
<ide> | 1 |
Python | Python | add relative positional embedding to kerasbert | 2db2501bc9928f68e225282f3884b81680a9cccb | <ide><path>official/nlp/modeling/layers/masked_softmax.py
<ide> def call(self, inputs):
<ide>
<ide> # Since we are adding it to the raw scores before the softmax, this is
<ide> # effectively the same as removing these entirely.
<add>
<ide> scores += adder
<ide>
<ide> if len(self._normalization_axes) == 1:
<ide><path>official/nlp/modeling/layers/position_embedding.py
<ide> # from __future__ import google_type_annotations
<ide> from __future__ import print_function
<ide>
<add>import math
<add>
<ide> import tensorflow as tf
<ide>
<ide> from official.modeling import tf_utils
<ide> def call(self, inputs):
<ide> position_embeddings = self._position_embeddings
<ide>
<ide> return tf.broadcast_to(position_embeddings, input_shape)
<add>
<add>@tf.keras.utils.register_keras_serializable(package="Text")
<add>class RelativePositionEmbedding(tf.keras.layers.Layer):
<add> """Creates a positional embedding.
<add> This layer calculates the position encoding as a mix of sine and cosine
<add> functions with geometrically increasing wavelengths. Defined and formulized in
<add> "Attention is All You Need", section 3.5.
<add> (https://arxiv.org/abs/1706.03762).
<add> Arguments:
<add> hidden_size: Size of the hidden layer.
<add> min_timescale: Minimum scale that will be applied at each position
<add> max_timescale: Maximum scale that will be applied at each position.
<add> length: Number of positions. Should be specified if `inputs` is None at
<add> `call(self, inputs)`
<add> """
<add>
<add> def __init__(self,
<add> hidden_size,
<add> min_timescale=1.0,
<add> max_timescale=1.0e4,
<add> length=None,
<add> **kwargs):
<add> # We need to have a default dtype of float32, since the inputs (which Keras
<add> # usually uses to infer the dtype) will always be int32.
<add> # We compute the positional encoding in float32 even if the model uses
<add> # float16, as many of the ops used, like log and exp, are numerically
<add> # unstable in float16.
<add> if "dtype" not in kwargs:
<add> kwargs["dtype"] = "float32"
<add>
<add> super(RelativePositionEmbedding, self).__init__(**kwargs)
<add> self._hidden_size = hidden_size
<add> self._min_timescale = min_timescale
<add> self._max_timescale = max_timescale
<add> self._length = length
<add>
<add> def get_config(self):
<add> config = {
<add> "hidden_size": self._hidden_size,
<add> "min_timescale": self._min_timescale,
<add> "max_timescale": self._max_timescale,
<add> "length": self._length,
<add> }
<add> base_config = super(RelativePositionEmbedding, self).get_config()
<add> return dict(list(base_config.items()) + list(config.items()))
<add>
<add> def build(self, input_shape):
<add> """Implements build() for the layer."""
<add> super(RelativePositionEmbedding, self).build(input_shape)
<add>
<add> def call(self, inputs):
<add> """Implements call() for the layer."""
<add> length = self._length
<add> if inputs is None and length is None:
<add> raise ValueError(
<add> "If inputs is None, `length` must be set in "
<add> "RelativePositionEmbedding().")
<add> if inputs is not None:
<add> input_shape = tf_utils.get_shape_list(inputs)
<add> if length is not None and length != input_shape[1]:
<add> raise ValueError(
<add> "If inputs is not None, `length` must equal to input_shape[1]."
<add> )
<add> length = input_shape[1]
<add> position = tf.cast(tf.range(length), tf.float32)
<add> num_timescales = self._hidden_size // 2
<add> min_timescale, max_timescale = self._min_timescale, self._max_timescale
<add> log_timescale_increment = (
<add> math.log(float(max_timescale) / float(min_timescale)) /
<add> (tf.cast(num_timescales, tf.float32) - 1))
<add> inv_timescales = min_timescale * tf.exp(
<add> tf.cast(tf.range(num_timescales), tf.float32) *
<add> -log_timescale_increment)
<add> scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales,
<add> 0)
<add> position_embeddings = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)],
<add> axis=1)
<add> return position_embeddings
<ide><path>official/nlp/modeling/layers/position_embedding_test.py
<ide> def test_static_layer_output_shape(self):
<ide> sequence_length = 21
<ide> width = 30
<ide> input_tensor = tf.keras.Input(shape=(sequence_length, width))
<del> output_tensor = test_layer(input_tensor)
<add> output_tensor = test_layer(input_tensor) # pylint: disable=not-callable
<ide>
<ide> # When using static positional embedding shapes, the output is expected
<ide> # to be the same as the input shape in all dimensions save batch.
<ide> def test_float16_dtype(self):
<ide> sequence_length = 21
<ide> width = 30
<ide> input_tensor = tf.keras.Input(shape=(sequence_length, width))
<del> output_tensor = test_layer(input_tensor)
<add> output_tensor = test_layer(input_tensor) # pylint: disable=not-callable
<ide>
<ide> # When using static positional embedding shapes, the output is expected
<ide> # to be the same as the input shape in all dimensions save batch.
<ide> def test_dynamic_layer_output_shape(self):
<ide> # Create a 3-dimensional input (the first dimension is implicit).
<ide> width = 30
<ide> input_tensor = tf.keras.Input(shape=(None, width))
<del> output_tensor = test_layer(input_tensor)
<add> output_tensor = test_layer(input_tensor) # pylint: disable=not-callable
<ide>
<ide> # When using dynamic positional embedding shapes, the output is expected
<ide> # to be the same as the input shape in all dimensions - but may be None if
<ide> def test_dynamic_layer_slicing(self):
<ide> # Create a 3-dimensional input (the first dimension is implicit).
<ide> width = 30
<ide> input_tensor = tf.keras.Input(shape=(None, width))
<del> output_tensor = test_layer(input_tensor)
<add> output_tensor = test_layer(input_tensor) # pylint: disable=not-callable
<ide>
<ide> model = tf.keras.Model(input_tensor, output_tensor)
<ide>
<ide> def test_dynamic_layer_slicing(self):
<ide>
<ide> self.assertAllEqual([1, input_length, width], output_data.shape)
<ide>
<add> def test_relative_tensor_input(self):
<add> hidden_size = 8
<add> test_layer = position_embedding.RelativePositionEmbedding(
<add> hidden_size=hidden_size)
<add>
<add> # create a 3-dimensional input for test_layer to infer length as 1.
<add> input_tensor = tf.constant([[[0] * hidden_size]])
<add> output_tensor = test_layer(input_tensor) # pylint: disable=not-callable
<add>
<add> # expected output is the theoretical result of the input based on
<add> # sine cosine relative position embedding formula.
<add> expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]])
<add> self.assertAllEqual(output_tensor, expected_output_tensor)
<add>
<add> def test_relative_length_input(self):
<add> hidden_size = 8
<add>
<add> # When we do not have tensor as input, we explicitly specify length
<add> # value when initializing test_layer.
<add> test_layer = position_embedding.RelativePositionEmbedding(
<add> hidden_size=hidden_size, length=1)
<add> input_tensor = None
<add> output_tensor = test_layer(input_tensor) # pylint: disable=not-callable
<add>
<add> # expected output is the theoretical result of the input based on
<add> # sine cosine relative position embedding formula.
<add> expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]])
<add> self.assertAllEqual(output_tensor, expected_output_tensor)
<ide>
<ide> if __name__ == "__main__":
<ide> tf.test.main()
<ide><path>official/nlp/transformer/transformer.py
<ide> from __future__ import print_function
<ide>
<ide> import tensorflow as tf
<add>from official.nlp.modeling.layers import position_embedding
<ide> from official.nlp.transformer import attention_layer
<ide> from official.nlp.transformer import beam_search
<ide> from official.nlp.transformer import embedding_layer
<ide> def encode(self, inputs, attention_bias, training):
<ide> attention_bias = tf.cast(attention_bias, self.params["dtype"])
<ide>
<ide> with tf.name_scope("add_pos_encoding"):
<del> length = tf.shape(embedded_inputs)[1]
<del> pos_encoding = model_utils.get_position_encoding(
<del> length, self.params["hidden_size"])
<add> pos_layer = position_embedding.RelativePositionEmbedding(
<add> hidden_size=self.params["hidden_size"])
<add> pos_encoding = pos_layer(embedded_inputs)
<ide> pos_encoding = tf.cast(pos_encoding, self.params["dtype"])
<ide> encoder_inputs = embedded_inputs + pos_encoding
<ide>
<ide> def decode(self, targets, encoder_outputs, attention_bias, training):
<ide> [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
<ide> with tf.name_scope("add_pos_encoding"):
<ide> length = tf.shape(decoder_inputs)[1]
<del> pos_encoding = model_utils.get_position_encoding(
<del> length, self.params["hidden_size"])
<add> pos_layer = position_embedding.RelativePositionEmbedding(
<add> hidden_size=self.params["hidden_size"])
<add> pos_encoding = pos_layer(decoder_inputs)
<ide> pos_encoding = tf.cast(pos_encoding, self.params["dtype"])
<ide> decoder_inputs += pos_encoding
<ide> if training:
<ide> def decode(self, targets, encoder_outputs, attention_bias, training):
<ide> def _get_symbols_to_logits_fn(self, max_decode_length, training):
<ide> """Returns a decoding function that calculates logits of the next tokens."""
<ide>
<del> timing_signal = model_utils.get_position_encoding(
<del> max_decode_length + 1, self.params["hidden_size"])
<add> pos_layer = position_embedding.RelativePositionEmbedding(
<add> hidden_size=self.params["hidden_size"],
<add> length=max_decode_length + 1)
<add> timing_signal = pos_layer(None)
<ide> timing_signal = tf.cast(timing_signal, self.params["dtype"])
<ide> decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
<ide> max_decode_length, dtype=self.params["dtype"]) | 4 |
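For reference, the sine/cosine encoding this commit introduces is easy to reproduce by hand. The standalone NumPy sketch below is not part of the patch; it assumes only `numpy` is available and mirrors the layer's defaults (`min_timescale=1.0`, `max_timescale=1.0e4`, hidden size split into sine and cosine halves):

import math

import numpy as np

def relative_position_encoding(length, hidden_size,
                               min_timescale=1.0, max_timescale=1.0e4):
    # One row per position, hidden_size columns: [sin(...), cos(...)].
    position = np.arange(length, dtype=np.float32)
    num_timescales = hidden_size // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (num_timescales - 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

# length=1, hidden_size=8 reproduces the unit test's expected output:
# [[0. 0. 0. 0. 1. 1. 1. 1.]]
print(relative_position_encoding(1, 8))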
PHP | PHP | add missing `throwunless` magic method | ff5c52b7556a41901b961d9ceee4fee55da27b3a | <ide><path>src/Illuminate/Http/Client/Factory.php
<ide> * @method \Illuminate\Http\Client\PendingRequest withoutVerifying()
<ide> * @method \Illuminate\Http\Client\PendingRequest throw(callable $callback = null)
<ide> * @method \Illuminate\Http\Client\PendingRequest throwIf($condition)
<add> * @method \Illuminate\Http\Client\PendingRequest throwUnless($condition)
<ide> * @method array pool(callable $callback)
<ide> * @method \Illuminate\Http\Client\Response delete(string $url, array $data = [])
<ide> * @method \Illuminate\Http\Client\Response get(string $url, array|string|null $query = null)
<ide><path>src/Illuminate/Support/Facades/Http.php
<ide> * @method static \Illuminate\Http\Client\PendingRequest withoutVerifying()
<ide> * @method static \Illuminate\Http\Client\PendingRequest throw(callable $callback = null)
<ide> * @method static \Illuminate\Http\Client\PendingRequest throwIf($condition)
<add> * @method \Illuminate\Http\Client\PendingRequest throwUnless($condition)
<ide> * @method static array pool(callable $callback)
<ide> * @method static \Illuminate\Http\Client\Response delete(string $url, array $data = [])
<ide> * @method static \Illuminate\Http\Client\Response get(string $url, array|string|null $query = null) | 2 |
Python | Python | test random input for bubble sort | 9b3f7c36d0e116fac5df72fa35aa2fe74bb1b927 | <ide><path>sorts/bubble_sort.py
<ide> def bubble_sort(collection):
<ide> Examples:
<ide> >>> bubble_sort([0, 5, 2, 3, 2])
<ide> [0, 2, 2, 3, 5]
<del>
<del> >>> bubble_sort([])
<del> []
<del>
<del> >>> bubble_sort([-2, -45, -5])
<del> [-45, -5, -2]
<del>
<del> >>> bubble_sort([-23, 0, 6, -4, 34])
<del> [-23, -4, 0, 6, 34]
<del>
<add> >>> bubble_sort([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2])
<add> True
<add> >>> bubble_sort([]) == sorted([])
<add> True
<add> >>> bubble_sort([-2, -45, -5]) == sorted([-2, -45, -5])
<add> True
<ide> >>> bubble_sort([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34])
<ide> True
<add> >>> bubble_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])
<add> True
<add> >>> import random
<add> >>> collection = random.sample(range(-50, 50), 100)
<add> >>> bubble_sort(collection) == sorted(collection)
<add> True
<add> >>> import string
<add> >>> collection = random.choices(string.ascii_letters + string.digits, k=100)
<add> >>> bubble_sort(collection) == sorted(collection)
<add> True
<ide> """
<ide> length = len(collection)
<ide> for i in range(length - 1):
<ide> def bubble_sort(collection):
<ide>
<ide>
<ide> if __name__ == "__main__":
<add> import doctest
<ide> import time
<ide>
<add> doctest.testmod()
<add>
<ide> user_input = input("Enter numbers separated by a comma:").strip()
<ide> unsorted = [int(item) for item in user_input.split(",")]
<ide> start = time.process_time() | 1 |
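The random-input doctests added above generalize to a quick standalone property check. This is a hypothetical snippet, not from the repository; the algorithm is inlined (copying the input first) so it runs on its own:

import random
import string

def bubble_sort(collection):
    # Same algorithm as the patch, inlined so the check is self-contained.
    items = list(collection)
    length = len(items)
    for i in range(length - 1):
        for j in range(length - 1 - i):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
    return items

# Any random sample, numeric or alphanumeric, should match Python's sorted().
for sample in (random.sample(range(-50, 50), 100),
               random.choices(string.ascii_letters + string.digits, k=100)):
    assert bubble_sort(sample) == sorted(sample)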
Javascript | Javascript | increase loop protect timeout to 1000 | bfdcd1ce25f3771b269a243b27343f29564d3c44 | <ide><path>public/js/lib/loop-protect/loop-protect.js
<ide> if (typeof DEBUG === 'undefined') { DEBUG = true; }
<ide> var reSingle = /\b(for|while|do)\b/;
<ide> var labelRe = /\b([a-z_]{1}\w+:)/i;
<ide> var comments = /(?:\/\*(?:[\s\S]*?)\*\/)|(?:([\s;])+\/\/(?:.*)$)/gm;
<del> var loopTimeout = 500;
<add> var loopTimeout = 1000;
<ide>
<ide> var loopProtect = rewriteLoops;
<ide> | 1 |
PHP | PHP | update doc return to reflect assoc json decode | afdc47dab018010b9918e71dfb1ce982519bf709 | <ide><path>src/Http/Client/Response.php
<ide> public function body($parser = null)
<ide> /**
<ide> * Get the response body as JSON decoded data.
<ide> *
<del> * @return mixed
<add> * @return array|null
<ide> */
<ide> protected function _getJson()
<ide> { | 1 |
Text | Text | correct duplicate error | 08451ce95e16224e1afba6e890e33b430d662779 | <ide><path>docs/02-Scales.md
<ide> min | Number | - | User defined minimum number for the scale, overrides minimum
<ide> max | Number | - | User defined maximum number for the scale, overrides maximum value from data.
<ide> maxTicksLimit | Number | 11 | Maximum number of ticks and gridlines to show. If not defined, it will limit to 11 ticks but will show all gridlines.
<ide> showLabelBackdrop | Boolean | true | If true, draw a background behind the tick labels
<del>stepSize | Number | - | User defined fixed step size for the scale. If set, the scale ticks will be enumerated by multiple of stepSize, having one tick per increment. If not set, the ticks are labeled automatically using the nice numbers algorithm.
<add>fixedStepSize | Number | - | User defined fixed step size for the scale. If set, the scale ticks will be enumerated by multiple of stepSize, having one tick per increment. If not set, the ticks are labeled automatically using the nice numbers algorithm.
<ide> stepSize | Number | - | if defined, it can be used along with the min and the max to give a custom number of steps. See the example below.
<ide> suggestedMax | Number | - | User defined maximum number for the scale, overrides maximum value *except for if* it is lower than the maximum value.
<ide> suggestedMin | Number | - | User defined minimum number for the scale, overrides minimum value *except for if* it is higher than the minimum value. | 1 |
Go | Go | remove lock in datastore for global scope | 115cdb52b3cb97e436ac138d119a931b1bcdc993 | <ide><path>libnetwork/datastore/datastore.go
<ide> var (
<ide> )
<ide>
<ide> type datastore struct {
<del> scope string
<del> store store.Store
<del> cache *cache
<del> watchCh chan struct{}
<del> active bool
<add> scope string
<add> store store.Store
<add> cache *cache
<add> watchCh chan struct{}
<add> active bool
<add> sequential bool
<ide> sync.Mutex
<ide> }
<ide>
<ide> func newClient(scope string, kv string, addr string, config *store.Config, cache
<ide> if cached && scope != LocalScope {
<ide> return nil, fmt.Errorf("caching supported only for scope %s", LocalScope)
<ide> }
<add> sequential := false
<add> if scope == LocalScope {
<add> sequential = true
<add> }
<ide>
<ide> if config == nil {
<ide> config = &store.Config{}
<ide> func newClient(scope string, kv string, addr string, config *store.Config, cache
<ide> return nil, err
<ide> }
<ide>
<del> ds := &datastore{scope: scope, store: store, active: true, watchCh: make(chan struct{})}
<add> ds := &datastore{scope: scope, store: store, active: true, watchCh: make(chan struct{}), sequential: sequential}
<ide> if cached {
<ide> ds.cache = newCache(ds)
<ide> }
<ide> func (ds *datastore) PutObjectAtomic(kvObject KVObject) error {
<ide> pair *store.KVPair
<ide> err error
<ide> )
<del> ds.Lock()
<del> defer ds.Unlock()
<add> if ds.sequential {
<add> ds.Lock()
<add> defer ds.Unlock()
<add> }
<ide>
<ide> if kvObject == nil {
<ide> return types.BadRequestErrorf("invalid KV Object : nil")
<ide> add_cache:
<ide>
<ide> // PutObject adds a new Record based on an object into the datastore
<ide> func (ds *datastore) PutObject(kvObject KVObject) error {
<del> ds.Lock()
<del> defer ds.Unlock()
<add> if ds.sequential {
<add> ds.Lock()
<add> defer ds.Unlock()
<add> }
<ide>
<ide> if kvObject == nil {
<ide> return types.BadRequestErrorf("invalid KV Object : nil")
<ide> func (ds *datastore) putObjectWithKey(kvObject KVObject, key ...string) error {
<ide>
<ide> // GetObject returns a record matching the key
<ide> func (ds *datastore) GetObject(key string, o KVObject) error {
<del> ds.Lock()
<del> defer ds.Unlock()
<add> if ds.sequential {
<add> ds.Lock()
<add> defer ds.Unlock()
<add> }
<ide>
<ide> if ds.cache != nil {
<ide> return ds.cache.get(key, o)
<ide> func (ds *datastore) ensureParent(parent string) error {
<ide> }
<ide>
<ide> func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) {
<del> ds.Lock()
<del> defer ds.Unlock()
<add> if ds.sequential {
<add> ds.Lock()
<add> defer ds.Unlock()
<add> }
<ide>
<ide> if ds.cache != nil {
<ide> return ds.cache.list(kvObject)
<ide> func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) {
<ide>
<ide> // DeleteObject unconditionally deletes a record from the store
<ide> func (ds *datastore) DeleteObject(kvObject KVObject) error {
<del> ds.Lock()
<del> defer ds.Unlock()
<add> if ds.sequential {
<add> ds.Lock()
<add> defer ds.Unlock()
<add> }
<ide>
<ide> // cleaup the cache first
<ide> if ds.cache != nil {
<ide> func (ds *datastore) DeleteObject(kvObject KVObject) error {
<ide>
<ide> // DeleteObjectAtomic performs atomic delete on a record
<ide> func (ds *datastore) DeleteObjectAtomic(kvObject KVObject) error {
<del> ds.Lock()
<del> defer ds.Unlock()
<add> if ds.sequential {
<add> ds.Lock()
<add> defer ds.Unlock()
<add> }
<ide>
<ide> if kvObject == nil {
<ide> return types.BadRequestErrorf("invalid KV Object : nil")
<ide> del_cache:
<ide>
<ide> // DeleteTree unconditionally deletes a record from the store
<ide> func (ds *datastore) DeleteTree(kvObject KVObject) error {
<del> ds.Lock()
<del> defer ds.Unlock()
<add> if ds.sequential {
<add> ds.Lock()
<add> defer ds.Unlock()
<add> }
<ide>
<ide> // cleaup the cache first
<ide> if ds.cache != nil { | 1 |
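The pattern this commit introduces — take the store mutex only when the scope demands sequential access — translates directly to other languages. A rough Python sketch of the same idea (hypothetical, not from the repository; requires Python 3.7+ for `nullcontext`):

import threading
from contextlib import nullcontext

class Datastore:
    def __init__(self, scope):
        self._scope = scope
        self._lock = threading.Lock()
        # Mirror newClient(): only the local scope serializes operations.
        self._sequential = (scope == "local")

    def _guard(self):
        # Real lock for local scope, no-op context manager for global scope.
        return self._lock if self._sequential else nullcontext()

    def put_object(self, key, value):
        with self._guard():
            pass  # write through to the backing KV store here

Datastore("local").put_object("k", "v")   # serialized under the mutex
Datastore("global").put_object("k", "v")  # lock-free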
Java | Java | defer exchangefilterfunction to subscription time | d463598c09ad653d821bb9048c19f34661198390 | <ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/function/client/DefaultWebClient.java
<ide> /*
<del> * Copyright 2002-2018 the original author or authors.
<add> * Copyright 2002-2019 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> public Mono<ClientResponse> exchange() {
<ide> ClientRequest request = (this.inserter != null ?
<ide> initRequestBuilder().body(this.inserter).build() :
<ide> initRequestBuilder().build());
<del> return exchangeFunction.exchange(request).switchIfEmpty(NO_HTTP_CLIENT_RESPONSE_ERROR);
<add> return Mono.defer(() -> exchangeFunction.exchange(request))
<add> .switchIfEmpty(NO_HTTP_CLIENT_RESPONSE_ERROR);
<ide> }
<ide>
<ide> private ClientRequest.Builder initRequestBuilder() {
<ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/function/client/ExchangeFilterFunction.java
<ide> /*
<del> * Copyright 2002-2018 the original author or authors.
<add> * Copyright 2002-2019 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> import org.springframework.util.Assert;
<ide>
<ide> /**
<del> * Represents a function that filters an{@linkplain ExchangeFunction exchange function}.
<add> * Represents a function that filters an {@linkplain ExchangeFunction exchange function}.
<add> * <p>The filter is executed when a {@code Subscriber} subscribes to the
<add> * {@code Publisher} returned by the {@code WebClient}.
<ide> *
<ide> * @author Arjen Poutsma
<ide> * @since 5.0
<ide><path>spring-webflux/src/test/java/org/springframework/web/reactive/function/client/DefaultWebClientTests.java
<ide> /*
<del> * Copyright 2002-2018 the original author or authors.
<add> * Copyright 2002-2019 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> * Unit tests for {@link DefaultWebClient}.
<ide> *
<ide> * @author Rossen Stoyanchev
<add> * @author Brian Clozel
<ide> */
<ide> public class DefaultWebClientTests {
<ide>
<ide> public class DefaultWebClientTests {
<ide> public void setup() {
<ide> MockitoAnnotations.initMocks(this);
<ide> this.exchangeFunction = mock(ExchangeFunction.class);
<del> when(this.exchangeFunction.exchange(this.captor.capture())).thenReturn(Mono.empty());
<add> ClientResponse mockResponse = mock(ClientResponse.class);
<add> when(this.exchangeFunction.exchange(this.captor.capture())).thenReturn(Mono.just(mockResponse));
<ide> this.builder = WebClient.builder().baseUrl("/base").exchangeFunction(this.exchangeFunction);
<ide> }
<ide>
<ide>
<ide> @Test
<ide> public void basic() {
<del> this.builder.build().get().uri("/path").exchange();
<add> this.builder.build().get().uri("/path")
<add> .exchange().block(Duration.ofSeconds(10));
<ide>
<ide> ClientRequest request = verifyAndGetRequest();
<ide> assertEquals("/base/path", request.url().toString());
<ide> public void basic() {
<ide> public void uriBuilder() {
<ide> this.builder.build().get()
<ide> .uri(builder -> builder.path("/path").queryParam("q", "12").build())
<del> .exchange();
<add> .exchange().block(Duration.ofSeconds(10));
<ide>
<ide> ClientRequest request = verifyAndGetRequest();
<ide> assertEquals("/base/path?q=12", request.url().toString());
<del> verifyNoMoreInteractions(this.exchangeFunction);
<ide> }
<ide>
<ide> @Test
<ide> public void uriBuilderWithPathOverride() {
<ide> this.builder.build().get()
<ide> .uri(builder -> builder.replacePath("/path").build())
<del> .exchange();
<add> .exchange().block(Duration.ofSeconds(10));
<ide>
<ide> ClientRequest request = verifyAndGetRequest();
<ide> assertEquals("/path", request.url().toString());
<del> verifyNoMoreInteractions(this.exchangeFunction);
<ide> }
<ide>
<ide> @Test
<ide> public void requestHeaderAndCookie() {
<ide> this.builder.build().get().uri("/path").accept(MediaType.APPLICATION_JSON)
<ide> .cookies(cookies -> cookies.add("id", "123")) // SPR-16178
<del> .exchange();
<add> .exchange().block(Duration.ofSeconds(10));
<ide>
<ide> ClientRequest request = verifyAndGetRequest();
<ide> assertEquals("application/json", request.headers().getFirst("Accept"));
<ide> assertEquals("123", request.cookies().getFirst("id"));
<del> verifyNoMoreInteractions(this.exchangeFunction);
<ide> }
<ide>
<ide> @Test
<ide> public void defaultHeaderAndCookie() {
<ide> .defaultHeader("Accept", "application/json").defaultCookie("id", "123")
<ide> .build();
<ide>
<del> client.get().uri("/path").exchange();
<add> client.get().uri("/path").exchange().block(Duration.ofSeconds(10));
<ide>
<ide> ClientRequest request = verifyAndGetRequest();
<ide> assertEquals("application/json", request.headers().getFirst("Accept"));
<ide> assertEquals("123", request.cookies().getFirst("id"));
<del> verifyNoMoreInteractions(this.exchangeFunction);
<ide> }
<ide>
<ide> @Test
<ide> public void defaultHeaderAndCookieOverrides() {
<ide> .defaultCookie("id", "123")
<ide> .build();
<ide>
<del> client.get().uri("/path").header("Accept", "application/xml").cookie("id", "456").exchange();
<add> client.get().uri("/path")
<add> .header("Accept", "application/xml")
<add> .cookie("id", "456")
<add> .exchange().block(Duration.ofSeconds(10));
<ide>
<ide> ClientRequest request = verifyAndGetRequest();
<ide> assertEquals("application/xml", request.headers().getFirst("Accept"));
<ide> assertEquals("456", request.cookies().getFirst("id"));
<del> verifyNoMoreInteractions(this.exchangeFunction);
<ide> }
<ide>
<ide> @Test
<ide> public void defaultRequest() {
<ide>
<ide> try {
<ide> context.set("bar");
<del> client.get().uri("/path").attribute("foo", "bar").exchange();
<add> client.get().uri("/path").attribute("foo", "bar")
<add> .exchange().block(Duration.ofSeconds(10));
<ide> }
<ide> finally {
<ide> context.remove();
<ide> public void withStringAttribute() {
<ide> this.builder.filter(filter).build()
<ide> .get().uri("/path")
<ide> .attribute("foo", "bar")
<del> .exchange();
<add> .exchange().block(Duration.ofSeconds(10));
<ide>
<ide> assertEquals("bar", actual.get("foo"));
<ide>
<ide> public void withNullAttribute() {
<ide> this.builder.filter(filter).build()
<ide> .get().uri("/path")
<ide> .attribute("foo", null)
<del> .exchange();
<add> .exchange().block(Duration.ofSeconds(10));
<ide>
<ide> assertNull(actual.get("foo"));
<ide>
<ide> public void apply() {
<ide> .defaultCookie("id", "123"))
<ide> .build();
<ide>
<del> client.get().uri("/path").exchange();
<add> client.get().uri("/path").exchange().block(Duration.ofSeconds(10));
<ide>
<ide> ClientRequest request = verifyAndGetRequest();
<ide> assertEquals("application/json", request.headers().getFirst("Accept"));
<ide> assertEquals("123", request.cookies().getFirst("id"));
<del> verifyNoMoreInteractions(this.exchangeFunction);
<ide> }
<ide>
<ide> @Test
<ide> public void switchToErrorOnEmptyClientResponseMono() {
<add> ExchangeFunction exchangeFunction = mock(ExchangeFunction.class);
<add> when(exchangeFunction.exchange(any())).thenReturn(Mono.empty());
<add> WebClient.Builder builder = WebClient.builder().baseUrl("/base").exchangeFunction(exchangeFunction);
<ide> StepVerifier.create(builder.build().get().uri("/path").exchange())
<ide> .expectErrorMessage("The underlying HTTP client completed without emitting a response.")
<ide> .verify(Duration.ofSeconds(5));
<ide> }
<ide>
<add> @Test
<add> public void shouldApplyFiltersAtSubscription() {
<add> WebClient client = this.builder
<add> .filter((request, next) -> {
<add> return next.exchange(ClientRequest
<add> .from(request)
<add> .header("Custom", "value")
<add> .build());
<add> })
<add> .build();
<add> Mono<ClientResponse> exchange = client.get().uri("/path").exchange();
<add> verifyZeroInteractions(this.exchangeFunction);
<add> exchange.block(Duration.ofSeconds(10));
<add> ClientRequest request = verifyAndGetRequest();
<add> assertEquals("value", request.headers().getFirst("Custom"));
<add> }
<add>
<ide> private ClientRequest verifyAndGetRequest() {
<ide> ClientRequest request = this.captor.getValue();
<ide> Mockito.verify(this.exchangeFunction).exchange(request); | 3 |
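The behavioral change here — filters now run when the returned publisher is subscribed to, not when the call chain is assembled — has a loose Python analogy: an `async def` body does not execute until the coroutine is awaited. The sketch below is a hypothetical illustration of that laziness, unrelated to Spring's actual implementation:

import asyncio

async def exchange(request):
    # In the Java code this is where the filter chain and HTTP call run.
    print("filters + I/O execute now, for", request["url"])
    return {"status": 200}

async def main():
    pending = exchange({"url": "/path"})  # assembled, nothing executed yet
    response = await pending              # "subscription": the body runs here
    print(response)

asyncio.run(main())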
Ruby | Ruby | use `skip` so we can see what tests are not run | 908796dac5c702123415727fd28f6f9633dc2354 | <ide><path>activerecord/test/cases/base_test.rb
<ide> def test_readonly_attributes
<ide> assert_equal "changed", post.body
<ide> end
<ide>
<del> unless current_adapter?(:MysqlAdapter)
<del> def test_unicode_column_name
<del> columns = Weird.columns_hash.keys
<del> weird = Weird.create(:なまえ => 'たこ焼き仮面')
<del> assert_equal 'たこ焼き仮面', weird.なまえ
<del> end
<add> def test_unicode_column_name
<add> skip "not on mysql" if current_adapter?(:MysqlAdapter)
<add> columns = Weird.columns_hash.keys
<add> weird = Weird.create(:なまえ => 'たこ焼き仮面')
<add> assert_equal 'たこ焼き仮面', weird.なまえ
<ide> end
<ide>
<ide> def test_non_valid_identifier_column_name | 1 |
PHP | PHP | fix docblock tag | bd22019d2d3f8b68924042d3156b2b08775439c7 | <ide><path>tests/TestCase/Form/FormTest.php
<ide> public function testSchema()
<ide> * Test validator()
<ide> *
<ide> * @return void
<del> * @deprecated
<add> * @group deprecated
<ide> */
<ide> public function testValidator()
<ide> { | 1 |
Javascript | Javascript | workaround a gc bug in chrome < 50 | e34ef23ab868b36d18ee845f78536f0987edf56a | <ide><path>src/ng/compile.js
<ide> function $CompileProvider($provide, $$sanitizeUriProvider) {
<ide> compileNode = $compileNode[0];
<ide> replaceWith(jqCollection, sliceArgs($template), compileNode);
<ide>
<add> // Support: Chrome < 50
<add> // https://github.com/angular/angular.js/issues/14041
<add>
<add> // In the versions of V8 prior to Chrome 50, the document fragment that is created
<add> // in the `replaceWith` function is improperly garbage collected despite still
<add> // being referenced by the `parentNode` property of all of the child nodes. By adding
<add> // a reference to the fragment via a different property, we can avoid that incorrect
<add> // behavior.
<add> // TODO: remove this line after Chrome 50 has been released
<add> $template[0].$$parentNode = $template[0].parentNode;
<add>
<ide> childTranscludeFn = compilationGenerator(mightHaveMultipleTransclusionError, $template, transcludeFn, terminalPriority,
<ide> replaceDirective && replaceDirective.name, {
<ide> // Don't pass in: | 1 |
Javascript | Javascript | push dist to same remote as project | 1ba45fcc15c894cad591d93cbb88010df5f235fe | <ide><path>build/release/dist.js
<ide> module.exports = function( Release, complete ) {
<ide> fs = require( "fs" ),
<ide> shell = require( "shelljs" ),
<ide> pkg = require( Release.dir.repo + "/package.json" ),
<add> distRemote = Release.remote.replace( "jquery", "jquery-dist" ),
<ide> // These files are included with the distrubtion
<ide> files = [
<ide> "src",
<ide> module.exports = function( Release, complete ) {
<ide> * Clone the distribution repo
<ide> */
<ide> function clone() {
<del> var distRemote = Release.remote.replace( "jquery", "jquery-dist" );
<del>
<ide> Release.chdir( Release.dir.base );
<ide> Release.dir.dist = Release.dir.base + "/dist";
<ide>
<ide> module.exports = function( Release, complete ) {
<ide> Release.chdir( Release.dir.dist );
<ide>
<ide> console.log( "Pushing release to dist repo..." );
<del> Release.exec( "git push origin master --tags",
<add> Release.exec( "git push " + distRemote + " master --tags",
<ide> "Error pushing master and tags to git repo." );
<ide>
<ide> // Set repo for npm publish | 1 |
Java | Java | fix android activity template. fixes | f93cf6ca1921b8bd252866c40b52cb2a6a20ae2f | <ide><path>local-cli/generator-android/templates/package/MainActivity.java
<ide> package <%= package %>;
<ide>
<ide> import com.facebook.react.ReactActivity;
<add>import com.facebook.react.ReactPackage;
<ide> import com.facebook.react.shell.MainReactPackage;
<ide>
<add>import java.util.Arrays;
<add>import java.util.List;
<add>
<ide> public class MainActivity extends ReactActivity {
<ide>
<ide> /**
<ide> protected boolean getUseDeveloperSupport() {
<ide> * or modules besides the default ones, add more packages here.
<ide> */
<ide> @Override
<del> protected abstract List<ReactPackage> getPackages() {
<add> protected List<ReactPackage> getPackages() {
<ide> return Arrays.<ReactPackage>asList(
<ide> new MainReactPackage());
<ide> } | 1 |
Text | Text | fix comparison between sorting and search | c5e7a9aa78eb083ac9838e0e2a46dfc5a27b2dee | <ide><path>guide/english/algorithms/search-algorithms/linear-search/index.md
<ide> end
<ide>
<ide> ## Why linear search is not efficient
<ide>
<del>There is no doubt that linear search is simple but because it compares each element one by one, it is time consuming and hence not much efficient. If we have to find a number from say, 1000000 numbers and number is at the last location, linear search technique would become quite tedious. So, also learn about bubble sort, quick sort,etc. which are much more efficient than linear search.
<add>There is no doubt that linear search is simple but because it compares each element one by one, it is time consuming and hence not very efficient. If we have to find a number from say, 1000000 numbers and number is at the last location, linear search technique would become quite tedious. So, also learn about binary search, exponential search, etc. which are much more efficient than linear search.
<ide>
<ide> #### Relevant Video:
<ide> | 1 |
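To make the comparison concrete, here is a minimal binary search in Python — one of the faster alternatives the corrected paragraph points to. It assumes the input is already sorted, which is exactly the precondition linear search does not need:

def binary_search(items, target):
    # Halves the search range each step: O(log n) vs linear search's O(n).
    low, high = 0, len(items) - 1
    while low <= high:
        mid = (low + high) // 2
        if items[mid] == target:
            return mid
        if items[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return -1

# Finds the last of 1000000 numbers in about 20 probes instead of 1000000.
print(binary_search(list(range(1000000)), 999999))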
Javascript | Javascript | fix faulty relpath test | 9cd932f4359daf4d9add982cda70dfa5215f3f8a | <ide><path>test/parallel/test-fs-realpath-native.js
<ide> const common = require('../common');
<ide> const assert = require('assert');
<ide> const fs = require('fs');
<ide>
<del>if (!common.isOSX) common.skip('MacOS-only test.');
<add>const filename = __filename.toLowerCase();
<ide>
<del>assert.strictEqual(fs.realpathSync.native('/users'), '/Users');
<del>fs.realpath.native('/users', common.mustCall(function(err, res) {
<del> assert.ifError(err);
<del> assert.strictEqual(res, '/Users');
<del> assert.strictEqual(this, undefined);
<del>}));
<add>assert.strictEqual(
<add> fs.realpathSync.native('./test/parallel/test-fs-realpath-native.js')
<add> .toLowerCase(),
<add> filename);
<add>
<add>fs.realpath.native(
<add> './test/parallel/test-fs-realpath-native.js',
<add> common.mustCall(function(err, res) {
<add> assert.ifError(err);
<add> assert.strictEqual(res.toLowerCase(), filename);
<add> assert.strictEqual(this, undefined);
<add> })); | 1 |
Ruby | Ruby | remove call to self.locale from helpers | c178a87b4326edd491922136c0a55bf4b889473d | <ide><path>actionpack/lib/action_view/helpers/active_record_helper.rb
<ide> def error_messages_for(*params)
<ide> end
<ide>
<ide> count = objects.inject(0) {|sum, object| sum + object.errors.count }
<del> locale = options[:locale]
<del> locale ||= self.locale if respond_to?(:locale)
<del>
<ide> unless count.zero?
<ide> html = {}
<ide> [:id, :class].each do |key|
<ide> def error_messages_for(*params)
<ide> end
<ide> options[:object_name] ||= params.first
<ide>
<del> I18n.with_options :locale => locale, :scope => [:active_record, :error] do |locale|
<add> I18n.with_options :locale => options[:locale], :scope => [:active_record, :error] do |locale|
<ide> header_message = if options.include?(:header_message)
<ide> options[:header_message]
<ide> else
<ide><path>actionpack/lib/action_view/helpers/date_helper.rb
<ide> module DateHelper
<ide> # distance_of_time_in_words(Time.now, Time.now) # => less than a minute
<ide> #
<ide> def distance_of_time_in_words(from_time, to_time = 0, include_seconds = false, options = {})
<del> locale = options[:locale]
<del> locale ||= self.locale if respond_to?(:locale)
<del>
<ide> from_time = from_time.to_time if from_time.respond_to?(:to_time)
<ide> to_time = to_time.to_time if to_time.respond_to?(:to_time)
<ide> distance_in_minutes = (((to_time - from_time).abs)/60).round
<ide> distance_in_seconds = ((to_time - from_time).abs).round
<ide>
<del> I18n.with_options :locale => locale, :scope => :'datetime.distance_in_words' do |locale|
<add> I18n.with_options :locale => options[:locale], :scope => :'datetime.distance_in_words' do |locale|
<ide> case distance_in_minutes
<ide> when 0..1
<ide> return distance_in_minutes == 0 ?
<ide><path>actionpack/lib/action_view/helpers/number_helper.rb
<ide> def number_to_phone(number, options = {})
<ide> # number_to_currency(1234567890.50, :unit => "£", :separator => ",", :delimiter => "", :format => "%n %u")
<ide> # # => 1234567890,50 £
<ide> def number_to_currency(number, options = {})
<del> options = options.symbolize_keys
<del>
<del> locale = options[:locale]
<del> locale ||= self.locale if respond_to?(:locale)
<del>
<del> defaults = :'currency.format'.t(locale) || {}
<add> options = options.symbolize_keys
<add> defaults = :'currency.format'.t(options[:locale]) || {}
<add>
<ide> precision = options[:precision] || defaults[:precision]
<ide> unit = options[:unit] || defaults[:unit]
<ide> separator = options[:separator] || defaults[:separator] | 3 |
Python | Python | add tests for params handling in dag construction | d8433679240614af1fcb4b4faa99845e5c7480e1 | <ide><path>tests/models.py
<ide> from airflow.operators.dummy_operator import DummyOperator
<ide>
<ide>
<add>class DagTest(unittest.TestCase):
<add>
<add> def test_parms_not_passed_is_empty_dict(self):
<add> """
<add> Test that when 'params' is _not_ passed to a new Dag, that the params
<add> attribute is set to an empty dictionary.
<add> """
<add> dag = models.DAG('test-dag')
<add>
<add> assert type(dag.params) == dict
<add> assert len(dag.params) == 0
<add>
<add> def test_params_passed_and_params_in_default_args_no_override(self):
<add> """
<add> Test that when 'params' exists as a key passed to the default_args dict
<add> in addition to params being passed explicitly as an argument to the
<add> dag, that the 'params' key of the default_args dict is merged with the
<add> dict of the params argument.
<add> """
<add> params1 = {'parameter1': 1}
<add> params2 = {'parameter2': 2}
<add>
<add> dag = models.DAG('test-dag',
<add> default_args={'params': params1},
<add> params=params2)
<add>
<add> params_combined = params1.copy()
<add> params_combined.update(params2)
<add> assert dag.params == params_combined
<add>
<add>
<ide> class DagRunTest(unittest.TestCase):
<ide> def test_id_for_date(self):
<ide> run_id = models.DagRun.id_for_date( | 1 |
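The merge semantics the second test pins down can be shown in isolation. A hypothetical snippet (plain dicts, no Airflow imports) mirroring what `DAG.__init__` is expected to do with `default_args['params']` and the explicit `params` argument:

params_from_default_args = {'parameter1': 1}
params_argument = {'parameter2': 2}

# Start from a copy of default_args['params'], then layer the explicit
# argument on top, so explicit params win on any key collision.
combined = params_from_default_args.copy()
combined.update(params_argument)
assert combined == {'parameter1': 1, 'parameter2': 2}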
Ruby | Ruby | pass the pk to compile_update | 8d7d2df3e2f9162d892817a4ec2d687b21c2900a | <ide><path>activerecord/lib/active_record/counter_cache.rb
<ide> def reset_counters(id, *counters)
<ide>
<ide> stmt = unscoped.where(arel_table[primary_key].eq(object.id)).arel.compile_update({
<ide> arel_table[counter_name] => object.send(association).count
<del> })
<add> }, primary_key)
<ide> connection.update stmt
<ide> end
<ide> return true
<ide><path>activerecord/lib/active_record/locking/optimistic.rb
<ide> def update_record(attribute_names = @attributes.keys) #:nodoc:
<ide> relation.table[self.class.primary_key].eq(id).and(
<ide> relation.table[lock_col].eq(self.class.quote_value(previous_lock_value, column_for_attribute(lock_col)))
<ide> )
<del> ).arel.compile_update(arel_attributes_with_values_for_update(attribute_names))
<add> ).arel.compile_update(
<add> arel_attributes_with_values_for_update(attribute_names),
<add> self.class.primary_key
<add> )
<ide>
<ide> affected_rows = self.class.connection.update stmt
<ide>
<ide><path>activerecord/lib/active_record/relation.rb
<ide> def insert(values) # :nodoc:
<ide>
<ide> def update_record(values, id, id_was) # :nodoc:
<ide> substitutes, binds = substitute_values values
<del> um = @klass.unscoped.where(@klass.arel_table[@klass.primary_key].eq(id_was || id)).arel.compile_update(substitutes)
<add> um = @klass.unscoped.where(@klass.arel_table[@klass.primary_key].eq(id_was || id)).arel.compile_update(substitutes, @klass.primary_key)
<ide>
<ide> @klass.connection.update(
<ide> um, | 3 |
Javascript | Javascript | remove ios platform check for running devtools | 1b40db7ec0acb3a38a2a1c73623dbce420d2c1e5 | <ide><path>Libraries/JavaScriptAppEngine/Initialization/InitializeJavaScriptAppEngine.js
<ide> function setUpMapAndSet() {
<ide> function setUpDevTools() {
<ide> if (__DEV__) {
<ide> // not when debugging in chrome
<del> if (!window.document && require('Platform').OS === 'ios') {
<add> if (!window.document) {
<ide> const setupDevtools = require('setupDevtools');
<ide> setupDevtools();
<ide> } | 1 |
Javascript | Javascript | fix build by checking view_preserves_context | 3ff2d411108a8f893c2c5d0eb9c886a233a542e0 | <ide><path>packages/ember-views/tests/views/view/template_test.js
<ide>
<ide> var set = Ember.set, get = Ember.get;
<ide>
<add>var VIEW_PRESERVES_CONTEXT = Ember.VIEW_PRESERVES_CONTEXT;
<add>
<ide> module("Ember.View - Template Functionality");
<ide>
<ide> test("should call the function of the associated template", function() {
<ide> test("should call the function of the associated template", function() {
<ide> view.createElement();
<ide> });
<ide>
<del>
<ide> ok(view.$('#twas-called').length, "the named template was called");
<ide> });
<ide>
<ide> test("should call the function of the associated template with itself as the con
<ide> view.createElement();
<ide> });
<ide>
<del>
<ide> equal("template was called for Tom DAAAALE", view.$('#twas-called').text(), "the named template was called with the view as the data source");
<ide> });
<ide>
<ide> test("should not use defaultTemplate if template is provided", function() {
<ide> view.createElement();
<ide> });
<ide>
<del>
<ide> equal("foo", view.$().text(), "default template was not printed");
<ide> });
<ide>
<ide> test("should not use defaultTemplate if template is provided", function() {
<ide> view.createElement();
<ide> });
<ide>
<del>
<ide> equal("foo", view.$().text(), "default template was not printed");
<ide> });
<ide>
<del>
<ide> test("should render an empty element if no template is specified", function() {
<ide> var view;
<add>
<ide> view = Ember.View.create();
<ide> Ember.run(function(){
<ide> view.createElement();
<ide> });
<add>
<ide> equal(view.$().html(), '', "view div should be empty");
<ide> });
<ide>
<ide> test("should provide a controller to the template if a controller is specified on the view", function() {
<del> expect(7);
<add> expect(VIEW_PRESERVES_CONTEXT ? 7 : 5);
<ide>
<ide> var Controller1 = Ember.Object.extend({
<ide> toString: function() { return "Controller1"; }
<ide> test("should provide a controller to the template if a controller is specified o
<ide> parentView.destroy();
<ide> });
<ide>
<del>
<ide> var parentViewWithControllerlessChild = Ember.View.create({
<ide> controller: controller1,
<ide>
<ide> test("should provide a controller to the template if a controller is specified o
<ide>
<ide> strictEqual(optionsDataKeywordsControllerForView, controller1, "passes the original controller in the data");
<ide> strictEqual(optionsDataKeywordsControllerForChildView, controller1, "passes the controller in the data to child views");
<del> strictEqual(contextForView, controller2, "passes the controller in as the main context of the parent view");
<del> strictEqual(contextForControllerlessView, controller1, "passes the controller in as the main context of the child view");
<add>
<add> if (VIEW_PRESERVES_CONTEXT) {
<add> strictEqual(contextForView, controller2, "passes the controller in as the main context of the parent view");
<add> strictEqual(contextForControllerlessView, controller1, "passes the controller in as the main context of the child view");
<add> }
<ide> }); | 1 |