Columns:
  prompt: large_string, lengths 70 to 991k
  completion: large_string, lengths 0 to 1.02k
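Both columns are plain strings: prompt holds a source file rendered in fill-in-the-middle (FIM) format, with <|fim▁hole|> marking the elided span, and completion holds the text that fills that span. Below is a minimal sketch of reading such rows with the Hugging Face datasets library; the repository path is a placeholder, not the actual dataset name.

from datasets import load_dataset

# Placeholder path: substitute the real dataset repository or a local directory.
ds = load_dataset("user/fim-code-pairs", split="train")

for row in ds.select(range(3)):
    prompt = row["prompt"]          # FIM-formatted file (70 to ~991k characters)
    completion = row["completion"]  # text for the <|fim▁hole|> span (0 to ~1.02k characters)
    print(len(prompt), len(completion))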
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># Python script to perform consistency analysis on metabolic models # Copyright (C) 2015 Miguel Ponce de Leon # Contact: [email protected] # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import csv, re from cobra.core import Gene,Model from settings import REACTION_PREFIX def read_ec_numbers(fname): rxn2ec = {row[0]:row[1] for row in csv.reader(open(fname))} ECs_rxns = {} for rxn,ec in rxn2ec.items(): if not re.search('^[1-6]\.[0-9][0-9]*\.[0-9][0-9]*',ec): continue elif ec not in ECs_rxns: ECs_rxns[ec] = [] ECs_rxns[ec].append(rxn) return ECs_rxns def add_expantion_fluxes(model,metabolites={},prefix='EFFLUX_',set_as_objective=False,copy_model=True): if copy_model: model = model.copy() for metab,coef in metabolites.items(): if not hasattr(metab, 'id'): metab = model.metabolites.get_by_id(metab) reaction = cobra.Reaction(prefix+metab.id) reaction.add_metabolites({metab : coef}) if set_as_objective: reaction.objective_coefficient = 1 model.add_reaction(reaction) return model def correct_seed_model(model,metamodel): if 'rxn02297' in model.reactions and 'rxn02296' not in model.reactions: new_rxn = metamodel.reactions.new_rxn00021.copy() old_rxn = model.reactions.rxn02297 model.add_reaction(new_rxn) new_rxn.gene_reaction_rule = old_rxn.gene_reaction_rule model.remove_reactions([old_rxn]) # the following metabolites should be removed because they appear as conserved pools # on the biomass = [r for r in model.reactions if r.startswith('bio')][0] conflictive_metabolties = ['cpd01997_c','cpd03422_c','cpd11416_c'] #conflictive_metabolties = ['cpd01997_c','cpd03422_c'] for m in conflictive_metabolties: #print m if m not in model.metabolites: continue metabolite = model.metabolites.get_by_id(m) if metabolite not in biomass.products: continue s_coeff = biomass.get_coefficient(metabolite.id) biomass.add_metabolites({metabolite:-s_coeff}) if 'EX_cpd11416_c' in model.reactions: model.remove_reactions(['EX_cpd11416_c']) if 'rxn05029' in model.reactions: model.remove_reactions(['rxn05029']) return model def prepare_model(model,metamodel,reactions_to_remove=[],correct_seed=False): model = model.copy() if len(reactions_to_remove) > 0: model.remove_reactions(reactions_to_remove) if correct_seed: correct_seed_model(model,metamodel)<|fim▁hole|> list_of_reactions = [r for r in model.reactions] for r in list_of_reactions: if r not in metamodel.reactions: print "%s not in metamodel %s" % (r.id,metamodel.id) continue reaction_reference = metamodel.reactions.get_by_id(r.id) result = r - reaction_reference if len(result.metabolites) > 0: genes = r.genes gene_rule = r.gene_reaction_rule model.remove_reactions([r]) model.add_reaction(reaction_reference.copy()) new_reaction = model.reactions.get_by_id(r.id) new_reaction._genes = genes new_reaction.gene_reaction_rule = gene_rule for g in genes: g._reaction.add(new_reaction) 
model.reactions.get_by_id(r.id).lower_bound = reaction_reference.lower_bound metabolites_to_remove = [m.id for m in model.metabolites if len(m.reactions) == 0] for m in metabolites_to_remove: if m not in model.metabolites: continue model.metabolites.get_by_id(m).remove_from_model() return model def create_consisten_model(model,metamodel,consistent_reactions): consistent_model = Model() consistent_model.id = model.id consistent_model.description = model.id auxiliar_gene = Gene('MODULAR_GAPFILLING') auxiliar_gene._model = consistent_model consistent_model.genes.append(auxiliar_gene) for reaction_id in consistent_reactions: new_reaction = metamodel.reactions.get_by_id(reaction_id).copy() if reaction_id in model.reactions: reaction_reference = model.reactions.get_by_id(reaction_id) gene_list = [] for gene in reaction_reference.genes: if gene.id in consistent_model.genes: gene_list.append(consistent_model.genes.get_by_id(gene.id)) else: new_gene = Gene(gene.id) new_gene._model = consistent_model consistent_model.genes.append(new_gene) gene_list.append(new_gene) for gene in gene_list: gene._reaction.add(new_reaction) new_reaction._genes = gene_list new_reaction.gene_reaction_rule = reaction_reference.gene_reaction_rule else: new_reaction.gene_reaction_rule = auxiliar_gene.name auxiliar_gene._reaction.add(new_reaction) consistent_model.add_reaction(new_reaction) return consistent_model def get_full_coupled_sets(reaction_names,fctab, exclude_preffix=None): """ Interpretation for element (i, j): 1 - fully coupled <=> 2 - partially coupled <-> 3 - reaction i is directionally coupled to j ( v[i]<>0 -> v[j]<>0 ) 4 - reaction j is directionally coupled to i ( v[j]<>0 -> v[i]<>0 ) 5 - uncoupled """ assert fctab.shape[0] == len(reaction_names) already_coupled = set() coupling_sets = [] for i in np.arange(fctab.shape[0]): if i in already_coupled: continue indexes = np.where(fctab[i,:]==1)[0] if len(indexes) < 2: continue coupling_sets.append(indexes) already_coupled = already_coupled.union(indexes) #coupling_sets = np.array([np.array(b) for b in set([tuple(a) for a in fctab])]) #coupling_sets = [subset for subset in [np.where(es==1)[0] for es in coupling_sets] if len(subset)>1] result = {} counter = 1 for subset in coupling_sets: rs_id = 'RS_'+str(counter) if exclude_preffix: reaction_ids = [reaction_names[i] for i in subset if not re.search(exclude_preffix,reaction_names[i])] else: reaction_ids = [reaction_names[i] for i in subset] if len(reaction_ids) > 1: result[rs_id] = reaction_ids counter += 1 return result def decorate_graph(G,labels={},colors={}): for tail,head in G.edges(): G.edge[tail][head]['graphics'] = {} G.edge[tail][head]['graphics']['targetArrow'] = "standard" try: width = float(G[tail][head]['label']) G.edge[tail][head]['graphics']['width'] = width except: G.edge[tail][head]['graphics']['width'] = 1.0 for n in G.nodes(): label = n if n in labels: G.node[n]['label'] = labels[n] label = labels[n] graphics = {} if n in colors: color = colors[n] else: color = None if G.node[n]['node_class'] == 'reaction': outline = "#000000" if not color: color = "#c0c0c0" height = 16.0 width = max((len(label) * 8.0),85.0) graphics = {"w":width, "h":height, "type":"roundrectangle", "fill":color, "outline":outline} elif G.node[n]['node_class'] == 'metabolite': outline = "#ffffff" if not color: color = "#ffffff" height = 15.0 width = max((len(label) * 8.0),60.0) if n in colors: color = colors[n] outline = "#000000" graphics = {"w":width, "h":height, "type":"rectangle", "fill":color, "outline":outline} 
G.node[n]['graphics'] = graphics return G def csv_save(a_list,fname): if not isinstance(a_list,list): return elif len(a_list)<1: return elif not isinstance(a_list[0],list): a_list = [[e] for e in a_list] f = open(fname,'w') w = csv.writer(f) x = w.writerows(a_list) f.close() return x def f_rxn(x): return re.search(REACTION_PREFIX,x) def f_ex(x): return re.search(EXCHANGE_PREFIX,x) def f_flux(x): return f_ex(x) or f_rxn(x)<|fim▁end|>
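As a usage illustration for the utils.py sample above: read_ec_numbers inverts a reaction-to-EC CSV into an EC-to-reactions map, skipping rows whose second column does not match the EC-number pattern. A minimal sketch, assuming the module above is importable as utils; the file name and its contents are illustrative only.

from utils import read_ec_numbers

# ec_mapping.csv (illustrative contents):
#   rxn00001,1.1.1.1
#   rxn00002,1.1.1.1
#   rxn00003,not-an-ec        (skipped: fails the EC-number pattern)
ECs_rxns = read_ec_numbers('ec_mapping.csv')
# ECs_rxns == {'1.1.1.1': ['rxn00001', 'rxn00002']}
print ECs_rxns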
<|file_name|>app.e2e-spec.ts<|end_file_name|><|fim▁begin|>import { NgLandingPage } from './app.po'; describe('ng-landing App', () => { let page: NgLandingPage; beforeEach(() => { page = new NgLandingPage();<|fim▁hole|> page.navigateTo(); expect(page.getParagraphText()).toEqual('Welcome to app!'); }); });<|fim▁end|>
}); it('should display welcome message', () => {
<|file_name|>0019_auto__del_likedcomment__del_comment__add_field_node_abs_parent__chg_fi.py<|end_file_name|><|fim▁begin|># encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting model 'LikedComment' db.delete_table('forum_likedcomment') # Deleting model 'Comment' db.delete_table(u'comment') # Adding field 'Node.abs_parent' db.add_column('forum_node', 'abs_parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='all_children', null=True, to=orm['forum.Node']), keep_default=False) # Changing field 'Question.last_activity_by' db.alter_column(u'question', 'last_activity_by_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['forum.User'])) def backwards(self, orm): # Adding model 'LikedComment' db.create_table('forum_likedcomment', ( ('comment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.Comment'])), ('canceled', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.User'])), ('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), )) db.send_create_signal('forum', ['LikedComment']) # Adding model 'Comment' db.create_table(u'comment', ( ('comment', self.gf('django.db.models.fields.CharField')(max_length=300)), ('node', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', null=True, to=orm['forum.Node'])), ('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)), ('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)), ('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='deleted_comments', null=True, to=orm['forum.User'], blank=True)), <|fim▁hole|> ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), )) db.send_create_signal('forum', ['Comment']) # Deleting field 'Node.abs_parent' db.delete_column('forum_node', 'abs_parent_id') # Changing field 'Question.last_activity_by' db.alter_column(u'question', 'last_activity_by_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.User'])) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'forum.activity': { 'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']"}) }, 'forum.anonymousnode': { 'Meta': {'object_name': 'AnonymousNode', '_ormbases': ['forum.Node']}, 'convertible_to': ('django.db.models.fields.CharField', [], {'default': "'node'", 'max_length': '16'}), 'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['forum.Node']", 'unique': 'True', 'primary_key': 'True'}), 'validation_hash': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_content'", 'to': "orm['forum.Node']"}) }, 'forum.answer': { 'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"}, 'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'accepted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']", 'null': 'True'}), 'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['forum.Node']", 'unique': 'True', 'primary_key': 'True'}), 'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'forum.authkeyuserassociation': { 'Meta': {'object_name': 'AuthKeyUserAssociation'}, 'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_keys'", 'to': "orm['forum.User']"}) }, 'forum.award': { 'Meta': {'unique_together': "(('content_type', 'object_id', 'user', 'badge'),)", 'object_name': 'Award', 'db_table': "u'award'"}, 'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['forum.Badge']"}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'awards'", 'to': "orm['forum.User']"}) }, 'forum.badge': { 'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"}, 'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['forum.User']"}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}), 'type': ('django.db.models.fields.SmallIntegerField', [], {}) }, 'forum.favoritequestion': { 'Meta': {'unique_together': "(('question', 'user'),)", 'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"}, 'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['forum.User']"}) }, 'forum.flaggeditem': { 'Meta': {'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"}, 'canceled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'null': 'True', 'to': "orm['forum.Node']"}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['forum.User']"}) }, 'forum.keyvalue': { 'Meta': {'object_name': 'KeyValue'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'value': 
('forum.models.utils.PickledObjectField', [], {}) }, 'forum.markedtag': { 'Meta': {'object_name': 'MarkedTag'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['forum.Tag']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['forum.User']"}) }, 'forum.node': { 'Meta': {'object_name': 'Node'}, 'abs_parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_children'", 'null': 'True', 'to': "orm['forum.Node']"}), 'active_revision': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'active'", 'unique': 'True', 'null': 'True', 'to': "orm['forum.NodeRevision']"}), 'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nodes'", 'to': "orm['forum.User']"}), 'body': ('django.db.models.fields.TextField', [], {}), 'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_nodes'", 'null': 'True', 'to': "orm['forum.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_nodes'", 'null': 'True', 'to': "orm['forum.User']"}), 'node_type': ('django.db.models.fields.CharField', [], {'default': "'node'", 'max_length': '16'}), 'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['forum.Node']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'nodes'", 'to': "orm['forum.Tag']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'forum.noderevision': { 'Meta': {'unique_together': "(('node', 'revision'),)", 'object_name': 'NodeRevision'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'noderevisions'", 'to': "orm['forum.User']"}), 'body': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Node']"}), 'revised_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'revision': ('django.db.models.fields.PositiveIntegerField', [], {}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'tagnames': 
('django.db.models.fields.CharField', [], {'max_length': '125'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}) }, 'forum.question': { 'Meta': {'object_name': 'Question', 'db_table': "u'question'"}, 'accepted_answer': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'question_accepting'", 'unique': 'True', 'null': 'True', 'to': "orm['forum.Answer']"}), 'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['forum.User']"}), 'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['forum.User']"}), 'favourite_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'null': 'True', 'to': "orm['forum.User']"}), 'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['forum.Node']", 'unique': 'True'}), 'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'subscriptions'", 'through': "'QuestionSubscription'", 'to': "orm['forum.User']"}), 'view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'forum.questionsubscription': { 'Meta': {'object_name': 'QuestionSubscription'}, 'auto_subscription': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_view': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 17, 1, 11, 40, 975000)'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']"}) }, 'forum.repute': { 'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"}, 'canceled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reputes'", 'null': 'True', 'to': "orm['forum.Node']"}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}), 'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}), 'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reputes'", 'to': "orm['forum.User']"}), 'user_previous_rep': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}) }, 
'forum.subscriptionsettings': { 'Meta': {'object_name': 'SubscriptionSettings'}, 'all_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'all_questions_watched_tags': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'enable_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'member_joins': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}), 'new_question': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '1'}), 'new_question_watched_tags': ('django.db.models.fields.CharField', [], {'default': "'i'", 'max_length': '1'}), 'notify_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'notify_answers': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'notify_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'notify_comments_own_post': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'notify_reply_to_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'questions_answered': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'questions_asked': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'questions_commented': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'questions_viewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'subscribed_questions': ('django.db.models.fields.CharField', [], {'default': "'i'", 'max_length': '1'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'subscription_settings'", 'unique': 'True', 'to': "orm['forum.User']"}) }, 'forum.tag': { 'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"}, 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['forum.User']"}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['forum.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'marked_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'marked_tags'", 'through': "'MarkedTag'", 'to': "orm['forum.User']"}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'forum.user': { 'Meta': {'object_name': 'User', '_ormbases': ['auth.User']}, 'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'gold': ('django.db.models.fields.SmallIntegerField', [], 
{'default': '0'}), 'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}), 'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, 'forum.validationhash': { 'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'ValidationHash'}, 'expiration': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 18, 1, 11, 41, 269000)'}), 'hash_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'seed': ('django.db.models.fields.CharField', [], {'max_length': '12'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '12'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']"}) }, 'forum.vote': { 'Meta': {'object_name': 'Vote', 'db_table': "u'vote'"}, 'canceled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'null': 'True', 'to': "orm['forum.Node']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['forum.User']"}), 'vote': ('django.db.models.fields.SmallIntegerField', [], {}), 'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}) } } complete_apps = ['forum']<|fim▁end|>
('score', self.gf('django.db.models.fields.IntegerField')(default=0)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['forum.User'])), ('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
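For context, a South schema migration like migration 0019 above is normally applied with `python manage.py migrate forum 0019` (running forwards(): dropping LikedComment and Comment, adding Node.abs_parent) and rolled back with `python manage.py migrate forum 0018` (running its backwards()); this assumes a stock Django project with South installed.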
<|file_name|>api.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.7 # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Create an API definition by interpreting a discovery document. This module interprets a discovery document to create a tree of classes which represent the API structure in a way that is useful for generating a library. For each discovery element (e.g. schemas, resources, methods, ...) there is a class to represent it which is directly usable in the templates. The instances of those classes are annotated with extra variables for use in the template which are language specific. The current way to make use of this class is to create a programming language specific subclass of Api, which adds annotations and template variables appropriate for that language. TODO(user): Refactor this so that the API can be loaded first, then annotated. """ __author__ = '[email protected] (Tony Aiuto)' import json import logging import operator import urlparse from googleapis.codegen import data_types from googleapis.codegen import template_objects from googleapis.codegen import utilities from googleapis.codegen.api_exception import ApiException from googleapis.codegen.schema import Schema from googleapis.codegen.utilities import convert_size _DEFAULT_SERVICE_HOST = 'www.googleapis.com' _DEFAULT_OWNER_DOMAIN = 'google.com' _DEFAULT_OWNER_NAME = 'Google' _RECOGNIZED_GOOGLE_DOMAINS = ( 'google.com', 'googleapis.com', 'googleplex.com' ) # Recognized names of request and response fields used for paging. _PAGE_TOKEN_NAMES = ('pageToken', 'nextPageToken') _LOGGER = logging.getLogger('codegen') class Api(template_objects.CodeObject): """An API definition. This class holds a discovery centric definition of an API. It contains members such as "resources" and "schemas" which relate directly to discovery concepts. It defines several properties that can be used in code generation templates: name: The API name. version: The API version. versionNoDots: The API version with all '.' characters replaced with '_'. This is typically used in class names. versionNoDash: The API version with all '-' characters replaced with '_'. This is typically used in file names where '-' has meaning. authScopes: The list of the OAuth scopes used by this API. dataWrapper: True if the API definition contains the 'dataWrapper' feature. methods: The list of top level API methods. models: The list of API data models, both from the schema section of discovery and from anonymous objects defined in method definitions. 
parameters: The list of global method parameters (applicable to all methods) resources: The list of API resources """ def __init__(self, discovery_doc, language=None): super(Api, self).__init__(discovery_doc, self, wire_name=discovery_doc['name']) name = self.values['name'] self._validator.ValidateApiName(name) if name != 'freebase': self._validator.ValidateApiVersion(self.values['version']) canonical_name = self.values.get('canonicalName') or name if not self.values.get('canonicalName'): self.values['canonicalName'] = canonical_name self._class_name = self.ToClassName(canonical_name, self) # Guard against language implementor not taking care of spaces self._class_name = self._class_name.replace(' ', '') self._NormalizeOwnerInformation() self._language = language self._template_dir = None self._surface_features = {} self._schemas = {} self._methods_by_name = {} self._all_methods = [] self.SetTemplateValue('className', self._class_name) self.SetTemplateValue('versionNoDots', self.values['version'].replace('.', '_')) self.SetTemplateValue('versionNoDash', self.values['version'].replace('-', '_')) self.SetTemplateValue('dataWrapper', 'dataWrapper' in discovery_doc.get('features', [])) self.values.setdefault('title', name) self.values.setdefault('exponentialBackoffDefault', False) if not self.values.get('revision'): self.values['revision'] = 'snapshot' self._NormalizeUrlComponents() # Information for variant subtypes, a dictionary of the format: # # { 'wireName': {'discriminant': discriminant, 'value': value, # 'schema': schema}, # ... } # # ... where wireName is the name of variant subtypes, discriminant # the field name of the discriminant, value the discriminant value # for this variant, and schema the base schema. # # This information cannot be stored in the referred schema at # reading time because at the time we read it from the base # schema, the referenced variant schemas may not yet be loaded. So # we first store it here, and after all schemas have been loaded, # update the schema template properties. 
self._variant_info = {} # Build data types and methods self._SetupModules() self.void_type = data_types.Void(self) self._BuildSchemaDefinitions() self._BuildResourceDefinitions() self.SetTemplateValue('resources', self._resources) # Make data models part of the api dictionary self.SetTemplateValue('models', self.ModelClasses()) # Replace methods dict with Methods self._top_level_methods = [] method_dict = self.values.get('methods') or {} for name in sorted(method_dict): self._top_level_methods.append(Method(self, name, method_dict[name])) self.SetTemplateValue('methods', self._top_level_methods) # Global parameters self._parameters = [] param_dict = self.values.get('parameters') or {} for name in sorted(param_dict): parameter = Parameter(self, name, param_dict[name], self) self._parameters.append(parameter) if name == 'alt': self.SetTemplateValue('alt', parameter) self.SetTemplateValue('parameters', self._parameters) # Auth scopes self._authscopes = [] if (self.values.get('auth') and self.values['auth'].get('oauth2') and self.values['auth']['oauth2'].get('scopes')): for value, auth_dict in sorted( self.values['auth']['oauth2']['scopes'].iteritems()): self._authscopes.append(AuthScope(self, value, auth_dict)) self.SetTemplateValue('authscopes', self._authscopes) @property def all_schemas(self): """The dictionary of all the schema objects found in the API.""" return self._schemas def _SetupModules(self): """Compute and set the module(s) which this API belongs under.""" # The containing module is based on the owner information. path = self.values.get('modulePath') or self.values.get('packagePath') self._containing_module = template_objects.Module( package_path=path, owner_name=self.values.get('owner'), owner_domain=self.values.get('ownerDomain')) self.SetTemplateValue('containingModule', self._containing_module) # The API is a child of the containing_module base = self.values['name'] # TODO(user): Introduce a breaking change where we always prefer # canonicalName. if self.values.get('packagePath'): # Lowercase the canonical name only for non-cloud-endpoints Google APIs. # This is to avoid breaking changes to existing Google-owned Cloud # Endpoints APIs. if self.values.get('rootUrl').find('.googleapis.com') > 0: base = self.values.get('canonicalName').lower() or base else: base = self.values.get('canonicalName') or base if self.values.get('version_module'): base = '%s/%s' % (base, self.values['versionNoDots']) self._module = template_objects.Module(package_path=base, parent=self._containing_module) self.SetTemplateValue('module', self._module) # The default module for data models defined by this API. self._model_module = template_objects.Module(package_path=None, parent=self._module) def _BuildResourceDefinitions(self): """Loop over the resources in the discovery doc and build definitions.""" self._resources = [] def_dict = self.values.get('resources') or {} for name in sorted(def_dict): resource = Resource(self, name, def_dict[name], parent=self) self._resources.append(resource) def _BuildSchemaDefinitions(self): """Loop over the schemas in the discovery doc and build definitions.""" schemas = self.values.get('schemas') if schemas: for name in sorted(schemas): def_dict = schemas[name] # Upgrade the string format schema to a dict. if isinstance(def_dict, unicode): def_dict = json.loads(def_dict) self._schemas[name] = self.DataTypeFromJson(def_dict, name) # Late bind info for variant types, and mark the discriminant # field and value. 
for name, info in self._variant_info.iteritems(): if name not in self._schemas: # The error will be reported elsewhere continue schema = self._schemas[name] for prop in schema.values.get('properties'): if prop.values['wireName'] == info['discriminant']: # Filter out the discriminant property as it is already # contained in the base type. schema.SetTemplateValue( 'properties', [p for p in schema.values.get('properties') if p != prop]) break else: logging.warn("Variant schema '%s' for base schema '%s' " "has not the expected discriminant property '%s'.", name, info['schema'].values['wireName'], info['discriminant']) schema.SetTemplateValue('superClass', info['schema'].class_name) # TODO(user): baseType is for backwards compatability only. It should # have always been a different name. When the old Java generators roll # off, remove it. schema.SetTemplateValue('baseType', info['schema'].class_name) schema.SetTemplateValue('discriminantValue', info['value']) def _NormalizeOwnerInformation(self): """Ensure that owner and ownerDomain are set to sane values.""" owner_domain = self.get('ownerDomain', '') if not owner_domain: root_url = self.get('rootUrl') if root_url: owner_domain = urlparse.urlparse(root_url).hostname # Normalize google domains. if any(owner_domain.endswith(d) for d in _RECOGNIZED_GOOGLE_DOMAINS): owner_domain = 'google.com' if owner_domain: owner_domain = utilities.SanitizeDomain(owner_domain)<|fim▁hole|> if not self.get('ownerName'): if owner_domain == _DEFAULT_OWNER_DOMAIN: owner_name = _DEFAULT_OWNER_NAME else: owner_name = owner_domain.replace('.', '_') self.SetTemplateValue('ownerName', owner_name) if not self.get('owner'): self.SetTemplateValue('owner', self['ownerName'].lower()) def _NormalizeUrlComponents(self): """Sets template values concerning the path to the service. Sets rootUrl and servicePath from the values given or defaults based on what is available. Verifies them for safeness. The hierarchy of the possible inputs is: use rootUrl + servicePath as the best choice if it exists (v1new) or rpcPath or use baseUrl (v1) or use basePath (v1) or restBasePath (v0.3) or default to 'api/version' Raises: ValueError: if the values available are inconsistent or disallowed. """ # If both rootUrl and servicePath exist, they equal what is in baseUrl. root_url = self.values.get('rootUrl') service_path = self.values.get('servicePath') rpc_path = self.values.get('rpcPath') if root_url: # oauth2 has a servicePath of "". This is wierd but OK for that API, but # it means we must explicitly check against None. if service_path is not None: base_url = root_url + service_path elif rpc_path: base_url = rpc_path else: raise ValueError('Neither servicePath nor rpcPath is defined.') else: base_url = self.values.get('baseUrl') # If we have a full path ('https://superman.appspot.com/kryptonite/hurts'), # then go with that, otherwise just use the various things which might # hint at the servicePath. best_path = (base_url or self.values.get('basePath') or self.values.get('restBasePath') or '/%s/%s/' % (self.values['name'], self.values['version'])) if best_path.find('..') >= 0: raise ValueError('api path must not contain ".." (%s)' % best_path) # And let urlparse to the grunt work of normalizing and parsing. 
url_parts = urlparse.urlparse(best_path) scheme = url_parts.scheme or 'https' service_host = url_parts.netloc or _DEFAULT_SERVICE_HOST base_path = url_parts.path if not root_url: self._api.SetTemplateValue('rootUrl', '%s://%s/' % (scheme, service_host)) if service_path is None: self._api.SetTemplateValue('servicePath', base_path[1:]) # Make sure template writers do not revert self._api.DeleteTemplateValue('baseUrl') self._api.DeleteTemplateValue('basePath') self._api.DeleteTemplateValue('serviceHost') def ModelClasses(self): """Return all the model classes.""" ret = set( s for s in self._schemas.itervalues() if isinstance(s, Schema) or isinstance(s, data_types.MapDataType)) return sorted(ret, key=operator.attrgetter('class_name')) def TopLevelModelClasses(self): """Return the models which are not children of another model.""" return [m for m in self.ModelClasses() if not m.parent] def DataTypeFromJson(self, type_dict, default_name, parent=None, wire_name=None): """Returns a schema object represented by a JSON Schema dictionary. Evaluate a JSON schema dictionary and return an appropriate schema object. If a data type is defined in-line, then create the schema dynamically. If the schema is a $ref to another, return the previously created schema or a lazy reference. If the type_dict is None, a blank schema will be created. Args: type_dict: A dict of the form expected of a request or response member of a method description. See the Discovery specification for more. default_name: The unique name to give the schema if we have to create it. parent: The schema where I was referenced. If we cannot determine that this is a top level schema, set the parent to this. wire_name: The name which will identify objects of this type in data on the wire. Returns: A Schema object. """ # new or not initialized, create a fresh one schema = Schema.Create(self, default_name, type_dict or {}, wire_name, parent) # Only put it in our by-name list if it is a real object if isinstance(schema, Schema) or isinstance(schema, data_types.MapDataType): # Use the path to the schema as a key. This means that an anonymous class # for the 'person' property under the schema 'Activity' will have the # unique name 'Activity.person', rather than 'ActivityPerson'. path = '.'.join( [a.values.get('wireName', '<anon>') for a in schema.full_path]) _LOGGER.debug('DataTypeFromJson: add %s to cache', path) self._schemas[path] = schema return schema def AddMethod(self, method): """Add a new method to the set of all methods.""" self._all_methods.append(method) self._methods_by_name[method.values['rpcMethod']] = method def MethodByName(self, method_name): """Find a method by name. Args: method_name: (str) the full RPC name of a method defined by this API. Returns: Method object or None if not found. """ return self._methods_by_name.get(method_name) def SchemaByName(self, schema_name): """Find a schema by name. Args: schema_name: (str) name of a schema defined by this API. Returns: Schema object or None if not found. """ return self._schemas.get(schema_name, None) def SetVariantInfo(self, ref, discriminant, value, schema): """Sets variant info for the given reference.""" if ref in self._variant_info: logging.warning("Base type of '%s' changed from '%s' to '%s'. 
" "This is an indication that a variant schema is used " "from multiple base schemas and may result in an " "inconsistent model.", ref, self._base_type[ref].wireName, schema.wireName) self._variant_info[ref] = {'discriminant': discriminant, 'value': value, 'schema': schema} def VisitAll(self, func): """Visit all nodes of an API tree and apply a function to each. Walks a tree and calls a function on each element of it. This should be called after the API is fully loaded. Args: func: (function) Method to call on each object. """ _LOGGER.debug('Applying function to all nodes') func(self._containing_module) func(self._module) func(self._model_module) for resource in self.values['resources']: self._VisitResource(resource, func) # Top level methods for method in self.values['methods']: self._VisitMethod(method, func) for parameter in self.values['parameters']: func(parameter) func(parameter.data_type) for schema in self._schemas.values(): self._VisitSchema(schema, func) for scope in self.GetTemplateValue('authscopes') or []: func(scope) def _VisitMethod(self, method, func): """Visit a method, calling a function on every child. Args: method: (Method) The Method to visit. func: (function) Method to call on each object. """ func(method) for parameter in method.parameters: func(parameter) def _VisitResource(self, resource, func): """Visit a resource tree, calling a function on every child. Calls down recursively to sub resources. Args: resource: (Resource) The Resource to visit. func: (function) Method to call on each object. """ func(resource) for method in resource.values['methods']: self._VisitMethod(method, func) for r in resource.values['resources']: self._VisitResource(r, func) def _VisitSchema(self, schema, func): """Visit a schema tree, calling a function on every child. Args: schema: (Schema) The Schema to visit. func: (function) Method to call on each object. """ func(schema) func(schema.module) for prop in schema.values.get('properties', []): func(prop) for child in self.children: func(child) # Do not warn about unused arguments, pylint: disable=unused-argument def ToClassName(self, s, element, element_type=None): """Convert a name to a suitable class name in the target language. This default implementation camel cases the string, which is appropriate for some languages. Subclasses are encouraged to override this. Args: s: (str) A rosy name of data element. element: (object) The object we are making a class name for. element_type: (str) Deprecated. The kind of object we are making a class name for. E.g. resource, method, schema. TODO(user): replace type in favor of class of element, but that will require changing the place where we call ToClassName with no element. Returns: A name suitable for use as a class in the generator's target language. """ return utilities.CamelCase(s).replace(' ', '') def NestedClassNameForProperty(self, name, schema): """Returns the class name of an object nested in a property.""" # TODO(user): This functionality belongs in the language model, but # because of the way the api is bootstrapped, that isn't available when we # need it. When language model is available from the start, this should be # moved. 
return '%s%s' % (schema.class_name, utilities.CamelCase(name)) @property def class_name(self): return self.values['className'] @property def model_module(self): return self._model_module @property def containing_module(self): return self._containing_module @property def all_methods(self): """All the methods in the entire API.""" return self._all_methods @property def top_level_methods(self): """All the methods at the API top level (not in a resource).""" return self._top_level_methods class Resource(template_objects.CodeObject): def __init__(self, api, name, def_dict, parent=None): """Creates a Resource. Args: api: (Api) The Api which owns this Resource. name: (string) The discovery name of the Resource. def_dict: (dict) The discovery dictionary for this Resource. parent: (CodeObject) The resource containing this method, if any. Top level resources have the API as a parent. """ super(Resource, self).__init__(def_dict, api, parent=parent, wire_name=name) self.ValidateName(name) class_name = api.ToClassName(name, self, element_type='resource') self.SetTemplateValue('className', class_name) # Replace methods dict with Methods self._methods = [] method_dict = self.values.get('methods') or {} for name in sorted(method_dict): self._methods.append(Method(api, name, method_dict[name], parent=self)) self.SetTemplateValue('methods', self._methods) # Get sub resources self._resources = [] r_def_dict = self.values.get('resources') or {} for name in sorted(r_def_dict): r = Resource(api, name, r_def_dict[name], parent=self) self._resources.append(r) self.SetTemplateValue('resources', self._resources) @property def methods(self): return self._methods @property def methods_dict(self): return {method['wireName']: method for method in self._methods} class AuthScope(template_objects.CodeObject): """The definition of an auth scope. An AuthScope defines these template values value: The scope url name: a sanitized version of the value, transformed so it generally can be used as an indentifier in code. Deprecated, use constantName description: the description of the scope. It also provides a template property which can be used after a language binding is set. constantName: A transformation of the value so it is suitable as a constant name in the specific language. """ GOOGLE_PREFIX = 'https://www.googleapis.com/auth/' HTTPS_PREFIX = 'https://' def __init__(self, api, value, def_dict): """Construct an auth scope. Args: api: (Api) The Api which owns this Property value: (string) The unique identifier of this scope, often a URL def_dict: (dict) The discovery dictionary for this auth scope. """ super(AuthScope, self).__init__(def_dict, api, wire_name=value) self._module = api.module self.SetTemplateValue('value', value) while value.endswith('/'): value = value[:-1] if 'description' not in self.values: self.SetTemplateValue('description', value) # Strip the common prefix to get a unique identifying name if value.startswith(AuthScope.GOOGLE_PREFIX): scope_id = value[len(AuthScope.GOOGLE_PREFIX):] elif value.startswith(AuthScope.HTTPS_PREFIX): # some comon scopes are are just a URL scope_id = value[len(AuthScope.HTTPS_PREFIX):] else: scope_id = value # We preserve the value stripped of the most common prefixes so we can # use it for building constantName in templates. 
self.SetTemplateValue('lastPart', scope_id) # replace all non alphanumeric with '_' to form 'name' name = ''.join([(c if c.isalnum() else '_') for c in scope_id.upper()]) self.SetTemplateValue('name', name) @property def constantName(self): # pylint: disable=g-bad-name """Overrides default behavior of constantName.""" return self._language_model.ApplyPolicy('constant', self, self.values['lastPart']) class Method(template_objects.CodeObject): """The definition of a method.""" def __init__(self, api, name, def_dict, parent=None): """Construct a method. Methods in REST discovery are inside of a resource. Note that the method name and id are calculable from each other. id will always be equal to api_name.resource_name[.sub_resource...].method_name. At least it should be, as that is the transformation Discovery makes from the API definition, which is essentially a flat list of methods, into a hierarchy of resources. Args: api: (Api) The Api which owns this Method. name: (string) The discovery name of the Method. def_dict: (dict) The discovery dictionary for this Method. parent: (CodeObject) The resource containing this Method, if any. Raises: ApiException: If the httpMethod type is not one we know how to handle. """ super(Method, self).__init__(def_dict, api, parent=(parent or api)) # TODO(user): Fix java templates to name vs. wireName correctly. Then # change the __init__ to have wire_name=def_dict.get('id') or name # then eliminate this line. self.SetTemplateValue('wireName', name) self.ValidateName(name) class_name = api.ToClassName(name, self, element_type='method') if parent and class_name == parent.values['className']: # Some languages complain when the collection name is the same as the # method name. class_name = '%sRequest' % class_name # The name is the key of the dict defining use. The id field is what you # have to use to call the method via RPC. That is unique, name might not be. self.SetTemplateValue('name', name) # Fix up very old discovery, which does not have an id. if 'id' not in self.values: self.values['id'] = name self.SetTemplateValue('className', class_name) http_method = def_dict.get('httpMethod', 'POST').upper() self.SetTemplateValue('httpMethod', http_method) self.SetTemplateValue('rpcMethod', def_dict.get('rpcMethod') or def_dict['id']) rest_path = def_dict.get('path') or def_dict.get('restPath') # TODO(user): if rest_path is not set, raise a good error and fail fast. self.SetTemplateValue('restPath', rest_path) # Figure out the input and output types and schemas for this method. expected_request = self.values.get('request') if expected_request: # TODO(user): RequestBody is only used if the schema is anonymous. # When we go to nested models, this could be a nested class off the # Method, making it unique without the silly name. Same for ResponseBody. request_schema = api.DataTypeFromJson(expected_request, '%sRequestContent' % name, parent=self) self.SetTemplateValue('requestType', request_schema) expected_response = def_dict.get('response') or def_dict.get('returns') if expected_response: response_schema = api.DataTypeFromJson(expected_response, '%sResponse' % name, parent=self) if self.values['wireName'] == 'get': response_schema.values['associatedResource'] = parent self.SetTemplateValue('responseType', response_schema) else: self.SetTemplateValue('responseType', api.void_type) # Make sure we can handle this method type and do any fixups. 
if http_method not in ['DELETE', 'GET', 'OPTIONS', 'PATCH', 'POST', 'PUT', 'PROPFIND', 'PROPPATCH', 'REPORT']: raise ApiException('Unknown HTTP method: %s' % http_method, def_dict) if http_method == 'GET': self.SetTemplateValue('requestType', None) # Replace parameters dict with Parameters. We try to order them by their # position in the request path so that the generated code can track the # more human readable definition, rather than the order of the parameters # in the discovery doc. order = self.values.get('parameterOrder', []) req_parameters = [] opt_parameters = [] for name, def_dict in self.values.get('parameters', {}).iteritems(): param = Parameter(api, name, def_dict, self) if name == 'alt': # Treat the alt parameter differently self.SetTemplateValue('alt', param) continue # Standard params are part of the generic request class # We want to push all parameters that aren't declared inside # parameterOrder after those that are. if param.values['wireName'] in order: req_parameters.append(param) else: # optional parameters are appended in the order they're declared. opt_parameters.append(param) # pylint: disable=g-long-lambda req_parameters.sort(lambda x, y: cmp(order.index(x.values['wireName']), order.index(y.values['wireName']))) # sort optional parameters by name to avoid code churn opt_parameters.sort(lambda x, y: cmp(x.values['wireName'], y.values['wireName'])) req_parameters.extend(opt_parameters) self.SetTemplateValue('parameters', req_parameters) self._InitMediaUpload(parent) self._InitPageable(api) api.AddMethod(self) def _InitMediaUpload(self, parent): media_upload = self.values.get('mediaUpload') if media_upload: if parent: parent.SetTemplateValue('isMedia', True) # Get which MIME Media Ranges are accepted for media uploads to this # method. accepted_mime_ranges = media_upload.get('accept') self.SetTemplateValue('accepted_mime_ranges', accepted_mime_ranges) max_size = media_upload.get('maxSize') self.SetTemplateValue('max_size', max_size) self.SetTemplateValue('max_size_bytes', convert_size.ConvertSize(max_size)) # Find which upload protocols are supported. upload_protocols = media_upload['protocols'] for upload_protocol in upload_protocols: self._SetUploadTemplateValues( upload_protocol, upload_protocols[upload_protocol]) def _InitPageable(self, api): response_type = self.values.get('responseType') if response_type == api.void_type: return next_page_token_name = self.FindPageToken( response_type.values.get('properties')) if not next_page_token_name: return is_page_token_parameter = True page_token_name = self.FindPageToken(self.optional_parameters) if not page_token_name: # page token may be field of request body instead of query parameter is_page_token_parameter = False request_type = self.values.get('requestType') if request_type: page_token_name = self.FindPageToken( request_type.values.get('properties')) if not page_token_name: return self.SetTemplateValue('isPageable', True) self.SetTemplateValue('isPagingStyleStandard', (is_page_token_parameter and page_token_name == 'pageToken' and next_page_token_name == 'nextPageToken')) def _SetUploadTemplateValues(self, upload_protocol, protocol_dict): """Sets upload specific template values. Args: upload_protocol: (str) The name of the upload protocol. Eg: 'simple' or 'resumable'. protocol_dict: (dict) The dictionary that corresponds to this upload protocol. It typically contains keys like 'path', 'multipart' etc. 
""" self.SetTemplateValue('%s_upload_supported' % upload_protocol, True) upload_path = protocol_dict.get('path') if upload_path: self.SetTemplateValue('%s_upload_path' % upload_protocol, upload_path) self.SetTemplateValue('%s_upload_multipart' % upload_protocol, protocol_dict.get('multipart', False)) @property def media_upload_parameters(self): return self.values.get('mediaUpload') @property def parameters(self): return self.values['parameters'] @property def optional_parameters(self): return [p for p in self.values['parameters'] if not p.required] @property def required_parameters(self): return [p for p in self.values['parameters'] if p.required] @property def path_parameters(self): return [p for p in self.values['parameters'] if p.location == 'path'] @property def query_parameters(self): return [p for p in self.values['parameters'] if p.location == 'query'] @staticmethod def FindCodeObjectWithWireName(things, wire_name): """Looks for an element having the given wire_name. Args: things: (array of DataType) List of parameters or properties to search. wire_name: (str) The wireName we are looking to find. Returns: None or element with the given wire_name. """ if not things: return None for e in things: if e.values['wireName'] == wire_name: return e return None @staticmethod def FindPageToken(things): """Looks for an element with a wireName like a page token. Args: things: (array of DataType) List of parameters or properties to search. Returns: None or page token name found. """ for token_name in _PAGE_TOKEN_NAMES: if Method.FindCodeObjectWithWireName(things, token_name): return token_name return None # # Expose some properties with the naming convention we use in templates # def optionalParameters(self): # pylint: disable=g-bad-name return self.optional_parameters def requiredParameters(self): # pylint: disable=g-bad-name return self.required_parameters def pathParameters(self): # pylint: disable=g-bad-name return self.path_parameters def queryParameters(self): # pylint: disable=g-bad-name return self.query_parameters class Parameter(template_objects.CodeObject): """The definition of a method parameter.""" def __init__(self, api, name, def_dict, method): super(Parameter, self).__init__(def_dict, api, parent=method, wire_name=name) self.ValidateName(name) self.schema = api # TODO(user): Deal with dots in names better. What we should do is: # For x.y, x.z create a little class X, with members y and z. Then # have the constructor method take an X. self._repeated = self.values.get('repeated', False) self._required = self.values.get('required', False) self._location = (self.values.get('location') or self.values.get('restParameterType') or 'query') # TODO(user): Why not just use Schema.Create here? 
referenced_schema = self.values.get('$ref') if referenced_schema: self._data_type = (api.SchemaByName(referenced_schema) or data_types.SchemaReference(referenced_schema, api)) elif def_dict.get('type') == 'array': self._data_type = Schema.Create(api, name, def_dict, name, method) elif self.values.get('enum'): self._data_type = data_types.Enum(def_dict, api, name, self.values.get('enum'), self.values.get('enumDescriptions'), parent=method) self.SetTemplateValue('enumType', self._data_type) else: self._data_type = data_types.PrimitiveDataType(def_dict, api, parent=self) if self._repeated: self._data_type = data_types.ArrayDataType(name, self._data_type, parent=self) @property def repeated(self): return self._repeated @property def required(self): return self._required @property def location(self): return self._location @property def code_type(self): return self._data_type.code_type @property def data_type(self): return self._data_type<|fim▁end|>
else: owner_domain = _DEFAULT_OWNER_DOMAIN self.SetTemplateValue('ownerDomain', owner_domain)
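# [Editor's aside -- illustrative sketch, not part of the generator source above.]
# The Method constructor in the previous file splits parameters into those named in
# parameterOrder (kept in that order) and the rest (sorted by name), then concatenates
# the two groups. The same ordering in a tiny standalone form; the parameter names
# below are made up for illustration:
_order = ['userId', 'albumId']
_params = ['fields', 'albumId', 'prettyPrint', 'userId']
_required = sorted((p for p in _params if p in _order), key=_order.index)
_optional = sorted(p for p in _params if p not in _order)
assert _required + _optional == ['userId', 'albumId', 'fields', 'prettyPrint']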
<|file_name|>p2p-segwit.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # Copyright (c) 2016 The PlanBcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test segwit transactions and blocks on P2P network.""" from test_framework.mininode import * from test_framework.test_framework import PlanbcoinTestFramework from test_framework.util import * from test_framework.script import * from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER from test_framework.key import CECKey, CPubKey import time import random from binascii import hexlify # The versionbit bit used to signal activation of SegWit VB_WITNESS_BIT = 1 VB_PERIOD = 144 VB_ACTIVATION_THRESHOLD = 108 VB_TOP_BITS = 0x20000000 MAX_SIGOP_COST = 80000 # Calculate the virtual size of a witness block: # (base + witness/4) def get_virtual_size(witness_block): base_size = len(witness_block.serialize()) total_size = len(witness_block.serialize(with_witness=True)) # the "+3" is so we round up vsize = int((3*base_size + total_size + 3)/4) return vsize class TestNode(NodeConnCB): def __init__(self): super().__init__() self.getdataset = set() def on_getdata(self, conn, message): for inv in message.inv: self.getdataset.add(inv.hash) def announce_tx_and_wait_for_getdata(self, tx, timeout=60): with mininode_lock: self.last_message.pop("getdata", None) self.send_message(msg_inv(inv=[CInv(1, tx.sha256)])) self.wait_for_getdata(timeout) def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): with mininode_lock: self.last_message.pop("getdata", None) self.last_message.pop("getheaders", None) msg = msg_headers() msg.headers = [ CBlockHeader(block) ] if use_header: self.send_message(msg) else: self.send_message(msg_inv(inv=[CInv(2, block.sha256)])) self.wait_for_getheaders() self.send_message(msg) self.wait_for_getdata() def request_block(self, blockhash, inv_type, timeout=60): with mininode_lock: self.last_message.pop("block", None) self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) self.wait_for_block(blockhash, timeout) return self.last_message["block"].block def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None): tx_message = msg_tx(tx) if with_witness: tx_message = msg_witness_tx(tx) self.send_message(tx_message) self.sync_with_ping() assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted) if (reason != None and not accepted): # Check the rejection reason as well. with mininode_lock: assert_equal(self.last_message["reject"].reason, reason) # Test whether a witness block had the correct effect on the tip def test_witness_block(self, block, accepted, with_witness=True): if with_witness: self.send_message(msg_witness_block(block)) else: self.send_message(msg_block(block)) self.sync_with_ping() assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted) # Used to keep track of anyone-can-spend outputs that we can use in the tests class UTXO(object): def __init__(self, sha256, n, nValue): self.sha256 = sha256 self.n = n self.nValue = nValue # Helper for getting the script associated with a P2PKH def GetP2PKHScript(pubkeyhash): return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]) # Add signature for a P2PK witness program. 
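# [Editor's aside -- illustrative sketch, not part of the original test file.]
# get_virtual_size() above computes ceil((3*base + total)/4); the "+3" before the
# integer division is what implements the ceiling. A tiny standalone check with
# made-up byte counts (the sizes below are assumptions, not real block measurements):
def _vsize_sketch(base_size, total_size):
    return int((3 * base_size + total_size + 3) / 4)

assert _vsize_sketch(800, 800) == 800    # no witness data: vsize equals the base size
assert _vsize_sketch(800, 1000) == 850   # weight 3400 -> exactly 850 virtual bytes
assert _vsize_sketch(800, 1001) == 851   # weight 3401 rounds up to 851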
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key): tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value) signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1') txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script] txTo.rehash() class SegWitTest(PlanbcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]] def setup_network(self): self.setup_nodes() connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 2) self.sync_all() ''' Helpers ''' # Build a block on top of node0's tip. def build_next_block(self, nVersion=4): tip = self.nodes[0].getbestblockhash() height = self.nodes[0].getblockcount() + 1 block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 block = create_block(int(tip, 16), create_coinbase(height), block_time) block.nVersion = nVersion block.rehash() return block # Adds list of transactions to block, adds witness commitment, then solves. def update_witness_block_with_transactions(self, block, tx_list, nonce=0): block.vtx.extend(tx_list) add_witness_commitment(block, nonce) block.solve() return ''' Individual tests ''' def test_witness_services(self): self.log.info("Verifying NODE_WITNESS service bit") assert((self.test_node.connection.nServices & NODE_WITNESS) != 0) # See if sending a regular transaction works, and create a utxo # to use in later tests. def test_non_witness_transaction(self): # Mine a block with an anyone-can-spend coinbase, # let it mature, then try to spend it. self.log.info("Testing non-witness transaction") block = self.build_next_block(nVersion=1) block.solve() self.test_node.send_message(msg_block(block)) self.test_node.sync_with_ping() # make sure the block was processed txid = block.vtx[0].sha256 self.nodes[0].generate(99) # let the block mature # Create a transaction that spends the coinbase tx = CTransaction() tx.vin.append(CTxIn(COutPoint(txid, 0), b"")) tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE]))) tx.calc_sha256() # Check that serializing it with or without witness is the same # This is a sanity check of our testing framework. assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize()) self.test_node.send_message(msg_witness_tx(tx)) self.test_node.sync_with_ping() # make sure the tx was processed assert(tx.hash in self.nodes[0].getrawmempool()) # Save this transaction for later self.utxo.append(UTXO(tx.sha256, 0, 49*100000000)) self.nodes[0].generate(1) # Verify that blocks with witnesses are rejected before activation. def test_unnecessary_witness_before_segwit_activation(self): self.log.info("Testing behavior of unnecessary witnesses") # For now, rely on earlier tests to have created at least one utxo for # us to use assert(len(self.utxo) > 0) assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active') tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])] # Verify the hash with witness differs from the txid # (otherwise our testing framework must be broken!) tx.rehash() assert(tx.sha256 != tx.calc_sha256(with_witness=True)) # Construct a segwit-signaling block that includes the transaction. 
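# [Editor's aside -- illustrative sketch, not part of the original test file.]
# The block built just below signals for segwit by combining the BIP9 top bits with
# witness bit 1. Checking that composition against the constants defined at the top
# of this file (VB_TOP_BITS = 0x20000000, VB_WITNESS_BIT = 1):
_top_bits, _witness_bit = 0x20000000, 1
_n_version = _top_bits | (1 << _witness_bit)
assert _n_version == 0x20000002
assert (_n_version >> _witness_bit) & 1 == 1     # the segwit bit is set
assert _n_version & 0xE0000000 == _top_bits      # top three bits mark a BIP9-style version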
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT))) self.update_witness_block_with_transactions(block, [tx]) # Sending witness data before activation is not allowed (anti-spam # rule). self.test_node.test_witness_block(block, accepted=False) # TODO: fix synchronization so we can test reject reason # Right now, planbcoind delays sending reject messages for blocks # until the future, making synchronization here difficult. #assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness") # But it should not be permanently marked bad... # Resend without witness information. self.test_node.send_message(msg_block(block)) self.test_node.sync_with_ping() assert_equal(self.nodes[0].getbestblockhash(), block.hash) sync_blocks(self.nodes) # Create a p2sh output -- this is so we can pass the standardness # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped # in P2SH). p2sh_program = CScript([OP_TRUE]) p2sh_pubkey = hash160(p2sh_program) scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) # Now check that unnecessary witnesses can't be used to blind a node # to a transaction, eg by violating standardness checks. tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey)) tx2.rehash() self.test_node.test_transaction_acceptance(tx2, False, True) self.nodes[0].generate(1) sync_blocks(self.nodes) # We'll add an unnecessary witness to this transaction that would cause # it to be non-standard, to test that violating policy with a witness before # segwit activation doesn't blind a node to a transaction. Transactions # rejected for having a witness before segwit activation shouldn't be added # to the rejection cache. tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program]))) tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey)) tx3.wit.vtxinwit.append(CTxInWitness()) tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000] tx3.rehash() # Note that this should be rejected for the premature witness reason, # rather than a policy check, since segwit hasn't activated yet. self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet') # If we send without witness, it should be accepted. self.std_node.test_transaction_acceptance(tx3, False, True) # Now create a new anyone-can-spend utxo for the next test. tx4 = CTransaction() tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program]))) tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE]))) tx4.rehash() self.test_node.test_transaction_acceptance(tx3, False, True) self.test_node.test_transaction_acceptance(tx4, False, True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Update our utxo list; we spent the first entry. self.utxo.pop(0) self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue)) # Mine enough blocks for segwit's vb state to be 'started'. def advance_to_segwit_started(self): height = self.nodes[0].getblockcount() # Will need to rewrite the tests here if we are past the first period assert(height < VB_PERIOD - 1) # Genesis block is 'defined'. assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined') # Advance to end of period, status should now be 'started' self.nodes[0].generate(VB_PERIOD-height-1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') # Mine enough blocks to lock in segwit, but don't activate. 
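# [Editor's aside -- illustrative sketch, not part of the original test file.]
# advance_to_segwit_started() above (and the lockin/activation helpers that follow)
# generate just enough blocks to leave the tip on the last block of a versionbits
# period (VB_PERIOD = 144 on regtest), since BIP9 state only changes at period
# boundaries. The underlying arithmetic, checked with assumed heights:
def _blocks_to_period_end(height, period=144):
    # Blocks to mine so that the new tip is the last block of its period.
    return (period - 1 - height) % period

assert _blocks_to_period_end(0) == 143     # from genesis, mine up to height 143
assert _blocks_to_period_end(143) == 0     # already on the last block of the period
assert _blocks_to_period_end(200) == 87    # 200 + 87 = 287 = 2*144 - 1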
# TODO: we could verify that lockin only happens at the right threshold of # signalling blocks, rather than just at the right period boundary. def advance_to_segwit_lockin(self): height = self.nodes[0].getblockcount() assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') # Advance to end of period, and verify lock-in happens at the end self.nodes[0].generate(VB_PERIOD-1) height = self.nodes[0].getblockcount() assert((height % VB_PERIOD) == VB_PERIOD - 2) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') self.nodes[0].generate(1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') # Mine enough blocks to activate segwit. # TODO: we could verify that activation only happens at the right threshold # of signalling blocks, rather than just at the right period boundary. def advance_to_segwit_active(self): assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') height = self.nodes[0].getblockcount() self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') self.nodes[0].generate(1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active') # This test can only be run after segwit has activated def test_witness_commitments(self): self.log.info("Testing witness commitments") # First try a correct witness commitment. block = self.build_next_block() add_witness_commitment(block) block.solve() # Test the test -- witness serialization should be different assert(msg_witness_block(block).serialize() != msg_block(block).serialize()) # This empty block should be valid. self.test_node.test_witness_block(block, accepted=True) # Try to tweak the nonce block_2 = self.build_next_block() add_witness_commitment(block_2, nonce=28) block_2.solve() # The commitment should have changed! assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]) # This should also be valid. self.test_node.test_witness_block(block_2, accepted=True) # Now test commitments with actual transactions assert (len(self.utxo) > 0) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) # Let's construct a witness program witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() # tx2 will spend tx1, and send back to a regular anyone-can-spend address tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx2.rehash() block_3 = self.build_next_block() self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1) # Add an extra OP_RETURN output that matches the witness commitment template, # even though it has extra data after the incorrect commitment. # This block should fail. block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10]))) block_3.vtx[0].rehash() block_3.hashMerkleRoot = block_3.calc_merkle_root() block_3.rehash() block_3.solve() self.test_node.test_witness_block(block_3, accepted=False) # Add a different commitment with different nonce, but in the # right location, and with some funds burned(!). # This should succeed (nValue shouldn't affect finding the # witness commitment). 
add_witness_commitment(block_3, nonce=0) block_3.vtx[0].vout[0].nValue -= 1 block_3.vtx[0].vout[-1].nValue += 1 block_3.vtx[0].rehash() block_3.hashMerkleRoot = block_3.calc_merkle_root() block_3.rehash() assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns block_3.solve() self.test_node.test_witness_block(block_3, accepted=True) # Finally test that a block with no witness transactions can # omit the commitment. block_4 = self.build_next_block() tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) tx3.rehash() block_4.vtx.append(tx3) block_4.hashMerkleRoot = block_4.calc_merkle_root() block_4.solve() self.test_node.test_witness_block(block_4, with_witness=False, accepted=True) # Update available utxo's for use in later test. self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) def test_block_malleability(self): self.log.info("Testing witness block malleability") # Make sure that a block that has too big a virtual size # because of a too-large coinbase witness is not permanently # marked bad. block = self.build_next_block() add_witness_commitment(block) block.solve() block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000) assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE) # We can't send over the p2p network, because this is too big to relay # TODO: repeat this test with a block that can be relayed self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() != block.hash) block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop() assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE) self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() == block.hash) # Now make sure that malleating the witness nonce doesn't # result in a block permanently marked bad. block = self.build_next_block() add_witness_commitment(block) block.solve() # Change the nonce -- should not cause the block to be permanently # failed block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ] self.test_node.test_witness_block(block, accepted=False) # Changing the witness nonce doesn't change the block hash block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ] self.test_node.test_witness_block(block, accepted=True) def test_witness_block_size(self): self.log.info("Testing witness block size limit") # TODO: Test that non-witness carrying blocks can't exceed 1MB # Skipping this test for now; this is covered in p2p-fullblocktest.py # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB. block = self.build_next_block() assert(len(self.utxo) > 0) # Create a P2WSH transaction. # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE. # This should give us plenty of room to tweak the spending tx's # virtual size. NUM_DROPS = 200 # 201 max ops per script! 
NUM_OUTPUTS = 50 witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE]) witness_hash = uint256_from_str(sha256(witness_program)) scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)]) prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) value = self.utxo[0].nValue parent_tx = CTransaction() parent_tx.vin.append(CTxIn(prevout, b"")) child_value = int(value/NUM_OUTPUTS) for i in range(NUM_OUTPUTS): parent_tx.vout.append(CTxOut(child_value, scriptPubKey)) parent_tx.vout[0].nValue -= 50000 assert(parent_tx.vout[0].nValue > 0) parent_tx.rehash() child_tx = CTransaction() for i in range(NUM_OUTPUTS): child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] for i in range(NUM_OUTPUTS): child_tx.wit.vtxinwit.append(CTxInWitness()) child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program] child_tx.rehash() self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) vsize = get_virtual_size(block) additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4 i = 0 while additional_bytes > 0: # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1 extra_bytes = min(additional_bytes+1, 55) block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes) additional_bytes -= extra_bytes i += 1 block.vtx[0].vout.pop() # Remove old commitment add_witness_commitment(block) block.solve() vsize = get_virtual_size(block) assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1) # Make sure that our test case would exceed the old max-network-message # limit assert(len(block.serialize(True)) > 2*1024*1024) self.test_node.test_witness_block(block, accepted=False) # Now resize the second transaction to make the block fit. cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0]) block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1) block.vtx[0].vout.pop() add_witness_commitment(block) block.solve() assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE) self.test_node.test_witness_block(block, accepted=True) # Update available utxo's self.utxo.pop(0) self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue)) # submitblock will try to add the nonce automatically, so that mining # software doesn't need to worry about doing so itself. def test_submit_block(self): block = self.build_next_block() # Try using a custom nonce and then don't supply it. # This shouldn't possibly work. add_witness_commitment(block, nonce=1) block.vtx[0].wit = CTxWitness() # drop the nonce block.solve() self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() != block.hash) # Now redo commitment with the standard nonce, but let planbcoind fill it in. add_witness_commitment(block, nonce=0) block.vtx[0].wit = CTxWitness() block.solve() self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert_equal(self.nodes[0].getbestblockhash(), block.hash) # This time, add a tx with non-empty witness, but don't supply # the commitment. block_2 = self.build_next_block() add_witness_commitment(block_2) block_2.solve() # Drop commitment and nonce -- submitblock should not fill in. block_2.vtx[0].vout.pop() block_2.vtx[0].wit = CTxWitness() self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True))) # Tip should not advance! assert(self.nodes[0].getbestblockhash() != block_2.hash) # Consensus tests of extra witness data in a transaction. 
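# [Editor's aside -- illustrative sketch, not part of the original test file.]
# The padding loop above grows only witness data, and each added witness byte adds one
# weight unit, i.e. a quarter of a virtual byte -- hence the
# "(MAX_BLOCK_BASE_SIZE - vsize) * 4" figure for the extra bytes needed. A standalone
# check of that relationship, assuming the usual 1,000,000-byte limit and made-up sizes
# rather than the test's real ones:
def _vsize(base, total):
    return (3 * base + total + 3) // 4

_base = 900000                      # assumed stripped block size
_total = _base + 350000             # assumed serialized size including witnesses
_needed = (1000000 - _vsize(_base, _total)) * 4
assert _vsize(_base, _total + _needed) == 1000000       # exactly at the virtual limit
assert _vsize(_base, _total + _needed + 1) == 1000001   # one more witness byte tips it over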
def test_extra_witness_data(self): self.log.info("Testing extra witness data in tx") assert(len(self.utxo) > 0) block = self.build_next_block() witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) # First try extra witness data on a tx that doesn't require a witness tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey)) tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])] tx.rehash() self.update_witness_block_with_transactions(block, [tx]) # Extra witness data should not be allowed. self.test_node.test_witness_block(block, accepted=False) # Try extra signature data. Ok if we're not spending a witness output. block.vtx[1].wit.vtxinwit = [] block.vtx[1].vin[0].scriptSig = CScript([OP_0]) block.vtx[1].rehash() add_witness_commitment(block) block.solve() self.test_node.test_witness_block(block, accepted=True) # Now try extra witness/signature data on an input that DOES require a # witness tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ] tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ] block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) # This has extra witness data, so it should fail. self.test_node.test_witness_block(block, accepted=False) # Now get rid of the extra witness, but add extra scriptSig data tx2.vin[0].scriptSig = CScript([OP_TRUE]) tx2.vin[1].scriptSig = CScript([OP_TRUE]) tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0) tx2.wit.vtxinwit[1].scriptWitness.stack = [] tx2.rehash() add_witness_commitment(block) block.solve() # This has extra signature data for a witness input, so it should fail. 
self.test_node.test_witness_block(block, accepted=False) # Now get rid of the extra scriptsig on the witness input, and verify # success (even with extra scriptsig data in the non-witness input) tx2.vin[0].scriptSig = b"" tx2.rehash() add_witness_commitment(block) block.solve() self.test_node.test_witness_block(block, accepted=True) # Update utxo for later tests self.utxo.pop(0) self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_max_witness_push_length(self): ''' Should only allow up to 520 byte pushes in witness stack ''' self.log.info("Testing maximum witness push size") MAX_SCRIPT_ELEMENT_SIZE = 520 assert(len(self.utxo)) block = self.build_next_block() witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) # First try a 521-byte stack element tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) self.test_node.test_witness_block(block, accepted=False) # Now reduce the length of the stack element tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE) add_witness_commitment(block) block.solve() self.test_node.test_witness_block(block, accepted=True) # Update the utxo for later tests self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_max_witness_program_length(self): # Can create witness outputs that are long, but can't be greater than # 10k bytes to successfully spend self.log.info("Testing maximum witness program length") assert(len(self.utxo)) MAX_PROGRAM_LENGTH = 10000 # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes. 
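# [Editor's aside -- illustrative sketch, not part of the original test file.]
# The "9937 bytes" in the comment above comes from the push encoding: a 520-byte
# element needs OP_PUSHDATA2 plus a 2-byte length prefix, so each push costs 523
# serialized bytes. Checking the arithmetic behind MAX_PROGRAM_LENGTH + 1:
_push_overhead = 3                        # OP_PUSHDATA2 + 2-byte little-endian length
_pushes = 19 * (_push_overhead + 520)
assert _pushes == 9937
assert _pushes + 63 + 1 == 10001          # 63 OP_DROPs + OP_TRUE: one byte over the limit
assert _pushes + 62 + 1 == 10000          # the follow-up script lands exactly on 10000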
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE]) assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1) long_witness_hash = sha256(long_witness_program) long_scriptPubKey = CScript([OP_0, long_witness_hash]) block = self.build_next_block() tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey)) tx.rehash() tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) self.test_node.test_witness_block(block, accepted=False) # Try again with one less byte in the witness program witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE]) assert(len(witness_program) == MAX_PROGRAM_LENGTH) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey) tx.rehash() tx2.vin[0].prevout.hash = tx.sha256 tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program] tx2.rehash() block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx, tx2]) self.test_node.test_witness_block(block, accepted=True) self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_witness_input_length(self): ''' Ensure that vin length must match vtxinwit length ''' self.log.info("Testing witness input length") assert(len(self.utxo)) witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) # Create a transaction that splits our utxo into many outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) nValue = self.utxo[0].nValue for i in range(10): tx.vout.append(CTxOut(int(nValue/10), scriptPubKey)) tx.vout[0].nValue -= 1000 assert(tx.vout[0].nValue >= 0) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) # Try various ways to spend tx that should all break. # This "broken" transaction serializer will not normalize # the length of vtxinwit. 
class BrokenCTransaction(CTransaction): def serialize_with_witness(self): flags = 0 if not self.wit.is_null(): flags |= 1 r = b"" r += struct.pack("<i", self.nVersion) if flags: dummy = [] r += ser_vector(dummy) r += struct.pack("<B", flags) r += ser_vector(self.vin) r += ser_vector(self.vout) if flags & 1: r += self.wit.serialize() r += struct.pack("<I", self.nLockTime) return r tx2 = BrokenCTransaction() for i in range(10): tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE]))) # First try using a too long vtxinwit for i in range(11): tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program] block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) self.test_node.test_witness_block(block, accepted=False) # Now try using a too short vtxinwit tx2.wit.vtxinwit.pop() tx2.wit.vtxinwit.pop() block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) self.test_node.test_witness_block(block, accepted=False) # Now make one of the intermediate witnesses be incorrect tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program] tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) self.test_node.test_witness_block(block, accepted=False) # Fix the broken witness and the block should be accepted. tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) self.test_node.test_witness_block(block, accepted=True) self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_witness_tx_relay_before_segwit_activation(self): self.log.info("Testing relay of witness transactions") # Generate a transaction that doesn't require a witness, but send it # with a witness. Should be rejected for premature-witness, but should # not be added to recently rejected list. assert(len(self.utxo)) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ] tx.rehash() tx_hash = tx.sha256 tx_value = tx.vout[0].nValue # Verify that if a peer doesn't set nServices to include NODE_WITNESS, # the getdata is just for the non-witness portion. self.old_node.announce_tx_and_wait_for_getdata(tx) assert(self.old_node.last_message["getdata"].inv[0].type == 1) # Since we haven't delivered the tx yet, inv'ing the same tx from # a witness transaction ought not result in a getdata. 
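# [Editor's aside -- illustrative sketch, not part of the original test file.]
# BrokenCTransaction above reuses the BIP144 witness serialization layout:
#   [nVersion][marker 0x00][flag 0x01][vin][vout][per-input witnesses][nLockTime]
# (serializing the empty dummy vector yields the single 0x00 marker byte). The only
# "broken" part is that it writes however many vtxinwit entries it happens to have
# instead of matching len(vin). A minimal check of the marker/flag framing using
# just struct:
import struct

_marker_and_flag = struct.pack("<B", 0) + struct.pack("<B", 1)
assert _marker_and_flag == b"\x00\x01"    # bytes that follow nVersion in a witness tx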
try: self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2) self.log.error("Error: duplicate tx getdata!") assert(False) except AssertionError as e: pass # Delivering this transaction with witness should fail (no matter who # its from) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False) self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False) # But eliminating the witness should fix it self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True) # Cleanup: mine the first transaction and update utxo self.nodes[0].generate(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.utxo.pop(0) self.utxo.append(UTXO(tx_hash, 0, tx_value)) # After segwit activates, verify that mempool: # - rejects transactions with unnecessary/extra witnesses # - accepts transactions with valid witnesses # and that witness transactions are relayed to non-upgraded peers. def test_tx_relay_after_segwit_activation(self): self.log.info("Testing relay of witness transactions") # Generate a transaction that doesn't require a witness, but send it # with a witness. Should be rejected because we can't use a witness # when spending a non-witness output. assert(len(self.utxo)) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ] tx.rehash() tx_hash = tx.sha256 # Verify that unnecessary witnesses are rejected. self.test_node.announce_tx_and_wait_for_getdata(tx) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False) # Verify that removing the witness succeeds. self.test_node.announce_tx_and_wait_for_getdata(tx) self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True) # Now try to add extra witness data to a valid witness tx. 
witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey)) tx2.rehash() tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) # Add too-large for IsStandard witness and check that it does not enter reject filter p2sh_program = CScript([OP_TRUE]) p2sh_pubkey = hash160(p2sh_program) witness_program2 = CScript([b'a'*400000]) tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]))) tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2] tx3.rehash() # Node will not be blinded to the transaction self.std_node.announce_tx_and_wait_for_getdata(tx3) self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size') self.std_node.announce_tx_and_wait_for_getdata(tx3) self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size') # Remove witness stuffing, instead add extra witness push on stack tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE])) tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ] tx3.rehash() self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True) self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False) # Get rid of the extra witness, and verify acceptance. tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] # Also check that old_node gets a tx announcement, even though this is # a witness transaction. self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True) self.old_node.wait_for_inv([CInv(1, tx3.sha256)]) # Test that getrawtransaction returns correct witness information # hash, size, vsize raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1) assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True)) assert_equal(raw_tx["size"], len(tx3.serialize_with_witness())) vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4 assert_equal(raw_tx["vsize"], vsize) assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1) assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii')) assert(vsize != raw_tx["size"]) # Cleanup: mine the transactions and update utxo for next test self.nodes[0].generate(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) # Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG # This is true regardless of segwit activation. # Also test that we don't ask for blocks from unupgraded peers def test_block_relay(self, segwit_activated): self.log.info("Testing block relay") blocktype = 2|MSG_WITNESS_FLAG # test_node has set NODE_WITNESS, so all getdata requests should be for # witness blocks. 
# Test announcing a block via inv results in a getdata, and that # announcing a version 4 or random VB block with a header results in a getdata block1 = self.build_next_block() block1.solve() self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) self.test_node.test_witness_block(block1, True) block2 = self.build_next_block(nVersion=4) block2.solve() self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) self.test_node.test_witness_block(block2, True) block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15))) block3.solve() self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) self.test_node.test_witness_block(block3, True) # Check that we can getdata for witness blocks or regular blocks, # and the right thing happens. if segwit_activated == False: # Before activation, we should be able to request old blocks with # or without witness, and they should be the same. chain_height = self.nodes[0].getblockcount() # Pick 10 random blocks on main chain, and verify that getdata's # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal. all_heights = list(range(chain_height+1)) random.shuffle(all_heights) all_heights = all_heights[0:10] for height in all_heights: block_hash = self.nodes[0].getblockhash(height) rpc_block = self.nodes[0].getblock(block_hash, False) block_hash = int(block_hash, 16) block = self.test_node.request_block(block_hash, 2) wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG) assert_equal(block.serialize(True), wit_block.serialize(True)) assert_equal(block.serialize(), hex_str_to_bytes(rpc_block)) else: # After activation, witness blocks and non-witness blocks should # be different. Verify rpc getblock() returns witness blocks, while # getdata respects the requested type. block = self.build_next_block() self.update_witness_block_with_transactions(block, []) # This gives us a witness commitment. assert(len(block.vtx[0].wit.vtxinwit) == 1) assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1) self.test_node.test_witness_block(block, accepted=True) # Now try to retrieve it... rpc_block = self.nodes[0].getblock(block.hash, False) non_wit_block = self.test_node.request_block(block.sha256, 2) wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG) assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block)) assert_equal(wit_block.serialize(False), non_wit_block.serialize()) assert_equal(wit_block.serialize(True), block.serialize(True)) # Test size, vsize, weight rpc_details = self.nodes[0].getblock(block.hash, True) assert_equal(rpc_details["size"], len(block.serialize(True))) assert_equal(rpc_details["strippedsize"], len(block.serialize(False))) weight = 3*len(block.serialize(False)) + len(block.serialize(True)) assert_equal(rpc_details["weight"], weight) # Upgraded node should not ask for blocks from unupgraded block4 = self.build_next_block(nVersion=4) block4.solve() self.old_node.getdataset = set() # Blocks can be requested via direct-fetch (immediately upon processing the announcement) # or via parallel download (with an indeterminate delay from processing the announcement) # so to test that a block is NOT requested, we could guess a time period to sleep for, # and then check. 
We can avoid the sleep() by taking advantage of transaction getdata's # being processed after block getdata's, and announce a transaction as well, # and then check to see if that particular getdata has been received. # Since 0.14, inv's will only be responded to with a getheaders, so send a header # to announce this block. msg = msg_headers() msg.headers = [ CBlockHeader(block4) ] self.old_node.send_message(msg) self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0]) assert(block4.sha256 not in self.old_node.getdataset) # V0 segwit outputs should be standard after activation, but not before. def test_standardness_v0(self, segwit_activated): self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before")) assert(len(self.utxo)) witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) p2sh_pubkey = hash160(witness_program) p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) # First prepare a p2sh output (so that spending it will pass standardness) p2sh_tx = CTransaction() p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)] p2sh_tx.rehash() # Mine it on test_node to create the confirmed output. self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Now test standardness of v0 P2WSH outputs. # Start by creating a transaction with two outputs. tx = CTransaction() tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)] tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later tx.rehash() self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated) # Now create something that looks like a P2PKH output. This won't be spendable. scriptPubKey = CScript([OP_0, hash160(witness_hash)]) tx2 = CTransaction() if segwit_activated: # if tx was accepted, then we spend the second output. tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")] tx2.vout = [CTxOut(7000, scriptPubKey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] else: # if tx wasn't accepted, we just re-spend the p2sh output we started with. tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)] tx2.rehash() self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated) # Now update self.utxo for later tests. tx3 = CTransaction() if segwit_activated: # tx and tx2 were both accepted. Don't bother trying to reclaim the # P2PKH output; just send tx's first output back to an anyone-can-spend. sync_mempools([self.nodes[0], self.nodes[1]]) tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))] tx3.wit.vtxinwit.append(CTxInWitness()) tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx3.rehash() self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True) else: # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output. 
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)] tx3.rehash() self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) assert_equal(len(self.nodes[1].getrawmempool()), 0) # Verify that future segwit upgraded transactions are non-standard, # but valid in blocks. Can run this before and after segwit activation. def test_segwit_versions(self): self.log.info("Testing standardness/consensus for segwit versions (0-16)") assert(len(self.utxo)) NUM_TESTS = 17 # will test OP_0, OP1, ..., OP_16 if (len(self.utxo) < NUM_TESTS): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS for i in range(NUM_TESTS): tx.vout.append(CTxOut(split_value, CScript([OP_TRUE]))) tx.rehash() block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) self.utxo.pop(0) for i in range(NUM_TESTS): self.utxo.append(UTXO(tx.sha256, i, split_value)) sync_blocks(self.nodes) temp_utxo = [] tx = CTransaction() count = 0 witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) assert_equal(len(self.nodes[1].getrawmempool()), 0) for version in list(range(OP_1, OP_16+1)) + [OP_0]: count += 1 # First try to spend to a future version segwit scriptPubKey. scriptPubKey = CScript([CScriptOp(version), witness_hash]) tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)] tx.rehash() self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False) self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True) self.utxo.pop(0) temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue)) self.nodes[0].generate(1) # Mine all the transactions sync_blocks(self.nodes) assert(len(self.nodes[0].getrawmempool()) == 0) # Finally, verify that version 0 -> version 1 transactions # are non-standard scriptPubKey = CScript([CScriptOp(OP_1), witness_hash]) tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] tx2.rehash() # Gets accepted to test_node, because standardness of outputs isn't # checked with fRequireStandard self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True) self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False) temp_utxo.pop() # last entry in temp_utxo was the output we just spent temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) # Spend everything in temp_utxo back to an OP_TRUE output. tx3 = CTransaction() total_value = 0 for i in temp_utxo: tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) total_value += i.nValue tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE]))) tx3.rehash() # Spending a higher version witness output is not allowed by policy, # even with fRequireStandard=false. 
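# [Editor's aside -- illustrative sketch, not part of the original test file.]
# The version loop above walks witness versions 1..16 and then 0 by putting the
# matching opcode in front of the 32-byte program hash. The opcode values it leans on
# (standard script encoding: OP_0 = 0x00, OP_1..OP_16 = 0x51..0x60):
_OP_0, _OP_1, _OP_16 = 0x00, 0x51, 0x60
assert _OP_16 - _OP_1 + 1 == 16                       # OP_1..OP_16 are contiguous opcodes
_versions = list(range(_OP_1, _OP_16 + 1)) + [_OP_0]
assert len(_versions) == 17                           # matches NUM_TESTS above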
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False) self.test_node.sync_with_ping() with mininode_lock: assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason) # Building a block with the transaction must be valid, however. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2, tx3]) self.test_node.test_witness_block(block, accepted=True) sync_blocks(self.nodes) # Add utxo to our list self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) def test_premature_coinbase_witness_spend(self): self.log.info("Testing premature coinbase witness spend") block = self.build_next_block() # Change the output of the block to be a witness output. witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) block.vtx[0].vout[0].scriptPubKey = scriptPubKey # This next line will rehash the coinbase and update the merkle # root, and solve. self.update_witness_block_with_transactions(block, []) self.test_node.test_witness_block(block, accepted=True) spend_tx = CTransaction() spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")] spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)] spend_tx.wit.vtxinwit.append(CTxInWitness()) spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] spend_tx.rehash() # Now test a premature spend. self.nodes[0].generate(98) sync_blocks(self.nodes) block2 = self.build_next_block() self.update_witness_block_with_transactions(block2, [spend_tx]) self.test_node.test_witness_block(block2, accepted=False) # Advancing one more block should allow the spend. self.nodes[0].generate(1) block2 = self.build_next_block() self.update_witness_block_with_transactions(block2, [spend_tx]) self.test_node.test_witness_block(block2, accepted=True) sync_blocks(self.nodes) def test_signature_version_1(self): self.log.info("Testing segwit signature hash version 1") key = CECKey() key.set_secretbytes(b"9") pubkey = CPubKey(key.get_pubkey()) witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) # First create a witness output for use in the tests. assert(len(self.utxo)) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True) # Mine this transaction in preparation for following tests. 
block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) sync_blocks(self.nodes) self.utxo.pop(0) # Test each hashtype prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) for sigflag in [ 0, SIGHASH_ANYONECANPAY ]: for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]: hashtype |= sigflag block = self.build_next_block() tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey)) tx.wit.vtxinwit.append(CTxInWitness()) # Too-large input value sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key) self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=False) # Too-small input value sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key) block.vtx.pop() # remove last tx self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=False) # Now try correct value sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) block.vtx.pop() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) # Test combinations of signature hashes. # Split the utxo into a lot of outputs. # Randomly choose up to 10 to spend, sign with different hashtypes, and # output to a random number of outputs. Repeat NUM_TESTS times. # Ensure that we've tested a situation where we use SIGHASH_SINGLE with # an input index > number of outputs. NUM_TESTS = 500 temp_utxos = [] tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) split_value = prev_utxo.nValue // NUM_TESTS for i in range(NUM_TESTS): tx.vout.append(CTxOut(split_value, scriptPubKey)) tx.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) for i in range(NUM_TESTS): temp_utxos.append(UTXO(tx.sha256, i, split_value)) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) block = self.build_next_block() used_sighash_single_out_of_bounds = False for i in range(NUM_TESTS): # Ping regularly to keep the connection alive if (not i % 100): self.test_node.sync_with_ping() # Choose random number of inputs to use. num_inputs = random.randint(1, 10) # Create a slight bias for producing more utxos num_outputs = random.randint(1, 11) random.shuffle(temp_utxos) assert(len(temp_utxos) > num_inputs) tx = CTransaction() total_value = 0 for i in range(num_inputs): tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b"")) tx.wit.vtxinwit.append(CTxInWitness()) total_value += temp_utxos[i].nValue split_value = total_value // num_outputs for i in range(num_outputs): tx.vout.append(CTxOut(split_value, scriptPubKey)) for i in range(num_inputs): # Now try to sign each input, using a random hashtype. 
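# [Editor's aside -- illustrative sketch, not part of the original test file.]
# The nested hashtype loops above OR SIGHASH_ANYONECANPAY (0x80) into the base hash
# types (ALL = 1, NONE = 2, SINGLE = 3), so the byte appended to each signature encodes
# both choices at once. A standalone check of that composition (constants restated
# locally so the snippet is self-contained):
_ALL, _NONE, _SINGLE, _ANYONECANPAY = 1, 2, 3, 0x80
_combined = _SINGLE | _ANYONECANPAY
assert _combined == 0x83
assert _combined & 0x1f == _SINGLE            # low bits recover the base hash type
assert bool(_combined & _ANYONECANPAY)        # high bit flags the ANYONECANPAY variant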
anyonecanpay = 0 if random.randint(0, 1): anyonecanpay = SIGHASH_ANYONECANPAY hashtype = random.randint(1, 3) | anyonecanpay sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key) if (hashtype == SIGHASH_SINGLE and i >= num_outputs): used_sighash_single_out_of_bounds = True tx.rehash() for i in range(num_outputs): temp_utxos.append(UTXO(tx.sha256, i, split_value)) temp_utxos = temp_utxos[num_inputs:] block.vtx.append(tx) # Test the block periodically, if we're close to maxblocksize if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000): self.update_witness_block_with_transactions(block, []) self.test_node.test_witness_block(block, accepted=True) block = self.build_next_block() if (not used_sighash_single_out_of_bounds): self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value") # Test the transactions we've added to the block if (len(block.vtx) > 1): self.update_witness_block_with_transactions(block, []) self.test_node.test_witness_block(block, accepted=True) # Now test witness version 0 P2PKH transactions pubkeyhash = hash160(pubkey) scriptPKH = CScript([OP_0, pubkeyhash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH)) tx.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) script = GetP2PKHScript(pubkeyhash) sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL # Check that we can't have a scriptSig tx2.vin[0].scriptSig = CScript([signature, pubkey]) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx, tx2]) self.test_node.test_witness_block(block, accepted=False) # Move the signature to the witness. block.vtx.pop() tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey] tx2.vin[0].scriptSig = b"" tx2.rehash() self.update_witness_block_with_transactions(block, [tx2]) self.test_node.test_witness_block(block, accepted=True) temp_utxos.pop(0) # Update self.utxos for later tests. Just spend everything in # temp_utxos to a corresponding entry in self.utxos tx = CTransaction() index = 0 for i in temp_utxos: # Just spend to our usual anyone-can-spend output # Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up # the signatures as we go. tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key) index += 1 block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) for i in range(len(tx.vout)): self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue)) # Test P2SH wrapped witness programs. 
def test_p2sh_witness(self, segwit_activated): self.log.info("Testing P2SH witness transactions") assert(len(self.utxo)) # Prepare the p2sh-wrapped witness output witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) p2wsh_pubkey = CScript([OP_0, witness_hash]) p2sh_witness_hash = hash160(p2wsh_pubkey) scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script # Fund the P2SH output tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() # Verify mempool acceptance and block validity self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated) sync_blocks(self.nodes) # Now test attempts to spend the output. spend_tx = CTransaction() spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig)) spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) spend_tx.rehash() # This transaction should not be accepted into the mempool pre- or # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which # will require a witness to spend a witness program regardless of # segwit activation. Note that older planbcoind's that are not # segwit-aware would also reject this for failing CLEANSTACK. self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False) # Try to put the witness script in the scriptSig, should also fail. spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) spend_tx.rehash() self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False) # Now put the witness script in the witness, should succeed after # segwit activates. spend_tx.vin[0].scriptSig = scriptSig spend_tx.rehash() spend_tx.wit.vtxinwit.append(CTxInWitness()) spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ] # Verify mempool acceptance self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated) block = self.build_next_block() self.update_witness_block_with_transactions(block, [spend_tx]) # If we're before activation, then sending this without witnesses # should be valid. If we're after activation, then sending this with # witnesses should be valid. if segwit_activated: self.test_node.test_witness_block(block, accepted=True) else: self.test_node.test_witness_block(block, accepted=True, with_witness=False) # Update self.utxo self.utxo.pop(0) self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue)) # Test the behavior of starting up a segwit-aware node after the softfork # has activated. As segwit requires different block data than pre-segwit # nodes would have stored, this requires special handling. # To enable this test, pass --oldbinary=<path-to-pre-segwit-planbcoind> to # the test. def test_upgrade_after_activation(self, node_id): self.log.info("Testing software upgrade after softfork activation") assert(node_id != 0) # node0 is assumed to be a segwit-active planbcoind # Make sure the nodes are all up sync_blocks(self.nodes) # Restart with the new binary self.stop_node(node_id) self.nodes[node_id] = self.start_node(node_id, self.options.tmpdir) connect_nodes(self.nodes[0], node_id) sync_blocks(self.nodes) # Make sure that this peer thinks segwit has activated. 
assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active") # Make sure this peers blocks match those of node0. height = self.nodes[node_id].getblockcount() while height >= 0: block_hash = self.nodes[node_id].getblockhash(height) assert_equal(block_hash, self.nodes[0].getblockhash(height)) assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash)) height -= 1 def test_witness_sigops(self): '''Ensure sigop counting is correct inside witnesses.''' self.log.info("Testing sigops limit") assert(len(self.utxo)) # Keep this under MAX_OPS_PER_SCRIPT (201) witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) sigops_per_script = 20*5 + 193*1 # We'll produce 2 extra outputs, one with a program that would take us # over max sig ops, and one with a program that would exactly reach max # sig ops outputs = (MAX_SIGOP_COST // sigops_per_script) + 2 extra_sigops_available = MAX_SIGOP_COST % sigops_per_script # We chose the number of checkmultisigs/checksigs to make this work: assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT # This script, when spent with the first # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction, # would push us just over the block sigop limit. witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF]) witness_hash_toomany = sha256(witness_program_toomany) scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany]) # If we spend this script instead, we would exactly reach our sigop # limit (for witness sigops). witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF]) witness_hash_justright = sha256(witness_program_justright) scriptPubKey_justright = CScript([OP_0, witness_hash_justright]) # First split our available utxo into a bunch of outputs split_value = self.utxo[0].nValue // outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) for i in range(outputs): tx.vout.append(CTxOut(split_value, scriptPubKey)) tx.vout[-2].scriptPubKey = scriptPubKey_toomany tx.vout[-1].scriptPubKey = scriptPubKey_justright tx.rehash() block_1 = self.build_next_block() self.update_witness_block_with_transactions(block_1, [tx]) self.test_node.test_witness_block(block_1, accepted=True) tx2 = CTransaction() # If we try to spend the first n-1 outputs from tx, that should be # too many sigops. total_value = 0 for i in range(outputs-1): tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ] total_value += tx.vout[i].nValue tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ] tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE]))) tx2.rehash() block_2 = self.build_next_block() self.update_witness_block_with_transactions(block_2, [tx2]) self.test_node.test_witness_block(block_2, accepted=False) # Try dropping the last input in tx2, and add an output that has # too many sigops (contributing to legacy sigop count). 
checksig_count = (extra_sigops_available // 4) + 1 scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count) tx2.vout.append(CTxOut(0, scriptPubKey_checksigs)) tx2.vin.pop() tx2.wit.vtxinwit.pop() tx2.vout[0].nValue -= tx.vout[-2].nValue tx2.rehash() block_3 = self.build_next_block() self.update_witness_block_with_transactions(block_3, [tx2]) self.test_node.test_witness_block(block_3, accepted=False) # If we drop the last checksig in this output, the tx should succeed. block_4 = self.build_next_block() tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1)) tx2.rehash() self.update_witness_block_with_transactions(block_4, [tx2]) self.test_node.test_witness_block(block_4, accepted=True) # Reset the tip back down for the next test sync_blocks(self.nodes) for x in self.nodes: x.invalidateblock(block_4.hash) # Try replacing the last input of tx2 to be spending the last # output of tx block_5 = self.build_next_block() tx2.vout.pop() tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ] tx2.rehash() self.update_witness_block_with_transactions(block_5, [tx2]) self.test_node.test_witness_block(block_5, accepted=True) # TODO: test p2sh sigop counting def test_getblocktemplate_before_lockin(self): self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)") # Node0 is segwit aware, node2 is not. for node in [self.nodes[0], self.nodes[2]]: gbt_results = node.getblocktemplate() block_version = gbt_results['version'] # If we're not indicating segwit support, we will still be # signalling for segwit activation. assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0]) # If we don't specify the segwit rule, then we won't get a default # commitment. assert('default_witness_commitment' not in gbt_results) # Workaround: # Can either change the tip, or change the mempool and wait 5 seconds # to trigger a recomputation of getblocktemplate. txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16) # Using mocktime lets us avoid sleep() sync_mempools(self.nodes) self.nodes[0].setmocktime(int(time.time())+10) self.nodes[2].setmocktime(int(time.time())+10) for node in [self.nodes[0], self.nodes[2]]: gbt_results = node.getblocktemplate({"rules" : ["segwit"]}) block_version = gbt_results['version'] if node == self.nodes[2]: # If this is a non-segwit node, we should still not get a witness # commitment, nor a version bit signalling segwit. assert_equal(block_version & (1 << VB_WITNESS_BIT), 0) assert('default_witness_commitment' not in gbt_results) else: # For segwit-aware nodes, check the version bit and the witness # commitment are correct. assert(block_version & (1 << VB_WITNESS_BIT) != 0) assert('default_witness_commitment' in gbt_results) witness_commitment = gbt_results['default_witness_commitment'] # Check that default_witness_commitment is present. witness_root = CBlock.get_merkle_root([ser_uint256(0), ser_uint256(txid)]) script = get_witness_script(witness_root, 0) assert_equal(witness_commitment, bytes_to_hex_str(script)) # undo mocktime self.nodes[0].setmocktime(0) self.nodes[2].setmocktime(0) # Uncompressed pubkeys are no longer supported in default relay policy, # but (for now) are still valid in blocks. 
def test_uncompressed_pubkey(self): self.log.info("Testing uncompressed pubkeys") # Segwit transactions using uncompressed pubkeys are not accepted # under default policy, but should still pass consensus. key = CECKey() key.set_secretbytes(b"9") key.set_compressed(False) pubkey = CPubKey(key.get_pubkey()) assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey assert(len(self.utxo) > 0) utxo = self.utxo.pop(0) # Test 1: P2WPKH # First create a P2WPKH output that uses an uncompressed pubkey pubkeyhash = hash160(pubkey) scriptPKH = CScript([OP_0, pubkeyhash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b"")) tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH)) tx.rehash() # Confirm it in a block. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) # Now try to spend it. Send it to a P2WSH output, which we'll # use in the next test. witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) witness_hash = sha256(witness_program) scriptWSH = CScript([OP_0, witness_hash]) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH)) script = GetP2PKHScript(pubkeyhash) sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ] tx2.rehash() # Should fail policy test. self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) self.test_node.test_witness_block(block, accepted=True) # Test 2: P2WSH # Try to spend the P2WSH output created in last test. # Send it to a P2SH(P2WSH) output, which we'll use in the next test. p2sh_witness_hash = hash160(scriptWSH) scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) scriptSig = CScript([scriptWSH]) tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH)) tx3.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) # Should fail policy test. self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx3]) self.test_node.test_witness_block(block, accepted=True) # Test 3: P2SH(P2WSH) # Try to spend the P2SH output created in the last test. # Send it to a P2PKH output, which we'll use in the next test. scriptPubKey = GetP2PKHScript(pubkeyhash) tx4 = CTransaction() tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig)) tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey)) tx4.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) # Should fail policy test. 
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx4]) self.test_node.test_witness_block(block, accepted=True) # Test 4: Uncompressed pubkeys should still be valid in non-segwit # transactions. tx5 = CTransaction() tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b"")) tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE]))) (sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx5.vin[0].scriptSig = CScript([signature, pubkey]) tx5.rehash() # Should pass policy and consensus. self.test_node.test_transaction_acceptance(tx5, True, True) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx5]) self.test_node.test_witness_block(block, accepted=True) self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue)) def test_non_standard_witness(self): self.log.info("Testing detection of non-standard P2WSH witness") pad = chr(1).encode('latin-1') # Create scripts for tests scripts = [] scripts.append(CScript([OP_DROP] * 100)) scripts.append(CScript([OP_DROP] * 99)) scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60)) scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61)) p2wsh_scripts = [] assert(len(self.utxo)) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) # For each script, generate a pair of P2WSH and P2SH-P2WSH output. outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2) for i in scripts: p2wsh = CScript([OP_0, sha256(i)]) p2sh = hash160(p2wsh) p2wsh_scripts.append(p2wsh) tx.vout.append(CTxOut(outputvalue, p2wsh)) tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL]))) tx.rehash() txid = tx.sha256 self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Creating transactions for tests p2wsh_txs = [] p2sh_txs = [] for i in range(len(scripts)): p2wsh_tx = CTransaction() p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2))) p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) p2wsh_tx.rehash() p2wsh_txs.append(p2wsh_tx) p2sh_tx = CTransaction() p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]]))) p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) p2sh_tx.wit.vtxinwit.append(CTxInWitness()) p2sh_tx.rehash() p2sh_txs.append(p2sh_tx) # Testing native P2WSH # Witness stack size, excluding witnessScript, over 100 is non-standard p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard') # Non-standard nodes should accept self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True) # Stack element size over 80 bytes is non-standard p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard') # Non-standard nodes should accept self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True) # Standard nodes should accept if element size is not over 80 bytes p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] 
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True) # witnessScript size at 3600 bytes is standard p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True) self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True) # witnessScript size at 3601 bytes is non-standard p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard') # Non-standard nodes should accept self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True) # Repeating the same tests with P2SH-P2WSH p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard') self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True) p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard') self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True) p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True) p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True) self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)<|fim▁hole|> self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node # Valid but non-standard transactions in a block should be accepted by standard node sync_blocks(self.nodes) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) self.utxo.pop(0) def run_test(self): # Setup the p2p connections and start up the network thread. self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK self.old_node = TestNode() # only NODE_NETWORK self.std_node = TestNode() # for testing node1 (fRequireStandard=true) self.p2p_connections = [self.test_node, self.old_node] self.connections = [] self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS)) self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK)) self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS)) self.test_node.add_connection(self.connections[0]) self.old_node.add_connection(self.connections[1]) self.std_node.add_connection(self.connections[2]) NetworkThread().start() # Start up network handling in another thread # Keep a place to store utxo's that can be used in later tests self.utxo = [] # Test logic begins here self.test_node.wait_for_verack() self.log.info("Starting tests before segwit lock in:") self.test_witness_services() # Verifies NODE_WITNESS self.test_non_witness_transaction() # non-witness tx's are accepted self.test_unnecessary_witness_before_segwit_activation() self.test_block_relay(segwit_activated=False) # Advance to segwit being 'started' self.advance_to_segwit_started() sync_blocks(self.nodes) self.test_getblocktemplate_before_lockin() sync_blocks(self.nodes) # At lockin, nothing should change. 
self.log.info("Testing behavior post lockin, pre-activation") self.advance_to_segwit_lockin() # Retest unnecessary witnesses self.test_unnecessary_witness_before_segwit_activation() self.test_witness_tx_relay_before_segwit_activation() self.test_block_relay(segwit_activated=False) self.test_p2sh_witness(segwit_activated=False) self.test_standardness_v0(segwit_activated=False) sync_blocks(self.nodes) # Now activate segwit self.log.info("Testing behavior after segwit activation") self.advance_to_segwit_active() sync_blocks(self.nodes) # Test P2SH witness handling again self.test_p2sh_witness(segwit_activated=True) self.test_witness_commitments() self.test_block_malleability() self.test_witness_block_size() self.test_submit_block() self.test_extra_witness_data() self.test_max_witness_push_length() self.test_max_witness_program_length() self.test_witness_input_length() self.test_block_relay(segwit_activated=True) self.test_tx_relay_after_segwit_activation() self.test_standardness_v0(segwit_activated=True) self.test_segwit_versions() self.test_premature_coinbase_witness_spend() self.test_uncompressed_pubkey() self.test_signature_version_1() self.test_non_standard_witness() sync_blocks(self.nodes) self.test_upgrade_after_activation(node_id=2) self.test_witness_sigops() if __name__ == '__main__': SegWitTest().main()<|fim▁end|>
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard') self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
<|file_name|>images_client.py<|end_file_name|><|fim▁begin|># Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT<|fim▁hole|>import json from six.moves.urllib import parse as urllib from tempest_lib import exceptions as lib_exc from tempest.api_schema.response.compute.v2_1 import images as schema from tempest.common import service_client from tempest.common import waiters class ImagesClientJSON(service_client.ServiceClient): def create_image(self, server_id, name, meta=None): """Creates an image of the original server.""" post_body = { 'createImage': { 'name': name, } } if meta is not None: post_body['createImage']['metadata'] = meta post_body = json.dumps(post_body) resp, body = self.post('servers/%s/action' % str(server_id), post_body) self.validate_response(schema.create_image, resp, body) return service_client.ResponseBody(resp, body) def list_images(self, params=None): """Returns a list of all images filtered by any parameters.""" url = 'images' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) self.validate_response(schema.list_images, resp, body) return service_client.ResponseBodyList(resp, body['images']) def list_images_with_detail(self, params=None): """Returns a detailed list of images filtered by any parameters.""" url = 'images/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) self.validate_response(schema.list_images_details, resp, body) return service_client.ResponseBodyList(resp, body['images']) def show_image(self, image_id): """Returns the details of a single image.""" resp, body = self.get("images/%s" % str(image_id)) self.expected_success(200, resp.status) body = json.loads(body) self.validate_response(schema.get_image, resp, body) return service_client.ResponseBody(resp, body['image']) def delete_image(self, image_id): """Deletes the provided image.""" resp, body = self.delete("images/%s" % str(image_id)) self.validate_response(schema.delete, resp, body) return service_client.ResponseBody(resp, body) def wait_for_image_status(self, image_id, status): """Waits for an image to reach a given status.""" waiters.wait_for_image_status(self, image_id, status) def list_image_metadata(self, image_id): """Lists all metadata items for an image.""" resp, body = self.get("images/%s/metadata" % str(image_id)) body = json.loads(body) self.validate_response(schema.image_metadata, resp, body) return service_client.ResponseBody(resp, body['metadata']) def set_image_metadata(self, image_id, meta): """Sets the metadata for an image.""" post_body = json.dumps({'metadata': meta}) resp, body = self.put('images/%s/metadata' % str(image_id), post_body) body = json.loads(body) self.validate_response(schema.image_metadata, resp, body) return service_client.ResponseBody(resp, body['metadata']) def update_image_metadata(self, image_id, meta): """Updates the metadata for an image.""" post_body = json.dumps({'metadata': meta}) resp, body = self.post('images/%s/metadata' % str(image_id), post_body) body = json.loads(body) self.validate_response(schema.image_metadata, resp, body) return service_client.ResponseBody(resp, 
body['metadata']) def get_image_metadata_item(self, image_id, key): """Returns the value for a specific image metadata key.""" resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key)) body = json.loads(body) self.validate_response(schema.image_meta_item, resp, body) return service_client.ResponseBody(resp, body['meta']) def set_image_metadata_item(self, image_id, key, meta): """Sets the value for a specific image metadata key.""" post_body = json.dumps({'meta': meta}) resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key), post_body) body = json.loads(body) self.validate_response(schema.image_meta_item, resp, body) return service_client.ResponseBody(resp, body['meta']) def delete_image_metadata_item(self, image_id, key): """Deletes a single image metadata key/value pair.""" resp, body = self.delete("images/%s/metadata/%s" % (str(image_id), key)) self.validate_response(schema.delete, resp, body) return service_client.ResponseBody(resp, body) def is_resource_deleted(self, id): try: self.show_image(id) except lib_exc.NotFound: return True return False @property def resource_type(self): """Returns the primary type of resource this client works with.""" return 'image'<|fim▁end|>
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
<|file_name|>plugin_stop.py<|end_file_name|><|fim▁begin|># coding:utf8 class Plugin(object): __doc__ = '''This plugin is intended to stop the bot. Using it requires access level {protection} or higher Keywords: [{keywords}] Usage: {keyword}<|fim▁hole|> name = 'stop' keywords = (u'стоп', name, '!') protection = 3 argument_required = False def respond(self, msg, rsp, utils, *args, **kwargs): utils.stop_bot() rsp.text = u'Shutting down. Have a good day!' return rsp<|fim▁end|>
Example: {keyword}'''
<|file_name|>hd_keychain.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2013-2016 John Connor (BM-NC49AxAjcqVcF5jNPu85Rb8MJ2d9JqZt) * * This file is part of vcash. * * vcash is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License with * additional permissions to the one published by the Free Software * Foundation, either version 3 of the License, or (at your option) * any later version. For more information see LICENSE. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <cassert> #include <iostream> #include <stdexcept> #include <sstream> #include <coin/address.hpp> #include <coin/base58.hpp> #include <coin/big_number.hpp> #include <coin/hd_ecdsa.hpp> #include <coin/hd_keychain.hpp> #include <coin/logger.hpp> using namespace coin; std::uint32_t hd_keychain::g_private_version = private_version; std::uint32_t hd_keychain::g_public_version = public_version; /** * The curve order. */ static const big_number g_curve_order( "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141" ); hd_keychain::hd_keychain() { // ... } hd_keychain::hd_keychain( const std::vector<std::uint8_t> & key, const std::vector<std::uint8_t> & chain_code, const std::uint32_t & child_num, const std::uint32_t & parent_fingerprint, const std::uint32_t & depth ) : m_depth(depth) , m_parent_fingerprint(parent_fingerprint) , m_child_count(child_num) , m_chain_code(chain_code) , m_key(key) { if (m_chain_code.size() != 32) { throw std::runtime_error("Invalid chain code."); } if (m_key.size() == 32) { big_number n; n.set_vector_no_reverse(m_key); if (n >= g_curve_order || n.is_zero()) { throw std::runtime_error("Invalid key."); } std::vector<std::uint8_t> private_key; private_key.push_back(0x00); private_key.insert(private_key.end(), m_key.begin(), m_key.end()); m_key = private_key; } else if (m_key.size() == 33) { try { hd_ecdsa::point K(m_key); } catch (...) { throw std::runtime_error("Invalid key."); } } else { throw std::runtime_error("Invalid key."); } m_version = is_private() ? 
g_private_version : g_public_version; update_public_key(); m_is_valid = true; } hd_keychain::hd_keychain(const std::vector<std::uint8_t> & extkey) { if (extkey.size() != 78) { throw std::runtime_error("Invalid extended key length."); } m_version = ((std::uint32_t)extkey[0] << 24) | ((std::uint32_t)extkey[1] << 16) | ((std::uint32_t)extkey[2] << 8) | (std::uint32_t)extkey[3] ; m_depth = extkey[4]; m_parent_fingerprint = ((std::uint32_t)extkey[5] << 24) | ((std::uint32_t)extkey[6] << 16) | ((std::uint32_t)extkey[7] << 8) | (std::uint32_t)extkey[8] ; m_child_count = ((std::uint32_t)extkey[9] << 24) | ((std::uint32_t)extkey[10] << 16) | ((uint32_t)extkey[11] << 8) | (uint32_t)extkey[12] ; m_chain_code.assign(extkey.begin() + 13, extkey.begin() + 45); m_key.assign(extkey.begin() + 45, extkey.begin() + 78); update_public_key(); m_is_valid = true; } hd_keychain::hd_keychain(const hd_keychain & other) { m_is_valid = other.m_is_valid; if (m_is_valid == true) { m_version = other.m_version; m_depth = other.m_depth; m_parent_fingerprint = other.m_parent_fingerprint; m_child_count = other.m_child_count; m_chain_code = other.m_chain_code; m_key = other.m_key; update_public_key(); } } hd_keychain & hd_keychain::operator = (const hd_keychain & rhs) { m_is_valid = rhs.m_is_valid; if (m_is_valid == true) { m_version = rhs.m_version; m_depth = rhs.m_depth; m_parent_fingerprint = rhs.m_parent_fingerprint; m_child_count = rhs.m_child_count; m_chain_code = rhs.m_chain_code; m_key = rhs.m_key; update_public_key(); } return *this; } bool hd_keychain::operator == (const hd_keychain & rhs) const { return m_is_valid && rhs.m_is_valid && m_version == rhs.m_version && m_depth == rhs.m_depth && m_parent_fingerprint == rhs.m_parent_fingerprint && m_child_count == rhs.m_child_count && m_chain_code == rhs.m_chain_code && m_key == rhs.m_key ; } bool hd_keychain::operator != (const hd_keychain & rhs) const { return !(*this == rhs); } void hd_keychain::set_is_valid(const bool & val) { m_is_valid = val; } const bool hd_keychain::is_valid() const { return m_is_valid; } const bool hd_keychain::is_private() const { return m_key.size() == 33 && m_key[0] == 0x00; } const std::vector<std::uint8_t> hd_keychain::extended_key() const { std::vector<std::uint8_t> ret; ret.push_back((std::uint32_t)m_version >> 24); ret.push_back(((std::uint32_t)m_version >> 16) & 0xff); ret.push_back(((std::uint32_t)m_version >> 8) & 0xff); ret.push_back((std::uint32_t)m_version & 0xff); ret.push_back(m_depth); ret.push_back((std::uint32_t)m_parent_fingerprint >> 24); ret.push_back(((std::uint32_t)m_parent_fingerprint >> 16) & 0xff); ret.push_back(((std::uint32_t)m_parent_fingerprint >> 8) & 0xff); ret.push_back((std::uint32_t)m_parent_fingerprint & 0xff); ret.push_back((std::uint32_t)m_child_count >> 24); ret.push_back(((std::uint32_t)m_child_count >> 16) & 0xff); ret.push_back(((std::uint32_t)m_child_count >> 8) & 0xff); ret.push_back((std::uint32_t)m_child_count & 0xff); ret.insert(ret.end(), m_chain_code.begin(), m_chain_code.end()); ret.insert(ret.end(), m_key.begin(), m_key.end()); return ret; } void hd_keychain::set_version(const std::uint32_t & val) { m_version = val; } const std::uint32_t & hd_keychain::version() const { return m_version; } void hd_keychain::set_depth(const std::uint8_t & val) { m_depth = val; } const std::uint8_t & hd_keychain::depth() const { return m_depth; } void hd_keychain::set_parent_fingerprint(const std::uint32_t & val) { m_parent_fingerprint = val; } const std::uint32_t & hd_keychain::parent_fingerprint() const { 
return m_parent_fingerprint; } void hd_keychain::set_child_count(const std::uint32_t & val) { m_child_count = val; } const std::uint32_t & hd_keychain::child_count() const { return m_child_count; } void hd_keychain::set_chain_code(const std::vector<std::uint8_t> & val) { m_chain_code = val; } const std::vector<std::uint8_t> & hd_keychain::chain_code() const { return m_chain_code; } void hd_keychain::set_key(const std::vector<std::uint8_t> & val) { m_key = val; } const std::vector<std::uint8_t> & hd_keychain::key() const { return m_key; } std::vector<std::uint8_t> hd_keychain::privkey() const { if (is_private() == true) { return std::vector<std::uint8_t> (m_key.begin() + 1, m_key.end()); } return std::vector<std::uint8_t> (); } void hd_keychain::set_pubkey(const std::vector<std::uint8_t> & val) { m_pubkey = val; } const std::vector<std::uint8_t> & hd_keychain::pubkey() const { return m_pubkey; } std::vector<std::uint8_t> hd_keychain::uncompressed_pubkey() const { hd_ecdsa::key k; k.set_public_key(m_pubkey); return k.get_public_key(false); } std::vector<std::uint8_t> hd_keychain::get_hash() const { auto digest = hash::sha256_ripemd160(&m_pubkey[0], m_pubkey.size()); return std::vector<std::uint8_t> (&digest[0], &digest[0] + digest.size()); } std::uint32_t hd_keychain::fingerprint() const { auto digest = get_hash(); return (std::uint32_t)digest[0] << 24 | (std::uint32_t)digest[1] << 16 | (std::uint32_t)digest[2] << 8 | (std::uint32_t)digest[3] ; } std::vector<std::uint8_t> hd_keychain::full_hash() const { std::vector<std::uint8_t> data(m_pubkey); data.insert(data.end(), m_chain_code.begin(), m_chain_code.end()); auto digest = hash::sha256_ripemd160(&data[0], data.size()); return std::vector<std::uint8_t> (&digest[0], &digest[0] + digest.size()); } hd_keychain hd_keychain::get_public() const { if (m_is_valid == false) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } hd_keychain ret; ret.set_is_valid(m_is_valid); ret.set_version(g_public_version); ret.set_depth(m_depth); ret.set_parent_fingerprint(m_parent_fingerprint); ret.set_child_count(m_child_count); ret.set_chain_code(m_chain_code); ret.set_key(m_pubkey); ret.set_pubkey(m_pubkey); return ret; } hd_keychain hd_keychain::get_child(const std::uint32_t & index) const { if (m_is_valid == false) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } bool priv_derivation = 0x80000000 & index; if (is_private() == false && priv_derivation) { throw std::runtime_error( std::string(__FUNCTION__) + ": tried to derive private key on public key." ); } hd_keychain ret; ret.set_is_valid(false); std::vector<std::uint8_t> data; data.insert( data.end(), (priv_derivation ? m_key : m_pubkey).begin(), (priv_derivation ? 
m_key : m_pubkey).end() ); data.push_back(index >> 24); data.push_back((index >> 16) & 0xff); data.push_back((index >> 8) & 0xff); data.push_back(index & 0xff); auto digest = crypto::hmac_sha512(m_chain_code, data); std::vector<std::uint8_t> l32(digest.begin(), digest.begin() + 32); big_number i_l; i_l.set_vector_no_reverse(l32); auto foo1 = i_l.get_hex(); auto foo2 = g_curve_order.get_hex(); if (i_l >= g_curve_order) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } if (is_private() == true) { big_number k; k.set_vector_no_reverse(m_key); k += i_l; k %= g_curve_order; if (k.is_zero() == true) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } auto child_key = k.get_vector_no_reverse(); std::vector<std::uint8_t> padded_key(33 - child_key.size(), 0); padded_key.insert( padded_key.end(), child_key.begin(), child_key.end() ); ret.set_key(padded_key); ret.update_public_key(); } else { hd_ecdsa::point point_k; point_k.set_bytes(m_pubkey); point_k.generator_mul(l32); if (point_k.is_at_infinity()) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } ret.set_key(point_k.bytes()); ret.set_pubkey(point_k.bytes()); } ret.set_version(m_version); ret.set_depth(m_depth + 1); ret.set_parent_fingerprint(fingerprint()); ret.set_child_count(index); ret.m_chain_code.assign(digest.begin() + 32, digest.end()); ret.set_is_valid(true); return ret; } hd_keychain hd_keychain::get_child(const std::string & path) const { if (path.size() == 0) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } std::vector<uint32_t> paths; std::size_t i = 0; std::uint64_t n = 0; while (i < path.size()) { char c = path[i]; if (c >= '0' && c <= '9') { n *= 10; n += (std::uint32_t)(c - '0'); if (n >= 0x80000000) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } i++; if (i >= path.size()) { paths.push_back((std::uint32_t)n); } } else if (c == '\'') { if (i + 1 < path.size()) { if ( (i + 2 >= path.size()) || (path[i + 1] != '/') ||<|fim▁hole|> ) { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } } n |= 0x80000000; paths.push_back((std::uint32_t)n); n = 0; i += 2; } else if (c == '/') { if (i + 1 >= path.size() || path[i + 1] < '0' || path[i + 1] > '9') { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } paths.push_back((std::uint32_t)n); n = 0; i++; } else { throw std::runtime_error( std::string(__FUNCTION__) + ": invalid hd_keychain" ); } } hd_keychain ret(*this); for (auto i : paths) { ret = ret.get_child(i); } return ret; } hd_keychain hd_keychain::get_child_node( const std::uint32_t & index, const bool & private_derivation ) const { std::uint32_t mask = private_derivation ? 0x80000000ull : 0x00000000ull; return get_child(mask).get_child(index); } std::vector<std::uint8_t> hd_keychain::get_private_signing_key( const std::uint32_t & index ) const { assert(index != 0); return get_child(index).privkey(); } std::vector<std::uint8_t> hd_keychain::get_public_signing_key( const std::uint32_t & index, const bool & compressed ) const { assert(index != 0); return compressed ? 
get_child(index).pubkey() : get_child(index).uncompressed_pubkey() ; } void hd_keychain::set_versions( const std::uint32_t & private_version, const std::uint32_t & public_version ) { g_private_version = private_version; g_public_version = public_version; } std::string hd_keychain::to_string() const { std::stringstream ss; ss << "hd_keychain: " << std::endl; ss << "\tversion: " << std::hex << m_version << std::endl; ss << "\tdepth: " << static_cast<std::int32_t> (depth()) << std::endl; ss << "\tparent_fingerprint: " << m_parent_fingerprint << std::endl; ss << "\tchild_num: " << m_child_count << std::endl; ss << "\tchain_code: " << utility::hex_string(m_chain_code) << std::endl; ss << "\tkey: " << utility::hex_string(m_key) << std::endl; ss << "\thash: " << utility::hex_string(get_hash()) << std::endl; return ss.str(); } void hd_keychain::update_public_key() { if (is_private() == true) { hd_ecdsa::key key_curve; key_curve.set_private_key( std::vector<std::uint8_t> (m_key.begin() + 1, m_key.end()) ); m_pubkey = key_curve.get_public_key(); } else { m_pubkey = m_key; } } inline std::uint32_t test_p(const std::uint32_t & i) { return 0x80000000 | i; } inline bool test_is_p(const std::uint32_t & i) { return 0x80000000 & i; } std::string test_s(const std::uint32_t & i) { std::stringstream ss; ss << (0x7fffffff & i); if (test_is_p(i)) { ss << "'"; } return ss.str(); } void test_show_key(const hd_keychain & keychain) { auto extended_key = keychain.extended_key(); base58 b58; b58.set_data( constants::test_net ? 111 : 0, reinterpret_cast<char *> (&extended_key[0]), extended_key.size() ); std::cout << " * ext " << (keychain.is_private() ? "prv" : "pub") << ": " << b58.to_string(false) << std::endl; } void test_show_step( const std::string & chainname, const hd_keychain & public_hd_keychain, const hd_keychain & private_hd_keychain ) { std::cout << "* [" << chainname << "]" << std::endl; test_show_key(public_hd_keychain); test_show_key(private_hd_keychain); } int hd_keychain::run_test() { #if 1 const std::vector<std::uint8_t> seed = utility::from_hex("000102030405060708090a0b0c0d0e0f") ; const std::uint32_t chain[] = { test_p(0), 1, test_p(2), 2, 1000000000 }; #else const std::vector<std::uint8_t> seed = utility::from_hex("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab" "7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754" "514e4b484542") ; const std::uint32_t chain[] = { 0, test_p(2147483647), 1, test_p(2147483646), 2 }; #endif const auto chain_length = sizeof(chain) / sizeof(std::uint32_t); try { hd_keychain::set_versions(0x0488ADE4, 0x0488B21E); std::cout << "Seed: " << utility::hex_string(seed) << std::endl; hd_keychain::seed hd_seed(seed); auto k = hd_seed.get_master_key(); auto c = hd_seed.get_master_chain_code(); std::stringstream chainname; chainname << "Chain m"; hd_keychain prv(k, c); hd_keychain pub = prv.get_public(); test_show_step(chainname.str(), pub, prv); hd_keychain parentpub; for (auto k = 0; k < chain_length; k++) { chainname << "/" << test_s(chain[k]); if (test_is_p(chain[k]) == false) { parentpub = pub; } prv = prv.get_child(chain[k]); assert(prv.is_valid()); pub = prv.get_public(); assert(pub.is_valid()); if (test_is_p(chain[k]) == false) { auto parentpubChild = parentpub.get_child(chain[k]); assert(pub == parentpubChild); } test_show_step(chainname.str(), pub, prv); } return 0; } catch (const std::exception & e) { std::cout << "Error: " << e.what() << std::endl; } return 0; }<|fim▁end|>
(path[i + 2] < '0') || (path[i + 2] > '9')
<|file_name|>default.py<|end_file_name|><|fim▁begin|># -*- coding: cp1254 -*- # please visit http://www.iptvxtra.net<|fim▁hole|>icondir = xbmc.translatePath("special://home/addons/plugin.audio.radio7ulm/icons/") plugin_handle = int(sys.argv[1]) def add_video_item(url, infolabels, img=''): listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img, thumbnailImage=img) listitem.setInfo('video', infolabels) listitem.setProperty('IsPlayable', 'true') xbmcplugin.addDirectoryItem(plugin_handle, url, listitem, isFolder=False) add_video_item('http://srv01.radio7.fmstreams.de/stream1/livestream.mp3',{ 'title': 'Radio 7 - Webradio'},img=icondir + 'radio-7_web.png') add_video_item('http://srv02.radio7.fmstreams.de/radio7_upa',{ 'title': 'Radio 7 - 80er'},img=icondir + 'radio-7_80er.png') add_video_item('http://srv02.radio7.fmstreams.de/radio7_downa',{ 'title': 'Radio 7 - Herz'},img=icondir + 'radio-7_herz.png') add_video_item('http://str0.creacast.com/radio7_acta',{ 'title': 'Radio 7 - OnTour'},img=icondir + 'radio-7_ontour.png') add_video_item('http://srv01.radio7.fmstreams.de/stream5/livestream.mp3',{ 'title': 'Radio 7 - Live'},img=icondir + 'radio-7_live.png') xbmcplugin.endOfDirectory(plugin_handle) xbmc.executebuiltin("Container.SetViewMode(500)")<|fim▁end|>
import xbmc,xbmcgui,xbmcplugin,sys
<|file_name|>call.go<|end_file_name|><|fim▁begin|>/* * * Copyright 2014, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above<|fim▁hole|> * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package grpc import ( "bytes" "io" "time" "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/transport" ) // recvResponse receives and parses an RPC response. // On error, it returns the error and indicates whether the call should be retried. // // TODO(zhaoq): Check whether the received message sequence is valid. func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // Try to acquire header metadata from the server if there is any. var err error c.headerMD, err = stream.Header() if err != nil { return err } p := &parser{r: stream} for { if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil { if err == io.EOF { break } return err } } c.trailerMD = stream.Trailer() return nil } // sendRequest writes out various information of an RPC such as Context and Message. func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { stream, err := t.NewStream(ctx, callHdr) if err != nil { return nil, err } defer func() { if err != nil { if _, ok := err.(transport.ConnectionError); !ok { t.CloseStream(stream, err) } } }() var cbuf *bytes.Buffer if compressor != nil { cbuf = new(bytes.Buffer) } outBuf, err := encode(codec, args, compressor, cbuf) if err != nil { return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) } err = t.Write(stream, outBuf, opts) if err != nil { return nil, err } // Sent successfully. return stream, nil } // Invoke is called by the generated code. It sends the RPC request on the // wire and returns after response is received. 
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { var c callInfo for _, o := range opts { if err := o.before(&c); err != nil { return toRPCErr(err) } } defer func() { for _, o := range opts { o.after(&c) } }() if EnableTracing { c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) defer c.traceInfo.tr.Finish() c.traceInfo.firstLine.client = true if deadline, ok := ctx.Deadline(); ok { c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) } c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. defer func() { if err != nil { c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) c.traceInfo.tr.SetError() } }() } topts := &transport.Options{ Last: true, Delay: false, } var ( lastErr error // record the error that happened ) for { var ( err error t transport.ClientTransport stream *transport.Stream ) // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. if lastErr != nil && c.failFast { return toRPCErr(lastErr) } callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, } if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } t, err = cc.dopts.picker.Pick(ctx) if err != nil { if lastErr != nil { // This was a retry; return the error from the last attempt. return toRPCErr(lastErr) } return toRPCErr(err) } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) } stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) if err != nil { if _, ok := err.(transport.ConnectionError); ok { lastErr = err continue } if lastErr != nil { return toRPCErr(lastErr) } return toRPCErr(err) } // Receive the response lastErr = recvResponse(cc.dopts, t, &c, stream, reply) if _, ok := lastErr.(transport.ConnectionError); ok { continue } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } t.CloseStream(stream, lastErr) if lastErr != nil { return toRPCErr(lastErr) } return Errorf(stream.StatusCode(), stream.StatusDesc()) } }<|fim▁end|>
<|file_name|>stack.rs<|end_file_name|><|fim▁begin|>// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use Buildable; use Container; use StackTransitionType; use Widget; use ffi;<|fim▁hole|>use glib::Value; use glib::object::Downcast; use glib::object::IsA; use glib::signal::SignalHandlerId; use glib::signal::connect; use glib::translate::*; use glib_ffi; use gobject_ffi; use std::boxed::Box as Box_; use std::mem; use std::mem::transmute; use std::ptr; glib_wrapper! { pub struct Stack(Object<ffi::GtkStack, ffi::GtkStackClass>): Container, Widget, Buildable; match fn { get_type => || ffi::gtk_stack_get_type(), } } impl Stack { #[cfg(any(feature = "v3_10", feature = "dox"))] pub fn new() -> Stack { assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(ffi::gtk_stack_new()).downcast_unchecked() } } } #[cfg(any(feature = "v3_10", feature = "dox"))] impl Default for Stack { fn default() -> Self { Self::new() } } pub trait StackExt { #[cfg(any(feature = "v3_10", feature = "dox"))] fn add_named<P: IsA<Widget>>(&self, child: &P, name: &str); #[cfg(any(feature = "v3_10", feature = "dox"))] fn add_titled<P: IsA<Widget>>(&self, child: &P, name: &str, title: &str); #[cfg(any(feature = "v3_12", feature = "dox"))] fn get_child_by_name(&self, name: &str) -> Option<Widget>; #[cfg(any(feature = "v3_16", feature = "dox"))] fn get_hhomogeneous(&self) -> bool; #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_homogeneous(&self) -> bool; #[cfg(any(feature = "v3_18", feature = "dox"))] fn get_interpolate_size(&self) -> bool; #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_transition_duration(&self) -> u32; #[cfg(any(feature = "v3_12", feature = "dox"))] fn get_transition_running(&self) -> bool; #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_transition_type(&self) -> StackTransitionType; #[cfg(any(feature = "v3_16", feature = "dox"))] fn get_vhomogeneous(&self) -> bool; #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_visible_child(&self) -> Option<Widget>; #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_visible_child_name(&self) -> Option<String>; #[cfg(any(feature = "v3_16", feature = "dox"))] fn set_hhomogeneous(&self, hhomogeneous: bool); #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_homogeneous(&self, homogeneous: bool); #[cfg(any(feature = "v3_18", feature = "dox"))] fn set_interpolate_size(&self, interpolate_size: bool); #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_transition_duration(&self, duration: u32); #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_transition_type(&self, transition: StackTransitionType); #[cfg(any(feature = "v3_16", feature = "dox"))] fn set_vhomogeneous(&self, vhomogeneous: bool); #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_visible_child<P: IsA<Widget>>(&self, child: &P); #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_visible_child_full(&self, name: &str, transition: StackTransitionType); #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_visible_child_name(&self, name: &str); fn get_property_homogeneous(&self) -> bool; fn set_property_homogeneous(&self, homogeneous: bool); fn get_property_interpolate_size(&self) -> bool; fn set_property_interpolate_size(&self, interpolate_size: bool); fn get_property_transition_duration(&self) -> u32; fn set_property_transition_duration(&self, transition_duration: u32); fn get_property_transition_running(&self) -> bool; fn get_property_transition_type(&self) -> 
StackTransitionType; fn set_property_transition_type(&self, transition_type: StackTransitionType); fn get_property_visible_child(&self) -> Option<Widget>; fn set_property_visible_child<P: IsA<Widget> + IsA<glib::object::Object> + glib::value::SetValueOptional>(&self, visible_child: Option<&P>); fn get_property_visible_child_name(&self) -> Option<String>; fn set_property_visible_child_name(&self, visible_child_name: Option<&str>); fn get_child_icon_name<T: IsA<Widget>>(&self, item: &T) -> Option<String>; fn set_child_icon_name<'a, P: Into<Option<&'a str>>, T: IsA<Widget>>(&self, item: &T, icon_name: P); fn get_child_name<T: IsA<Widget>>(&self, item: &T) -> Option<String>; fn set_child_name<'a, P: Into<Option<&'a str>>, T: IsA<Widget>>(&self, item: &T, name: P); fn get_child_needs_attention<T: IsA<Widget>>(&self, item: &T) -> bool; fn set_child_needs_attention<T: IsA<Widget>>(&self, item: &T, needs_attention: bool); fn get_child_position<T: IsA<Widget>>(&self, item: &T) -> i32; fn set_child_position<T: IsA<Widget>>(&self, item: &T, position: i32); fn get_child_title<T: IsA<Widget>>(&self, item: &T) -> Option<String>; fn set_child_title<'a, P: Into<Option<&'a str>>, T: IsA<Widget>>(&self, item: &T, title: P); #[cfg(any(feature = "v3_16", feature = "dox"))] fn connect_property_hhomogeneous_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_homogeneous_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_interpolate_size_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_transition_duration_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_transition_running_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_transition_type_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[cfg(any(feature = "v3_16", feature = "dox"))] fn connect_property_vhomogeneous_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_visible_child_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_visible_child_name_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<Stack> + IsA<Container> + IsA<glib::object::Object>> StackExt for O { #[cfg(any(feature = "v3_10", feature = "dox"))] fn add_named<P: IsA<Widget>>(&self, child: &P, name: &str) { unsafe { ffi::gtk_stack_add_named(self.to_glib_none().0, child.to_glib_none().0, name.to_glib_none().0); } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn add_titled<P: IsA<Widget>>(&self, child: &P, name: &str, title: &str) { unsafe { ffi::gtk_stack_add_titled(self.to_glib_none().0, child.to_glib_none().0, name.to_glib_none().0, title.to_glib_none().0); } } #[cfg(any(feature = "v3_12", feature = "dox"))] fn get_child_by_name(&self, name: &str) -> Option<Widget> { unsafe { from_glib_none(ffi::gtk_stack_get_child_by_name(self.to_glib_none().0, name.to_glib_none().0)) } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn get_hhomogeneous(&self) -> bool { unsafe { from_glib(ffi::gtk_stack_get_hhomogeneous(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_homogeneous(&self) -> bool { unsafe { from_glib(ffi::gtk_stack_get_homogeneous(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_18", feature = "dox"))] fn get_interpolate_size(&self) -> bool { unsafe { from_glib(ffi::gtk_stack_get_interpolate_size(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn 
get_transition_duration(&self) -> u32 { unsafe { ffi::gtk_stack_get_transition_duration(self.to_glib_none().0) } } #[cfg(any(feature = "v3_12", feature = "dox"))] fn get_transition_running(&self) -> bool { unsafe { from_glib(ffi::gtk_stack_get_transition_running(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_transition_type(&self) -> StackTransitionType { unsafe { from_glib(ffi::gtk_stack_get_transition_type(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn get_vhomogeneous(&self) -> bool { unsafe { from_glib(ffi::gtk_stack_get_vhomogeneous(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_visible_child(&self) -> Option<Widget> { unsafe { from_glib_none(ffi::gtk_stack_get_visible_child(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn get_visible_child_name(&self) -> Option<String> { unsafe { from_glib_none(ffi::gtk_stack_get_visible_child_name(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn set_hhomogeneous(&self, hhomogeneous: bool) { unsafe { ffi::gtk_stack_set_hhomogeneous(self.to_glib_none().0, hhomogeneous.to_glib()); } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_homogeneous(&self, homogeneous: bool) { unsafe { ffi::gtk_stack_set_homogeneous(self.to_glib_none().0, homogeneous.to_glib()); } } #[cfg(any(feature = "v3_18", feature = "dox"))] fn set_interpolate_size(&self, interpolate_size: bool) { unsafe { ffi::gtk_stack_set_interpolate_size(self.to_glib_none().0, interpolate_size.to_glib()); } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_transition_duration(&self, duration: u32) { unsafe { ffi::gtk_stack_set_transition_duration(self.to_glib_none().0, duration); } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_transition_type(&self, transition: StackTransitionType) { unsafe { ffi::gtk_stack_set_transition_type(self.to_glib_none().0, transition.to_glib()); } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn set_vhomogeneous(&self, vhomogeneous: bool) { unsafe { ffi::gtk_stack_set_vhomogeneous(self.to_glib_none().0, vhomogeneous.to_glib()); } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_visible_child<P: IsA<Widget>>(&self, child: &P) { unsafe { ffi::gtk_stack_set_visible_child(self.to_glib_none().0, child.to_glib_none().0); } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_visible_child_full(&self, name: &str, transition: StackTransitionType) { unsafe { ffi::gtk_stack_set_visible_child_full(self.to_glib_none().0, name.to_glib_none().0, transition.to_glib()); } } #[cfg(any(feature = "v3_10", feature = "dox"))] fn set_visible_child_name(&self, name: &str) { unsafe { ffi::gtk_stack_set_visible_child_name(self.to_glib_none().0, name.to_glib_none().0); } } fn get_property_homogeneous(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_ffi::g_object_get_property(self.to_glib_none().0, "homogeneous".to_glib_none().0, value.to_glib_none_mut().0); value.get().unwrap() } } fn set_property_homogeneous(&self, homogeneous: bool) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "homogeneous".to_glib_none().0, Value::from(&homogeneous).to_glib_none().0); } } fn get_property_interpolate_size(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_ffi::g_object_get_property(self.to_glib_none().0, "interpolate-size".to_glib_none().0, value.to_glib_none_mut().0); 
value.get().unwrap() } } fn set_property_interpolate_size(&self, interpolate_size: bool) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "interpolate-size".to_glib_none().0, Value::from(&interpolate_size).to_glib_none().0); } } fn get_property_transition_duration(&self) -> u32 { unsafe { let mut value = Value::from_type(<u32 as StaticType>::static_type()); gobject_ffi::g_object_get_property(self.to_glib_none().0, "transition-duration".to_glib_none().0, value.to_glib_none_mut().0); value.get().unwrap() } } fn set_property_transition_duration(&self, transition_duration: u32) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "transition-duration".to_glib_none().0, Value::from(&transition_duration).to_glib_none().0); } } fn get_property_transition_running(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_ffi::g_object_get_property(self.to_glib_none().0, "transition-running".to_glib_none().0, value.to_glib_none_mut().0); value.get().unwrap() } } fn get_property_transition_type(&self) -> StackTransitionType { unsafe { let mut value = Value::from_type(<StackTransitionType as StaticType>::static_type()); gobject_ffi::g_object_get_property(self.to_glib_none().0, "transition-type".to_glib_none().0, value.to_glib_none_mut().0); value.get().unwrap() } } fn set_property_transition_type(&self, transition_type: StackTransitionType) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "transition-type".to_glib_none().0, Value::from(&transition_type).to_glib_none().0); } } fn get_property_visible_child(&self) -> Option<Widget> { unsafe { let mut value = Value::from_type(<Widget as StaticType>::static_type()); gobject_ffi::g_object_get_property(self.to_glib_none().0, "visible-child".to_glib_none().0, value.to_glib_none_mut().0); value.get() } } fn set_property_visible_child<P: IsA<Widget> + IsA<glib::object::Object> + glib::value::SetValueOptional>(&self, visible_child: Option<&P>) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "visible-child".to_glib_none().0, Value::from(visible_child).to_glib_none().0); } } fn get_property_visible_child_name(&self) -> Option<String> { unsafe { let mut value = Value::from_type(<String as StaticType>::static_type()); gobject_ffi::g_object_get_property(self.to_glib_none().0, "visible-child-name".to_glib_none().0, value.to_glib_none_mut().0); value.get() } } fn set_property_visible_child_name(&self, visible_child_name: Option<&str>) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "visible-child-name".to_glib_none().0, Value::from(visible_child_name).to_glib_none().0); } } fn get_child_icon_name<T: IsA<Widget>>(&self, item: &T) -> Option<String> { unsafe { let mut value = Value::from_type(<String as StaticType>::static_type()); ffi::gtk_container_child_get_property(self.to_glib_none().0, item.to_glib_none().0, "icon-name".to_glib_none().0, value.to_glib_none_mut().0); value.get() } } fn set_child_icon_name<'a, P: Into<Option<&'a str>>, T: IsA<Widget>>(&self, item: &T, icon_name: P) { let icon_name = icon_name.into(); unsafe { ffi::gtk_container_child_set_property(self.to_glib_none().0, item.to_glib_none().0, "icon-name".to_glib_none().0, Value::from(icon_name).to_glib_none().0); } } fn get_child_name<T: IsA<Widget>>(&self, item: &T) -> Option<String> { unsafe { let mut value = Value::from_type(<String as StaticType>::static_type()); ffi::gtk_container_child_get_property(self.to_glib_none().0, item.to_glib_none().0, 
"name".to_glib_none().0, value.to_glib_none_mut().0); value.get() } } fn set_child_name<'a, P: Into<Option<&'a str>>, T: IsA<Widget>>(&self, item: &T, name: P) { let name = name.into(); unsafe { ffi::gtk_container_child_set_property(self.to_glib_none().0, item.to_glib_none().0, "name".to_glib_none().0, Value::from(name).to_glib_none().0); } } fn get_child_needs_attention<T: IsA<Widget>>(&self, item: &T) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); ffi::gtk_container_child_get_property(self.to_glib_none().0, item.to_glib_none().0, "needs-attention".to_glib_none().0, value.to_glib_none_mut().0); value.get().unwrap() } } fn set_child_needs_attention<T: IsA<Widget>>(&self, item: &T, needs_attention: bool) { unsafe { ffi::gtk_container_child_set_property(self.to_glib_none().0, item.to_glib_none().0, "needs-attention".to_glib_none().0, Value::from(&needs_attention).to_glib_none().0); } } fn get_child_position<T: IsA<Widget>>(&self, item: &T) -> i32 { unsafe { let mut value = Value::from_type(<i32 as StaticType>::static_type()); ffi::gtk_container_child_get_property(self.to_glib_none().0, item.to_glib_none().0, "position".to_glib_none().0, value.to_glib_none_mut().0); value.get().unwrap() } } fn set_child_position<T: IsA<Widget>>(&self, item: &T, position: i32) { unsafe { ffi::gtk_container_child_set_property(self.to_glib_none().0, item.to_glib_none().0, "position".to_glib_none().0, Value::from(&position).to_glib_none().0); } } fn get_child_title<T: IsA<Widget>>(&self, item: &T) -> Option<String> { unsafe { let mut value = Value::from_type(<String as StaticType>::static_type()); ffi::gtk_container_child_get_property(self.to_glib_none().0, item.to_glib_none().0, "title".to_glib_none().0, value.to_glib_none_mut().0); value.get() } } fn set_child_title<'a, P: Into<Option<&'a str>>, T: IsA<Widget>>(&self, item: &T, title: P) { let title = title.into(); unsafe { ffi::gtk_container_child_set_property(self.to_glib_none().0, item.to_glib_none().0, "title".to_glib_none().0, Value::from(title).to_glib_none().0); } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn connect_property_hhomogeneous_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::hhomogeneous", transmute(notify_hhomogeneous_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } fn connect_property_homogeneous_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::homogeneous", transmute(notify_homogeneous_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } fn connect_property_interpolate_size_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::interpolate-size", transmute(notify_interpolate_size_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } fn connect_property_transition_duration_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::transition-duration", transmute(notify_transition_duration_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } fn connect_property_transition_running_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { 
let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::transition-running", transmute(notify_transition_running_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } fn connect_property_transition_type_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::transition-type", transmute(notify_transition_type_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn connect_property_vhomogeneous_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::vhomogeneous", transmute(notify_vhomogeneous_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } fn connect_property_visible_child_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::visible-child", transmute(notify_visible_child_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } fn connect_property_visible_child_name_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe { let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f)); connect(self.to_glib_none().0, "notify::visible-child-name", transmute(notify_visible_child_name_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _) } } } #[cfg(any(feature = "v3_16", feature = "dox"))] unsafe extern "C" fn notify_hhomogeneous_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } unsafe extern "C" fn notify_homogeneous_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } unsafe extern "C" fn notify_interpolate_size_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } unsafe extern "C" fn notify_transition_duration_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } unsafe extern "C" fn notify_transition_running_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } unsafe extern "C" fn notify_transition_type_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } #[cfg(any(feature = "v3_16", feature = "dox"))] unsafe extern "C" fn notify_vhomogeneous_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } unsafe extern "C" fn 
notify_visible_child_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) } unsafe extern "C" fn notify_visible_child_name_trampoline<P>(this: *mut ffi::GtkStack, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer) where P: IsA<Stack> { let f: &&(Fn(&P) + 'static) = transmute(f); f(&Stack::from_glib_borrow(this).downcast_unchecked()) }<|fim▁end|>
use glib;
use glib::StaticType;
<|file_name|>asdf.rs<|end_file_name|><|fim▁begin|>// compiler switches
#![allow(
    unused_imports,
    unused_variables,
)]

// libraries
extern crate matasano;

// imports
use std::collections::BTreeMap;

// local imports
use matasano::bytes::*;
<|fim▁hole|>
    let blocks = blockify( &bytes, 5);
    for block in blocks {
        println!( "block: {:?}", block);}
}

fn blockify( bytes: &[u8], size: usize) -> Vec<Bytes> {
    let mut blocks = vec![ Bytes::new(); size];
    for (&byte, i) in bytes.iter().zip( (0..size).cycle()) {
        blocks[i].push( byte);}
    println!( "blocks size: {}", blocks.len());
    println!( "blocks[0] size: {}", blocks[0].len());
    return blocks;}<|fim▁end|>
fn main(){
    let bytes : Bytes = (0..80).collect();
<|file_name|>Sentence_Terminal-code-points.js<|end_file_name|><|fim▁begin|>// All code points with the `Sentence_Terminal` property as per Unicode v9.0.0: [ 0x21, 0x2E, 0x3F, 0x589, 0x61F, 0x6D4, 0x700, 0x701, 0x702, 0x7F9, 0x964, 0x965, 0x104A, 0x104B, 0x1362, 0x1367, 0x1368, 0x166E, 0x1735, 0x1736, 0x1803, 0x1809, 0x1944, 0x1945, 0x1AA8, 0x1AA9, 0x1AAA, 0x1AAB, 0x1B5A, 0x1B5B, 0x1B5E, 0x1B5F, 0x1C3B, 0x1C3C, 0x1C7E, 0x1C7F, 0x203C, 0x203D, 0x2047, 0x2048, 0x2049, 0x2E2E, 0x2E3C,<|fim▁hole|> 0xA4FF, 0xA60E, 0xA60F, 0xA6F3, 0xA6F7, 0xA876, 0xA877, 0xA8CE, 0xA8CF, 0xA92F, 0xA9C8, 0xA9C9, 0xAA5D, 0xAA5E, 0xAA5F, 0xAAF0, 0xAAF1, 0xABEB, 0xFE52, 0xFE56, 0xFE57, 0xFF01, 0xFF0E, 0xFF1F, 0xFF61, 0x10A56, 0x10A57, 0x11047, 0x11048, 0x110BE, 0x110BF, 0x110C0, 0x110C1, 0x11141, 0x11142, 0x11143, 0x111C5, 0x111C6, 0x111CD, 0x111DE, 0x111DF, 0x11238, 0x11239, 0x1123B, 0x1123C, 0x112A9, 0x1144B, 0x1144C, 0x115C2, 0x115C3, 0x115C9, 0x115CA, 0x115CB, 0x115CC, 0x115CD, 0x115CE, 0x115CF, 0x115D0, 0x115D1, 0x115D2, 0x115D3, 0x115D4, 0x115D5, 0x115D6, 0x115D7, 0x11641, 0x11642, 0x1173C, 0x1173D, 0x1173E, 0x11C41, 0x11C42, 0x16A6E, 0x16A6F, 0x16AF5, 0x16B37, 0x16B38, 0x16B44, 0x1BC9F, 0x1DA88 ];<|fim▁end|>
0x3002,
<|file_name|>client.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union from google.api_core import client_options as client_options_lib from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.ads.googleads.v9.resources.types import account_budget_proposal from google.ads.googleads.v9.services.types import ( account_budget_proposal_service, ) from .transports.base import ( AccountBudgetProposalServiceTransport, DEFAULT_CLIENT_INFO, ) from .transports.grpc import AccountBudgetProposalServiceGrpcTransport class AccountBudgetProposalServiceClientMeta(type): """Metaclass for the AccountBudgetProposalService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[AccountBudgetProposalServiceTransport]] _transport_registry["grpc"] = AccountBudgetProposalServiceGrpcTransport def get_transport_class( cls, label: str = None, ) -> Type[AccountBudgetProposalServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class AccountBudgetProposalServiceClient( metaclass=AccountBudgetProposalServiceClientMeta ): """A service for managing account-level budgets via proposals. A proposal is a request to create a new budget or make changes to an existing one. Reads for account-level budgets managed by these proposals will be supported in a future version. Until then, please use the BudgetOrderService from the AdWords API. Learn more at https://developers.google.com/adwords/api/docs/guides/budget- order Mutates: The CREATE operation creates a new proposal. UPDATE operations aren't supported. The REMOVE operation cancels a pending proposal. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. 
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "googleads.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT )<|fim▁hole|> @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AccountBudgetProposalServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info( info ) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AccountBudgetProposalServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename ) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> AccountBudgetProposalServiceTransport: """Return the transport used by the client instance. Returns: AccountBudgetProposalServiceTransport: The transport used by the client instance. """ return self._transport def __enter__(self): return self def __exit__(self, type, value, traceback): """Releases underlying transport's resources. .. warning:: ONLY use as a context manager if the transport is NOT shared with other clients! Exiting the with block will CLOSE the transport and may cause errors in other clients! 
""" self.transport.close() @staticmethod def account_budget_path(customer_id: str, account_budget_id: str,) -> str: """Return a fully-qualified account_budget string.""" return "customers/{customer_id}/accountBudgets/{account_budget_id}".format( customer_id=customer_id, account_budget_id=account_budget_id, ) @staticmethod def parse_account_budget_path(path: str) -> Dict[str, str]: """Parse a account_budget path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/accountBudgets/(?P<account_budget_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def account_budget_proposal_path( customer_id: str, account_budget_proposal_id: str, ) -> str: """Return a fully-qualified account_budget_proposal string.""" return "customers/{customer_id}/accountBudgetProposals/{account_budget_proposal_id}".format( customer_id=customer_id, account_budget_proposal_id=account_budget_proposal_id, ) @staticmethod def parse_account_budget_proposal_path(path: str) -> Dict[str, str]: """Parse a account_budget_proposal path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/accountBudgetProposals/(?P<account_budget_proposal_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def billing_setup_path(customer_id: str, billing_setup_id: str,) -> str: """Return a fully-qualified billing_setup string.""" return "customers/{customer_id}/billingSetups/{billing_setup_id}".format( customer_id=customer_id, billing_setup_id=billing_setup_id, ) @staticmethod def parse_billing_setup_path(path: str) -> Dict[str, str]: """Parse a billing_setup path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/billingSetups/(?P<billing_setup_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Return a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Return a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: """Return a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Return a fully-qualified location string.""" return 
"projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path ) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[ str, AccountBudgetProposalServiceTransport, None ] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the account budget proposal service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.AccountBudgetProposalServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( "true", "false", ): raise ValueError( "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" ) use_client_cert = ( os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" ) ssl_credentials = None is_mtls = False if use_client_cert: if client_options.client_cert_source: import grpc # type: ignore cert, key = client_options.client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) is_mtls = True else: creds = SslCredentials() is_mtls = creds.is_mtls ssl_credentials = creds.ssl_credentials if is_mtls else None # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, AccountBudgetProposalServiceTransport): # transport is a AccountBudgetProposalServiceTransport instance. if credentials: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) self._transport = transport elif isinstance(transport, str): Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, host=self.DEFAULT_ENDPOINT ) else: self._transport = AccountBudgetProposalServiceGrpcTransport( credentials=credentials, host=api_endpoint, ssl_channel_credentials=ssl_credentials, client_info=client_info, ) def get_account_budget_proposal( self, request: Union[ account_budget_proposal_service.GetAccountBudgetProposalRequest, dict, ] = None, *, resource_name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> account_budget_proposal.AccountBudgetProposal: r"""Returns an account-level budget proposal in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Args: request (Union[google.ads.googleads.v9.services.types.GetAccountBudgetProposalRequest, dict]): The request object. Request message for [AccountBudgetProposalService.GetAccountBudgetProposal][google.ads.googleads.v9.services.AccountBudgetProposalService.GetAccountBudgetProposal]. resource_name (:class:`str`): Required. The resource name of the account-level budget proposal to fetch. This corresponds to the ``resource_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v9.resources.types.AccountBudgetProposal: An account-level budget proposal. All fields prefixed with 'proposed' may not necessarily be applied directly. For example, proposed spending limits may be adjusted before their application. This is true if the 'proposed' field has an 'approved' counterpart, e.g. spending limits. Please note that the proposal type (proposal_type) changes which fields are required and which must remain empty. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([resource_name]): raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) # Minor optimization to avoid making a copy if the user passes # in a account_budget_proposal_service.GetAccountBudgetProposalRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, account_budget_proposal_service.GetAccountBudgetProposalRequest, ): request = account_budget_proposal_service.GetAccountBudgetProposalRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if resource_name is not None: request.resource_name = resource_name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.get_account_budget_proposal ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("resource_name", request.resource_name),) ), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def mutate_account_budget_proposal( self, request: Union[ account_budget_proposal_service.MutateAccountBudgetProposalRequest, dict, ] = None, *, customer_id: str = None, operation: account_budget_proposal_service.AccountBudgetProposalOperation = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> account_budget_proposal_service.MutateAccountBudgetProposalResponse: r"""Creates, updates, or removes account budget proposals. Operation statuses are returned. List of thrown errors: `AccountBudgetProposalError <>`__ `AuthenticationError <>`__ `AuthorizationError <>`__ `DatabaseError <>`__ `DateError <>`__ `FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__ `InternalError <>`__ `MutateError <>`__ `QuotaError <>`__ `RequestError <>`__ `StringLengthError <>`__ Args: request (Union[google.ads.googleads.v9.services.types.MutateAccountBudgetProposalRequest, dict]): The request object. Request message for [AccountBudgetProposalService.MutateAccountBudgetProposal][google.ads.googleads.v9.services.AccountBudgetProposalService.MutateAccountBudgetProposal]. customer_id (:class:`str`): Required. The ID of the customer. This corresponds to the ``customer_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. operation (:class:`google.ads.googleads.v9.services.types.AccountBudgetProposalOperation`): Required. The operation to perform on an individual account-level budget proposal. This corresponds to the ``operation`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v9.services.types.MutateAccountBudgetProposalResponse: Response message for account-level budget mutate operations. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([customer_id, operation]): raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) # Minor optimization to avoid making a copy if the user passes # in a account_budget_proposal_service.MutateAccountBudgetProposalRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, account_budget_proposal_service.MutateAccountBudgetProposalRequest, ): request = account_budget_proposal_service.MutateAccountBudgetProposalRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if customer_id is not None: request.customer_id = customer_id if operation is not None: request.operation = operation # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.mutate_account_budget_proposal ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("customer_id", request.customer_id),) ), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response __all__ = ("AccountBudgetProposalServiceClient",)<|fim▁end|>
<|file_name|>hub_api.rs<|end_file_name|><|fim▁begin|>// This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. //! Implementation of the Philips Hue API //! //! This module is used in various places, for example in the Hub //! objects and in the Light objects. use serde_json; use std; use std::collections::BTreeMap; use std::error::Error; use super::http; use super::structs; #[derive(Debug, Clone)] pub struct HubApi { pub id: String, pub ip: String, pub token: String, } impl std::fmt::Display for HubApi { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "Hue Bridge id:{} at {:?}", self.id, self.ip) } } impl HubApi { pub fn new(id: &str, ip: &str, token: &str) -> HubApi { HubApi { id: id.to_owned(), ip: ip.to_owned(), token: token.to_owned(), } } pub fn update_token(&mut self, token: &str) { self.token = token.to_owned(); } pub fn get(&self, cmd: &str) -> Result<String, Box<Error>> { let url = format!("http://{}/api/{}/{}", self.ip, self.token, cmd); debug!("GET request to Philips Hue bridge {}: {}", self.id, url); let content = http::get(&url); trace!("Philips Hue API response: {:?}", content); content } #[allow(dead_code)] pub fn post(&self, cmd: &str, data: &str) -> Result<String, Box<Error>> { let url = format!("http://{}/api/{}/{}", self.ip, self.token, cmd); debug!("POST request to Philips Hue bridge {}: {} data: {}", self.id, url, data); let content = http::post(&url, data); trace!("Philips Hue API response: {:?}", content); content } pub fn post_unauth(&self, cmd: &str, data: &str) -> Result<String, Box<Error>> { let url = format!("http://{}/{}", self.ip, cmd); debug!("POST request to Philips Hue bridge {}: {} data: {}", self.id, url, data); let content = http::post(&url, data); trace!("Philips Hue API response: {:?}", content); content } pub fn put(&self, cmd: &str, data: &str) -> Result<String, Box<Error>> { let url = format!("http://{}/api/{}/{}", self.ip, self.token, cmd); debug!("PUT request to Philips Hue bridge {}: {} data: {}", self.id, url, data); let content = http::put(&url, data); trace!("Philips Hue API response: {:?}", content); content } pub fn is_available(&self) -> bool { let url = format!("http://{}/", self.ip); let content = http::get(&url); match content { Ok(value) => value.contains("hue personal wireless lighting"), Err(_) => false, } } pub fn get_settings(&self) -> String { // [{"error":{"type":1,"address":"/","description":"unauthorized user"}}] self.get("").unwrap_or("".to_owned()) // TODO no unwrap } pub fn is_paired(&self) -> bool { let settings = self.get_settings(); !settings.contains("unauthorized user") } pub fn try_pairing(&self) -> Result<Option<String>, ()> { #[derive(Deserialize, Debug)] struct PairingResponse { success: Option<SuccessResponse>, error: Option<ErrorResponse>, } #[derive(Deserialize, Debug)] struct SuccessResponse { username: String, } #[derive(Deserialize, Debug)] struct ErrorResponse { #[serde(rename="type")] error_type: u32, address: String, description: String, } let url = "api"; let req = json!({ devicetype: "foxbox_hub"}); let response = self.post_unauth(&url, &req).unwrap_or("[]".to_owned()); let mut response: Vec<PairingResponse> = structs::parse_json(&response) .unwrap_or(Vec::new()); if response.len() != 1 { error!("Pairing request to Philips Hue bridge {} yielded unexpected response", self.id); return Err(()); } let response = match response.pop() { 
Some(response) => response, None => return Err(()), }; if let Some(success) = response.success { Ok(Some(success.username)) } else { if let Some(error) = response.error { if error.description.contains("link button not pressed") { debug!("Push pairing button on Philips Hue bridge {}", self.id); Ok(None) } else { error!("Error while pairing with Philips Hue bridge {}: {}", self.id, error.description); Err(()) } } else { error!("Pairing request to Philips Hue bridge {} \ yielded unexpected response", self.id); Err(()) } } } pub fn get_lights(&self) -> Vec<String> { let mut lights: Vec<String> = Vec::new(); let url = "lights"; let res = self.get(url).unwrap(); // TODO: remove unwrap let json: BTreeMap<String, structs::SettingsLightEntry> = structs::parse_json(&res) .unwrap(); // TODO: no unwrap for key in json.keys() { lights.push(key.to_owned()); } lights } pub fn get_light_status(&self, id: &str) -> structs::SettingsLightEntry { let url = format!("lights/{}", id); let res = self.get(&url).unwrap(); // TODO: remove unwrap<|fim▁hole|> pub fn set_light_power(&self, light_id: &str, on: bool) { let url = format!("lights/{}/state", light_id); let cmd = json!({ on: on }); let _ = self.put(&url, &cmd); } pub fn set_light_color(&self, light_id: &str, hsv: (u32, u32, u32)) { let (hue, sat, val) = hsv; let url = format!("lights/{}/state", light_id); let cmd = json!({ hue: hue, sat: sat, bri: val }); let _ = self.put(&url, &cmd); } pub fn set_light_brightness(&self, light_id: &str, bri: u32) { let url = format!("lights/{}/state", light_id); let cmd = json!({ bri: bri }); let _ = self.put(&url, &cmd); } }<|fim▁end|>
structs::parse_json(&res).unwrap() // TODO no unwrap
    }
<|file_name|>region-object-lifetime-5.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Various tests related to testing how region inference works
// with respect to the object receivers.

trait Foo {
    fn borrowed<'a>(&'a self) -> &'a ();<|fim▁hole|>}

// Here, the object is bounded by an anonymous lifetime and returned
// as `&'static`, so you get an error.
fn owned_receiver(x: Box<Foo>) -> &'static () {
    x.borrowed() //~ ERROR `*x` does not live long enough
}

fn main() {}<|fim▁end|>
<|file_name|>bcp.py<|end_file_name|><|fim▁begin|>import utils
import os
import shutil
import sys

def go( boost_root ):

    OUTPUT = "src/third_party/boost"

    if os.path.exists( OUTPUT ):
        shutil.rmtree( OUTPUT )

    cmd = [ "bcp" , "--scan" , "--boost=%s" % boost_root ]

    src = utils.getAllSourceFiles()

    cmd += src
    cmd.append( OUTPUT )

    if not os.path.exists( OUTPUT ):
        os.makedirs( OUTPUT )
<|fim▁hole|>
    res = utils.execsys( cmd )

    out = open( OUTPUT + "/bcp-out.txt" , 'w' )
    out.write( res[0] )
    out.close()

    out = open( OUTPUT + "/notes.txt" , 'w' )
    out.write( "command: " + " ".join( cmd ) )
    out.close()

    print( res[1] )

if __name__ == "__main__":
    if len(sys.argv) == 1:
        print( "usage: python %s <boost root directory>" % sys.argv[0] )
        sys.exit(1)
    go( sys.argv[1] )<|fim▁end|>
<|file_name|>test_keyboard_interrupt.py<|end_file_name|><|fim▁begin|># Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test management of KeyboardInterrupt in stratisd. """<|fim▁hole|>import stratis_cli from .._misc import SimTestCase class KeyboardInterruptTestCase(SimTestCase): """ Test behavior of stratis on KeyboardInterrupt. """ def test_catch_keyboard_exception(self): """ Verify that the KeyboardInterrupt is propagated by the run() method. ./bin/stratis contains a try block at the outermost level which then catches the KeyboardInterrupt and exits with an error message. The KeyboardInterrupt is most likely raised in the dbus-python method which is actually communicating on the D-Bus, but it is fairly difficult to get at that method. Instead settle for getting at the calling method generated by dbus-python-client-gen. """ def raise_keyboard_interrupt(_): """ Just raise the interrupt. """ raise KeyboardInterrupt() # pylint: disable=import-outside-toplevel # isort: LOCAL from stratis_cli._actions import _data # pylint: disable=protected-access stratis_cli._actions._data.Manager.Properties.Version.Get = ( raise_keyboard_interrupt ) with self.assertRaises(KeyboardInterrupt): stratis_cli.run()(["daemon", "version"])<|fim▁end|>
# isort: LOCAL
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ use super::module::IRModule; use super::span::*; use crate::runtime::function::Result; use crate::runtime::object::{Object, ObjectPtr}; use crate::runtime::{ array::Array, function::{self, Function, ToFunction}, string::String as TString, }; /// The diagnostic interface to TVM, used for reporting and rendering /// diagnostic information by the compiler. This module exposes /// three key abstractions: a Diagnostic, the DiagnosticContext, /// and the DiagnosticRenderer. use tvm_macros::{external, Object}; pub mod codespan; external! { #[name("runtime.ArrayGetItem")] fn get_renderer() -> DiagnosticRenderer; #[name("diagnostics.DiagnosticRenderer")] fn diagnostic_renderer(func: Function) -> DiagnosticRenderer; #[name("diagnostics.Emit")] fn emit(ctx: DiagnosticContext, diagnostic: Diagnostic) -> (); #[name("diagnostics.DiagnosticContextDefault")] fn diagnostic_context_default(module: IRModule) -> DiagnosticContext; #[name("diagnostics.DiagnosticContextRender")] fn diagnostic_context_render(ctx: DiagnosticContext) -> (); #[name("diagnostics.DiagnosticRendererRender")] fn diagnositc_renderer_render(renderer: DiagnosticRenderer, ctx: DiagnosticContext) -> (); #[name("diagnostics.ClearRenderer")] fn clear_renderer() -> (); } /// The diagnostic level, controls the printing of the message. #[repr(C)] #[derive(PartialEq, Eq, Debug)] pub enum DiagnosticLevel { Bug = 10, Error = 20, Warning = 30, Note = 40, Help = 50, } /// A compiler diagnostic. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "Diagnostic"] #[type_key = "Diagnostic"] pub struct DiagnosticNode { pub base: Object, /// The level. pub level: DiagnosticLevel, /// The span at which to report an error. pub span: Span, /// The diagnostic message. pub message: TString, } impl Diagnostic { pub fn new(level: DiagnosticLevel, span: Span, message: TString) -> Diagnostic { let node = DiagnosticNode { base: Object::base::<DiagnosticNode>(), level, span, message, }; ObjectPtr::new(node).into() } pub fn bug(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Bug, span) } pub fn error(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Error, span) } pub fn warning(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Warning, span) } pub fn note(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Note, span) } pub fn help(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Help, span) } } /// A wrapper around std::stringstream to build a diagnostic. pub struct DiagnosticBuilder { /// The level. pub level: DiagnosticLevel, /// The span of the diagnostic. pub span: Span, /// The in progress message. 
pub message: String, } impl DiagnosticBuilder { pub fn new(level: DiagnosticLevel, span: Span) -> DiagnosticBuilder { DiagnosticBuilder { level, span, message: "".into(), } } } /// Display diagnostics in a given display format.<|fim▁hole|>/// /// A diagnostic renderer is responsible for converting the /// raw diagnostics into consumable output. /// /// For example the terminal renderer will render a sequence /// of compiler diagnostics to std::out and std::err in /// a human readable form. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticRenderer"] #[type_key = "DiagnosticRenderer"] /// A diagnostic renderer, which given a diagnostic context produces a "rendered" /// form of the diagnostics for either human or computer consumption. pub struct DiagnosticRendererNode { /// The base type. pub base: Object, // TODO(@jroesch): we can't easily exposed packed functions due to // memory layout // missing field here } impl DiagnosticRenderer { /// Render the provided context. pub fn render(&self, ctx: DiagnosticContext) -> Result<()> { diagnositc_renderer_render(self.clone(), ctx) } } #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticContext"] #[type_key = "DiagnosticContext"] /// A diagnostic context for recording errors against a source file. pub struct DiagnosticContextNode { // The base type. pub base: Object, /// The Module to report against. pub module: IRModule, /// The set of diagnostics to report. pub diagnostics: Array<Diagnostic>, /// The renderer set for the context. pub renderer: DiagnosticRenderer, } /// A diagnostic context which records active errors /// and contains a renderer. impl DiagnosticContext { pub fn new<F>(module: IRModule, render_func: F) -> DiagnosticContext where F: Fn(DiagnosticContext) -> () + 'static, { let renderer = diagnostic_renderer(render_func.to_function()).unwrap(); let node = DiagnosticContextNode { base: Object::base::<DiagnosticContextNode>(), module, diagnostics: Array::from_vec(vec![]).unwrap(), renderer, }; DiagnosticContext(Some(ObjectPtr::new(node))) } pub fn default(module: IRModule) -> DiagnosticContext { diagnostic_context_default(module).unwrap() } /// Emit a diagnostic. pub fn emit(&mut self, diagnostic: Diagnostic) -> Result<()> { emit(self.clone(), diagnostic) } /// Render the errors and raise a DiagnosticError exception. pub fn render(&mut self) -> Result<()> { diagnostic_context_render(self.clone()) } /// Emit a diagnostic and then immediately attempt to render all errors. pub fn emit_fatal(&mut self, diagnostic: Diagnostic) -> Result<()> { self.emit(diagnostic)?; self.render()?; Ok(()) } } /// Override the global diagnostics renderer. // render_func: Option[Callable[[DiagnosticContext], None]] // If the render_func is None it will remove the current custom renderer // and return to default behavior. fn override_renderer<F>(opt_func: Option<F>) -> Result<()> where F: Fn(DiagnosticContext) -> () + 'static, { match opt_func { None => clear_renderer(), Some(func) => { let func = func.to_function(); let render_factory = move || diagnostic_renderer(func.clone()).unwrap(); function::register_override(render_factory, "diagnostics.OverrideRenderer", true)?; Ok(()) } } }<|fim▁end|>
<|file_name|>index.rtr.js<|end_file_name|><|fim▁begin|>(function (angular) {
    angular.module("rmApp", ["ui.router", "rmApp.templates", "rmApp.controllers"])
        .config(["$stateProvider", "$urlRouterProvider", "$locationProvider",
            function ($stateProvider, $urlRouterProvider, $locationProvider) {
                $urlRouterProvider.otherwise("/");
                $locationProvider.html5Mode(false);
                //
                $stateProvider
                    .state("index", {
                        url: "/",
                        templateUrl: "index.tpl.html",
                        controller: "rmCtrl"
                    })
                    .state("home", {
                        url: "/home",
                        templateUrl: "home.tpl.html",
                        controller: "rmCtrl"
                    }).state("about", {
                        url: "/about",
                        templateUrl: "about.tpl.html",
                        controller: "rmCtrl"
                    }).state("contact", {
                        url: "/contact",
                        templateUrl: "contact.tpl.html",
                        controller: "rmCtrl"
                    }).state("spinner1", {
                        url: "/spinner1",
                        templateUrl: "spinner1.tpl.html",<|fim▁hole|>
                        url: "/spinner2",
                        templateUrl: "spinner2.tpl.html",
                        controller: "rmCtrl"
                    });
            }]);
})(angular);<|fim▁end|>
controller: "rmCtrl" }).state("spinner2", {
<|file_name|>mock-server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # Copyright 2015, 2016 Endless Mobile, Inc. # This file is part of eos-event-recorder-daemon. # # eos-event-recorder-daemon is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or (at your # option) any later version. # # eos-event-recorder-daemon is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License # along with eos-event-recorder-daemon. If not, see # <http://www.gnu.org/licenses/>. import gzip import http.server import sys class PrintingHTTPRequestHandler(http.server.BaseHTTPRequestHandler): def do_PUT(self): print(self.path, flush=True) content_encoding = self.headers['X-Endless-Content-Encoding'] print(content_encoding, flush=True) content_length = int(self.headers['Content-Length']) compressed_request_body = self.rfile.read(content_length) decompressed_request_body = gzip.decompress(compressed_request_body) print(len(decompressed_request_body), flush=True) sys.stdout.buffer.write(decompressed_request_body) sys.stdout.buffer.flush() status_code_str = sys.stdin.readline() status_code = int(status_code_str) self.send_response(status_code) self.end_headers() <|fim▁hole|># A metrics server that simply prints the requests it receives to stdout class MockServer(http.server.HTTPServer): def __init__(self): SERVER_ADDRESS = ('localhost', 0) super().__init__(SERVER_ADDRESS, PrintingHTTPRequestHandler) if __name__ == '__main__': mock_server = MockServer() print(mock_server.server_port, flush=True) mock_server.serve_forever()<|fim▁end|>
<|file_name|>RonnyView.java<|end_file_name|><|fim▁begin|>package de.baleipzig.iris.ui.playground; import com.vaadin.navigator.View; import com.vaadin.navigator.ViewChangeListener; import com.vaadin.spring.annotation.SpringView; import com.vaadin.spring.annotation.UIScope; import com.vaadin.ui.Label; import com.vaadin.ui.VerticalLayout;<|fim▁hole|> @UIScope @SpringView(name = RonnyView.VIEW_NAME) public class RonnyView extends VerticalLayout implements View { public static final String VIEW_NAME = "ronny"; @PostConstruct void init() { this.addComponent(new Label("Ronnys Spielwiese")); } @Override public void enter(ViewChangeListener.ViewChangeEvent event) { } }<|fim▁end|>
import javax.annotation.PostConstruct;
<|file_name|>constants.rs<|end_file_name|><|fim▁begin|>//! Defines some usefull constants

/// Properties names constant for HTTP headers
<|fim▁hole|>
    pub const ACCEPT: &'static str = "Accept";
    pub const DATE: &'static str = "Date";
    pub const LOCATION: &'static str = "Location";
}

/// Mime types constants
pub mod mimetypes {
    pub const TEXT_PLAIN: &'static str = "text/plain";
    pub const APP_JSON: &'static str = "application/json";
    pub const APP_XML: &'static str = "application/xml";
    pub const APP_OCTET_STREAM: &'static str = "application/octet-stream";
}<|fim▁end|>
pub mod properties {
    pub const CONTENT_LENGTH: &'static str = "Content-Length";
    pub const CONTENT_TYPE: &'static str = "Content-Type";
<|file_name|>RunHill.py<|end_file_name|><|fim▁begin|>"""
AUTHOR: Peter Collins, 2005.

This software is Copyright (C) 2004-2008 Bristol University
and is released under the GNU General Public License version 2.

MODULE: RunHill

PURPOSE: A sample setup and configuration for the normalization algorithms.

NOTES: See RunConfig.py for configuration options
"""

import sys
import RunConfig

degree = 6
if len(sys.argv)>1:
    degree = int(sys.argv[1])

# pull things into the global context for profile
# from RunConfig import run_nf

# degree 6 runs in about 2m, 8 in 20m, 10 in 2h
config = { "tolerance" : 5.0e-14 ,
           "degree" : degree ,
           "system" : "Hill" ,
           "do_stream" : False ,
           "compute_diagonalisation" : True ,
           "run_normal_form_python" : False ,
           "run_normal_form_cpp" : True }

RunConfig.NfConfig(config).run_examp()

# Now do a python run if degree is < 7
config["compute_diagonalisation"] = False<|fim▁hole|>config["run_normal_form_cpp"] = False
if degree < 7:
    RunConfig.NfConfig(config).run_examp()<|fim▁end|>
config["run_normal_form_python"] = True
<|file_name|>cre.locate_sample.py<|end_file_name|><|fim▁begin|>#!/hpf/largeprojects/ccmbio/naumenko/tools/bcbio/anaconda/bin/python

"""
Looks for a specific sample
"""

import re
import sys
import os
import os.path

sample = sys.argv[1]

family,sample_only = sample.split("_")

match = re.match('\d*',family)

if match:
    prefix=str(int(match.group(0))/100)<|fim▁hole|>
    report=0
    bam=0
    errors = []

    if os.path.isfile(report_path+'/'+family+'.csv'):
        #print("Report exists")
        report=1
    else:
        errors.append('Error: no report')

    if os.path.isfile(report_path+'/'+sample+'.bam'):
        #print("Bam exists")
        bam=1
    else:
        errors.append(' ERROR: no bam')

    if (bam==1 and report==1):
        print(sample+'\t'+os.getcwd()+"/"+report_path+"\t"+os.getcwd()+"/"+report_path+'/'+sample+'.bam')
    else:
        print(sample+'\t'+' '.join(errors))
else:
    print("Family ID is not starting with digital")<|fim▁end|>
report_path = prefix+'x/'+family
<|file_name|>credential-types.strings.js<|end_file_name|><|fim▁begin|><|fim▁hole|> let t = this.t; let ns = this.credential_types; ns.deleteCredentialType = { CREDENTIAL_TYPE_IN_USE: t.s('This credential type is currently being used by one or more credentials. Credentials that use this credential type must be deleted before the credential type can be deleted.') }; } CredentialTypesStrings.$inject = ['BaseStringService']; export default CredentialTypesStrings;<|fim▁end|>
function CredentialTypesStrings (BaseString) { BaseString.call(this, 'credential_types');
<|file_name|>bookmarkletSource.js<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2005-2013 University of Sydney * * Licensed under the GNU License, Version 3.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.gnu.org/licenses/gpl-3.0.txt * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /** * brief description of file * * @author Tom Murtagh * @author Kim Jackson * @author Ian Johnson <[email protected]> * @author Stephen White <[email protected]> * @author Artem Osmakov <[email protected]> * @copyright (C) 2005-2013 University of Sydney * @link http://Sydney.edu.au/Heurist * @version 3.1.0 * @license http://www.gnu.org/licenses/gpl-3.0.txt GNU License 3.0 * @package Heurist academic knowledge management system<|fim▁hole|>javascript:(function(){h='http://heuristscholar.org/h3/';d=document;c=d.contentType;if(c=='text/html'||!c){if(d.getElementById('__heurist_bookmarklet_div'))return Heurist.init();s=d.createElement('script');s.type='text/javascript';s.src=(h+'import/bookmarklet/bookmarkletPopup.php?'+new Date().getTime()).slice(0,-8);d.getElementsByTagName('head')[0].appendChild(s);}else{e=encodeURIComponent;w=open(h+'records/add/addRecordPopup.php?t='+e(d.title)+'&u='+e(location.href));window.setTimeout('w.focus()',200);}})();<|fim▁end|>
* @subpackage !!!subpackagename for file such as Administration, Search, Edit, Application, Library */
<|file_name|>TestYamlConfigReader.java<|end_file_name|><|fim▁begin|>/* * (C) Copyright 2017 Netcentric AG. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ package biz.netcentric.cq.tools.actool.configreader; import java.util.Map; import biz.netcentric.cq.tools.actool.configmodel.AceBean; import biz.netcentric.cq.tools.actool.configmodel.AuthorizableConfigBean; import biz.netcentric.cq.tools.actool.validators.exceptions.AcConfigBeanValidationException; /** Subclass of YamlConfigReader only used for unit tests. Overrides bean setup-methods from YamlConfigReader to set up TestAceBean and * TestAuthorizableConfigBean in order to set the assertedExceptionString set in test yaml files for later evaluation in unit tests. Also * overrides getNewAceBean() and getNewAuthorizableConfigBean() to return the correct testing type in order to make the downcast in * overridden setup-methods possible. * * @author jochenkoschorke */ public class TestYamlConfigReader extends YamlConfigReader { protected final String ASSERTED_EXCEPTION = "assertedException"; @Override protected void setupAceBean(final String principal, final Map<String, ?> currentAceDefinition, final AceBean tmpAclBean, String sourceFile) { super.setupAceBean(principal, currentAceDefinition, tmpAclBean, sourceFile); ((TestAceBean) tmpAclBean).setAssertedExceptionString(getMapValueAsString( currentAceDefinition, ASSERTED_EXCEPTION)); } @Override protected void setupAuthorizableBean( final AuthorizableConfigBean authorizableConfigBean, final Map<String, Object> currentPrincipalDataMap, final String authorizableId, boolean isGroupSection) throws AcConfigBeanValidationException {<|fim▁hole|> @Override protected AceBean getNewAceBean() { return new TestAceBean(); } @Override protected AuthorizableConfigBean getNewAuthorizableConfigBean() { return new TestAuthorizableConfigBean(); } }<|fim▁end|>
super.setupAuthorizableBean(authorizableConfigBean, currentPrincipalDataMap, authorizableId, isGroupSection); ((TestAuthorizableConfigBean) authorizableConfigBean).setAssertedExceptionString(getMapValueAsString( currentPrincipalDataMap, ASSERTED_EXCEPTION)); }
<|file_name|>account_document_tax.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ##############################################################################<|fim▁hole|>from openerp import models, fields, api from openerp.exceptions import ValidationError class AccountDocumentTax(models.AbstractModel): _name = 'account.document.tax' currency_id = fields.Many2one('res.currency') amount = fields.Monetary('Importe', currency_field='currency_id', required=True) base = fields.Monetary('Base', currency_field='currency_id') jurisdiction = fields.Selection( [ ('nacional', 'Nacional'), ('provincial', 'Provincial'), ('municipal', 'Municipal') ], string='Jurisdiccion', required=True, ) name = fields.Char('Nombre', required=True) company_id = fields.Many2one( 'res.company', string='Compania', required=True, default=lambda self: self.env.user.company_id, ) @api.constrains('amount') def check_amount(self): for tax in self: if tax.amount <= 0: raise ValidationError('El monto del impuesto debe ser mayor a 0') @api.constrains('base') def check_base(self): for tax in self: if tax.base < 0: raise ValidationError('La base del impuesto no puede ser negativa') # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:<|fim▁end|>
<|file_name|>StringData.js<|end_file_name|><|fim▁begin|>/** * Data that contains a string. * @param {string} value * @param {boolean} [isValid=true] * @constructor */ function StringData(value, isValid) { Data.call(this, Data.types.string, value, isValid); }<|fim▁hole|> * If the value could represent a number, it is converted to valid NumData. Otherwise, invalid NumData(0) is returned * @return {NumData} */ StringData.prototype.asNum = function() { if (this.isNumber()) { return new NumData(parseFloat(this.getValue()), this.isValid); } else { return new NumData(0, false); } }; /** * The string is a valid boolean if it is "true" or "false" (any casing) * @return {BoolData} */ StringData.prototype.asBool = function() { if (this.getValue().toUpperCase() === "TRUE") { return new BoolData(true, this.isValid); } else if (this.getValue().toUpperCase() === "FALSE") { return new BoolData(false, this.isValid); } return new BoolData(false, false); }; /** * @return {StringData} */ StringData.prototype.asString = function() { return this; }; /** * Checks to see if the number can be converted to a valid number * @return {boolean} */ StringData.prototype.isNumber = function() { //from https://en.wikipedia.org/wiki/Regular_expression const numberRE = /^[+-]?(\d+(\.\d+)?|\.\d+)([eE][+-]?\d+)?$/; return numberRE.test(this.getValue()); }; /** * Imports StringData from XML * @param {Node} dataNode * @return {StringData|null} */ StringData.importXml = function(dataNode) { const value = XmlWriter.getTextNode(dataNode, "value"); if (value == null) return null; return new StringData(value); };<|fim▁end|>
StringData.prototype = Object.create(Data.prototype); StringData.prototype.constructor = StringData; /**
<|file_name|>chooser.js<|end_file_name|><|fim▁begin|>function(modal) { function ajaxifyLinks (context) { $('a.address-choice', context).click(function() { modal.loadUrl(this.href); return false; }); $('.pagination a', context).click(function() { var page = this.getAttribute("data-page"); setPage(page); return false; }); }; var searchUrl = $('form.address-search', modal.body).attr('action') function search() { $.ajax({ url: searchUrl, data: {q: $('#id_q').val()}, success: function(data, status) { $('#search-results').html(data); ajaxifyLinks($('#search-results')); } }); return false; }; function setPage(page) { if($('#id_q').val().length){ dataObj = {q: $('#id_q').val(), p: page}; } else { dataObj = {p: page}; } $.ajax({ url: searchUrl, data: dataObj, success: function(data, status) { $('#search-results').html(data); ajaxifyLinks($('#search-results')); } }); return false; } ajaxifyLinks(modal.body); function submitForm() { var formdata = new FormData(this); $.ajax({ url: this.action, <|fim▁hole|> processData: false, contentType: false, type: 'POST', dataType: 'text', success: function(response){ modal.loadResponseText(response); } }); return false; } $('form.address-create', modal.body).submit(submitForm); $('form.address-edit', modal.body).submit(submitForm); $('form.address-search', modal.body).submit(search); $('#id_q').on('input', function() { clearTimeout($.data(this, 'timer')); var wait = setTimeout(search, 50); $(this).data('timer', wait); }); {% url 'wagtailadmin_tag_autocomplete' as autocomplete_url %} $('#id_tags', modal.body).tagit({ autocomplete: {source: "{{ autocomplete_url|addslashes }}"} }); function detectErrors() { var errorSections = {}; // First count up all the errors $('form.address-create .error-message').each(function(){ var parentSection = $(this).closest('section'); if(!errorSections[parentSection.attr('id')]){ errorSections[parentSection.attr('id')] = 0; } errorSections[parentSection.attr('id')] = errorSections[parentSection.attr('id')]+1; }); // Now identify them on each tab for(var index in errorSections) { $('.tab-nav a[href=#'+ index +']').addClass('errors').attr('data-count', errorSections[index]); } } detectErrors(); }<|fim▁end|>
data: formdata,
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>import os<|fim▁hole|> os.environ['DJANGO_SETTINGS_MODULE'] = 'flooding.windows' from django.core import management if __name__ == '__main__': management.execute_from_command_line()<|fim▁end|>
if not 'DJANGO_SETTINGS_MODULE' in os.environ:
<|file_name|>plotfs_detec.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import vtktools import sys import math import re import matplotlib.pyplot as plt import getopt from scipy.special import erf from numpy import poly1d from matplotlib.pyplot import figure, show from numpy import pi, sin, linspace from matplotlib.mlab import stineman_interp from numpy import exp, cos<|fim▁hole|>from fluidity_tools import stat_parser def mirror(x): return 13800-x def usage(): print('Usage:') print('plotfs_detec.py [-w] --file=detector_filename --save=filename') print('--save=... saves the plots as images instead of plotting them on the screen.') print('-w plots the wetting procedure (drying is default).') # should be copied from the diamond extrude function. X is 2 dimensional def bathymetry_function(X): return -5.0*X/13800 ################# Main ########################### def main(argv=None): filename='' timestep_ana=0.0 dzero=0.01 save='' # If nonempty, we save the plots as images instead if showing them wetting=False try: opts, args = getopt.getopt(sys.argv[1:], ":w", ['file=','save=']) except getopt.GetoptError: usage() sys.exit(2) for opt, arg in opts: if opt == '--file': filename=arg elif opt == '--save': save=arg elif opt == '-w': wetting=True if filename=='': print('No filename specified. You have to give the detectors filename.') usage() sys.exit(2) ####################### Print time plot ########################### print('Generating time plot') s = stat_parser(filename) timesteps=s["ElapsedTime"]["value"] timestep=timesteps[1]-timesteps[0] print("Found ", len(timesteps), " timesteps with dt=", timestep) if timestep_ana==0.0: timestep_ana=timestep fs=s["water"]["FreeSurface"] print("Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ").") # Get and plot results plt.ion() # swith on interactive mode fig2 = figure() ax2 = fig2.add_subplot(111) if wetting: ##plot_start=90 # in timesteps plot_start=18 # in timesteps, after 18 timesteps the waterlevel reaches its lowest point ##plot_end=114 # in timesteps plot_end=54 # in timesteps plot_name='Wetting' else: plot_start=54 # in timesteps plot_end=89 # in timesteps plot_name='Drying' for t in range(0,len(timesteps)): # ignore the first waveperiod if t<plot_start: continue if t>plot_end: continue fsvalues=[] xcoords=[] for name, item in fs.iteritems(): #print name xcoords.append(mirror(s[name]['position'][0][0])) #print xcoord fsvalues.append(fs[name][t]) # Plot result of one timestep ax2.plot(xcoords,fsvalues,'r,', label='Numerical solution') # Plot Analytical solution fsvalues_ana=[] offset=-bathymetry_function(0.0)+dzero xcoords.sort() for x in xcoords: fsvalues_ana.append(bathymetry_function(mirror(x))-offset) # Plot vertical line in bathmetry on right boundary xcoords.append(xcoords[len(xcoords)-1]+0.000000001) fsvalues_ana.append(2.1) ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry') #plt.legend() if t==plot_end: # change from meters in kilometers in the x-axis # return locs, labels where locs is an array of tick locations and # labels is an array of tick labels. 
locs, labels = plt.xticks() for i in range(0,len(locs)): labels[i]=str(locs[i]/1000) plt.xticks(locs, labels) plt.ylim(-2.2,1.4) #plt.title(plot_name) plt.xlabel('Position [km]') plt.ylabel('Free surface [m]') if save=='': plt.draw() raw_input("Please press Enter") else: plt.savefig(save+'_'+plot_name+'.pdf', facecolor='white', edgecolor='black', dpi=100) plt.cla() t=t+1 # Make video from the images: # mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi if __name__ == "__main__": main()<|fim▁end|>
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors, // refer to the AUTHORS file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern mod glfw; #[link(name="glfw")] extern {} #[start] fn start(argc: int, argv: **u8) -> int { std::rt::start_on_main_thread(argc, argv, main) } fn main() { glfw::set_error_callback(~ErrorContext); do glfw::start { glfw::window_hint::visible(true); let window = glfw::Window::create(640, 480, "Defaults", glfw::Windowed) .expect("Failed to create GLFW window."); window.make_context_current(); let (width, height) = window.get_size(); println!("window size: ({}, {})", width, height); println!("Context version: {:s}", window.get_context_version().to_str()); println!("OpenGL forward compatible: {}", window.is_opengl_forward_compat()); println!("OpenGL debug context: {}", window.is_opengl_debug_context()); println!("OpenGL profile: {}", window.get_opengl_profile()); let gl_params = [ (gl::RED_BITS, None, "red bits" ), (gl::GREEN_BITS, None, "green bits" ), (gl::BLUE_BITS, None, "blue bits" ), (gl::ALPHA_BITS, None, "alpha bits" ), (gl::DEPTH_BITS, None, "depth bits" ), (gl::STENCIL_BITS, None, "stencil bits" ), (gl::ACCUM_RED_BITS, None, "accum red bits" ), (gl::ACCUM_GREEN_BITS, None, "accum green bits" ), (gl::ACCUM_BLUE_BITS, None, "accum blue bits" ), (gl::ACCUM_ALPHA_BITS, None, "accum alpha bits" ), (gl::STEREO, None, "stereo" ), (gl::SAMPLES_ARB, Some("GL_ARB_multisample"), "FSAA samples" ), ]; for &(param, ext, name) in gl_params.iter() { if ext.map_default(true, |s| { glfw::extension_supported(s) }) { let value = 0; unsafe { gl::GetIntegerv(param, &value) }; println!("OpenGL {:s}: {}", name, value); }; } } } struct ErrorContext; impl glfw::ErrorCallback for ErrorContext { fn call(&self, _: glfw::Error, description: ~str) { println!("GLFW Error: {:s}", description); } } mod gl { use std::libc; #[cfg(target_os = "macos")] #[link(name="OpenGL", kind="framework")] extern { } #[cfg(target_os = "linux")] #[link(name="GL")] extern { } pub type GLenum = libc::c_uint; pub type GLint = libc::c_int; pub static RED_BITS : GLenum = 0x0D52; pub static GREEN_BITS : GLenum = 0x0D53; pub static BLUE_BITS : GLenum = 0x0D54; pub static ALPHA_BITS : GLenum = 0x0D55; pub static DEPTH_BITS : GLenum = 0x0D56; pub static STENCIL_BITS : GLenum = 0x0D57; pub static ACCUM_RED_BITS : GLenum = 0x0D58; pub static ACCUM_GREEN_BITS : GLenum = 0x0D59; pub static ACCUM_BLUE_BITS : GLenum = 0x0D5A; pub static ACCUM_ALPHA_BITS : GLenum = 0x0D5B; pub static STEREO : GLenum = 0x0C33; pub static SAMPLES_ARB : GLenum = 0x80A9; #[inline(never)]<|fim▁hole|> extern "C" { fn glGetIntegerv(pname: GLenum, params: *GLint); } }<|fim▁end|>
pub unsafe fn GetIntegerv(pname: GLenum, params: *GLint) { glGetIntegerv(pname, params) }
<|file_name|>missing-return.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // error-pattern: return fn f() -> int { } fn main() { f(); }<|fim▁end|>
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. //
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! **Canonicalization** is the key to constructing a query in the //! middle of type inference. Ordinarily, it is not possible to store //! types from type inference in query keys, because they contain //! references to inference variables whose lifetimes are too short //! and so forth. Canonicalizing a value T1 using `canonicalize_query` //! produces two things: //! //! - a value T2 where each unbound inference variable has been //! replaced with a **canonical variable**; //! - a map M (of type `CanonicalVarValues`) from those canonical //! variables back to the original. //! //! We can then do queries using T2. These will give back constraints //! on the canonical variables which can be translated, using the map //! M, into constraints in our source context. This process of //! translating the results back is done by the //! `instantiate_query_result` method. //! //! For a more detailed look at what is happening here, check //! out the [chapter in the rustc dev guide][c]. //! //! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html <|fim▁hole|>use rustc_index::vec::IndexVec; use rustc_middle::ty::fold::TypeFoldable; use rustc_middle::ty::subst::GenericArg; use rustc_middle::ty::{self, BoundVar, List}; use rustc_span::source_map::Span; pub use rustc_middle::infer::canonical::*; use substitute::CanonicalExt; mod canonicalizer; pub mod query_response; mod substitute; impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { /// Creates a substitution S for the canonical value with fresh /// inference variables and applies it to the canonical value. /// Returns both the instantiated result *and* the substitution S. /// /// This is only meant to be invoked as part of constructing an /// inference context at the start of a query (see /// `InferCtxtBuilder::enter_with_canonical`). It basically /// brings the canonical value "into scope" within your new infcx. /// /// At the end of processing, the substitution S (once /// canonicalized) then represents the values that you computed /// for each of the canonical inputs to your query. pub fn instantiate_canonical_with_fresh_inference_vars<T>( &self, span: Span, canonical: &Canonical<'tcx, T>, ) -> (T, CanonicalVarValues<'tcx>) where T: TypeFoldable<'tcx>, { // For each universe that is referred to in the incoming // query, create a universe in our local inference context. In // practice, as of this writing, all queries have no universes // in them, so this code has no effect, but it is looking // forward to the day when we *do* want to carry universes // through into queries. let universes: IndexVec<ty::UniverseIndex, _> = std::iter::once(ty::UniverseIndex::ROOT) .chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe())) .collect(); let canonical_inference_vars = self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]); let result = canonical.substitute(self.tcx, &canonical_inference_vars); (result, canonical_inference_vars) } /// Given the "infos" about the canonical variables from some /// canonical, creates fresh variables with the same /// characteristics (see `instantiate_canonical_var` for /// details). You can then use `substitute` to instantiate the /// canonical variable with these inference variables. 
fn instantiate_canonical_vars( &self, span: Span, variables: &List<CanonicalVarInfo<'tcx>>, universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> CanonicalVarValues<'tcx> { let var_values: IndexVec<BoundVar, GenericArg<'tcx>> = variables .iter() .map(|info| self.instantiate_canonical_var(span, info, &universe_map)) .collect(); CanonicalVarValues { var_values } } /// Given the "info" about a canonical variable, creates a fresh /// variable for it. If this is an existentially quantified /// variable, then you'll get a new inference variable; if it is a /// universally quantified variable, you get a placeholder. fn instantiate_canonical_var( &self, span: Span, cv_info: CanonicalVarInfo<'tcx>, universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> GenericArg<'tcx> { match cv_info.kind { CanonicalVarKind::Ty(ty_kind) => { let ty = match ty_kind { CanonicalTyVarKind::General(ui) => self.next_ty_var_in_universe( TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span }, universe_map(ui), ), CanonicalTyVarKind::Int => self.next_int_var(), CanonicalTyVarKind::Float => self.next_float_var(), }; ty.into() } CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, name }) => { let universe_mapped = universe_map(universe); let placeholder_mapped = ty::PlaceholderType { universe: universe_mapped, name }; self.tcx.mk_ty(ty::Placeholder(placeholder_mapped)).into() } CanonicalVarKind::Region(ui) => self .next_region_var_in_universe( RegionVariableOrigin::MiscVariable(span), universe_map(ui), ) .into(), CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, name }) => { let universe_mapped = universe_map(universe); let placeholder_mapped = ty::PlaceholderRegion { universe: universe_mapped, name }; self.tcx.mk_region(ty::RePlaceholder(placeholder_mapped)).into() } CanonicalVarKind::Const(ui) => self .next_const_var_in_universe( self.next_ty_var_in_universe( TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span }, universe_map(ui), ), ConstVariableOrigin { kind: ConstVariableOriginKind::MiscVariable, span }, universe_map(ui), ) .into(), CanonicalVarKind::PlaceholderConst(ty::PlaceholderConst { universe, name }) => { let universe_mapped = universe_map(universe); let placeholder_mapped = ty::PlaceholderConst { universe: universe_mapped, name }; self.tcx .mk_const(ty::Const { val: ty::ConstKind::Placeholder(placeholder_mapped), ty: name.ty, }) .into() } } } }<|fim▁end|>
use crate::infer::{ConstVariableOrigin, ConstVariableOriginKind}; use crate::infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin, TypeVariableOriginKind};
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use bitmap::Image; // see read_ppm implementation in the bitmap library pub fn main() {<|fim▁hole|> // read a PPM image, which was produced by the write-a-ppm-file task let image = Image::read_ppm("./test_image.ppm").unwrap(); println!("Read using nom parsing:"); println!("Format: {:?}", image.format); println!("Dimensions: {} x {}", image.height, image.width); } #[cfg(test)] mod tests { extern crate rand; use bitmap::{Color, Image}; use std::env; #[test] fn read_ppm() { let mut image = Image::new(2, 1); image[(0, 0)] = Color { red: 255, green: 0, blue: 0, }; image[(1, 0)] = Color { red: 0, green: 255, blue: 0, }; let fname = format!( "{}/test-{}.ppm", env::temp_dir().to_str().unwrap(), self::rand::random::<i32>(), ); image.write_ppm(&fname).unwrap(); image = Image::read_ppm(&fname).unwrap(); assert_eq!(image.width, 2); assert_eq!(image.height, 1); assert_eq!( image.data, vec![ Color { red: 255, green: 0, blue: 0 }, Color { red: 0, green: 255, blue: 0 } ] ) } }<|fim▁end|>
<|file_name|>compressor.rs<|end_file_name|><|fim▁begin|>/* * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.<|fim▁hole|> */ use std::io; use snap::read::FrameDecoder; use snap::write::FrameEncoder; /// A trait that provides a compression and decompression strategy for this filter. /// Conversion takes place on a mutable Vec, to ensure the most performant compression or /// decompression operation can occur. pub(crate) trait Compressor { /// Compress the contents of the Vec - overwriting the original content. fn encode(&self, contents: &mut Vec<u8>) -> io::Result<()>; /// Decompress the contents of the Vec - overwriting the original content. fn decode(&self, contents: &mut Vec<u8>) -> io::Result<()>; } pub(crate) struct Snappy {} impl Compressor for Snappy { fn encode(&self, contents: &mut Vec<u8>) -> io::Result<()> { let input = std::mem::take(contents); let mut wtr = FrameEncoder::new(contents); io::copy(&mut input.as_slice(), &mut wtr)?; Ok(()) } fn decode(&self, contents: &mut Vec<u8>) -> io::Result<()> { let input = std::mem::take(contents); let mut rdr = FrameDecoder::new(input.as_slice()); io::copy(&mut rdr, contents)?; Ok(()) } }<|fim▁end|>
<|file_name|>limits.py<|end_file_name|><|fim▁begin|># Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. This module handles rate liming at a per-user level, so it should not be used to prevent intentional Denial of Service attacks, as we can assume a DOS can easily come through multiple user accounts. DOS protection should be done at a different layer. Instead this module should be used to protect against unintentional user actions. With that in mind the limits set here should be high enough as to not rate-limit any intentional actions. To find good rate-limit values, check how long requests are taking (see logs) in your environment to assess your capabilities and multiply out to get figures. NOTE: As the rate-limiting here is done in memory, this only works per process (each process will have its own rate limiting counter). """ import collections import copy import httplib import math import re import time from oslo.serialization import jsonutils from oslo.utils import importutils import webob.dec import webob.exc from nova.api.openstack.compute.views import limits as limits_views from nova.api.openstack import wsgi from nova.i18n import _ from nova import quota from nova import utils from nova import wsgi as base_wsgi QUOTAS = quota.QUOTAS LIMITS_PREFIX = "limits." class LimitsController(object): """Controller for accessing limits in the OpenStack API.""" def index(self, req): """Return all global and rate limit information.""" context = req.environ['nova.context'] project_id = req.params.get('tenant_id', context.project_id)<|fim▁hole|> builder = self._get_view_builder(req) return builder.build(rate_limits, abs_limits) def create(self, req, body): """Create a new limit.""" raise webob.exc.HTTPNotImplemented() def delete(self, req, id): """Delete the limit.""" raise webob.exc.HTTPNotImplemented() def detail(self, req): """Return limit details.""" raise webob.exc.HTTPNotImplemented() def show(self, req, id): """Show limit information.""" raise webob.exc.HTTPNotImplemented() def update(self, req, id, body): """Update existing limit.""" raise webob.exc.HTTPNotImplemented() def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) class Limit(object): """Stores information about a limit for HTTP requests.""" UNITS = dict([(v, k) for k, v in utils.TIME_UNITS.items()]) def __init__(self, verb, uri, regex, value, unit): """Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) 
@param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = (_("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") % {'value': self.value, 'verb': self.verb, 'uri': self.uri, 'unit_string': self.unit_string}) self.error_message = msg def __call__(self, verb, url): """Represents a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", 120, utils.TIME_UNITS['MINUTE']), Limit("POST", "*/servers", "^/servers", 120, utils.TIME_UNITS['MINUTE']), Limit("PUT", "*", ".*", 120, utils.TIME_UNITS['MINUTE']), Limit("GET", "*changes-since*", ".*changes-since.*", 120, utils.TIME_UNITS['MINUTE']), Limit("DELETE", "*", ".*", 120, utils.TIME_UNITS['MINUTE']), Limit("GET", "*/os-fping", "^/os-fping", 12, utils.TIME_UNITS['MINUTE']), ] class RateLimitingMiddleware(base_wsgi.Middleware): """Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """Initialize new `RateLimitingMiddleware`. It wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap @param limits: String describing limits @param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. 
""" base_wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Represents a single call through this middleware. We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get("nova.context") if context: username = context.user_id else: username = None delay, error = self._limiter.check_for_delay(verb, url, username) if delay: msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.RateLimitFault(msg, error, retry) req.environ["nova.limits"] = self._limiter.get_limits(username) return self.application class Limiter(object): """Rate-limit checking class which handles limits in memory.""" def __init__(self, limits, **kwargs): """Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith(LIMITS_PREFIX): username = key[len(LIMITS_PREFIX):] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """Return the limits for a given user.""" return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """Check the given verb/user/user triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """Convert a string into a list of Limit instances. This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. 
""" # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in utils.TIME_UNITS: raise ValueError("Invalid units specified") unit = utils.TIME_UNITS[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. To use, POST ``/<username>`` with JSON data such as:: { "verb" : GET, "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): """Handles a call to this application. Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """Rate-limit requests based on answers from a remote source.""" def __init__(self, limiter_address): """Initialize the new `WsgiLimiterProxy`. @param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dumps({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = httplib.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if 200 >= resp.status < 300: return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. # This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """Ignore a limits string--simply doesn't apply for the limit proxy. @return: Empty list. """ return []<|fim▁end|>
quotas = QUOTAS.get_project_quotas(context, project_id, usages=False) abs_limits = dict((k, v['limit']) for k, v in quotas.items()) rate_limits = req.environ.get("nova.limits", [])
<|file_name|>intrinsic-return-address.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(intrinsics)] use std::ptr; struct Point { x: f32, y: f32, z: f32, } <|fim▁hole|>} fn f(result: &mut uint) -> Point { unsafe { *result = return_address() as uint; Point { x: 1.0, y: 2.0, z: 3.0, } } } fn main() { let mut intrinsic_reported_address = 0; let pt = f(&mut intrinsic_reported_address); let actual_address = &pt as *const Point as uint; assert_eq!(intrinsic_reported_address, actual_address); }<|fim▁end|>
extern "rust-intrinsic" { fn return_address() -> *const u8;
<|file_name|>qgsnewnamedialog.cpp<|end_file_name|><|fim▁begin|>/*************************************************************************** qgsnewnamedialog.cpp ------------------- begin : May, 2015 copyright : (C) 2015 Radim Blazek email : [email protected] ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include <QLabel> #include <QLineEdit> #include <QPushButton> #include <QRegExpValidator> #include <QSizePolicy> #include "qgslogger.h" #include "qgsnewnamedialog.h" QgsNewNameDialog::QgsNewNameDialog( const QString& source, const QString& initial, const QStringList& extensions, const QStringList& existing, const QRegExp& regexp, Qt::CaseSensitivity cs, QWidget *parent, Qt::WindowFlags flags ) : QgsDialog( parent, flags, QDialogButtonBox::Ok | QDialogButtonBox::Cancel ) , mExiting( existing ) , mExtensions( extensions ) , mCaseSensitivity( cs ) , mNamesLabel( 0 ) , mRegexp( regexp ) { setWindowTitle( tr( "New name" ) ); QDialog::layout()->setSizeConstraint( QLayout::SetMinimumSize ); layout()->setSizeConstraint( QLayout::SetMinimumSize ); layout()->setSpacing( 6 ); mOkString = buttonBox()->button( QDialogButtonBox::Ok )->text(); QString hintString; QString nameDesc = mExtensions.isEmpty() ? tr( "name" ) : tr( "base name" ); if ( source.isEmpty() ) { hintString = tr( "Enter new %1" ).arg( nameDesc ); } else { hintString = tr( "Enter new %1 for %2" ).arg( nameDesc ).arg( source ); } QLabel* hintLabel = new QLabel( hintString, this ); layout()->addWidget( hintLabel ); mLineEdit = new QLineEdit( initial, this ); if ( !regexp.isEmpty() ) { QRegExpValidator *validator = new QRegExpValidator( regexp, this ); mLineEdit->setValidator( validator ); } connect( mLineEdit, SIGNAL( textChanged( QString ) ), this, SLOT( nameChanged() ) ); layout()->addWidget( mLineEdit ); mNamesLabel = new QLabel( " ", this ); mNamesLabel->setSizePolicy( QSizePolicy::Minimum, QSizePolicy::Minimum ); if ( !mExtensions.isEmpty() ) { mNamesLabel->setWordWrap( true ); layout()->addWidget( mNamesLabel ); } mErrorLabel = new QLabel( " ", this ); mErrorLabel->setSizePolicy( QSizePolicy::Minimum, QSizePolicy::Minimum ); mErrorLabel->setWordWrap( true ); layout()->addWidget( mErrorLabel ); nameChanged(); } QString QgsNewNameDialog::highlightText( const QString& text ) { return "<b>" + text + "</b>"; } void QgsNewNameDialog::nameChanged() {<|fim▁hole|> QString namesString = tr( "Full names" ) + ": "; if ( !mExtensions.isEmpty() ) { mNamesLabel->setText( namesString ); } mErrorLabel->setText( " " ); // space to keep vertical space QPushButton* okButton = buttonBox()->button( QDialogButtonBox::Ok ); okButton->setText( mOkString ); okButton->setEnabled( true ); QString newName = name(); if ( newName.length() == 0 || ( !mRegexp.isEmpty() && !mRegexp.exactMatch( newName ) ) ) { //mErrorLabel->setText( highlightText( tr( "Enter new name" ) ); okButton->setEnabled( false ); return; } QStringList newNames = fullNames( newName, mExtensions ); if ( !mExtensions.isEmpty() ) { namesString += " " + newNames.join( ", " ); mNamesLabel->setText( namesString ); } QStringList conflicts = matching( newNames, 
mExiting, mCaseSensitivity ); if ( !conflicts.isEmpty() ) { mErrorLabel->setText( highlightText( tr( "%n Name(s) %1 exists", 0, conflicts.size() ).arg( conflicts.join( ", " ) ) ) ); okButton->setText( tr( "Overwrite" ) ); return; } } QString QgsNewNameDialog::name() const { return mLineEdit->text().trimmed(); } QStringList QgsNewNameDialog::fullNames( const QString& name, const QStringList& extensions ) { QStringList list; foreach ( QString ext, extensions ) { list << name + ext; } if ( list.isEmpty() ) { list << name; } return list; } QStringList QgsNewNameDialog::matching( const QStringList& newNames, const QStringList& existingNames, Qt::CaseSensitivity cs ) { QStringList list; foreach ( QString newName, newNames ) { foreach ( QString existingName, existingNames ) { if ( existingName.compare( newName, cs ) == 0 ) { list << existingName; } } } return list; } bool QgsNewNameDialog::exists( const QString& name, const QStringList& extensions, const QStringList& existing, Qt::CaseSensitivity cs ) { QStringList newNames = fullNames( name, extensions ); QStringList conflicts = matching( newNames, existing, cs ); return conflicts.size() > 0; }<|fim▁end|>
QgsDebugMsg( "entered" );
<|file_name|>config.go<|end_file_name|><|fim▁begin|>package main type FluxConfig struct { Iam string `toml:"iam"` Url string `toml:"url"` Port int `toml:"port"` Logdir string `toml:"logdir"` Balancer FluxCluster `toml:"cluster"` //Jwts []JwtAuth `toml:"jwt"` } type JwtAuth struct { RequiredClaims []JwtClaim `toml:"claim"`<|fim▁hole|> type JwtClaim struct { Key string `toml:"key"` Value string `toml:"value"` } type FluxCluster struct { Name string `toml:"name"` BalancerAddress string `toml:"address"` BalancerPort int `toml:"port"` Scramble bool `toml:"scramble"` }<|fim▁end|>
DecryptionSecret string `toml:"secret"` }
<|file_name|>FacetFilter.js<|end_file_name|><|fim▁begin|>/** * Copyright (C) 2005-2015 Alfresco Software Limited. * * This file is part of Alfresco * * Alfresco is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Alfresco is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Alfresco. If not, see <http://www.gnu.org/licenses/>. */ /** * * @module alfresco/search/FacetFilter * @extends external:dijit/_WidgetBase * @mixes external:dojo/_TemplatedMixin * @mixes module:alfresco/core/Core * @mixes module:alfresco/documentlibrary/_AlfDocumentListTopicMixin * @author Dave Draper */ define(["dojo/_base/declare", "dijit/_WidgetBase", "dijit/_TemplatedMixin", "dijit/_OnDijitClickMixin", "dojo/text!./templates/FacetFilter.html", "alfresco/core/Core", "dojo/_base/lang", "dojo/_base/array", "dojo/dom-construct", "dojo/dom-class", "dojo/on", "alfresco/util/hashUtils", "dojo/io-query", "alfresco/core/ArrayUtils"], function(declare, _WidgetBase, _TemplatedMixin, _OnDijitClickMixin, template, AlfCore, lang, array, domConstruct, domClass, on, hashUtils, ioQuery, arrayUtils) { return declare([_WidgetBase, _TemplatedMixin, AlfCore], { /** * An array of the i18n files to use with this widget. * * @instance * @type {object[]} * @default [{i18nFile: "./i18n/FacetFilter.properties"}] */ i18nRequirements: [{i18nFile: "./i18n/FacetFilter.properties"}], /** * An array of the CSS files to use with this widget. * * @instance cssRequirements {Array} * @type {object[]} * @default [{cssFile:"./css/FacetFilter.css"}] */ cssRequirements: [{cssFile:"./css/FacetFilter.css"}], /** * The HTML template to use for the widget. * @instance * @type {string} */ templateString: template, /** * Indicate whether or not the filter is currently applied * * @instance * @type {boolean} * @default */ applied: false, /** * The alt-text to use for the image that indicates that a filter has been applied * * @instance * @type {string} * @default */ appliedFilterAltText: "facet.filter.applied.alt-text", /** * The path to use as the source for the image that indicates that a filter has been applied * * @instance * @type {string} * @default */ appliedFilterImageSrc: "12x12-selected-icon.png", /** * The facet qname * * @instance * @type {string} * @default */ facet: null, /** * The filter (or more accurately the filterId) for this filter * * @instance * @type {string} * @default */ filter: null, /** * Additional data for the filter (appended after the filter with a bar, e.g. tag|sometag) * * @instance * @type {string} * @default */ filterData: "", /** * Indicates that the filter should be hidden. This will be set to "true" if any required data is missing * * @instance * @type {boolean} * @default */ hide: false, /** * When this is set to true the current URL hash fragment will be used to initialise the facet selection * and when the facet is selected the hash fragment will be updated with the facet selection. * * @instance * @type {boolean} * @default */ useHash: false, /** * Sets up the attributes required for the HTML template. 
* @instance */ postMixInProperties: function alfresco_search_FacetFilter__postMixInProperties() { if (this.label && this.facet && this.filter && this.hits) { this.label = this.message(this.label); // Localize the alt-text for the applied filter message... this.appliedFilterAltText = this.message(this.appliedFilterAltText, {0: this.label}); // Set the source for the image to use to indicate that a filter is applied... this.appliedFilterImageSrc = require.toUrl("alfresco/search") + "/css/images/" + this.appliedFilterImageSrc; } else { // Hide the filter if there is no label or no link... this.alfLog("warn", "Not enough information provided for filter. It will not be displayed", this); this.hide = true; } }, /** * @instance */ postCreate: function alfresco_search_FacetFilter__postCreate() { if (this.hide === true) { domClass.add(this.domNode, "hidden"); } if (this.applied) { domClass.remove(this.removeNode, "hidden"); domClass.add(this.labelNode, "applied"); } }, /** * If the filter has previously been applied then it is removed, if the filter is not applied * then it is applied. * * @instance */ onToggleFilter: function alfresco_search_FacetFilter__onToggleFilter(/*jshint unused:false*/ evt) { if (this.applied) { this.onClearFilter(); } else { this.onApplyFilter(); } }, /** * Applies the current filter by publishing the details of the filter along with the facet to * which it belongs and then displays the "applied" image. * * @instance */ onApplyFilter: function alfresco_search_FacetFilter__onApplyFilter() { var fullFilter = this.facet + "|" + this.filter; if(this.useHash) { this._updateHash(fullFilter, "add"); } else { this.alfPublish("ALF_APPLY_FACET_FILTER", { filter: fullFilter }); } domClass.remove(this.removeNode, "hidden"); domClass.add(this.labelNode, "applied"); this.applied = true; }, /** * Removes the current filter by publishing the details of the filter along with the facet * to which it belongs and then hides the "applied" image * * @instance */ onClearFilter: function alfresco_search_FacetFilter__onClearFilter() { var fullFilter = this.facet + "|" + this.filter; if(this.useHash) { this._updateHash(fullFilter, "remove"); } else { this.alfPublish("ALF_REMOVE_FACET_FILTER", { filter: fullFilter }); } domClass.add(this.removeNode, "hidden"); domClass.remove(this.labelNode, "applied");<|fim▁hole|> /** * Performs updates to the url hash as facets are selected and de-selected * * @instance */ _updateHash: function alfresco_search_FacetFilter___updateHash(fullFilter, mode) { // Get the existing hash and extract the individual facetFilters into an array var aHash = hashUtils.getHash(), facetFilters = ((aHash.facetFilters) ? aHash.facetFilters : ""), facetFiltersArr = (facetFilters === "") ? [] : facetFilters.split(","); // Add or remove the filter from the hash object if(mode === "add" && !arrayUtils.arrayContains(facetFiltersArr, fullFilter)) { facetFiltersArr.push(fullFilter); } else if (mode === "remove" && arrayUtils.arrayContains(facetFiltersArr, fullFilter)) { facetFiltersArr.splice(facetFiltersArr.indexOf(fullFilter), 1); } // Put the manipulated filters back into the hash object or remove the property if empty if(facetFiltersArr.length < 1) { delete aHash.facetFilters; } else { aHash.facetFilters = facetFiltersArr.join(); } // Send the hash value back to navigation this.alfPublish("ALF_NAVIGATE_TO_PAGE", { url: ioQuery.objectToQuery(aHash), type: "HASH" }, true); } }); });<|fim▁end|>
this.applied = false; },
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.http import HttpResponse from django.core.servers.basehttp import FileWrapper from django.contrib.auth.models import User from django.shortcuts import render_to_response, redirect, get_object_or_404 from requests import get from urllib import urlretrieve from common.models import Repository from common.util import get_context def cgit_url(user_name, repo_name, method, path, query=None): url = 'http://localhost:8080/view' if method == 'summary': base = '%s/%s/%s' %(url, user_name, repo_name) else: base = '%s/%s/%s/%s' %(url, user_name, repo_name, method) if path is not None: base = '%s/%s' %(base, path) if query is not None and len(query)>1: base = "%s?%s" % (base, query) print base return base def cumulative_path(path): if path is None or len(path) == 0: return path c = [path[0]] for part in path[1:]: c.append('%s/%s'%(c[-1], part)) return c def view_index(request): return redirect('index') def user_index(request, user_name): return redirect('repo_list', user_name) def repo_plain(request, user_name, repo_name, path, prefix='plain'): user = request.user owner = get_object_or_404(User, username=user_name) repo = get_object_or_404(Repository, owner=owner, name=repo_name) collaborators = repo.collaborators.all() access = repo.user_access(user) if access is None: return HttpResponse('Not authorized', status=401) query = request.GET.urlencode() print query url = cgit_url(user_name, repo_name, prefix, path, query) (fname, info) = urlretrieve(url) response = HttpResponse(FileWrapper(open(fname)), content_type='text/plain') return response def repo_snapshot(request, user_name, repo_name, path): user = request.user owner = get_object_or_404(User, username=user_name) repo = get_object_or_404(Repository, owner=owner, name=repo_name) collaborators = repo.collaborators.all() access = repo.user_access(user) if access is None: return HttpResponse('Not authorized', status=401) query = request.GET.urlencode() filename = path.split('/')[-1] url = cgit_url(user_name, repo_name, 'snapshot', path, query) (fname, info) = urlretrieve(url) response = HttpResponse(FileWrapper(open(fname)), content_type='application/force-download') response['Content-Disposition'] = 'attachment; filename="%s"' % filename return response def repo_browse(request, user_name, repo_name, method='summary', path=None): user = request.user owner = get_object_or_404(User, username=user_name)<|fim▁hole|> access = repo.user_access(user) if access is None: return HttpResponse('Not authorized', status=401) commit_id = request.GET.get('id') q = request.GET.get('q', '') qtype = request.GET.get('qt', 'grep') messages = { 'grep' : 'Log Message', 'author': 'Author', 'committer' : 'Committer', 'range' : 'Range' } search_text = messages.get(qtype, messages['grep']) if method == 'tree': file_path = path.split('/') path_parts = cumulative_path(file_path) file_path = zip(file_path, path_parts) else: file_path = None query = request.GET.urlencode() url = cgit_url(user_name, repo_name, method, path, query) text = get(url) context = get_context(request, {'owner': owner, 'repo_html':text.text, 'repo':repo, 'access':access, 'id':commit_id, 'method':method, 'q':q, 'qtype':qtype, 'search_text':search_text, 'file_path':file_path}) return render_to_response('viewer/repo_view.html', context)<|fim▁end|>
repo = get_object_or_404(Repository, owner=owner, name=repo_name) collaborators = repo.collaborators.all()
<|file_name|>lightbot.js<|end_file_name|><|fim▁begin|><|fim▁hole|>// var Spark = require("spark-io"); var Spark = require("../"); var five = require("johnny-five"); var Sumobot = require("sumobot")(five); keypress(process.stdin); var board = new five.Board({ io: new Spark({ token: process.env.SPARK_TOKEN, deviceId: process.env.SPARK_DEVICE_2 }) }); board.on("ready", function() { console.log("Welcome to Sumobot Jr: Light Bot!"); var bot = new Sumobot({ left: "D0", right: "D1", speed: 0.50 }); var light = new five.Sensor("A0"); var isQuitting = false; light.on("change", function() { if (isQuitting || this.value === null) { return; } if (this.value < 512) { bot.fwd(); } else { bot.rev(); } }); // Ensure the bot is stopped bot.stop(); });<|fim▁end|>
var keypress = require("keypress");
<|file_name|>b.js<|end_file_name|><|fim▁begin|>function almostPerfect() {<|fim▁hole|> for(var i = 0; i < 10; i++) almostPerfect();<|fim▁end|>
console.log("Hello, world!"); };
<|file_name|>instance_pit_of_saron.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2005-2011 MaNGOS <http://www.getmangos.com/> * * Copyright (C) 2008-2011 Trinity <http://www.trinitycore.org/> * * Copyright (C) 2006-2011 ScriptDev2 <http://www.scriptdev2.com/> * * Copyright (C) 2010-2011 VoragineCore <http://www.projectvoragine.com/> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ScriptPCH.h" #include "pit_of_saron.h" #define MAX_ENCOUNTER 3 /* Pit of Saron encounters: 0- Forgemaster Garfrost 1- Krick and Ick 2- Scourgelord Tyrannus */ class instance_pit_of_saron : public InstanceMapScript { public: instance_pit_of_saron() : InstanceMapScript("instance_pit_of_saron", 658) { } InstanceScript* GetInstanceScript(InstanceMap* pMap) const { return new instance_pit_of_saron_InstanceMapScript(pMap); } struct instance_pit_of_saron_InstanceMapScript : public InstanceScript { instance_pit_of_saron_InstanceMapScript(Map* pMap) : InstanceScript(pMap) {}; uint64 uiKrick; uint64 uiIck; uint64 uiGarfrost; uint64 uiTyrannus; uint64 uiRimefang; uint64 uiJainaOrSylvanas1; uint64 uiJainaOrSylvanas2; uint32 uiTeamInInstance; uint32 uiEncounter[MAX_ENCOUNTER]; void Initialize() { for (uint8 i = 0; i < MAX_ENCOUNTER; ++i) uiEncounter[i] = NOT_STARTED; uiGarfrost = 0; uiKrick = 0; uiIck = 0; uiTyrannus = 0; } bool IsEncounterInProgress() const { for (uint8 i = 0; i < MAX_ENCOUNTER; ++i) if (uiEncounter[i] == IN_PROGRESS) return true; return false; } void OnCreatureCreate(Creature* creature) { Map::PlayerList const &players = instance->GetPlayers(); if (!players.isEmpty()) { if (Player* pPlayer = players.begin()->getSource()) uiTeamInInstance = pPlayer->GetTeam(); } switch(creature->GetEntry()) { case CREATURE_KRICK: uiKrick = creature->GetGUID(); break; case CREATURE_ICK: uiIck = creature->GetGUID();<|fim▁hole|> case CREATURE_GARFROST: uiGarfrost = creature->GetGUID(); break; case CREATURE_TYRANNUS: uiTyrannus = creature->GetGUID(); break; case CREATURE_RIMEFANG: uiRimefang = creature->GetGUID(); break; case NPC_SYLVANAS_PART1: if (uiTeamInInstance == ALLIANCE) creature->UpdateEntry(NPC_JAINA_PART1, ALLIANCE); uiJainaOrSylvanas1 = creature->GetGUID(); break; case NPC_SYLVANAS_PART2: if (uiTeamInInstance == ALLIANCE) creature->UpdateEntry(NPC_JAINA_PART2, ALLIANCE); uiJainaOrSylvanas2 = creature->GetGUID(); break; case NPC_KILARA: if (uiTeamInInstance == ALLIANCE) creature->UpdateEntry(NPC_ELANDRA, ALLIANCE); break; case NPC_KORALEN: if (uiTeamInInstance == ALLIANCE) creature->UpdateEntry(NPC_KORLAEN, ALLIANCE); break; case NPC_CHAMPION_1_HORDE: if (uiTeamInInstance == ALLIANCE) creature->UpdateEntry(NPC_CHAMPION_1_ALLIANCE, ALLIANCE); break; case NPC_CHAMPION_2_HORDE: if (uiTeamInInstance == ALLIANCE) creature->UpdateEntry(NPC_CHAMPION_2_ALLIANCE, ALLIANCE); break; case NPC_CHAMPION_3_HORDE: // No 3rd set for Alliance? 
if (uiTeamInInstance == ALLIANCE) creature->UpdateEntry(NPC_CHAMPION_2_ALLIANCE, ALLIANCE); break; } } uint64 GetData64(uint32 identifier) { switch(identifier) { case DATA_GARFROST: return uiGarfrost; case DATA_KRICK: return uiKrick; case DATA_ICK: return uiIck; case DATA_TYRANNUS: return uiTyrannus; case DATA_RIMEFANG: return uiRimefang; case DATA_JAINA_SYLVANAS_1: return uiJainaOrSylvanas1; case DATA_JAINA_SYLVANAS_2: return uiJainaOrSylvanas2; } return 0; } void SetData(uint32 type, uint32 data) { switch(type) { case DATA_GARFROST_EVENT: uiEncounter[0] = data; break; case DATA_TYRANNUS_EVENT: uiEncounter[1] = data; break; case DATA_KRICKANDICK_EVENT: uiEncounter[2] = data; break; } if (data == DONE) SaveToDB(); } uint32 GetData(uint32 type) { switch(type) { case DATA_GARFROST_EVENT: return uiEncounter[0]; case DATA_TYRANNUS_EVENT: return uiEncounter[1]; case DATA_KRICKANDICK_EVENT: return uiEncounter[2]; } return 0; } std::string GetSaveData() { OUT_SAVE_INST_DATA; std::string str_data; std::ostringstream saveStream; saveStream << "P S " << uiEncounter[0] << " " << uiEncounter[1] << " " << uiEncounter[2]; str_data = saveStream.str(); OUT_SAVE_INST_DATA_COMPLETE; return str_data; } void Load(const char* in) { if (!in) { OUT_LOAD_INST_DATA_FAIL; return; } OUT_LOAD_INST_DATA(in); char dataHead1, dataHead2; uint16 data0, data1, data2; std::istringstream loadStream(in); loadStream >> dataHead1 >> dataHead2 >> data0 >> data1 >> data2; if (dataHead1 == 'P' && dataHead2 == 'S') { uiEncounter[0] = data0; uiEncounter[1] = data1; uiEncounter[2] = data2; for (uint8 i = 0; i < MAX_ENCOUNTER; ++i) if (uiEncounter[i] == IN_PROGRESS) uiEncounter[i] = NOT_STARTED; } else OUT_LOAD_INST_DATA_FAIL; OUT_LOAD_INST_DATA_COMPLETE; } }; }; void AddSC_instance_pit_of_saron() { new instance_pit_of_saron(); }<|fim▁end|>
break;
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from pivoteer import *<|fim▁hole|><|fim▁end|>
from pivotEngine import *
from pivotUtils import *
<|file_name|>LabeledStatementTransformer.ts<|end_file_name|><|fim▁begin|>import { inject, injectable, } from 'inversify'; import { ServiceIdentifiers } from '../../container/ServiceIdentifiers'; import * as estraverse from '@javascript-obfuscator/estraverse'; import * as ESTree from 'estree'; import { TNodeWithLexicalScope } from '../../types/node/TNodeWithLexicalScope'; import { IIdentifierReplacer } from '../../interfaces/node-transformers/rename-identifiers-transformers/replacer/IIdentifierReplacer'; import { IOptions } from '../../interfaces/options/IOptions'; import { IRandomGenerator } from '../../interfaces/utils/IRandomGenerator'; import { IVisitor } from '../../interfaces/node-transformers/IVisitor'; import { NodeTransformationStage } from '../../enums/node-transformers/NodeTransformationStage'; import { AbstractNodeTransformer } from '../AbstractNodeTransformer'; import { NodeGuards } from '../../node/NodeGuards'; import { NodeLexicalScopeUtils } from '../../node/NodeLexicalScopeUtils'; /** * replaces: * label: { * for (var i = 0; i < 1000; i++) { * break label; * } * } * * on: * _0x12d45f: { * for (var i = 0; i < 1000; i++) { * break _0x12d45f; * } * } * */ @injectable() export class LabeledStatementTransformer extends AbstractNodeTransformer { /** * @type {IIdentifierReplacer} */ private readonly identifierReplacer: IIdentifierReplacer; /** * @param {IIdentifierReplacer} identifierReplacer * @param {IRandomGenerator} randomGenerator * @param {IOptions} options */ public constructor ( @inject(ServiceIdentifiers.IIdentifierReplacer) identifierReplacer: IIdentifierReplacer, @inject(ServiceIdentifiers.IRandomGenerator) randomGenerator: IRandomGenerator, @inject(ServiceIdentifiers.IOptions) options: IOptions ) { super(randomGenerator, options); this.identifierReplacer = identifierReplacer; } /** * @param {NodeTransformationStage} nodeTransformationStage * @returns {IVisitor | null} */ public getVisitor (nodeTransformationStage: NodeTransformationStage): IVisitor | null { switch (nodeTransformationStage) { case NodeTransformationStage.RenameIdentifiers: return { enter: (node: ESTree.Node, parentNode: ESTree.Node | null): ESTree.Node | undefined => { if (parentNode && NodeGuards.isLabeledStatementNode(node)) { return this.transformNode(node, parentNode); } } }; default: return null; } } /** * @param {LabeledStatement} labeledStatementNode * @param {NodeGuards} parentNode * @returns {NodeGuards} */ public transformNode (labeledStatementNode: ESTree.LabeledStatement, parentNode: ESTree.Node): ESTree.Node { const lexicalScopeNode: TNodeWithLexicalScope | undefined = NodeLexicalScopeUtils.getLexicalScope(labeledStatementNode); if (!lexicalScopeNode) { return labeledStatementNode;<|fim▁hole|> this.storeLabeledStatementName(labeledStatementNode, lexicalScopeNode); this.replaceLabeledStatementName(labeledStatementNode, lexicalScopeNode); return labeledStatementNode; } /** * @param {LabeledStatement} labeledStatementNode * @param {TNodeWithLexicalScope} lexicalScopeNode */ private storeLabeledStatementName ( labeledStatementNode: ESTree.LabeledStatement, lexicalScopeNode: TNodeWithLexicalScope ): void { this.identifierReplacer.storeLocalName(labeledStatementNode.label, lexicalScopeNode); } /** * @param {LabeledStatement} labeledStatementNode * @param {TNodeWithLexicalScope} lexicalScopeNode */ private replaceLabeledStatementName ( labeledStatementNode: ESTree.LabeledStatement, lexicalScopeNode: TNodeWithLexicalScope ): void { estraverse.replace(labeledStatementNode, { enter: (node: 
ESTree.Node, parentNode: ESTree.Node | null): void => { if (parentNode && NodeGuards.isLabelIdentifierNode(node, parentNode)) { const newIdentifier: ESTree.Identifier = this.identifierReplacer .replace(node, lexicalScopeNode); node.name = newIdentifier.name; } } }); } }<|fim▁end|>
}
<|file_name|>792_number-of-matching-subsequences.py<|end_file_name|><|fim▁begin|>import bisect


class Solution:
    def numMatchingSubseq(self, S: str, words):
        d = {}
        for i, x in enumerate(S):
            if x not in d:
                d[x] = [i]
            else:
                d[x].append(i)<|fim▁hole|>
        ans = []
        for w in words:
            i = -1
            result = True
            for x in w:
                if x not in d:
                    result = False
                    break
                idx = bisect.bisect_left(d[x], i+1)
                if idx >= len(d[x]):
                    result = False
                    break
                i = d[x][idx]
            if result:
                ans.append(w)
        return len(ans)


print(Solution().numMatchingSubseq("abcde", ["a", "bb", "acd", "ace"]))<|fim▁end|>
<|file_name|>cksum.rs<|end_file_name|><|fim▁begin|>#![crate_name = "uu_cksum"] /* * This file is part of the uutils coreutils package. * * (c) Michael Gehring <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ #[macro_use] extern crate uucore; use std::fs::File; use std::io::{self, stdin, BufReader, Read}; #[cfg(not(windows))] use std::mem; use std::path::Path; include!(concat!(env!("OUT_DIR"), "/crc_table.rs")); static SYNTAX: &'static str = "[OPTIONS] [FILE]..."; static SUMMARY: &'static str = "Print CRC and size for each file";<|fim▁hole|>#[inline] fn crc_update(crc: u32, input: u8) -> u32 { (crc << 8) ^ CRC_TABLE[((crc >> 24) as usize ^ input as usize) & 0xFF] } #[inline] fn crc_final(mut crc: u32, mut length: usize) -> u32 { while length != 0 { crc = crc_update(crc, length as u8); length >>= 8; } !crc } #[cfg(windows)] fn init_byte_array() -> Vec<u8> { vec![0; 1024 * 1024] } #[cfg(not(windows))] fn init_byte_array() -> [u8; 1024 * 1024] { unsafe { mem::uninitialized() } } #[inline] fn cksum(fname: &str) -> io::Result<(u32, usize)> { let mut crc = 0u32; let mut size = 0usize; let file; let mut rd: Box<Read> = match fname { "-" => Box::new(stdin()), _ => { file = try!(File::open(&Path::new(fname))); Box::new(BufReader::new(file)) } }; let mut bytes = init_byte_array(); loop { match rd.read(&mut bytes) { Ok(num_bytes) => { if num_bytes == 0 { return Ok((crc_final(crc, size), size)); } for &b in bytes[..num_bytes].iter() { crc = crc_update(crc, b); } size += num_bytes; } Err(err) => return Err(err), } } //Ok((0 as u32,0 as usize)) } pub fn uumain(args: Vec<String>) -> i32 { let matches = new_coreopts!(SYNTAX, SUMMARY, LONG_HELP).parse(args); let files = matches.free; if files.is_empty() { match cksum("-") { Ok((crc, size)) => println!("{} {}", crc, size), Err(err) => { show_error!("{}", err); return 2; } } return 0; } let mut exit_code = 0; for fname in &files { match cksum(fname.as_ref()) { Ok((crc, size)) => println!("{} {} {}", crc, size, fname), Err(err) => { show_error!("'{}' {}", fname, err); exit_code = 2; } } } exit_code }<|fim▁end|>
static LONG_HELP: &'static str = "";
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>use alloc::boxed::Box; use collections::string::String; use core::fmt::{self, Debug, Display}; use core::marker::{Send, Sync, Reflect}; /// Base functionality for all errors in Rust. //#[stable(feature = "rust1", since = "1.0.0")] pub trait Error: Debug + Display + Reflect { /// A short description of the error. /// /// The description should not contain newlines or sentence-ending /// punctuation, to facilitate embedding in larger user-facing /// strings. //#[stable(feature = "rust1", since = "1.0.0")] fn description(&self) -> &str;<|fim▁hole|> /// The lower-level cause of this error, if any. //#[stable(feature = "rust1", since = "1.0.0")] fn cause(&self) -> Option<&Error> { None } } //#[stable(feature = "rust1", since = "1.0.0")] impl<'a, E: Error + 'a> From<E> for Box<Error + 'a> { fn from(err: E) -> Box<Error + 'a> { Box::new(err) } } //#[stable(feature = "rust1", since = "1.0.0")] impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<Error + Send + Sync + 'a> { fn from(err: E) -> Box<Error + Send + Sync + 'a> { Box::new(err) } } //#[stable(feature = "rust1", since = "1.0.0")] impl From<String> for Box<Error + Send + Sync> { fn from(err: String) -> Box<Error + Send + Sync> { #[derive(Debug)] struct StringError(String); impl Error for StringError { fn description(&self) -> &str { &self.0 } } impl Display for StringError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt(&self.0, f) } } Box::new(StringError(err)) } } //#[stable(feature = "string_box_error", since = "1.7.0")] impl From<String> for Box<Error> { fn from(str_err: String) -> Box<Error> { let err1: Box<Error + Send + Sync> = From::from(str_err); let err2: Box<Error> = err1; err2 } } //#[stable(feature = "rust1", since = "1.0.0")] impl<'a, 'b> From<&'b str> for Box<Error + Send + Sync + 'a> { fn from(err: &'b str) -> Box<Error + Send + Sync + 'a> { From::from(String::from(err)) } } //#[stable(feature = "string_box_error", since = "1.7.0")] impl<'a> From<&'a str> for Box<Error> { fn from(err: &'a str) -> Box<Error> { From::from(String::from(err)) } }<|fim▁end|>
<|file_name|>text.ts<|end_file_name|><|fim▁begin|>export function escapeText(text: string) { // http://www.javascriptkit.com/jsref/escapesequence.shtml // \b Backspace. // \f Form feed. // \n Newline. // \O Nul character. // \r Carriage return. // \t Horizontal tab. // \v Vertical tab. // \' Single quote or apostrophe. // \" Double quote. // \\ Backslash. // \ddd The Latin-1 character specified by the three octal digits between 0 and 377. // ie, copyright symbol is \251. // \xdd The Latin-1 character specified by the two hexadecimal digits dd between 00 and FF. // ie, copyright symbol is \xA9. // \udddd The Unicode character specified by the four hexadecimal digits dddd. // ie, copyright symbol is \u00A9. let _backspace = '\b'.charCodeAt(0); let _formFeed = '\f'.charCodeAt(0); let _newLine = '\n'.charCodeAt(0); let _nullChar = 0; let _carriageReturn = '\r'.charCodeAt(0); let _tab = '\t'.charCodeAt(0); let _verticalTab = '\v'.charCodeAt(0); let _backslash = '\\'.charCodeAt(0); let _doubleQuote = '"'.charCodeAt(0); let startPos = 0, chrCode, replaceWith = null, resultPieces = []; let len: number = text.length;<|fim▁hole|> for (let i = 0; i < len; i++) { chrCode = text.charCodeAt(i); switch (chrCode) { case _backspace: replaceWith = '\\b'; break; case _formFeed: replaceWith = '\\f'; break; case _newLine: replaceWith = '\\n'; break; case _nullChar: replaceWith = '\\0'; break; case _carriageReturn: replaceWith = '\\r'; break; case _tab: replaceWith = '\\t'; break; case _verticalTab: replaceWith = '\\v'; break; case _backslash: replaceWith = '\\\\'; break; case _doubleQuote: replaceWith = '\\"'; break; } if (replaceWith !== null) { resultPieces.push(text.substring(startPos, i)); resultPieces.push(replaceWith); startPos = i + 1; replaceWith = null; } } resultPieces.push(text.substring(startPos, len)); return resultPieces.join(''); }<|fim▁end|>
<|file_name|>StructureScopedBinder.ts<|end_file_name|><|fim▁begin|>import {ScopedStructure} from "./../../../../structures";
import {Scope} from "./../../../../definitions";
import {ScopedBinder} from "./../../../base";

export class StructureScopedBinder extends ScopedBinder {
    constructor(private readonly structure: ScopedStructure) {
        super();
    }
<|fim▁hole|>
    getScope() {
        return this.structure.scope || Scope.Public;
    }
}<|fim▁end|>
<|file_name|>Target.py<|end_file_name|><|fim▁begin|>'''
Kurgan AI Web Application Security Analyzer.
http://www.kurgan.com.br/

Author: Glaudson Ocampos - <[email protected]>
Created in May, 11th 2016.
'''

import requests
import config
import sys
import warnings
import validators

sys.path.append('../')

class Target(object):
    host = None
    method = ''
    headers = {}
    uri = ''
    webserver = ''
    scheme = ''
    port = ''
    path = ''
    url = ''
    baseUrl = ''

    def __init__(self):
        method = 'GET'
        headers = {'User-Agent':'Kurgan-AI/0.0.1'}
        uri = '/'
        scheme = 'http'

    def set_host(self, val):
        self.host = val

    def get_host(self):
        return self.host

    def set_port(self, val):
        self.port = val

    def get_port(self):
        return self.port

    def set_method(self, val):
        self.method = val

    def get_method(self):
        return self.method

    def set_headers(self,val):
        self.headers = val

    def get_headers(self):
        return self.headers

    def set_webserver(self, val):
        self.webserver = val

    def get_webserver(self):
        return self.webserver

    def set_scheme(self, val):
        self.scheme = val

    def get_scheme(self):
        return self.scheme

    def set_path(self,val):
        self.path = val

    def get_path(self):
        return self.path

    def set_url(self, val):
        self.url = val

    def get_url(self):
        return self.url

    def set_baseUrl(self, val):
        self.baseUrl = val

    def get_baseUrl(self):<|fim▁hole|>
    def send_request(self):
        warnings.filterwarnings('ignore')
        url = self.scheme+'://'+self.host+':'+str(self.port)+"/"+str(self.path)
        if config.FOLLOW_URL is True:
            r = requests.get(url, allow_redirects=True, verify=False)
        else:
            r = requests.get(url, allow_redirects=False, verify=False)
        return r

    def send_request_head(self, v_url):
        warnings.filterwarnings('ignore')
        if config.FOLLOW_URL is True:
            r = requests.request('HEAD',v_url, allow_redirects=True, verify=False)
        else:
            r = requests.request('HEAD',v_url, allow_redirects=False, verify=False)
        return r

    def get_options(self):
        url = self.scheme+'://'+self.host+':'+str(self.port)+"/"+str(self.path)
        r = requests.options(url)
        if r.status_code == 200:
            if 'allow' in r.headers:
                return r.headers['allow']
            else:
                return None<|fim▁end|>
        return self.baseUrl
<|file_name|>test_Sexp.py<|end_file_name|><|fim▁begin|>import unittest import copy import gc import rpy2.rinterface as rinterface rinterface.initr()<|fim▁hole|> x = "a" self.assertRaises(ValueError, rinterface.Sexp, x) def testNew(self): sexp = rinterface.baseenv.get("letters") sexp_new = rinterface.Sexp(sexp) idem = rinterface.baseenv.get("identical") self.assertTrue(idem(sexp, sexp_new)[0]) sexp_new2 = rinterface.Sexp(sexp) self.assertTrue(idem(sexp, sexp_new2)[0]) del(sexp) self.assertTrue(idem(sexp_new, sexp_new2)[0]) def testTypeof_get(self): sexp = rinterface.baseenv.get("letters") self.assertEquals(sexp.typeof, rinterface.STRSXP) sexp = rinterface.baseenv.get("pi") self.assertEquals(sexp.typeof, rinterface.REALSXP) sexp = rinterface.baseenv.get("plot") self.assertEquals(sexp.typeof, rinterface.CLOSXP) def testDo_slot(self): data_func = rinterface.baseenv.get("data") data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP)) sexp = rinterface.globalenv.get("iris") names = sexp.do_slot("names") iris_names = ("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species") self.assertEquals(len(iris_names), len(names)) for i, n in enumerate(iris_names): self.assertEquals(iris_names[i], names[i]) self.assertRaises(LookupError, sexp.do_slot, "foo") def testDo_slot_assign(self): data_func = rinterface.baseenv.get("data") data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP)) sexp = rinterface.globalenv.get("iris") iris_names = rinterface.StrSexpVector(['a', 'b', 'c', 'd', 'e']) sexp.do_slot_assign("names", iris_names) names = [x for x in sexp.do_slot("names")] self.assertEquals(['a', 'b', 'c', 'd', 'e'], names) def testDo_slot_assign_create(self): #test that assigning slots is also creating the slot x = rinterface.IntSexpVector([1,2,3]) x.do_slot_assign("foo", rinterface.StrSexpVector(["bar", ])) slot = x.do_slot("foo") self.assertEquals(1, len(slot)) self.assertEquals("bar", slot[0]) def testSexp_rsame_true(self): sexp_a = rinterface.baseenv.get("letters") sexp_b = rinterface.baseenv.get("letters") self.assertTrue(sexp_a.rsame(sexp_b)) def testSexp_rsame_false(self): sexp_a = rinterface.baseenv.get("letters") sexp_b = rinterface.baseenv.get("pi") self.assertFalse(sexp_a.rsame(sexp_b)) def testSexp_rsame_wrongType(self): sexp_a = rinterface.baseenv.get("letters") self.assertRaises(ValueError, sexp_a.rsame, 'foo') def testSexp_sexp(self): sexp = rinterface.IntSexpVector([1,2,3]) cobj = sexp.__sexp__ sexp = rinterface.IntSexpVector([4,5,6,7]) self.assertEquals(4, len(sexp)) sexp.__sexp__ = cobj self.assertEquals(3, len(sexp)) def testSexp_sexp_wrongtypeof(self): sexp = rinterface.IntSexpVector([1,2,3]) cobj = sexp.__sexp__ sexp = rinterface.StrSexpVector(['a', 'b']) self.assertEquals(2, len(sexp)) self.assertRaises(ValueError, sexp.__setattr__, '__sexp__', cobj) def testSexp_sexp_destroyCobj(self): sexp = rinterface.IntSexpVector([1,2,3]) cobj = sexp.__sexp__ del(cobj) gc.collect() # no real test, just make sure that it does # not cause a segfault def testSexp_deepcopy(self): sexp = rinterface.IntSexpVector([1,2,3]) self.assertEquals(0, sexp.named) rinterface.baseenv.get("identity")(sexp) self.assertEquals(2, sexp.named) sexp2 = sexp.__deepcopy__() self.assertEquals(sexp.typeof, sexp2.typeof) self.assertEquals(list(sexp), list(sexp2)) self.assertFalse(sexp.rsame(sexp2)) self.assertEquals(0, sexp2.named) # should be the same as above, but just in case: sexp3 = copy.deepcopy(sexp) self.assertEquals(sexp.typeof, sexp3.typeof) self.assertEquals(list(sexp), list(sexp3)) 
self.assertFalse(sexp.rsame(sexp3)) self.assertEquals(0, sexp3.named) def suite(): suite = unittest.TestLoader().loadTestsFromTestCase(SexpTestCase) return suite if __name__ == '__main__': tr = unittest.TextTestRunner(verbosity = 2) tr.run(suite())<|fim▁end|>
class SexpTestCase(unittest.TestCase): def testNew_invalid(self):
<|file_name|>vision-gen.go<|end_file_name|><|fim▁begin|>// Package vision provides access to the Cloud Vision API. // // See https://cloud.google.com/vision/ // // Usage example: // // import "google.golang.org/api/vision/v1" // ... // visionService, err := vision.New(oauthHttpClient) package vision // import "google.golang.org/api/vision/v1" import ( "bytes" "encoding/json" "errors" "fmt" context "golang.org/x/net/context" ctxhttp "golang.org/x/net/context/ctxhttp" gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = ctxhttp.Do const apiId = "vision:v1" const apiName = "vision" const apiVersion = "v1" const basePath = "https://vision.googleapis.com/" // OAuth2 scopes used by this API. const ( // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // Apply machine learning models to understand and label images CloudVisionScope = "https://www.googleapis.com/auth/cloud-vision" ) func New(client *http.Client) (*Service, error) {<|fim▁hole|> s.Files = NewFilesService(s) s.Images = NewImagesService(s) s.Locations = NewLocationsService(s) s.Operations = NewOperationsService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Files *FilesService Images *ImagesService Locations *LocationsService Operations *OperationsService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewFilesService(s *Service) *FilesService { rs := &FilesService{s: s} return rs } type FilesService struct { s *Service } func NewImagesService(s *Service) *ImagesService { rs := &ImagesService{s: s} return rs } type ImagesService struct { s *Service } func NewLocationsService(s *Service) *LocationsService { rs := &LocationsService{s: s} rs.Operations = NewLocationsOperationsService(s) return rs } type LocationsService struct { s *Service Operations *LocationsOperationsService } func NewLocationsOperationsService(s *Service) *LocationsOperationsService { rs := &LocationsOperationsService{s: s} return rs } type LocationsOperationsService struct { s *Service } func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs } type OperationsService struct { s *Service } // AnnotateFileResponse: Response to a single file annotation request. A // file may contain one or more // images, which individually have their own responses. type AnnotateFileResponse struct { // InputConfig: Information about the file for which this response is // generated. InputConfig *InputConfig `json:"inputConfig,omitempty"` // Responses: Individual responses to images found within the file. Responses []*AnnotateImageResponse `json:"responses,omitempty"` // ForceSendFields is a list of field names (e.g. "InputConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "InputConfig") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AnnotateFileResponse) MarshalJSON() ([]byte, error) { type NoMethod AnnotateFileResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AnnotateImageRequest: Request for performing Google Cloud Vision API // tasks over a user-provided // image, with user-requested features. type AnnotateImageRequest struct { // Features: Requested features. Features []*Feature `json:"features,omitempty"` // Image: The image to be processed. Image *Image `json:"image,omitempty"` // ImageContext: Additional context that may accompany the image. ImageContext *ImageContext `json:"imageContext,omitempty"` // ForceSendFields is a list of field names (e.g. "Features") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Features") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AnnotateImageRequest) MarshalJSON() ([]byte, error) { type NoMethod AnnotateImageRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AnnotateImageResponse: Response to an image annotation request. type AnnotateImageResponse struct { // Context: If present, contextual information is needed to understand // where this image // comes from. Context *ImageAnnotationContext `json:"context,omitempty"` // CropHintsAnnotation: If present, crop hints have completed // successfully. CropHintsAnnotation *CropHintsAnnotation `json:"cropHintsAnnotation,omitempty"` // Error: If set, represents the error message for the operation. // Note that filled-in image annotations are guaranteed to be // correct, even when `error` is set. Error *Status `json:"error,omitempty"` // FaceAnnotations: If present, face detection has completed // successfully. FaceAnnotations []*FaceAnnotation `json:"faceAnnotations,omitempty"` // FullTextAnnotation: If present, text (OCR) detection or document // (OCR) text detection has // completed successfully. // This annotation provides the structural hierarchy for the OCR // detected // text. 
FullTextAnnotation *TextAnnotation `json:"fullTextAnnotation,omitempty"` // ImagePropertiesAnnotation: If present, image properties were // extracted successfully. ImagePropertiesAnnotation *ImageProperties `json:"imagePropertiesAnnotation,omitempty"` // LabelAnnotations: If present, label detection has completed // successfully. LabelAnnotations []*EntityAnnotation `json:"labelAnnotations,omitempty"` // LandmarkAnnotations: If present, landmark detection has completed // successfully. LandmarkAnnotations []*EntityAnnotation `json:"landmarkAnnotations,omitempty"` // LogoAnnotations: If present, logo detection has completed // successfully. LogoAnnotations []*EntityAnnotation `json:"logoAnnotations,omitempty"` // SafeSearchAnnotation: If present, safe-search annotation has // completed successfully. SafeSearchAnnotation *SafeSearchAnnotation `json:"safeSearchAnnotation,omitempty"` // TextAnnotations: If present, text (OCR) detection has completed // successfully. TextAnnotations []*EntityAnnotation `json:"textAnnotations,omitempty"` // WebDetection: If present, web detection has completed successfully. WebDetection *WebDetection `json:"webDetection,omitempty"` // ForceSendFields is a list of field names (e.g. "Context") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Context") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AnnotateImageResponse) MarshalJSON() ([]byte, error) { type NoMethod AnnotateImageResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AsyncAnnotateFileRequest: An offline file annotation request. type AsyncAnnotateFileRequest struct { // Features: Required. Requested features. Features []*Feature `json:"features,omitempty"` // ImageContext: Additional context that may accompany the image(s) in // the file. ImageContext *ImageContext `json:"imageContext,omitempty"` // InputConfig: Required. Information about the input file. InputConfig *InputConfig `json:"inputConfig,omitempty"` // OutputConfig: Required. The desired output location and metadata // (e.g. format). OutputConfig *OutputConfig `json:"outputConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "Features") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Features") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AsyncAnnotateFileRequest) MarshalJSON() ([]byte, error) { type NoMethod AsyncAnnotateFileRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AsyncAnnotateFileResponse: The response for a single offline file // annotation request. type AsyncAnnotateFileResponse struct { // OutputConfig: The output location and metadata from // AsyncAnnotateFileRequest. OutputConfig *OutputConfig `json:"outputConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "OutputConfig") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AsyncAnnotateFileResponse) MarshalJSON() ([]byte, error) { type NoMethod AsyncAnnotateFileResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AsyncBatchAnnotateFilesRequest: Multiple async file annotation // requests are batched into a single service // call. type AsyncBatchAnnotateFilesRequest struct { // Requests: Individual async file annotation requests for this batch. Requests []*AsyncAnnotateFileRequest `json:"requests,omitempty"` // ForceSendFields is a list of field names (e.g. "Requests") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Requests") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AsyncBatchAnnotateFilesRequest) MarshalJSON() ([]byte, error) { type NoMethod AsyncBatchAnnotateFilesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AsyncBatchAnnotateFilesResponse: Response to an async batch file // annotation request. type AsyncBatchAnnotateFilesResponse struct { // Responses: The list of file annotation responses, one for each // request in // AsyncBatchAnnotateFilesRequest. 
Responses []*AsyncAnnotateFileResponse `json:"responses,omitempty"` // ForceSendFields is a list of field names (e.g. "Responses") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Responses") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AsyncBatchAnnotateFilesResponse) MarshalJSON() ([]byte, error) { type NoMethod AsyncBatchAnnotateFilesResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BatchAnnotateImagesRequest: Multiple image annotation requests are // batched into a single service call. type BatchAnnotateImagesRequest struct { // Requests: Individual image annotation requests for this batch. Requests []*AnnotateImageRequest `json:"requests,omitempty"` // ForceSendFields is a list of field names (e.g. "Requests") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Requests") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BatchAnnotateImagesRequest) MarshalJSON() ([]byte, error) { type NoMethod BatchAnnotateImagesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BatchAnnotateImagesResponse: Response to a batch image annotation // request. type BatchAnnotateImagesResponse struct { // Responses: Individual responses to image annotation requests within // the batch. Responses []*AnnotateImageResponse `json:"responses,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Responses") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Responses") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *BatchAnnotateImagesResponse) MarshalJSON() ([]byte, error) { type NoMethod BatchAnnotateImagesResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Block: Logical element on the page. type Block struct { // BlockType: Detected block type (text, image etc) for this block. // // Possible values: // "UNKNOWN" - Unknown block type. // "TEXT" - Regular text block. // "TABLE" - Table block. // "PICTURE" - Image block. // "RULER" - Horizontal/vertical line box. // "BARCODE" - Barcode block. BlockType string `json:"blockType,omitempty"` // BoundingBox: The bounding box for the block. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. // For example: // // * when the text is horizontal it might look like: // // 0----1 // | | // 3----2 // // * when it's rotated 180 degrees around the top-left corner it // becomes: // // 2----3 // | | // 1----0 // // and the vertice order will still be (0, 1, 2, 3). BoundingBox *BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results on the block. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Paragraphs: List of paragraphs in this block (if this blocks is of // type text). Paragraphs []*Paragraph `json:"paragraphs,omitempty"` // Property: Additional information detected for the block. Property *TextProperty `json:"property,omitempty"` // ForceSendFields is a list of field names (e.g. "BlockType") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BlockType") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Block) MarshalJSON() ([]byte, error) { type NoMethod Block raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Block) UnmarshalJSON(data []byte) error { type NoMethod Block var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // BoundingPoly: A bounding polygon for the detected image annotation. type BoundingPoly struct { // NormalizedVertices: The bounding polygon normalized vertices. NormalizedVertices []*NormalizedVertex `json:"normalizedVertices,omitempty"` // Vertices: The bounding polygon vertices. 
Vertices []*Vertex `json:"vertices,omitempty"` // ForceSendFields is a list of field names (e.g. "NormalizedVertices") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NormalizedVertices") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *BoundingPoly) MarshalJSON() ([]byte, error) { type NoMethod BoundingPoly raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for // Operations.CancelOperation. type CancelOperationRequest struct { } // Color: Represents a color in the RGBA color space. This // representation is designed // for simplicity of conversion to/from color representations in // various // languages over compactness; for example, the fields of this // representation // can be trivially provided to the constructor of "java.awt.Color" in // Java; it // can also be trivially provided to UIColor's // "+colorWithRed:green:blue:alpha" // method in iOS; and, with just a little work, it can be easily // formatted into // a CSS "rgba()" string in JavaScript, as well. Here are some // examples: // // Example (Java): // // import com.google.type.Color; // // // ... // public static java.awt.Color fromProto(Color protocolor) { // float alpha = protocolor.hasAlpha() // ? protocolor.getAlpha().getValue() // : 1.0; // // return new java.awt.Color( // protocolor.getRed(), // protocolor.getGreen(), // protocolor.getBlue(), // alpha); // } // // public static Color toProto(java.awt.Color color) { // float red = (float) color.getRed(); // float green = (float) color.getGreen(); // float blue = (float) color.getBlue(); // float denominator = 255.0; // Color.Builder resultBuilder = // Color // .newBuilder() // .setRed(red / denominator) // .setGreen(green / denominator) // .setBlue(blue / denominator); // int alpha = color.getAlpha(); // if (alpha != 255) { // result.setAlpha( // FloatValue // .newBuilder() // .setValue(((float) alpha) / denominator) // .build()); // } // return resultBuilder.build(); // } // // ... // // Example (iOS / Obj-C): // // // ... 
// static UIColor* fromProto(Color* protocolor) { // float red = [protocolor red]; // float green = [protocolor green]; // float blue = [protocolor blue]; // FloatValue* alpha_wrapper = [protocolor alpha]; // float alpha = 1.0; // if (alpha_wrapper != nil) { // alpha = [alpha_wrapper value]; // } // return [UIColor colorWithRed:red green:green blue:blue // alpha:alpha]; // } // // static Color* toProto(UIColor* color) { // CGFloat red, green, blue, alpha; // if (![color getRed:&red green:&green blue:&blue // alpha:&alpha]) { // return nil; // } // Color* result = [Color alloc] init]; // [result setRed:red]; // [result setGreen:green]; // [result setBlue:blue]; // if (alpha <= 0.9999) { // [result setAlpha:floatWrapperWithValue(alpha)]; // } // [result autorelease]; // return result; // } // // ... // // Example (JavaScript): // // // ... // // var protoToCssColor = function(rgb_color) { // var redFrac = rgb_color.red || 0.0; // var greenFrac = rgb_color.green || 0.0; // var blueFrac = rgb_color.blue || 0.0; // var red = Math.floor(redFrac * 255); // var green = Math.floor(greenFrac * 255); // var blue = Math.floor(blueFrac * 255); // // if (!('alpha' in rgb_color)) { // return rgbToCssColor_(red, green, blue); // } // // var alphaFrac = rgb_color.alpha.value || 0.0; // var rgbParams = [red, green, blue].join(','); // return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); // }; // // var rgbToCssColor_ = function(red, green, blue) { // var rgbNumber = new Number((red << 16) | (green << 8) | blue); // var hexString = rgbNumber.toString(16); // var missingZeros = 6 - hexString.length; // var resultBuilder = ['#']; // for (var i = 0; i < missingZeros; i++) { // resultBuilder.push('0'); // } // resultBuilder.push(hexString); // return resultBuilder.join(''); // }; // // // ... type Color struct { // Alpha: The fraction of this color that should be applied to the // pixel. That is, // the final pixel color is defined by the equation: // // pixel color = alpha * (this color) + (1.0 - alpha) * (background // color) // // This means that a value of 1.0 corresponds to a solid color, // whereas // a value of 0.0 corresponds to a completely transparent color. // This // uses a wrapper message rather than a simple float scalar so that it // is // possible to distinguish between a default value and the value being // unset. // If omitted, this color object is to be rendered as a solid color // (as if the alpha value had been explicitly given with a value of // 1.0). Alpha float64 `json:"alpha,omitempty"` // Blue: The amount of blue in the color as a value in the interval [0, // 1]. Blue float64 `json:"blue,omitempty"` // Green: The amount of green in the color as a value in the interval // [0, 1]. Green float64 `json:"green,omitempty"` // Red: The amount of red in the color as a value in the interval [0, // 1]. Red float64 `json:"red,omitempty"` // ForceSendFields is a list of field names (e.g. "Alpha") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Alpha") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Color) MarshalJSON() ([]byte, error) { type NoMethod Color raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Color) UnmarshalJSON(data []byte) error { type NoMethod Color var s1 struct { Alpha gensupport.JSONFloat64 `json:"alpha"` Blue gensupport.JSONFloat64 `json:"blue"` Green gensupport.JSONFloat64 `json:"green"` Red gensupport.JSONFloat64 `json:"red"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Alpha = float64(s1.Alpha) s.Blue = float64(s1.Blue) s.Green = float64(s1.Green) s.Red = float64(s1.Red) return nil } // ColorInfo: Color information consists of RGB channels, score, and the // fraction of // the image that the color occupies in the image. type ColorInfo struct { // Color: RGB components of the color. Color *Color `json:"color,omitempty"` // PixelFraction: The fraction of pixels the color occupies in the // image. // Value in range [0, 1]. PixelFraction float64 `json:"pixelFraction,omitempty"` // Score: Image-specific score for this color. Value in range [0, 1]. Score float64 `json:"score,omitempty"` // ForceSendFields is a list of field names (e.g. "Color") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Color") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ColorInfo) MarshalJSON() ([]byte, error) { type NoMethod ColorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *ColorInfo) UnmarshalJSON(data []byte) error { type NoMethod ColorInfo var s1 struct { PixelFraction gensupport.JSONFloat64 `json:"pixelFraction"` Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.PixelFraction = float64(s1.PixelFraction) s.Score = float64(s1.Score) return nil } // CropHint: Single crop hint that is used to generate a new crop when // serving an image. type CropHint struct { // BoundingPoly: The bounding polygon for the crop region. The // coordinates of the bounding // box are in the original image's scale, as returned in `ImageParams`. BoundingPoly *BoundingPoly `json:"boundingPoly,omitempty"` // Confidence: Confidence of this being a salient region. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // ImportanceFraction: Fraction of importance of this salient region // with respect to the original // image. ImportanceFraction float64 `json:"importanceFraction,omitempty"` // ForceSendFields is a list of field names (e.g. 
"BoundingPoly") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingPoly") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CropHint) MarshalJSON() ([]byte, error) { type NoMethod CropHint raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *CropHint) UnmarshalJSON(data []byte) error { type NoMethod CropHint var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` ImportanceFraction gensupport.JSONFloat64 `json:"importanceFraction"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) s.ImportanceFraction = float64(s1.ImportanceFraction) return nil } // CropHintsAnnotation: Set of crop hints that are used to generate new // crops when serving images. type CropHintsAnnotation struct { // CropHints: Crop hint results. CropHints []*CropHint `json:"cropHints,omitempty"` // ForceSendFields is a list of field names (e.g. "CropHints") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CropHints") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CropHintsAnnotation) MarshalJSON() ([]byte, error) { type NoMethod CropHintsAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CropHintsParams: Parameters for crop hints annotation request. type CropHintsParams struct { // AspectRatios: Aspect ratios in floats, representing the ratio of the // width to the height // of the image. For example, if the desired aspect ratio is 4/3, // the // corresponding float value should be 1.33333. If not specified, // the // best possible crop is returned. The number of provided aspect ratios // is // limited to a maximum of 16; any aspect ratios provided after the 16th // are // ignored. AspectRatios []float64 `json:"aspectRatios,omitempty"` // ForceSendFields is a list of field names (e.g. "AspectRatios") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AspectRatios") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CropHintsParams) MarshalJSON() ([]byte, error) { type NoMethod CropHintsParams raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DetectedBreak: Detected start or end of a structural component. type DetectedBreak struct { // IsPrefix: True if break prepends the element. IsPrefix bool `json:"isPrefix,omitempty"` // Type: Detected break type. // // Possible values: // "UNKNOWN" - Unknown break label type. // "SPACE" - Regular space. // "SURE_SPACE" - Sure space (very wide). // "EOL_SURE_SPACE" - Line-wrapping break. // "HYPHEN" - End-line hyphen that is not present in text; does not // co-occur with // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`. // "LINE_BREAK" - Line break that ends a paragraph. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "IsPrefix") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "IsPrefix") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *DetectedBreak) MarshalJSON() ([]byte, error) { type NoMethod DetectedBreak raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DetectedLanguage: Detected language for a structural component. type DetectedLanguage struct { // Confidence: Confidence of detected language. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // LanguageCode: The BCP-47 language code, such as "en-US" or "sr-Latn". // For more // information, // see // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. LanguageCode string `json:"languageCode,omitempty"` // ForceSendFields is a list of field names (e.g. "Confidence") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Confidence") to include in // API requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *DetectedLanguage) MarshalJSON() ([]byte, error) { type NoMethod DetectedLanguage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *DetectedLanguage) UnmarshalJSON(data []byte) error { type NoMethod DetectedLanguage var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // DominantColorsAnnotation: Set of dominant colors and their // corresponding scores. type DominantColorsAnnotation struct { // Colors: RGB color values with their score and pixel fraction. Colors []*ColorInfo `json:"colors,omitempty"` // ForceSendFields is a list of field names (e.g. "Colors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Colors") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *DominantColorsAnnotation) MarshalJSON() ([]byte, error) { type NoMethod DominantColorsAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining // duplicated // empty messages in your APIs. A typical example is to use it as the // request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns // (google.protobuf.Empty); // } // // The JSON representation for `Empty` is empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` } // EntityAnnotation: Set of detected entity features. type EntityAnnotation struct { // BoundingPoly: Image region to which this entity belongs. Not // produced // for `LABEL_DETECTION` features. BoundingPoly *BoundingPoly `json:"boundingPoly,omitempty"` // Confidence: **Deprecated. Use `score` instead.** // The accuracy of the entity detection in an image. // For example, for an image in which the "Eiffel Tower" entity is // detected, // this field represents the confidence that there is a tower in the // query // image. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Description: Entity textual description, expressed in its `locale` // language. Description string `json:"description,omitempty"` // Locale: The language code for the locale in which the entity // textual // `description` is expressed. 
Locale string `json:"locale,omitempty"` // Locations: The location information for the detected entity. // Multiple // `LocationInfo` elements can be present because one location // may // indicate the location of the scene in the image, and another // location // may indicate the location of the place where the image was // taken. // Location information is usually present for landmarks. Locations []*LocationInfo `json:"locations,omitempty"` // Mid: Opaque entity ID. Some IDs may be available in // [Google Knowledge Graph // Search // API](https://developers.google.com/knowledge-graph/). Mid string `json:"mid,omitempty"` // Properties: Some entities may have optional user-supplied `Property` // (name/value) // fields, such a score or string that qualifies the entity. Properties []*Property `json:"properties,omitempty"` // Score: Overall score of the result. Range [0, 1]. Score float64 `json:"score,omitempty"` // Topicality: The relevancy of the ICA (Image Content Annotation) label // to the // image. For example, the relevancy of "tower" is likely higher to an // image // containing the detected "Eiffel Tower" than to an image containing // a // detected distant towering building, even though the confidence // that // there is a tower in each image may be the same. Range [0, 1]. Topicality float64 `json:"topicality,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingPoly") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingPoly") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *EntityAnnotation) MarshalJSON() ([]byte, error) { type NoMethod EntityAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *EntityAnnotation) UnmarshalJSON(data []byte) error { type NoMethod EntityAnnotation var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` Score gensupport.JSONFloat64 `json:"score"` Topicality gensupport.JSONFloat64 `json:"topicality"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) s.Score = float64(s1.Score) s.Topicality = float64(s1.Topicality) return nil } // FaceAnnotation: A face annotation object contains the results of face // detection. type FaceAnnotation struct { // AngerLikelihood: Anger likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. 
AngerLikelihood string `json:"angerLikelihood,omitempty"` // BlurredLikelihood: Blurred likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. BlurredLikelihood string `json:"blurredLikelihood,omitempty"` // BoundingPoly: The bounding polygon around the face. The coordinates // of the bounding box // are in the original image's scale, as returned in `ImageParams`. // The bounding box is computed to "frame" the face in accordance with // human // expectations. It is based on the landmarker results. // Note that one or more x and/or y coordinates may not be generated in // the // `BoundingPoly` (the polygon will be unbounded) if only a partial // face // appears in the image to be annotated. BoundingPoly *BoundingPoly `json:"boundingPoly,omitempty"` // DetectionConfidence: Detection confidence. Range [0, 1]. DetectionConfidence float64 `json:"detectionConfidence,omitempty"` // FdBoundingPoly: The `fd_bounding_poly` bounding polygon is tighter // than the // `boundingPoly`, and encloses only the skin part of the face. // Typically, it // is used to eliminate the face from any image analysis that detects // the // "amount of skin" visible in an image. It is not based on // the // landmarker results, only on the initial face detection, hence // the <code>fd</code> (face detection) prefix. FdBoundingPoly *BoundingPoly `json:"fdBoundingPoly,omitempty"` // HeadwearLikelihood: Headwear likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. HeadwearLikelihood string `json:"headwearLikelihood,omitempty"` // JoyLikelihood: Joy likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. JoyLikelihood string `json:"joyLikelihood,omitempty"` // LandmarkingConfidence: Face landmarking confidence. Range [0, 1]. LandmarkingConfidence float64 `json:"landmarkingConfidence,omitempty"` // Landmarks: Detected face landmarks. Landmarks []*Landmark `json:"landmarks,omitempty"` // PanAngle: Yaw angle, which indicates the leftward/rightward angle // that the face is // pointing relative to the vertical plane perpendicular to the image. // Range // [-180,180]. 
PanAngle float64 `json:"panAngle,omitempty"` // RollAngle: Roll angle, which indicates the amount of // clockwise/anti-clockwise rotation // of the face relative to the image vertical about the axis // perpendicular to // the face. Range [-180,180]. RollAngle float64 `json:"rollAngle,omitempty"` // SorrowLikelihood: Sorrow likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. SorrowLikelihood string `json:"sorrowLikelihood,omitempty"` // SurpriseLikelihood: Surprise likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. SurpriseLikelihood string `json:"surpriseLikelihood,omitempty"` // TiltAngle: Pitch angle, which indicates the upwards/downwards angle // that the face is // pointing relative to the image's horizontal plane. Range [-180,180]. TiltAngle float64 `json:"tiltAngle,omitempty"` // UnderExposedLikelihood: Under-exposed likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. UnderExposedLikelihood string `json:"underExposedLikelihood,omitempty"` // ForceSendFields is a list of field names (e.g. "AngerLikelihood") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AngerLikelihood") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. 
NullFields []string `json:"-"` } func (s *FaceAnnotation) MarshalJSON() ([]byte, error) { type NoMethod FaceAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *FaceAnnotation) UnmarshalJSON(data []byte) error { type NoMethod FaceAnnotation var s1 struct { DetectionConfidence gensupport.JSONFloat64 `json:"detectionConfidence"` LandmarkingConfidence gensupport.JSONFloat64 `json:"landmarkingConfidence"` PanAngle gensupport.JSONFloat64 `json:"panAngle"` RollAngle gensupport.JSONFloat64 `json:"rollAngle"` TiltAngle gensupport.JSONFloat64 `json:"tiltAngle"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.DetectionConfidence = float64(s1.DetectionConfidence) s.LandmarkingConfidence = float64(s1.LandmarkingConfidence) s.PanAngle = float64(s1.PanAngle) s.RollAngle = float64(s1.RollAngle) s.TiltAngle = float64(s1.TiltAngle) return nil } // Feature: The type of Google Cloud Vision API detection to perform, // and the maximum // number of results to return for that type. Multiple `Feature` objects // can // be specified in the `features` list. type Feature struct { // MaxResults: Maximum number of results of this type. Does not apply // to // `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`. MaxResults int64 `json:"maxResults,omitempty"` // Model: Model to use for the feature. // Supported values: "builtin/stable" (the default if unset) // and // "builtin/latest". Model string `json:"model,omitempty"` // Type: The feature type. // // Possible values: // "TYPE_UNSPECIFIED" - Unspecified feature type. // "FACE_DETECTION" - Run face detection. // "LANDMARK_DETECTION" - Run landmark detection. // "LOGO_DETECTION" - Run logo detection. // "LABEL_DETECTION" - Run label detection. // "TEXT_DETECTION" - Run text detection / optical character // recognition (OCR). Text detection // is optimized for areas of text within a larger image; if the image // is // a document, use `DOCUMENT_TEXT_DETECTION` instead. // "DOCUMENT_TEXT_DETECTION" - Run dense text document OCR. Takes // precedence when both // `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present. // "SAFE_SEARCH_DETECTION" - Run Safe Search to detect potentially // unsafe // or undesirable content. // "IMAGE_PROPERTIES" - Compute a set of image properties, such as // the // image's dominant colors. // "CROP_HINTS" - Run crop hints. // "WEB_DETECTION" - Run web detection. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxResults") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "MaxResults") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
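// Illustrative sketch, not generated code: Feature is a request-side
// message, so ForceSendFields and NullFields are how a caller forces empty
// or null values onto the wire. For example, to send an explicit
// "maxResults": 0 rather than omitting the field:
//
//	feat := &Feature{
//		Type:            "LABEL_DETECTION",
//		MaxResults:      0,
//		ForceSendFields: []string{"MaxResults"},
//	}
//	body, err := feat.MarshalJSON()
//	if err != nil {
//		// handle error
//	}
//	// body now contains "maxResults":0
//
// How the Feature is attached to a request is outside this excerpt and is
// left as an assumption.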
NullFields []string `json:"-"` } func (s *Feature) MarshalJSON() ([]byte, error) { type NoMethod Feature raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GcsDestination: The Google Cloud Storage location where the output // will be written to. type GcsDestination struct { // Uri: Google Cloud Storage URI where the results will be stored. // Results will // be in JSON format and preceded by its corresponding input URI. This // field // can either represent a single file, or a prefix for multiple // outputs. // Prefixes must end in a `/`. // // Examples: // // * File: gs://bucket-name/filename.json // * Prefix: gs://bucket-name/prefix/here/ // * File: gs://bucket-name/prefix/here // // If multiple outputs, each response is still AnnotateFileResponse, // each of // which contains some subset of the full list of // AnnotateImageResponse. // Multiple outputs can happen if, for example, the output JSON is too // large // and overflows into multiple sharded files. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "Uri") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Uri") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GcsDestination) MarshalJSON() ([]byte, error) { type NoMethod GcsDestination raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GcsSource: The Google Cloud Storage location where the input will be // read from. type GcsSource struct { // Uri: Google Cloud Storage URI for the input file. This must only be // a // Google Cloud Storage object. Wildcards are not currently supported. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "Uri") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Uri") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GcsSource) MarshalJSON() ([]byte, error) { type NoMethod GcsSource raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1AnnotateFileResponse: Response to a single // file annotation request. 
A file may contain one or more // images, which individually have their own responses. type GoogleCloudVisionV1p2beta1AnnotateFileResponse struct { // InputConfig: Information about the file for which this response is // generated. InputConfig *GoogleCloudVisionV1p2beta1InputConfig `json:"inputConfig,omitempty"` // Responses: Individual responses to images found within the file. Responses []*GoogleCloudVisionV1p2beta1AnnotateImageResponse `json:"responses,omitempty"` // ForceSendFields is a list of field names (e.g. "InputConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "InputConfig") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1AnnotateFileResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1AnnotateFileResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1AnnotateImageResponse: Response to an image // annotation request. type GoogleCloudVisionV1p2beta1AnnotateImageResponse struct { // Context: If present, contextual information is needed to understand // where this image // comes from. Context *GoogleCloudVisionV1p2beta1ImageAnnotationContext `json:"context,omitempty"` // CropHintsAnnotation: If present, crop hints have completed // successfully. CropHintsAnnotation *GoogleCloudVisionV1p2beta1CropHintsAnnotation `json:"cropHintsAnnotation,omitempty"` // Error: If set, represents the error message for the operation. // Note that filled-in image annotations are guaranteed to be // correct, even when `error` is set. Error *Status `json:"error,omitempty"` // FaceAnnotations: If present, face detection has completed // successfully. FaceAnnotations []*GoogleCloudVisionV1p2beta1FaceAnnotation `json:"faceAnnotations,omitempty"` // FullTextAnnotation: If present, text (OCR) detection or document // (OCR) text detection has // completed successfully. // This annotation provides the structural hierarchy for the OCR // detected // text. FullTextAnnotation *GoogleCloudVisionV1p2beta1TextAnnotation `json:"fullTextAnnotation,omitempty"` // ImagePropertiesAnnotation: If present, image properties were // extracted successfully. ImagePropertiesAnnotation *GoogleCloudVisionV1p2beta1ImageProperties `json:"imagePropertiesAnnotation,omitempty"` // LabelAnnotations: If present, label detection has completed // successfully. LabelAnnotations []*GoogleCloudVisionV1p2beta1EntityAnnotation `json:"labelAnnotations,omitempty"` // LandmarkAnnotations: If present, landmark detection has completed // successfully. LandmarkAnnotations []*GoogleCloudVisionV1p2beta1EntityAnnotation `json:"landmarkAnnotations,omitempty"` // LogoAnnotations: If present, logo detection has completed // successfully. 
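// Illustrative sketch, not generated code: a consumer normally checks Error
// before reading any of the annotation lists in this response, e.g.:
//
//	func topLabelDescriptions(resp *GoogleCloudVisionV1p2beta1AnnotateImageResponse) []string {
//		if resp == nil || resp.Error != nil {
//			return nil // per-image failure is reported in resp.Error
//		}
//		var out []string
//		for _, l := range resp.LabelAnnotations {
//			out = append(out, l.Description)
//		}
//		return out
//	}
//
// The helper name topLabelDescriptions is an assumption for illustration.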
LogoAnnotations []*GoogleCloudVisionV1p2beta1EntityAnnotation `json:"logoAnnotations,omitempty"` // SafeSearchAnnotation: If present, safe-search annotation has // completed successfully. SafeSearchAnnotation *GoogleCloudVisionV1p2beta1SafeSearchAnnotation `json:"safeSearchAnnotation,omitempty"` // TextAnnotations: If present, text (OCR) detection has completed // successfully. TextAnnotations []*GoogleCloudVisionV1p2beta1EntityAnnotation `json:"textAnnotations,omitempty"` // WebDetection: If present, web detection has completed successfully. WebDetection *GoogleCloudVisionV1p2beta1WebDetection `json:"webDetection,omitempty"` // ForceSendFields is a list of field names (e.g. "Context") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Context") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1AnnotateImageResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1AnnotateImageResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse: The response for // a single offline file annotation request. type GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse struct { // OutputConfig: The output location and metadata from // AsyncAnnotateFileRequest. OutputConfig *GoogleCloudVisionV1p2beta1OutputConfig `json:"outputConfig,omitempty"` // ForceSendFields is a list of field names (e.g. "OutputConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "OutputConfig") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1AsyncBatchAnnotateFilesResponse: Response // to an async batch file annotation request. type GoogleCloudVisionV1p2beta1AsyncBatchAnnotateFilesResponse struct { // Responses: The list of file annotation responses, one for each // request in // AsyncBatchAnnotateFilesRequest. 
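// Illustrative sketch, not generated code: each entry in Responses echoes
// back the output location from the request, so the GCS URIs that hold the
// result JSON can be collected like this:
//
//	func outputURIs(r *GoogleCloudVisionV1p2beta1AsyncBatchAnnotateFilesResponse) []string {
//		var uris []string
//		for _, fr := range r.Responses {
//			if fr.OutputConfig != nil && fr.OutputConfig.GcsDestination != nil {
//				uris = append(uris, fr.OutputConfig.GcsDestination.Uri)
//			}
//		}
//		return uris
//	}
//
// The GcsDestination field of GoogleCloudVisionV1p2beta1OutputConfig is an
// assumption here; OutputConfig is defined outside this excerpt.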
Responses []*GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse `json:"responses,omitempty"` // ForceSendFields is a list of field names (e.g. "Responses") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Responses") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1AsyncBatchAnnotateFilesResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1AsyncBatchAnnotateFilesResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1Block: Logical element on the page. type GoogleCloudVisionV1p2beta1Block struct { // BlockType: Detected block type (text, image etc) for this block. // // Possible values: // "UNKNOWN" - Unknown block type. // "TEXT" - Regular text block. // "TABLE" - Table block. // "PICTURE" - Image block. // "RULER" - Horizontal/vertical line box. // "BARCODE" - Barcode block. BlockType string `json:"blockType,omitempty"` // BoundingBox: The bounding box for the block. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. // For example: // // * when the text is horizontal it might look like: // // 0----1 // | | // 3----2 // // * when it's rotated 180 degrees around the top-left corner it // becomes: // // 2----3 // | | // 1----0 // // and the vertice order will still be (0, 1, 2, 3). BoundingBox *GoogleCloudVisionV1p2beta1BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results on the block. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Paragraphs: List of paragraphs in this block (if this blocks is of // type text). Paragraphs []*GoogleCloudVisionV1p2beta1Paragraph `json:"paragraphs,omitempty"` // Property: Additional information detected for the block. Property *GoogleCloudVisionV1p2beta1TextAnnotationTextProperty `json:"property,omitempty"` // ForceSendFields is a list of field names (e.g. "BlockType") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BlockType") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Block) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Block raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1Block) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1Block var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // GoogleCloudVisionV1p2beta1BoundingPoly: A bounding polygon for the // detected image annotation. type GoogleCloudVisionV1p2beta1BoundingPoly struct { // NormalizedVertices: The bounding polygon normalized vertices. NormalizedVertices []*GoogleCloudVisionV1p2beta1NormalizedVertex `json:"normalizedVertices,omitempty"` // Vertices: The bounding polygon vertices. Vertices []*GoogleCloudVisionV1p2beta1Vertex `json:"vertices,omitempty"` // ForceSendFields is a list of field names (e.g. "NormalizedVertices") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NormalizedVertices") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1BoundingPoly) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1BoundingPoly raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1ColorInfo: Color information consists of // RGB channels, score, and the fraction of // the image that the color occupies in the image. type GoogleCloudVisionV1p2beta1ColorInfo struct { // Color: RGB components of the color. Color *Color `json:"color,omitempty"` // PixelFraction: The fraction of pixels the color occupies in the // image. // Value in range [0, 1]. PixelFraction float64 `json:"pixelFraction,omitempty"` // Score: Image-specific score for this color. Value in range [0, 1]. Score float64 `json:"score,omitempty"` // ForceSendFields is a list of field names (e.g. "Color") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Color") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1ColorInfo) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1ColorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1ColorInfo) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1ColorInfo var s1 struct { PixelFraction gensupport.JSONFloat64 `json:"pixelFraction"` Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.PixelFraction = float64(s1.PixelFraction) s.Score = float64(s1.Score) return nil } // GoogleCloudVisionV1p2beta1CropHint: Single crop hint that is used to // generate a new crop when serving an image. type GoogleCloudVisionV1p2beta1CropHint struct { // BoundingPoly: The bounding polygon for the crop region. The // coordinates of the bounding // box are in the original image's scale, as returned in `ImageParams`. BoundingPoly *GoogleCloudVisionV1p2beta1BoundingPoly `json:"boundingPoly,omitempty"` // Confidence: Confidence of this being a salient region. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // ImportanceFraction: Fraction of importance of this salient region // with respect to the original // image. ImportanceFraction float64 `json:"importanceFraction,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingPoly") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingPoly") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1CropHint) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1CropHint raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1CropHint) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1CropHint var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` ImportanceFraction gensupport.JSONFloat64 `json:"importanceFraction"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) s.ImportanceFraction = float64(s1.ImportanceFraction) return nil } // GoogleCloudVisionV1p2beta1CropHintsAnnotation: Set of crop hints that // are used to generate new crops when serving images. type GoogleCloudVisionV1p2beta1CropHintsAnnotation struct { // CropHints: Crop hint results. 
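// Illustrative sketch, not generated code: callers typically pick the crop
// hint with the highest Confidence before reading its BoundingPoly:
//
//	func bestCropHint(a *GoogleCloudVisionV1p2beta1CropHintsAnnotation) *GoogleCloudVisionV1p2beta1CropHint {
//		var best *GoogleCloudVisionV1p2beta1CropHint
//		for _, h := range a.CropHints {
//			if best == nil || h.Confidence > best.Confidence {
//				best = h
//			}
//		}
//		return best
//	}
//
// Scanning for the maximum is a defensive assumption; whether the server
// already orders CropHints is not stated in this excerpt.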
CropHints []*GoogleCloudVisionV1p2beta1CropHint `json:"cropHints,omitempty"` // ForceSendFields is a list of field names (e.g. "CropHints") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CropHints") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1CropHintsAnnotation) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1CropHintsAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1DominantColorsAnnotation: Set of dominant // colors and their corresponding scores. type GoogleCloudVisionV1p2beta1DominantColorsAnnotation struct { // Colors: RGB color values with their score and pixel fraction. Colors []*GoogleCloudVisionV1p2beta1ColorInfo `json:"colors,omitempty"` // ForceSendFields is a list of field names (e.g. "Colors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Colors") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1DominantColorsAnnotation) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1DominantColorsAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1EntityAnnotation: Set of detected entity // features. type GoogleCloudVisionV1p2beta1EntityAnnotation struct { // BoundingPoly: Image region to which this entity belongs. Not // produced // for `LABEL_DETECTION` features. BoundingPoly *GoogleCloudVisionV1p2beta1BoundingPoly `json:"boundingPoly,omitempty"` // Confidence: **Deprecated. Use `score` instead.** // The accuracy of the entity detection in an image. // For example, for an image in which the "Eiffel Tower" entity is // detected, // this field represents the confidence that there is a tower in the // query // image. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Description: Entity textual description, expressed in its `locale` // language. Description string `json:"description,omitempty"` // Locale: The language code for the locale in which the entity // textual // `description` is expressed. 
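// Illustrative sketch, not generated code: Confidence is documented above
// as deprecated in favor of Score, so new code should filter on Score, for
// example keeping only labels at or above an assumed 0.8 threshold:
//
//	func confidentLabels(labels []*GoogleCloudVisionV1p2beta1EntityAnnotation) []*GoogleCloudVisionV1p2beta1EntityAnnotation {
//		var kept []*GoogleCloudVisionV1p2beta1EntityAnnotation
//		for _, l := range labels {
//			if l.Score >= 0.8 {
//				kept = append(kept, l)
//			}
//		}
//		return kept
//	}
//
// The 0.8 threshold and the helper name are assumptions for illustration.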
Locale string `json:"locale,omitempty"` // Locations: The location information for the detected entity. // Multiple // `LocationInfo` elements can be present because one location // may // indicate the location of the scene in the image, and another // location // may indicate the location of the place where the image was // taken. // Location information is usually present for landmarks. Locations []*GoogleCloudVisionV1p2beta1LocationInfo `json:"locations,omitempty"` // Mid: Opaque entity ID. Some IDs may be available in // [Google Knowledge Graph // Search // API](https://developers.google.com/knowledge-graph/). Mid string `json:"mid,omitempty"` // Properties: Some entities may have optional user-supplied `Property` // (name/value) // fields, such a score or string that qualifies the entity. Properties []*GoogleCloudVisionV1p2beta1Property `json:"properties,omitempty"` // Score: Overall score of the result. Range [0, 1]. Score float64 `json:"score,omitempty"` // Topicality: The relevancy of the ICA (Image Content Annotation) label // to the // image. For example, the relevancy of "tower" is likely higher to an // image // containing the detected "Eiffel Tower" than to an image containing // a // detected distant towering building, even though the confidence // that // there is a tower in each image may be the same. Range [0, 1]. Topicality float64 `json:"topicality,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingPoly") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingPoly") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1EntityAnnotation) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1EntityAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1EntityAnnotation) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1EntityAnnotation var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` Score gensupport.JSONFloat64 `json:"score"` Topicality gensupport.JSONFloat64 `json:"topicality"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) s.Score = float64(s1.Score) s.Topicality = float64(s1.Topicality) return nil } // GoogleCloudVisionV1p2beta1FaceAnnotation: A face annotation object // contains the results of face detection. type GoogleCloudVisionV1p2beta1FaceAnnotation struct { // AngerLikelihood: Anger likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. 
// "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. AngerLikelihood string `json:"angerLikelihood,omitempty"` // BlurredLikelihood: Blurred likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. BlurredLikelihood string `json:"blurredLikelihood,omitempty"` // BoundingPoly: The bounding polygon around the face. The coordinates // of the bounding box // are in the original image's scale, as returned in `ImageParams`. // The bounding box is computed to "frame" the face in accordance with // human // expectations. It is based on the landmarker results. // Note that one or more x and/or y coordinates may not be generated in // the // `BoundingPoly` (the polygon will be unbounded) if only a partial // face // appears in the image to be annotated. BoundingPoly *GoogleCloudVisionV1p2beta1BoundingPoly `json:"boundingPoly,omitempty"` // DetectionConfidence: Detection confidence. Range [0, 1]. DetectionConfidence float64 `json:"detectionConfidence,omitempty"` // FdBoundingPoly: The `fd_bounding_poly` bounding polygon is tighter // than the // `boundingPoly`, and encloses only the skin part of the face. // Typically, it // is used to eliminate the face from any image analysis that detects // the // "amount of skin" visible in an image. It is not based on // the // landmarker results, only on the initial face detection, hence // the <code>fd</code> (face detection) prefix. FdBoundingPoly *GoogleCloudVisionV1p2beta1BoundingPoly `json:"fdBoundingPoly,omitempty"` // HeadwearLikelihood: Headwear likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. HeadwearLikelihood string `json:"headwearLikelihood,omitempty"` // JoyLikelihood: Joy likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. JoyLikelihood string `json:"joyLikelihood,omitempty"` // LandmarkingConfidence: Face landmarking confidence. Range [0, 1]. LandmarkingConfidence float64 `json:"landmarkingConfidence,omitempty"` // Landmarks: Detected face landmarks. 
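// Illustrative sketch, not generated code: individual landmarks are looked
// up by their Type string, e.g. the nose tip position:
//
//	func noseTip(f *GoogleCloudVisionV1p2beta1FaceAnnotation) *GoogleCloudVisionV1p2beta1Position {
//		for _, lm := range f.Landmarks {
//			if lm.Type == "NOSE_TIP" {
//				return lm.Position
//			}
//		}
//		return nil
//	}
//
// GoogleCloudVisionV1p2beta1Position is referenced by the Landmark type
// below but defined outside this excerpt.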
Landmarks []*GoogleCloudVisionV1p2beta1FaceAnnotationLandmark `json:"landmarks,omitempty"` // PanAngle: Yaw angle, which indicates the leftward/rightward angle // that the face is // pointing relative to the vertical plane perpendicular to the image. // Range // [-180,180]. PanAngle float64 `json:"panAngle,omitempty"` // RollAngle: Roll angle, which indicates the amount of // clockwise/anti-clockwise rotation // of the face relative to the image vertical about the axis // perpendicular to // the face. Range [-180,180]. RollAngle float64 `json:"rollAngle,omitempty"` // SorrowLikelihood: Sorrow likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. SorrowLikelihood string `json:"sorrowLikelihood,omitempty"` // SurpriseLikelihood: Surprise likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. SurpriseLikelihood string `json:"surpriseLikelihood,omitempty"` // TiltAngle: Pitch angle, which indicates the upwards/downwards angle // that the face is // pointing relative to the image's horizontal plane. Range [-180,180]. TiltAngle float64 `json:"tiltAngle,omitempty"` // UnderExposedLikelihood: Under-exposed likelihood. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. UnderExposedLikelihood string `json:"underExposedLikelihood,omitempty"` // ForceSendFields is a list of field names (e.g. "AngerLikelihood") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AngerLikelihood") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. 
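// Illustrative sketch, not generated code: the likelihood fields above are
// plain strings holding one of the listed enum values, so filtering is a
// matter of string comparison, e.g.:
//
//	func joyfulFaces(faces []*GoogleCloudVisionV1p2beta1FaceAnnotation) []*GoogleCloudVisionV1p2beta1FaceAnnotation {
//		var out []*GoogleCloudVisionV1p2beta1FaceAnnotation
//		for _, f := range faces {
//			if f.JoyLikelihood == "LIKELY" || f.JoyLikelihood == "VERY_LIKELY" {
//				out = append(out, f)
//			}
//		}
//		return out
//	}
//
// The helper name joyfulFaces is an assumption for illustration only.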
NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1FaceAnnotation) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1FaceAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1FaceAnnotation) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1FaceAnnotation var s1 struct { DetectionConfidence gensupport.JSONFloat64 `json:"detectionConfidence"` LandmarkingConfidence gensupport.JSONFloat64 `json:"landmarkingConfidence"` PanAngle gensupport.JSONFloat64 `json:"panAngle"` RollAngle gensupport.JSONFloat64 `json:"rollAngle"` TiltAngle gensupport.JSONFloat64 `json:"tiltAngle"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.DetectionConfidence = float64(s1.DetectionConfidence) s.LandmarkingConfidence = float64(s1.LandmarkingConfidence) s.PanAngle = float64(s1.PanAngle) s.RollAngle = float64(s1.RollAngle) s.TiltAngle = float64(s1.TiltAngle) return nil } // GoogleCloudVisionV1p2beta1FaceAnnotationLandmark: A face-specific // landmark (for example, a face feature). type GoogleCloudVisionV1p2beta1FaceAnnotationLandmark struct { // Position: Face landmark position. Position *GoogleCloudVisionV1p2beta1Position `json:"position,omitempty"` // Type: Face landmark type. // // Possible values: // "UNKNOWN_LANDMARK" - Unknown face landmark detected. Should not be // filled. // "LEFT_EYE" - Left eye. // "RIGHT_EYE" - Right eye. // "LEFT_OF_LEFT_EYEBROW" - Left of left eyebrow. // "RIGHT_OF_LEFT_EYEBROW" - Right of left eyebrow. // "LEFT_OF_RIGHT_EYEBROW" - Left of right eyebrow. // "RIGHT_OF_RIGHT_EYEBROW" - Right of right eyebrow. // "MIDPOINT_BETWEEN_EYES" - Midpoint between eyes. // "NOSE_TIP" - Nose tip. // "UPPER_LIP" - Upper lip. // "LOWER_LIP" - Lower lip. // "MOUTH_LEFT" - Mouth left. // "MOUTH_RIGHT" - Mouth right. // "MOUTH_CENTER" - Mouth center. // "NOSE_BOTTOM_RIGHT" - Nose, bottom right. // "NOSE_BOTTOM_LEFT" - Nose, bottom left. // "NOSE_BOTTOM_CENTER" - Nose, bottom center. // "LEFT_EYE_TOP_BOUNDARY" - Left eye, top boundary. // "LEFT_EYE_RIGHT_CORNER" - Left eye, right corner. // "LEFT_EYE_BOTTOM_BOUNDARY" - Left eye, bottom boundary. // "LEFT_EYE_LEFT_CORNER" - Left eye, left corner. // "RIGHT_EYE_TOP_BOUNDARY" - Right eye, top boundary. // "RIGHT_EYE_RIGHT_CORNER" - Right eye, right corner. // "RIGHT_EYE_BOTTOM_BOUNDARY" - Right eye, bottom boundary. // "RIGHT_EYE_LEFT_CORNER" - Right eye, left corner. // "LEFT_EYEBROW_UPPER_MIDPOINT" - Left eyebrow, upper midpoint. // "RIGHT_EYEBROW_UPPER_MIDPOINT" - Right eyebrow, upper midpoint. // "LEFT_EAR_TRAGION" - Left ear tragion. // "RIGHT_EAR_TRAGION" - Right ear tragion. // "LEFT_EYE_PUPIL" - Left eye pupil. // "RIGHT_EYE_PUPIL" - Right eye pupil. // "FOREHEAD_GLABELLA" - Forehead glabella. // "CHIN_GNATHION" - Chin gnathion. // "CHIN_LEFT_GONION" - Chin left gonion. // "CHIN_RIGHT_GONION" - Chin right gonion. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Position") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"Position") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1FaceAnnotationLandmark) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1FaceAnnotationLandmark raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1GcsDestination: The Google Cloud Storage // location where the output will be written to. type GoogleCloudVisionV1p2beta1GcsDestination struct { // Uri: Google Cloud Storage URI where the results will be stored. // Results will // be in JSON format and preceded by its corresponding input URI. This // field // can either represent a single file, or a prefix for multiple // outputs. // Prefixes must end in a `/`. // // Examples: // // * File: gs://bucket-name/filename.json // * Prefix: gs://bucket-name/prefix/here/ // * File: gs://bucket-name/prefix/here // // If multiple outputs, each response is still AnnotateFileResponse, // each of // which contains some subset of the full list of // AnnotateImageResponse. // Multiple outputs can happen if, for example, the output JSON is too // large // and overflows into multiple sharded files. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "Uri") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Uri") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1GcsDestination) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1GcsDestination raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1GcsSource: The Google Cloud Storage // location where the input will be read from. type GoogleCloudVisionV1p2beta1GcsSource struct { // Uri: Google Cloud Storage URI for the input file. This must only be // a // Google Cloud Storage object. Wildcards are not currently supported. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "Uri") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Uri") to include in API // requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1GcsSource) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1GcsSource raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1ImageAnnotationContext: If an image was // produced from a file (e.g. a PDF), this message gives // information about the source of that image. type GoogleCloudVisionV1p2beta1ImageAnnotationContext struct { // PageNumber: If the file was a PDF or TIFF, this field gives the page // number within // the file used to produce the image. PageNumber int64 `json:"pageNumber,omitempty"` // Uri: The URI of the file used to produce the image. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "PageNumber") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "PageNumber") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1ImageAnnotationContext) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1ImageAnnotationContext raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1ImageProperties: Stores image properties, // such as dominant colors. type GoogleCloudVisionV1p2beta1ImageProperties struct { // DominantColors: If present, dominant colors completed successfully. DominantColors *GoogleCloudVisionV1p2beta1DominantColorsAnnotation `json:"dominantColors,omitempty"` // ForceSendFields is a list of field names (e.g. "DominantColors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DominantColors") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. 
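// Illustrative sketch, not generated code: DominantColors exposes a Score
// and PixelFraction per color, so the most prominent color can be selected
// like this:
//
//	func dominantColor(p *GoogleCloudVisionV1p2beta1ImageProperties) *GoogleCloudVisionV1p2beta1ColorInfo {
//		if p == nil || p.DominantColors == nil {
//			return nil
//		}
//		var best *GoogleCloudVisionV1p2beta1ColorInfo
//		for _, c := range p.DominantColors.Colors {
//			if best == nil || c.Score > best.Score {
//				best = c
//			}
//		}
//		return best
//	}
//
// Ranking by Score rather than PixelFraction is an assumption; either field
// may be the right key depending on the use case.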
NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1ImageProperties) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1ImageProperties raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1InputConfig: The desired input location and // metadata. type GoogleCloudVisionV1p2beta1InputConfig struct { // GcsSource: The Google Cloud Storage location to read the input from. GcsSource *GoogleCloudVisionV1p2beta1GcsSource `json:"gcsSource,omitempty"` // MimeType: The type of the file. Currently only "application/pdf" and // "image/tiff" // are supported. Wildcards are not supported. MimeType string `json:"mimeType,omitempty"` // ForceSendFields is a list of field names (e.g. "GcsSource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "GcsSource") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1InputConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1InputConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1LocationInfo: Detected entity location // information. type GoogleCloudVisionV1p2beta1LocationInfo struct { // LatLng: lat/long location coordinates. LatLng *LatLng `json:"latLng,omitempty"` // ForceSendFields is a list of field names (e.g. "LatLng") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "LatLng") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1LocationInfo) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1LocationInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1NormalizedVertex: A vertex represents a 2D // point in the image. // NOTE: the normalized vertex coordinates are relative to the original // image // and range from 0 to 1. type GoogleCloudVisionV1p2beta1NormalizedVertex struct { // X: X coordinate. X float64 `json:"x,omitempty"` // Y: Y coordinate. 
Y float64 `json:"y,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1NormalizedVertex) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1NormalizedVertex raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1NormalizedVertex) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1NormalizedVertex var s1 struct { X gensupport.JSONFloat64 `json:"x"` Y gensupport.JSONFloat64 `json:"y"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.X = float64(s1.X) s.Y = float64(s1.Y) return nil } // GoogleCloudVisionV1p2beta1OperationMetadata: Contains metadata for // the BatchAnnotateImages operation. type GoogleCloudVisionV1p2beta1OperationMetadata struct { // CreateTime: The time when the batch request was received. CreateTime string `json:"createTime,omitempty"` // State: Current state of the batch operation. // // Possible values: // "STATE_UNSPECIFIED" - Invalid. // "CREATED" - Request is received. // "RUNNING" - Request is actively being processed. // "DONE" - The batch processing is done. // "CANCELLED" - The batch processing was cancelled. State string `json:"state,omitempty"` // UpdateTime: The time when the operation result was last updated. UpdateTime string `json:"updateTime,omitempty"` // ForceSendFields is a list of field names (e.g. "CreateTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CreateTime") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1OperationMetadata raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1OutputConfig: The desired output location // and metadata. 
type GoogleCloudVisionV1p2beta1OutputConfig struct { // BatchSize: The max number of response protos to put into each output // JSON file on // Google Cloud Storage. // The valid range is [1, 100]. If not specified, the default value is // 20. // // For example, for one pdf file with 100 pages, 100 response protos // will // be generated. If `batch_size` = 20, then 5 json files each // containing 20 response protos will be written under the // prefix // `gcs_destination`.`uri`. // // Currently, batch_size only applies to GcsDestination, with potential // future // support for other output configurations. BatchSize int64 `json:"batchSize,omitempty"` // GcsDestination: The Google Cloud Storage location to write the // output(s) to. GcsDestination *GoogleCloudVisionV1p2beta1GcsDestination `json:"gcsDestination,omitempty"` // ForceSendFields is a list of field names (e.g. "BatchSize") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BatchSize") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1OutputConfig) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1OutputConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1Page: Detected page from OCR. type GoogleCloudVisionV1p2beta1Page struct { // Blocks: List of blocks of text, images etc on this page. Blocks []*GoogleCloudVisionV1p2beta1Block `json:"blocks,omitempty"` // Confidence: Confidence of the OCR results on the page. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Height: Page height. For PDFs the unit is points. For images // (including // TIFFs) the unit is pixels. Height int64 `json:"height,omitempty"` // Property: Additional information detected on the page. Property *GoogleCloudVisionV1p2beta1TextAnnotationTextProperty `json:"property,omitempty"` // Width: Page width. For PDFs the unit is points. For images // (including // TIFFs) the unit is pixels. Width int64 `json:"width,omitempty"` // ForceSendFields is a list of field names (e.g. "Blocks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Blocks") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
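// Illustrative sketch, not part of the generated API surface: assembling the
// input and output halves of an asynchronous PDF OCR request from the types
// above. The bucket and object names are placeholders.
func exampleAsyncOCRConfig() (*GoogleCloudVisionV1p2beta1InputConfig, *GoogleCloudVisionV1p2beta1OutputConfig) {
	in := &GoogleCloudVisionV1p2beta1InputConfig{
		// Only "application/pdf" and "image/tiff" are accepted, and the
		// GCS URI must name a single object (no wildcards).
		GcsSource: &GoogleCloudVisionV1p2beta1GcsSource{Uri: "gs://example-bucket/scans/contract.pdf"},
		MimeType:  "application/pdf",
	}
	out := &GoogleCloudVisionV1p2beta1OutputConfig{
		// A prefix URI must end in "/". With BatchSize 20, a 100-page PDF
		// produces five sharded JSON files under this prefix.
		GcsDestination: &GoogleCloudVisionV1p2beta1GcsDestination{Uri: "gs://example-bucket/ocr-output/"},
		BatchSize:      20,
	}
	return in, out
}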
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Page) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Page raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1Page) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1Page var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // GoogleCloudVisionV1p2beta1Paragraph: Structural unit of text // representing a number of words in certain order. type GoogleCloudVisionV1p2beta1Paragraph struct { // BoundingBox: The bounding box for the paragraph. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it // becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox *GoogleCloudVisionV1p2beta1BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results for the paragraph. Range // [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Property: Additional information detected for the paragraph. Property *GoogleCloudVisionV1p2beta1TextAnnotationTextProperty `json:"property,omitempty"` // Words: List of words in this paragraph. Words []*GoogleCloudVisionV1p2beta1Word `json:"words,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingBox") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingBox") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Paragraph) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Paragraph raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1Paragraph) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1Paragraph var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // GoogleCloudVisionV1p2beta1Position: A 3D position in the image, used // primarily for Face detection landmarks. // A valid Position must have both x and y coordinates. 
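// Illustrative sketch, not part of the generated API surface: OCR confidence
// is reported at every structural level, so low-confidence paragraphs can be
// dropped before further processing. The 0.8 threshold is an arbitrary
// placeholder, and the Paragraphs field on the Block type is assumed from its
// declaration earlier in this file.
func exampleConfidentParagraphs(page *GoogleCloudVisionV1p2beta1Page) []*GoogleCloudVisionV1p2beta1Paragraph {
	var keep []*GoogleCloudVisionV1p2beta1Paragraph
	for _, block := range page.Blocks {
		for _, para := range block.Paragraphs {
			// Confidence values are in the range [0, 1].
			if para.Confidence >= 0.8 {
				keep = append(keep, para)
			}
		}
	}
	return keep
}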
// The position coordinates are in the same scale as the original image. type GoogleCloudVisionV1p2beta1Position struct { // X: X coordinate. X float64 `json:"x,omitempty"` // Y: Y coordinate. Y float64 `json:"y,omitempty"` // Z: Z coordinate (or depth). Z float64 `json:"z,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Position) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Position raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1Position) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1Position var s1 struct { X gensupport.JSONFloat64 `json:"x"` Y gensupport.JSONFloat64 `json:"y"` Z gensupport.JSONFloat64 `json:"z"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.X = float64(s1.X) s.Y = float64(s1.Y) s.Z = float64(s1.Z) return nil } // GoogleCloudVisionV1p2beta1Property: A `Property` consists of a // user-supplied name/value pair. type GoogleCloudVisionV1p2beta1Property struct { // Name: Name of the property. Name string `json:"name,omitempty"` // Uint64Value: Value of numeric properties. Uint64Value uint64 `json:"uint64Value,omitempty,string"` // Value: Value of the property. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Name") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Property) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Property raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1SafeSearchAnnotation: Set of features // pertaining to the image, computed by computer vision // methods over safe-search verticals (for example, adult, spoof, // medical, // violence). 
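// Illustrative sketch, not part of the generated API surface: decoding a
// Position from raw JSON goes through the custom UnmarshalJSON above, which
// routes each coordinate through gensupport.JSONFloat64; that wrapper also
// appears to accept the special string forms ("NaN", "Infinity", "-Infinity")
// that proto-style JSON can emit. The literal values are placeholders; the
// "encoding/json" import already exists in this file.
func examplePositionFromJSON() (*GoogleCloudVisionV1p2beta1Position, error) {
	raw := []byte(`{"x": 120.5, "y": 43.25, "z": 7}`)
	p := &GoogleCloudVisionV1p2beta1Position{}
	if err := json.Unmarshal(raw, p); err != nil {
		return nil, err
	}
	// p.X, p.Y and p.Z are now plain float64 values.
	return p, nil
}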
type GoogleCloudVisionV1p2beta1SafeSearchAnnotation struct { // Adult: Represents the adult content likelihood for the image. Adult // content may // contain elements such as nudity, pornographic images or cartoons, // or // sexual activities. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Adult string `json:"adult,omitempty"` // Medical: Likelihood that this is a medical image. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Medical string `json:"medical,omitempty"` // Racy: Likelihood that the request image contains racy content. Racy // content may // include (but is not limited to) skimpy or sheer clothing, // strategically // covered nudity, lewd or provocative poses, or close-ups of // sensitive // body areas. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Racy string `json:"racy,omitempty"` // Spoof: Spoof likelihood. The likelihood that a modification // was made to the image's canonical version to make it appear // funny or offensive. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Spoof string `json:"spoof,omitempty"` // Violence: Likelihood that this image contains violent content. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Violence string `json:"violence,omitempty"` // ForceSendFields is a list of field names (e.g. "Adult") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Adult") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1SafeSearchAnnotation) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1SafeSearchAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1Symbol: A single symbol representation. type GoogleCloudVisionV1p2beta1Symbol struct { // BoundingBox: The bounding box for the symbol. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it // becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox *GoogleCloudVisionV1p2beta1BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results for the symbol. Range [0, // 1]. Confidence float64 `json:"confidence,omitempty"` // Property: Additional information detected for the symbol. Property *GoogleCloudVisionV1p2beta1TextAnnotationTextProperty `json:"property,omitempty"` // Text: The actual UTF-8 representation of the symbol. Text string `json:"text,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingBox") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingBox") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
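// Illustrative sketch, not part of the generated API surface: the safe-search
// likelihoods are plain strings, so a caller typically folds them into a
// boolean decision. Treating "POSSIBLE" and above as a hit is one common,
// conservative choice, not something the API itself prescribes.
func exampleIsLikelyUnsafe(a *GoogleCloudVisionV1p2beta1SafeSearchAnnotation) bool {
	if a == nil {
		return false
	}
	atLeastPossible := map[string]bool{"POSSIBLE": true, "LIKELY": true, "VERY_LIKELY": true}
	// Adult and violence are checked here; medical, racy and spoof could be
	// folded in the same way.
	return atLeastPossible[a.Adult] || atLeastPossible[a.Violence]
}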
NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Symbol) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Symbol raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1Symbol) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1Symbol var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // GoogleCloudVisionV1p2beta1TextAnnotation: TextAnnotation contains a // structured representation of OCR extracted text. // The hierarchy of an OCR extracted text structure is like this: // TextAnnotation -> Page -> Block -> Paragraph -> Word -> // Symbol // Each structural component, starting from Page, may further have their // own // properties. Properties describe detected languages, breaks etc.. // Please refer // to the TextAnnotation.TextProperty message definition below for // more // detail. type GoogleCloudVisionV1p2beta1TextAnnotation struct { // Pages: List of pages detected by OCR. Pages []*GoogleCloudVisionV1p2beta1Page `json:"pages,omitempty"` // Text: UTF-8 text detected on the pages. Text string `json:"text,omitempty"` // ForceSendFields is a list of field names (e.g. "Pages") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Pages") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1TextAnnotation) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1TextAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak: Detected start // or end of a structural component. type GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak struct { // IsPrefix: True if break prepends the element. IsPrefix bool `json:"isPrefix,omitempty"` // Type: Detected break type. // // Possible values: // "UNKNOWN" - Unknown break label type. // "SPACE" - Regular space. // "SURE_SPACE" - Sure space (very wide). // "EOL_SURE_SPACE" - Line-wrapping break. // "HYPHEN" - End-line hyphen that is not present in text; does not // co-occur with // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`. // "LINE_BREAK" - Line break that ends a paragraph. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "IsPrefix") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "IsPrefix") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage: Detected // language for a structural component. type GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage struct { // Confidence: Confidence of detected language. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // LanguageCode: The BCP-47 language code, such as "en-US" or "sr-Latn". // For more // information, // see // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. LanguageCode string `json:"languageCode,omitempty"` // ForceSendFields is a list of field names (e.g. "Confidence") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Confidence") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // GoogleCloudVisionV1p2beta1TextAnnotationTextProperty: Additional // information detected on the structural component. type GoogleCloudVisionV1p2beta1TextAnnotationTextProperty struct { // DetectedBreak: Detected start or end of a text segment. DetectedBreak *GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak `json:"detectedBreak,omitempty"` // DetectedLanguages: A list of detected languages together with // confidence. DetectedLanguages []*GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage `json:"detectedLanguages,omitempty"` // ForceSendFields is a list of field names (e.g. "DetectedBreak") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
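// Illustrative sketch, not part of the generated API surface: when
// reassembling plain text from OCR output, the DetectedBreak attached to a
// symbol's text property says what separator follows (or, if IsPrefix is
// true, precedes) it. The mapping below is one reasonable reading of the
// break types listed above, not the only possible one.
func exampleBreakSeparator(b *GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak) string {
	if b == nil {
		return ""
	}
	switch b.Type {
	case "SPACE", "SURE_SPACE":
		return " "
	case "EOL_SURE_SPACE", "LINE_BREAK":
		return "\n"
	case "HYPHEN":
		// The hyphen itself is not present in the recognized text, so it is
		// re-inserted here together with the line break it implies.
		return "-\n"
	default:
		return ""
	}
}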
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DetectedBreak") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1TextAnnotationTextProperty) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1TextAnnotationTextProperty raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1Vertex: A vertex represents a 2D point in // the image. // NOTE: the vertex coordinates are in the same scale as the original // image. type GoogleCloudVisionV1p2beta1Vertex struct { // X: X coordinate. X int64 `json:"x,omitempty"` // Y: Y coordinate. Y int64 `json:"y,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Vertex) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Vertex raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1WebDetection: Relevant information for the // image from the Internet. type GoogleCloudVisionV1p2beta1WebDetection struct { // BestGuessLabels: The service's best guess as to the topic of the // request image. // Inferred from similar images on the open web. BestGuessLabels []*GoogleCloudVisionV1p2beta1WebDetectionWebLabel `json:"bestGuessLabels,omitempty"` // FullMatchingImages: Fully matching images from the Internet. // Can include resized copies of the query image. FullMatchingImages []*GoogleCloudVisionV1p2beta1WebDetectionWebImage `json:"fullMatchingImages,omitempty"` // PagesWithMatchingImages: Web pages containing the matching images // from the Internet. PagesWithMatchingImages []*GoogleCloudVisionV1p2beta1WebDetectionWebPage `json:"pagesWithMatchingImages,omitempty"` // PartialMatchingImages: Partial matching images from the // Internet. // Those images are similar enough to share some key-point features. // For // example an original image will likely have partial matching for its // crops. 
PartialMatchingImages []*GoogleCloudVisionV1p2beta1WebDetectionWebImage `json:"partialMatchingImages,omitempty"` // VisuallySimilarImages: The visually similar image results. VisuallySimilarImages []*GoogleCloudVisionV1p2beta1WebDetectionWebImage `json:"visuallySimilarImages,omitempty"` // WebEntities: Deduced entities from similar images on the Internet. WebEntities []*GoogleCloudVisionV1p2beta1WebDetectionWebEntity `json:"webEntities,omitempty"` // ForceSendFields is a list of field names (e.g. "BestGuessLabels") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BestGuessLabels") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1WebDetection) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1WebDetection raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1WebDetectionWebEntity: Entity deduced from // similar images on the Internet. type GoogleCloudVisionV1p2beta1WebDetectionWebEntity struct { // Description: Canonical description of the entity, in English. Description string `json:"description,omitempty"` // EntityId: Opaque entity ID. EntityId string `json:"entityId,omitempty"` // Score: Overall relevancy score for the entity. // Not normalized and not comparable across different image queries. Score float64 `json:"score,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1WebDetectionWebEntity) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1WebDetectionWebEntity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1WebDetectionWebEntity) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1WebDetectionWebEntity var s1 struct { Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Score = float64(s1.Score) return nil } // GoogleCloudVisionV1p2beta1WebDetectionWebImage: Metadata for online // images. type GoogleCloudVisionV1p2beta1WebDetectionWebImage struct { // Score: (Deprecated) Overall relevancy score for the image. Score float64 `json:"score,omitempty"` // Url: The result image URL. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Score") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Score") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1WebDetectionWebImage) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1WebDetectionWebImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1WebDetectionWebImage) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1WebDetectionWebImage var s1 struct { Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Score = float64(s1.Score) return nil } // GoogleCloudVisionV1p2beta1WebDetectionWebLabel: Label to provide // extra metadata for the web detection. type GoogleCloudVisionV1p2beta1WebDetectionWebLabel struct { // Label: Label for extra metadata. Label string `json:"label,omitempty"` // LanguageCode: The BCP-47 language code for `label`, such as "en-US" // or "sr-Latn". // For more information, // see // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. LanguageCode string `json:"languageCode,omitempty"` // ForceSendFields is a list of field names (e.g. "Label") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Label") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
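// Illustrative sketch, not part of the generated API surface: a small helper
// that pulls the most useful parts out of a web detection result -- the best
// guess labels and the highest scoring deduced entity. Entity scores are only
// comparable within a single query; this assumes the file's existing "fmt"
// import.
func exampleSummarizeWebDetection(wd *GoogleCloudVisionV1p2beta1WebDetection) string {
	if wd == nil {
		return ""
	}
	summary := ""
	for _, label := range wd.BestGuessLabels {
		summary += fmt.Sprintf("best guess: %s (%s)\n", label.Label, label.LanguageCode)
	}
	var top *GoogleCloudVisionV1p2beta1WebDetectionWebEntity
	for _, e := range wd.WebEntities {
		if top == nil || e.Score > top.Score {
			top = e
		}
	}
	if top != nil {
		summary += fmt.Sprintf("top entity: %s (score %.2f)\n", top.Description, top.Score)
	}
	return summary
}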
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1WebDetectionWebLabel) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1WebDetectionWebLabel raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p2beta1WebDetectionWebPage: Metadata for web // pages. type GoogleCloudVisionV1p2beta1WebDetectionWebPage struct { // FullMatchingImages: Fully matching images on the page. // Can include resized copies of the query image. FullMatchingImages []*GoogleCloudVisionV1p2beta1WebDetectionWebImage `json:"fullMatchingImages,omitempty"` // PageTitle: Title for the web page, may contain HTML markups. PageTitle string `json:"pageTitle,omitempty"` // PartialMatchingImages: Partial matching images on the page. // Those images are similar enough to share some key-point features. // For // example an original image will likely have partial matching for // its // crops. PartialMatchingImages []*GoogleCloudVisionV1p2beta1WebDetectionWebImage `json:"partialMatchingImages,omitempty"` // Score: (Deprecated) Overall relevancy score for the web page. Score float64 `json:"score,omitempty"` // Url: The result web page URL. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "FullMatchingImages") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "FullMatchingImages") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1WebDetectionWebPage) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1WebDetectionWebPage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1WebDetectionWebPage) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1WebDetectionWebPage var s1 struct { Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Score = float64(s1.Score) return nil } // GoogleCloudVisionV1p2beta1Word: A word representation. type GoogleCloudVisionV1p2beta1Word struct { // BoundingBox: The bounding box for the word. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. 
// For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it // becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox *GoogleCloudVisionV1p2beta1BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results for the word. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Property: Additional information detected for the word. Property *GoogleCloudVisionV1p2beta1TextAnnotationTextProperty `json:"property,omitempty"` // Symbols: List of symbols in the word. // The order of the symbols follows the natural reading order. Symbols []*GoogleCloudVisionV1p2beta1Symbol `json:"symbols,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingBox") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingBox") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p2beta1Word) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p2beta1Word raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p2beta1Word) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p2beta1Word var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // GoogleCloudVisionV1p3beta1BatchOperationMetadata: Metadata for the // batch operations such as the current state. // // This is included in the `metadata` field of the `Operation` returned // by the // `GetOperation` call of the `google::longrunning::Operations` service. type GoogleCloudVisionV1p3beta1BatchOperationMetadata struct { // EndTime: The time when the batch request is finished // and // google.longrunning.Operation.done is set to true. EndTime string `json:"endTime,omitempty"` // State: The current state of the batch operation. // // Possible values: // "STATE_UNSPECIFIED" - Invalid. // "PROCESSING" - Request is actively being processed. // "SUCCESSFUL" - The request is done and at least one item has been // successfully // processed. // "FAILED" - The request is done and no item has been successfully // processed. // "CANCELLED" - The request is done after the // longrunning.Operations.CancelOperation has // been called by the user. Any records that were processed before // the // cancel command are output as specified in the request. State string `json:"state,omitempty"` // SubmitTime: The time when the batch request was submitted to the // server. SubmitTime string `json:"submitTime,omitempty"` // ForceSendFields is a list of field names (e.g. 
"EndTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EndTime") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p3beta1BatchOperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p3beta1BatchOperationMetadata raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p3beta1BoundingPoly: A bounding polygon for the // detected image annotation. type GoogleCloudVisionV1p3beta1BoundingPoly struct { // NormalizedVertices: The bounding polygon normalized vertices. NormalizedVertices []*GoogleCloudVisionV1p3beta1NormalizedVertex `json:"normalizedVertices,omitempty"` // Vertices: The bounding polygon vertices. Vertices []*GoogleCloudVisionV1p3beta1Vertex `json:"vertices,omitempty"` // ForceSendFields is a list of field names (e.g. "NormalizedVertices") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NormalizedVertices") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p3beta1BoundingPoly) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p3beta1BoundingPoly raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p3beta1ImportProductSetsResponse: Response message // for the `ImportProductSets` method. // // This message is returned by // the // google.longrunning.Operations.GetOperation method in the // returned // google.longrunning.Operation.response field. type GoogleCloudVisionV1p3beta1ImportProductSetsResponse struct { // ReferenceImages: The list of reference_images that are imported // successfully. ReferenceImages []*GoogleCloudVisionV1p3beta1ReferenceImage `json:"referenceImages,omitempty"` // Statuses: The rpc status for each ImportProductSet request, including // both successes // and errors. // // The number of statuses here matches the number of lines in the csv // file, // and statuses[i] stores the success or failure status of processing // the i-th // line of the csv, starting from line 0. 
Statuses []*Status `json:"statuses,omitempty"` // ForceSendFields is a list of field names (e.g. "ReferenceImages") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ReferenceImages") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p3beta1ImportProductSetsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p3beta1ImportProductSetsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p3beta1NormalizedVertex: A vertex represents a 2D // point in the image. // NOTE: the normalized vertex coordinates are relative to the original // image // and range from 0 to 1. type GoogleCloudVisionV1p3beta1NormalizedVertex struct { // X: X coordinate. X float64 `json:"x,omitempty"` // Y: Y coordinate. Y float64 `json:"y,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p3beta1NormalizedVertex) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p3beta1NormalizedVertex raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *GoogleCloudVisionV1p3beta1NormalizedVertex) UnmarshalJSON(data []byte) error { type NoMethod GoogleCloudVisionV1p3beta1NormalizedVertex var s1 struct { X gensupport.JSONFloat64 `json:"x"` Y gensupport.JSONFloat64 `json:"y"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.X = float64(s1.X) s.Y = float64(s1.Y) return nil } // GoogleCloudVisionV1p3beta1ReferenceImage: A `ReferenceImage` // represents a product image and its associated metadata, // such as bounding boxes. type GoogleCloudVisionV1p3beta1ReferenceImage struct { // BoundingPolys: Bounding polygons around the areas of interest in the // reference image. // Optional. If this field is empty, the system will try to detect // regions of // interest. At most 10 bounding polygons will be used. // // The provided shape is converted into a non-rotated rectangle. 
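// Illustrative sketch, not part of the generated API surface: normalized
// vertices are expressed in the 0..1 range relative to the original image, so
// converting them to pixel coordinates is just a scale by the image
// dimensions, which the caller has to know independently.
func exampleToPixelVertices(nv []*GoogleCloudVisionV1p3beta1NormalizedVertex, width, height int64) []*GoogleCloudVisionV1p3beta1Vertex {
	out := make([]*GoogleCloudVisionV1p3beta1Vertex, 0, len(nv))
	for _, v := range nv {
		out = append(out, &GoogleCloudVisionV1p3beta1Vertex{
			X: int64(v.X * float64(width)),
			Y: int64(v.Y * float64(height)),
		})
	}
	return out
}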
// Once // converted, the small edge of the rectangle must be greater than or // equal // to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; // 1:5 // is not). BoundingPolys []*GoogleCloudVisionV1p3beta1BoundingPoly `json:"boundingPolys,omitempty"` // Name: The resource name of the reference image. // // Format // is: // // `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referen // ceImages/IMAGE_ID`. // // This field is ignored when creating a reference image. Name string `json:"name,omitempty"` // Uri: The Google Cloud Storage URI of the reference image. // // The URI must start with `gs://`. // // Required. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingPolys") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingPolys") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p3beta1ReferenceImage) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p3beta1ReferenceImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GoogleCloudVisionV1p3beta1Vertex: A vertex represents a 2D point in // the image. // NOTE: the vertex coordinates are in the same scale as the original // image. type GoogleCloudVisionV1p3beta1Vertex struct { // X: X coordinate. X int64 `json:"x,omitempty"` // Y: Y coordinate. Y int64 `json:"y,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GoogleCloudVisionV1p3beta1Vertex) MarshalJSON() ([]byte, error) { type NoMethod GoogleCloudVisionV1p3beta1Vertex raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Image: Client image to perform Google Cloud Vision API tasks over. type Image struct { // Content: Image content, represented as a stream of bytes. // Note: As with all `bytes` fields, protobuffers use a pure // binary // representation, whereas JSON representations use base64. 
Content string `json:"content,omitempty"` // Source: Google Cloud Storage image location, or publicly-accessible // image // URL. If both `content` and `source` are provided for an image, // `content` // takes precedence and is used to perform the image annotation request. Source *ImageSource `json:"source,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Content") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Image) MarshalJSON() ([]byte, error) { type NoMethod Image raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ImageAnnotationContext: If an image was produced from a file (e.g. a // PDF), this message gives // information about the source of that image. type ImageAnnotationContext struct { // PageNumber: If the file was a PDF or TIFF, this field gives the page // number within // the file used to produce the image. PageNumber int64 `json:"pageNumber,omitempty"` // Uri: The URI of the file used to produce the image. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "PageNumber") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "PageNumber") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ImageAnnotationContext) MarshalJSON() ([]byte, error) { type NoMethod ImageAnnotationContext raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ImageContext: Image context and/or feature-specific parameters. type ImageContext struct { // CropHintsParams: Parameters for crop hints annotation request. CropHintsParams *CropHintsParams `json:"cropHintsParams,omitempty"` // LanguageHints: List of languages to use for TEXT_DETECTION. In most // cases, an empty value // yields the best results since it enables automatic language // detection. For // languages based on the Latin alphabet, setting `language_hints` is // not // needed. 
In rare cases, when the language of the text in the image is // known, // setting a hint will help get better results (although it will be // a // significant hindrance if the hint is wrong). Text detection returns // an // error if one or more of the specified languages is not one of // the // [supported languages](/vision/docs/languages). LanguageHints []string `json:"languageHints,omitempty"` // LatLongRect: Not used. LatLongRect *LatLongRect `json:"latLongRect,omitempty"` // WebDetectionParams: Parameters for web detection. WebDetectionParams *WebDetectionParams `json:"webDetectionParams,omitempty"` // ForceSendFields is a list of field names (e.g. "CropHintsParams") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CropHintsParams") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ImageContext) MarshalJSON() ([]byte, error) { type NoMethod ImageContext raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ImageProperties: Stores image properties, such as dominant colors. type ImageProperties struct { // DominantColors: If present, dominant colors completed successfully. DominantColors *DominantColorsAnnotation `json:"dominantColors,omitempty"` // ForceSendFields is a list of field names (e.g. "DominantColors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DominantColors") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ImageProperties) MarshalJSON() ([]byte, error) { type NoMethod ImageProperties raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ImageSource: External image source (Google Cloud Storage or web URL // image location). type ImageSource struct { // GcsImageUri: **Use `image_uri` instead.** // // The Google Cloud Storage URI of the // form // `gs://bucket_name/object_name`. Object versioning is not supported. // See // [Google Cloud Storage // Request // URIs](https://cloud.google.com/storage/docs/reference-uris) for more // info. GcsImageUri string `json:"gcsImageUri,omitempty"` // ImageUri: The URI of the source image. Can be either: // // 1. 
A Google Cloud Storage URI of the form // `gs://bucket_name/object_name`. Object versioning is not // supported. See // [Google Cloud Storage Request // URIs](https://cloud.google.com/storage/docs/reference-uris) for // more // info. // // 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images // from // HTTP/HTTPS URLs, Google cannot guarantee that the request will be // completed. Your request may fail if the specified host denies the // request (e.g. due to request throttling or DOS prevention), or if // Google // throttles requests to the site for abuse prevention. You should // not // depend on externally-hosted images for production // applications. // // When both `gcs_image_uri` and `image_uri` are specified, `image_uri` // takes // precedence. ImageUri string `json:"imageUri,omitempty"` // ForceSendFields is a list of field names (e.g. "GcsImageUri") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "GcsImageUri") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ImageSource) MarshalJSON() ([]byte, error) { type NoMethod ImageSource raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InputConfig: The desired input location and metadata. type InputConfig struct { // GcsSource: The Google Cloud Storage location to read the input from. GcsSource *GcsSource `json:"gcsSource,omitempty"` // MimeType: The type of the file. Currently only "application/pdf" and // "image/tiff" // are supported. Wildcards are not supported. MimeType string `json:"mimeType,omitempty"` // ForceSendFields is a list of field names (e.g. "GcsSource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "GcsSource") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *InputConfig) MarshalJSON() ([]byte, error) { type NoMethod InputConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Landmark: A face-specific landmark (for example, a face feature). type Landmark struct { // Position: Face landmark position. Position *Position `json:"position,omitempty"` // Type: Face landmark type. 
// // Possible values: // "UNKNOWN_LANDMARK" - Unknown face landmark detected. Should not be // filled. // "LEFT_EYE" - Left eye. // "RIGHT_EYE" - Right eye. // "LEFT_OF_LEFT_EYEBROW" - Left of left eyebrow. // "RIGHT_OF_LEFT_EYEBROW" - Right of left eyebrow. // "LEFT_OF_RIGHT_EYEBROW" - Left of right eyebrow. // "RIGHT_OF_RIGHT_EYEBROW" - Right of right eyebrow. // "MIDPOINT_BETWEEN_EYES" - Midpoint between eyes. // "NOSE_TIP" - Nose tip. // "UPPER_LIP" - Upper lip. // "LOWER_LIP" - Lower lip. // "MOUTH_LEFT" - Mouth left. // "MOUTH_RIGHT" - Mouth right. // "MOUTH_CENTER" - Mouth center. // "NOSE_BOTTOM_RIGHT" - Nose, bottom right. // "NOSE_BOTTOM_LEFT" - Nose, bottom left. // "NOSE_BOTTOM_CENTER" - Nose, bottom center. // "LEFT_EYE_TOP_BOUNDARY" - Left eye, top boundary. // "LEFT_EYE_RIGHT_CORNER" - Left eye, right corner. // "LEFT_EYE_BOTTOM_BOUNDARY" - Left eye, bottom boundary. // "LEFT_EYE_LEFT_CORNER" - Left eye, left corner. // "RIGHT_EYE_TOP_BOUNDARY" - Right eye, top boundary. // "RIGHT_EYE_RIGHT_CORNER" - Right eye, right corner. // "RIGHT_EYE_BOTTOM_BOUNDARY" - Right eye, bottom boundary. // "RIGHT_EYE_LEFT_CORNER" - Right eye, left corner. // "LEFT_EYEBROW_UPPER_MIDPOINT" - Left eyebrow, upper midpoint. // "RIGHT_EYEBROW_UPPER_MIDPOINT" - Right eyebrow, upper midpoint. // "LEFT_EAR_TRAGION" - Left ear tragion. // "RIGHT_EAR_TRAGION" - Right ear tragion. // "LEFT_EYE_PUPIL" - Left eye pupil. // "RIGHT_EYE_PUPIL" - Right eye pupil. // "FOREHEAD_GLABELLA" - Forehead glabella. // "CHIN_GNATHION" - Chin gnathion. // "CHIN_LEFT_GONION" - Chin left gonion. // "CHIN_RIGHT_GONION" - Chin right gonion. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Position") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Position") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Landmark) MarshalJSON() ([]byte, error) { type NoMethod Landmark raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // LatLng: An object representing a latitude/longitude pair. This is // expressed as a pair // of doubles representing degrees latitude and degrees longitude. // Unless // specified otherwise, this must conform to the // <a // href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84 // st // andard</a>. Values must be within normalized ranges. type LatLng struct { // Latitude: The latitude in degrees. It must be in the range [-90.0, // +90.0]. Latitude float64 `json:"latitude,omitempty"` // Longitude: The longitude in degrees. It must be in the range [-180.0, // +180.0]. Longitude float64 `json:"longitude,omitempty"` // ForceSendFields is a list of field names (e.g. "Latitude") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Latitude") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *LatLng) MarshalJSON() ([]byte, error) { type NoMethod LatLng raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *LatLng) UnmarshalJSON(data []byte) error { type NoMethod LatLng var s1 struct { Latitude gensupport.JSONFloat64 `json:"latitude"` Longitude gensupport.JSONFloat64 `json:"longitude"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Latitude = float64(s1.Latitude) s.Longitude = float64(s1.Longitude) return nil } // LatLongRect: Rectangle determined by min and max `LatLng` pairs. type LatLongRect struct { // MaxLatLng: Max lat/long pair. MaxLatLng *LatLng `json:"maxLatLng,omitempty"` // MinLatLng: Min lat/long pair. MinLatLng *LatLng `json:"minLatLng,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxLatLng") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "MaxLatLng") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *LatLongRect) MarshalJSON() ([]byte, error) { type NoMethod LatLongRect raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for // Operations.ListOperations. type ListOperationsResponse struct { // NextPageToken: The standard List next-page token. NextPageToken string `json:"nextPageToken,omitempty"` // Operations: A list of operations that matches the specified filter in // the request. Operations []*Operation `json:"operations,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"NextPageToken") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // LocationInfo: Detected entity location information. type LocationInfo struct { // LatLng: lat/long location coordinates. LatLng *LatLng `json:"latLng,omitempty"` // ForceSendFields is a list of field names (e.g. "LatLng") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "LatLng") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *LocationInfo) MarshalJSON() ([]byte, error) { type NoMethod LocationInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // NormalizedVertex: A vertex represents a 2D point in the image. // NOTE: the normalized vertex coordinates are relative to the original // image // and range from 0 to 1. type NormalizedVertex struct { // X: X coordinate. X float64 `json:"x,omitempty"` // Y: Y coordinate. Y float64 `json:"y,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *NormalizedVertex) MarshalJSON() ([]byte, error) { type NoMethod NormalizedVertex raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *NormalizedVertex) UnmarshalJSON(data []byte) error { type NoMethod NormalizedVertex var s1 struct { X gensupport.JSONFloat64 `json:"x"` Y gensupport.JSONFloat64 `json:"y"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.X = float64(s1.X) s.Y = float64(s1.Y) return nil } // Operation: This resource represents a long-running operation that is // the result of a // network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in // progress. // If `true`, the operation is completed, and either `error` or // `response` is // available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` // Metadata: Service-specific metadata associated with the operation. // It typically // contains progress information and common metadata such as create // time. // Some services might not provide such metadata. Any method that // returns a // long-running operation should document the metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same // service that // originally returns it. If you use the default HTTP mapping, // the // `name` should have the format of `operations/some/unique/name`. Name string `json:"name,omitempty"` // Response: The normal response of the operation in case of success. // If the original // method returns no data on success, such as `Delete`, the response // is // `google.protobuf.Empty`. If the original method is // standard // `Get`/`Create`/`Update`, the response should be the resource. For // other // methods, the response should have the type `XxxResponse`, where // `Xxx` // is the original method name. For example, if the original method // name // is `TakeSnapshot()`, the inferred response type // is // `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Done") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Done") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // OperationMetadata: Contains metadata for the BatchAnnotateImages // operation. 
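//
// Illustrative sketch (added commentary, not generated code): a caller that
// receives an Operation (defined above) typically waits until Done is true,
// then inspects Error or decodes Response. Assuming
// AsyncBatchAnnotateFilesResponse (the result type named by the
// files.asyncBatchAnnotate method later in this file) is the expected
// payload:
//
//	if op.Done {
//		if op.Error != nil {
//			return fmt.Errorf("operation failed: code %d: %s", op.Error.Code, op.Error.Message)
//		}
//		var resp AsyncBatchAnnotateFilesResponse
//		if err := json.Unmarshal([]byte(op.Response), &resp); err != nil {
//			return err
//		}
//		// use resp ...
//	}
//
// Operations that are not yet done are polled again through the
// google.longrunning.Operations interface.
//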
type OperationMetadata struct { // CreateTime: The time when the batch request was received. CreateTime string `json:"createTime,omitempty"` // State: Current state of the batch operation. // // Possible values: // "STATE_UNSPECIFIED" - Invalid. // "CREATED" - Request is received. // "RUNNING" - Request is actively being processed. // "DONE" - The batch processing is done. // "CANCELLED" - The batch processing was cancelled. State string `json:"state,omitempty"` // UpdateTime: The time when the operation result was last updated. UpdateTime string `json:"updateTime,omitempty"` // ForceSendFields is a list of field names (e.g. "CreateTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CreateTime") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // OutputConfig: The desired output location and metadata. type OutputConfig struct { // BatchSize: The max number of response protos to put into each output // JSON file on // Google Cloud Storage. // The valid range is [1, 100]. If not specified, the default value is // 20. // // For example, for one pdf file with 100 pages, 100 response protos // will // be generated. If `batch_size` = 20, then 5 json files each // containing 20 response protos will be written under the // prefix // `gcs_destination`.`uri`. // // Currently, batch_size only applies to GcsDestination, with potential // future // support for other output configurations. BatchSize int64 `json:"batchSize,omitempty"` // GcsDestination: The Google Cloud Storage location to write the // output(s) to. GcsDestination *GcsDestination `json:"gcsDestination,omitempty"` // ForceSendFields is a list of field names (e.g. "BatchSize") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BatchSize") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
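//
// Worked example (added commentary, not generated code): with the default
// BatchSize of 20, a 100-page PDF yields ceil(100/20) = 5 output JSON files
// under the gcs_destination prefix. A hedged construction sketch, assuming
// GcsDestination (defined elsewhere in this file) exposes a Uri field and
// that "gs://my-bucket/ocr-output/" is a placeholder location:
//
//	out := &OutputConfig{
//		BatchSize:      20,
//		GcsDestination: &GcsDestination{Uri: "gs://my-bucket/ocr-output/"},
//	}
//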
NullFields []string `json:"-"` } func (s *OutputConfig) MarshalJSON() ([]byte, error) { type NoMethod OutputConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Page: Detected page from OCR. type Page struct { // Blocks: List of blocks of text, images etc on this page. Blocks []*Block `json:"blocks,omitempty"` // Confidence: Confidence of the OCR results on the page. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Height: Page height. For PDFs the unit is points. For images // (including // TIFFs) the unit is pixels. Height int64 `json:"height,omitempty"` // Property: Additional information detected on the page. Property *TextProperty `json:"property,omitempty"` // Width: Page width. For PDFs the unit is points. For images // (including // TIFFs) the unit is pixels. Width int64 `json:"width,omitempty"` // ForceSendFields is a list of field names (e.g. "Blocks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Blocks") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Page) MarshalJSON() ([]byte, error) { type NoMethod Page raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Page) UnmarshalJSON(data []byte) error { type NoMethod Page var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // Paragraph: Structural unit of text representing a number of words in // certain order. type Paragraph struct { // BoundingBox: The bounding box for the paragraph. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it // becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox *BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results for the paragraph. Range // [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Property: Additional information detected for the paragraph. Property *TextProperty `json:"property,omitempty"` // Words: List of words in this paragraph. Words []*Word `json:"words,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingBox") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingBox") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Paragraph) MarshalJSON() ([]byte, error) { type NoMethod Paragraph raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Paragraph) UnmarshalJSON(data []byte) error { type NoMethod Paragraph var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // Position: A 3D position in the image, used primarily for Face // detection landmarks. // A valid Position must have both x and y coordinates. // The position coordinates are in the same scale as the original image. type Position struct { // X: X coordinate. X float64 `json:"x,omitempty"` // Y: Y coordinate. Y float64 `json:"y,omitempty"` // Z: Z coordinate (or depth). Z float64 `json:"z,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Position) MarshalJSON() ([]byte, error) { type NoMethod Position raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Position) UnmarshalJSON(data []byte) error { type NoMethod Position var s1 struct { X gensupport.JSONFloat64 `json:"x"` Y gensupport.JSONFloat64 `json:"y"` Z gensupport.JSONFloat64 `json:"z"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.X = float64(s1.X) s.Y = float64(s1.Y) s.Z = float64(s1.Z) return nil } // Property: A `Property` consists of a user-supplied name/value pair. type Property struct { // Name: Name of the property. Name string `json:"name,omitempty"` // Uint64Value: Value of numeric properties. Uint64Value uint64 `json:"uint64Value,omitempty,string"` // Value: Value of the property. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Name") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Property) MarshalJSON() ([]byte, error) { type NoMethod Property raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SafeSearchAnnotation: Set of features pertaining to the image, // computed by computer vision // methods over safe-search verticals (for example, adult, spoof, // medical, // violence). type SafeSearchAnnotation struct { // Adult: Represents the adult content likelihood for the image. Adult // content may // contain elements such as nudity, pornographic images or cartoons, // or // sexual activities. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Adult string `json:"adult,omitempty"` // Medical: Likelihood that this is a medical image. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Medical string `json:"medical,omitempty"` // Racy: Likelihood that the request image contains racy content. Racy // content may // include (but is not limited to) skimpy or sheer clothing, // strategically // covered nudity, lewd or provocative poses, or close-ups of // sensitive // body areas. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Racy string `json:"racy,omitempty"` // Spoof: Spoof likelihood. The likelihood that an modification // was made to the image's canonical version to make it appear // funny or offensive. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. 
// "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Spoof string `json:"spoof,omitempty"` // Violence: Likelihood that this image contains violent content. // // Possible values: // "UNKNOWN" - Unknown likelihood. // "VERY_UNLIKELY" - It is very unlikely that the image belongs to the // specified vertical. // "UNLIKELY" - It is unlikely that the image belongs to the specified // vertical. // "POSSIBLE" - It is possible that the image belongs to the specified // vertical. // "LIKELY" - It is likely that the image belongs to the specified // vertical. // "VERY_LIKELY" - It is very likely that the image belongs to the // specified vertical. Violence string `json:"violence,omitempty"` // ForceSendFields is a list of field names (e.g. "Adult") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Adult") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SafeSearchAnnotation) MarshalJSON() ([]byte, error) { type NoMethod SafeSearchAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is // suitable for different // programming environments, including REST APIs and RPC APIs. It is // used by // [gRPC](https://github.com/grpc). The error model is designed to // be: // // - Simple to use and understand for most users // - Flexible enough to meet unexpected needs // // # Overview // // The `Status` message contains three pieces of data: error code, error // message, // and error details. The error code should be an enum value // of // google.rpc.Code, but it may accept additional error codes if needed. // The // error message should be a developer-facing English message that // helps // developers *understand* and *resolve* the error. If a localized // user-facing // error message is needed, put the localized message in the error // details or // localize it in the client. The optional error details may contain // arbitrary // information about the error. There is a predefined set of error // detail types // in the package `google.rpc` that can be used for common error // conditions. // // # Language mapping // // The `Status` message is the logical representation of the error // model, but it // is not necessarily the actual wire format. When the `Status` message // is // exposed in different client libraries and different wire protocols, // it can be // mapped differently. For example, it will likely be mapped to some // exceptions // in Java, but more likely mapped to some error codes in C. 
// // # Other uses // // The error model and the `Status` message can be used in a variety // of // environments, either with or without APIs, to provide a // consistent developer experience across different // environments. // // Example uses of this error model include: // // - Partial errors. If a service needs to return partial errors to the // client, // it may embed the `Status` in the normal response to indicate the // partial // errors. // // - Workflow errors. A typical workflow has multiple steps. Each step // may // have a `Status` message for error reporting. // // - Batch operations. If a client uses batch request and batch // response, the // `Status` message should be used directly inside batch response, // one for // each error sub-response. // // - Asynchronous operations. If an API call embeds asynchronous // operation // results in its response, the status of those operations should // be // represented directly using the `Status` message. // // - Logging. If some API errors are stored in logs, the message // `Status` could // be used directly after any stripping needed for security/privacy // reasons. type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` // Details: A list of messages that carry the error details. There is a // common set of // message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in // English. Any // user-facing error message should be localized and sent in // the // google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Status) MarshalJSON() ([]byte, error) { type NoMethod Status raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Symbol: A single symbol representation. type Symbol struct { // BoundingBox: The bounding box for the symbol. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it // becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox *BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results for the symbol. Range [0, // 1]. 
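//
// Illustrative sketch (added commentary, not generated code): because
// Confidence is in [0, 1], a caller can drop low-confidence symbols before
// assembling text. Assuming word is a *Word (defined later in this file)
// and 0.5 is an application-specific threshold:
//
//	var kept []*Symbol
//	for _, sym := range word.Symbols {
//		if sym.Confidence >= 0.5 {
//			kept = append(kept, sym)
//		}
//	}
//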
Confidence float64 `json:"confidence,omitempty"` // Property: Additional information detected for the symbol. Property *TextProperty `json:"property,omitempty"` // Text: The actual UTF-8 representation of the symbol. Text string `json:"text,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingBox") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingBox") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Symbol) MarshalJSON() ([]byte, error) { type NoMethod Symbol raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Symbol) UnmarshalJSON(data []byte) error { type NoMethod Symbol var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // TextAnnotation: TextAnnotation contains a structured representation // of OCR extracted text. // The hierarchy of an OCR extracted text structure is like this: // TextAnnotation -> Page -> Block -> Paragraph -> Word -> // Symbol // Each structural component, starting from Page, may further have their // own // properties. Properties describe detected languages, breaks etc.. // Please refer // to the TextAnnotation.TextProperty message definition below for // more // detail. type TextAnnotation struct { // Pages: List of pages detected by OCR. Pages []*Page `json:"pages,omitempty"` // Text: UTF-8 text detected on the pages. Text string `json:"text,omitempty"` // ForceSendFields is a list of field names (e.g. "Pages") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Pages") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *TextAnnotation) MarshalJSON() ([]byte, error) { type NoMethod TextAnnotation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TextProperty: Additional information detected on the structural // component. type TextProperty struct { // DetectedBreak: Detected start or end of a text segment. 
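//
// Illustrative sketch (added commentary, not generated code): the hierarchy
// described above can be walked to reassemble text symbol by symbol.
// Assuming annotation is a *TextAnnotation, that Block (defined earlier in
// this file) exposes a Paragraphs field, and that the strings package is
// imported:
//
//	var sb strings.Builder
//	for _, page := range annotation.Pages {
//		for _, block := range page.Blocks {
//			for _, para := range block.Paragraphs {
//				for _, word := range para.Words {
//					for _, sym := range word.Symbols {
//						sb.WriteString(sym.Text)
//					}
//					sb.WriteString(" ")
//				}
//			}
//		}
//	}
//
// Note that TextAnnotation.Text already carries the concatenated UTF-8
// text; the walk is only needed when per-symbol geometry, confidence, or
// detected breaks matter.
//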
DetectedBreak *DetectedBreak `json:"detectedBreak,omitempty"` // DetectedLanguages: A list of detected languages together with // confidence. DetectedLanguages []*DetectedLanguage `json:"detectedLanguages,omitempty"` // ForceSendFields is a list of field names (e.g. "DetectedBreak") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DetectedBreak") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *TextProperty) MarshalJSON() ([]byte, error) { type NoMethod TextProperty raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Vertex: A vertex represents a 2D point in the image. // NOTE: the vertex coordinates are in the same scale as the original // image. type Vertex struct { // X: X coordinate. X int64 `json:"x,omitempty"` // Y: Y coordinate. Y int64 `json:"y,omitempty"` // ForceSendFields is a list of field names (e.g. "X") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "X") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Vertex) MarshalJSON() ([]byte, error) { type NoMethod Vertex raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // WebDetection: Relevant information for the image from the Internet. type WebDetection struct { // BestGuessLabels: The service's best guess as to the topic of the // request image. // Inferred from similar images on the open web. BestGuessLabels []*WebLabel `json:"bestGuessLabels,omitempty"` // FullMatchingImages: Fully matching images from the Internet. // Can include resized copies of the query image. FullMatchingImages []*WebImage `json:"fullMatchingImages,omitempty"` // PagesWithMatchingImages: Web pages containing the matching images // from the Internet. PagesWithMatchingImages []*WebPage `json:"pagesWithMatchingImages,omitempty"` // PartialMatchingImages: Partial matching images from the // Internet. // Those images are similar enough to share some key-point features. // For // example an original image will likely have partial matching for its // crops. 
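//
// Illustrative sketch (added commentary, not generated code): a typical
// consumer prints the best-guess labels and the URLs of fully matching
// images. Assuming web is a *WebDetection taken from an annotation
// response:
//
//	for _, label := range web.BestGuessLabels {
//		fmt.Printf("best guess: %s (%s)\n", label.Label, label.LanguageCode)
//	}
//	for _, img := range web.FullMatchingImages {
//		fmt.Println("full match:", img.Url)
//	}
//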
PartialMatchingImages []*WebImage `json:"partialMatchingImages,omitempty"` // VisuallySimilarImages: The visually similar image results. VisuallySimilarImages []*WebImage `json:"visuallySimilarImages,omitempty"` // WebEntities: Deduced entities from similar images on the Internet. WebEntities []*WebEntity `json:"webEntities,omitempty"` // ForceSendFields is a list of field names (e.g. "BestGuessLabels") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BestGuessLabels") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *WebDetection) MarshalJSON() ([]byte, error) { type NoMethod WebDetection raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // WebDetectionParams: Parameters for web detection request. type WebDetectionParams struct { // IncludeGeoResults: Whether to include results derived from the geo // information in the image. IncludeGeoResults bool `json:"includeGeoResults,omitempty"` // ForceSendFields is a list of field names (e.g. "IncludeGeoResults") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "IncludeGeoResults") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *WebDetectionParams) MarshalJSON() ([]byte, error) { type NoMethod WebDetectionParams raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // WebEntity: Entity deduced from similar images on the Internet. type WebEntity struct { // Description: Canonical description of the entity, in English. Description string `json:"description,omitempty"` // EntityId: Opaque entity ID. EntityId string `json:"entityId,omitempty"` // Score: Overall relevancy score for the entity. // Not normalized and not comparable across different image queries. Score float64 `json:"score,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *WebEntity) MarshalJSON() ([]byte, error) { type NoMethod WebEntity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *WebEntity) UnmarshalJSON(data []byte) error { type NoMethod WebEntity var s1 struct { Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Score = float64(s1.Score) return nil } // WebImage: Metadata for online images. type WebImage struct { // Score: (Deprecated) Overall relevancy score for the image. Score float64 `json:"score,omitempty"` // Url: The result image URL. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Score") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Score") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *WebImage) MarshalJSON() ([]byte, error) { type NoMethod WebImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *WebImage) UnmarshalJSON(data []byte) error { type NoMethod WebImage var s1 struct { Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Score = float64(s1.Score) return nil } // WebLabel: Label to provide extra metadata for the web detection. type WebLabel struct { // Label: Label for extra metadata. Label string `json:"label,omitempty"` // LanguageCode: The BCP-47 language code for `label`, such as "en-US" // or "sr-Latn". // For more information, // see // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. LanguageCode string `json:"languageCode,omitempty"` // ForceSendFields is a list of field names (e.g. "Label") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Label") to include in API // requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *WebLabel) MarshalJSON() ([]byte, error) { type NoMethod WebLabel raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // WebPage: Metadata for web pages. type WebPage struct { // FullMatchingImages: Fully matching images on the page. // Can include resized copies of the query image. FullMatchingImages []*WebImage `json:"fullMatchingImages,omitempty"` // PageTitle: Title for the web page, may contain HTML markup. PageTitle string `json:"pageTitle,omitempty"` // PartialMatchingImages: Partial matching images on the page. // Those images are similar enough to share some key-point features. // For // example an original image will likely have partial matching for // its // crops. PartialMatchingImages []*WebImage `json:"partialMatchingImages,omitempty"` // Score: (Deprecated) Overall relevancy score for the web page. Score float64 `json:"score,omitempty"` // Url: The result web page URL. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "FullMatchingImages") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "FullMatchingImages") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *WebPage) MarshalJSON() ([]byte, error) { type NoMethod WebPage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *WebPage) UnmarshalJSON(data []byte) error { type NoMethod WebPage var s1 struct { Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Score = float64(s1.Score) return nil } // Word: A word representation. type Word struct { // BoundingBox: The bounding box for the word. // The vertices are in the order of top-left, top-right, // bottom-right, // bottom-left. When a rotation of the bounding box is detected the // rotation // is represented as around the top-left corner as defined when the text // is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it // becomes: // 2----3 // | | // 1----0 // and the vertex order will still be (0, 1, 2, 3). BoundingBox *BoundingPoly `json:"boundingBox,omitempty"` // Confidence: Confidence of the OCR results for the word. Range [0, 1]. Confidence float64 `json:"confidence,omitempty"` // Property: Additional information detected for the word. 
Property *TextProperty `json:"property,omitempty"` // Symbols: List of symbols in the word. // The order of the symbols follows the natural reading order. Symbols []*Symbol `json:"symbols,omitempty"` // ForceSendFields is a list of field names (e.g. "BoundingBox") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoundingBox") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Word) MarshalJSON() ([]byte, error) { type NoMethod Word raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Word) UnmarshalJSON(data []byte) error { type NoMethod Word var s1 struct { Confidence gensupport.JSONFloat64 `json:"confidence"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Confidence = float64(s1.Confidence) return nil } // method id "vision.files.asyncBatchAnnotate": type FilesAsyncBatchAnnotateCall struct { s *Service asyncbatchannotatefilesrequest *AsyncBatchAnnotateFilesRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // AsyncBatchAnnotate: Run asynchronous image detection and annotation // for a list of generic // files, such as PDF files, which may contain multiple pages and // multiple // images per page. Progress and results can be retrieved through // the // `google.longrunning.Operations` interface. // `Operation.metadata` contains `OperationMetadata` // (metadata). // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` // (results). func (r *FilesService) AsyncBatchAnnotate(asyncbatchannotatefilesrequest *AsyncBatchAnnotateFilesRequest) *FilesAsyncBatchAnnotateCall { c := &FilesAsyncBatchAnnotateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.asyncbatchannotatefilesrequest = asyncbatchannotatefilesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *FilesAsyncBatchAnnotateCall) Fields(s ...googleapi.Field) *FilesAsyncBatchAnnotateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *FilesAsyncBatchAnnotateCall) Context(ctx context.Context) *FilesAsyncBatchAnnotateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *FilesAsyncBatchAnnotateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *FilesAsyncBatchAnnotateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.asyncbatchannotatefilesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/files:asyncBatchAnnotate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "vision.files.asyncBatchAnnotate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *FilesAsyncBatchAnnotateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Run asynchronous image detection and annotation for a list of generic\nfiles, such as PDF files, which may contain multiple pages and multiple\nimages per page. Progress and results can be retrieved through the\n`google.longrunning.Operations` interface.\n`Operation.metadata` contains `OperationMetadata` (metadata).\n`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).", // "flatPath": "v1/files:asyncBatchAnnotate", // "httpMethod": "POST", // "id": "vision.files.asyncBatchAnnotate", // "parameterOrder": [], // "parameters": {}, // "path": "v1/files:asyncBatchAnnotate", // "request": { // "$ref": "AsyncBatchAnnotateFilesRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-vision" // ] // } } // method id "vision.images.annotate": type ImagesAnnotateCall struct { s *Service batchannotateimagesrequest *BatchAnnotateImagesRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Annotate: Run image detection and annotation for a batch of images. func (r *ImagesService) Annotate(batchannotateimagesrequest *BatchAnnotateImagesRequest) *ImagesAnnotateCall { c := &ImagesAnnotateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.batchannotateimagesrequest = batchannotateimagesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ImagesAnnotateCall) Fields(s ...googleapi.Field) *ImagesAnnotateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ImagesAnnotateCall) Context(ctx context.Context) *ImagesAnnotateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ImagesAnnotateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ImagesAnnotateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchannotateimagesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/images:annotate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "vision.images.annotate" call. // Exactly one of *BatchAnnotateImagesResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either // *BatchAnnotateImagesResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ImagesAnnotateCall) Do(opts ...googleapi.CallOption) (*BatchAnnotateImagesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &BatchAnnotateImagesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Run image detection and annotation for a batch of images.", // "flatPath": "v1/images:annotate", // "httpMethod": "POST", // "id": "vision.images.annotate", // "parameterOrder": [], // "parameters": {}, // "path": "v1/images:annotate", // "request": { // "$ref": "BatchAnnotateImagesRequest" // }, // "response": { // "$ref": "BatchAnnotateImagesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-vision" // ] // } } // method id "vision.locations.operations.get": type LocationsOperationsGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Gets the latest state of a long-running operation. Clients can // use this // method to poll the operation result at intervals as recommended by // the API // service. 
func (r *LocationsOperationsService) Get(name string) *LocationsOperationsGetCall { c := &LocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *LocationsOperationsGetCall) Fields(s ...googleapi.Field) *LocationsOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *LocationsOperationsGetCall) IfNoneMatch(entityTag string) *LocationsOperationsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *LocationsOperationsGetCall) Context(ctx context.Context) *LocationsOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *LocationsOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *LocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "vision.locations.operations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *LocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets the latest state of a long-running operation. 
Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", // "flatPath": "v1/locations/{locationsId}/operations/{operationsId}", // "httpMethod": "GET", // "id": "vision.locations.operations.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The name of the operation resource.", // "location": "path", // "pattern": "^locations/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-vision" // ] // } } // method id "vision.operations.cancel": type OperationsCancelCall struct { s *Service name string canceloperationrequest *CancelOperationRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Cancel: Starts asynchronous cancellation on a long-running operation. // The server // makes a best effort to cancel the operation, but success is // not // guaranteed. If the server doesn't support this method, it // returns // `google.rpc.Code.UNIMPLEMENTED`. Clients can // use // Operations.GetOperation or // other methods to check whether the cancellation succeeded or whether // the // operation completed despite cancellation. On successful // cancellation, // the operation is not deleted; instead, it becomes an operation // with // an Operation.error value with a google.rpc.Status.code of // 1, // corresponding to `Code.CANCELLED`. func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.canceloperationrequest = canceloperationrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OperationsCancelCall) Fields(s ...googleapi.Field) *OperationsCancelCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OperationsCancelCall) Context(ctx context.Context) *OperationsCancelCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *OperationsCancelCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "vision.operations.cancel" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", // "flatPath": "v1/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "vision.operations.cancel", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The name of the operation resource to be cancelled.", // "location": "path", // "pattern": "^operations/.+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}:cancel", // "request": { // "$ref": "CancelOperationRequest" // }, // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-vision" // ] // } } // method id "vision.operations.delete": type OperationsDeleteCall struct { s *Service name string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Deletes a long-running operation. This method indicates that // the client is // no longer interested in the operation result. It does not cancel // the // operation. If the server doesn't support this method, it // returns // `google.rpc.Code.UNIMPLEMENTED`. func (r *OperationsService) Delete(name string) *OperationsDeleteCall { c := &OperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OperationsDeleteCall) Fields(s ...googleapi.Field) *OperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OperationsDeleteCall) Context(ctx context.Context) *OperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *OperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "vision.operations.delete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *OperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "vision.operations.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The name of the operation resource to be deleted.", // "location": "path", // "pattern": "^operations/.+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-vision" // ] // } } // method id "vision.operations.get": type OperationsGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Gets the latest state of a long-running operation. Clients can // use this // method to poll the operation result at intervals as recommended by // the API // service. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. 
This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *OperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "vision.operations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", // "flatPath": "v1/operations/{operationsId}", // "httpMethod": "GET", // "id": "vision.operations.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The name of the operation resource.", // "location": "path", // "pattern": "^operations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-vision" // ] // } } // method id "vision.operations.list": type OperationsListCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists operations that match the specified filter in the // request. 
If the // server doesn't support this method, it returns // `UNIMPLEMENTED`. // // NOTE: the `name` binding allows API services to override the // binding // to use different resource name schemes, such as `users/*/operations`. // To // override the binding, API services can add a binding such // as // "/v1/{name=users/*}/operations" to their service configuration. // For backwards compatibility, the default name includes the // operations // collection id, however overriding users must ensure the name // binding // is the parent resource, without the operations collection id. func (r *OperationsService) List(name string) *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Filter sets the optional parameter "filter": The standard list // filter. func (c *OperationsListCall) Filter(filter string) *OperationsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The standard list // page size. func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": The standard list // page token. func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *OperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "vision.operations.list" call. // Exactly one of *ListOperationsResponse or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either // *ListOperationsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ListOperationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", // "flatPath": "v1/operations", // "httpMethod": "GET", // "id": "vision.operations.list", // "parameterOrder": [ // "name" // ], // "parameters": { // "filter": { // "description": "The standard list filter.", // "location": "query", // "type": "string" // }, // "name": { // "description": "The name of the operation's parent resource.", // "location": "path", // "pattern": "^operations$", // "required": true, // "type": "string" // }, // "pageSize": { // "description": "The standard list page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "The standard list page token.", // "location": "query", // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { // "$ref": "ListOperationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-vision" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } }<|fim▁end|>
if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath}
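// Hedged usage sketch: how the generated vision/v1 call builder shown above
// (New, ImagesService.Annotate, Context, Do) is typically driven by a caller.
// This is not part of the generated file. It assumes the package import path
// google.golang.org/api/vision/v1 and that the request/response helper types
// (AnnotateImageRequest, Image, ImageSource, Feature, EntityAnnotation) are
// defined earlier in the same generated source.
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	vision "google.golang.org/api/vision/v1"
)

// annotateImage builds a single label-detection request and executes it with
// the chained call builder: Annotate(...).Context(ctx).Do().
func annotateImage(ctx context.Context, httpClient *http.Client, imageURI string) error {
	svc, err := vision.New(httpClient) // New is the constructor defined in this generated file
	if err != nil {
		return err
	}
	req := &vision.BatchAnnotateImagesRequest{
		Requests: []*vision.AnnotateImageRequest{{
			Image:    &vision.Image{Source: &vision.ImageSource{ImageUri: imageURI}},
			Features: []*vision.Feature{{Type: "LABEL_DETECTION", MaxResults: 5}},
		}},
	}
	resp, err := svc.Images.Annotate(req).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, r := range resp.Responses {
		for _, a := range r.LabelAnnotations {
			fmt.Printf("%s (%.2f)\n", a.Description, a.Score)
		}
	}
	return nil
}

func main() {
	// A real caller would pass an OAuth2-authorized client (for example from
	// golang.org/x/oauth2/google); http.DefaultClient is used here only to keep
	// the sketch self-contained and will not authenticate against the API.
	if err := annotateImage(context.Background(), http.DefaultClient, "gs://my-bucket/image.jpg"); err != nil {
		log.Fatal(err)
	}
}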
<|file_name|>container_linux.go<|end_file_name|><|fim▁begin|>// +build linux package libcontainer import ( "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "sync" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/libcontainer/cgroups" "github.com/docker/libcontainer/configs" "github.com/docker/libcontainer/criurpc" "github.com/golang/protobuf/proto" ) const stdioFdCount = 3 type linuxContainer struct { id string root string config *configs.Config cgroupManager cgroups.Manager initPath string initArgs []string initProcess parentProcess criuPath string m sync.Mutex } // ID returns the container's unique ID func (c *linuxContainer) ID() string { return c.id } // Config returns the container's configuration func (c *linuxContainer) Config() configs.Config { return *c.config } func (c *linuxContainer) Status() (Status, error) { c.m.Lock() defer c.m.Unlock() return c.currentStatus() } func (c *linuxContainer) State() (*State, error) { c.m.Lock() defer c.m.Unlock() return c.currentState() } func (c *linuxContainer) Processes() ([]int, error) { pids, err := c.cgroupManager.GetPids() if err != nil { return nil, newSystemError(err) } return pids, nil } func (c *linuxContainer) Stats() (*Stats, error) { var ( err error stats = &Stats{} ) if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { return stats, newSystemError(err) } for _, iface := range c.config.Networks { switch iface.Type { case "veth": istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) if err != nil { return stats, newSystemError(err) } stats.Interfaces = append(stats.Interfaces, istats) } } return stats, nil } func (c *linuxContainer) Set(config configs.Config) error { c.m.Lock() defer c.m.Unlock() c.config = &config return c.cgroupManager.Set(c.config) } func (c *linuxContainer) Start(process *Process) error { c.m.Lock() defer c.m.Unlock() status, err := c.currentStatus() if err != nil { return err } doInit := status == Destroyed parent, err := c.newParentProcess(process, doInit) if err != nil { return newSystemError(err) } if err := parent.start(); err != nil { // terminate the process to ensure that it properly is reaped. if err := parent.terminate(); err != nil { logrus.Warn(err) } return newSystemError(err) } process.ops = parent if doInit { c.updateState(parent) } return nil } func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) { parentPipe, childPipe, err := newPipe() if err != nil { return nil, newSystemError(err) } cmd, err := c.commandTemplate(p, childPipe) if err != nil { return nil, newSystemError(err) } if !doInit { return c.newSetnsProcess(p, cmd, parentPipe, childPipe), nil } return c.newInitProcess(p, cmd, parentPipe, childPipe) } func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) { cmd := &exec.Cmd{ Path: c.initPath, Args: c.initArgs, } cmd.Stdin = p.Stdin cmd.Stdout = p.Stdout cmd.Stderr = p.Stderr cmd.Dir = c.config.Rootfs if cmd.SysProcAttr == nil { cmd.SysProcAttr = &syscall.SysProcAttr{} } cmd.ExtraFiles = append(p.ExtraFiles, childPipe) cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1)) // NOTE: when running a container with no PID namespace and the parent process spawning the container is // PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason // even with the parent still running. 
if c.config.ParentDeathSignal > 0 { cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal) } return cmd, nil } func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*initProcess, error) { t := "_LIBCONTAINER_INITTYPE=standard" cloneFlags := c.config.Namespaces.CloneFlags() if cloneFlags&syscall.CLONE_NEWUSER != 0 { if err := c.addUidGidMappings(cmd.SysProcAttr); err != nil { // user mappings are not supported return nil, err } // Default to root user when user namespaces are enabled. if cmd.SysProcAttr.Credential == nil { cmd.SysProcAttr.Credential = &syscall.Credential{} } } cmd.Env = append(cmd.Env, t) cmd.SysProcAttr.Cloneflags = cloneFlags return &initProcess{ cmd: cmd, childPipe: childPipe, parentPipe: parentPipe, manager: c.cgroupManager, config: c.newInitConfig(p), }, nil } func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) *setnsProcess { cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPID=%d", c.initProcess.pid()), "_LIBCONTAINER_INITTYPE=setns", ) if p.consolePath != "" { cmd.Env = append(cmd.Env, "_LIBCONTAINER_CONSOLE_PATH="+p.consolePath) } // TODO: set on container for process management return &setnsProcess{ cmd: cmd, cgroupPaths: c.cgroupManager.GetPaths(), childPipe: childPipe, parentPipe: parentPipe, config: c.newInitConfig(p), } } func (c *linuxContainer) newInitConfig(process *Process) *initConfig { return &initConfig{ Config: c.config, Args: process.Args, Env: process.Env, User: process.User, Cwd: process.Cwd, Console: process.consolePath, Capabilities: process.Capabilities, PassedFilesCount: len(process.ExtraFiles), } } func newPipe() (parent *os.File, child *os.File, err error) { fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) if err != nil { return nil, nil, err } return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil } func (c *linuxContainer) Destroy() error { c.m.Lock() defer c.m.Unlock() status, err := c.currentStatus() if err != nil { return err } if status != Destroyed { return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped) } if !c.config.Namespaces.Contains(configs.NEWPID) { if err := killCgroupProcesses(c.cgroupManager); err != nil { logrus.Warn(err) } } err = c.cgroupManager.Destroy() if rerr := os.RemoveAll(c.root); err == nil { err = rerr } c.initProcess = nil return err } func (c *linuxContainer) Pause() error { c.m.Lock() defer c.m.Unlock() return c.cgroupManager.Freeze(configs.Frozen) } func (c *linuxContainer) Resume() error { c.m.Lock() defer c.m.Unlock() return c.cgroupManager.Freeze(configs.Thawed) } func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) { return notifyOnOOM(c.cgroupManager.GetPaths()) } // XXX debug support, remove when debugging done. 
func addArgsFromEnv(evar string, args *[]string) { if e := os.Getenv(evar); e != "" { for _, f := range strings.Fields(e) { *args = append(*args, f) } } fmt.Printf(">>> criu %v\n", *args) } func (c *linuxContainer) checkCriuVersion() error { var x, y, z int out, err := exec.Command(c.criuPath, "-V").Output() if err != nil { return err } n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2 if err != nil { n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6 } if n < 2 || err != nil { return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err) } if x*10000+y*100+z < 10502 { return fmt.Errorf("CRIU version must be 1.5.2 or higher") } return nil } const descriptors_filename = "descriptors.json" func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { c.m.Lock() defer c.m.Unlock() if err := c.checkCriuVersion(); err != nil { return err } if criuOpts.ImagesDirectory == "" { criuOpts.ImagesDirectory = filepath.Join(c.root, "criu.image") } // Since a container can be C/R'ed multiple times, // the checkpoint directory may already exist. if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) { return err } if criuOpts.WorkDirectory == "" { criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") } if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) { return err } workDir, err := os.Open(criuOpts.WorkDirectory) if err != nil { return err } defer workDir.Close() imageDir, err := os.Open(criuOpts.ImagesDirectory) if err != nil { return err } defer imageDir.Close() rpcOpts := criurpc.CriuOpts{ ImagesDirFd: proto.Int32(int32(imageDir.Fd())), WorkDirFd: proto.Int32(int32(workDir.Fd())), LogLevel: proto.Int32(4), LogFile: proto.String("dump.log"), Root: proto.String(c.config.Rootfs), ManageCgroups: proto.Bool(true), NotifyScripts: proto.Bool(true), Pid: proto.Int32(int32(c.initProcess.pid())), ShellJob: proto.Bool(criuOpts.ShellJob), LeaveRunning: proto.Bool(criuOpts.LeaveRunning), TcpEstablished: proto.Bool(criuOpts.TcpEstablished), ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), } // append optional criu opts, e.g., page-server and port if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 { rpcOpts.Ps = &criurpc.CriuPageServerInfo{ Address: proto.String(criuOpts.PageServer.Address), Port: proto.Int32(criuOpts.PageServer.Port), } } t := criurpc.CriuReqType_DUMP req := criurpc.CriuReq{ Type: &t, Opts: &rpcOpts, } for _, m := range c.config.Mounts { if m.Device == "bind" { mountDest := m.Destination if strings.HasPrefix(mountDest, c.config.Rootfs) { mountDest = mountDest[len(c.config.Rootfs):] } extMnt := new(criurpc.ExtMountMap) extMnt.Key = proto.String(mountDest) extMnt.Val = proto.String(mountDest) req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) } } // Write the FD info to a file in the image directory fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors()) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptors_filename), fdsJSON, 0655) if err != nil { return err } err = c.criuSwrk(nil, &req, criuOpts) if err != nil { return err } return nil } func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { c.m.Lock() defer c.m.Unlock() if err := c.checkCriuVersion(); err != nil { return err } if criuOpts.WorkDirectory == "" { criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") } // Since a container can be C/R'ed multiple times, // the work directory may already exist. 
if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) { return err } workDir, err := os.Open(criuOpts.WorkDirectory) if err != nil { return err } defer workDir.Close() if criuOpts.ImagesDirectory == "" { criuOpts.ImagesDirectory = filepath.Join(c.root, "criu.image") } imageDir, err := os.Open(criuOpts.ImagesDirectory) if err != nil { return err } defer imageDir.Close() // CRIU has a few requirements for a root directory: // * it must be a mount point // * its parent must not be overmounted // c.config.Rootfs is bind-mounted to a temporary directory // to satisfy these requirements. root := filepath.Join(c.root, "criu-root") if err := os.Mkdir(root, 0755); err != nil { return err } defer os.Remove(root) root, err = filepath.EvalSymlinks(root) if err != nil { return err } err = syscall.Mount(c.config.Rootfs, root, "", syscall.MS_BIND|syscall.MS_REC, "") if err != nil { return err } defer syscall.Unmount(root, syscall.MNT_DETACH) t := criurpc.CriuReqType_RESTORE req := criurpc.CriuReq{ Type: &t, Opts: &criurpc.CriuOpts{ ImagesDirFd: proto.Int32(int32(imageDir.Fd())), WorkDirFd: proto.Int32(int32(workDir.Fd())), EvasiveDevices: proto.Bool(true), LogLevel: proto.Int32(4), LogFile: proto.String("restore.log"), RstSibling: proto.Bool(true), Root: proto.String(root), ManageCgroups: proto.Bool(true), NotifyScripts: proto.Bool(true), ShellJob: proto.Bool(criuOpts.ShellJob), ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), TcpEstablished: proto.Bool(criuOpts.TcpEstablished), }, } for _, m := range c.config.Mounts { if m.Device == "bind" { mountDest := m.Destination if strings.HasPrefix(mountDest, c.config.Rootfs) { mountDest = mountDest[len(c.config.Rootfs):] } extMnt := new(criurpc.ExtMountMap) extMnt.Key = proto.String(mountDest) extMnt.Val = proto.String(m.Source) req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) } } for _, iface := range c.config.Networks { switch iface.Type { case "veth": veth := new(criurpc.CriuVethPair) veth.IfOut = proto.String(iface.HostInterfaceName) veth.IfIn = proto.String(iface.Name) req.Opts.Veths = append(req.Opts.Veths, veth) break case "loopback":<|fim▁hole|> } } var ( fds []string fdJSON []byte ) if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptors_filename)); err != nil { return err } if err = json.Unmarshal(fdJSON, &fds); err != nil { return err } for i := range fds { if s := fds[i]; strings.Contains(s, "pipe:") { inheritFd := new(criurpc.InheritFd) inheritFd.Key = proto.String(s) inheritFd.Fd = proto.Int32(int32(i)) req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd) } } err = c.criuSwrk(process, &req, criuOpts) if err != nil { return err } return nil } func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts) error { fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0) if err != nil { return err } criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client") criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server") defer criuClient.Close() defer criuServer.Close() args := []string{"swrk", "3"} cmd := exec.Command(c.criuPath, args...) 
if process != nil { cmd.Stdin = process.Stdin cmd.Stdout = process.Stdout cmd.Stderr = process.Stderr } cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer) if err := cmd.Start(); err != nil { return err } criuServer.Close() defer func() { criuClient.Close() _, err := cmd.Process.Wait() if err != nil { return } }() var extFds []string if process != nil { extFds, err = getPipeFds(cmd.Process.Pid) if err != nil { return err } } data, err := proto.Marshal(req) if err != nil { return err } _, err = criuClient.Write(data) if err != nil { return err } buf := make([]byte, 10*4096) for true { n, err := criuClient.Read(buf) if err != nil { return err } if n == 0 { return fmt.Errorf("unexpected EOF") } if n == len(buf) { return fmt.Errorf("buffer is too small") } resp := new(criurpc.CriuResp) err = proto.Unmarshal(buf[:n], resp) if err != nil { return err } if !resp.GetSuccess() { return fmt.Errorf("criu failed: type %s errno %d", req.GetType().String(), resp.GetCrErrno()) } t := resp.GetType() switch { case t == criurpc.CriuReqType_NOTIFY: if err := c.criuNotifications(resp, process, opts, extFds); err != nil { return err } t = criurpc.CriuReqType_NOTIFY req = &criurpc.CriuReq{ Type: &t, NotifySuccess: proto.Bool(true), } data, err = proto.Marshal(req) if err != nil { return err } n, err = criuClient.Write(data) if err != nil { return err } continue case t == criurpc.CriuReqType_RESTORE: case t == criurpc.CriuReqType_DUMP: break default: return fmt.Errorf("unable to parse the response %s", resp.String()) } break } // cmd.Wait() waits cmd.goroutines which are used for proxying file descriptors. // Here we want to wait only the CRIU process. st, err := cmd.Process.Wait() if err != nil { return err } if !st.Success() { return fmt.Errorf("criu failed: %s", st.String()) } return nil } // block any external network activity func lockNetwork(config *configs.Config) error { for _, config := range config.Networks { strategy, err := getStrategy(config.Type) if err != nil { return err } if err := strategy.detach(config); err != nil { return err } } return nil } func unlockNetwork(config *configs.Config) error { for _, config := range config.Networks { strategy, err := getStrategy(config.Type) if err != nil { return err } if err = strategy.attach(config); err != nil { return err } } return nil } func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string) error { notify := resp.GetNotify() if notify == nil { return fmt.Errorf("invalid response: %s", resp.String()) } switch { case notify.GetScript() == "post-dump": if !opts.LeaveRunning { f, err := os.Create(filepath.Join(c.root, "checkpoint")) if err != nil { return err } f.Close() } break case notify.GetScript() == "network-unlock": if err := unlockNetwork(c.config); err != nil { return err } break case notify.GetScript() == "network-lock": if err := lockNetwork(c.config); err != nil { return err } break case notify.GetScript() == "post-restore": pid := notify.GetPid() r, err := newRestoredProcess(int(pid), fds) if err != nil { return err } // TODO: crosbymichael restore previous process information by saving the init process information in // the container's state file or separate process state files. 
if err := c.updateState(r); err != nil { return err } process.ops = r break } return nil } func (c *linuxContainer) updateState(process parentProcess) error { c.initProcess = process state, err := c.currentState() if err != nil { return err } f, err := os.Create(filepath.Join(c.root, stateFilename)) if err != nil { return err } defer f.Close() os.Remove(filepath.Join(c.root, "checkpoint")) return json.NewEncoder(f).Encode(state) } func (c *linuxContainer) currentStatus() (Status, error) { if _, err := os.Stat(filepath.Join(c.root, "checkpoint")); err == nil { return Checkpointed, nil } if c.initProcess == nil { return Destroyed, nil } // return Running if the init process is alive if err := syscall.Kill(c.initProcess.pid(), 0); err != nil { if err == syscall.ESRCH { return Destroyed, nil } return 0, newSystemError(err) } if c.config.Cgroups != nil && c.config.Cgroups.Freezer == configs.Frozen { return Paused, nil } return Running, nil } func (c *linuxContainer) currentState() (*State, error) { status, err := c.currentStatus() if err != nil { return nil, err } if status == Destroyed { return nil, newGenericError(fmt.Errorf("container destroyed"), ContainerNotExists) } startTime, err := c.initProcess.startTime() if err != nil { return nil, newSystemError(err) } state := &State{ ID: c.ID(), Config: *c.config, InitProcessPid: c.initProcess.pid(), InitProcessStartTime: startTime, CgroupPaths: c.cgroupManager.GetPaths(), NamespacePaths: make(map[configs.NamespaceType]string), ExternalDescriptors: c.initProcess.externalDescriptors(), } for _, ns := range c.config.Namespaces { state.NamespacePaths[ns.Type] = ns.GetPath(c.initProcess.pid()) } for _, nsType := range configs.NamespaceTypes() { if _, ok := state.NamespacePaths[nsType]; !ok { ns := configs.Namespace{Type: nsType} state.NamespacePaths[ns.Type] = ns.GetPath(c.initProcess.pid()) } } return state, nil }<|fim▁end|>
break
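// Hedged usage sketch: one way the Checkpoint/Restore methods defined in
// container_linux.go above might be driven by a caller. This is not part of
// that file. Obtaining a container requires the package's Factory, which is
// outside this excerpt, so the sketch only depends on the CriuOpts and Process
// types that the code above actually references.
package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/docker/libcontainer"
)

// checkpointRestorer captures just the two methods exercised below, so the
// sketch does not have to assume the shape of the full Container interface.
type checkpointRestorer interface {
	Checkpoint(*libcontainer.CriuOpts) error
	Restore(*libcontainer.Process, *libcontainer.CriuOpts) error
}

// migrate dumps a running container's state under stateDir and immediately
// restores it. The image and work directory names mirror the defaults used by
// Checkpoint/Restore above ("criu.image" and "criu.work").
func migrate(c checkpointRestorer, stateDir string) error {
	opts := &libcontainer.CriuOpts{
		ImagesDirectory: filepath.Join(stateDir, "criu.image"),
		WorkDirectory:   filepath.Join(stateDir, "criu.work"),
		LeaveRunning:    false, // the dumped container is stopped; set true to keep it running after the dump
	}
	if err := c.Checkpoint(opts); err != nil {
		return err
	}
	// Per criuSwrk above, the Process passed to Restore is used to wire up
	// stdio for the restored init process, so only those fields are set here.
	process := &libcontainer.Process{
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	return c.Restore(process, opts)
}

func main() {
	log.Println("see migrate(); creating or loading a container requires the libcontainer Factory, omitted here")
}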
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! This module contains shared types and messages for use by devtools/script. //! The traits are here instead of in script so that the devtools crate can be //! modified independently of the rest of Servo. #![crate_name = "style_traits"] #![crate_type = "rlib"] #![deny(unsafe_code, missing_docs)] #![cfg_attr(feature = "servo", feature(plugin))] extern crate app_units; #[macro_use] extern crate cssparser; extern crate euclid; #[cfg(feature = "servo")] extern crate heapsize; #[cfg(feature = "servo")] #[macro_use] extern crate heapsize_derive; extern crate rustc_serialize; #[cfg(feature = "servo")] #[macro_use] extern crate serde_derive; /// Opaque type stored in type-unsafe work queues for parallel layout. /// Must be transmutable to and from `TNode`. pub type UnsafeNode = (usize, usize); /// One CSS "px" in the coordinate system of the "initial viewport": /// http://www.w3.org/TR/css-device-adapt/#initial-viewport /// /// `ViewportPx` is equal to `DeviceIndependentPixel` times a "page zoom" factor controlled by the user. This is /// the desktop-style "full page" zoom that enlarges content but then reflows the layout viewport /// so it still exactly fits the visible area. /// /// At the default zoom level of 100%, one `PagePx` is equal to one `DeviceIndependentPixel`. However, if the /// document is zoomed in or out then this scale may be larger or smaller. #[derive(Clone, Copy, Debug)] pub enum ViewportPx {} <|fim▁hole|>/// `PagePx` is equal to `ViewportPx` multiplied by a "viewport zoom" factor controlled by the user. /// This is the mobile-style "pinch zoom" that enlarges content without reflowing it. When the /// viewport zoom is not equal to 1.0, then the layout viewport is no longer the same physical size /// as the viewable area. #[derive(Clone, Copy, Debug)] pub enum PagePx {} // In summary, the hierarchy of pixel units and the factors to convert from one to the next: // // DevicePixel // / hidpi_ratio => DeviceIndependentPixel // / desktop_zoom => ViewportPx // / pinch_zoom => PagePx pub mod cursor; #[macro_use] pub mod values; pub mod viewport; pub use values::{ToCss, OneOrMoreCommaSeparated};<|fim▁end|>
/// One CSS "px" in the root coordinate system for the content document. ///
<|file_name|>generated.rs<|end_file_name|><|fim▁begin|>// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= use std::error::Error; use std::fmt; use std::io; #[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoFuture}; use rusoto_core::credential::{CredentialsError, ProvideAwsCredentials}; use rusoto_core::request::HttpDispatchError; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::signature::SignedRequest; use serde_json; use serde_json::from_slice; use serde_json::Value as SerdeJsonValue; /// <p>The input for the BulkPublish operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct BulkPublishRequest { /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>The output for the BulkPublish operation.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct BulkPublishResponse { /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_id: Option<String>, } /// <p>Configuration options for configure Cognito streams.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CognitoStreams { /// <p>The ARN of the role Amazon Cognito can assume in order to publish to the stream. This role must grant access to Amazon Cognito (cognito-sync) to invoke PutRecord on your Cognito stream.</p> #[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option<String>, /// <p>The name of the Cognito stream to receive updates. This stream must be in the developers account and in the same region as the identity pool.</p> #[serde(rename = "StreamName")] #[serde(skip_serializing_if = "Option::is_none")] pub stream_name: Option<String>, /// <p>Status of the Cognito streams. Valid values are: <p>ENABLED - Streaming of updates to identity pool is enabled.</p> <p>DISABLED - Streaming of updates to identity pool is disabled. Bulk publish will also fail if StreamingStatus is DISABLED.</p></p> #[serde(rename = "StreamingStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub streaming_status: Option<String>, } /// <p>A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don&#39;t exist. 
Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct Dataset { /// <p>Date on which the dataset was created.</p> #[serde(rename = "CreationDate")] #[serde(skip_serializing_if = "Option::is_none")] pub creation_date: Option<f64>, /// <p>Total size in bytes of the records in this dataset.</p> #[serde(rename = "DataStorage")] #[serde(skip_serializing_if = "Option::is_none")] pub data_storage: Option<i64>, /// <p>A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, &#39;_&#39; (underscore), &#39;-&#39; (dash), and &#39;.&#39; (dot).</p> #[serde(rename = "DatasetName")] #[serde(skip_serializing_if = "Option::is_none")] pub dataset_name: Option<String>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_id: Option<String>, /// <p>The device that made the last change to this dataset.</p> #[serde(rename = "LastModifiedBy")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_by: Option<String>, /// <p>Date when the dataset was last modified.</p> #[serde(rename = "LastModifiedDate")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_date: Option<f64>, /// <p>Number of records in this dataset.</p> #[serde(rename = "NumRecords")] #[serde(skip_serializing_if = "Option::is_none")] pub num_records: Option<i64>, } /// <p>A request to delete the specific dataset.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteDatasetRequest { /// <p>A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, &#39;_&#39; (underscore), &#39;-&#39; (dash), and &#39;.&#39; (dot).</p> #[serde(rename = "DatasetName")] pub dataset_name: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>Response to a successful DeleteDataset request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct DeleteDatasetResponse { /// <p>A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don&#39;t exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.</p> #[serde(rename = "Dataset")] #[serde(skip_serializing_if = "Option::is_none")] pub dataset: Option<Dataset>, } /// <p>A request for meta data about a dataset (creation date, number of records, size) by owner and dataset name.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeDatasetRequest { /// <p>A string of up to 128 characters. 
Allowed characters are a-z, A-Z, 0-9, &#39;_&#39; (underscore), &#39;-&#39; (dash), and &#39;.&#39; (dot).</p> #[serde(rename = "DatasetName")] pub dataset_name: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>Response to a successful DescribeDataset request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct DescribeDatasetResponse { /// <p>Meta data for a collection of data for an identity. An identity can have multiple datasets. A dataset can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don&#39;t exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.</p> #[serde(rename = "Dataset")] #[serde(skip_serializing_if = "Option::is_none")] pub dataset: Option<Dataset>, } /// <p>A request for usage information about the identity pool.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeIdentityPoolUsageRequest { /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>Response to a successful DescribeIdentityPoolUsage request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct DescribeIdentityPoolUsageResponse { /// <p>Information about the usage of the identity pool.</p> #[serde(rename = "IdentityPoolUsage")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_usage: Option<IdentityPoolUsage>, } /// <p>A request for information about the usage of an identity pool.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DescribeIdentityUsageRequest { /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>The response to a successful DescribeIdentityUsage request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct DescribeIdentityUsageResponse { /// <p>Usage information for the identity.</p> #[serde(rename = "IdentityUsage")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_usage: Option<IdentityUsage>, } /// <p>The input for the GetBulkPublishDetails operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetBulkPublishDetailsRequest { /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. 
GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>The output for the GetBulkPublishDetails operation.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct GetBulkPublishDetailsResponse { /// <p>If BulkPublishStatus is SUCCEEDED, the time the last bulk publish operation completed.</p> #[serde(rename = "BulkPublishCompleteTime")] #[serde(skip_serializing_if = "Option::is_none")] pub bulk_publish_complete_time: Option<f64>, /// <p>The date/time at which the last bulk publish was initiated.</p> #[serde(rename = "BulkPublishStartTime")] #[serde(skip_serializing_if = "Option::is_none")] pub bulk_publish_start_time: Option<f64>, /// <p>Status of the last bulk publish operation, valid values are: <p>NOT<em>STARTED - No bulk publish has been requested for this identity pool</p> <p>IN</em>PROGRESS - Data is being published to the configured stream</p> <p>SUCCEEDED - All data for the identity pool has been published to the configured stream</p> <p>FAILED - Some portion of the data has failed to publish, check FailureMessage for the cause.</p></p> #[serde(rename = "BulkPublishStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub bulk_publish_status: Option<String>, /// <p>If BulkPublishStatus is FAILED this field will contain the error message that caused the bulk publish to fail.</p> #[serde(rename = "FailureMessage")] #[serde(skip_serializing_if = "Option::is_none")] pub failure_message: Option<String>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_id: Option<String>, } /// <p>A request for a list of the configured Cognito Events</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetCognitoEventsRequest { /// <p>The Cognito Identity Pool ID for the request</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>The response from the GetCognitoEvents request</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct GetCognitoEventsResponse { /// <p>The Cognito Events returned from the GetCognitoEvents request</p> #[serde(rename = "Events")] #[serde(skip_serializing_if = "Option::is_none")] pub events: Option<::std::collections::HashMap<String, String>>, } /// <p>The input for the GetIdentityPoolConfiguration operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetIdentityPoolConfigurationRequest { /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. 
This is the ID of the pool for which to return a configuration.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>The output for the GetIdentityPoolConfiguration operation.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct GetIdentityPoolConfigurationResponse { /// <p>Options to apply to this identity pool for Amazon Cognito streams.</p> #[serde(rename = "CognitoStreams")] #[serde(skip_serializing_if = "Option::is_none")] pub cognito_streams: Option<CognitoStreams>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito.</p> #[serde(rename = "IdentityPoolId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_id: Option<String>, /// <p>Options to apply to this identity pool for push synchronization.</p> #[serde(rename = "PushSync")] #[serde(skip_serializing_if = "Option::is_none")] pub push_sync: Option<PushSync>, } /// <p>Usage information for the identity pool.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct IdentityPoolUsage { /// <p>Data storage information for the identity pool.</p> #[serde(rename = "DataStorage")] #[serde(skip_serializing_if = "Option::is_none")] pub data_storage: Option<i64>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_id: Option<String>, /// <p>Date on which the identity pool was last modified.</p> #[serde(rename = "LastModifiedDate")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_date: Option<f64>, /// <p>Number of sync sessions for the identity pool.</p> #[serde(rename = "SyncSessionsCount")] #[serde(skip_serializing_if = "Option::is_none")] pub sync_sessions_count: Option<i64>, } /// <p>Usage information for the identity.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct IdentityUsage { /// <p>Total data storage for this identity.</p> #[serde(rename = "DataStorage")] #[serde(skip_serializing_if = "Option::is_none")] pub data_storage: Option<i64>, /// <p>Number of datasets for the identity.</p> #[serde(rename = "DatasetCount")] #[serde(skip_serializing_if = "Option::is_none")] pub dataset_count: Option<i64>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_id: Option<String>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_id: Option<String>, /// <p>Date on which the identity was last modified.</p> #[serde(rename = "LastModifiedDate")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_date: Option<f64>, } /// <p>Request for a list of datasets for an identity.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListDatasetsRequest { /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. 
GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, /// <p>The maximum number of results to be returned.</p> #[serde(rename = "MaxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option<i64>, /// <p>A pagination token for obtaining the next page of results.</p> #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Returned for a successful ListDatasets request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct ListDatasetsResponse { /// <p>Number of datasets returned.</p> #[serde(rename = "Count")] #[serde(skip_serializing_if = "Option::is_none")] pub count: Option<i64>, /// <p>A set of datasets.</p> #[serde(rename = "Datasets")] #[serde(skip_serializing_if = "Option::is_none")] pub datasets: Option<Vec<Dataset>>, /// <p>A pagination token for obtaining the next page of results.</p> #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>A request for usage information on an identity pool.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListIdentityPoolUsageRequest { /// <p>The maximum number of results to be returned.</p> #[serde(rename = "MaxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option<i64>, /// <p>A pagination token for obtaining the next page of results.</p> #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Returned for a successful ListIdentityPoolUsage request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct ListIdentityPoolUsageResponse { /// <p>Total number of identities for the identity pool.</p> #[serde(rename = "Count")] #[serde(skip_serializing_if = "Option::is_none")] pub count: Option<i64>, /// <p>Usage information for the identity pools.</p> #[serde(rename = "IdentityPoolUsages")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_usages: Option<Vec<IdentityPoolUsage>>, /// <p>The maximum number of results to be returned.</p> #[serde(rename = "MaxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option<i64>, /// <p>A pagination token for obtaining the next page of results.</p> #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>A request for a list of records.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListRecordsRequest { /// <p>A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, &#39;_&#39; (underscore), &#39;-&#39; (dash), and &#39;.&#39; (dot).</p> #[serde(rename = "DatasetName")] pub dataset_name: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. 
GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, /// <p>The last server sync count for this record.</p> #[serde(rename = "LastSyncCount")] #[serde(skip_serializing_if = "Option::is_none")] pub last_sync_count: Option<i64>, /// <p>The maximum number of results to be returned.</p> #[serde(rename = "MaxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option<i64>, /// <p>A pagination token for obtaining the next page of results.</p> #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>A token containing a session ID, identity ID, and expiration.</p> #[serde(rename = "SyncSessionToken")] #[serde(skip_serializing_if = "Option::is_none")] pub sync_session_token: Option<String>, } /// <p>Returned for a successful ListRecordsRequest.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct ListRecordsResponse { /// <p>Total number of records.</p> #[serde(rename = "Count")] #[serde(skip_serializing_if = "Option::is_none")] pub count: Option<i64>, /// <p>A boolean value specifying whether to delete the dataset locally.</p> #[serde(rename = "DatasetDeletedAfterRequestedSyncCount")] #[serde(skip_serializing_if = "Option::is_none")] pub dataset_deleted_after_requested_sync_count: Option<bool>, /// <p>Indicates whether the dataset exists.</p> #[serde(rename = "DatasetExists")] #[serde(skip_serializing_if = "Option::is_none")] pub dataset_exists: Option<bool>, /// <p>Server sync count for this dataset.</p> #[serde(rename = "DatasetSyncCount")] #[serde(skip_serializing_if = "Option::is_none")] pub dataset_sync_count: Option<i64>, /// <p>The user/device that made the last change to this record.</p> #[serde(rename = "LastModifiedBy")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_by: Option<String>, /// <p>Names of merged datasets.</p> #[serde(rename = "MergedDatasetNames")] #[serde(skip_serializing_if = "Option::is_none")] pub merged_dataset_names: Option<Vec<String>>, /// <p>A pagination token for obtaining the next page of results.</p> #[serde(rename = "NextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>A list of all records.</p> #[serde(rename = "Records")] #[serde(skip_serializing_if = "Option::is_none")] pub records: Option<Vec<Record>>, /// <p>A token containing a session ID, identity ID, and expiration.</p> #[serde(rename = "SyncSessionToken")] #[serde(skip_serializing_if = "Option::is_none")] pub sync_session_token: Option<String>, } /// <p>Configuration options to be applied to the identity pool.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PushSync { /// <p>List of SNS platform application ARNs that could be used by clients.</p> #[serde(rename = "ApplicationArns")] #[serde(skip_serializing_if = "Option::is_none")] pub application_arns: Option<Vec<String>>, /// <p>A role configured to allow Cognito to call SNS on behalf of the developer.</p> #[serde(rename = "RoleArn")] #[serde(skip_serializing_if = "Option::is_none")] pub role_arn: Option<String>, } /// <p>The basic data structure of a dataset.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct Record { /// <p>The last modified date of the client device.</p> #[serde(rename = "DeviceLastModifiedDate")] #[serde(skip_serializing_if = "Option::is_none")] pub 
device_last_modified_date: Option<f64>, /// <p>The key for the record.</p> #[serde(rename = "Key")] #[serde(skip_serializing_if = "Option::is_none")] pub key: Option<String>, /// <p>The user/device that made the last change to this record.</p> #[serde(rename = "LastModifiedBy")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_by: Option<String>, /// <p>The date on which the record was last modified.</p> #[serde(rename = "LastModifiedDate")] #[serde(skip_serializing_if = "Option::is_none")] pub last_modified_date: Option<f64>, /// <p>The server sync count for this record.</p> #[serde(rename = "SyncCount")] #[serde(skip_serializing_if = "Option::is_none")] pub sync_count: Option<i64>, /// <p>The value for the record.</p> #[serde(rename = "Value")] #[serde(skip_serializing_if = "Option::is_none")] pub value: Option<String>, } /// <p>An update operation for a record.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RecordPatch { /// <p>The last modified date of the client device.</p> #[serde(rename = "DeviceLastModifiedDate")] #[serde(skip_serializing_if = "Option::is_none")] pub device_last_modified_date: Option<f64>, /// <p>The key associated with the record patch.</p> #[serde(rename = "Key")] pub key: String, /// <p>An operation, either replace or remove.</p> #[serde(rename = "Op")] pub op: String, /// <p>Last known server sync count for this record. Set to 0 if unknown.</p> #[serde(rename = "SyncCount")] pub sync_count: i64, /// <p>The value associated with the record patch.</p> #[serde(rename = "Value")] #[serde(skip_serializing_if = "Option::is_none")] pub value: Option<String>, } /// <p>A request to RegisterDevice.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RegisterDeviceRequest { /// <p>The unique ID for this identity.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. Here, the ID of the pool that the identity belongs to.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, /// <p>The SNS platform type (e.g. 
GCM, SDM, APNS, APNS_SANDBOX).</p> #[serde(rename = "Platform")] pub platform: String, /// <p>The push token.</p> #[serde(rename = "Token")] pub token: String, } /// <p>Response to a RegisterDevice request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct RegisterDeviceResponse { /// <p>The unique ID generated for this device by Cognito.</p> #[serde(rename = "DeviceId")] #[serde(skip_serializing_if = "Option::is_none")] pub device_id: Option<String>, } /// <p>A request to configure Cognito Events</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SetCognitoEventsRequest { /// <p>The events to configure</p> #[serde(rename = "Events")] pub events: ::std::collections::HashMap<String, String>, /// <p>The Cognito Identity Pool to use when configuring Cognito Events</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>The input for the SetIdentityPoolConfiguration operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SetIdentityPoolConfigurationRequest { /// <p>Options to apply to this identity pool for Amazon Cognito streams.</p> #[serde(rename = "CognitoStreams")] #[serde(skip_serializing_if = "Option::is_none")] pub cognito_streams: Option<CognitoStreams>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. This is the ID of the pool to modify.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, /// <p>Options to apply to this identity pool for push synchronization.</p> #[serde(rename = "PushSync")] #[serde(skip_serializing_if = "Option::is_none")] pub push_sync: Option<PushSync>, } /// <p>The output for the SetIdentityPoolConfiguration operation</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct SetIdentityPoolConfigurationResponse { /// <p>Options to apply to this identity pool for Amazon Cognito streams.</p> #[serde(rename = "CognitoStreams")] #[serde(skip_serializing_if = "Option::is_none")] pub cognito_streams: Option<CognitoStreams>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito.</p> #[serde(rename = "IdentityPoolId")] #[serde(skip_serializing_if = "Option::is_none")] pub identity_pool_id: Option<String>, /// <p>Options to apply to this identity pool for push synchronization.</p> #[serde(rename = "PushSync")] #[serde(skip_serializing_if = "Option::is_none")] pub push_sync: Option<PushSync>, } /// <p>A request to SubscribeToDatasetRequest.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct SubscribeToDatasetRequest { /// <p>The name of the dataset to subcribe to.</p> #[serde(rename = "DatasetName")] pub dataset_name: String, /// <p>The unique ID generated for this device by Cognito.</p> #[serde(rename = "DeviceId")] pub device_id: String, /// <p>Unique ID for this identity.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. 
The ID of the pool to which the identity belongs.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>Response to a SubscribeToDataset request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct SubscribeToDatasetResponse {} /// <p>A request to UnsubscribeFromDataset.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UnsubscribeFromDatasetRequest { /// <p>The name of the dataset from which to unsubcribe.</p> #[serde(rename = "DatasetName")] pub dataset_name: String, /// <p>The unique ID generated for this device by Cognito.</p> #[serde(rename = "DeviceId")] pub device_id: String, /// <p>Unique ID for this identity.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. The ID of the pool to which this identity belongs.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, } /// <p>Response to an UnsubscribeFromDataset request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct UnsubscribeFromDatasetResponse {} /// <p>A request to post updates to records or add and delete records for a dataset and user.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateRecordsRequest { /// <p>Intended to supply a device ID that will populate the lastModifiedBy field referenced in other methods. The ClientContext field is not yet implemented.</p> #[serde(rename = "ClientContext")] #[serde(skip_serializing_if = "Option::is_none")] pub client_context: Option<String>, /// <p>A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, &#39;_&#39; (underscore), &#39;-&#39; (dash), and &#39;.&#39; (dot).</p> #[serde(rename = "DatasetName")] pub dataset_name: String, /// <p>The unique ID generated for this device by Cognito.</p> #[serde(rename = "DeviceId")] #[serde(skip_serializing_if = "Option::is_none")] pub device_id: Option<String>, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.</p> #[serde(rename = "IdentityId")] pub identity_id: String, /// <p>A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. 
GUID generation is unique within a region.</p> #[serde(rename = "IdentityPoolId")] pub identity_pool_id: String, /// <p>A list of patch operations.</p> #[serde(rename = "RecordPatches")] #[serde(skip_serializing_if = "Option::is_none")] pub record_patches: Option<Vec<RecordPatch>>, /// <p>The SyncSessionToken returned by a previous call to ListRecords for this dataset and identity.</p> #[serde(rename = "SyncSessionToken")] pub sync_session_token: String, } /// <p>Returned for a successful UpdateRecordsRequest.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] #[cfg_attr(test, derive(Serialize))] pub struct UpdateRecordsResponse { /// <p>A list of records that have been updated.</p> #[serde(rename = "Records")] #[serde(skip_serializing_if = "Option::is_none")] pub records: Option<Vec<Record>>, } /// Errors returned by BulkPublish #[derive(Debug, PartialEq)] pub enum BulkPublishError { /// <p>An exception thrown when a bulk publish operation is requested less than 24 hours after a previous bulk publish operation completed successfully.</p> AlreadyStreamed(String), /// <p>An exception thrown when there is an IN_PROGRESS bulk publish operation for the given identity pool.</p> DuplicateRequest(String), /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(BufferedHttpResponse), } impl BulkPublishError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> BulkPublishError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "AlreadyStreamedException" => { return BulkPublishError::AlreadyStreamed(String::from(error_message)) } "DuplicateRequestException" => { return BulkPublishError::DuplicateRequest(String::from(error_message)) } "InternalErrorException" => { return BulkPublishError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return BulkPublishError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return BulkPublishError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return BulkPublishError::ResourceNotFound(String::from(error_message)) } "ValidationException" => { return BulkPublishError::Validation(error_message.to_string()) } _ => {} } } return BulkPublishError::Unknown(res); } } impl From<serde_json::error::Error> for BulkPublishError { fn from(err: serde_json::error::Error) -> BulkPublishError { BulkPublishError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for BulkPublishError { fn from(err: CredentialsError) -> BulkPublishError { BulkPublishError::Credentials(err) } } impl From<HttpDispatchError> for BulkPublishError { fn from(err: HttpDispatchError) -> BulkPublishError { BulkPublishError::HttpDispatch(err) } } impl From<io::Error> for BulkPublishError { fn from(err: io::Error) -> BulkPublishError { BulkPublishError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for BulkPublishError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for BulkPublishError { fn description(&self) -> &str { match *self { BulkPublishError::AlreadyStreamed(ref cause) => cause, BulkPublishError::DuplicateRequest(ref cause) => cause, BulkPublishError::InternalError(ref cause) => cause, BulkPublishError::InvalidParameter(ref cause) => cause, BulkPublishError::NotAuthorized(ref cause) => cause, BulkPublishError::ResourceNotFound(ref cause) => cause, BulkPublishError::Validation(ref cause) => cause, BulkPublishError::Credentials(ref err) => err.description(), BulkPublishError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), BulkPublishError::ParseError(ref cause) => cause, BulkPublishError::Unknown(_) => "unknown error", } } } /// Errors returned by DeleteDataset #[derive(Debug, PartialEq)] pub enum DeleteDatasetError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the 
requested resource.</p> NotAuthorized(String), /// <p>Thrown if an update can&#39;t be applied because the resource was changed by another call and this would result in a conflict.</p> ResourceConflict(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl DeleteDatasetError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> DeleteDatasetError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return DeleteDatasetError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return DeleteDatasetError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return DeleteDatasetError::NotAuthorized(String::from(error_message)) } "ResourceConflictException" => { return DeleteDatasetError::ResourceConflict(String::from(error_message)) } "ResourceNotFoundException" => { return DeleteDatasetError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return DeleteDatasetError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return DeleteDatasetError::Validation(error_message.to_string()) } _ => {} } } return DeleteDatasetError::Unknown(res); } } impl From<serde_json::error::Error> for DeleteDatasetError { fn from(err: serde_json::error::Error) -> DeleteDatasetError { DeleteDatasetError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for DeleteDatasetError { fn from(err: CredentialsError) -> DeleteDatasetError { DeleteDatasetError::Credentials(err) } } impl From<HttpDispatchError> for DeleteDatasetError { fn from(err: HttpDispatchError) -> DeleteDatasetError { DeleteDatasetError::HttpDispatch(err) } } impl From<io::Error> for DeleteDatasetError { fn from(err: io::Error) -> DeleteDatasetError { DeleteDatasetError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteDatasetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteDatasetError { fn description(&self) -> &str { match *self { DeleteDatasetError::InternalError(ref cause) => cause, DeleteDatasetError::InvalidParameter(ref cause) => cause, DeleteDatasetError::NotAuthorized(ref cause) => cause, 
DeleteDatasetError::ResourceConflict(ref cause) => cause, DeleteDatasetError::ResourceNotFound(ref cause) => cause, DeleteDatasetError::TooManyRequests(ref cause) => cause, DeleteDatasetError::Validation(ref cause) => cause, DeleteDatasetError::Credentials(ref err) => err.description(), DeleteDatasetError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteDatasetError::ParseError(ref cause) => cause, DeleteDatasetError::Unknown(_) => "unknown error", } } } /// Errors returned by DescribeDataset #[derive(Debug, PartialEq)] pub enum DescribeDatasetError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl DescribeDatasetError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> DescribeDatasetError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return DescribeDatasetError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return DescribeDatasetError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return DescribeDatasetError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return DescribeDatasetError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return DescribeDatasetError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return DescribeDatasetError::Validation(error_message.to_string()) } _ => {} } } return DescribeDatasetError::Unknown(res); } } impl From<serde_json::error::Error> for DescribeDatasetError { fn from(err: serde_json::error::Error) -> DescribeDatasetError { DescribeDatasetError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for DescribeDatasetError { fn from(err: CredentialsError) -> DescribeDatasetError { DescribeDatasetError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDatasetError { fn from(err: HttpDispatchError) -> DescribeDatasetError { 
DescribeDatasetError::HttpDispatch(err) } } impl From<io::Error> for DescribeDatasetError { fn from(err: io::Error) -> DescribeDatasetError { DescribeDatasetError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDatasetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDatasetError { fn description(&self) -> &str { match *self { DescribeDatasetError::InternalError(ref cause) => cause, DescribeDatasetError::InvalidParameter(ref cause) => cause, DescribeDatasetError::NotAuthorized(ref cause) => cause, DescribeDatasetError::ResourceNotFound(ref cause) => cause, DescribeDatasetError::TooManyRequests(ref cause) => cause, DescribeDatasetError::Validation(ref cause) => cause, DescribeDatasetError::Credentials(ref err) => err.description(), DescribeDatasetError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DescribeDatasetError::ParseError(ref cause) => cause, DescribeDatasetError::Unknown(_) => "unknown error", } } } /// Errors returned by DescribeIdentityPoolUsage #[derive(Debug, PartialEq)] pub enum DescribeIdentityPoolUsageError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(BufferedHttpResponse), } impl DescribeIdentityPoolUsageError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> DescribeIdentityPoolUsageError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return DescribeIdentityPoolUsageError::InternalError(String::from( error_message, )) } "InvalidParameterException" => { return DescribeIdentityPoolUsageError::InvalidParameter(String::from( error_message, )) } "NotAuthorizedException" => { return DescribeIdentityPoolUsageError::NotAuthorized(String::from( error_message, )) } "ResourceNotFoundException" => { return DescribeIdentityPoolUsageError::ResourceNotFound(String::from( error_message, )) } "TooManyRequestsException" => { return DescribeIdentityPoolUsageError::TooManyRequests(String::from( error_message, )) } "ValidationException" => { return DescribeIdentityPoolUsageError::Validation(error_message.to_string()) } _ => {} } } return DescribeIdentityPoolUsageError::Unknown(res); } } impl From<serde_json::error::Error> for DescribeIdentityPoolUsageError { fn from(err: serde_json::error::Error) -> DescribeIdentityPoolUsageError { DescribeIdentityPoolUsageError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for DescribeIdentityPoolUsageError { fn from(err: CredentialsError) -> DescribeIdentityPoolUsageError { DescribeIdentityPoolUsageError::Credentials(err) } } impl From<HttpDispatchError> for DescribeIdentityPoolUsageError { fn from(err: HttpDispatchError) -> DescribeIdentityPoolUsageError { DescribeIdentityPoolUsageError::HttpDispatch(err) } } impl From<io::Error> for DescribeIdentityPoolUsageError { fn from(err: io::Error) -> DescribeIdentityPoolUsageError { DescribeIdentityPoolUsageError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeIdentityPoolUsageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeIdentityPoolUsageError { fn description(&self) -> &str { match *self { DescribeIdentityPoolUsageError::InternalError(ref cause) => cause, DescribeIdentityPoolUsageError::InvalidParameter(ref cause) => cause, DescribeIdentityPoolUsageError::NotAuthorized(ref cause) => cause, DescribeIdentityPoolUsageError::ResourceNotFound(ref cause) => cause, DescribeIdentityPoolUsageError::TooManyRequests(ref cause) => cause, DescribeIdentityPoolUsageError::Validation(ref cause) => cause, DescribeIdentityPoolUsageError::Credentials(ref err) => err.description(), DescribeIdentityPoolUsageError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeIdentityPoolUsageError::ParseError(ref cause) => cause, DescribeIdentityPoolUsageError::Unknown(_) => "unknown error", } } } /// Errors returned by 
DescribeIdentityUsage #[derive(Debug, PartialEq)] pub enum DescribeIdentityUsageError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl DescribeIdentityUsageError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> DescribeIdentityUsageError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return DescribeIdentityUsageError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return DescribeIdentityUsageError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return DescribeIdentityUsageError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return DescribeIdentityUsageError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return DescribeIdentityUsageError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return DescribeIdentityUsageError::Validation(error_message.to_string()) } _ => {} } } return DescribeIdentityUsageError::Unknown(res); } } impl From<serde_json::error::Error> for DescribeIdentityUsageError { fn from(err: serde_json::error::Error) -> DescribeIdentityUsageError { DescribeIdentityUsageError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for DescribeIdentityUsageError { fn from(err: CredentialsError) -> DescribeIdentityUsageError { DescribeIdentityUsageError::Credentials(err) } } impl From<HttpDispatchError> for DescribeIdentityUsageError { fn from(err: HttpDispatchError) -> DescribeIdentityUsageError { DescribeIdentityUsageError::HttpDispatch(err) } } impl From<io::Error> for DescribeIdentityUsageError { fn from(err: io::Error) -> DescribeIdentityUsageError { DescribeIdentityUsageError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeIdentityUsageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for 
DescribeIdentityUsageError { fn description(&self) -> &str { match *self { DescribeIdentityUsageError::InternalError(ref cause) => cause, DescribeIdentityUsageError::InvalidParameter(ref cause) => cause, DescribeIdentityUsageError::NotAuthorized(ref cause) => cause, DescribeIdentityUsageError::ResourceNotFound(ref cause) => cause, DescribeIdentityUsageError::TooManyRequests(ref cause) => cause, DescribeIdentityUsageError::Validation(ref cause) => cause, DescribeIdentityUsageError::Credentials(ref err) => err.description(), DescribeIdentityUsageError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeIdentityUsageError::ParseError(ref cause) => cause, DescribeIdentityUsageError::Unknown(_) => "unknown error", } } } /// Errors returned by GetBulkPublishDetails #[derive(Debug, PartialEq)] pub enum GetBulkPublishDetailsError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl GetBulkPublishDetailsError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> GetBulkPublishDetailsError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return GetBulkPublishDetailsError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return GetBulkPublishDetailsError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return GetBulkPublishDetailsError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return GetBulkPublishDetailsError::ResourceNotFound(String::from(error_message)) } "ValidationException" => { return GetBulkPublishDetailsError::Validation(error_message.to_string()) } _ => {} } } return GetBulkPublishDetailsError::Unknown(res); } } impl From<serde_json::error::Error> for GetBulkPublishDetailsError { fn from(err: serde_json::error::Error) -> GetBulkPublishDetailsError { GetBulkPublishDetailsError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for GetBulkPublishDetailsError { fn from(err: CredentialsError) -> 
GetBulkPublishDetailsError { GetBulkPublishDetailsError::Credentials(err) } } impl From<HttpDispatchError> for GetBulkPublishDetailsError { fn from(err: HttpDispatchError) -> GetBulkPublishDetailsError { GetBulkPublishDetailsError::HttpDispatch(err) } } impl From<io::Error> for GetBulkPublishDetailsError { fn from(err: io::Error) -> GetBulkPublishDetailsError { GetBulkPublishDetailsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBulkPublishDetailsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBulkPublishDetailsError { fn description(&self) -> &str { match *self { GetBulkPublishDetailsError::InternalError(ref cause) => cause, GetBulkPublishDetailsError::InvalidParameter(ref cause) => cause, GetBulkPublishDetailsError::NotAuthorized(ref cause) => cause, GetBulkPublishDetailsError::ResourceNotFound(ref cause) => cause, GetBulkPublishDetailsError::Validation(ref cause) => cause, GetBulkPublishDetailsError::Credentials(ref err) => err.description(), GetBulkPublishDetailsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetBulkPublishDetailsError::ParseError(ref cause) => cause, GetBulkPublishDetailsError::Unknown(_) => "unknown error", } } } /// Errors returned by GetCognitoEvents #[derive(Debug, PartialEq)] pub enum GetCognitoEventsError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(BufferedHttpResponse), } impl GetCognitoEventsError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> GetCognitoEventsError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return GetCognitoEventsError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return GetCognitoEventsError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return GetCognitoEventsError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return GetCognitoEventsError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return GetCognitoEventsError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return GetCognitoEventsError::Validation(error_message.to_string()) } _ => {} } } return GetCognitoEventsError::Unknown(res); } } impl From<serde_json::error::Error> for GetCognitoEventsError { fn from(err: serde_json::error::Error) -> GetCognitoEventsError { GetCognitoEventsError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for GetCognitoEventsError { fn from(err: CredentialsError) -> GetCognitoEventsError { GetCognitoEventsError::Credentials(err) } } impl From<HttpDispatchError> for GetCognitoEventsError { fn from(err: HttpDispatchError) -> GetCognitoEventsError { GetCognitoEventsError::HttpDispatch(err) } } impl From<io::Error> for GetCognitoEventsError { fn from(err: io::Error) -> GetCognitoEventsError { GetCognitoEventsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetCognitoEventsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetCognitoEventsError { fn description(&self) -> &str { match *self { GetCognitoEventsError::InternalError(ref cause) => cause, GetCognitoEventsError::InvalidParameter(ref cause) => cause, GetCognitoEventsError::NotAuthorized(ref cause) => cause, GetCognitoEventsError::ResourceNotFound(ref cause) => cause, GetCognitoEventsError::TooManyRequests(ref cause) => cause, GetCognitoEventsError::Validation(ref cause) => cause, GetCognitoEventsError::Credentials(ref err) => err.description(), GetCognitoEventsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetCognitoEventsError::ParseError(ref cause) => cause, GetCognitoEventsError::Unknown(_) => "unknown error", } } } /// Errors returned by GetIdentityPoolConfiguration #[derive(Debug, PartialEq)] pub enum GetIdentityPoolConfigurationError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is 
not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl GetIdentityPoolConfigurationError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> GetIdentityPoolConfigurationError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return GetIdentityPoolConfigurationError::InternalError(String::from( error_message, )) } "InvalidParameterException" => { return GetIdentityPoolConfigurationError::InvalidParameter(String::from( error_message, )) } "NotAuthorizedException" => { return GetIdentityPoolConfigurationError::NotAuthorized(String::from( error_message, )) } "ResourceNotFoundException" => { return GetIdentityPoolConfigurationError::ResourceNotFound(String::from( error_message, )) } "TooManyRequestsException" => { return GetIdentityPoolConfigurationError::TooManyRequests(String::from( error_message, )) } "ValidationException" => { return GetIdentityPoolConfigurationError::Validation(error_message.to_string()) } _ => {} } } return GetIdentityPoolConfigurationError::Unknown(res); } } impl From<serde_json::error::Error> for GetIdentityPoolConfigurationError { fn from(err: serde_json::error::Error) -> GetIdentityPoolConfigurationError { GetIdentityPoolConfigurationError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for GetIdentityPoolConfigurationError { fn from(err: CredentialsError) -> GetIdentityPoolConfigurationError { GetIdentityPoolConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for GetIdentityPoolConfigurationError { fn from(err: HttpDispatchError) -> GetIdentityPoolConfigurationError { GetIdentityPoolConfigurationError::HttpDispatch(err) } } impl From<io::Error> for GetIdentityPoolConfigurationError { fn from(err: io::Error) -> GetIdentityPoolConfigurationError { GetIdentityPoolConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetIdentityPoolConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetIdentityPoolConfigurationError { fn description(&self) -> &str { match *self { GetIdentityPoolConfigurationError::InternalError(ref cause) 
=> cause, GetIdentityPoolConfigurationError::InvalidParameter(ref cause) => cause, GetIdentityPoolConfigurationError::NotAuthorized(ref cause) => cause, GetIdentityPoolConfigurationError::ResourceNotFound(ref cause) => cause, GetIdentityPoolConfigurationError::TooManyRequests(ref cause) => cause, GetIdentityPoolConfigurationError::Validation(ref cause) => cause, GetIdentityPoolConfigurationError::Credentials(ref err) => err.description(), GetIdentityPoolConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetIdentityPoolConfigurationError::ParseError(ref cause) => cause, GetIdentityPoolConfigurationError::Unknown(_) => "unknown error", } } } /// Errors returned by ListDatasets #[derive(Debug, PartialEq)] pub enum ListDatasetsError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl ListDatasetsError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> ListDatasetsError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return ListDatasetsError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return ListDatasetsError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return ListDatasetsError::NotAuthorized(String::from(error_message)) } "TooManyRequestsException" => { return ListDatasetsError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return ListDatasetsError::Validation(error_message.to_string()) } _ => {} } } return ListDatasetsError::Unknown(res); } } impl From<serde_json::error::Error> for ListDatasetsError { fn from(err: serde_json::error::Error) -> ListDatasetsError { ListDatasetsError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for ListDatasetsError { fn from(err: CredentialsError) -> ListDatasetsError { ListDatasetsError::Credentials(err) } } impl From<HttpDispatchError> for ListDatasetsError { fn from(err: HttpDispatchError) -> ListDatasetsError { ListDatasetsError::HttpDispatch(err) } } impl 
From<io::Error> for ListDatasetsError { fn from(err: io::Error) -> ListDatasetsError { ListDatasetsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListDatasetsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListDatasetsError { fn description(&self) -> &str { match *self { ListDatasetsError::InternalError(ref cause) => cause, ListDatasetsError::InvalidParameter(ref cause) => cause, ListDatasetsError::NotAuthorized(ref cause) => cause, ListDatasetsError::TooManyRequests(ref cause) => cause, ListDatasetsError::Validation(ref cause) => cause, ListDatasetsError::Credentials(ref err) => err.description(), ListDatasetsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListDatasetsError::ParseError(ref cause) => cause, ListDatasetsError::Unknown(_) => "unknown error", } } } /// Errors returned by ListIdentityPoolUsage #[derive(Debug, PartialEq)] pub enum ListIdentityPoolUsageError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl ListIdentityPoolUsageError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> ListIdentityPoolUsageError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return ListIdentityPoolUsageError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return ListIdentityPoolUsageError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return ListIdentityPoolUsageError::NotAuthorized(String::from(error_message)) } "TooManyRequestsException" => { return ListIdentityPoolUsageError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return ListIdentityPoolUsageError::Validation(error_message.to_string()) } _ => {} } } return ListIdentityPoolUsageError::Unknown(res); } } impl From<serde_json::error::Error> for ListIdentityPoolUsageError { fn from(err: serde_json::error::Error) -> ListIdentityPoolUsageError { 
ListIdentityPoolUsageError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for ListIdentityPoolUsageError { fn from(err: CredentialsError) -> ListIdentityPoolUsageError { ListIdentityPoolUsageError::Credentials(err) } } impl From<HttpDispatchError> for ListIdentityPoolUsageError { fn from(err: HttpDispatchError) -> ListIdentityPoolUsageError { ListIdentityPoolUsageError::HttpDispatch(err) } } impl From<io::Error> for ListIdentityPoolUsageError { fn from(err: io::Error) -> ListIdentityPoolUsageError { ListIdentityPoolUsageError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListIdentityPoolUsageError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListIdentityPoolUsageError { fn description(&self) -> &str { match *self { ListIdentityPoolUsageError::InternalError(ref cause) => cause, ListIdentityPoolUsageError::InvalidParameter(ref cause) => cause, ListIdentityPoolUsageError::NotAuthorized(ref cause) => cause, ListIdentityPoolUsageError::TooManyRequests(ref cause) => cause, ListIdentityPoolUsageError::Validation(ref cause) => cause, ListIdentityPoolUsageError::Credentials(ref err) => err.description(), ListIdentityPoolUsageError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListIdentityPoolUsageError::ParseError(ref cause) => cause, ListIdentityPoolUsageError::Unknown(_) => "unknown error", } } } /// Errors returned by ListRecords #[derive(Debug, PartialEq)] pub enum ListRecordsError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(BufferedHttpResponse), } impl ListRecordsError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> ListRecordsError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return ListRecordsError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return ListRecordsError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return ListRecordsError::NotAuthorized(String::from(error_message)) } "TooManyRequestsException" => { return ListRecordsError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return ListRecordsError::Validation(error_message.to_string()) } _ => {} } } return ListRecordsError::Unknown(res); } } impl From<serde_json::error::Error> for ListRecordsError { fn from(err: serde_json::error::Error) -> ListRecordsError { ListRecordsError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for ListRecordsError { fn from(err: CredentialsError) -> ListRecordsError { ListRecordsError::Credentials(err) } } impl From<HttpDispatchError> for ListRecordsError { fn from(err: HttpDispatchError) -> ListRecordsError { ListRecordsError::HttpDispatch(err) } } impl From<io::Error> for ListRecordsError { fn from(err: io::Error) -> ListRecordsError { ListRecordsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListRecordsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListRecordsError { fn description(&self) -> &str { match *self { ListRecordsError::InternalError(ref cause) => cause, ListRecordsError::InvalidParameter(ref cause) => cause, ListRecordsError::NotAuthorized(ref cause) => cause, ListRecordsError::TooManyRequests(ref cause) => cause, ListRecordsError::Validation(ref cause) => cause, ListRecordsError::Credentials(ref err) => err.description(), ListRecordsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListRecordsError::ParseError(ref cause) => cause, ListRecordsError::Unknown(_) => "unknown error", } } } /// Errors returned by RegisterDevice #[derive(Debug, PartialEq)] pub enum RegisterDeviceError { /// <p>Indicates an internal service error.</p> InternalError(String), InvalidConfiguration(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error 
was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl RegisterDeviceError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> RegisterDeviceError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return RegisterDeviceError::InternalError(String::from(error_message)) } "InvalidConfigurationException" => { return RegisterDeviceError::InvalidConfiguration(String::from(error_message)) } "InvalidParameterException" => { return RegisterDeviceError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return RegisterDeviceError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return RegisterDeviceError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return RegisterDeviceError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return RegisterDeviceError::Validation(error_message.to_string()) } _ => {} } } return RegisterDeviceError::Unknown(res); } } impl From<serde_json::error::Error> for RegisterDeviceError { fn from(err: serde_json::error::Error) -> RegisterDeviceError { RegisterDeviceError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for RegisterDeviceError { fn from(err: CredentialsError) -> RegisterDeviceError { RegisterDeviceError::Credentials(err) } } impl From<HttpDispatchError> for RegisterDeviceError { fn from(err: HttpDispatchError) -> RegisterDeviceError { RegisterDeviceError::HttpDispatch(err) } } impl From<io::Error> for RegisterDeviceError { fn from(err: io::Error) -> RegisterDeviceError { RegisterDeviceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RegisterDeviceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RegisterDeviceError { fn description(&self) -> &str { match *self { RegisterDeviceError::InternalError(ref cause) => cause, RegisterDeviceError::InvalidConfiguration(ref cause) => cause, RegisterDeviceError::InvalidParameter(ref cause) => cause, RegisterDeviceError::NotAuthorized(ref cause) => cause, RegisterDeviceError::ResourceNotFound(ref cause) => cause, RegisterDeviceError::TooManyRequests(ref cause) => cause, RegisterDeviceError::Validation(ref cause) => cause, RegisterDeviceError::Credentials(ref err) => err.description(), RegisterDeviceError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), RegisterDeviceError::ParseError(ref cause) => 
cause, RegisterDeviceError::Unknown(_) => "unknown error", } } } /// Errors returned by SetCognitoEvents #[derive(Debug, PartialEq)] pub enum SetCognitoEventsError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl SetCognitoEventsError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> SetCognitoEventsError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return SetCognitoEventsError::InternalError(String::from(error_message)) } "InvalidParameterException" => { return SetCognitoEventsError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return SetCognitoEventsError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return SetCognitoEventsError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return SetCognitoEventsError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return SetCognitoEventsError::Validation(error_message.to_string()) } _ => {} } } return SetCognitoEventsError::Unknown(res); } } impl From<serde_json::error::Error> for SetCognitoEventsError { fn from(err: serde_json::error::Error) -> SetCognitoEventsError { SetCognitoEventsError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for SetCognitoEventsError { fn from(err: CredentialsError) -> SetCognitoEventsError { SetCognitoEventsError::Credentials(err) } } impl From<HttpDispatchError> for SetCognitoEventsError { fn from(err: HttpDispatchError) -> SetCognitoEventsError { SetCognitoEventsError::HttpDispatch(err) } } impl From<io::Error> for SetCognitoEventsError { fn from(err: io::Error) -> SetCognitoEventsError { SetCognitoEventsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for SetCognitoEventsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for SetCognitoEventsError { fn 
description(&self) -> &str { match *self { SetCognitoEventsError::InternalError(ref cause) => cause, SetCognitoEventsError::InvalidParameter(ref cause) => cause, SetCognitoEventsError::NotAuthorized(ref cause) => cause, SetCognitoEventsError::ResourceNotFound(ref cause) => cause, SetCognitoEventsError::TooManyRequests(ref cause) => cause, SetCognitoEventsError::Validation(ref cause) => cause, SetCognitoEventsError::Credentials(ref err) => err.description(), SetCognitoEventsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), SetCognitoEventsError::ParseError(ref cause) => cause, SetCognitoEventsError::Unknown(_) => "unknown error", } } } /// Errors returned by SetIdentityPoolConfiguration #[derive(Debug, PartialEq)] pub enum SetIdentityPoolConfigurationError { /// <p>Thrown if there are parallel requests to modify a resource.</p> ConcurrentModification(String), /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl SetIdentityPoolConfigurationError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> SetIdentityPoolConfigurationError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "ConcurrentModificationException" => { return SetIdentityPoolConfigurationError::ConcurrentModification(String::from( error_message, )) } "InternalErrorException" => { return SetIdentityPoolConfigurationError::InternalError(String::from( error_message, )) } "InvalidParameterException" => { return SetIdentityPoolConfigurationError::InvalidParameter(String::from( error_message, )) } "NotAuthorizedException" => { return SetIdentityPoolConfigurationError::NotAuthorized(String::from( error_message, )) } "ResourceNotFoundException" => { return SetIdentityPoolConfigurationError::ResourceNotFound(String::from( error_message, )) } "TooManyRequestsException" => { return SetIdentityPoolConfigurationError::TooManyRequests(String::from( error_message, )) } "ValidationException" => { return SetIdentityPoolConfigurationError::Validation(error_message.to_string()) } _ => {} } } return
SetIdentityPoolConfigurationError::Unknown(res); } } impl From<serde_json::error::Error> for SetIdentityPoolConfigurationError { fn from(err: serde_json::error::Error) -> SetIdentityPoolConfigurationError { SetIdentityPoolConfigurationError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for SetIdentityPoolConfigurationError { fn from(err: CredentialsError) -> SetIdentityPoolConfigurationError { SetIdentityPoolConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for SetIdentityPoolConfigurationError { fn from(err: HttpDispatchError) -> SetIdentityPoolConfigurationError { SetIdentityPoolConfigurationError::HttpDispatch(err) } } impl From<io::Error> for SetIdentityPoolConfigurationError { fn from(err: io::Error) -> SetIdentityPoolConfigurationError { SetIdentityPoolConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for SetIdentityPoolConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for SetIdentityPoolConfigurationError { fn description(&self) -> &str { match *self { SetIdentityPoolConfigurationError::ConcurrentModification(ref cause) => cause, SetIdentityPoolConfigurationError::InternalError(ref cause) => cause, SetIdentityPoolConfigurationError::InvalidParameter(ref cause) => cause, SetIdentityPoolConfigurationError::NotAuthorized(ref cause) => cause, SetIdentityPoolConfigurationError::ResourceNotFound(ref cause) => cause, SetIdentityPoolConfigurationError::TooManyRequests(ref cause) => cause, SetIdentityPoolConfigurationError::Validation(ref cause) => cause, SetIdentityPoolConfigurationError::Credentials(ref err) => err.description(), SetIdentityPoolConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } SetIdentityPoolConfigurationError::ParseError(ref cause) => cause, SetIdentityPoolConfigurationError::Unknown(_) => "unknown error", } } } /// Errors returned by SubscribeToDataset #[derive(Debug, PartialEq)] pub enum SubscribeToDatasetError { /// <p>Indicates an internal service error.</p> InternalError(String), InvalidConfiguration(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(BufferedHttpResponse), } impl SubscribeToDatasetError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> SubscribeToDatasetError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return SubscribeToDatasetError::InternalError(String::from(error_message)) } "InvalidConfigurationException" => { return SubscribeToDatasetError::InvalidConfiguration(String::from( error_message, )) } "InvalidParameterException" => { return SubscribeToDatasetError::InvalidParameter(String::from(error_message)) } "NotAuthorizedException" => { return SubscribeToDatasetError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return SubscribeToDatasetError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return SubscribeToDatasetError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return SubscribeToDatasetError::Validation(error_message.to_string()) } _ => {} } } return SubscribeToDatasetError::Unknown(res); } } impl From<serde_json::error::Error> for SubscribeToDatasetError { fn from(err: serde_json::error::Error) -> SubscribeToDatasetError { SubscribeToDatasetError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for SubscribeToDatasetError { fn from(err: CredentialsError) -> SubscribeToDatasetError { SubscribeToDatasetError::Credentials(err) } } impl From<HttpDispatchError> for SubscribeToDatasetError { fn from(err: HttpDispatchError) -> SubscribeToDatasetError { SubscribeToDatasetError::HttpDispatch(err) } } impl From<io::Error> for SubscribeToDatasetError { fn from(err: io::Error) -> SubscribeToDatasetError { SubscribeToDatasetError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for SubscribeToDatasetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for SubscribeToDatasetError { fn description(&self) -> &str { match *self { SubscribeToDatasetError::InternalError(ref cause) => cause, SubscribeToDatasetError::InvalidConfiguration(ref cause) => cause, SubscribeToDatasetError::InvalidParameter(ref cause) => cause, SubscribeToDatasetError::NotAuthorized(ref cause) => cause, SubscribeToDatasetError::ResourceNotFound(ref cause) => cause, SubscribeToDatasetError::TooManyRequests(ref cause) => cause, SubscribeToDatasetError::Validation(ref cause) => cause, SubscribeToDatasetError::Credentials(ref err) => err.description(), SubscribeToDatasetError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } SubscribeToDatasetError::ParseError(ref cause) => cause, SubscribeToDatasetError::Unknown(_) => "unknown error", } } } /// Errors returned by UnsubscribeFromDataset #[derive(Debug, PartialEq)] pub enum 
UnsubscribeFromDatasetError { /// <p>Indicates an internal service error.</p> InternalError(String), InvalidConfiguration(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(BufferedHttpResponse), } impl UnsubscribeFromDatasetError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> UnsubscribeFromDatasetError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return UnsubscribeFromDatasetError::InternalError(String::from(error_message)) } "InvalidConfigurationException" => { return UnsubscribeFromDatasetError::InvalidConfiguration(String::from( error_message, )) } "InvalidParameterException" => { return UnsubscribeFromDatasetError::InvalidParameter(String::from( error_message, )) } "NotAuthorizedException" => { return UnsubscribeFromDatasetError::NotAuthorized(String::from(error_message)) } "ResourceNotFoundException" => { return UnsubscribeFromDatasetError::ResourceNotFound(String::from( error_message, )) } "TooManyRequestsException" => { return UnsubscribeFromDatasetError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return UnsubscribeFromDatasetError::Validation(error_message.to_string()) } _ => {} } } return UnsubscribeFromDatasetError::Unknown(res); } } impl From<serde_json::error::Error> for UnsubscribeFromDatasetError { fn from(err: serde_json::error::Error) -> UnsubscribeFromDatasetError { UnsubscribeFromDatasetError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for UnsubscribeFromDatasetError { fn from(err: CredentialsError) -> UnsubscribeFromDatasetError { UnsubscribeFromDatasetError::Credentials(err) } } impl From<HttpDispatchError> for UnsubscribeFromDatasetError { fn from(err: HttpDispatchError) -> UnsubscribeFromDatasetError { UnsubscribeFromDatasetError::HttpDispatch(err) } } impl From<io::Error> for UnsubscribeFromDatasetError { fn from(err: io::Error) -> UnsubscribeFromDatasetError { UnsubscribeFromDatasetError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for 
UnsubscribeFromDatasetError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UnsubscribeFromDatasetError { fn description(&self) -> &str { match *self { UnsubscribeFromDatasetError::InternalError(ref cause) => cause, UnsubscribeFromDatasetError::InvalidConfiguration(ref cause) => cause, UnsubscribeFromDatasetError::InvalidParameter(ref cause) => cause, UnsubscribeFromDatasetError::NotAuthorized(ref cause) => cause, UnsubscribeFromDatasetError::ResourceNotFound(ref cause) => cause, UnsubscribeFromDatasetError::TooManyRequests(ref cause) => cause, UnsubscribeFromDatasetError::Validation(ref cause) => cause, UnsubscribeFromDatasetError::Credentials(ref err) => err.description(), UnsubscribeFromDatasetError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } UnsubscribeFromDatasetError::ParseError(ref cause) => cause, UnsubscribeFromDatasetError::Unknown(_) => "unknown error", } } } /// Errors returned by UpdateRecords #[derive(Debug, PartialEq)] pub enum UpdateRecordsError { /// <p>Indicates an internal service error.</p> InternalError(String), /// <p>The AWS Lambda function returned invalid output or an exception.</p> InvalidLambdaFunctionOutput(String), /// <p>Thrown when a request parameter does not comply with the associated constraints.</p> InvalidParameter(String), /// <p>AWS Lambda throttled your account, please contact AWS Support</p> LambdaThrottled(String), /// <p>Thrown when the limit on the number of objects or operations has been exceeded.</p> LimitExceeded(String), /// <p>Thrown when a user is not authorized to access the requested resource.</p> NotAuthorized(String), /// <p>Thrown if an update can&#39;t be applied because the resource was changed by another call and this would result in a conflict.</p> ResourceConflict(String), /// <p>Thrown if the resource doesn&#39;t exist.</p> ResourceNotFound(String), /// <p>Thrown if the request is throttled.</p> TooManyRequests(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An error occurred parsing the response payload. ParseError(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(BufferedHttpResponse), } impl UpdateRecordsError { // see boto RestJSONParser impl for parsing errors // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L838-L850 pub fn from_response(res: BufferedHttpResponse) -> UpdateRecordsError { if let Ok(json) = from_slice::<SerdeJsonValue>(&res.body) { let error_type = match res.headers.get("x-amzn-errortype") { Some(raw_error_type) => raw_error_type .split(':') .next() .unwrap_or_else(|| "Unknown"), _ => json .get("code") .or_else(|| json.get("Code")) .and_then(|c| c.as_str()) .unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message" // see boto BaseJSONParser impl for parsing message // https://github.com/boto/botocore/blob/4dff78c840403d1d17db9b3f800b20d3bd9fbf9f/botocore/parsers.py#L595-L598 let error_message = json .get("message") .or_else(|| json.get("Message")) .and_then(|m| m.as_str()) .unwrap_or(""); match error_type { "InternalErrorException" => { return UpdateRecordsError::InternalError(String::from(error_message)) } "InvalidLambdaFunctionOutputException" => { return UpdateRecordsError::InvalidLambdaFunctionOutput(String::from( error_message, )) } "InvalidParameterException" => { return UpdateRecordsError::InvalidParameter(String::from(error_message)) } "LambdaThrottledException" => { return UpdateRecordsError::LambdaThrottled(String::from(error_message)) } "LimitExceededException" => { return UpdateRecordsError::LimitExceeded(String::from(error_message)) } "NotAuthorizedException" => { return UpdateRecordsError::NotAuthorized(String::from(error_message)) } "ResourceConflictException" => { return UpdateRecordsError::ResourceConflict(String::from(error_message)) } "ResourceNotFoundException" => { return UpdateRecordsError::ResourceNotFound(String::from(error_message)) } "TooManyRequestsException" => { return UpdateRecordsError::TooManyRequests(String::from(error_message)) } "ValidationException" => { return UpdateRecordsError::Validation(error_message.to_string()) } _ => {} } } return UpdateRecordsError::Unknown(res); } } impl From<serde_json::error::Error> for UpdateRecordsError { fn from(err: serde_json::error::Error) -> UpdateRecordsError { UpdateRecordsError::ParseError(err.description().to_string()) } } impl From<CredentialsError> for UpdateRecordsError { fn from(err: CredentialsError) -> UpdateRecordsError { UpdateRecordsError::Credentials(err) } } impl From<HttpDispatchError> for UpdateRecordsError { fn from(err: HttpDispatchError) -> UpdateRecordsError { UpdateRecordsError::HttpDispatch(err) } } impl From<io::Error> for UpdateRecordsError { fn from(err: io::Error) -> UpdateRecordsError { UpdateRecordsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for UpdateRecordsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UpdateRecordsError { fn description(&self) -> &str { match *self { UpdateRecordsError::InternalError(ref cause) => cause, UpdateRecordsError::InvalidLambdaFunctionOutput(ref cause) => cause, UpdateRecordsError::InvalidParameter(ref cause) => cause, UpdateRecordsError::LambdaThrottled(ref cause) => cause, UpdateRecordsError::LimitExceeded(ref cause) => cause, UpdateRecordsError::NotAuthorized(ref cause) => cause, UpdateRecordsError::ResourceConflict(ref cause) => cause, UpdateRecordsError::ResourceNotFound(ref cause) => cause, UpdateRecordsError::TooManyRequests(ref cause) => cause, UpdateRecordsError::Validation(ref cause) => cause, 
UpdateRecordsError::Credentials(ref err) => err.description(), UpdateRecordsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), UpdateRecordsError::ParseError(ref cause) => cause, UpdateRecordsError::Unknown(_) => "unknown error", } } } /// Trait representing the capabilities of the Amazon Cognito Sync API. Amazon Cognito Sync clients implement this trait. pub trait CognitoSync { /// <p>Initiates a bulk publish of all existing datasets for an Identity Pool to the configured stream. Customers are limited to one successful bulk publish per 24 hours. Bulk publish is an asynchronous request, customers can see the status of the request via the GetBulkPublishDetails operation.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn bulk_publish( &self, input: BulkPublishRequest, ) -> RusotoFuture<BulkPublishResponse, BulkPublishError>; /// <p>Deletes the specific dataset. The dataset will be deleted permanently, and the action can't be undone. Datasets that this dataset was merged with will no longer report the merge. Any subsequent operation on this dataset will result in a ResourceNotFoundException.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.</p> fn delete_dataset( &self, input: DeleteDatasetRequest, ) -> RusotoFuture<DeleteDatasetResponse, DeleteDatasetError>; /// <p>Gets meta data about a dataset by identity and dataset name. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use Cognito Identity credentials to make this API call.</p> fn describe_dataset( &self, input: DescribeDatasetRequest, ) -> RusotoFuture<DescribeDatasetResponse, DescribeDatasetError>; /// <p>Gets usage details (for example, data storage) about a particular identity pool.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn describe_identity_pool_usage( &self, input: DescribeIdentityPoolUsageRequest, ) -> RusotoFuture<DescribeIdentityPoolUsageResponse, DescribeIdentityPoolUsageError>; /// <p>Gets usage information for an identity, including number of datasets and data usage.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.</p> fn describe_identity_usage( &self, input: DescribeIdentityUsageRequest, ) -> RusotoFuture<DescribeIdentityUsageResponse, DescribeIdentityUsageError>; /// <p>Get the status of the last BulkPublish operation for an identity pool.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn get_bulk_publish_details( &self, input: GetBulkPublishDetailsRequest, ) -> RusotoFuture<GetBulkPublishDetailsResponse, GetBulkPublishDetailsError>; /// <p>Gets the events and the corresponding Lambda functions associated with an identity pool.</p> <p>This API can only be called with developer credentials. 
You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn get_cognito_events( &self, input: GetCognitoEventsRequest, ) -> RusotoFuture<GetCognitoEventsResponse, GetCognitoEventsError>; /// <p>Gets the configuration settings of an identity pool.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn get_identity_pool_configuration( &self, input: GetIdentityPoolConfigurationRequest, ) -> RusotoFuture<GetIdentityPoolConfigurationResponse, GetIdentityPoolConfigurationError>; /// <p>Lists datasets for an identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.</p> <p>ListDatasets can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use the Cognito Identity credentials to make this API call.</p> fn list_datasets( &self, input: ListDatasetsRequest, ) -> RusotoFuture<ListDatasetsResponse, ListDatasetsError>; /// <p>Gets a list of identity pools registered with Cognito.</p> <p>ListIdentityPoolUsage can only be called with developer credentials. You cannot make this API call with the temporary user credentials provided by Cognito Identity.</p> fn list_identity_pool_usage( &self, input: ListIdentityPoolUsageRequest, ) -> RusotoFuture<ListIdentityPoolUsageResponse, ListIdentityPoolUsageError>; /// <p>Gets paginated records, optionally changed after a particular sync count for a dataset and identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.</p> <p>ListRecords can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use Cognito Identity credentials to make this API call.</p> fn list_records( &self, input: ListRecordsRequest, ) -> RusotoFuture<ListRecordsResponse, ListRecordsError>; /// <p>Registers a device to receive push sync notifications.</p> <p>This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.</p> fn register_device( &self, input: RegisterDeviceRequest, ) -> RusotoFuture<RegisterDeviceResponse, RegisterDeviceError>; /// <p>Sets the AWS Lambda function for a given event type for an identity pool. This request only updates the key/value pair specified. Other key/value pairs are not updated. To remove a key/value pair, pass an empty value for the particular key.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn set_cognito_events( &self, input: SetCognitoEventsRequest, ) -> RusotoFuture<(), SetCognitoEventsError>; /// <p>Sets the necessary configuration for push sync.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn set_identity_pool_configuration( &self, input: SetIdentityPoolConfigurationRequest, ) -> RusotoFuture<SetIdentityPoolConfigurationResponse, SetIdentityPoolConfigurationError>; /// <p>Subscribes to receive notifications when a dataset is modified by another device.</p> <p>This API can only be called with temporary credentials provided by Cognito Identity.
You cannot call this API with developer credentials.</p> fn subscribe_to_dataset( &self, input: SubscribeToDatasetRequest, ) -> RusotoFuture<SubscribeToDatasetResponse, SubscribeToDatasetError>; /// <p>Unsubscribes from receiving notifications when a dataset is modified by another device.</p> <p>This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.</p> fn unsubscribe_from_dataset( &self, input: UnsubscribeFromDatasetRequest, ) -> RusotoFuture<UnsubscribeFromDatasetResponse, UnsubscribeFromDatasetError>; /// <p>Posts updates to records and adds and deletes records for a dataset and user.</p> <p>The sync count in the record patch is your last known sync count for that record. The server will reject an UpdateRecords request with a ResourceConflictException if you try to patch a record with a new value but a stale sync count.</p> <p>For example, if the sync count on the server is 5 for a key called highScore and you try and submit a new highScore with sync count of 4, the request will be rejected. To obtain the current sync count for a record, call ListRecords. On a successful update of the record, the response returns the new sync count for that record. You should present that sync count the next time you try to update that same record. When the record does not exist, specify the sync count as 0.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.</p> fn update_records( &self, input: UpdateRecordsRequest, ) -> RusotoFuture<UpdateRecordsResponse, UpdateRecordsError>; } /// A client for the Amazon Cognito Sync API. pub struct CognitoSyncClient { client: Client, region: region::Region, } impl CognitoSyncClient { /// Creates a client backed by the default tokio event loop. /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> CognitoSyncClient { CognitoSyncClient { client: Client::shared(), region: region, } } pub fn new_with<P, D>( request_dispatcher: D, credentials_provider: P, region: region::Region, ) -> CognitoSyncClient where P: ProvideAwsCredentials + Send + Sync + 'static, P::Future: Send, D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { CognitoSyncClient { client: Client::new_with(credentials_provider, request_dispatcher), region: region, } } } impl CognitoSync for CognitoSyncClient { /// <p>Initiates a bulk publish of all existing datasets for an Identity Pool to the configured stream. Customers are limited to one successful bulk publish per 24 hours. Bulk publish is an asynchronous request, customers can see the status of the request via the GetBulkPublishDetails operation.</p> <p>This API can only be called with developer credentials. 
You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn bulk_publish( &self, input: BulkPublishRequest, ) -> RusotoFuture<BulkPublishResponse, BulkPublishError> { let request_uri = format!( "/identitypools/{identity_pool_id}/bulkpublish", identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("POST", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<BulkPublishResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(BulkPublishError::from_response(response))), ) } }) } /// <p>Deletes the specific dataset. The dataset will be deleted permanently, and the action can't be undone. Datasets that this dataset was merged with will no longer report the merge. Any subsequent operation on this dataset will result in a ResourceNotFoundException.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.</p> fn delete_dataset( &self, input: DeleteDatasetRequest, ) -> RusotoFuture<DeleteDatasetResponse, DeleteDatasetError> { let request_uri = format!( "/identitypools/{identity_pool_id}/identities/{identity_id}/datasets/{dataset_name}", dataset_name = input.dataset_name, identity_id = input.identity_id, identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("DELETE", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<DeleteDatasetResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(DeleteDatasetError::from_response(response))), ) } }) } /// <p>Gets meta data about a dataset by identity and dataset name. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials. 
You should use Cognito Identity credentials to make this API call.</p> fn describe_dataset( &self, input: DescribeDatasetRequest, ) -> RusotoFuture<DescribeDatasetResponse, DescribeDatasetError> { let request_uri = format!( "/identitypools/{identity_pool_id}/identities/{identity_id}/datasets/{dataset_name}", dataset_name = input.dataset_name, identity_id = input.identity_id, identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<DescribeDatasetResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(DescribeDatasetError::from_response(response))), ) } }) } /// <p>Gets usage details (for example, data storage) about a particular identity pool.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn describe_identity_pool_usage( &self, input: DescribeIdentityPoolUsageRequest, ) -> RusotoFuture<DescribeIdentityPoolUsageResponse, DescribeIdentityPoolUsageError> { let request_uri = format!( "/identitypools/{identity_pool_id}", identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<DescribeIdentityPoolUsageResponse>(&body).unwrap(); result })) } else { Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeIdentityPoolUsageError::from_response(response)) })) } }) } /// <p>Gets usage information for an identity, including number of datasets and data usage.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.</p> fn describe_identity_usage( &self, input: DescribeIdentityUsageRequest, ) -> RusotoFuture<DescribeIdentityUsageResponse, DescribeIdentityUsageError> { let request_uri = format!( "/identitypools/{identity_pool_id}/identities/{identity_id}", identity_id = input.identity_id, identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<DescribeIdentityUsageResponse>(&body).unwrap(); result })) } else { Box::new( response.buffer().from_err().and_then(|response| { Err(DescribeIdentityUsageError::from_response(response)) 
}), ) } }) } /// <p>Get the status of the last BulkPublish operation for an identity pool.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn get_bulk_publish_details( &self, input: GetBulkPublishDetailsRequest, ) -> RusotoFuture<GetBulkPublishDetailsResponse, GetBulkPublishDetailsError> { let request_uri = format!( "/identitypools/{identity_pool_id}/getBulkPublishDetails", identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("POST", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<GetBulkPublishDetailsResponse>(&body).unwrap(); result })) } else { Box::new( response.buffer().from_err().and_then(|response| { Err(GetBulkPublishDetailsError::from_response(response)) }), ) } }) } /// <p>Gets the events and the corresponding Lambda functions associated with an identity pool.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn get_cognito_events( &self, input: GetCognitoEventsRequest, ) -> RusotoFuture<GetCognitoEventsResponse, GetCognitoEventsError> { let request_uri = format!( "/identitypools/{identity_pool_id}/events", identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<GetCognitoEventsResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(GetCognitoEventsError::from_response(response))), ) } }) } /// <p>Gets the configuration settings of an identity pool.</p> <p>This API can only be called with developer credentials. 
You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn get_identity_pool_configuration( &self, input: GetIdentityPoolConfigurationRequest, ) -> RusotoFuture<GetIdentityPoolConfigurationResponse, GetIdentityPoolConfigurationError> { let request_uri = format!( "/identitypools/{identity_pool_id}/configuration", identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<GetIdentityPoolConfigurationResponse>(&body) .unwrap(); result })) } else { Box::new(response.buffer().from_err().and_then(|response| { Err(GetIdentityPoolConfigurationError::from_response(response)) })) } }) } /// <p>Lists datasets for an identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.</p> <p>ListDatasets can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use the Cognito Identity credentials to make this API call.</p> fn list_datasets( &self, input: ListDatasetsRequest, ) -> RusotoFuture<ListDatasetsResponse, ListDatasetsError> { let request_uri = format!( "/identitypools/{identity_pool_id}/identities/{identity_id}/datasets", identity_id = input.identity_id, identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); let mut params = Params::new(); if let Some(ref x) = input.max_results { params.put("maxResults", x); } if let Some(ref x) = input.next_token { params.put("nextToken", x); } request.set_params(params); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<ListDatasetsResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(ListDatasetsError::from_response(response))), ) } }) } /// <p>Gets a list of identity pools registered with Cognito.</p> <p>ListIdentityPoolUsage can only be called with developer credentials. 
You cannot make this API call with the temporary user credentials provided by Cognito Identity.</p> fn list_identity_pool_usage( &self, input: ListIdentityPoolUsageRequest, ) -> RusotoFuture<ListIdentityPoolUsageResponse, ListIdentityPoolUsageError> { let request_uri = "/identitypools"; let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); let mut params = Params::new(); if let Some(ref x) = input.max_results { params.put("maxResults", x); } if let Some(ref x) = input.next_token { params.put("nextToken", x); } request.set_params(params); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<ListIdentityPoolUsageResponse>(&body).unwrap(); result })) } else { Box::new( response.buffer().from_err().and_then(|response| { Err(ListIdentityPoolUsageError::from_response(response)) }), ) } }) } /// <p>Gets paginated records, optionally changed after a particular sync count for a dataset and identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.</p> <p>ListRecords can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use Cognito Identity credentials to make this API call.</p> fn list_records( &self, input: ListRecordsRequest, ) -> RusotoFuture<ListRecordsResponse, ListRecordsError> { let request_uri = format!("/identitypools/{identity_pool_id}/identities/{identity_id}/datasets/{dataset_name}/records", dataset_name = input.dataset_name, identity_id = input.identity_id, identity_pool_id = input.identity_pool_id); let mut request = SignedRequest::new("GET", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); let mut params = Params::new(); if let Some(ref x) = input.last_sync_count { params.put("lastSyncCount", x); } if let Some(ref x) = input.max_results { params.put("maxResults", x); } if let Some(ref x) = input.next_token { params.put("nextToken", x); } if let Some(ref x) = input.sync_session_token { params.put("syncSessionToken", x); } request.set_params(params); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<ListRecordsResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(ListRecordsError::from_response(response))), ) } }) } /// <p>Registers a device to receive push sync notifications.</p> <p>This API can only be called with temporary credentials provided by Cognito Identity. 
You cannot call this API with developer credentials.</p> fn register_device( &self, input: RegisterDeviceRequest, ) -> RusotoFuture<RegisterDeviceResponse, RegisterDeviceError> { let request_uri = format!( "/identitypools/{identity_pool_id}/identity/{identity_id}/device", identity_id = input.identity_id, identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("POST", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); let encoded = Some(serde_json::to_vec(&input).unwrap()); request.set_payload(encoded); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<RegisterDeviceResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(RegisterDeviceError::from_response(response))), ) } }) } /// <p>Sets the AWS Lambda function for a given event type for an identity pool. This request only updates the key/value pair specified. Other key/values pairs are not updated. To remove a key value pair, pass a empty value for the particular key.</p> <p>This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn set_cognito_events( &self, input: SetCognitoEventsRequest, ) -> RusotoFuture<(), SetCognitoEventsError> { let request_uri = format!( "/identitypools/{identity_pool_id}/events", identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("POST", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); let encoded = Some(serde_json::to_vec(&input).unwrap()); request.set_payload(encoded); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let result = ::std::mem::drop(response); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(SetCognitoEventsError::from_response(response))), ) } }) } /// <p>Sets the necessary configuration for push sync.</p> <p>This API can only be called with developer credentials. 
You cannot call this API with the temporary user credentials provided by Cognito Identity.</p> fn set_identity_pool_configuration( &self, input: SetIdentityPoolConfigurationRequest, ) -> RusotoFuture<SetIdentityPoolConfigurationResponse, SetIdentityPoolConfigurationError> { let request_uri = format!( "/identitypools/{identity_pool_id}/configuration", identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("POST", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); let encoded = Some(serde_json::to_vec(&input).unwrap()); request.set_payload(encoded); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<SetIdentityPoolConfigurationResponse>(&body) .unwrap(); result })) } else { Box::new(response.buffer().from_err().and_then(|response| { Err(SetIdentityPoolConfigurationError::from_response(response)) })) } }) } /// <p>Subscribes to receive notifications when a dataset is modified by another device.</p> <p>This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.</p> fn subscribe_to_dataset( &self, input: SubscribeToDatasetRequest, ) -> RusotoFuture<SubscribeToDatasetResponse, SubscribeToDatasetError> { let request_uri = format!("/identitypools/{identity_pool_id}/identities/{identity_id}/datasets/{dataset_name}/subscriptions/{device_id}", dataset_name = input.dataset_name, device_id = input.device_id, identity_id = input.identity_id, identity_pool_id = input.identity_pool_id); let mut request = SignedRequest::new("POST", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<SubscribeToDatasetResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(SubscribeToDatasetError::from_response(response))), ) } }) } /// <p>Unsubscribes from receiving notifications when a dataset is modified by another device.</p> <p>This API can only be called with temporary credentials provided by Cognito Identity. 
You cannot call this API with developer credentials.</p> fn unsubscribe_from_dataset( &self, input: UnsubscribeFromDatasetRequest, ) -> RusotoFuture<UnsubscribeFromDatasetResponse, UnsubscribeFromDatasetError> { let request_uri = format!("/identitypools/{identity_pool_id}/identities/{identity_id}/datasets/{dataset_name}/subscriptions/{device_id}", dataset_name = input.dataset_name, device_id = input.device_id, identity_id = input.identity_id, identity_pool_id = input.identity_pool_id); let mut request = SignedRequest::new("DELETE", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<UnsubscribeFromDatasetResponse>(&body).unwrap(); result })) } else { Box::new( response.buffer().from_err().and_then(|response| { Err(UnsubscribeFromDatasetError::from_response(response)) }), ) } }) } /// <p>Posts updates to records and adds and deletes records for a dataset and user.</p> <p>The sync count in the record patch is your last known sync count for that record. The server will reject an UpdateRecords request with a ResourceConflictException if you try to patch a record with a new value but a stale sync count.</p> <p>For example, if the sync count on the server is 5 for a key called highScore and you try and submit a new highScore with sync count of 4, the request will be rejected. To obtain the current sync count for a record, call ListRecords. On a successful update of the record, the response returns the new sync count for that record. You should present that sync count the next time you try to update that same record. When the record does not exist, specify the sync count as 0.</p> <p>This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.</p> fn update_records( &self, input: UpdateRecordsRequest, ) -> RusotoFuture<UpdateRecordsResponse, UpdateRecordsError> { let request_uri = format!( "/identitypools/{identity_pool_id}/identities/{identity_id}/datasets/{dataset_name}", dataset_name = input.dataset_name, identity_id = input.identity_id, identity_pool_id = input.identity_pool_id ); let mut request = SignedRequest::new("POST", "cognito-sync", &self.region, &request_uri); request.set_content_type("application/x-amz-json-1.1".to_owned()); let encoded = Some(serde_json::to_vec(&input).unwrap()); request.set_payload(encoded); if let Some(ref client_context) = input.client_context { request.add_header("x-amz-Client-Context", &client_context.to_string()); } self.client.sign_and_dispatch(request, |response| { if response.status.as_u16() == 200 { Box::new(response.buffer().from_err().map(|response| { let mut body = response.body; if body == b"null" { body = b"{}".to_vec(); } debug!("Response body: {:?}", body); debug!("Response status: {}", response.status); let result = serde_json::from_slice::<UpdateRecordsResponse>(&body).unwrap(); result })) } else { Box::new( response .buffer() .from_err() .and_then(|response| Err(UpdateRecordsError::from_response(response))), ) } }) } } #[cfg(test)] mod protocol_tests {}<|fim▁end|>
.unwrap_or_else(|| "Unknown"), }; // message can come in either "message" or "Message"
<|file_name|>DateValueSortFunction.java<|end_file_name|><|fim▁begin|>import com.google.common.base.Function; import javax.annotation.Nullable; /** * Created by ckale on 10/29/14. */ public class DateValueSortFunction implements Function<PojoDTO, Long>{ @Nullable @Override<|fim▁hole|><|fim▁end|>
public Long apply(@Nullable final PojoDTO input) { return input.getDateTime().getMillis(); } }
<|file_name|>PropertiesFileMergerTask.java<|end_file_name|><|fim▁begin|>/* ********************************************************************** /* * NOTE: This copyright does *not* cover user programs that use Hyperic * program services by normal system calls through the application * program interfaces provided as part of the Hyperic Plug-in Development * Kit or the Hyperic Client Development Kit - this is merely considered * normal use of the program, and does *not* fall under the heading of * "derived work". * * Copyright (C) [2004-2012], VMware, Inc. * This file is part of Hyperic. * * Hyperic is free software; you can redistribute it and/or modify * it under the terms version 2 of the GNU General Public License as * published by the Free Software Foundation. This program is distributed * in the hope that it will be useful, but WITHOUT ANY WARRANTY; without * even the implied warranty of MERCHANTABILITY or FITNESS FOR A * PARTICULAR PURPOSE. See the GNU General Public License for more<|fim▁hole|> * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA. */ package org.hyperic.tools.ant; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.regex.Matcher; import java.util.regex.Pattern; public class PropertiesFileMergerTask extends Properties{ private static Method saveConvertMethod ; private String fileContent ; private Map<String,String[]> delta ; private boolean isLoaded ; static { try{ saveConvertMethod = Properties.class.getDeclaredMethod("saveConvert", String.class, boolean.class, boolean.class) ; saveConvertMethod.setAccessible(true) ; }catch(Throwable t) { throw (t instanceof RuntimeException ? (RuntimeException) t: new RuntimeException(t)) ; }//EO catch block }//EO static block public PropertiesFileMergerTask() { this.delta = new HashMap<String, String[]>() ; }//EOM @Override public synchronized Object put(Object key, Object value) { Object oPrevious = null ; try{ oPrevious = super.put(key, value); if(this.isLoaded && !value.equals(oPrevious)) this.delta.put(key.toString(), new String[] { value.toString(), (String) oPrevious}) ; return oPrevious ; }catch(Throwable t) { t.printStackTrace() ; throw new RuntimeException(t) ; }//EO catch block }//EOM @Override public final synchronized Object remove(Object key) { final Object oExisting = super.remove(key); this.delta.remove(key) ; return oExisting ; }//EOM public static final PropertiesFileMergerTask load(final File file) throws IOException { InputStream fis = null, fis1 = null ; try{ if(!file.exists()) throw new IOException(file + " does not exist or is not readable") ; //else final PropertiesFileMergerTask properties = new PropertiesFileMergerTask() ; fis = new FileInputStream(file) ; //first read the content into a string final byte[] arrFileContent = new byte[(int)fis.available()] ; fis.read(arrFileContent) ; properties.fileContent = new String(arrFileContent) ; fis1 = new ByteArrayInputStream(arrFileContent) ; properties.load(fis1); // System.out.println(properties.fileContent); return properties ; }catch(Throwable t) { throw (t instanceof IOException ? 
(IOException)t : new IOException(t)) ; }finally{ if(fis != null) fis.close() ; if(fis1 != null) fis1.close() ; }//EO catch block }//EOM @Override public synchronized void load(InputStream inStream) throws IOException { try{ super.load(inStream); }finally{ this.isLoaded = true ; }//EO catch block }//EOm public final void store(final File outputFile, final String comments) throws IOException { if(this.delta.isEmpty()) return ; FileOutputStream fos = null ; String key = null, value = null ; Pattern pattern = null ; Matcher matcher = null ; String[] arrValues = null; try{ for(Map.Entry<String,String[]> entry : this.delta.entrySet()) { key = (String) saveConvertMethod.invoke(this, entry.getKey(), true/*escapeSpace*/, true /*escUnicode*/); arrValues = entry.getValue() ; value = (String) saveConvertMethod.invoke(this, arrValues[0], false/*escapeSpace*/, true /*escUnicode*/); //if the arrValues[1] == null then this is a new property if(arrValues[1] == null) { this.fileContent = this.fileContent + "\n" + key + "=" + value ; }else { //pattern = Pattern.compile(key+"\\s*=(\\s*.*\\s*)"+ arrValues[1].replaceAll("\\s+", "(\\\\s*.*\\\\s*)") , Pattern.MULTILINE) ; pattern = Pattern.compile(key+"\\s*=.*\n", Pattern.MULTILINE) ; matcher = pattern.matcher(this.fileContent) ; this.fileContent = matcher.replaceAll(key + "=" + value) ; }//EO else if existing property System.out.println("Adding/Replacing " + key + "-->" + arrValues[1] + " with: " + value) ; }//EO while there are more entries ; fos = new FileOutputStream(outputFile) ; fos.write(this.fileContent.getBytes()) ; }catch(Throwable t) { throw (t instanceof IOException ? (IOException)t : new IOException(t)) ; }finally{ if(fos != null) { fos.flush() ; fos.close() ; }//EO if bw was initialized }//EO catch block }//EOM public static void main(String[] args) throws Throwable { ///FOR DEBUG String s = " 1 2 4 sdf \\\\\nsdfsd" ; final Pattern pattern = Pattern.compile("test.prop2\\s*=.*(?:\\\\?\\s*)(\n)" , Pattern.MULTILINE) ; final Matcher matcher = pattern.matcher("test.prop2="+s) ; System.out.println(matcher.replaceAll("test.prop2=" + "newvalue$1")) ; ///FOR DEBUG if(true) return ; final String path = "/tmp/confs/hq-server-46.conf" ; final File file = new File(path) ; final PropertiesFileMergerTask properties = PropertiesFileMergerTask.load(file) ; /* final Pattern pattern = Pattern.compile("test.prop1\\s*=this(\\s*.*\\s*)is(\\s*.*\\s*)the(\\s*.*\\s*)value" , Pattern.MULTILINE) ; final Matcher matcher = pattern.matcher(properties.fileContent) ; System.out.println( matcher.replaceAll("test.prop1=new value") ) ; System.out.println("\n\n--> " + properties.get("test.prop1")) ;*/ final String overridingConfPath = "/tmp/confs/hq-server-5.conf" ; //final Properties overrdingProperties = new Properties() ; final FileInputStream fis = new FileInputStream(overridingConfPath) ; properties.load(fis) ; fis.close() ; ///properties.putAll(overrdingProperties) ; final String outputPath = "/tmp/confs/output-hq-server.conf" ; final File outputFile = new File(outputPath) ; final String comments = "" ; properties.store(outputFile, comments) ; }//EOM }//EOC<|fim▁end|>
* details.
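The PropertiesFileMergerTask above keeps the original file text and rewrites only the lines whose keys actually changed, so comments and layout survive the merge, and brand-new keys are appended at the end. A rough, dependency-free Rust sketch of that idea follows; `merge_property` is a hypothetical helper that handles only plain `key=value` lines and ignores the escaping and line-continuation cases the Java task has to deal with.

/// Replace the value of `key` in properties-style text, leaving every other
/// line (comments, blank lines, unrelated keys) exactly as it was.
/// If the key is absent, the new pair is appended, as the Java task does.
fn merge_property(content: &str, key: &str, value: &str) -> String {
    let mut found = false;
    let mut out: Vec<String> = Vec::new();
    for line in content.lines() {
        let trimmed = line.trim_start();
        // Match "key=..." or "key = ...", but not keys that merely share a prefix.
        let is_target = trimmed
            .strip_prefix(key)
            .map(|rest| rest.trim_start().starts_with('='))
            .unwrap_or(false);
        if is_target {
            out.push(format!("{}={}", key, value));
            found = true;
        } else {
            out.push(line.to_string());
        }
    }
    if !found {
        out.push(format!("{}={}", key, value));
    }
    out.join("\n")
}

fn main() {
    let conf = "# server settings\nserver.port=7080\nserver.webapp.baseurl=http://localhost\n";
    let merged = merge_property(conf, "server.port", "7443");
    assert!(merged.contains("server.port=7443"));
    assert!(merged.contains("# server settings"));
    println!("{}", merged);
}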
<|file_name|>test_cache_manager.py<|end_file_name|><|fim▁begin|># coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import shutil import tempfile from pants.base.build_invalidator import CacheKey, CacheKeyGenerator from pants.base.cache_manager import InvalidationCacheManager, InvalidationCheck, VersionedTarget from pants_test.base_test import BaseTest class AppendingCacheKeyGenerator(CacheKeyGenerator): """Generates cache keys for versions of target sets.""" @staticmethod def combine_cache_keys(cache_keys): if len(cache_keys) == 1: return cache_keys[0] else: sorted_cache_keys = sorted(cache_keys) # For commutativity. combined_id = ','.join([cache_key.id for cache_key in sorted_cache_keys]) combined_hash = ','.join([cache_key.hash for cache_key in sorted_cache_keys]) combined_num_sources = reduce(lambda x, y: x + y, [cache_key.num_sources for cache_key in sorted_cache_keys], 0) return CacheKey(combined_id, combined_hash, combined_num_sources) def key_for_target(self, target, sources=None, transitive=False, fingerprint_strategy=None): return CacheKey(target.id, target.id, target.num_chunking_units) def key_for(self, tid, sources): return CacheKey(tid, tid, len(sources)) def print_vt(vt): print('%d (%s) %s: [ %s ]' % (len(vt.targets), vt.cache_key, vt.valid, ', '.join(['%s(%s)' % (v.id, v.cache_key) for v in vt.versioned_targets]))) class InvalidationCacheManagerTest(BaseTest): class TestInvalidationCacheManager(InvalidationCacheManager):<|fim▁hole|> def setUp(self): super(InvalidationCacheManagerTest, self).setUp() self._dir = tempfile.mkdtemp() self.cache_manager = InvalidationCacheManagerTest.TestInvalidationCacheManager(self._dir) def tearDown(self): shutil.rmtree(self._dir, ignore_errors=True) super(InvalidationCacheManagerTest, self).tearDown() def make_vts(self, target): return VersionedTarget(self.cache_manager, target, target.id) def test_partition(self): # The default EmptyPayload chunking unit happens to be 1, so each of these Targets # has a chunking unit contribution of 1 a = self.make_target(':a', dependencies=[]) b = self.make_target(':b', dependencies=[a]) c = self.make_target(':c', dependencies=[b]) d = self.make_target(':d', dependencies=[c, a]) e = self.make_target(':e', dependencies=[d]) targets = [a, b, c, d, e] def print_partitions(partitions): strs = [] for partition in partitions: strs.append('(%s)' % ', '.join([t.id for t in partition.targets])) print('[%s]' % ' '.join(strs)) # Verify basic data structure soundness. all_vts = self.cache_manager._wrap_targets(targets) invalid_vts = filter(lambda vt: not vt.valid, all_vts) self.assertEquals(5, len(invalid_vts)) self.assertEquals(5, len(all_vts)) vts_targets = [vt.targets[0] for vt in all_vts] self.assertEquals(set(targets), set(vts_targets)) # Test a simple partition. ic = InvalidationCheck(all_vts, [], 3) partitioned = ic.all_vts_partitioned print_partitions(partitioned) # Several correct partitionings are possible, but in all cases 4 1-source targets will be # added to the first partition before it exceeds the limit of 3, and the final target will # be in a partition by itself. self.assertEquals(2, len(partitioned)) self.assertEquals(4, len(partitioned[0].targets)) self.assertEquals(1, len(partitioned[1].targets)) # Test partition with colors. 
red = 'red' blue = 'blue' colors = { a: blue, b: red, c: red, d: red, e: blue } # As a reference, we partition without colors. ic = InvalidationCheck(all_vts, [], 2) partitioned = ic.all_vts_partitioned print_partitions(partitioned) self.assertEquals(2, len(partitioned)) self.assertEquals(3, len(partitioned[0].targets)) self.assertEquals(2, len(partitioned[1].targets)) # Now apply color restrictions. ic = InvalidationCheck(all_vts, [], 2, target_colors=colors) partitioned = ic.all_vts_partitioned print_partitions(partitioned) self.assertEquals(3, len(partitioned)) self.assertEquals(1, len(partitioned[0].targets)) self.assertEquals(3, len(partitioned[1].targets)) self.assertEquals(1, len(partitioned[2].targets))<|fim▁end|>
def __init__(self, tmpdir): InvalidationCacheManager.__init__(self, AppendingCacheKeyGenerator(), tmpdir, True, None)
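The partition test above asserts concrete sizes: five unit-size targets with a limit of 3 split into partitions of 4 and 1, and with a limit of 2 into 3 and 2. One greedy rule consistent with those numbers is "keep adding targets until the partition's accumulated size strictly exceeds the limit, then close it". The Rust sketch below implements that inferred rule for illustration only; the real InvalidationCheck partitioning (and its color handling) may differ.

/// Greedily pack target sizes into partitions, closing a partition once its
/// accumulated size exceeds `limit` -- the behaviour implied by the expected
/// partition sizes asserted in test_partition.
fn partition(sizes: &[usize], limit: usize) -> Vec<Vec<usize>> {
    let mut partitions: Vec<Vec<usize>> = Vec::new();
    let mut current: Vec<usize> = Vec::new();
    let mut acc = 0usize;
    for &s in sizes {
        current.push(s);
        acc += s;
        if acc > limit {
            partitions.push(std::mem::take(&mut current));
            acc = 0;
        }
    }
    if !current.is_empty() {
        partitions.push(current);
    }
    partitions
}

fn main() {
    let sizes = [1, 1, 1, 1, 1]; // five targets, one chunking unit each
    let p3 = partition(&sizes, 3);
    assert_eq!(p3.iter().map(|p| p.len()).collect::<Vec<_>>(), vec![4, 1]);
    let p2 = partition(&sizes, 2);
    assert_eq!(p2.iter().map(|p| p.len()).collect::<Vec<_>>(), vec![3, 2]);
}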
<|file_name|>ExampleUnitTest.java<|end_file_name|><|fim▁begin|>package wanghaisheng.com.yakerweather; import org.junit.Test; import static org.junit.Assert.*;<|fim▁hole|>/** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } }<|fim▁end|>
<|file_name|>test_onset.py<|end_file_name|><|fim▁begin|>''' Unit tests for mir_eval.onset ''' import numpy as np import json import mir_eval import glob import warnings import nose.tools A_TOL = 1e-12 # Path to the fixture files REF_GLOB = 'data/onset/ref*.txt' EST_GLOB = 'data/onset/est*.txt' SCORES_GLOB = 'data/onset/output*.json' def __unit_test_onset_function(metric): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') # First, test for a warning on empty onsets metric(np.array([]), np.arange(10)) assert len(w) == 1 assert issubclass(w[-1].category, UserWarning) assert str(w[-1].message) == "Reference onsets are empty." metric(np.arange(10), np.array([])) assert len(w) == 2 assert issubclass(w[-1].category, UserWarning) assert str(w[-1].message) == "Estimated onsets are empty." # And that the metric is 0 assert np.allclose(metric(np.array([]), np.array([])), 0) # Now test validation function - onsets must be 1d ndarray onsets = np.array([[1., 2.]]) nose.tools.assert_raises(ValueError, metric, onsets, onsets) # onsets must be in seconds (so not huge) onsets = np.array([1e10, 1e11]) nose.tools.assert_raises(ValueError, metric, onsets, onsets) # onsets must be sorted onsets = np.array([2., 1.]) nose.tools.assert_raises(ValueError, metric, onsets, onsets) # Valid onsets which are the same produce a score of 1 for all metrics onsets = np.arange(10, dtype=np.float) assert np.allclose(metric(onsets, onsets), 1) def __check_score(sco_f, metric, score, expected_score): assert np.allclose(score, expected_score, atol=A_TOL) <|fim▁hole|> # Load in all files in the same order ref_files = sorted(glob.glob(REF_GLOB)) est_files = sorted(glob.glob(EST_GLOB)) sco_files = sorted(glob.glob(SCORES_GLOB)) # Unit tests for metric in [mir_eval.onset.f_measure]: yield (__unit_test_onset_function, metric) # Regression tests for ref_f, est_f, sco_f in zip(ref_files, est_files, sco_files): with open(sco_f, 'r') as f: expected_scores = json.load(f) # Load in an example onset annotation reference_onsets = mir_eval.io.load_events(ref_f) # Load in an example onset tracker output estimated_onsets = mir_eval.io.load_events(est_f) # Compute scores scores = mir_eval.onset.evaluate(reference_onsets, estimated_onsets) # Compare them for metric in scores: # This is a simple hack to make nosetest's messages more useful yield (__check_score, sco_f, metric, scores[metric], expected_scores[metric])<|fim▁end|>
def test_onset_functions():
<|file_name|>http_loader.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use resource_task::{Metadata, Payload, Done, LoadResponse, LoadData, start_sending_opt}; use log; use std::collections::HashSet; use http::client::{RequestWriter, NetworkStream}; use http::headers::HeaderEnum; use std::io::Reader; use servo_util::task::spawn_named; use url::Url; pub fn factory(load_data: LoadData, start_chan: Sender<LoadResponse>) { spawn_named("http_loader", proc() load(load_data, start_chan))<|fim▁hole|>fn send_error(url: Url, err: String, start_chan: Sender<LoadResponse>) { match start_sending_opt(start_chan, Metadata::default(url), None) { Ok(p) => p.send(Done(Err(err))), _ => {} }; } pub fn load(load_data: LoadData, start_chan: Sender<LoadResponse>) { // FIXME: At the time of writing this FIXME, servo didn't have any central // location for configuration. If you're reading this and such a // repository DOES exist, please update this constant to use it. let max_redirects = 50u; let mut iters = 0u; let mut url = load_data.url.clone(); let mut redirected_to = HashSet::new(); // Loop to handle redirects. loop { iters = iters + 1; if iters > max_redirects { send_error(url, "too many redirects".to_string(), start_chan); return; } if redirected_to.contains(&url) { send_error(url, "redirect loop".to_string(), start_chan); return; } redirected_to.insert(url.clone()); match url.scheme.as_slice() { "http" | "https" => {} _ => { let s = format!("{:s} request, but we don't support that scheme", url.scheme); send_error(url, s, start_chan); return; } } info!("requesting {:s}", url.serialize()); let request = RequestWriter::<NetworkStream>::new(load_data.method.clone(), url.clone()); let mut writer = match request { Ok(w) => box w, Err(e) => { send_error(url, e.desc.to_string(), start_chan); return; } }; // Preserve the `host` header set automatically by RequestWriter. let host = writer.headers.host.clone(); writer.headers = load_data.headers.clone(); writer.headers.host = host; if writer.headers.accept_encoding.is_none() { // We currently don't support HTTP Compression (FIXME #2587) writer.headers.accept_encoding = Some(String::from_str("identity".as_slice())) } match load_data.data { Some(ref data) => { writer.headers.content_length = Some(data.len()); match writer.write(data.as_slice()) { Err(e) => { send_error(url, e.desc.to_string(), start_chan); return; } _ => {} } }, _ => {} } let mut response = match writer.read_response() { Ok(r) => r, Err((_, e)) => { send_error(url, e.desc.to_string(), start_chan); return; } }; /* We are currently unable to test this due to the stream member of ResponseReader being private in rust-http let tcpstream: TcpStream = match response.stream.wrapped { NormalStream(TcpStream) => TcpStream.clone(), SslProtectedStream(SslStream<TcpStream>) => panic!("invalid network stream") }; */ // Dump headers, but only do the iteration if info!() is enabled. 
info!("got HTTP response {:s}, headers:", response.status.to_string()); if log_enabled!(log::INFO) { for header in response.headers.iter() { info!(" - {:s}: {:s}", header.header_name(), header.header_value()); } } if 3 == (response.status.code() / 100) { match response.headers.location { Some(new_url) => { // CORS (http://fetch.spec.whatwg.org/#http-fetch, status section, point 9, 10) match load_data.cors { Some(ref c) => { if c.preflight { // The preflight lied send_error(url, "Preflight fetch inconsistent with main fetch".to_string(), start_chan); return; } else { // XXXManishearth There are some CORS-related steps here, // but they don't seem necessary until credentials are implemented } } _ => {} } info!("redirecting to {:s}", new_url.serialize()); url = new_url; continue; } None => () } } let mut metadata = Metadata::default(url); metadata.set_content_type(&response.headers.content_type); metadata.headers = Some(response.headers.clone()); metadata.status = response.status.clone(); /* Once the stream is made public in ResponseReader, change None in the function below to tcpstream variable created above after the changes are tested. */ let progress_chan = match start_sending_opt(start_chan, metadata, None) { Ok(p) => p, _ => return }; loop { let mut buf = Vec::with_capacity(1024); unsafe { buf.set_len(1024); } match response.read(buf.as_mut_slice()) { Ok(len) => { unsafe { buf.set_len(len); } if progress_chan.send_opt(Payload(buf)).is_err() { // The send errors when the receiver is out of scope, // which will happen if the fetch has timed out (or has been aborted) // so we don't need to continue with the loading of the file here. return; } } Err(_) => { let _ = progress_chan.send_opt(Done(Ok(()))); break; } } } // We didn't get redirected. break; } }<|fim▁end|>
}
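The http_loader above guards its redirect loop in two ways: a hard cap (`max_redirects = 50`) and a `HashSet` of already-visited URLs to detect cycles before re-requesting one. Below is a stripped-down, std-only Rust sketch of just that guard; it uses plain `String` URLs and a closure standing in for the HTTP fetch, so it is an illustration of the check, not the servo loader itself.

use std::collections::HashSet;

enum FollowError {
    TooManyRedirects,
    RedirectLoop(String),
}

/// Walk a chain of redirects described by `next`, failing on cycles or when
/// `max_redirects` is exceeded -- the same two conditions http_loader checks.
fn follow_redirects<F>(start: &str, max_redirects: usize, next: F) -> Result<String, FollowError>
where
    F: Fn(&str) -> Option<String>,
{
    let mut visited: HashSet<String> = HashSet::new();
    let mut current = start.to_string();
    let mut iters = 0usize;
    loop {
        iters += 1;
        if iters > max_redirects {
            return Err(FollowError::TooManyRedirects);
        }
        // insert() returns false if the URL was already seen: a redirect loop.
        if !visited.insert(current.clone()) {
            return Err(FollowError::RedirectLoop(current));
        }
        match next(&current) {
            Some(location) => current = location, // 3xx: follow the new URL
            None => return Ok(current),           // non-redirect response: done
        }
    }
}

fn main() {
    // a -> b -> a is a loop and should be rejected.
    let result = follow_redirects("http://a", 50, |url| match url {
        "http://a" => Some("http://b".to_string()),
        "http://b" => Some("http://a".to_string()),
        _ => None,
    });
    assert!(matches!(result, Err(FollowError::RedirectLoop(_))));
}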
<|file_name|>redisstore.go<|end_file_name|><|fim▁begin|>package gocaptcha import ( "bytes" "crypto/md5" "encoding/gob" "encoding/hex" "fmt" "log" "strconv" "strings" "time" "gopkg.in/redis.v2" ) func init() { RegisterStore(storeName, CreateCaptchaRedisStore) } const ( captchaKeyFormat = "captcha:text:%s;rand:%s;time:%x;" storeName = "redis" ) type CaptchaRedisStore struct { lifeTime time.Duration stg *redis.Client } func CreateCaptchaRedisStore(config *StoreConfig) (StoreInterface, error) { lifeTime := config.LifeTime if config.Servers == nil || len(config.Servers) == 0 { return nil, fmt.Errorf("servers must not be empty") } fullAddr := strings.TrimPrefix(config.Servers[0], "redis://") pieces := strings.SplitN(fullAddr, "/", 2) db := 0 addr := pieces[0] if len(pieces) == 2 { db, _ = strconv.Atoi(pieces[1]) } opt := redis.Options{} opt.Addr = addr opt.DB = int64(db) opt.PoolSize = 0 stg := redis.NewTCPClient(&opt) return &CaptchaRedisStore{lifeTime, stg}, nil } func (this *CaptchaRedisStore) Get(key string) *CaptchaInfo { s, err := this.stg.Get(key).Result() if err != nil { log.Printf("get key in redis error:%s", err) return nil } captcha := CaptchaInfo{} this.decodeCaptachInfo([]byte(s), &captcha) return &captcha } func (this *CaptchaRedisStore) Add(captcha *CaptchaInfo) string { key := fmt.Sprintf(captchaKeyFormat, captcha.Text, randStr(20), captcha.CreateTime.Unix()) key = hex.EncodeToString(md5.New().Sum([]byte(key))) key = key[:32] val, err := this.encodeCaptchaInfo(captcha) if err == nil { if seterr := this.stg.SetEx(key, this.lifeTime, string(val)); seterr != nil { log.Printf("add key in redis error:%s", seterr) } } return key } func (this *CaptchaRedisStore) Del(key string) { this.stg.Del(key) } func (this *CaptchaRedisStore) Destroy() { } func (this *CaptchaRedisStore) OnConstruct() { } func (this *CaptchaRedisStore) OnDestruct() { } <|fim▁hole|> buf := new(bytes.Buffer) encoder := gob.NewEncoder(buf) err := encoder.Encode(captcha) if err != nil { log.Printf("encode captcha info error:%s", err) return nil, err } return buf.Bytes(), nil } func (this *CaptchaRedisStore) decodeCaptachInfo(b []byte, ret *CaptchaInfo) { buf := bytes.NewBuffer(b) decoder := gob.NewDecoder(buf) if err := decoder.Decode(ret); err != nil { log.Printf("decode captcha info error:%s", err) } return }<|fim▁end|>
func (this *CaptchaRedisStore) encodeCaptchaInfo(captcha *CaptchaInfo) ([]byte, error) {
<|file_name|>test_operator.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: skip-file from __future__ import print_function from __future__ import division import numpy as np import mxnet as mx import copy import math import random import itertools from distutils.version import LooseVersion from numpy.testing import assert_allclose, assert_array_equal from mxnet.test_utils import * from mxnet.operator import * from mxnet.base import py_str, MXNetError, _as_list from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises from common import xfail_when_nonstandard_decimal_separator, with_environment import pytest import os @assert_raises_cudnn_not_satisfied(min_version='5.1.10') @pytest.mark.serial def test_rnn_with_new_param(): rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm'] ngates_ = [1, 1, 3, 4] num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8 for bidirectional in [False, True]: directions = 2 if bidirectional else 1 for mode, ngates in zip(rnn_modes, ngates_): first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \ * ngates * (num_layers - 1) param_size = (first_layer_size + rest_layer_size) * directions sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional, state_outputs=False, state_size=state_size, name='rnn') bind_dict = { 'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)), 'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)), 'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size)) } if mode == 'lstm': bind_dict['rnn_state_cell'] = mx.ndarray.zeros( shape=(num_layers * directions, batch_size, state_size)) ex = sym._bind(default_context(), bind_dict) ex.forward(is_train=True) ex01 = ex.output_dict['rnn_output'].asnumpy() ex.forward(is_train=False) ex02 = ex.output_dict['rnn_output'].asnumpy() assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4) bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)) ex.copy_params_from(bind_dict) ex.forward(is_train=True) ex03 = ex.output_dict['rnn_output'].asnumpy() ex.forward(is_train=False) ex04 = ex.output_dict['rnn_output'].asnumpy() assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4) @pytest.mark.serial def test_lstm_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') CX = mx.sym.Variable('state_cell') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX, state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, 
name='LSTM') exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) out[0].wait_to_read() @pytest.mark.serial def test_gru_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU') exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) out[0].wait_to_read() @pytest.mark.serial def test_rnntanh_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH') exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) out[0].wait_to_read() @pytest.mark.serial def test_rnnrelu_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU') exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) out[0].wait_to_read() def test_RNN_float64(): if default_context().device_type == 'gpu': return sym = mx.sym.RNN( mx.sym.Variable('in'), mx.sym.Variable('par'), mx.sym.Variable('s'), state_size = (2), num_layers = 1, mode = 'rnn_tanh' ) dtype = 'float64' explicit_grad = { 'in': mx.nd.ones([2, 1, 2], dtype=dtype), 'par': mx.nd.ones([12], dtype=dtype), 's': mx.nd.ones([1, 1, 2], dtype=dtype) } args_grad = explicit_grad grad_req = 'write' ex = sym._bind(default_context(), { 'in': mx.nd.ones([2, 1, 2], dtype=dtype), 'par': mx.nd.ones([12], dtype=dtype), 's': mx.nd.ones([1, 1, 2], dtype=dtype) }, args_grad = args_grad, grad_req = grad_req ) ex.forward() ex.outputs[0].wait_to_read() def np_softmax(x, axis=-1, temperature=1.0): x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) x /= np.sum(x, axis=axis, keepdims=True) return x def check_elementwise_sum_with_shape(shape, n): # forward inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)] out = mx.symbol.ElementWiseSum(*inputs, name='esum') arr = [mx.nd.empty(shape) for i in range(n)] arr_grad = [mx.nd.empty(shape) for i in range(n)] for i in range(n): arr[i][:] = np.random.uniform(-10, 10, shape) exec1 = out._bind(default_context(), args=arr, args_grad=arr_grad) exec1.forward(is_train=True) out1 = exec1.outputs[0] out = sum(a.asnumpy() for a in arr) assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5) out_grad = mx.nd.empty(shape) out_grad[:] = np.random.uniform(-10, 10, shape) # backward exec1.backward([out_grad]) for a in arr_grad: assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5) @pytest.mark.serial def test_elementwise_sum(): nrepeat = 2 maxdim = 4 for repeat in range(nrepeat): for dim in range(1, maxdim): shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim)) check_elementwise_sum_with_shape(shape, np.random.randint(1, 8)) def check_concat_with_shape(shapes, dimension, skip_second): # if skip_second is True, second argument will not have gradient. 
# it is to test #1130 n = len(shapes) # forward target_dim = 0 for shape in shapes: target_dim += shape[dimension] inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)] out = mx.symbol.Concat(*inputs, name='conc',dim=dimension) arr = [mx.nd.empty(shape) for shape in shapes] for i in range(n): arr[i][:] = shapes[i][dimension] arr_np = [np.copy(narray.asnumpy()) for narray in arr] arr_grad = [mx.nd.empty(shape) for shape in shapes] dict_grad = {} arg_names = out.list_arguments() for name, g in zip(arg_names, arr_grad): if not skip_second or name != 'arg1': dict_grad[name] = g args = out.list_arguments() arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes))) out_grad = mx.nd.empty(out_shapes[0]) exec1 = out._bind(default_context(), args=arr, args_grad=dict_grad) exec1.forward(is_train=True) out1 = exec1.outputs[0] ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension) assert_almost_equal(out1, ret) # backward out1.copyto(out_grad) out_grad[:] += 1 exec1.backward([out_grad]) for i, name in enumerate(arg_names): if not skip_second or name != 'arg1': grad = dict_grad[name] np_grad = arr_np[i] assert_almost_equal(grad, np_grad + 1) def test_concat(): for dimension in range(4): n = 2 merge = [2, 3, 4, 5, 6] a = 2 b = 3 c = 4 # test 2D if dimension<2: for dim in range(2, 6): shapes = [] for i in range(dim): if dimension == 0: shapes.append((merge[i], a)) elif dimension == 1: shapes.append((a, merge[i])) check_concat_with_shape(shapes,dimension,True) check_concat_with_shape(shapes,dimension,False) # Test negative dim check_concat_with_shape(shapes, dimension - 2, True) check_concat_with_shape(shapes, dimension - 2, False) #test 3D if dimension<3: for dim in range(2, 6): shapes = [] for i in range(dim): if dimension == 0: shapes.append((merge[i], a,b)) elif dimension ==1: shapes.append((a,merge[i],b)) elif dimension ==2: shapes.append((a,b,merge[i])) check_concat_with_shape(shapes,dimension,True) check_concat_with_shape(shapes,dimension,False) # Test negative dim check_concat_with_shape(shapes, dimension - 3, True) check_concat_with_shape(shapes, dimension - 3, False) # test 4D for dim in range(2, 6): shapes = [] for i in range(dim): if dimension == 0: shapes.append((merge[i],a,b,c)) elif dimension == 1: shapes.append((a,merge[i],b,c)) elif dimension ==2: shapes.append((a,b,merge[i],c)) elif dimension ==3: shapes.append((a,b,c,merge[i])) check_concat_with_shape(shapes,dimension,True) check_concat_with_shape(shapes,dimension,False) # Test negative dim check_concat_with_shape(shapes, dimension - 4, True) check_concat_with_shape(shapes, dimension - 4, False) def test_slice_channel(): def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis): ins = [] if squeeze_axis: shape = np.random.randint(2, 5, data_ndim).tolist() shape[axis] = num_outputs out_ele_shape = [ele for ele in shape] del out_ele_shape[axis] else: shape = np.random.randint(1, 5, data_ndim).tolist() shape[axis] *= num_outputs out_ele_shape = [ele for ele in shape] out_ele_shape[axis] //= num_outputs data_npy = np.random.normal(size=shape) out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)] data = mx.sym.Variable('data') sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis) exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape) outputs = exe.forward(is_train=True, data=data_npy) assert len(exe.outputs) == num_outputs for i in range(num_outputs): gt = data_npy.take(np.arange(i * 
shape[axis]/num_outputs, (i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis) if squeeze_axis: assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape)) else: assert_almost_equal(outputs[i], gt) # test backward ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)] exe.backward(out_grads=ograd) if squeeze_axis: assert_almost_equal(exe.grad_arrays[0], np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy], axis=axis)) else: assert_almost_equal(exe.grad_arrays[0], np.concatenate(out_grads_npy, axis=axis)) check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True) check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False) check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False) check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True) def test_python_op(): X = mx.symbol.Variable('X') op = mx.operator.NumpyOp() s = op.get_symbol(X, name='numpy_op') x = mx.ndarray.ones((10))*10 dx = mx.ndarray.zeros((10)) dy = mx.ndarray.ones((10)) exec1 = s._bind(default_context(), args=[x], args_grad = {'X': dx}) exec1.forward(is_train=True) assert_almost_equal(x, exec1.outputs[0]) exec1.backward(dy) assert_almost_equal(dy, dx) def test_swapaxes(): data = mx.symbol.Variable('data') shape = (2, 3, 4) data_tmp = np.ones(shape) data_tmp[0] = 1 data_tmp[1] = 2 arr_data = mx.nd.array(data_tmp) swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2) swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2) exe_c = swap._bind(default_context(), args=[arr_data]) exe_c.forward(is_train=True) out = exe_c.outputs[0] swap0_ = np.swapaxes(data_tmp, 0, 2) swap_ = np.swapaxes(swap0_, 1, 2) assert_almost_equal(out, swap_) config = [((1, 1, 2), 0, 1), ((1, 1, 2), -1, -2), ((4, 5, 6, 7), 1, 1), ((4, 5, 6, 7), 2, 3), ((4, 5, 6, 7), -2, 2), ((4, 5, 6, 7), -2, -3)] for shape, axis1, axis2 in config: data_np = np.random.uniform(size=shape) data_mx = mx.nd.array(data_np, dtype=data_np.dtype) ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2) ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2) exe_c = ret_mx._bind(default_context(), args=[data_mx]) exe_c.forward(is_train=True) out = exe_c.outputs[0] assert_almost_equal(out, ret_np) @xfail_when_nonstandard_decimal_separator def test_scalarop(): data = mx.symbol.Variable('data') shape = (3, 4) data_tmp = np.ones(shape)*5 arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) arr_grad[:]=3 test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0)) npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0)) npout = 2/npout_1 check_symbolic_forward(test, [data_tmp], [npout]) npout_grad = 2.*2/5 npout_grad = 2*npout_grad /(npout_1 *npout_1 ) check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad]) def test_scalar_pow(): data = mx.symbol.Variable('data') shape = (1, 1) data_tmp = np.ones(shape) test = data ** 2 check_numeric_gradient(test, [data_tmp]) check_symbolic_forward(test, [data_tmp], [data_tmp ** 2]) check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp]) def test_symbol_pow(): shape = (1, 1) data = mx.symbol.Variable('data') data_tmp = np.ones(shape)*2 exp = mx.symbol.Variable('exp') exp_tmp = np.ones(shape)*3 test = data**exp check_numeric_gradient(test, [data_tmp, exp_tmp]) check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp]) data_dir = data_tmp**(exp_tmp - 1) * exp_tmp exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp) check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, 
exp_dir]) def test_fully_connected(): # Create data of given shape as a uniform distribution centered on 0.0 def random_data(shape, dtype=np.float32): return mx.nd.random.uniform(low=-0.5, high=0.5, shape=shape, dtype=dtype) data = mx.sym.var("data") fc_weight = mx.sym.var("weight") fc_bias = mx.sym.var("bias") fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc') data = random_data(shape=(5, 5, 5, 13)) fc_weight = random_data(shape=(10, 325)) fc_bias = random_data(shape=(10)) fc_bias2 = random_data(shape=(10, 1)) data_np = data.asnumpy().reshape(5, 325) fc_weight_np = np.transpose(fc_weight.asnumpy()) fc_bias_np = fc_bias.asnumpy() res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy() check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res}) check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}) # TODO: Fix Bug #15032 when bias has ndim > 1 #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res}) def test_pow_fn(): shape = (3, 4) exp = mx.symbol.Variable("exp") x = np.ones(shape)*3 for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]: check_numeric_gradient(y, [x], numeric_eps=1E-3) check_symbolic_forward(y, [x], [2**x]) check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x]) def test_relu(): def frelu(x): return np.maximum(x, 0.0) def frelu_grad(x): return np.float32(1.0) * (x > np.float32(0.0)) shape = (3, 4) x = mx.symbol.Variable("x") y = mx.sym.relu(x) xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32') eps = 1e-4 # Avoid finite difference method inaccuracies due to discontinuous gradient at the origin. # Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195. xa[abs(xa) < eps] = 1.0 ya = frelu(xa) ga = frelu_grad(xa) check_numeric_gradient(y, [xa], numeric_eps=eps) check_symbolic_forward(y, [xa], [ya]) check_symbolic_backward(y, [xa], [np.ones(shape)], [ga]) # NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues, # the analytical checks are still performed on each and every data type to verify the correctness. 
def test_leaky_relu(): def fleaky_relu(x, act_type, slope=0.25): neg_indices = x < 0 out = x.copy() if act_type == 'elu': out[neg_indices] = slope * np.expm1(out[neg_indices]) elif act_type == 'leaky': out[neg_indices] = slope * out[neg_indices] return out def fleaky_relu_grad(grad, x, y, act_type, slope=0.25): neg_indices = x < 0 out = np.ones(x.shape) if act_type == 'elu': out[neg_indices] = y[neg_indices] + slope elif act_type == 'leaky': out[neg_indices] = slope return out * grad for ndim in range(1, 4): shape = rand_shape_nd(ndim) x = mx.symbol.Variable("x") slp = 0.25 for dtype in [np.float16, np.float32, np.float64]: xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype) eps = 1e-4 rtol = 1e-2 atol = 1e-3 xa[abs(xa) < eps] = 1.0 for act_type in ['elu', 'leaky']: y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type) ya = fleaky_relu(xa, slope=slp, act_type=act_type) ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type) # Skip numeric check for float16 type to get rid of flaky behavior if dtype is not np.float16: check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype) check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype) # NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues, # the analytical checks are still performed on each and every data type to verify the correctness. def test_prelu(): def fprelu(x, gamma): pos_indices = x > 0 out = x.copy() if len(x.shape) == 4: out = out.transpose(2,3,0,1) out = np.multiply(out, gamma) out = out.transpose(2,3,0,1) else: out = np.multiply(out, gamma) out[pos_indices] = x[pos_indices] return out def fprelu_grad(x, y, gamma): pos_indices = x > 0 if len(x.shape) == 4: grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma) grad_x = grad_x.transpose(2,3,0,1) else: grad_x = np.multiply(np.ones(x.shape), gamma) grad_gam = np.zeros(gamma.shape) copy_x = x.copy() copy_x[pos_indices] = 0.0 grad_x[pos_indices] = 1.0 if len(gamma.shape) > 1 and len(x.shape) != 4: grad_gam = copy_x elif len(gamma.shape) > 1 and len(x.shape) == 4: grad_gam = np.sum(copy_x, axis=(2,3)) elif gamma.shape[0] == 1: grad_gam = np.sum(np.sum(copy_x)) elif gamma.shape[0] > 1 and len(x.shape) != 4: grad_gam = np.sum(copy_x, axis=0) elif gamma.shape[0] > 1 and len(x.shape) == 4: grad_gam = np.sum(copy_x, axis=(0,2,3)) return (grad_x, grad_gam) x = mx.symbol.Variable("x") gamma = mx.symbol.Variable("gamma") for shape in [(3,4), (3,4,4,5)]: for dtype in [np.float16, np.float32, np.float64]: for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]: gam_full = np.array([gam, gam, gam]) xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype) rtol = 1e-2 atol = 1e-3 eps = 1e-4 xa[abs(xa) < eps] = 1.0 y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu') ya = fprelu(xa, gam) ya_full = fprelu(xa, gam_full) g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam) g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full) # Skip numeric check for float16 type to get rid of flaky behavior if dtype is not np.float16: check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype) check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype) check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)], 
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)], [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype) def test_selu(): alpha = 1.6732632423543772848170429916717 lamb = 1.0507009873554804934193349852946 def fselu(x): neg_indices = x < 0 out = x.copy() out[neg_indices] = alpha * np.expm1(out[neg_indices]) return out * lamb def fselu_grad(grad, x, y): neg_indices = x < 0 out = np.ones(x.shape).astype(x.dtype) out[neg_indices] = y[neg_indices] + alpha return out * lamb shape = (3, 4) x = mx.sym.Variable("x") y = mx.sym.LeakyReLU(data=x, act_type="selu") for dtype in [np.float16, np.float32, np.float64]: xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype) eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4) if dtype is np.float16: xa /= 10.0 xa[abs(xa) < eps] = 0.01 ya = fselu(xa) ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya) check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype) check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype) def test_gelu(): CUBE_CONSTANT = 0.044715 ROOT_TWO_OVER_PI = 0.7978845608028654 def g(x): return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3)) def g_grad(x): return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2)) def f(x): return 1.0 + np.tanh(g(x)) def f_grad(x): return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x) def fgelu(x): return 0.5 * x * f(x) def fgelu_grad(grad, x, y): return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x)) shape = (3, 4) x = mx.sym.Variable("x") y = mx.sym.LeakyReLU(data=x, act_type="gelu") for dtype in [np.float16, np.float32, np.float64]: xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype) eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5) if dtype is np.float16: xa /= 10.0 xa[abs(xa) < eps] = 0.01 ya = fgelu(xa) ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya) check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype) check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype) def test_sigmoid(): def fsigmoid(a): return np.divide(1.0, (1.0 + np.exp(-a))) shape = (3, 4) x = mx.symbol.Variable("x") y = mx.sym.sigmoid(x) xa = np.random.uniform(low=-1.0,high=1.0,size=shape) ya = fsigmoid(xa) check_numeric_gradient(y, [xa], numeric_eps=1E-3) check_symbolic_forward(y, [xa], [ya]) check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)]) def test_shape_array(): for i in range(1,6): shape = rand_shape_nd(i) x = mx.sym.var('x') y = mx.sym.shape_array(x) xa = mx.nd.array(np.random.ranf(shape)) xg = mx.nd.empty(xa.shape) ya = np.shape(xa) yg = mx.nd.ones(ya) exe = y._bind(ctx=default_context(), args={'x': xa}, args_grad={'x': xg}) exe.forward(is_train=True) exe.backward([yg]) yo = exe.outputs[0].asnumpy() same(yo, ya) assert_almost_equal(xg, np.zeros_like(xg.asnumpy())) def test_size_array(): for i in range(1,6): shape = rand_shape_nd(i) x = mx.sym.var('x') y = mx.sym.size_array(x) xa = mx.nd.array(np.random.ranf(shape)) xg = mx.nd.empty(xa.shape) ya = np.size(xa) yg = mx.nd.ones(ya) exe = y._bind(ctx=default_context(), args={'x': xa}, 
args_grad={'x': xg}) exe.forward(is_train=True) exe.backward([yg]) yo = exe.outputs[0].asnumpy() same(yo, ya) assert_almost_equal(xg, np.zeros_like(xg.asnumpy())) def test_hard_sigmoid(): def fhardsigmoid(a, alpha=0.2, beta=0.5): return np.maximum(np.zeros(a.shape, dtype=a.dtype), np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta)) def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5): orig_out = fhardsigmoid(a, alpha, beta) res = out_grad * alpha res[orig_out <= 0.0] = 0.0 res[orig_out >= 1.0] = 0.0 return res shape = (3, 4) x = mx.symbol.Variable("x") y = mx.sym.hard_sigmoid(x) for dtype in [np.float16, np.float32, np.float64]: if dtype is np.float16: rtol = 1e-2 else: rtol = 1e-3 atol = 1e-3 eps = 1e-3 xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype) # function not differentiable at x=2.5 and -2.5 xa[abs(xa-2.5) < eps] -= 2 * eps xa[abs(xa+2.5) < eps] += 2 * eps ya = fhardsigmoid(xa) grad_xa = fhardsigmoid_grad(xa, np.ones(shape)) if dtype is not np.float16: check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype) check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype) def test_softsign(): def fsoftsign(a): return np.divide(a, (1.0 + np.abs(a))) def fsoftsign_grad(a): return np.divide(1.0, np.square((1.0 + np.abs(a)))) shape = (3, 4) x = mx.symbol.Variable("x") y = mx.sym.softsign(x) xa = np.random.uniform(low=-1.0,high=1.0,size=shape) ya = fsoftsign(xa) ya_grad = fsoftsign_grad(xa) check_numeric_gradient(y, [xa], numeric_eps=1E-3) check_symbolic_forward(y, [xa], [ya]) check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad]) def test_binary_logic(): def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True): x = mx.symbol.Variable("x") y = mx.symbol.Variable("y") z = logic_sym(x, y) x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32) y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32) exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape) mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0] assert_almost_equal(mx_out, forward_gt(x_npy, y_npy)) exe.backward() if test_scalar: z_lscalar = logic_sym(1, y) z_rscalar = logic_sym(x, 1) exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape) exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape) mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0] mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0] assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy)) assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1)) exe_lscalar.backward() exe_rscalar.backward() # Test the no-broadcasting binary logic ops + scalar logic ops _inner_test(forward_gt=lambda x, y: x == y, logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10)) _inner_test(forward_gt=lambda x, y: x > y, logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10)) _inner_test(forward_gt=lambda x, y: x >= y, logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10)) _inner_test(forward_gt=lambda x, y: x < y, logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10)) _inner_test(forward_gt=lambda x, y: x <= y, logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10)) _inner_test(forward_gt=lambda x, y: x != y, logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10)) # Test the broadcasting binary logic ops _inner_test(forward_gt=lambda x, y: x == y, 
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y), x_shape=(1, 10), y_shape=(10, 1), test_scalar=False) _inner_test(forward_gt=lambda x, y: x > y, logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y), x_shape=(1, 10), y_shape=(10, 1), test_scalar=False) _inner_test(forward_gt=lambda x, y: x >= y, logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y), x_shape=(1, 10), y_shape=(10, 1), test_scalar=False) _inner_test(forward_gt=lambda x, y: x < y, logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y), x_shape=(1, 10), y_shape=(10, 1), test_scalar=False) _inner_test(forward_gt=lambda x, y: x <= y, logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y), x_shape=(1, 10), y_shape=(10, 1), test_scalar=False) _inner_test(forward_gt=lambda x, y: x != y, logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y), x_shape=(1, 10), y_shape=(10, 1), test_scalar=False) def test_unary_logic(): def reference(a, dtype): return np.logical_not(a).astype(dtype) shape = (3, 4) xa = np.random.randint(-2, 2, size=shape).astype(np.float32) mx_xa = mx.nd.array(xa) mx_out = mx.nd.logical_not(mx_xa) assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype)) x = mx.sym.Variable('x') y = mx.sym.logical_not(data=x) exe = y._simple_bind(ctx=default_context(), x=shape) sym_out = exe.forward(is_train=True, x=mx_xa)[0] assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype)) def test_embedding(): in_dim = 10 out_dim = 4 batch = 24 data = mx.sym.Variable("data") embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed") exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,)) arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays)) grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays)) np_data = np.random.randint(low=0, high=in_dim, size=batch) np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape) np_onehot = np.zeros((batch, in_dim)) np_onehot[np.arange(batch), np_data] = 1.0 # forward arg_map["data"][:] = np_data arg_map["embed_weight"][:] = np_weight exe_test.forward(is_train=True) # Non-zero atol required, as exposed by seed 781663739 rtol = 1e-5 atol = 1e-5 assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol) # backward np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape) grad = mx.nd.zeros(np_grad.shape) grad[:] = np_grad exe_test.backward([grad]) assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol) # check ops handle duplicate input correctly. 
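# For example, with y = x * x bound to a single input array, the input gradient must
# accumulate the contribution of both uses of x, i.e. dL/dx = 2 * x * dL/dy; the test below
# asserts exactly this (out_grad of 1 and x = 5 gives a gradient of 2.0 * data_tmp).
# (Illustrative comment, not part of the original test.)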
def test_binary_op_duplicate_input():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 1
    square = data * data
    exe_square = square._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_square.forward(is_train=True)
    assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
    exe_square.backward(out_grad)
    assert_almost_equal(arr_grad, 2.0 * data_tmp)

def test_sign():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3

    test = mx.sym.sign(data)
    exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.sign(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = out_grad.asnumpy()
    npout_grad = 0
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)

def test_round_ceil_floor():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5.543
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 2

    test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
    exe_test = test._bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
    assert_almost_equal(out, npout)

def test_trunc():
    data_tmp = np.random.rand(3, 4) * 10 - 5
    arr_data = mx.nd.array(data_tmp)
    data = mx.symbol.Variable('data')
    test = mx.sym.trunc(data)

    exe_test = test._bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    # 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454 npout = np.trunc(np.float32(data_tmp)) assert_almost_equal(out, npout) def test_rsqrt_cos_sin(): data = mx.symbol.Variable('data') shape = (3, 4) data_tmp = np.ones(shape) data_tmp[:]=5 arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) arr_grad[:]=3 test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data) exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp) assert_almost_equal(out, npout) out_grad = mx.nd.empty(shape) out_grad[:] = 2 npout_grad = out_grad.asnumpy() npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp) exe_test.backward(out_grad) assert_almost_equal(arr_grad, npout_grad) def test_maximum_minimum(): data1 = mx.symbol.Variable('data1') data2 = mx.symbol.Variable('data2') shape = (3, 4) data_tmp1 = np.random.rand(3,4) data_tmp2 = np.random.rand(3,4) data_tmp1[:] = 2 data_tmp2[:] = 3 arr_data1 = mx.nd.array(data_tmp1) arr_data2 = mx.nd.array(data_tmp2) arr_grad1 = mx.nd.empty(shape) arr_grad2 = mx.nd.empty(shape) test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2) exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2) assert_almost_equal(out, npout) out_grad = mx.nd.empty(shape) out_grad[:] = 2 exe_test.backward(out_grad) npout_grad = np.ones(shape) npout_grad[:] = 2 mask1 = (data_tmp1 > data_tmp2).astype('float') mask2 = (data_tmp1 < data_tmp2).astype('float') npout_grad1 = npout_grad * mask1 + npout_grad * mask2 npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2) assert_almost_equal(arr_grad1, npout_grad1) assert_almost_equal(arr_grad2, npout_grad2) def test_maximum_minimum_scalar(): data1 = mx.symbol.Variable('data') shape = (3, 4) data_tmp1 = np.random.rand(3,4) data_tmp1[:] = 2 arr_data1 = mx.nd.array(data_tmp1) arr_grad1 = mx.nd.empty(shape) test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4) exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4) assert_almost_equal(out, npout) out_grad = mx.nd.empty(shape) out_grad[:] = 2 exe_test.backward(out_grad) npout_grad = np.ones(shape) npout_grad[:] = 2 mask1 = (data_tmp1 > 3).astype('float') mask2 = (9 > data_tmp1).astype('float') mask3 = (5 < data_tmp1).astype('float') mask4 = (data_tmp1 < 4).astype('float') npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4 assert_almost_equal(arr_grad1, npout_grad1) def test_abs(): data = mx.symbol.Variable('data') shape = (3, 4) data_tmp = np.ones(shape) data_tmp[:]=5 arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) arr_grad[:]=3 test = mx.sym.abs(data) exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = abs(data_tmp) assert_almost_equal(out, npout) out_grad = mx.nd.empty(shape) out_grad[:] = 2; npout_grad = out_grad.asnumpy() npout_grad = npout_grad * 
        np.sign(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)

def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
    """configure A: input --> conv --> deconv --> output.
       the convolution and deconvolution have similar parameters, which ensures that the
       output shape is the same as the input shape and that conv and deconv share the same
       weights; if the input values to forward() and backward() are the same, then their
       outputs should also be the same;
    """
    assert input_shape[1] == num_filter
    data = mx.sym.Variable(name="data")
    conv = mx.sym.Convolution(
        data=data, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="conv")
    deconv = mx.sym.Deconvolution(
        data=conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="deconv")

    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    out_grad = input_data
    args = {}
    args["data"] = input_data
    args['conv_weight'] = args['deconv_weight'] = \
        mx.random.normal(0, 1, (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    args_grad = [mx.nd.empty(s) for s in arg_shapes]

    exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0]
    exe.backward(out_grad)
    assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)

    args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
    args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
    exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)

def check_deconvolution_gradient(input_shape, num_filter, pad):
    """configure A: input --> conv --> output.
       configure B: input --> deconv --> output
       the convolution and deconvolution have similar parameters, which ensures that the
       output shape is the same as the input shape;
       during backward(), if the input of A equals the output of B, and the output of A
       equals the input of B, then the weight gradients should be the same;
    """
    ndim = len(pad)
    stride = (1,) * ndim
    kernel = tuple(2 * np.array(pad) + 1)
    data_conv = mx.sym.Variable(name="data_conv")
    conv = mx.sym.Convolution(
        data=data_conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="conv")
    data_deconv = mx.sym.Variable(name="data_deconv")
    deconv = mx.sym.Deconvolution(
        data=data_deconv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="deconv")

    conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    conv_args = {}
    conv_args["data_conv"] = conv_data
    conv_args['conv_weight'] = \
        mx.random.normal(0, 1, (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    conv_args_grad = [mx.nd.zeros(conv_data.shape),
                      mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
    exe_conv.forward(is_train=True)
    conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
    exe_conv.backward(conv_out_grad)

    deconv_data = conv_out_grad
    deconv_args = {}
    deconv_args['data_deconv'] = deconv_data
    deconv_args['deconv_weight'] = conv_args['conv_weight']
    deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
                        mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
                                  np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
                              mx.nd.array(deconv_addto_args_grad_npy[1])]
    exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
    exe_deconv.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv.backward(deconv_out_grad)
    assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
    # Test AddTo
    exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
                                    args_grad=deconv_addto_args_grad,
                                    grad_req="add")
    exe_deconv_addto.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv_addto.backward(deconv_out_grad)
    assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
                        deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)

def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
    data = mx.sym.Variable(name="data")
    if target_shape:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
            target_shape=target_shape)
    else:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    default_target_size = 8
    if target_shape is None:
        target_shape = (default_target_size,) * len(kernel)
    assert out_shapes[0] == (input_shape[0], 5) + target_shape

@pytest.mark.serial
def test_deconvolution():
    # 2D
    check_deconvolution_target_shape(
        input_shape=(2, 3, 4, 4),
        kernel=(3, 3),
        stride=(2, 2),
        target_shape=(8, 8),
        pad=(99, 99),  # will be ignored
        adj=(101, 101),  # will be ignored
    )
    check_deconvolution_target_shape(
        input_shape=(2, 3, 4, 4),
        kernel=(3, 3),
        stride=(2, 2),
        pad=(1, 1),
        adj=(1, 1),
    )
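    # Illustrative note (not from the original test): for the call above that omits
    # target_shape, the inferred spatial size follows the usual transposed-convolution
    # relation out = (in - 1) * stride - 2 * pad + kernel + adj, e.g.
    # (4 - 1) * 2 - 2 * 1 + 3 + 1 = 8, matching default_target_size in
    # check_deconvolution_target_shape; when target_shape is given explicitly, pad and
    # adj are ignored for shape inference.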
check_deconvolution_forward_backward( input_shape = (1,1,5,5), num_filter = 1, kernel = (3,3), stride = (1,1), pad = (1,1) ) check_deconvolution_forward_backward( input_shape = (32,3,28,28), num_filter = 3, kernel = (3,3), stride = (1,1), pad = (1,1) ) check_deconvolution_forward_backward( input_shape = (10, 3, 403, 403), num_filter = 3, kernel = (7,7), stride = (5,5), pad = (2,2) ) check_deconvolution_gradient( input_shape = (1,3,5,5), num_filter = 3, pad = (1,1) ) check_deconvolution_gradient( input_shape = (5,3,100,100), num_filter = 3, pad = (3,3) ) # 1D check_deconvolution_target_shape( input_shape = (2,3,4), kernel = (3,), stride = (2,), target_shape = (8,), pad = (99,), # will be ignored adj = (101,), # will be ignored ) check_deconvolution_target_shape( input_shape = (2,3,4), kernel = (3,), stride = (2,), pad = (1,), adj = (1,), ) check_deconvolution_forward_backward( input_shape = (1,1,5), num_filter = 1, kernel = (3,), stride = (1,), pad = (1,) ) check_deconvolution_forward_backward( input_shape = (32,3,28), num_filter = 3, kernel = (3,), stride = (1,), pad = (1,) ) check_deconvolution_forward_backward( input_shape = (10, 3, 403), num_filter = 3, kernel = (7,), stride = (5,), pad = (2,) ) check_deconvolution_gradient( input_shape = (1,3,5), num_filter = 3, pad = (1,) ) check_deconvolution_gradient( input_shape = (5,3,100), num_filter = 3, pad = (3,) ) def test_deconvolution_forward_with_bias(): """Check if deconvolution forward can work well with bias=True """ def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)): x = mx.sym.Variable('x') w = mx.sym.Variable('w') input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu()) y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad) exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null') exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape) exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape) exe.forward(is_train=False) o = exe.outputs[0] t = o.asnumpy() check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,)) check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,)) check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1)) check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1)) def check_nearest_upsampling_with_shape(shapes, scale, root_scale): arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)} arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)} up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale) exe = up._bind(default_context(), args=arr, args_grad=arr_grad) exe.forward(is_train=True) exe.backward(exe.outputs) for k in range(len(shapes)): name = 'arg_%d'%k assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4) def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter): def _init_bilinear(arr, f): weight = np.zeros(np.prod(arr.shape), dtype='float32') shape = arr.shape c = (2 * f - 1 - f % 2) / (2. 
* f) for i in range(np.prod(shape)): x = i % shape[3] y = (i // shape[3]) % shape[2] weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c)) arr[:] = weight.reshape(shape) return arr up = mx.sym.UpSampling(mx.sym.Variable("data"), mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale, num_filter=num_filter, num_args=2) arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape) arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()), 'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))} arr_grad = [mx.nd.empty(s) for s in arg_shapes] exe = up._bind(default_context(), args=arr, args_grad=arr_grad) exe.forward(is_train=True) out = exe.outputs[0].asnumpy() exe.backward(exe.outputs) target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale) assert out.shape == data_shape[:2] + target_shape def test_nearest_upsampling(): for root_scale in [1,2,3]: for scale in [1,2,3]: for num_shape in [1,2,3]: for base in [1,2,3]: shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)] check_nearest_upsampling_with_shape(shapes, scale, root_scale) def test_bilinear_upsampling(): rootscale = [2,3] scales = [1,2,3] filters = [1,2,3] bases = [1,2,3] for params in itertools.product(rootscale, scales, filters, bases): root_scale, scale, num_filter, base = params # bilinear upsampling takes only 1 data and 1 weight # multi input mode is not applicable dimension = base*root_scale*scale kernel = 2 * root_scale - root_scale % 2 data_shape = (1, num_filter, dimension, dimension) weight_shape = (1, num_filter, kernel, kernel) check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter) def test_batchnorm_training(): def check_batchnorm_training(stype): for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]: data_tmp = np.random.normal(-0.1, 0.1, size=shape) s = shape[1], gamma = np.ones(s) beta = np.ones(s) gamma[1] = 3 beta[0] = 3 rolling_mean = np.random.uniform(size=s) rolling_std = np.random.uniform(size=s) data = mx.symbol.Variable('data', stype=stype) in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype), mx.nd.array(beta).tostype(stype)] mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)] test = mx.symbol.BatchNorm(data, fix_gamma=True) check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2) test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True) check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2) test = mx.symbol.BatchNorm(data, fix_gamma=False) check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2) test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True) check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2) # Test varying channel axis dim = len(shape) for chaxis in range(-dim, dim): chaxis_true = chaxis if chaxis < 0: chaxis_true = dim + chaxis shapex = shape channel_count = shapex[chaxis_true] data_tmp = np.random.normal(-0.1, 0.1, size=shapex) gamma = np.ones(channel_count) beta = np.ones(channel_count) if channel_count > 1: gamma[1] = 3 beta[0] = 3 in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype), mx.nd.array(beta).tostype(stype)] xrolling_mean = np.random.uniform(size=channel_count) xrolling_std = 
np.random.uniform(size=channel_count) xmean_std = [mx.nd.array(xrolling_mean).tostype(stype), mx.nd.array(xrolling_std).tostype(stype)] test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis) check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01) test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis) check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01) test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis) check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01) test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis) check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01) check_batchnorm_training('default') @xfail_when_nonstandard_decimal_separator @pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm']) @pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4), (4, 6, 4, 5), (4, 5, 6, 4, 5)]) @pytest.mark.parametrize('fix_gamma', [False, True]) @pytest.mark.parametrize('cudnn_off', [False, True]) @pytest.mark.parametrize('output_mean_var', [False, True]) def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var): if op_name == 'BatchNorm': op = mx.nd.BatchNorm elif op_name == 'SyncBatchNorm': op = mx.nd.contrib.SyncBatchNorm else: raise ValueError(f'Not supported {op_name}') momentum = 0.9 epsilon = 1e-5 def _test_batchnorm_impl(axis, data_grad_req, gamma_grad_req, beta_grad_req): kwargs = dict(output_mean_var=output_mean_var) if op_name == 'SyncBatchNorm': if axis != 1: return key = str(op) + str(shape) + str(axis) kwargs.update(dict(key=key)) if cudnn_off: return else: kwargs.update(dict(axis=axis, cudnn_off=cudnn_off)) nch = shape[axis] if not fix_gamma: bn_gamma = mx.nd.random.uniform(shape=(nch,)) bn_gamma.attach_grad(grad_req=gamma_grad_req) else: bn_gamma = mx.nd.ones(shape=(nch,)) bn_beta = mx.nd.random.uniform(shape=(nch,)) bn_beta.attach_grad(grad_req=beta_grad_req) bn_running_mean = mx.nd.zeros(nch) bn_running_var = mx.nd.ones(nch) running_mean = mx.nd.zeros(nch) running_var = mx.nd.ones(nch) num_iters = 10 expand_shape = [1] * len(shape) expand_shape[axis] = shape[axis] data = mx.nd.random.uniform(shape=shape) data.attach_grad(grad_req=data_grad_req) adX, adW, adb = 0, 0, 0 is_train = data_grad_req != 'null' or \ (not fix_gamma and gamma_grad_req != 'null') or \ beta_grad_req != 'null' for _ in range(num_iters): if data_grad_req != 'add': data = mx.nd.random.uniform(shape=shape) data.attach_grad(grad_req=data_grad_req) ograd = mx.nd.random.uniform(shape=shape) with mx.autograd.record(): output = op(data, bn_gamma, bn_beta, bn_running_mean, bn_running_var, momentum=momentum, eps=epsilon, fix_gamma=fix_gamma, **kwargs) if output_mean_var: output, output_mean, output_std = output if is_train: output.backward(ograd) mx.nd.waitall() data_mean = data.mean( axis=axis, exclude=True, keepdims=True) data_var = (data - data_mean).square().mean(axis=axis, exclude=True, keepdims=True) target_output = (data - data_mean) / \ (data_var + epsilon).sqrt() * \ bn_gamma.reshape(expand_shape) + \ bn_beta.reshape(expand_shape) # squeeze data_mean and data_var data_mean_flat = data_mean.squeeze() data_var_flat = data_var.squeeze() running_mean = running_mean * momentum + \ data_mean_flat * (1 - momentum) m = np.prod(shape) / shape[axis] # cudnn uses m-1 in the denominator of its sample variance calculation, not m sample_var_adjust = 1.0 if cudnn_off or fix_gamma 
else m / (m-1) running_var = running_var * momentum + \ data_var_flat * sample_var_adjust * (1 - momentum) W = bn_gamma.reshape(expand_shape) dnx = ograd * W xsm = data - data_mean nd = 1.0 / mx.nd.sqrt(data_var + epsilon) nx = xsm * nd dvar = (dnx * xsm).sum(axis=axis, keepdims=True, exclude=True) * (-0.5) * mx.nd.power(nd, 3) dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \ dvar * xsm.mean(axis=axis, keepdims=True, exclude=True) * 2.0 dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m) dW = (ograd * nx).sum(axis=axis, exclude=True) db = ograd.sum(axis=axis, exclude=True) adX = dX if data_grad_req != 'add' else adX + dX adW = dW if gamma_grad_req != 'add' else adW + dW adb = db if beta_grad_req != 'add' else adb + db atol, rtol = 5e-2, 5e-2 if output_mean_var: assert_almost_equal(output_mean.asnumpy(), data_mean_flat.asnumpy(), atol=atol, rtol=rtol) if op != mx.nd.contrib.SyncBatchNorm: assert_almost_equal(output_std.asnumpy(), (1.0 / (data_var_flat + epsilon).sqrt()).asnumpy(), atol=atol, rtol=rtol) else: assert_almost_equal(output_std.asnumpy(), data_var_flat.asnumpy(), atol=atol, rtol=rtol) assert_almost_equal(output.asnumpy(), target_output.asnumpy(), atol=atol, rtol=rtol) if is_train: assert_almost_equal(bn_running_mean.asnumpy( ), running_mean.asnumpy(), atol=atol, rtol=rtol) assert_almost_equal(bn_running_var.asnumpy( ), running_var.asnumpy(), atol=atol, rtol=rtol) if data_grad_req != 'null': assert_almost_equal(data.grad.asnumpy(), adX.asnumpy(), atol=atol, rtol=rtol) if not fix_gamma: if gamma_grad_req != 'null': assert_almost_equal( bn_gamma.grad.asnumpy(), adW.asnumpy(), atol=atol, rtol=rtol) else: assert((bn_gamma.asnumpy() == 1).all()) if beta_grad_req != 'null': assert_almost_equal( bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol) grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add'] for data_grad_req in grad_reqs: for gamma_grad_req in grad_reqs: if fix_gamma and gamma_grad_req != 'null': continue for beta_grad_req in grad_reqs: for axis in range(len(shape)): _test_batchnorm_impl(axis, data_grad_req, gamma_grad_req, beta_grad_req) def test_groupnorm(): acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'} def x_hat_helper(x, num_groups, eps): dtype = x.dtype dshape = x.shape assert len(dshape) == 4 acc_type = acc_types[str(dtype)] new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3]) new_moments_shape = (dshape[0], num_groups, 1, 1, 1) data = x.reshape(new_shape) mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype) std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps) x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape) return x_hat, mean, std def np_groupnorm(data, gamma, beta, num_groups, eps): new_param_shape = (1, dshape[1], 1, 1) x_hat, mean, std = x_hat_helper(data, num_groups, eps) out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape) return out, mean, std def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps): x_hat, mean, std = x_hat_helper(data, num_groups, eps) new_shape = x_hat.shape dshape = data.shape dtype = data.dtype new_moments_shape = (new_shape[0], num_groups, 1, 1, 1) new_param_shape = (1, dshape[1], 1, 1) acc_type = acc_types[str(dtype)] ograd = ograd.reshape(new_shape) data = data.reshape(new_shape) gamma = gamma.reshape(new_param_shape) beta = beta.reshape(new_param_shape) mean = 
mean.reshape(new_moments_shape) std = std.reshape(new_moments_shape) beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten() gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten() x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1) ograd_mult = x_hat_grad / std red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype) data_grad = ograd_mult - red_out red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype) data_grad = data_grad - x_hat * red_out return data_grad.reshape(dshape), gamma_grad, beta_grad batch_size = random.randint(1, 8) num_groups = random.randint(2, 3) num_channels = random.randint(2, 3) * num_groups height = random.randint(1, 5) width = random.randint(1, 5) dshape = (batch_size, num_channels, height, width) param_shape = (num_channels,) temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width) np_data = np.random.uniform(0.2, 1.0, dshape) np_gamma = np.random.uniform(-1.0, 1.0, param_shape) np_beta = np.random.uniform(-1.0, 1.0, param_shape) data_sym = mx.sym.Variable("data") gamma_sym = mx.sym.Variable("gamma") beta_sym = mx.sym.Variable("beta") for dtype in [np.float16, np.float32, np.float64]: eps = 1e-2 if dtype == np.float16 else 1e-5 mx_data = mx.nd.array(np_data, dtype=dtype) mx_gamma = mx.nd.array(np_gamma, dtype=dtype) mx_beta = mx.nd.array(np_beta, dtype=dtype) np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype), np_gamma.astype(dtype), np_beta.astype(dtype), num_groups=num_groups, eps=eps) mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym, num_groups=num_groups, eps=eps, output_mean_var=True) check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std], rtol=1e-2 if dtype == np.float16 else 1e-3, atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype) mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym, num_groups=num_groups, eps=eps, output_mean_var=False) np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype) np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd, np_data.astype(dtype), np_gamma.astype(dtype), np_beta.astype(dtype), np_mean, np_std, num_groups, eps) check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)], [np_data_grad, np_gamma_grad, np_beta_grad], rtol=1e-2 if dtype == np.float16 else 1e-3, atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype) def test_convolution_grouping(): for dim in [1, 2, 3]: num_filter = 4 for num_group in [1, 2]: kernel = (3,) * dim shape = (1, 4) + (9,) * dim x = mx.sym.Variable('x') w = mx.sym.Variable('w') b = mx.sym.Variable('b') y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel) xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1) wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0) bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0) y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i], num_filter=num_filter//num_group, kernel=kernel) for i in range(num_group)]) exe1 = y1._simple_bind(default_context(), x=shape) exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,)) for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays): arr1[:] = 
np.float32(np.random.normal(size=arr1.shape)) arr2[:] = arr1 exe1.forward(is_train=True) exe1.backward(exe1.outputs[0]) exe2.forward(is_train=True) exe2.backward(exe2.outputs[0]) for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays): np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3) @pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052") def test_depthwise_convolution(): for dim in [1,2]: for num_base in [1, 4, 16, 32, 64]: for kernel_x in [3, 5]: for stride_x in [1, 2]: for pad_x in [0, 1]: for in_size in [7, 32]: kernel = (kernel_x,) * dim stride = (stride_x,) * dim pad = (pad_x,) * dim num_filter = num_base num_group = num_base shape = (2, num_base) + (in_size,) * dim x = mx.sym.Variable('x') w = mx.sym.Variable('w') b = mx.sym.Variable('b') y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel, stride=stride, pad=pad) xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1) wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0) bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0) y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i], num_filter=num_filter//num_group, kernel=kernel, stride=stride, pad=pad) for i in range(num_group)]) dev = default_context() exe1 = y1._simple_bind(dev, x=shape) exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel, b=(num_filter,)) for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays): arr1[:] = np.random.normal(size=arr1.shape) arr2[:] = arr1 exe1.forward(is_train=True) exe1.backward(exe1.outputs[0]) exe2.forward(is_train=True) exe2.backward(exe2.outputs[0]) for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays): assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3) def test_convolution_independent_gradients(): # NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603. # GPU context will be enabled after figuring out the possible issue tracked at # https://github.com/apache/incubator-mxnet/issues/15638. 
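    # Illustrative summary (not from the original test): "independent gradients" means each
    # of x / w / b is bound with its own grad_req ('null', 'write' or 'add'). The loop below
    # first runs a reference executor where all three variables share one grad_req, then
    # cross-checks outputs and the matching per-variable gradients against executors bound
    # with mixed grad_req settings, and expects a 'null' request to leave that gradient
    # buffer all zeros.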
ctx = mx.cpu() atol = 1.0e-3 rtol = 1.0e-3 reqs = ["null", "write", "add"] var_names = ["x", "w", "b"] dims = [1, 2] num_bases = [1, 8] kernel_xs = [3, 5] stride_xs = [1, 2] pad_xs = [0, 1] in_sizes = [7, 32] no_biases = [True, False] for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \ itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases): # Prepare params shape kernel = (kernel_x,) * dim stride = (stride_x,) * dim pad = (pad_x,) * dim num_filter = num_base x_shape = (2, num_base) + (in_size,) * dim w_shape = (num_filter, num_base) + kernel # Symbols definition x = mx.sym.Variable('x') w = mx.sym.Variable('w') b = mx.sym.Variable('b') if not no_bias else None conv = mx.sym.Convolution(x, w, b, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=no_bias) for req_kind in reqs: # Binding args for conv with possible dependent gradients base_args = { 'x': mx.nd.random.normal(shape=x_shape, ctx=ctx), 'w': mx.nd.random.normal(shape=w_shape, ctx=ctx), 'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None} args1 = copy.deepcopy(base_args) grad1 = { 'x': mx.nd.zeros(shape=x_shape, ctx=ctx), 'w': mx.nd.zeros(shape=w_shape, ctx=ctx), 'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None} grad_req1 = [req_kind] * 3 grad_req1 = dict(zip(var_names, grad_req1)) exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1) exe1.forward(is_train=True) exe1.backward(exe1.outputs[0]) for x_req, w_req, b_req in itertools.product(reqs, repeat=3): # Binding args for conv with independent gradients args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1` grad2 = { 'x': mx.nd.zeros(shape=x_shape, ctx=ctx), 'w': mx.nd.zeros(shape=w_shape, ctx=ctx), 'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None} grad_req2 = {"x": x_req, "w": w_req, "b": b_req} exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2) exe2.forward(is_train=True) np.testing.assert_allclose(exe1.outputs[0].asnumpy(), exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol) exe2.backward(exe2.outputs[0]) for var_name in var_names: if var_name == "b" and no_bias: continue if grad_req2[var_name] == "null": exe2_var_grad = grad2[var_name].asnumpy() np.testing.assert_allclose(exe2_var_grad, np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol) if grad_req2[var_name] != grad_req1[var_name]: continue np.testing.assert_allclose(args1[var_name].asnumpy(), args2[var_name].asnumpy(), rtol=rtol, atol=atol) np.testing.assert_allclose(grad1[var_name].asnumpy(), grad2[var_name].asnumpy(), rtol=rtol, atol=atol) def gen_broadcast_data(idx): # Manually set test cases binary_op_data_shape = np.array( [[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]], [[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]], [[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]], [[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]], [[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]], [[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]], [[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]], [[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]], [[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]], [[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]], [[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]], [[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]], [[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]], [[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]], [[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]], [[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]], [[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]], [[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]], [[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]], [[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]], [[1, 1, 1, 
1, 28], [1, 127, 1, 5, 28]], [[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]], [[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]], [[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]], [[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]], [[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]], [[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]], [[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]], [[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]], [[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]], [[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]]) if idx < binary_op_data_shape.shape[0]: l_shape = binary_op_data_shape[idx][0] r_shape = binary_op_data_shape[idx][1] else: # Generate random data that has ndim between 1-7 and all the shape dims between 1-5 ndim = np.random.randint(1, 6) shape = np.random.randint(1, 6, size=(ndim,)) l_same_dim = np.random.randint(0, 5) r_same_dim = np.random.randint(0, 5) l_axis_flags = np.random.randint(0, 2, size=ndim) r_axis_flags = np.random.randint(0, 2, size=ndim) if l_same_dim == 4: l_axis_flags = np.ones(ndim) if r_same_dim == 4: r_axis_flags = np.ones(ndim) l_shape = shape.copy() r_shape = shape.copy() l_shape[np.where(l_axis_flags == 0)] = 1 r_shape[np.where(r_axis_flags == 0)] = 1 return [np.random.random(l_shape), np.random.random(r_shape)] def gen_broadcast_data_int(idx): d = gen_broadcast_data(idx); return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)] def gen_binary_data(dummy): ndim = np.random.randint(1, 6) shape = np.random.randint(1, 6, size=(ndim,)) #print("gen shape {}".format(shape)) return [np.random.random(shape), np.random.random(shape)] def gen_binary_data_int(dummy): d = gen_binary_data(dummy); return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)] def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None): sample_num = 200 for i in range(sample_num): d = gen_data(i) y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])}) y.forward(is_train=True) y = y.outputs[0].asnumpy() x = baseline(d[0], d[1]).astype(y.dtype) #np.set_printoptions(precision=20) a = d[0] b = d[1] #print("a: {} {}".format(a.dtype, a)) #print("a: {} {}".format(b.dtype, b)) #print("x: {} {}".format(x.dtype, x)) #print("y: {} {}".format(y.dtype, y)) if mx_nd_func is not None: d0 = mx.nd.array(d[0], dtype=d[0].dtype) d1 = mx.nd.array(d[1], dtype=d[1].dtype) assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol) idx = np.abs(x-y) > atol+rtol*np.abs(x) if idx.any(): import binascii np.set_printoptions(precision=20) logging.error('found precision problem:') d[0] = np.broadcast_to(d[0], x.shape) d[1] = np.broadcast_to(d[1], x.shape) logging.error('input a: {}'.format(d[0][idx])) logging.error('input b: {}'.format(d[1][idx])) logging.error("output x: {} {}".format(x.dtype, x)) logging.error("output y: {} {}".format(y.dtype, y)) def ftohex(xs): import struct return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten())) logging.error('output x in baseline(a, b): {}'.format(x[idx])) logging.error('output y in symbol(a, b): {}'.format(y[idx])) logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx]))) logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx]))) logging.error('input a hex: {}'.format(ftohex(d[0][idx]))) logging.error('input a hex: {}'.format(ftohex(d[1][idx]))) logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx])) assert_allclose(y, x, rtol=rtol, atol=atol) def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5): sample_num = 200 for i in range(sample_num): d = 
gen_data(i) out = np.random.random((d[0] + d[1]).shape) def reduce_op(shape, x): if shape == x.shape: return x keepdims_shape = list(x.shape) for i in range(len(shape)): if x.shape[i] != shape[i]: keepdims_shape[i] = 1 x = np.sum(x, axis=i).reshape(keepdims_shape) return x baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1]) x_1 = reduce_op(d[0].shape, baseline_grad1) x_2 = reduce_op(d[1].shape, baseline_grad2) y_1 = mx.nd.empty(d[0].shape) y_2 = mx.nd.empty(d[1].shape) y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])}, args_grad=[y_1, y_2]) o = y.forward(is_train=True) y.backward([mx.nd.array(out, dtype=o[0].dtype)]) assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol) assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol) def test_binary_op(): a = mx.sym.Variable('a') b = mx.sym.Variable('b') def test_bplus(a, b): c = a + b check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data) def test_bminus(a, b): c = a - b check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data) def test_bmul(a, b): c = a * b check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data) def test_bdiv(a, b): c = a / b check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data) def test_bmod(a, b): # Python and numpy operate only in double so to avoid numerical errors we have to use # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044 #c = a % b c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64') # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32. check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0) check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data) def test_bmod_int(a, b): c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32') check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int) check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int) def test_bpow(a, b): c = a ** b check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b, g_out * a ** b * np.log(a)), gen_binary_data) def test_bneq(a, b): c = a != b # '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32. 
# Issue exposed with seed 1644387363 check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data) test_bplus(a, b) test_bminus(a, b) test_bmul(a, b) test_bdiv(a, b) test_bmod(a, b) test_bmod_int(a, b) test_bpow(a, b) test_bneq(a, b) def test_broadcast_binary_op(): def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol): """This function ensures that checking the numerical gradient of broadcast_max/min is not crossing the boundary y=x where there is no gradient definition at those sigularities.""" x_max = np.max(x) y = x_max + 2 * delta + np.random.random(y.shape) check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol) x_min = np.min(x) y = x_min - 2 * delta - np.random.random(y.shape) check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol) a = mx.sym.Variable('a') b = mx.sym.Variable('b') def test_bplus(a, b): c = mx.sym.broadcast_plus(a, b) check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add) check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data) def test_bminus(a, b): c = mx.sym.broadcast_minus(a, b) check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract) check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data) def test_bmul(a, b): c = mx.sym.broadcast_mul(a, b) check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply) check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data) def test_bdiv(a, b): c = mx.sym.broadcast_div(a, b) check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide) check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data) def test_bmod(a_, b_): # Python and numpy operate only in double so to avoid numerical errors we have to use # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044 a = mx.sym.cast(a_, dtype='float64') b = mx.sym.cast(b_, dtype='float64') # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32. 
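        # Illustrative note (not from the original test): the backward baseline below treats
        # a % b as a - floor(a / b) * b with floor() locally constant, so
        #     grad_a = g_out,    grad_b = -g_out * (a // b)
        # which is what the lambda passed to check_binary_op_backward encodes.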
c = mx.sym.broadcast_mod(a, b) check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo) check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data) def test_bmod_int(a, b): c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32')) check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo) check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int) def test_bpow(a, b): c = mx.sym.broadcast_power(a, b) check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power) check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b, g_out * a ** b * np.log(a)), gen_broadcast_data) def test_bequal(a, b): c = mx.sym.broadcast_equal(a, b) check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int, mx_nd_func=mx.nd.equal) check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int) def test_bmax(a, b): c = mx.sym.broadcast_maximum(a, b) check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum) # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big data = gen_broadcast_data(idx=200) check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3) def test_bmin(a, b): c = mx.sym.broadcast_minimum(a, b) check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum) # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big data = gen_broadcast_data(idx=200) check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3) def test_band(a, b): c = mx.sym.broadcast_logical_and(a, b) check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and) # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big data = gen_broadcast_data(idx=200) check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3) def test_bor(a, b): c = mx.sym.broadcast_logical_or(a, b) check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or) # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big data = gen_broadcast_data(idx=200) check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3) def test_bxor(a, b): c = mx.sym.broadcast_logical_xor(a, b) check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor) # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big data = gen_broadcast_data(idx=200) check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3) test_bplus(a, b) test_bminus(a, b) test_bmul(a, b) test_bdiv(a, b) test_bmod(a, b) test_bmod_int(a, b) test_bpow(a, b) test_bequal(a, b) test_bmax(a, b) test_bmin(a, b) test_band(a, b) test_bor(a, b) test_bxor(a, b) def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False): dim = len(dil) assert(len(kernel_shape) == dim) # Input for spike response data_size = 33 data_shape = (1, 1) + (data_size,) * dim center = (0,0) + (data_size // 2,) * dim spike_imgs = np.zeros(shape=data_shape, dtype=np.float32) spike_imgs[center] = 1.0 spike_img = mx.nd.array(spike_imgs) spike_img2 = mx.nd.array(spike_imgs) kernel_weights = 
mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32) kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32) kernel = mx.symbol.Variable('kernel') in_img = mx.symbol.Variable('input') net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution') net.list_arguments() be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights}, args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } ) be.forward(True) out_o = be.outputs[0].asnumpy() ndo = be.outputs[0] out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32) out_grads[center] = 1.0 out_grad = mx.nd.array(out_grads) be.backward([out_grad]) vgrad = be.grad_arrays[0].asnumpy() out = out_o.reshape(out_o.shape[2:]) nz_loc = np.nonzero(out) assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5) assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5) # Now check whether the input gradient was computed correctly input_grad = mx.nd.array(vgrad) be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights}) be.forward(True) out_o = be.outputs[0].asnumpy() assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5) rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32) impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16] rnd_kernel = mx.nd.array(rnd_kernel_s) rnd_kernel2 = mx.nd.array(rnd_kernel_s) white_in = mx.nd.ones(shape=data_shape) white_in2 = mx.nd.ones(shape=data_shape) be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel}, args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } ) be.forward(True) be.backward([impulse_error]) out_orig = be.outputs[0].asnumpy() kernel_gradient = be.grad_arrays[1].asnumpy() dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient) be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel}) be.forward(True) out = be.outputs[0].asnumpy() # Now do a simple check of the kernel gradient assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001) def test_convolution_dilated_impulse_response(): # 1D for dil in [ (1,), (2,), (3,) ]: for ks in [ (1,), (2,), (3,), (4,)]: test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks) # 2D for dil in [ (1,1), (2,2), (3,3) ]: for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]: test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks) # 3D for dil in [ (1,1,1), (2,2,2), (3,3,3) ]: for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]: test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks) @pytest.mark.serial @pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [ ((2, 3, 5, 5), (0, -1), False, (2, 75)), ((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)), ((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)), ((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)), ((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)), ((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)), ((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)), ((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)), ((2, 3, 5, 6), (-3, -3), False, (6, 30)), ((2, 3, 5, 6), (-3, -1), False, (6, 30)), ((64,), (-4, 16, 4), False, (16, 4)), ((64,), (-4, 16, -1), False, (16, 4)), ((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)), ((2, 3, 5, 5), (0, -1), True, (5, 
30)), ((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)), ((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)), ((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)), ((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)), ((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)), ((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)), ((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)), ((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)), ((2, 3, 5, 6), (-3, -3), True, (6, 30)), ((64,), (16, 4, -4), True, (16, 4)), ((64,), (16, -1, -4), True, (16, 4)), ((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)) ]) def test_reshape_new(src_shape, shape_args, reverse, dst_shape): net = mx.sym.Variable("data") net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(data=src_shape) assert output_shape[0] == dst_shape, \ 'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \ 'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse), str(dst_shape), str(output_shape[0])) dat_npy = np.random.rand(*src_shape) grad_npy = np.random.rand(*dst_shape) exe = net._simple_bind(default_context(), data=src_shape) exe.arg_dict['data'][:] = dat_npy exe.forward(is_train=True) assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \ 'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\ %(str(src_shape), str(shape_args), str(reverse), str(dst_shape)) exe.backward(out_grads=mx.nd.array(grad_npy)) assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \ 'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\ %(str(src_shape), str(shape_args), str(reverse), str(dst_shape)) for i in range(len(src_shape)): holdout_src_shape = list(src_shape) holdout_src_shape[i] = 0 holdout_src_shape = tuple(holdout_src_shape) net = mx.sym.Variable('data') net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape)) input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape) assert output_shape[0] == dst_shape, \ 'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \ 'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse), str(dst_shape), str(output_shape[0])) assert input_shape[0] == src_shape, \ 'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \ 'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse), str(dst_shape), str(output_shape[0])) def test_reshape_old(): net = mx.sym.Variable("data") net = mx.sym.Reshape(net, target_shape=(2, 0)) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5)) assert(output_shape[0] == (2, 75)) # Test for Flatten data = mx.sym.Variable("data") net = mx.sym.Flatten(data) exe = net._simple_bind(ctx=default_context(), data=(5, 4, 3, 7)) data_npy = np.random.normal(size=(5, 4, 3, 7)) out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7)) outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy() assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7))) exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())]) assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7))) def test_reshape_like(): def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape): lhs = mx.sym.Variable("lhs") rhs = mx.sym.Variable("rhs") net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, 
rhs_end=rend) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape) assert output_shape[0] == dst_shape, \ 'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\ %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend)) lhs_npy = np.random.rand(*lhs_shape) rhs_npy = np.random.rand(*rhs_shape) grad_npy = np.random.rand(*dst_shape) exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape) exe.arg_dict['lhs'][:] = lhs_npy exe.arg_dict['rhs'][:] = rhs_npy exe.forward(is_train=True) assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \ 'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\ %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend)) exe.backward(out_grads=mx.nd.array(grad_npy)) assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \ 'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\ %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend)) # Test new api (Using shape) test_cases = [ [(30,), (15,2,4), 0, None, 0, 2, (15,2)], [(30,), (15,2,4), None, 1, None, 2, (15,2)], [(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)], [(3,5), (1,15,4), 0, 2, 1, 2, (15,)], [(3,5), (1,15,4), 0, None, 1, -1, (15,)], [(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)], [(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)] ] # for test_case in test_cases: for test_case in test_cases: test_reshape_like_new(*test_case) # Test old api lhs = mx.sym.Variable("lhs") rhs = mx.sym.Variable("rhs") net = mx.sym.reshape_like(lhs, rhs) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2)) assert(output_shape[0] == (30,20,2)) def test_reduce(): sample_num = 500 def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0, test_exclude=True, test_none_axis=False): for i in range(sample_num): # Generate random data that has ndim between 1-7 and all the shape dims between 1-5 # Insert a NaN with probability equal to nan_prob ndim = np.random.randint(1, 6) shape = np.random.randint(1, 6, size=(ndim,)) axis_num = np.random.randint(0, ndim, size=1) axis_flags = np.random.randint(0, 2, size=ndim) if test_exclude: exclude = np.random.randint(0, 2) else: exclude = False axes = [] for (axis, flag) in enumerate(axis_flags): if flag: axes.append(axis) if 0 == len(axes): axes = None elif 1 == len(axes): axes = axes[0] else: axes = tuple(axes) keepdims = np.random.randint(0, 2) a = mx.symbol.Variable('a') if axes is None: if test_none_axis: b = mx_reduce_sym(a, keepdims=keepdims, axis=axes) else: b = mx_reduce_sym(a, keepdims=keepdims) elif exclude and isinstance(axes, tuple) and len(axes) < ndim: naxes = [i for i in range(ndim) if i not in axes] b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True) else: b = mx_reduce_sym(a, axis=axes, keepdims=keepdims) dat_npy = np.random.rand(*shape) # Test with both negative and positive values (randomly). Avoid having both in the same # test, which can be problematic for error checking due to near-zero values. 
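# NOTE (assumed rationale, illustration only): with mixed signs inside one array the reduced
# value can cancel to ~0, and a relative-tolerance comparison of a near-zero result is
# meaningless, e.g.
#     np.sum([1.0, -1.0 + 1e-7])  ->  ~1e-7
# Flipping the sign of the whole array instead still covers negative inputs while keeping the
# magnitude of the reduction well away from zero.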
if np.random.rand() > 0.5: dat_npy = -dat_npy if nan_prob > 0: dat_npy[np.random.rand(*shape) < nan_prob] = np.nan sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims)) if sum_groundtruth.shape == (): sum_groundtruth = np.array([sum_groundtruth]) grad_nd = mx.nd.empty(shape) outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape)) keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy, outdata=sum_groundtruth, axis=axes, keepdims=keepdims, keepdim_shape=keepdim_shape) net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)}, args_grad={'a': grad_nd}) net.forward(is_train=True) # check forward assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4) net.backward(out_grads=mx.nd.array(outgrad_npy)) bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape) # check backward assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4) test_none_axis = [True, False] for test_none in test_none_axis: test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape), mx.symbol.sum, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape)/(data.size/outdata.size), mx.symbol.mean, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data), mx.symbol.prod, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)), mx.symbol.nansum, 0.3, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data)), mx.symbol.nanprod, 0.3, test_none_axis=test_none) # grad of max and min are sensitive to the precision of the calculation. # Force numpy to match mxnet's float32. 
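# NOTE (assumed rationale, illustration only): the reference gradients for max/min below route
# the incoming gradient through an equality mask, e.g.
#     mask = np.equal(np.float32(data), outdata.reshape(keepdim_shape))
# so the element picked as the max/min has to compare exactly equal between numpy and mxnet.
# Reducing in float64 on the numpy side could select a value that no longer matches mxnet's
# float32 output bit-for-bit, hence the np.float32 cast before np_reduce in the two calls below.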
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (np.equal(np.float32(data), outdata.reshape(keepdim_shape))), mx.symbol.max) test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (np.equal(np.float32(data), outdata.reshape(keepdim_shape))), mx.symbol.min) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)), mx.symbol.norm, test_exclude=False, test_none_axis=test_none) def test_broadcast(): sample_num = 200 for i in range(sample_num): # Generate random data that has ndim between 1-7 and all the shape dims between 1-5 ndim = np.random.randint(1, 6) target_shape = np.random.randint(1, 6, size=(ndim,)) axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1)))) shape = target_shape.copy() size = tuple([shape[ele] for ele in axis]) for ele in axis: shape[ele] = 1 target_shape_with_zero = list(target_shape) for idx in range(len(target_shape_with_zero)): if idx not in axis: target_shape_with_zero[idx] = 0 break a = mx.symbol.Variable('a') sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size) sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape)) sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero)) sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to) def test_broadcasting_ele(sym_bcast): dat_npy = np.random.rand(*shape) groundtruth = dat_npy grad_nd = mx.nd.empty(shape) outgrad_npy = np.random.rand(*target_shape) grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True, numpy_reduce_func=np.sum) net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)}, args_grad={'a': grad_nd}) net.forward(is_train=True) assert (net.outputs[0].shape == target_shape).all() assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4) net.backward(out_grads=mx.nd.array(outgrad_npy)) assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4) test_broadcasting_ele(sym_bcast_axis) test_broadcasting_ele(sym_bcast_to) test_broadcasting_ele(sym_bcast_to_with_zero) test_broadcasting_ele(sym_bcast_like) def test_transpose(): for ndim in range(1, 10): for t in range(5): dims = list(np.random.randint(1, 5, size=ndim)) axes = list(range(ndim)) random.shuffle(axes) axes = tuple(axes) x = mx.nd.array(np.random.normal(size=dims)) y = mx.nd.transpose(x, axes=axes) assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy()) y = mx.nd.transpose(x) assert_allclose(np.transpose(x.asnumpy()), y.asnumpy()) @pytest.mark.serial def test_pseudo2dtranspose(): def getTwoInts(mn, mx): n1 = np.random.randint(mn, mx) n2 = np.random.randint(mn, mx-1) n2 = n2 if n2 < n1 else n2+1 return tuple(np.sort([n1, n2])) def getTranspAxes(ndim): axes = list(range(ndim)) n1, n2 = getTwoInts(0,ndim) return tuple(axes[:n1]+axes[n2:]+axes[n1:n2]) for ndim in range(2, 7): for dt in ['int8', 'half', 'int32', 'int64']: for _ in range(5): dims = list(np.random.randint(5, 20, size=ndim)) axes = getTranspAxes(ndim) x = mx.nd.array(np.random.normal(size=dims), dtype=dt) y = mx.nd.transpose(x, axes=axes) assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy()) @pytest.mark.serial def 
test_big_transpose(): n = [1] d = list(np.random.randint(132, 160, size=1)) hw = list(np.random.randint(256, 320, size=2)) c = [10] dims = n + d + hw + c axes = (0,4,1,2,3) x_np = np.random.normal(size=dims).astype('uint8') x = mx.nd.array(x_np, dtype='uint8') y = mx.nd.transpose(x, axes=axes) assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8')) axes = (0,2,3,4,1) z = mx.nd.transpose(y, axes=axes) assert_allclose(x_np, z.asnumpy().astype('uint8')) @pytest.mark.serial def test_larger_transpose(): x = mx.nd.random.normal(shape=(50,51)) y = mx.nd.transpose(x) assert_allclose(np.transpose(x.asnumpy()), y.asnumpy()) def test_expand_dims(): for ndim in range(1, 6): for axis in range(-ndim + 1, ndim): x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim))) y = mx.nd.array(x) x1 = np.expand_dims(x, axis=axis) y1 = mx.nd.expand_dims(y, axis=axis) assert_allclose(x1, y1.asnumpy()) assert_allclose(x1.shape, y1.shape) def test_crop(): for ndim in range(1, 6): for t in range(5): dims = [] begin = [] end = [] idx = [] for i in range(ndim): d = random.randint(1, 5) b = random.randint(0, d-1) e = random.randint(b+1, d) if b == 0 and random.randint(0, 1): b = None elif b != 0 and random.randint(0, 1): b -= d if e == d and random.randint(0, 1): e = None elif e != d and random.randint(0, 1): e -= d dims.append(d) begin.append(b) end.append(e) idx.append(slice(b, e)) x = mx.nd.array(np.random.normal(size=dims)) y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end)) assert_allclose(x.asnumpy()[idx], y.asnumpy()) vx = mx.sym.Variable('x') vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end)) check_numeric_gradient(vy, [x.asnumpy()]) def test_slice_axis(): for ndim in range(1, 6): shape = np.random.randint(1, 11, size=(ndim,)) for t in range(ndim): d = shape[t] b = random.randint(0, d-1) e = random.randint(b+1, d) if np.random.rand() > 0.6: e = None else: if e < d and np.random.rand() > 0.5: e = e - d if np.random.rand() > 0.5: b = b - d idx = [] for i in range(ndim): idx.append(slice(0, shape[i])) idx[t] = slice(b, e) X = mx.symbol.Variable('X') x = mx.nd.array(np.random.normal(size=shape)) Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e) xgrad = mx.nd.empty(x.shape) exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad}) exec1.forward(is_train=True) y = exec1.outputs[0] assert_allclose(x.asnumpy()[idx], y.asnumpy()) exec1.backward([y]) xx = x.asnumpy() xx[:] = 0.0 xx[idx] = x.asnumpy()[idx] assert_allclose(xx, xgrad.asnumpy()) x_grad_npy = np.random.normal(size=x.shape) xgrad = mx.nd.array(x_grad_npy) exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add") exec2.forward(is_train=True) exec2.backward([exec2.outputs[0]]) xx = np.zeros(shape=x.shape, dtype=np.float32) xx[idx] = x.asnumpy()[idx] assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5) def test_slice_like(): for ndim in range(1, 6): from_shape = np.random.randint(1, 11, size=(ndim,)) shape = [s + np.random.randint(0, 3) for s in from_shape] for t in range(ndim): if t > 0: axes = np.random.randint(0, ndim, size=t).tolist() else: axes = [] idx = [] for i in range(ndim): idx.append(slice(0, shape[i])) if i in axes or not axes: idx[i] = slice(0, from_shape[i]) if axes: pos = np.random.randint(0, t) if axes[pos] > 0: axes[pos] -= ndim # negative index X = mx.symbol.Variable('X') X_1 = mx.symbol.Variable('X1') x = mx.nd.array(np.random.normal(size=shape)) x1 = mx.nd.array(np.random.normal(size=from_shape)) Y = mx.symbol.slice_like(data=X, shape_like=X_1, 
axes=axes) xgrad = mx.nd.empty(x.shape) xgrad1 = mx.nd.empty(x1.shape) exec1 = Y._bind(default_context(), args = [x, x1], args_grad = {'X': xgrad, 'X1': xgrad1}) exec1.forward(is_train=True) y = exec1.outputs[0] assert_allclose(x.asnumpy()[idx], y.asnumpy()) exec1.backward([y]) xx = x.asnumpy() xx[:] = 0.0 xx[idx] = x.asnumpy()[idx] assert_allclose(xx, xgrad.asnumpy()) assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy()) def test_slice_like_different_types(): x = [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.]] y = [[ 0., 0., 0.], [ 0., 0., 0.]] x = mx.nd.array(x) y = mx.nd.array(y).astype('int32') z = mx.nd.slice_like(x, y) assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]]) def test_reshape_like_different_types(): x = mx.nd.zeros((2, 3)) y = mx.nd.array([[1, 2], [3, 4], [5, 6]]) y = mx.nd.array(y).astype('int32') z = mx.nd.reshape_like(x, y) assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]]) def test_broadcast_like_different_types(): x = mx.nd.zeros((2, 1)) y = mx.nd.ones((2, 2)) y = mx.nd.array(y).astype('int32') z = mx.nd.broadcast_like(x, y) assert_allclose(z.asnumpy(), [[0,0],[0,0]]) assert x.dtype == z.dtype def test_flip(): for ndim in range(1, 6): for t in range(5): dims = [random.randint(1,10) for i in range(ndim)] axis = random.randint(0, ndim-1) idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)] x = mx.nd.array(np.random.normal(size=dims)) y = mx.nd.flip(x, axis=axis) assert_allclose(x.asnumpy()[idx], y.asnumpy()) def test_stn(): import sys np.set_printoptions(threshold=sys.maxsize) num_filter = 2 # conv of loc net kernel = (3, 3) # conv of loc net num_hidden = 6 # fc of loc net for n in [1, 2, 3, 4]: for c in [1, 2, 3, 4]: for h in [5, 9, 13, 17]: # for convenience test, this third and forth input dim should be 4x + 1 for w in [5, 9, 13, 17]: data_shape = (n, c, h, w) target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2)) data = mx.sym.Variable(name="data") loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv") loc = mx.sym.Flatten(data=loc) loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc") stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape, transform_type="affine", sampler_type="bilinear") arg_names = stn.list_arguments() arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape) # check shape assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1]) dev = default_context() #dev = mx.gpu(0) args = {} args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev) args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev) args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev) args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev) args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev) grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes] exe = stn._bind(dev, args=args, args_grad=grad_grad) exe.forward(is_train=True) out = exe.outputs[0] # check forward assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4) out_grad = mx.nd.ones(out.shape, ctx=dev) exe.backward([out_grad]) # check backward assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4) def test_stn_valid_sampling(): target_shape = ( 28, 28, ) src_shape = ( 42, 42, ) data = mx.sym.Variable(name="data") loc 
= mx.sym.Variable(name="loc") data_array = np.zeros(( 1, 1, ) + src_shape) # Have an ever so slight rotation. loc_array = np.array( [[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901, -0.000919065]]) stn = mx.sym.SpatialTransformer( data=data, loc=loc, target_shape=target_shape, transform_type="affine", sampler_type="bilinear") grad_req = {k: 'write' for k in stn.list_arguments()} grads = { 'data': mx.nd.array(np.zeros_like(data_array)), 'loc': mx.nd.array(np.zeros_like(loc_array)) } executor = stn._bind( ctx=default_context(), args={'data': mx.nd.array(data_array), 'loc': mx.nd.array(loc_array)}, grad_req=grad_req, args_grad=grads) executor.forward(is_train=True) executor.backward(mx.nd.ones(( 1, 1, ) + target_shape)) def test_dot(): ctx = default_context() dtypes = ['float32', 'float64'] ndims = [2] if ctx.device_type == 'gpu': dtypes += ['float16'] ndims += [1] # Test normal dot. for ndim in ndims: for data_type in dtypes: tol = 1e-2 if data_type == 'float16' else 1e-3 for m in range(1, 5): for k in range(1, 5): if ndim == 1 and k != 1: pass for n in range(1, 5): a_shape = (m, k) if ndim == 2 else (m,) b_shape = (k, n) if ndim == 2 else (n,) a_npy = np.random.normal(0, 1, (m, k)) a_npy = a_npy.astype(data_type) b_npy = np.random.normal(0, 1, (k, n)) b_npy = b_npy.astype(data_type) c_npy = np.empty((m, n), dtype=data_type) ograd_npy = np.random.normal(0, 1, (m, n)) ograd_npy = ograd_npy.astype(data_type) agrad_npy = np.empty((m, k), dtype=data_type) bgrad_npy = np.empty((k, n), dtype=data_type) c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :]) bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :]) agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T) a = mx.sym.Variable('a', dtype=data_type) b = mx.sym.Variable('b', dtype=data_type) c = mx.sym.dot(a, b) exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape) outputs = exe.forward(is_train=True, a=a_npy, b=b_npy) assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol) exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)]) assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol) assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol) # Test dot with transpose flag using gradient checker. 
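# NOTE (illustration only): in the 2-D case the transpose flags are expected to behave like
# their numpy counterparts, i.e. roughly
#     mx.sym.dot(x, y, transpose_a=True)                    ~  np.dot(x.T, y)
#     mx.sym.dot(x, y, transpose_b=True)                    ~  np.dot(x, y.T)
#     mx.sym.dot(x, y, transpose_a=True, transpose_b=True)  ~  np.dot(x.T, y.T)
# Each helper below is therefore fed pre-transposed numpy inputs (m1_npy.T / m2_npy.T) so that
# check_numeric_gradient exercises both the forward result and the backward pass of each flag.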
def dot_sym(data_type): x = mx.sym.Variable('x', dtype=data_type) y = mx.sym.Variable('y', dtype=data_type) return mx.sym.dot(x, y) def dot_sym_xT(data_type): x = mx.sym.Variable('x', dtype=data_type) y = mx.sym.Variable('y', dtype=data_type) return mx.sym.dot(x, y, transpose_a=True) def dot_sym_yT(data_type): x = mx.sym.Variable('x', dtype=data_type) y = mx.sym.Variable('y', dtype=data_type) return mx.sym.dot(x, y, transpose_b=True) def dot_sym_xT_yT(data_type): x = mx.sym.Variable('x', dtype=data_type) y = mx.sym.Variable('y', dtype=data_type) return mx.sym.dot(x, y, transpose_a=True, transpose_b=True) for data_type in dtypes: for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]: m1_npy = np.random.uniform(-1, 1, ashape) m1_npy = m1_npy.astype(data_type) m2_npy = np.random.uniform(-1, 1, bshape) m2_npy = m2_npy.astype(data_type) check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3) check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3) check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3) check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3) def test_batch_dot(): ctx = default_context() dtypes = ['float32', 'float64'] if ctx.device_type == 'gpu': dtypes += ['float16'] for data_type in dtypes: for batch_size in range(1, 5): for m in range(1, 5): for k in range(1, 5): for n in range(1, 5): transpose_a = (np.random.rand() > 0.5) transpose_b = (np.random.rand() > 0.5) a_npy = np.random.normal(0, 1, (batch_size, m, k)) a_npy = a_npy.astype(data_type) b_npy = np.random.normal(0, 1, (batch_size, k, n)) b_npy = b_npy.astype(data_type) c_npy = np.empty((batch_size, m, n), dtype=data_type) ograd_npy = np.random.normal(0, 1, (batch_size, m, n)) ograd_npy = ograd_npy.astype(data_type) agrad_npy = np.empty((batch_size, m, k), dtype=data_type) bgrad_npy = np.empty((batch_size, k, n), dtype=data_type) a_init_grad_npy = np.random.normal(size=(batch_size, m, k)) a_init_grad_npy = a_init_grad_npy.astype(data_type) b_init_grad_npy = np.random.normal(size=(batch_size, k, n)) b_init_grad_npy = b_init_grad_npy.astype(data_type) for i in range(batch_size): c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :]) bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :]) agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T) a = mx.sym.Variable('a', dtype=data_type) b = mx.sym.Variable('b', dtype=data_type) c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b) if transpose_a: a_npy = np.transpose(a_npy, axes=(0, 2, 1)) agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1)) a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1)) if transpose_b: b_npy = np.transpose(b_npy, axes=(0, 2, 1)) bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1)) b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1)) exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape, grad_req='write') exe_add = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape, grad_req='add') exe_add.grad_dict['a'][:] = a_init_grad_npy exe_add.grad_dict['b'][:] = b_init_grad_npy outputs = exe.forward(is_train=True, a=a_npy, b=b_npy) assert_almost_equal(outputs[0], c_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)]) assert_almost_equal(exe.grad_dict['a'], 
agrad_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) exe_add.forward(is_train=True, a=a_npy, b=b_npy) exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)]) assert_almost_equal(exe_add.grad_dict['a'], agrad_npy + a_init_grad_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) assert_almost_equal(exe_add.grad_dict['b'], bgrad_npy + b_init_grad_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply): img1 = mx.sym.Variable('img1') img2 = mx.sym.Variable('img2') return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement, stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply) def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply): # compute output's dimension paddedbottomheight = data1.shape[2] + 2 * pad_size paddedbottomwidth = data1.shape[3] + 2 * pad_size kernel_radius = (kernel_size - 1) // 2 border_size = max_displacement + kernel_radius top_width = (paddedbottomwidth - border_size * 2) // stride1 top_height = (paddedbottomheight - border_size * 2) // stride1 neighborhood_grid_radius = max_displacement // stride2 neighborhood_grid_width = neighborhood_grid_radius * 2 + 1 top_channels = neighborhood_grid_width * neighborhood_grid_width out = np.zeros((data1.shape[0], top_channels, top_height, top_width)) tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth)) tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth)) tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:] tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:] for i in range(top_height): for j in range(top_width): for nbatch in range(data1.shape[0]): # x1,y1 is the location in data1 , i,j is the location in output x1 = j * stride1 + max_displacement y1 = i * stride1 + max_displacement for top_channel in range(top_channels): s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2 s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2 # location in data2 x2 = x1 + s2o y2 = y1 + s2p for h in range(kernel_size): for w in range(kernel_size): for channel in range(data1.shape[1]): if is_multiply: out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w] else: out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w]) out /= float(kernel_size**2*data1.shape[1]) return out,tmp1,tmp2 def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply): # compute output's dimension paddedbottomheight = data1.shape[2] + 2 * pad_size paddedbottomwidth = data1.shape[3] + 2 * pad_size kernel_radius = (kernel_size - 1) // 2 border_size = max_displacement + kernel_radius top_width = (paddedbottomwidth - border_size * 2) // stride1 top_height = (paddedbottomheight - border_size * 2) // stride1 neighborhood_grid_radius = max_displacement 
// stride2 neighborhood_grid_width = neighborhood_grid_radius * 2 + 1 top_channels = neighborhood_grid_width * neighborhood_grid_width out = np.zeros((data1.shape[0], top_channels, top_height, top_width)) tmp1_grad = np.zeros(tmp1.shape) tmp2_grad = np.zeros(tmp2.shape) for i in range(top_height): for j in range(top_width): for nbatch in range(data1.shape[0]): # x1,y1 is the location in data1 , i,j is the location in output x1 = j * stride1 + max_displacement y1 = i * stride1 + max_displacement for top_channel in range(top_channels): s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2 s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2 # location in data2 x2 = x1 + s2o y2 = y1 + s2p for h in range(kernel_size): for w in range(kernel_size): for channel in range(data1.shape[1]): if is_multiply: tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w] tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w] else: sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1 tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn) tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1]) tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1]) return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]], def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype): img1 = np.random.random(data_shape) img1 = img1.astype(dtype) img2 = np.random.random(data_shape) img2 = img2.astype(dtype) net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply) net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply ) exe1 = net1._simple_bind(default_context(),img1=img1.shape,img2=img1.shape) exe1.arg_dict['img1'][:] = img1 exe1.arg_dict['img2'][:] = img2 #cpu forward exe1.forward(is_train=True) # python forward forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply) # forward error assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4) # out_grad a = np.ones(forward_result.shape) out_grad1 = mx.nd.array(a,default_context()) # cpu backward exe1.backward(out_grads=out_grad1) # python backward grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply) # backward error assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4) assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4) def test_correlation(): def test_infer_type(dtype): a = mx.sym.Variable('a') b = mx.sym.Variable('b') corr = mx.sym.Correlation(data1=a, data2=b) arg_type1, out_type1, _ = corr.infer_type(a=dtype) if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype): msg = npt.npt.build_err_msg([a, b], err_msg="Inferred type from a is not as expected, " "Expected :%s %s %s, Got: %s %s %s" % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]), names=['a', 'b']) raise AssertionError(msg) arg_type2, out_type2, _ = corr.infer_type(b=dtype) if arg_type2[0] != np.dtype(dtype) and 
arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype): msg = npt.npt.build_err_msg([a, b], err_msg="Inferred type from b is not as expected, " "Expected :%s %s %s, Got: %s %s %s" % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]), names=['a', 'b']) raise AssertionError(msg) for dtype in ['float16', 'float32']: test_infer_type(dtype) unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype) unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype) unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype) unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype) unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype) unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype) unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype) unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype) unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype) with pytest.raises(MXNetError): unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype) with pytest.raises(MXNetError): unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = False, dtype = dtype) with pytest.raises(MXNetError): unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = True, dtype = dtype) with pytest.raises(MXNetError): unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype) # Seed set because the test is not robust enough to operate on random data @pytest.mark.seed(1234) def test_roipooling(): data = mx.symbol.Variable(name='data') rois = mx.symbol.Variable(name='rois') test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1) x1 = np.random.rand(4, 3, 12, 8).astype('float32') x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32') check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data':'write', 'rois':'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1e-4) check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data':'add', 'rois':'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1E-4) def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"): # bind with label X = mx.symbol.Variable('X', dtype=dtype) Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width) x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu) # numpy result pad_grouped = list(zip(*[iter(list(pad_width))] * 2)) np_out = np.pad(x.asnumpy(), pad_grouped, mode) # mxnet result grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype) exec1 = Y._bind(xpu, 
args = [x], args_grad = {'X': grad}) exec1.forward(is_train=True) out = exec1.outputs[0] # compare numpy + mxnet assert_almost_equal(out, np_out) # grad check check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2) def test_pad(): ctx = default_context() shape1 = (2, 3, 3, 5) pad1 = (0, 0, 0, 0, 1, 2, 3, 4) shape2 = (2, 3, 3, 5, 4) pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1) # note: this op doesn't support ints yet. Add tests when supported dtypes = ["float16", "float32", "float64"] for dtype in dtypes: check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype) check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype) check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype) check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype) check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype) check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype) def np_instance_norm(data, weight, bias, eps): spatial_dims = data.shape[2::] num_spatial_vals = np.prod(np.array(spatial_dims)) scale = 1/float(num_spatial_vals) sum_axis = tuple(range(2, data.ndim)) mean = scale * np.sum(data, axis = sum_axis) mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape) var = scale * np.sum((data - mean)**2, axis = sum_axis) var = np.reshape(np.repeat(var, num_spatial_vals), data.shape) weightBatch = np.tile(weight, (data.shape[0], 1)) weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape) biasBatch = np.tile(bias, (data.shape[0], 1)) biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape) return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch def check_instance_norm_with_shape(shape, xpu): # bind with label eps = 0.001 X = mx.symbol.Variable('X') G = mx.symbol.Variable('G') B = mx.symbol.Variable('B') Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps) x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu) gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu) beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu) np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps) exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta}) exec1.forward(is_train=False) out = exec1.outputs[0] assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4) check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()}, numeric_eps=1e-2, rtol=1e-2, atol=1e-2) def test_instance_normalization(): check_instance_norm_with_shape((1, 1, 1), default_context()) check_instance_norm_with_shape((2, 1, 2), default_context()) check_instance_norm_with_shape((2,4,5,6), default_context()) check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context()) def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10): ctx = default_context() data = mx.symbol.Variable('data') out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps) in_data = np.random.uniform(-1, 1, in_shape).astype(dtype) # calculate numpy results if mode == 'channel': assert in_data.ndim > 2 np_norm = np.linalg.norm(in_data, axis=1) + norm_eps np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1) np_out = np.multiply(in_data, np_norm) elif mode == 'spatial': assert in_data.ndim > 2 s = in_data.shape np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps np_norm = np.repeat(1. 
/ np_norm[:, np.newaxis], in_data.size / s[0] / s[1], axis=2) np_out = np.multiply(in_data, np_norm.reshape(s)) elif mode == 'instance': assert in_data.ndim > 1 s = in_data.shape np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size / s[0], axis=1) np_out = np.multiply(in_data, np_norm.reshape(s)) else: raise RuntimeError('Unknown l2 normalization mode') exe = out._simple_bind(ctx=ctx, data=in_data.shape) output = exe.forward(is_train=True, data=in_data) # compare numpy + mxnet assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype is 'float16' else 1e-5, atol=1e-5) # check gradient check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3) def test_l2_normalization(): for dtype in ['float16', 'float32', 'float64']: for mode in ['channel', 'spatial', 'instance']: nbatch = random.randint(1, 4) nchannel = random.randint(3, 5) height = random.randint(4, 6) check_l2_normalization((nbatch, nchannel, height), mode, dtype) width = random.randint(5, 7) check_l2_normalization((nbatch, nchannel, height, width), mode, dtype) def check_layer_normalization(in_shape, axis, eps, dtype=np.float32, forward_check_eps=1E-3, backward_check_eps=1E-3, npy_grad_check=True, finite_grad_check=True): def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5): if axis < 0: axis += data.ndim broadcast_shape = [1 for _ in range(data.ndim)] broadcast_shape[axis] = data.shape[axis] mean = data.mean(axis=axis, keepdims=True).astype(dtype) var = data.var(axis=axis, keepdims=True).astype(dtype) std = np.sqrt(var + dtype(eps)).astype(dtype) out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \ np.reshape(beta, broadcast_shape) return out def npy_layer_norm_grad(data, gamma, out_grad, axis, eps): if axis < 0: axis += data.ndim exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis]) data_mean = data.mean(axis=axis, keepdims=True) data_var = data.var(axis=axis, keepdims=True) data_std = np.sqrt(data_var + eps) centered_data = (data - data_mean) / data_std gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True) beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True) w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\ / data_std data_grad = w - w.mean(axis=axis, keepdims=True)\ - centered_data * (w * centered_data).mean(axis=axis, keepdims=True) gamma_grad = gamma_grad.reshape((-1,)) beta_grad = beta_grad.reshape((-1,)) return data_grad, gamma_grad, beta_grad ctx = default_context() data = np.random.normal(0, 1, in_shape).astype(dtype) gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype) beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype) data_s = mx.symbol.Variable('data') gamma_s = mx.symbol.Variable('gamma') beta_s = mx.symbol.Variable('beta') out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps) exe = out_s._simple_bind(ctx, data=in_shape) exe.arg_dict['data'][:] = data exe.arg_dict['gamma'][:] = gamma exe.arg_dict['beta'][:] = beta out_nd = exe.forward()[0] out = npy_layer_norm(data, gamma, beta, axis, eps) assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps) if finite_grad_check: for req in ['write', 'add']: check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta}, grad_nodes={'data': req, 'gamma': req, 'beta': req}, numeric_eps=1e-2, rtol=1e-2, atol=1e-2) if npy_grad_check: # Test for grad_req = write out_grad = np.random.normal(0, 
1, in_shape).astype(dtype) exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write') exe.arg_dict['data'][:] = data exe.arg_dict['gamma'][:] = gamma exe.arg_dict['beta'][:] = beta exe.forward() exe.backward([mx.nd.array(out_grad, ctx=ctx)]) gt_data_grad, gt_gamma_grad, gt_beta_grad =\ npy_layer_norm_grad(data, gamma, out_grad, axis, eps) assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps) # Test for grad_req = add out_grad = np.random.normal(0, 1, in_shape).astype(dtype) init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype) init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype) init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype) exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add') exe.arg_dict['data'][:] = data exe.arg_dict['gamma'][:] = gamma exe.arg_dict['beta'][:] = beta exe.grad_dict['data'][:] = init_data_grad exe.grad_dict['gamma'][:] = init_gamma_grad exe.grad_dict['beta'][:] = init_beta_grad exe.forward() exe.backward([mx.nd.array(out_grad, ctx=ctx)]) gt_data_grad, gt_gamma_grad, gt_beta_grad = \ npy_layer_norm_grad(data, gamma, out_grad, axis, eps) assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps) def test_norm(): try: import scipy assert LooseVersion(scipy.__version__) >= LooseVersion('0.1') from scipy.linalg import norm as sp_norm except (AssertionError, ImportError): print("Could not import scipy.linalg.norm or scipy is too old. 
" "Falling back to numpy.linalg.norm which is not numerically stable.") from numpy.linalg import norm as sp_norm def l1norm(input_data, axis=0, keepdims=True): return np.sum(abs(input_data), axis=axis, keepdims=keepdims) def l2norm(input_data, axis=0, keepdims=True): return sp_norm(input_data, axis=axis, keepdims=keepdims) ctx = default_context() data = mx.symbol.Variable('data') in_data_dim = random_sample([2,3,4], 1)[0] in_shape = rand_shape_nd(in_data_dim, dim=5) epsilon = 1e-3 acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64, np.int32: np.int32, np.int64: np.int64} dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64', np.int32: 'int32', np.int64: 'int64'} for enforce_safe_acc in ['1', '0']: with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc): for order in [1, 2]: for dtype in [np.float16, np.float32, np.float64]: for i in range(in_data_dim): for out_dtype in ['float32', 'float64']: backward_dtype = np.float32 if out_dtype == 'float32' else np.float64 accumulation_type = acc_type[dtype] if enforce_safe_acc == "0": backward_dtype = dtype out_dtype = dtype_to_str[dtype] accumulation_type = dtype skip_backward = 'int' in out_dtype in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type) in_data[abs(in_data) < epsilon] = 2 * epsilon norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True) npy_out = l1norm(in_data, i) if order is 1 else l2norm(in_data, i) npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)], rtol=1e-2 if dtype == np.float16 else 1e-3, atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype) if dtype is not np.float16 and not skip_backward: check_symbolic_backward(norm_sym, [in_data.astype(dtype)], [np.ones(npy_out.shape).astype(out_dtype)], [npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype) # Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509 # check gradient if dtype is not np.float16 and not skip_backward: check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3, dtype=backward_dtype) if i < in_data_dim-1: norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True) npy_out = l1norm(in_data, (i, i+1)) if order is 1 else l2norm(in_data, (i, i+1)) npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)], rtol=1e-2 if dtype is np.float16 else 1e-3, atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx) if dtype is not np.float16 and not skip_backward: check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape).astype(out_dtype)], [npy_out_backward.astype(out_dtype)], rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype) # check gradient if dtype is not np.float16 and not skip_backward: check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3, dtype=backward_dtype) @pytest.mark.parametrize('enforce_safe_acc', ['1', '0']) @pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [ (np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]), (np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]), (np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]) ]) def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, 
backward_check_eps, in_shape_l, finite_grad_check_l): with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc): for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l): for axis in range(-len(in_shape), len(in_shape)): for eps in [1E-2, 1E-3]: if dtype == np.float16: npy_grad_check = False else: npy_grad_check = True check_layer_normalization(in_shape, axis, eps, dtype=dtype, forward_check_eps=forward_check_eps, backward_check_eps=backward_check_eps, npy_grad_check=npy_grad_check, finite_grad_check=finite_grad_check) # Numpy Implementation of Sequence Ops def sequence_last_numpy(array, lengths, axis): # create new array of dims [batch, seqlen, ...] array2 = np.moveaxis(array, axis, 1) dims = array2.shape if lengths is None: return array2[:, -1] lengths = list(lengths) return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])]) def sequence_mask_numpy(array, lengths, axis, value): if lengths is None: return array arrayMask = array.copy() # conform to [batch, seqlen, ...] arrayMask = np.moveaxis(arrayMask, axis, 1) shape = arrayMask.shape lengths = list(lengths) for i in range(shape[0]): arrayMask[i, int(lengths[i]):] = value return np.moveaxis(arrayMask, 1, axis) def sequence_reverse_numpy(array, lengths, axis): rarray = array.copy() # conform to [batch, seqlen, ...] rarray = np.moveaxis(rarray, axis, 1) shape = rarray.shape if lengths is None: lengths = [shape[1]] * shape[0] lengths = list(lengths) for i in range(shape[0]): j = int(lengths[i]) rarray[i,:j] = rarray[i,:j][::-1] return np.moveaxis(rarray, 1, axis) def check_sequence_func(ftype, mask_value=0, axis=0): # bind with label xpu = default_context() X = mx.symbol.Variable('X') L = mx.symbol.Variable('L') # lengths shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)] for seqlenQ in [True, False]: for ary_dtype in [np.float32]: for idx_dtype in [np.int32, np.float32]: for s in shapes: x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu) batch = s[1] if (axis == 0) else s[0] seqlen = s[axis] l_np = np.random.randint(1, seqlen + 1, batch) l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu) if not seqlenQ: l_np = None args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis} if seqlenQ: args['sequence_length'] = L if ftype == "last": Y = mx.symbol.SequenceLast(**args) np_out = sequence_last_numpy(x.asnumpy(), l_np, axis) elif ftype == "mask": args['value'] = mask_value Y = mx.symbol.SequenceMask(**args) np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value) elif ftype == "reverse": Y = mx.symbol.SequenceReverse(**args) np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis) fargs = [x, l] if seqlenQ else [x] gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()] check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy") check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'}, numeric_eps=1e-2, rtol=1e-2) check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'}, numeric_eps=1e-3, rtol=1e-2, atol=1E-4) check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'}, numeric_eps=1e-3, rtol=1e-2, atol=1E-4) @pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395") def test_sequence_last(): check_sequence_func("last", axis=0) check_sequence_func("last", axis=1) def test_sequence_mask(): check_sequence_func("mask", axis = 0, mask_value=-2.3) check_sequence_func("mask", axis = 1, mask_value=0.3) def check_sequence_reverse(xpu): # sample data arr = np.array( [[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 13., 
14., 15.], [ 16., 17., 18.]]]) arr1 = np.array( [[[ 13., 14., 15.], [ 16., 17., 18.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 1., 2., 3.], [ 4., 5., 6.]]]) arr2 = np.array( [[[ 7., 8., 9.], [ 10., 11., 12.]], [[ 1., 2., 3.], [ 4., 5., 6.]], [[ 13., 14., 15.], [ 16., 17., 18.]]]) arr3 = np.array( [[[ 7., 8., 9.], [ 16., 17., 18.]], [[ 1., 2., 3.], [ 10., 11., 12.]], [[ 13., 14., 15.], [ 4., 5., 6.]]]) # test for matrix case seq_len_1 = [1, 2, 2] arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32) arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32) def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False): # MxNet symbol creation seq = mx.sym.Variable('seq') if sequence_length and use_sequence_length: seq_len = mx.sym.Variable('seq_len') else: # ensure that both are disabled, not just one seq_len=None use_sequence_length=False rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length) # MxNet symbol execution if sequence_length: bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)}) else: bound = rev._bind(xpu, {'seq': mx.nd.array(arr)}) fwd = bound.forward() return fwd[0].asnumpy() # test cases assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1) assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1) assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2) assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3) assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5) def test_sequence_reverse(): check_sequence_func("reverse", axis=0) check_sequence_reverse(mx.cpu()) def mathematical_core_binary(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call1, backward_numpy_call2, data1_init=2., data2_init=3., grad_init=2.): data1 = mx.symbol.Variable('data1') data2 = mx.symbol.Variable('data2') shape = (3, 4) data_tmp1 = np.random.rand(3, 4) data_tmp2 = np.random.rand(3, 4) data_tmp1[:] = data1_init data_tmp2[:] = data2_init arr_data1 = mx.nd.array(data_tmp1) arr_data2 = mx.nd.array(data_tmp2) arr_grad1 = mx.nd.empty(shape) arr_grad2 = mx.nd.empty(shape) test = forward_mxnet_call(data1, data2) exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = forward_numpy_call(data_tmp1, data_tmp2) assert_almost_equal(out, npout) out_grad = mx.nd.empty(shape) out_grad[:] = grad_init exe_test.backward(out_grad) npout_grad = np.ones(shape) npout_grad[:] = grad_init npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2) npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2) assert_almost_equal(arr_grad1, npout_grad1) assert_almost_equal(arr_grad2, npout_grad2) def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.): data = mx.symbol.Variable('data') shape = (3, 4) data_tmp = np.ones(shape) data_tmp[:] = data_init arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) arr_grad[:] = 3 test = forward_mxnet_call(data) exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = forward_numpy_call(data_tmp) assert_almost_equal(out, npout) out_grad = mx.nd.empty(shape) out_grad[:] = 
grad_init npout_grad = out_grad.asnumpy() temp = backward_numpy_call(data_tmp) npout_grad = npout_grad * temp exe_test.backward(out_grad) assert_almost_equal(arr_grad, npout_grad) def test_special_functions_using_scipy(): try: from scipy import special as scipy_special except: print("Could not import scipy. Skipping unit tests for special functions") return # gamma mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x), lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5) # gammaln mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x), lambda x: scipy_special.psi(x), 0.5, 0.5) # erf mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x), lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5) # erfinv mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x), lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5) def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.): data = mx.symbol.Variable('data') shape = (3, 4) data_tmp = np.ones(shape) data_tmp[:] = data_init arr_data = mx.nd.array(data_tmp) test = forward_mxnet_call(data) exe_test = test._bind(default_context(), args=[arr_data]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = forward_numpy_call(data_tmp) assert_almost_equal(out, npout) def test_mathematical(): # rsqrt mathematical_core("rsqrt", lambda x: mx.sym.rsqrt(x), lambda x: 1 / np.sqrt(x), lambda x: -(1.0 / (2.0 * x * np.sqrt(x)))) # tan mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1) # arcsin mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x), lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5) # arccos mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x), lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5) # arctan mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x), lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5) # hypot mathematical_core_binary("hypot", lambda x, y: mx.sym.hypot(x, y), lambda x, y: np.hypot(x, y), lambda x, y: x / np.hypot(x, y), lambda x, y: y / np.hypot(x, y), 0.5, 0.5, 0.5) # hypot scalar mathematical_core("hypot scalar", lambda x: mx.sym.hypot(x, 3), lambda x: np.hypot(x, 3), lambda x: x / np.hypot(x, 3), 0.5, 0.5) # degrees mathematical_core("degrees", lambda x: mx.sym.degrees(x), lambda x: np.degrees(x), lambda x: 180./np.pi, 0.5, 0.5) # radians mathematical_core("radians", lambda x: mx.sym.radians(x), lambda x: np.radians(x), lambda x: np.pi / 180., 0.6, 1) # sinh mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x)) # cosh mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5) # tanh mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1) # arcsinh mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x), lambda x: 1./(x**2 + 1.)**(1./2.)) # arccosh mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x), lambda x: 1./(x**2 - 1.)**(1./2.)) # arctanh mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x), lambda x: -1./(x**2 - 1.), 0.5) # log1p mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x), lambda x: 1. 
/ (1.0 + x), 0.5, 0.5) # expm1 mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x), lambda x: np.exp(x), 0.5, 0.5) # log10 mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x), lambda x: 1. / (x * np.log(10.))) # log2 mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x), lambda x: 1. / (x * np.log(2.))) # rint rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x)) # fix rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x)) def test_special_functions_using_scipy(): try: from scipy import special as scipy_special except: print("Could not import scipy. Skipping unit tests for special functions") return # gamma mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x), lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5) # gammaln mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x), lambda x: scipy_special.psi(x), 0.5, 0.5) def test_clip(): data = mx.symbol.Variable('data') shape = (30, 30) data_tmp = np.random.uniform(-1, 1, shape).astype('float32') test = mx.sym.clip(data, a_max=0.6, a_min=-0.6) check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)]) check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])]) def test_init(): def test_basic_val_init(sym_func, np_func, shape, dtype): x = sym_func(shape=shape, dtype=dtype) exe = x._bind(default_context(), args=[], args_grad=[]) exe.forward(is_train=True) assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype)) assert exe.outputs[0].asnumpy().dtype == dtype def test_arange(): # General Random Tests dtype_list = [np.float32, np.float64, np.int32, np.uint8] config_list = [(10,), (0, 10), (5, 100, 4), (50, -50, -2), (-100, 100, 1), (1.3, 456.6, 1.3)] for dtype in dtype_list: for config in config_list: repeats = random.choice([1, 3]) np_out = np.repeat(np.arange(*config, dtype=dtype), repeats) nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype) assert_almost_equal(np_out, nd_out) def test_arange_inferstop(): s = mx.sym.arange(start=0, stop=None, infer_range=True) s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5])) exe = s._bind(ctx=mx.cpu(), args={}) exe.forward() assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4])) def test_arange_like(): shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)] axis_list = [0, -1] for sh in shape_list: for axis in axis_list: val = np.random.rand(*sh) data = mx.nd.array(val) nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis) np_out = np.arange(start=0, stop=sh[axis]) assert_almost_equal(nd_out.asnumpy(), np_out) def test_arange_like_without_axis(): shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)] for sh in shape_list: val = np.random.rand(*sh) data = mx.nd.array(val) nd_out = mx.nd.contrib.arange_like(data, start=0) np_out = np.arange(start=0, stop=val.size) assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh)) test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32) test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32) test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16) test_arange() test_arange_inferstop() test_arange_like() test_arange_like_without_axis() def test_order(): ctx = default_context() def gt_topk(dat, axis, ret_typ, k, is_ascend): if ret_typ == "indices": if is_ascend: indices = np.arange(k) else: indices = np.arange(-1, -k-1, -1) ret = 
np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap') elif ret_typ == "value": if is_ascend: indices = np.arange(k) else: indices = np.arange(-1, -k-1, -1) ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap') else: assert dat.shape == (5, 5, 5, 5) assert axis is None or axis == 1 ret = np.zeros(dat.shape) if is_ascend: indices = np.arange(k) else: indices = np.arange(-1, -k-1, -1) gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap') if axis is None: ret.ravel()[gt_argsort] = 1 else: for i in range(5): for j in range(5): for k in range(5): ret[i, gt_argsort[i, :, j, k], j, k] = 1 return ret dshape = (5, 5, 5, 5) a_npy = np.arange(np.prod(dshape)).astype(np.float32) np.random.shuffle(a_npy) a_npy = a_npy.reshape(dshape) a = mx.sym.Variable('a') def get_large_matrix(): data = np.array([np.arange(300096).astype(np.float32)]) data = np.repeat(data, 100, axis=0) np.apply_along_axis(np.random.shuffle, 1, data) return data large_matrix_npy = get_large_matrix() for axis in [1, 3, None]: for is_ascend in [True, False]: b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend) if axis is None: out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend) else: out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend) check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx) check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy]) b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5) check_symbolic_backward(sym=b, location={'a': large_matrix_npy}, out_grads=[np.random.normal(size=(100, 5))], expected=[np.zeros((100, 300096))]) check_symbolic_forward(b, location={'a': large_matrix_npy}, expected=[gt_topk(dat=large_matrix_npy, axis=1, ret_typ="indices", k=5, is_ascend=is_ascend)]) b = mx.sym.argsort(a, axis=1, is_ascend=False) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5, is_ascend=False)]) b = mx.sym.argmax(a, axis=1, keepdims=True) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1, is_ascend=False)]) b = mx.sym.argmin(a, axis=1, keepdims=True) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1, is_ascend=True)]) for dtype in [np.float16, np.float32, np.float64]: dshape = (5, 5, 5, 5) a_npy = np.arange(np.prod(dshape)).astype(dtype) np.random.shuffle(a_npy) a_npy = a_npy.reshape(dshape) a = mx.sym.Variable('a') for axis in [1, 3, None]: K = [1, 3, 5, 7] if axis is None else [1, 3, 5] for k in K: for is_ascend in [True, False]: b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k) out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend) check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx) check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy]) b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5) check_symbolic_backward(sym=b, 
location={'a': large_matrix_npy}, out_grads=[np.random.normal(size=(100, 5))], expected=[np.zeros((100, 300096))]) check_symbolic_forward(b, location={'a': large_matrix_npy}, expected=[gt_topk(dat=large_matrix_npy, axis=1, ret_typ="indices", k=5, is_ascend=is_ascend)]) b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 3))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3, is_ascend=False)]) b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3, is_ascend=True)]) def test_blockgrad(): a = mx.sym.Variable('a') b = mx.sym.BlockGrad(a) exe = b._simple_bind(ctx=default_context(), a=(10, 10)) a_npy = np.random.rand(10, 10) exe.forward(is_train=True, a=a_npy) assert_almost_equal(exe.outputs[0], a_npy) exe.backward() # No error if BlockGrad works def test_take_autograd_req(): row_len = 2 col_len = 8 shape = (row_len, col_len) sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32") sc.attach_grad() i = mx.nd.array([0], dtype="int64") j = mx.nd.array([0], dtype="int64") with mx.autograd.record(train_mode=True): xs = [] for _ in range(row_len): x_i = [] for _ in range(col_len): x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0) x_i.append(x_ij) j = j + 1 i = i + 1 j = j - col_len # reset j xs.append(mx.nd.stack(*x_i)) x = mx.nd.stack(*xs) x = x.sum() x.backward() assert_almost_equal(np.ones(sc.grad.shape), sc.grad) @pytest.mark.parametrize('mode,out_of_range', [ ('clip', True), ('wrap', True), ('raise', False) ]) @pytest.mark.parametrize('data_ndim', range(1, 5)) @pytest.mark.parametrize('idx_ndim', range(1, 4)) def test_take(mode, out_of_range, data_ndim, idx_ndim): def grad_helper(grad_in, axis, idx): if axis == 0: if axis == len(grad_in.shape) - 1: grad_in[idx] += 1.0 else: grad_in[idx, :] += 1.0 elif axis == 1: if axis == len(grad_in.shape) - 1: grad_in[:, idx] += 1.0 else: grad_in[:, idx, :] += 1.0 elif axis == 2: if axis == len(grad_in.shape) - 1: grad_in[:, :, idx] += 1.0 else: grad_in[:, :, idx, :] += 1.0 elif axis == 3: if axis == len(grad_in.shape) - 1: grad_in[:, :, :, idx] += 1.0 else: grad_in[:, :, :, idx, :] += 1.0 elif axis == 4: grad_in[:, :, :, :, idx] += 1.0 else: raise ValueError("axis %d is not supported..." 
% axis) for axis in range(-data_ndim, data_ndim): data_shape = () for _ in range(data_ndim): data_shape += (np.random.randint(low=1, high=5), ) idx_shape = () for _ in range(idx_ndim): idx_shape += (np.random.randint(low=1, high=5), ) data = mx.sym.Variable('a') idx = mx.sym.Variable('indices') idx = mx.sym.BlockGrad(idx) result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode) exe = result._simple_bind(default_context(), a=data_shape, indices=idx_shape) data_real = np.random.normal(size=data_shape).astype('float32') if out_of_range: idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape) if mode == 'raise': idx_real[idx_real == 0] = 1 idx_real *= data_shape[axis] else: idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape) if axis < 0: axis += len(data_shape) grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32') grad_in = np.zeros(data_shape, dtype='float32') exe.arg_dict['a'][:] = mx.nd.array(data_real) exe.arg_dict['indices'][:] = mx.nd.array(idx_real) exe.forward(is_train=True) if out_of_range and mode == 'raise': try: mx_out = exe.outputs[0].asnumpy() except MXNetError as e: return else: # Did not raise exception assert False, "did not raise %s" % MXNetError.__name__ assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode)) for i in np.nditer(idx_real): if mode == 'clip': i = np.clip(i, 0, data_shape[axis]) grad_helper(grad_in, axis, i) exe.backward([mx.nd.array(grad_out)]) assert_almost_equal(exe.grad_dict['a'], grad_in) def test_grid_generator(): # transform_type = affine test_case = [(20,21),(4,3),(6,12),(15,17)] for target_shape in test_case: affine_matrix = mx.sym.Variable('affine') grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape) exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write') # check forward exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]]) exe.forward(is_train=True) output = exe.outputs[0] output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0 output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0 xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1])) assert_almost_equal(output[0,0], yv.T) assert_almost_equal(output[0,1], xv.T) # check backward out_grad = np.random.normal(size=(1,2)+target_shape) exe.backward(mx.nd.array(out_grad)) tmp = np.zeros((3,target_shape[0]*target_shape[1])) tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1)) tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1)) tmp[2] = 1 grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6) assert_almost_equal(exe.grad_dict['affine'], grad_est) # check addto exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add') grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape) exe.grad_dict['affine'][:] = grid_grad_npy exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]]) exe.forward(is_train=True) exe.backward(mx.nd.array(out_grad)) assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy) # transform_type = warp test_case = [(12,21),(4,3),(6,12)] for target_shape in test_case: flow = mx.sym.Variable('flow') grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape) 
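        # --- Editor's note (illustrative sketch, not part of the original test suite): the
        # affine gradient check earlier in this test follows from the generator being a single
        # matrix product.  Writing the 2x3 affine parameters as A and the homogeneous
        # normalized target coordinates as P (3 x H*W, rows = [x_norm, y_norm, 1], i.e. the
        # `tmp` array built above), the grid is G = A @ P, so dL/dA = dL/dG @ P.T -- which is
        # what the `np.dot(out_grad[0].reshape(2, H*W), tmp.T)` estimate above computes.
        # The helper name below is hypothetical and the function is never called by the tests.
        def _affine_grid_and_grad(A, P, dG):
            # A: (2, 3) affine parameters; P: (3, H*W) homogeneous normalized coordinates;
            # dG: (2, H*W) upstream gradient w.r.t. the generated grid.
            G = A @ P          # forward: generated (2, H*W) sampling grid
            dA = dG @ P.T      # backward: (2, 3) gradient w.r.t. the affine parameters
            return G, dA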
        exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
        # check forward
        exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
        exe.forward(is_train=True)
        output = exe.outputs[0].asnumpy()
        output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
        output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
        xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
        assert_almost_equal(output[0,0], yv.T)
        assert_almost_equal(output[0,1], xv.T)
        # check backward
        out_grad = np.random.normal(size=(1,2)+target_shape)
        exe.backward(mx.nd.array(out_grad))
        grad_est = np.zeros((1,2)+target_shape)
        grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
        grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
        assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
        # check addto
        exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
        flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
        exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
        exe_add.grad_dict['flow'][:] = flow_grad_npy
        exe_add.forward(is_train=True)
        exe_add.backward(mx.nd.array(out_grad))
        assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)


def test_index2d():
    for _ in range(30):
        n = np.random.randint(1, 100)
        m = np.random.randint(1, 500)
        data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
        x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
        r = mx.nd.batch_take(data, x)
        assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])


def test_cast():
    for srctype in [np.int32, np.float32, np.float16]:
        for dsttype in [np.float32, np.int32, np.float16]:
            x = mx.sym.Variable('x', dtype=srctype)
            y = mx.sym.Cast(x, dtype=dsttype)
            exe = y._simple_bind(ctx=default_context(), x=(10, 10))
            assert exe.arg_arrays[0].dtype == srctype
            X = np.random.uniform(-10, 10, size=(10, 10))
            exe.arg_arrays[0][:] = X
            exe.forward(is_train=True)
            assert exe.outputs[0].dtype == dsttype
            exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
            assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
            assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)


def get_cast_op_data():
    FP16_FRACTION_BITS = 10
    FP32_FRACTION_BITS = 23
    FP32_EXP_MIN = -126
    FP32_EXP_MAX = 127
    # generate test cases in the vicinity of representable float16 mantissas
    # and mid-way between them, but over the full range of float32 exponents.
    for sign_bit in [0, 1]:
        for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
            denominator = 2**(FP16_FRACTION_BITS + 1)
            for numerator in range(0, denominator):
                fraction = numerator / float(denominator)
                for y in [-1.0, 0.0, 1.0]:
                    small_delta = y / 2**FP32_FRACTION_BITS
                    val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
                    yield val
    # Add np.nan as a final data value to process
    yield np.nan

# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
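# --- Editor's illustration (not part of the original MXNet tests): a minimal numpy-only
# sketch of the round-to-nearest-even tie-breaking that the comment above refers to and
# that the following test assumes.  2049.0 lies exactly between the adjacent float16
# values 2048.0 and 2050.0 and must round to the even mantissa (2048.0); 2051.0 lies
# between 2050.0 and 2052.0 and must round up to 2052.0.  The helper name is hypothetical
# and the function is not invoked by the test suite.
def _demo_fp16_round_to_nearest_even():
    assert np.float32(2049.0).astype(np.float16) == np.float16(2048.0)
    assert np.float32(2051.0).astype(np.float16) == np.float16(2052.0)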
def test_cast_float32_to_float16(): input_np = np.array(list(get_cast_op_data())).astype(np.float32) # The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed # as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722 expected_output = input_np.astype(np.float64).astype(np.float16) def check_cast(op, input_np, expected_output): x = mx.sym.Variable('x', dtype=np.float32) sym = op(x, dtype=np.float16) ctx = default_context() exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)}) assert exe.arg_arrays[0].dtype == np.float32 exe.forward(is_train=True) assert exe.outputs[0].dtype == np.float16 sym_output = exe.outputs[0].asnumpy() for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output): assert (model_fp16_val == np_fp16_val) or \ (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \ 'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format( fp32_val, model_fp16_val, np_fp16_val) check_cast(mx.sym.Cast, input_np, expected_output) if default_context().device_type == 'gpu': check_cast(mx.sym.amp_cast, input_np, expected_output) def test_amp_multicast(): if default_context().device_type == 'cpu': return x = mx.sym.Variable('x', dtype=np.float16) y = mx.sym.Variable('y', dtype=np.float32) z = mx.sym.Variable('z', dtype=np.float16) ctx = default_context() res = mx.sym.amp_multicast(x, y, z, num_outputs=3) exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx), 'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx), 'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)}) exe.forward(is_train=True) out1, out2, out3 = exe.outputs assert out1.asnumpy().dtype == np.float32 assert out2.asnumpy().dtype == np.float32 assert out3.asnumpy().dtype == np.float32 def check_amp_multicast(input_np, expected_output): x = mx.sym.Variable('x', dtype=np.float16) y = mx.sym.Variable('y', dtype=np.float32) z = mx.sym.Variable('z', dtype=np.float16) ctx = default_context() res = mx.sym.amp_multicast(x, y, z, num_outputs=3) exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx), 'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx), 'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)}) exe.forward(is_train=True) sym_output = exe.outputs[0].asnumpy() for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output): assert (model_fp16_val == np_fp16_val) or \ (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \ 'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format( fp32_val, model_fp16_val, np_fp16_val) input_np = np.array(list(get_cast_op_data()), dtype=np.float16) expected_output = input_np.astype(np.float32) check_amp_multicast(input_np, expected_output) def test_all_finite(): data = mx.sym.Variable("data", dtype=np.float32) data2 = mx.sym.Variable("data2", dtype=np.float32) finite_arr = mx.nd.array([[0, 0]]) inf_arr = mx.nd.array([[np.inf, np.inf]]) z = mx.sym.all_finite(data) ctx = default_context() exe = z._bind(ctx, {'data': inf_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] == 0 exe = z._bind(ctx, {'data': finite_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] == 1 z = mx.sym.multi_all_finite(data, data2, num_arrays=2) exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] 
== 0 z = mx.sym.multi_all_finite(data, data2, num_arrays=2) exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] == 1 def test_repeat(): def test_repeat_forward(): ndim_max = 6 # max number of dims of the ndarray size_max = 10 # max number of elements in each dim repeats = 3 for ndim in range(1, ndim_max+1): shape = () for i in range(0, ndim): shape += (np.random.randint(1, size_max+1), ) a = np.random.random_sample(size=shape) aa = np.repeat(a, repeats) b = mx.nd.array(a, ctx=default_context()) bb = mx.nd.repeat(b, repeats) assert_almost_equal(aa, bb) for axis in range(0, ndim): aa = np.repeat(a, repeats, axis) bb = mx.nd.repeat(b, repeats, axis) assert_almost_equal(aa, bb) def test_repeat_backward(axis): data = mx.sym.Variable('data') n1 = 3 n2 = 4 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) repeats = 2 test = mx.sym.repeat(data, repeats=repeats, axis=axis) exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad]) npout_grad = np.random.randint(0, 10, n1 * n2 * repeats) if axis == 0: npout_grad = npout_grad.reshape(n1 * repeats, n2) elif axis == 1: npout_grad = npout_grad.reshape(n1, n2 * repeats) else: raise RuntimeError("Invalid axis value") out_grad = mx.nd.array(npout_grad) exe.backward(out_grad) expected_grad = np.zeros(shape) if axis == 0: for i in range(shape[0]): for j in range(shape[1]): k = i * repeats expected_grad[i][j] = sum(npout_grad[k:k + repeats, j]) elif axis == 1: for j in range(shape[1]): for i in range(shape[0]): k = j * repeats expected_grad[i][j] = sum(npout_grad[i, k:k + repeats]) else: raise RuntimeError("Invalid axis value") assert_almost_equal(expected_grad, arr_grad, rtol=1e-3) def test_repeat_numeric_gradient(): data = mx.sym.Variable('data') n1 = 3 n2 = 4 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) repeats = 2 test = mx.sym.repeat(data, repeats=repeats, axis=0) check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2) test_repeat_forward() test_repeat_backward(axis=0) test_repeat_backward(axis=1) test_repeat_numeric_gradient() def test_reverse(): data = mx.symbol.Variable('data') shape = (5, 5, 5) data_tmp = np.random.uniform(-1, 1, shape) test = mx.sym.reverse(data, axis=[1, 2]) grad = np.random.uniform(-1, 1, shape) check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2) check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]]) check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]]) def test_tile(): def test_normal_case(): ndim_min = 1 ndim_max = 5 # max number of dims of the ndarray size_max = 10 # max number of elements in each dim length_max = 3 # max length of reps rep_max = 10 # max number of tiling in each dim for ndim in range(ndim_min, ndim_max+1): shape = [] for i in range(1, ndim+1): shape.append(np.random.randint(1, size_max+1)) shape = tuple(shape) a = np.random.randint(0, 100, shape) b = mx.nd.array(a, dtype=a.dtype) reps_len = np.random.randint(1, length_max+1) reps_tuple = () for i in range(1, reps_len): reps_tuple += (np.random.randint(1, rep_max), ) reps_array = np.asarray(reps_tuple) a_tiled = np.tile(a, reps_array) b_tiled = mx.nd.tile(b, reps_tuple).asnumpy() assert same(a_tiled, b_tiled) def test_empty_tensor(): shape = (2, 3, 0, 4) with mx.np_shape(): a = np.array([], dtype=np.int32).reshape(shape) b = mx.nd.array(a, ctx=default_context(), 
dtype=a.dtype) reps = (2, 4, 6) a_tiled = np.tile(a, reps) b_tiled = mx.nd.tile(b, reps).asnumpy() assert same(a_tiled, b_tiled) def test_empty_reps(): a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32) b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype) a_tiled = np.tile(a, ()) b_tiled = mx.nd.tile(b, ()).asnumpy() assert same(a_tiled, b_tiled) def test_tile_backward(): data = mx.sym.Variable('data') n1 = 2 n2 = 2 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) reps1 = 2 reps2 = 2 reps = (reps1, reps2) test = mx.sym.tile(data, reps=reps) exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad]) npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2) out_grad = mx.nd.array(npout_grad) exe.backward(out_grad) expected_grad = np.zeros(shape) for i in range(shape[0]): for j in range(shape[1]): expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2])) assert_almost_equal(expected_grad, arr_grad, rtol=1e-3) def test_tile_numeric_gradient(): data = mx.sym.Variable('data') n1 = 2 n2 = 2 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) reps1 = 2 reps2 = 2 reps = (reps1, reps2) test = mx.sym.tile(data, reps=reps) check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2) def test_invalid_reps(): data = mx.nd.arange(16).reshape((4, 4)) assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3)) assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3)) test_normal_case() with mx.np_shape(): test_empty_tensor() test_empty_reps() test_tile_backward() test_tile_numeric_gradient() test_invalid_reps() def test_one_hot(): def test_normal_case(index_type=np.int32): ndim_max = 6 dim_size_max = 20 depth = int(dim_size_max / 2) on_value = 1 off_value = 0 for ndim in range(1, ndim_max+1): shape = () for i in range(1, ndim+1): shape += (np.random.randint(1, dim_size_max+1), ) indices = np.random.randint(-dim_size_max, dim_size_max+1, size=np.prod(shape)).reshape(shape) mx_one_hot_array = mx.nd.one_hot( mx.nd.array(indices, ctx=default_context(), dtype=index_type), depth=depth, dtype=np.int32) expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32) expected_array[:] = off_value indices_1d = indices.flatten() row = 0 for idx in indices_1d: if 0 <= idx < depth: expected_array[row, idx] = on_value row += 1 expected_array = expected_array.reshape(shape + (depth, )) one_hot_array = mx_one_hot_array.asnumpy() assert same(expected_array, one_hot_array) def test_empty_indices(): shape = (2, 0, 9, 3) with mx.np_shape(): indices = np.array([]).reshape(shape) depth = 10 mx_one_hot_array = mx.nd.one_hot( mx.nd.array(indices, ctx=default_context(), dtype=np.int32), depth=depth, dtype=np.int32 ).asnumpy() expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,)) assert same(expected_array, mx_one_hot_array) def test_zero_depth(): shape = (2, 4, 9, 3) indices = np.ones(shape) depth = 0 mx_one_hot_array = mx.nd.one_hot( mx.nd.array(indices, ctx=default_context(), dtype=np.int32), depth=depth, dtype=np.int32).asnumpy() expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, )) assert same(expected_array, mx_one_hot_array) test_normal_case(index_type=np.int32) test_normal_case(index_type=np.float64) test_normal_case(index_type=np.float32) test_normal_case(index_type=np.float16) with mx.np_shape(): test_empty_indices() test_zero_depth() def test_where(): def 
get_forward_expected_output(condition, x, y): original_shape = x.shape out = np.zeros(original_shape) if condition.shape == x.shape: for index, c in np.ndenumerate(condition): if c != 0: out[index] = x[index] else: out[index] = y[index] elif condition.shape == (x.shape[0], ): s = x.shape m = s[0] n = int(np.prod(s)/s[0]) x2d = x.reshape((m, n)) y2d = y.reshape((m, n)) out = out.reshape((m, n)) for i in range(0, m): if condition[i] != 0: for j in range(0, n): out[i, j] = x2d[i, j] else: for j in range(0, n): out[i, j] = y2d[i, j] else: raise RuntimeError("Invalid condition shape for where op") out = out.reshape(original_shape) return out def get_forward_inputs_same_shape(shape): condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape) x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape) y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape) return condition_np, x_np, y_np def get_forward_inputs_condition_vector(shape): condition_np = np.random.randint(0, 2, shape[0]) x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape) y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape) return condition_np, x_np, y_np def get_backward_input(shape): return np.random.randint(20, 30, np.prod(shape)).reshape(shape) def get_backward_expected_outputs(grad_in, condition): shape = grad_in.shape grad_cond = np.zeros(condition.shape) grad_x = np.empty(shape) grad_y = np.empty(shape) for index, c in np.ndenumerate(condition): if 0 != c: grad_x[index] = grad_in[index] grad_y[index] = 0 else: grad_x[index] = 0 grad_y[index] = grad_in[index] return grad_cond, grad_x, grad_y def test_where_helper(shape, same_shape): if same_shape: condition_np, x_np, y_np = get_forward_inputs_same_shape(shape) else: condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape) out_expected = get_forward_expected_output(condition_np, x_np, y_np) grad_in_np = get_backward_input(shape) grad_expected_cond, grad_expected_x, grad_expected_y\ = get_backward_expected_outputs(grad_in_np, condition_np) condition = mx.sym.Variable('condition') x = mx.sym.Variable('x') y = mx.sym.Variable('y') grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int) where_sym = mx.sym.where(condition, x, y) # test req='write' where_exe_write = where_sym._simple_bind(ctx=default_context(), condition=condition_np.shape, x=x_np.shape, y=y_np.shape, grad_req='write') # test forward req='write' outputs = where_exe_write.forward(is_train=True, condition=condition_np, x=x_np, y=y_np) assert same(outputs[0].asnumpy(), out_expected) # test backward req='write' where_exe_write.backward(grad_in_mx.astype('float32')) assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x) assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y) assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond) # test req='add' x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape) y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape) where_exe_add = where_sym._simple_bind(ctx=default_context(), condition=condition_np.shape, x=x_np.shape, y=y_np.shape, grad_req='add') where_exe_add.grad_dict['x'][:] = x_grad_init where_exe_add.grad_dict['y'][:] = y_grad_init # test forward req='add' outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np) assert same(outputs[0].asnumpy(), out_expected) # test backward req='add' where_exe_add.backward(grad_in_mx.astype('float32')) x_ograd = where_exe_add.grad_dict['x'].asnumpy() y_ograd = 
where_exe_add.grad_dict['y'].asnumpy() assert same(x_ograd, grad_expected_x+x_grad_init) assert same(y_ograd, grad_expected_y+y_grad_init) def test_where_numeric_gradient(shape, same_shape): condition = mx.sym.Variable('condition') x = mx.sym.Variable('x') y = mx.sym.Variable('y') where_sym = mx.sym.where(condition, x, y) if same_shape: condition_np, x_np, y_np = get_forward_inputs_same_shape(shape) else: condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape) check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y']) def test_invalid_shape(): condition = mx.sym.Variable('condition') x = mx.sym.Variable('x') y = mx.sym.Variable('y') where_sym = mx.sym.where(condition, x, y) assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]), y=mx.nd.array([[8,9],[10,11],[12,13]]), condition=mx.nd.array([1,0])), MXNetError) assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]), y=mx.nd.array([[8,9],[10,11],[12,13]]), condition=mx.nd.array([1,0])), MXNetError) def test_1d_cond(): cond = mx.nd.array([1, 0, 1]) x = mx.nd.array([[2, 3], [4, 5], [6, 7]]) y = mx.nd.array([[7, 8], [9, 10], [10, 11]]) expect_out = np.array([[2, 3], [9, 10], [6, 7]]) out = mx.nd.where(cond, x, y).asnumpy() assert(expect_out.all() == out.all()) test_where_helper((5, 9), True) test_where_helper((5, 9), False) test_where_helper((5, 7, 9), True) test_where_helper((5, 7, 9), False) test_where_helper((10, 8, 15, 3), True) test_where_helper((10, 8, 15, 3), False) test_where_numeric_gradient((5, 9), True) test_where_numeric_gradient((5, 9), False) test_where_numeric_gradient((5, 7, 9), True) test_where_numeric_gradient((5, 7, 9), False) test_invalid_shape() test_1d_cond() def test_softmin(): for ndim in range(1, 5): for dtype in [np.float16, np.float32, np.float64]: rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3) shape = np.random.randint(1, 5, size=ndim) axis = np.random.randint(-ndim, ndim) data = np.random.uniform(-2, 2, size=shape).astype(dtype) data = data / 10 if dtype is np.float16 else data sym = mx.sym.softmin(axis=axis) expected_fwd = np_softmax(-data, axis=axis) expected_bwd = np.zeros(shape) check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype) for req in ['null', 'add', 'write']: check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd], rtol=rtol, atol=atol, grad_req=req, dtype=dtype) if dtype is not np.float16: check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype) def test_new_softmax(): for ndim in range(1, 5): shape = np.random.randint(1, 5, size=ndim) axis = np.random.randint(-ndim, ndim) data = np.random.uniform(-2, 2, size=shape) sym = mx.sym.softmax(axis=axis) expected_fwd = np_softmax(data, axis=axis) expected_bwd = np.zeros(shape) check_symbolic_forward(sym, [data], [expected_fwd]) for req in ['null', 'add', 'write']: check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd], rtol=1e-2, atol=1e-3, grad_req=req) check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3) def test_softmax_with_temperature(): for ndim in range(1, 5): shape = np.random.randint(1, 5, size=ndim) data = np.random.uniform(-2, 2, size=shape) for temp in range(1, 11): sym = mx.sym.softmax(axis=0, temperature=temp) expected_fwd = np_softmax(data, axis=0, temperature=temp) expected_bwd = np.zeros(shape) check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3) check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, 
atol=1e-3) check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3) def test_log_softmax(): for ndim in range(1, 5): for _ in range(5): shape = np.random.randint(1, 5, size=ndim) axis = np.random.randint(0, ndim) data = np.random.uniform(-2, 2, size=shape) sym = mx.sym.log_softmax(axis=axis-ndim) check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4) check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2) def test_softmax_with_large_inputs(): def softmax_forward(input_data, true_output): data = mx.sym.Variable('data') out1 = data.softmax(axis=1) exec1 = out1._bind(default_context(), args={'data': input_data}) exec1.forward()[0].wait_to_read() ndarr = exec1.outputs[0][0][0][0] assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5) softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0])) @with_environment('MXNET_SAFE_ACCUMULATION', '1') def test_softmax_dtype(): def check_dtypes_almost_equal(op_name, atol, rtol, grad_atol, grad_rtol, idtype, ref_dtype, odtype=None): op = getattr(mx.nd, op_name) input_data = mx.random.uniform(shape=(100, 500)) dtype_input = input_data.astype(idtype) ref_input = input_data.astype(ref_dtype) dtype_input.attach_grad() ref_input.attach_grad() with mx.autograd.record(): dtype_softmax = op(dtype_input, axis=-1, dtype=odtype) ref_softmax = op(ref_input, axis=-1, dtype=odtype) assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol) dtype_softmax.backward() ref_softmax.backward() assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol) check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32') check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32') check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64') check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64') check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2, 'float16', 'float32') check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2, 'float16', 'float32', 'float32') check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3, 'float32', 'float64') check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3, 'float32', 'float64', 'float64') def test_softmax_with_length(): def np_softmax_with_length(data, length): res = np.zeros(data.shape) for i in range(length.shape[0]): for j in range(length.shape[1]): leng = int(length[i, j]) res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j]) return res ndim = 3 shape = rand_shape_nd(ndim, dim=10) len_shape = list(shape) del len_shape[1] len_shape = tuple(len_shape) for dtype in [np.float16, np.float32, np.float64]: mx_data = rand_ndarray(shape, dtype=dtype) np_data = mx_data.asnumpy() np_length = np.random.randint(1, shape[1] + 1, len_shape) mx_length = mx.nd.array(np_length, dtype=np.int32) np_out = np_softmax_with_length(np_data, np_length) data = 
mx.sym.Variable("data") length = mx.sym.Variable("length") mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1) location = {"data": mx_data, "length": mx_length} rtol = 1e-2 if dtype == np.float16 else 1e-3 atol = 1e-4 if dtype == np.float16 else 1e-5 check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy") check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)], [np.zeros(shape), np.zeros(len_shape, dtype=np.int32)], rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy") def test_pick(): def test_pick_helper(index_type=np.int32): for mode in ['clip', 'wrap']: ndim = np.random.randint(1, 5) bshape = np.random.randint(1, 10, size=ndim) axis = np.random.randint(0, ndim) sshape = bshape.copy() sshape[axis] = 1 data = np.random.uniform(-1, 1, size=bshape) if mode == 'wrap': index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape) else: index = np.random.randint(0, bshape[axis], size=sshape) exp = [] for i in range(ndim): if i == axis: if mode == 'wrap': exp.append(index % bshape[axis]) else: exp.append(index) else: ishape = [1 for _ in range(ndim)] ishape[i] = bshape[i] exp.append(np.arange(bshape[i]).reshape(ishape)) expected = data[exp] data = mx.nd.array(data, dtype='float32') index = mx.nd.array(index, dtype=index_type) out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode) assert_almost_equal(out.asnumpy(), expected) data_holder = data index_holder = index data = mx.sym.Variable('data') index = mx.sym.Variable('index') sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode) check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data']) test_pick_helper(np.int32) test_pick_helper(np.float32) def check_ctc_loss(acts, labels, loss_truth, contrib=False): in_var = mx.sym.Variable('input') labels_var = mx.sym.Variable('labels') if contrib: ctc = mx.sym.contrib.ctc_loss(in_var, labels_var) else: ctc = mx.sym.ctc_loss(in_var, labels_var) acts_nd = mx.nd.array(acts, ctx=default_context()) labels_nd = mx.nd.array(labels, ctx=default_context()) exe = ctc._bind(ctx=default_context(), args=[acts_nd, labels_nd]) # test forward with grad calc exe.forward(is_train=True) outTest = exe.outputs[0].copy() # test forward without grad calc exe.forward(is_train=False) outTrain = exe.outputs[0] # make sure losses calculated with both modes are the same assert_almost_equal(outTest, outTrain) # test against ground truth, if available if loss_truth is not None: assert_almost_equal(outTest, loss_truth) # test grad check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3) def test_ctc_loss(): # Test 1: check that batches are same + check against Torch WarpCTC acts = np.array([ [[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]], [[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]], [[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]], dtype=np.float32) labels = np.array([[2, 3, 0], [2, 3, 0]]) true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch for contrib in [False, True]: check_ctc_loss(acts, labels, true_loss, contrib=contrib) # Test 2: acts2 = np.array([ [[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]], [[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]], [[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32) labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32) true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch for contrib in [False, True]: 
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib) # Test 3: check use integer type as label labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32) true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch for contrib in [False, True]: check_ctc_loss(acts2, labels3, true_loss, contrib=contrib) def test_ctc_loss_with_large_classes(): ctx = default_context() num_classes = 6000 seq_len = 8 batch_size = 2 data = np.empty((num_classes, 0)) for i in range(seq_len * batch_size) : row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1) data = np.append(data, row/13, axis=1) data = data.reshape(seq_len, batch_size, num_classes) label = np.array([ [100, 200, 300, 400, 500, 0, 0, 0], [1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32) nd_data = mx.nd.array(data) nd_label = mx.nd.array(label) loss = mx.nd.ctc_loss(data=nd_data, label=nd_label) expected_loss = np.array([688.02826, 145.34462]) assert_almost_equal(loss, expected_loss) def test_ctc_loss_grad(): def check_ctc_loss_grad(blank_label, contrib=False): # from tf vocab_size = 5 max_label_len = 5 padding_mask = -1+ (blank_label=='first') targets_0 = [0, 1, 2, 1, 0] loss_log_prob_0 = -3.34211 input_prob_matrix_0 = np.asarray( [[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688], [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533], [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]], dtype=np.float32) gradient_log_prob_0 = np.asarray( [[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436], [0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688], [0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533], [-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]], dtype=np.float32) targets_1 = [0, 1, 1, 0] loss_log_prob_1 = -5.42262 input_prob_matrix_1 = np.asarray( [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508], [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549], [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456], [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345], [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]], dtype=np.float32) gradient_log_prob_1 = np.asarray( [[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508], [0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549], [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544], [0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345], [-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]], dtype=np.float32) inputs = [ np.vstack( [input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]]) for t in range(5) ] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)] inputs = np.log(np.asarray(inputs, dtype=np.float32)) grad_truth = np.array([ np.vstack( [gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]]) for t in range(5) ] + 2 * [np.zeros((2, vocab_size+1), np.float32)]) if blank_label == 'first': inputs = np.roll(inputs, 1, axis=2) grad_truth = np.roll(grad_truth, 1, axis=2) labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x)) for x in [targets_0, targets_1]])+(blank_label == 'first')) seq_lens = np.array([5, 5], dtype=np.int32) label_lens = np.array([5, 4], dtype=np.int32) loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], 
np.float32) with default_context(): data = mx.nd.array(inputs) label = mx.nd.array(labels) data.attach_grad() with mx.autograd.record(): if contrib: l = mx.contrib.ndarray.CTCLoss(data, label, use_data_lengths=True, use_label_lengths=True, data_lengths=mx.nd.array(seq_lens), label_lengths=mx.nd.array(label_lens), blank_label=blank_label) else: l = mx.ndarray.CTCLoss(data, label, use_data_lengths=True, use_label_lengths=True, data_lengths=mx.nd.array(seq_lens), label_lengths=mx.nd.array(label_lens), blank_label=blank_label) l.backward() assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5) assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5) for contrib in [False, True]: for label in ['first', 'last']: check_ctc_loss_grad(label, contrib=contrib) def test_quantization_op(): min0 = mx.nd.array([0.0]) max0 = mx.nd.array([1.0]) a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]]) qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8') a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32') qa_real = mx.nd.array([[18, 75], [77, 109]]) a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]]) print(a_.asnumpy()) print(a_real.asnumpy()) assert same(qa.asnumpy(), qa_real.asnumpy()) assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2) def test_index_copy(): x = mx.nd.zeros((5,3)) t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]]) index = mx.nd.array([0,4,2], dtype=np.int64) tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]]) x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]]) t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]]) t.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.index_copy(x, index, t) out.backward() assert same(out.asnumpy(), tensor.asnumpy()) assert same(t.grad.asnumpy(), t_grad.asnumpy()) x.attach_grad() t.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.index_copy(x, index, t) out.backward() assert same(out.asnumpy(), tensor.asnumpy()) assert same(x.grad.asnumpy(), x_grad.asnumpy()) assert same(t.grad.asnumpy(), t_grad.asnumpy()) def test_boolean_mask(): data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]]) index = mx.nd.array([0, 1, 0]) data.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.boolean_mask(data, index) out.backward() data.grad.wait_to_read() expected = np.array([[4, 5, 6]]) expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]]) assert same(out.asnumpy(), expected) assert same(data.grad.asnumpy(), expected_grad) # test 0-size output mx.set_np_shape(True) data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]]) index = mx.nd.array([0, 0, 0]) data.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.boolean_mask(data, index) out.backward() data.grad.wait_to_read() expected = np.zeros((0, 3)) expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) assert same(out.asnumpy(), expected) assert same(data.grad.asnumpy(), expected_grad) mx.set_np_shape(False) # test gradient shape = (100, 30) a = mx.nd.random.randint(0, 100, shape=shape) a.attach_grad() bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50 ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50 mx_grad = mx.nd.zeros_like(a) mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add') T = 3 for _ in range(T): with mx.autograd.record(): b = mx.nd.contrib.boolean_mask(a, bi) c = mx.nd.contrib.boolean_mask(a, ci) su = b.sum() + c.sum() su.backward() grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1)) grad = np.tile(grad, (1,) + shape[1:]) # T times grad *= T 
assert_allclose(a.grad.asnumpy(), grad) a_np = a.asnumpy() assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')]) assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')]) def test_div_sqrt_dim(): data_tmp = np.random.normal(0, 1, (5, 10, 8)) data = mx.symbol.Variable('data') test = mx.sym.contrib.div_sqrt_dim(data) check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2) check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])]) # helper function to identify inputs likely to fail check_numeric_gradient tol test # due to finite difference method inaccuracies or function discontuities at the origin def bad_input_finder(f, f_grad, dtype): eps = default_numeric_eps()[np.dtype(dtype)] rtol = default_rtols()[np.dtype(dtype)] def expected_relative_error(x): fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps return abs(fd_gradient/f_grad(x) - 1) def is_fd_problem_input(x): return abs(x) < eps/2 or expected_relative_error(x) > rtol return np.vectorize(is_fd_problem_input) def test_reciprocal_op(): data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5 # Avoid possible division by 0 errors and finite difference method # inaccuracies by replacing problem inputs with 1.0. is_bad_input = bad_input_finder(np.reciprocal, lambda x: -np.reciprocal(x)**2, np.float32) data_tmp[is_bad_input(data_tmp)] = 1.0 data = mx.symbol.Variable('data') test = mx.sym.reciprocal(data) check_numeric_gradient(test, [data_tmp]) check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)]) def test_cbrt_op(): data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5 # Avoid possible division by 0 errors and finite difference method # inaccuracies by replacing problem inputs with 1.0. is_bad_input = bad_input_finder(np.cbrt, lambda x: 1./(3 * np.cbrt(x)**2), np.float32) data_tmp[is_bad_input(data_tmp)] = 1.0 data = mx.symbol.Variable('data') test = mx.sym.cbrt(data) check_numeric_gradient(test, [data_tmp]) check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)]) def test_rcbrt_op(): data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5 # Avoid possible division by 0 errors and finite difference method # inaccuracies by replacing problem inputs with 1.0. 
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x), lambda x: -1./(3 * np.cbrt(x)**4), np.float32) data_tmp[is_bad_input(data_tmp)] = 1.0 data = mx.symbol.Variable('data') test = mx.sym.rcbrt(data) check_numeric_gradient(test, [data_tmp]) check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)]) def test_custom_op(): class Sqr(mx.operator.CustomOp): def forward(self, is_train, req, in_data, out_data, aux): if in_data[0].stype == 'default': aux[0][:] = 1 self.assign(out_data[0], req[0], in_data[0]*in_data[0]) else: inp = in_data[0] csr_m = inp.data * inp.data out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape) self.assign(out_data[0], req[0], out) if (in_data[0].stype == 'csr'): assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray)) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0])) if in_data[0].stype == 'default': assert (aux[0].asnumpy() == 1).all() @mx.operator.register("sqr") class SqrProp(mx.operator.CustomOpProp): def __init__(self): super(SqrProp, self).__init__(need_top_grad=True) def list_arguments(self): return ['data'] def list_outputs(self): return ['output'] def list_auxiliary_states(self): return ['aux'] def infer_shape(self, in_shape): return in_shape, [in_shape[0]], [in_shape[0]] def infer_type(self, in_type): return in_type, [in_type[0]], [in_type[0]] def infer_storage_type(self, in_stype): if in_stype[0] == 'default': return ['default'], ['default'], ['default'] return ['csr'], ['csr'], ['csr'] def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype): if in_stype[0] == 'default': return ['default'], ['default'], ['default'], ['default'], ['default'] return ['default'], ['csr'], ['csr'], ['csr'], ['csr'] def create_operator(self, ctx, shapes, dtypes): return Sqr() data = mx.symbol.Variable('data') aux = mx.symbol.Variable('aux') op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr') x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10))) aux = mx.nd.zeros_like(x) check_numeric_gradient(op, [x], [aux]) data = mx.symbol.cast(data, dtype='float64') op = mx.symbol.cast(op, dtype='float32') check_numeric_gradient(op, [x], [aux]) data = mx.symbol.Variable('data', stype='csr') aux = mx.symbol.Variable('aux') op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr') x = x.tostype('csr') aux = mx.nd.zeros_like(x) check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"}) x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10))) x2 = x2.tostype('csr') aux2 = mx.nd.zeros_like(x2) x2.attach_grad() with mx.autograd.record(): output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr') output.backward() expected_output = mx.nd.sparse.square(x2) expected_grad = 2 * x2 rtol = 1e-4 atol = 1e-6 assert_almost_equal(output, expected_output, rtol=rtol, atol=atol) assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol) # test for backward compatibility, i.e. 
the correctness of default implementation of # infer storage in custom operator class Mult(mx.operator.CustomOp): def forward(self, is_train, req, in_data, out_data, aux): self.assign(out_data[0], req[0], in_data[0]*in_data[1]) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): self.assign(in_grad[0], req[0], in_data[1]) self.assign(in_grad[1], req[1], in_data[0]) @mx.operator.register("mult") class MultProp(mx.operator.CustomOpProp): def __init__(self): super(MultProp, self).__init__(need_top_grad=True) def list_arguments(self): return ['lhs', 'rhs'] def list_outputs(self): return ['output'] def infer_shape(self, in_shape): return in_shape, [in_shape[0]], [] def create_operator(self, ctx, shapes, dtypes): return Mult() lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10))) rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10))) lhs.attach_grad() rhs.attach_grad() with mx.autograd.record(): y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult') y.backward() assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol) assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol) class MultNoGrad(mx.operator.CustomOp): def forward(self, is_train, req, in_data, out_data, aux): self.assign(out_data[0], req[0], in_data[0]*in_data[1]) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): self.assign(in_grad[0], req[0], in_data[1]) self.assign(in_grad[1], req[1], in_data[0]) @mx.operator.register("mult_no_grad") class MultNoGradProp(mx.operator.CustomOpProp): def __init__(self): super(MultNoGradProp, self).__init__(need_top_grad=False) def list_arguments(self): return ['lhs', 'rhs'] def list_outputs(self): return ['output'] def infer_shape(self, in_shape): return in_shape, [in_shape[0]], [] def create_operator(self, ctx, shapes, dtypes): return MultNoGrad() def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype): return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype with mx.autograd.record(): y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad") y2.backward() assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol) assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol) class NoInputOp(mx.operator.CustomOp): def __init__(self, length, depth): super(NoInputOp, self).__init__() self.output = np.ones(shape=(length, depth), dtype=np.float32) def forward(self, is_train, req, in_data, out_data, aux): self.assign(out_data[0], req[0], self.output) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass @mx.operator.register("no_input_op") class NoInputOpProp(mx.operator.CustomOpProp): def __init__(self, length, depth): super(NoInputOpProp, self).__init__() self.length = int(length) self.depth = int(depth) def list_arguments(self): return [] def list_outputs(self): return ['output'] def infer_shape(self, in_shape): return [], [(self.length, self.depth)], [] def infer_type(self, in_type): return [], [np.float32], [] def create_operator(self, ctx, shapes, dtypes): return NoInputOp(length=self.length, depth=self.depth) with mx.autograd.record(): x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op") assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32)) @pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467") def test_custom_op_fork(): # test custom operator fork # see https://github.com/apache/incubator-mxnet/issues/14396 class AdditionOP(mx.operator.CustomOp): def __init__(self): super(AdditionOP, self).__init__() def 
forward(self, is_train, req, in_data, out_data, aux): out_data[0][:] = in_data[0] + in_data[1] def backward(self, req, out_grad, in_data, out_data, in_grad, aux): in_grad[0][:] = out_grad[0] in_grad[1][:] = out_grad[0] @mx.operator.register("AdditionOP") class AdditionOPProp(mx.operator.CustomOpProp): def __init__(self): super(AdditionOPProp, self).__init__() def list_arguments(self): return ['a', 'b'] def list_outputs(self): return ['output'] def infer_shape(self, in_shape): return in_shape, [in_shape[0]] def create_operator(self, ctx, shapes, dtypes): return AdditionOP() if not sys.platform.startswith('win'): # no fork in windows def custom_add(): a = mx.nd.array([1, 2, 3]) b = mx.nd.array([4, 5, 6]) c = mx.nd.Custom(a, b, op_type='AdditionOP') assert_almost_equal((a + b).asnumpy(), c.asnumpy()) custom_add() from multiprocessing import Process p = Process(target=custom_add) p.daemon = True p.start() p.join(5) assert not p.is_alive() and p.exitcode == 0 def _build_dot_custom(fun_forward, name): class Dot(mx.operator.CustomOp): def __init__(self): super(Dot, self).__init__() def forward(self, is_train, req, in_data, out_data, aux): fun_forward(in_data, out_data) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass @mx.operator.register(name) class DotProp(mx.operator.CustomOpProp): def __init__(self): super(DotProp, self).__init__() def list_arguments(self): return ['a', 'b'] def list_outputs(self): return ['output'] def infer_shape(self, in_shape): return in_shape, [(in_shape[0][0], in_shape[1][1])] def create_operator(self, ctx, shapes, dtypes): return Dot() def test_custom_op_exc(): # test except handling # see https://github.com/apache/incubator-mxnet/pull/14693 # 1. error in python code def custom_exc1(): def f(in_data, out_data): assert False out_data[0][:] = mx.nd.dot(in_data[0], in_data[1]) _build_dot_custom(f, 'Dot1') a = mx.nd.zeros((4, 1)) b = mx.nd.zeros((1, 4)) c = mx.nd.Custom(a, b, op_type='Dot1') c.wait_to_read() pytest.raises(MXNetError, custom_exc1) # 2. error in pushing operator to engine def custom_exc2(): def f(in_data, out_data): out_data[0][:] = mx.nd.dot(in_data[0], in_data[1]) _build_dot_custom(f, 'Dot2') a = mx.nd.zeros((4, 2)) b = mx.nd.zeros((1, 4)) # trigger error by invalid input shapes of operands c = mx.nd.Custom(a, b, op_type='Dot2') c.wait_to_read() pytest.raises(MXNetError, custom_exc2) # 3. 
error in real execution if default_context().device_type == 'cpu': def custom_exc3(): def f(in_data, out_data): dot = mx.nd.dot(in_data[0], in_data[1]) # input to Cholesky factorization should be # symmetric positive-definite, error will be # triggered in op execution on cpu out_data[0][:] = mx.nd.linalg.potrf(dot) out_data[0].wait_to_read() _build_dot_custom(f, 'Dot3') a = mx.nd.zeros((2, 1)) b = mx.nd.zeros((1, 2)) c = mx.nd.Custom(a, b, op_type='Dot3') c.wait_to_read() pytest.raises(MXNetError, custom_exc3) def custom_exc4(): def f(in_data, out_data): dot = mx.nd.dot(in_data[0], in_data[1]) # input to Cholesky factorization should be # symmetric positive-definite, error will be # triggered in op execution on cpu out_data[0][:] = mx.nd.linalg.potrf(dot) _build_dot_custom(f, 'Dot4') a = mx.nd.zeros((2, 1)) b = mx.nd.zeros((1, 2)) c = mx.nd.Custom(a, b, op_type='Dot4') c.wait_to_read() pytest.raises(MXNetError, custom_exc4) def test_psroipooling(): for num_rois in [1, 2]: for num_classes, num_group in itertools.product([2, 3], [2, 3]): for image_height, image_width in itertools.product([168, 224], [168, 224]): for grad_nodes in [['im_data']]: spatial_scale = 0.0625 feat_height = np.int(image_height * spatial_scale) feat_width = np.int(image_width * spatial_scale) im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width) rois_data = np.zeros([num_rois, 5]) rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1)) rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1)) im_data_var = mx.symbol.Variable(name="im_data") rois_data_var = mx.symbol.Variable(name="rois_data") op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale, group_size=num_group, pooled_size=num_group, output_dim=num_classes, name='test_op') rtol, atol = 1e-2, 1e-3 check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol, grad_nodes=grad_nodes) def test_psroipooling_with_type(): arg_params = { 'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])} # plain psroipooling sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool') ctx_list = [{'ctx': mx.cpu(0), 'psroipool_data': (1, 18, 14, 14), 'psroipool_rois': (2, 5), 'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}}, {'ctx': mx.cpu(0), 'psroipool_data': (1, 18, 14, 14), 'psroipool_rois': (2, 5), 'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}}, {'ctx': mx.cpu(0), 'psroipool_data': (1, 18, 14, 14), 'psroipool_rois': (2, 5), 'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}}, ] check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write', 'psroipool_rois': 'null'}, arg_params=arg_params) def test_deformable_convolution(): for num_batch in [1, 2]: for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]): for input_height, input_width in itertools.product([5, 6], [5, 6]): for dilate in [(1, 1), (2, 2)]: for grad_nodes in [['im_data'], ['offset_data'], ['weight']]: output_height = input_height output_width = input_width im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width) offset_data = \ np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\ * 0.8 + 0.1 weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3)) bias = np.zeros(num_channel_data) im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray() 
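# (Note: the input symbols here are converted with as_np_ndarray(), presumably because
# the deformable_convolution op invoked below is taken from the numpy-compatible
# mx.sym.npx namespace.)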
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray() weight_var = mx.symbol.Variable(name="weight").as_np_ndarray() bias_var = mx.symbol.Variable(name="bias").as_np_ndarray() op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var, offset=offset_data_var, weight=weight_var, bias=bias_var, num_filter=num_channel_data, pad=dilate, kernel=(3, 3), stride=(1, 1), dilate=dilate, num_deformable_group=num_deformable_group) if grad_nodes[0] == 'offset_data': # wider tolerance needed for coordinate differential rtol, atol = 1.0, 1e-2 else: rtol, atol = 0.05, 1e-3 # By now we only have gpu implementation if default_context().device_type == 'gpu': check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64) def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w): num_rois = input_rois.shape[0] output_offset = input_offset.copy() # simulate deformable psroipooling forward function for roi_idx in range(num_rois): sub_rois = input_rois[roi_idx, :].astype(np.float32) img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4] roi_start_w = round(x0) * spatial_scale - 0.5 roi_start_h = round(y0) * spatial_scale - 0.5 roi_end_w = round(x1 + 1) * spatial_scale - 0.5 roi_end_h = round(y1 + 1) * spatial_scale - 0.5 roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part for c_top in range(output_dim): channel_each_cls = output_dim / num_classes class_id = int(c_top / channel_each_cls) for ph in range(pooled_h): for pw in range(pooled_w): part_h = int(math.floor(float(ph) / pooled_h * part_size)) part_w = int(math.floor(float(pw) / pooled_w * part_size)) trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w need_check = True while need_check: pass_check = True for ih in range(sample_per_part): for iw in range(sample_per_part): h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5: continue w = min(max(w, 0.1), feat_w - 1.1) h = min(max(h, 0.1), feat_h - 1.1) # if the following condiiton holds, the sampling location is not differentiable # therefore we need to re-do the sampling process if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3: trans_x, trans_y = random.random() * trans_std, random.random() * trans_std pass_check = False break if not pass_check: break if pass_check: output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std need_check = False return output_offset @pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713") def test_deformable_psroipooling(): sample_per_part = 4 trans_std = 0.1 for num_rois in [1, 2]: for num_classes, num_group in itertools.product([2, 3], [2, 3]): for image_height, image_width in itertools.product([160, 224], [160, 224]): for grad_nodes in 
[['im_data'], ['offset_data']]: spatial_scale = 0.0625 stride = int(1 / spatial_scale) feat_height = np.int(image_height * spatial_scale) feat_width = np.int(image_width * spatial_scale) im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width) rois_data = np.zeros([num_rois, 5]) rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group) # at certain points, the bilinear interpolation function may be non-differentiable # to avoid this, we check whether the input locates on the valid points offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group, sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width) im_data_var = mx.symbol.Variable(name="im_data") rois_data_var = mx.symbol.Variable(name="rois_data") offset_data_var = mx.symbol.Variable(name="offset_data") op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var, trans=offset_data_var, spatial_scale=spatial_scale, sample_per_part=4, group_size=num_group, pooled_size=num_group, output_dim=num_classes, trans_std=0.1, no_trans=False, name='test_op') rtol, atol = 1e-2, 1e-3 # By now we only have gpu implementation if default_context().device_type == 'gpu': check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0)) def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None, rtol_bw = None, atol_bw = None, num_eps = None): def np_random_data(shape, dtype=np.float32): return np.random.uniform(low=-0.5, high=0.5, size=shape).astype(dtype) data1 = mx.symbol.Variable('data1') data2 = mx.symbol.Variable('data2') data3 = mx.symbol.Variable('data3') check_fw = lambda sym, location, expected :\ check_symbolic_forward(sym, location, expected, rtol=rtol_fw, atol=atol_fw, dtype=dtype) check_grad = lambda sym, location:\ check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw, atol=atol_bw, dtype=dtype) rep_3x = lambda a, m, n :\ np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n)) shape1 = (2, 3) shape2 = (3, 2) shape3 = (3, 3) shape4 = (2, 2) data_in1 = np_random_data(shape1, dtype) data_in2 = np_random_data(shape2, dtype) data_in3 = np_random_data(shape3, dtype) data_in4 = np_random_data(shape4, dtype) # Check all transpositions of gemm operator. data_in1_t = np.transpose(data_in1) data_in2_t = np.transpose(data_in2) res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4 test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.) check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in2, data_in4]) res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3 test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., transpose_a=True, transpose_b=True) check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in2, data_in3]) res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3 test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., transpose_a=True) check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in1, data_in3]) res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. 
* data_in4 test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., transpose_b=True) check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in1, data_in4]) # Check batch of gemm. a = rep_3x(data_in1, 2, 3) b = rep_3x(data_in2, 3, 2) c = rep_3x(data_in4, 2, 2) r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4 r = rep_3x(r, 2, 2) test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.) check_fw(test_gemm, [a, b, c], [r]) if grad_check == 1: check_grad(test_gemm, [a, b, c]) # Check for different axis that describes matrix rows. a2 = np.copy(np.swapaxes(a, 0, 2)) b2 = np.copy(np.swapaxes(b, 0, 2)) c2 = np.copy(np.swapaxes(c, 0, 2)) r2 = np.copy(np.swapaxes(r, 0, 2)) test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0) check_fw(test_gemm, [a2, b2, c2], [r2]) if grad_check == 1: check_grad(test_gemm, [a2, b2, c2]) a2 = np.copy(np.swapaxes(a, 1, 2)) b2 = np.copy(np.swapaxes(b, 1, 2)) c2 = np.copy(np.swapaxes(c, 1, 2)) r2 = np.copy(np.swapaxes(r, 1, 2)) test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3) check_fw(test_gemm, [a2, b2, c2], [r2]) if grad_check == 1: check_grad(test_gemm, [a2, b2, c2]) # Check gemm2 operator same way as gemm. res_gemm = 4. * np.dot(data_in1, data_in2) test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.) check_fw(test_gemm, [data_in1, data_in2], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in2]) res_gemm = 4. * np.dot(data_in1_t, data_in2_t) test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True, transpose_b=True) check_fw(test_gemm, [data_in1, data_in2], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in2]) res_gemm = 4. * np.dot(data_in1_t, data_in1) test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True) check_fw(test_gemm, [data_in1, data_in1], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in1]) res_gemm = 4. * np.dot(data_in1, data_in1_t) test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True) check_fw(test_gemm, [data_in1, data_in1], [res_gemm]) if grad_check == 1: check_grad(test_gemm, [data_in1, data_in1]) # Check batch of gemm2. a = rep_3x(data_in1, 2, 3) b = rep_3x(data_in2, 3, 2) r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2) test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.) check_fw(test_gemm, [a, b], [r]) if grad_check == 1: check_grad(test_gemm, [a, b]) a2 = np.copy(np.swapaxes(a, 0, 2)) b2 = np.copy(np.swapaxes(b, 0, 2)) r2 = np.copy(np.swapaxes(r, 0, 2)) test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0) check_fw(test_gemm, [a2, b2], [r2]) if grad_check == 1: check_grad(test_gemm, [a2, b2]) a2 = np.copy(np.swapaxes(a, 1, 2)) b2 = np.copy(np.swapaxes(b, 1, 2)) r2 = np.copy(np.swapaxes(r, 1, 2)) test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3) check_fw(test_gemm, [a2, b2], [r2]) if grad_check == 1: check_grad(test_gemm, [a2, b2]) # Test gemm separately from other la-operators. 
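# A minimal NumPy reference (illustrative only, for the non-batched 2-D case) of what the
# checks above expect from linalg.gemm: alpha * dot(op(A), op(B)) + beta * C, where op()
# applies the requested transposition. The helper name _np_gemm_reference is ours rather
# than an MXNet API; np is assumed to be the module-level NumPy import used throughout.
def _np_gemm_reference(a, b, c, alpha=1., beta=1., transpose_a=False, transpose_b=False):
    a_op = a.T if transpose_a else a
    b_op = b.T if transpose_b else b
    return alpha * np.dot(a_op, b_op) + beta * c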
def test_gemm(): _gemm_test_helper(np.float64, True) with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'): _gemm_test_helper(np.float32, True) if default_context().device_type == 'gpu': with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'): _gemm_test_helper(np.float32, True) # Helper functions for test_laop def _make_symm_symbol(a, ndims): assert ndims >= 2 tr_shape = list(range(ndims)) tr_shape[-1] = ndims-2 tr_shape[-2] = ndims-1 tr_shape = tuple(tr_shape) return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape)) def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32): assert ndims >= 2 # The last two dimensions must both be m # Create mask for lower triangle and diagonal index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32) lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype) for j in range(1, m): part1 = mx.sym.zeros(shape=(j, m), dtype=dtype) index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32) part2 = mx.sym.one_hot(index, depth=m, dtype=dtype) lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0) if not lower: lt_mask = mx.sym.reshape(lt_mask, shape=(m, m)) lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0)) shp = tuple([1]*(ndims-2) + [m, m]) lt_mask = mx.sym.reshape(lt_mask, shape=shp) return mx.sym.broadcast_mul(a, lt_mask) # @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced # tracked at https://github.com/apache/incubator-mxnet/issues/11718 @xfail_when_nonstandard_decimal_separator def test_laop(): dtype = np.float64 rtol_fw = 1e-7 atol_fw = 1e-9 num_eps = 2e-6 rtol_bw = 1e-5 atol_bw = 1e-5 # enable numerical checking of gradients grad_check = 1 data1 = mx.symbol.Variable('data1') data2 = mx.symbol.Variable('data2') rep_3x = lambda a, m, n :\ np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n)) def check_fw_grad(sym, location, expected): check_symbolic_forward(sym, location, expected, rtol=rtol_fw, atol=atol_fw, dtype=dtype) if grad_check == 1: check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw, atol=atol_bw, dtype=dtype) matrix = np.array([[9., 3., -6., 12.], [3., 26., -7., -11.], [-6., -7., 9., 7.], [12., -11., 7., 65.]]) trian = np.array([[3., 0., 0., 0.], [1., 5., 0., 0.], [-2., -1., 2., 0.], [4., -3., 6., 2.]]) pow = np.array([[2., 1., 1., 1.], [1., 4., 1., 1.], [1., 1., 8., 1.], [1., 1., 1., 16.]]) inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.], [0.05/3., 0.05, 0.05, 0.], [2.65, 0.05, 2.5, -0.75], [-2.5/3., 0., -0.75, 0.25]]) ident = np.eye(4) shape = (4, 4, 1, 1) ones = mx.nd.ones(shape).asnumpy() for lower in [True, False]: upper = not lower # Tests with trivial 1x1 matrices. data_in = np.random.uniform(1, 10, shape) # test potrf # Note: Have to symmetrize input, for gradient test to work res_potrf = np.sqrt(data_in) test_potrf = mx.sym.linalg.potrf(data1, lower=lower) check_fw_grad(test_potrf, [data_in], [res_potrf]) # test potri res_potri = np.divide(ones, data_in * data_in) test_potri = mx.sym.linalg.potri(data1, lower=lower) check_fw_grad(test_potri, [data_in], [res_potri]) # test trsm trian_in = data_in * 7. 
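# (With these trivial 1x1 inputs trsm effectively computes alpha * inverse(A) * B, so for
# A = 7 * data_in, B = data_in and alpha = 7 the expected result checked below is exactly ones.)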
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower) check_fw_grad(test_trsm, [trian_in, data_in], [ones]) # test trmm trian_in = np.divide(ones, trian_in) test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True, rightside=True, lower=lower) check_fw_grad(test_trmm, [trian_in, data_in], [ones]) # test sumlogdiag res_sumlogdiag = np.reshape(np.log(data_in), (4, 4)) test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1) check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag]) # more elaborate example of Cholesky factorization low_trian = trian if upper: trian = np.transpose(trian) # test potrf test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower) a = rep_3x(matrix, 4, 4) r = rep_3x(trian, 4, 4) check_fw_grad(test_potrf, [a], [r]) #test potri data1_ltri = _make_triangle_symm( data1, ndims=4, m=4, lower=lower, dtype=dtype) test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower) a = rep_3x(trian, 4, 4) r = rep_3x(inv, 4, 4) check_fw_grad(test_potri, [a], [r]) # test trsm test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower) b = rep_3x(matrix, 4, 4) r = rep_3x(7. * np.transpose(low_trian), 4, 4) check_fw_grad(test_trsm, [a, b], [r]) test_trsm2 = mx.sym.linalg.trsm( data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower) r = rep_3x(-2. * low_trian, 4, 4) check_fw_grad(test_trsm2, [a, b], [r]) test_trsm3 = mx.sym.linalg.trsm( data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower) b = rep_3x(np.transpose(low_trian), 4, 4) r = rep_3x(0.5 * ident, 4, 4) check_fw_grad(test_trsm3, [a, b], [r]) test_trsm4 = mx.sym.linalg.trsm( data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower) b = rep_3x(low_trian, 4, 4) r = rep_3x(-0.5 * ident, 4, 4) check_fw_grad(test_trsm4, [a, b], [r]) # test trmm test_trmm = mx.sym.linalg.trmm( data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower) a = [a, rep_3x(matrix, 4, 4)] r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4) check_fw_grad(test_trmm, a, [r]) test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower) r = rep_3x(-2. * np.dot(trian, matrix), 4, 4) check_fw_grad(test_trmm2, a, [r]) test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower) r = rep_3x(np.dot(matrix, trian), 4, 4) check_fw_grad(test_trmm3, a, [r]) test_trmm4 = mx.sym.linalg.trmm( data1_ltri, data2, alpha=1.2, transpose=True, lower=lower) r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4) check_fw_grad(test_trmm4, a, [r]) # test sumlogdiag r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,)) check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r]) # Tests for operators linalg.syrk, linalg.gelqf def _gelqf_combined_symbol(a): q, l = mx.sym.linalg.gelqf(a) q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt') l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q') return mx.sym.Group([q_qt, l_q]) # NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the # backward gradient for the unused output is of dtype np.float32 then. # ==> Very annoying! 
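# A generic sketch of that workaround (an illustrative helper mirroring the pattern used by
# the gelqf/syevd helpers below): block the gradient of the unused output, collapse it to a
# scalar, scale by zero and add it back, so both outputs stay attached to the graph while
# only the wanted one contributes.
def _keep_first_output_only(used, unused):
    bogus_scal = mx.sym.sum(mx.sym.BlockGrad(unused), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(used, bogus_scal)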
def _gelqf_first_output(a): q, l = mx.sym.linalg.gelqf(a) bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0 return mx.sym.broadcast_add(q, bogus_scal) def _gelqf_second_output(a): q, l = mx.sym.linalg.gelqf(a) bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0 return mx.sym.broadcast_add(l, bogus_scal) def _syevd_combined_symbol(a): u, lam = mx.sym.linalg.syevd(a) u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut') lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u) ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True, transpose_b=False, name='Ut_L_U') return mx.sym.Group([u_ut, ut_lam_u]) def test_laop_2(): dtype = np.float64 rtol_fw = 1e-7 atol_fw = 1e-9 num_eps = 1e-6 rtol_bw = 1e-5 atol_bw = 1e-6 # enable numerical checking of gradients grad_check = 1 data1 = mx.symbol.Variable('data1') check_fw = lambda sym, location, expected :\ check_symbolic_forward(sym, location, expected, rtol=rtol_fw, atol=atol_fw, dtype=dtype) check_grad = lambda sym, location:\ check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw, atol=atol_bw, dtype=dtype) rep_3x = lambda a, m, n :\ np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n)) # Tests for linalg.syrk mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)] for m, n, alpha in mnalpha_lst: #print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha)) data_in1 = np.random.uniform(1, 10, (m, n)) res_syrk1 = alpha * np.dot(data_in1, data_in1.T) test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha) check_fw(test_syrk1, [data_in1], [res_syrk1]) if grad_check == 1: check_grad(test_syrk1, [data_in1]) res_syrk2 = alpha * np.dot(data_in1.T, data_in1) test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha) check_fw(test_syrk2, [data_in1], [res_syrk2]) if grad_check == 1: check_grad(test_syrk2, [data_in1]) # Batch mode (3x the same thing) a_batch = rep_3x(data_in1, m, n) r1_batch = rep_3x(res_syrk1, m, m) check_fw(test_syrk1, [a_batch], [r1_batch]) if grad_check == 1: check_grad(test_syrk1, [a_batch]) r2_batch = rep_3x(res_syrk2, n, n) check_fw(test_syrk2, [a_batch], [r2_batch]) if grad_check == 1: check_grad(test_syrk2, [a_batch]) # Tests for linalg.gelqf # Currently disabled on GPU as they need cuda8 # and MxNet builds use cuda 7.5 if not (default_context() == mx.cpu()): return test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q)) test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling) test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling) mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)] for m, n in mn_lst: #print('gelqf: m={}, n={}'.format(m, n)) data_in1 = np.random.normal(0., 10., (m, n)) res_eye = np.eye(m) res_a = data_in1 check_fw(test_gelqf2, [data_in1], [res_eye, res_a]) if grad_check == 1: # A => Q check_grad(test_gelqf_q, [data_in1]) # A => L check_grad(test_gelqf_l, [data_in1]) # Batch mode (3x the same thing) a_batch = rep_3x(data_in1, m, n) reye_batch = rep_3x(res_eye, m, m) ra_batch = a_batch check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch]) if grad_check == 1: # A => Q check_grad(test_gelqf_q, [a_batch]) # A => L check_grad(test_gelqf_l, [a_batch]) # Tests for operator linalg.syevd def _syevd_first_output(a): u, lam = mx.sym.linalg.syevd(a) bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0 return mx.sym.broadcast_add(u, bogus_scal) def _syevd_second_output(a): u, lam = 
mx.sym.linalg.syevd(a) bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0 return mx.sym.broadcast_add(lam, bogus_scal) def _syevd_forward(a): lam, ut = np.linalg.eig(a) ind = np.argsort(lam) lam = lam[ind] u = ut[:, ind].T for i in range(0, a.shape[0]): _syevd_forw_eigvec_sign(u[i]) return u, lam def _syevd_forw_eigvec_sign(v): ind = np.argmax(np.abs(v)) if v[ind] < 0.: v[:] = -v def _syevd_backward(grad_u, grad_l, u, l): n = l.size assert grad_l.size == n assert grad_u.shape == (n, n) assert u.shape == (n, n) temp = np.dot(grad_u, u.T) temp2 = np.diag(grad_l) for i in range(1, n): for j in range(0, i): denom = 2. * (l[i] - l[j]) elem = (temp[i, j] - temp[j, i])/denom temp2[i, j] = elem temp2[j, i] = elem temp3 = np.dot(u.T, temp2) return np.dot(temp3, u) # Seed set because the test is not robust enough to operate on random data @pytest.mark.seed(1896893923) def test_laop_3(): # Currently disabled on GPU as syevd needs cuda8 # and MxNet builds use cuda 7.5 if not (default_context() == mx.cpu()): return dtype = np.float64 rtol_fw = 1e-6 atol_fw = 1e-6 num_eps = 1e-4 rtol_bw = 1e-2 atol_bw = 1e-2 # enable numerical checking of gradients grad_check = 1 data1 = mx.symbol.Variable('data1') check_fw = lambda sym, location, expected :\ check_symbolic_forward(sym, location, expected, rtol=rtol_fw, atol=atol_fw, dtype=dtype) check_grad = lambda sym, location:\ check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw, atol=atol_bw, dtype=dtype) rep_3x = lambda a, m, n :\ np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n)) check_bw = lambda sym, location, out_grads, expected :\ check_symbolic_backward(sym, location, out_grads, expected, rtol=rtol_fw, atol=atol_fw, dtype=dtype) # Tests for linalg.syevd test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U) data1_s2 = _make_symm_symbol(data1, ndims=2) test_syevd_u_2 = _syevd_first_output(data1_s2) test_syevd_l_2 = _syevd_second_output(data1_s2) data1_s4 = _make_symm_symbol(data1, ndims=4) test_syevd_u_4 = _syevd_first_output(data1_s4) test_syevd_l_4 = _syevd_second_output(data1_s4) n_lst = [4, 1, 2, 10, 14] for n in n_lst: #print('\n** syevd: n={}'.format(n)) data_in1 = np.random.normal(0., 10., (n, n)) data_in1 = 0.5 * (data_in1 + data_in1.T) res_eye = np.eye(n) res_a = data_in1 check_fw(test_syevd2, [data_in1], [res_eye, res_a]) # Check backward grad_u = np.random.normal(0., 2., (n, n)) grad_l = np.random.normal(0., 2., (n,)) bw_u, bw_l = _syevd_forward(data_in1) grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l) check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a]) if grad_check == 1: # A => U check_grad(test_syevd_u_2, [data_in1]) # A => L check_grad(test_syevd_l_2, [data_in1]) # Batch mode (3x the same thing) a_batch = rep_3x(data_in1, n, n) reye_batch = rep_3x(res_eye, n, n) ra_batch = a_batch check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch]) if grad_check == 1: # A => U check_grad(test_syevd_u_4, [a_batch]) # A => L check_grad(test_syevd_l_4, [a_batch]) # @piyushghai - Removing the fixed seed for this test. 
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721 def test_laop_4(): # Currently disabled on GPU as syevd needs cuda8 # and MxNet builds use cuda 7.5 if not (default_context() == mx.cpu()): return rtol_fw = 1e-6 atol_fw = 1e-6 data1 = mx.symbol.Variable('data1') check_fw = lambda sym, location, expected, dtype :\ check_symbolic_forward(sym, location, expected, rtol=rtol_fw, atol=atol_fw, dtype=dtype) a_np = np.array([[1., 2.], [2., 4.]]) u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]]) l_np = np.array([0., 5.]) test_syevd = mx.sym.linalg.syevd(data1) # float64 #print('float64') check_fw(test_syevd, [a_np], [u_np, l_np], np.float64) # float32 #print('float32') check_fw(test_syevd, [a_np], [u_np, l_np], np.float32) def test_laop_5(): # tests for diagonal and triangular matrix extraction and generation data = mx.symbol.Variable('data') # test complete range of small matrices to cover corner cases for n in range(1, 5): # test batched and non-batched processing for b in range(3): shape = (n, n) if b == 0 else (b, n, n) data_in = np.random.uniform(1, 10, shape) # test all legal offsets of the diagonal for offs in range(1-n, n): # test extraction of diagonal test_diag = mx.sym.linalg.extractdiag(data, offset=offs) res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs) check_symbolic_forward(test_diag, [data_in], [res_diag]) check_numeric_gradient(test_diag, [data_in]) # test generation of diagonal matrix test_diag2 = mx.sym.linalg.makediag(data, offset=offs) res_diag2 = None if b == 0: res_diag2 = np.diagflat(res_diag, k=offs) else: for i in range(b): res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n)) res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0) check_symbolic_forward(test_diag2, [res_diag], [res_diag2]) check_numeric_gradient(test_diag2, [res_diag]) # check both settings for parameter "lower" in case of zero offset lower_vals = [True] if offs != 0 else [True, False] for lower in lower_vals: # test extraction of triangle by doing a full roundtrip as the intermediate extracted # triangle has different orderings than numpy. 
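# (extracttrian followed by maketrian should reproduce exactly the triangular part of the
# input, which is why the expected output below is simply np.tril/np.triu of data_in.)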
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower) test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower) extracts_lower = (offs < 0) or ((offs == 0) and lower) res_trian = None if b == 0: res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs) else: for i in range(b): res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs) res = np.reshape(res, (1, n, n)) res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0) check_symbolic_forward(test_trian, [data_in], [res_trian]) check_numeric_gradient(test_trian, [data_in]) # Tests for linalg.inverse @pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975") def test_laop_6(): dtype = np.float64 rtol_fw = 1e-7 atol_fw = 1e-9 num_eps = 1e-6 rtol_bw = 1e-4 atol_bw = 1e-6 data = mx.symbol.Variable('data') check_fw = lambda sym, location, expected:\ check_symbolic_forward(sym, location, expected, rtol=rtol_fw, atol=atol_fw, dtype=dtype) check_grad = lambda sym, location:\ check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw, atol=atol_bw, dtype=dtype) ## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible; ## det is away from zero, so the value of logdet is stable v = np.random.random(4) a = np.eye(4) + np.outer(v, v) a = np.tile(a, (3, 1, 1)) permute_mat = np.eye(4)[[1, 0, 2, 3]] # test matrix inverse r = np.eye(4) r = np.tile(r, (3, 1, 1)) test_inverse = mx.sym.linalg.inverse(data) test_eye = mx.sym.linalg.gemm2(data, test_inverse) check_fw(test_eye, [a], [r]) check_grad(test_inverse, [a]) # test matrix determinant # det r = np.linalg.det(a) test_det = mx.sym.linalg.det(data) check_fw(test_det, [a], [r]) check_grad(test_det, [a]) # test slogdet r1 = np.array([1., 1., 1.]) r2 = np.log(np.abs(np.linalg.det(a))) test_sign, test_logabsdet = mx.sym.linalg.slogdet(data) check_fw(test_sign, [a], [r1]) check_fw(test_sign, [np.dot(a, permute_mat)], [-r1]) check_fw(test_logabsdet, [a], [r2]) check_grad(test_logabsdet, [a]) def test_stack(): for _ in range(100): ndim = random.randint(1, 5) axis = random.randint(0, ndim) if random.randint(0, 1): axis = axis - ndim - 1 nin = random.randint(1, 3) dshape = [random.randint(1, 5) for _ in range(ndim)] inputs = [np.random.uniform(size=dshape) for _ in range(nin)] output = np.stack(inputs, axis=axis) sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)] out = mx.sym.stack(*sym_ins, axis=axis) check_symbolic_forward(out, inputs, [output]) check_numeric_gradient(out, inputs) ## TODO: test fails intermittently when cudnn on. temporarily disabled cudnn until gets fixed. 
## tracked at https://github.com/apache/incubator-mxnet/issues/14288 def test_dropout(): def zero_count(array, ratio): zeros = 0 for i in array: if i == 0: zeros += 1 elif math.isnan(i): assert ratio == 1 # Only valid for ratio = 1 zeros += 1 return zeros def check_correctness(executor, input, ratio): input = input.ravel() output = executor.outputs[0].asnumpy().ravel() input_sum = np.sum(input) output_sum = np.sum(output) # Make sure input zeroes are none (test data setup check) assert zero_count(input, ratio) == 0 # count number of zeroes in output output_zeroes = zero_count(output, ratio) # Hopefully should be within ratio/2 % error = abs(output_sum - input_sum) / input_sum if ratio == 1.0: assert output_zeroes == len(input) elif ratio > 0.2: assert output_zeroes > 0 assert error < (ratio/2) elif ratio == 0: assert output_zeroes == 0 def check_dropout_ratio(ratio, shape, cudnn_off=True): # test dropout x = mx.sym.var('data') y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off) exe = y._simple_bind(ctx=default_context(), data=shape) if ratio == 1: max_value = float('nan') else: max_value = 1 if ratio == 0 else 1/ratio if ratio == 1: min_value = float('nan') else: min_value = 1 if ratio == 0 else 0 exe.arg_arrays[0][:] = 1 exe.forward(is_train=True) if not math.isnan(max_value): assert exe.outputs[0].asnumpy().max() > 0 else: assert math.isnan(exe.outputs[0].asnumpy().max()) if not math.isnan(min_value): assert exe.outputs[0].asnumpy().min() == min_value else: assert math.isnan(exe.outputs[0].asnumpy().min()) check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio) if ratio == 0.5: exe.backward([mx.nd.ones(shape)]) assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all() exe.forward(is_train=False) assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all() exe.backward([mx.nd.ones(shape)]) assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all() # test permanent dropout x = mx.sym.var('data') y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off) exe = y._simple_bind(ctx=default_context(), data=shape) exe.arg_arrays[0][:] = 1 exe.forward(is_train=True) assert exe.outputs[0].asnumpy().max() == max_value assert exe.outputs[0].asnumpy().min() == min_value exe.backward([mx.nd.ones(shape)]) assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all() exe.forward(is_train=False) assert exe.outputs[0].asnumpy().max() == max_value assert exe.outputs[0].asnumpy().min() == min_value exe.backward([mx.nd.ones(shape)]) assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all() def get_slice(x, axis, idx): ix = () for i in range(x.ndim): if i == axis: ix += (idx,) else: ix += (slice(None, None, None),) return x[ix] def check_dropout_axes(ratio, shape, axes, cudnn_off=True): compactshape = list(shape) for axis in axes: compactshape[axis] = 1 compactx = mx.random.uniform(shape=tuple(compactshape)) broadcastx = compactx.broadcast_to(shape) dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off) for axis in axes: target = get_slice(dropouty, axis, 0).asnumpy() for i in range(1, shape[axis]): assert(get_slice(dropouty, axis, i).asnumpy() == target).all() def check_passthrough(ratio, shape, cudnn_off=True): # test inference_mode forward and then backward a = mx.random.uniform(shape=shape) a.attach_grad() with mx.autograd.record(train_mode=False): b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity b.backward() assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy()) shape 
= (100, 100) check_dropout_ratio(0.5, shape) check_dropout_ratio(0.0, shape) check_dropout_ratio(1.0, shape) check_dropout_ratio(0.75, shape) check_dropout_ratio(0.25, shape) # check_dropout_ratio(0.5, shape, cudnn_off=False) # check_dropout_ratio(0.0, shape, cudnn_off=False) # check_dropout_ratio(1.0, shape, cudnn_off=False) # check_dropout_ratio(0.75, shape, cudnn_off=False) # check_dropout_ratio(0.25, shape, cudnn_off=False) check_passthrough(0.5, shape) check_passthrough(0.0, shape) check_passthrough(1.0, shape) # check_passthrough(0.5, shape, cudnn_off=False) # check_passthrough(0.0, shape, cudnn_off=False) # check_passthrough(1.0, shape, cudnn_off=False) nshape = (10, 10, 10, 10) with mx.autograd.train_mode(): check_dropout_axes(0.25, nshape, axes = (0,)) check_dropout_axes(0.25, nshape, axes = (1,)) check_dropout_axes(0.25, nshape, axes = (2,)) check_dropout_axes(0.25, nshape, axes = (3,)) check_dropout_axes(0.25, nshape, axes = (0, 1)) check_dropout_axes(0.25, nshape, axes = (0, 2)) check_dropout_axes(0.25, nshape, axes = (0, 3)) check_dropout_axes(0.25, nshape, axes = (1, 2)) check_dropout_axes(0.25, nshape, axes = (1, 3)) check_dropout_axes(0.25, nshape, axes = (2, 3)) check_dropout_axes(0.25, nshape, axes = (0, 1, 2)) check_dropout_axes(0.25, nshape, axes = (0, 2, 3)) check_dropout_axes(0.25, nshape, axes = (1, 2, 3)) # check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False) # check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False) @pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed.
tracked at https://github.com/apache/incubator-mxnet/issues/11290") def test_scatter_gather_nd(): def check(data, idx): data.attach_grad() with mx.autograd.record(): y = mx.nd.gather_nd(data, idx) y.backward(y) npidx = tuple(i.asnumpy() for i in idx) assert (data.asnumpy()[npidx] == y.asnumpy()).all() npdata = np.zeros_like(data.asnumpy()) npdata[npidx] = y.asnumpy() assert (npdata == data.grad.asnumpy()).all() assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all() for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']: data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6)) idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32') check(data, idx) idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32') check(data, idx) data = mx.nd.array([2, 3, 0], dtype=dtype) idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32') assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all() data = mx.nd.array([2, 3, 0], dtype=dtype) idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32') assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all() data_npy = np.random.randint(0, 10, (100,)) data = mx.nd.array(data_npy, dtype=dtype) idx = mx.nd.zeros(shape=(1, 100), dtype='int32') assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum()) if dtype == 'int64': data = mx.nd.array([2123162361283621, -31231236374787, -112372937128970, -1378278798172378], dtype=dtype) idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32') assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum()) def test_gather_nd_check_bound(): def _test_gather_nd_exception(data, indices): output = mx.nd.gather_nd(data, indices).asnumpy() # check if indices is out of bound data = mx.nd.array([[0, 1, 2], [3, 4, 5]]) indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]]) indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]]) assertRaises(IndexError, _test_gather_nd_exception, data, indices1) # IndexError: index 3 is out of bounds for axis 1 with size 3 assertRaises(IndexError, _test_gather_nd_exception, data, indices2) # IndexError: index -5 is out of bounds for axis 1 with size 3 # check if the negative indices are wrapped correctly indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]]) indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]]) data1 = mx.nd.gather_nd(data, indices1) data2 = mx.nd.gather_nd(data, indices2) assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5) def compare_forw_backw_unary_op( name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, shape, input_low, input_high, rtol, atol, dtype=np.float32): check_fw = lambda sym, location, expected :\ check_symbolic_forward(sym, location, expected, rtol=rtol, atol=atol, dtype=dtype) check_bw = lambda sym, location, out_grads, expected :\ check_symbolic_backward(sym, location, out_grads, expected, rtol=rtol, atol=atol, dtype=dtype) op_name = 'unary_op={}, dtype={}'.format(name, dtype) data = mx.symbol.Variable(op_name + '_data', dtype=dtype) # Comparison: Forward expression data_np = np.random.uniform(input_low, input_high, shape).astype(dtype) res_np = forward_numpy_call(data_np) op_ex = mx.sym.broadcast_add( forward_mxnet_call(data), mx.sym.zeros_like(data), name=op_name) check_fw(op_ex, [data_np], [res_np]) # Comparison: Backward expression res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype) data_grad = backward_numpy_call(data_np) * res_grad 
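# (Chain rule: the expected input gradient is the analytic derivative evaluated at data_np,
# multiplied elementwise by the incoming output gradient res_grad.)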
check_bw(op_ex, [data_np], [res_grad], [data_grad]) def finite_diff_unary_op( name, forward_mxnet_call, shape, input_low, input_high, rtol, atol, num_eps): # Finite difference tests are done in float64 dtype = np.float64 check_grad = lambda sym, location:\ check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol, atol=atol, dtype=dtype) data_np = np.random.uniform(input_low, input_high, shape).astype(dtype) data = mx.symbol.Variable('data', dtype=dtype) op_name = 'unary_op={}, dtype={}'.format(name, dtype) op_ex = mx.sym.broadcast_add( forward_mxnet_call(data), mx.sym.zeros_like(data), name=op_name) check_grad(op_ex, [data_np]) def np_smooth_l1(x, sigma): issq = 1. / sigma / sigma absx = np.abs(x) temp = x * sigma return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq) def np_smooth_l1_grad(x, sigma): ssq = sigma * sigma return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x)) # Tests for unary operators (basic mathematical functions): # - Forward: Comparison to NumPy (several dtype) # - Backward: Comparison to NumPy (several dtype) # - Finite difference tests (only dtype = float64) # Seed set because the test is not robust enough to operate on random data @pytest.mark.seed(192837465) def test_unary_math_operators(): have_scipy = True try: from scipy import special as scipy_special except: print("Could not import scipy. Skipping unit tests for special functions") have_scipy = False shape=(9, 10) dtype_l = [np.float64, np.float32, np.float16] rtol_l = [1e-7, 1e-6, 1e-2] rtol_less_l = [1e-6, 1e-5, 1e-2] atol_l = [1e-7, 1e-6, 1e-2] atol_less_l = [1e-6, 1e-5, 1e-2] rtol_fd = 1e-5 atol_fd = 1e-6 num_eps = 1e-6 unary_ops = { 'arccos' : [lambda x: mx.sym.arccos(x), lambda x: np.arccos(x), lambda x: -1. / np.sqrt(1. - x ** 2.), -0.95, 0.95], 'arccosh': [lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x), lambda x: 1. / np.sqrt(x ** 2 - 1.), 1.05, 10.0], 'arcsin': [lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x), lambda x: 1. / np.sqrt(1. - x ** 2), -0.95, 0.95], 'arcsinh': [lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x), lambda x: 1. / np.sqrt(x**2 + 1.), -5.0, 5.0], 'arctan': [lambda x: mx.sym.arctan(x), lambda x: np.arctan(x), lambda x: 1. / (x ** 2. + 1.), -5.0, 5.0], 'arctanh': [lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x), lambda x: 1. / (1. - x ** 2), -0.95, 0.95], 'cbrt': [lambda x: mx.sym.cbrt(x), lambda x: np.cbrt(x), lambda x: 1. / (3. * np.cbrt(x) ** 2), -10.0, 10.0], 'cos': [lambda x: mx.sym.cos(x), lambda x: np.cos(x), lambda x: -np.sin(x), -5.0, 5.0], 'cosh': [lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), -2.0, 2.0], 'exp': [lambda x: mx.sym.exp(x), lambda x: np.exp(x), lambda x: np.exp(x), -4.0, 4.0], 'expm1': [lambda x: mx.sym.expm1(x), lambda x: np.expm1(x), lambda x: np.exp(x), -0.1, 0.1], 'log': [lambda x: mx.sym.log(x), lambda x: np.log(x), lambda x: 1. / x, 0.01, 100.0], 'log10': [lambda x: mx.sym.log10(x), lambda x: np.log10(x), lambda x: 1. / (x * np.log(10.)), 0.01, 100.0], 'log2': [lambda x: mx.sym.log2(x), lambda x: np.log2(x), lambda x: 1. / (x * np.log(2.)), 0.01, 100.0], 'log1p': [lambda x: mx.sym.log1p(x), lambda x: np.log1p(x), lambda x: 1. / (1. + x), -0.1, 0.1], 'rcbrt': [lambda x: mx.sym.rcbrt(x), lambda x: 1. / np.cbrt(x), lambda x: -1. / (3. * x * np.cbrt(x)), 0.01, 100.0], 'reciprocal': [lambda x: mx.sym.reciprocal(x), lambda x: 1. / x, lambda x: -1. / (x ** 2), 0.01, 100.0], 'relu': [lambda x: mx.sym.relu(x), lambda x: np.maximum(x, 0.), lambda x: 1. 
* (x > 0.), -5.0, 5.0], 'rsqrt': [lambda x: mx.sym.rsqrt(x), lambda x: 1. / np.sqrt(x), lambda x: -0.5 / (x * np.sqrt(x)), 0.01, 100.0], 'sigmoid': [lambda x: mx.sym.sigmoid(x), lambda x: 1. / (np.exp(-x) + 1.), lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.), -3.0, 3.0], 'softsign': [lambda x: mx.sym.softsign(x), lambda x: x / (1. + np.abs(x)), lambda x: 1. / np.square(1. + np.abs(x)), -3.0, 3.0], 'sin': [lambda x: mx.sym.sin(x), lambda x: np.sin(x), lambda x: np.cos(x), -5.0, 5.0], 'sinh': [lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x), -2.0, 2.0], 'sqrt': [lambda x: mx.sym.sqrt(x), lambda x: np.sqrt(x), lambda x: 0.5 / np.sqrt(x), 0.01, 100.0], 'tan': [lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1., -1.5, 1.5], 'tanh': [lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, -4.0, 4.0], 'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.), lambda x: np_smooth_l1(x, 1.), lambda x: np_smooth_l1_grad(x, 1.), -2.0, 2.0], 'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x), lambda x: np_smooth_l1(x, 1.), lambda x: np_smooth_l1_grad(x, 1.), -2.0, 2.0], 'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.), lambda x: np_smooth_l1(x, 2.), lambda x: np_smooth_l1_grad(x, 2.), -1.0, 1.0] } if have_scipy: unary_ops['gamma'] = [lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x), lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.01, 5.0] unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x), lambda x: scipy_special.psi(x), 0.01, 20.0] # Loop over operators for name, op in unary_ops.items(): # Loop over dtype's for ind in range(len(dtype_l)): dtype = dtype_l[ind] if name == 'gammaln' or name == 'gamma': rtol = rtol_less_l[ind] atol = atol_less_l[ind] else: rtol = rtol_l[ind] atol = atol_l[ind] compare_forw_backw_unary_op( name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol, dtype) # Finite difference testing finite_diff_unary_op( name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps) def compare_forw_backw_binary_op( name, forward_mxnet_call, forward_numpy_call, backward1_numpy_call, backward2_numpy_call, shape, input1_low, input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32): check_fw = lambda sym, location, expected :\ check_symbolic_forward(sym, location, expected, rtol=rtol, atol=atol, dtype=dtype) check_bw = lambda sym, location, out_grads, expected :\ check_symbolic_backward(sym, location, out_grads, expected, rtol=rtol, atol=atol, dtype=dtype) op_name = 'binary_op={}, dtype={}'.format(name, dtype) data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype) data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype) # Comparison: Forward expression data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype) data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype) res_np = forward_numpy_call(data1_np, data2_np) op_ex = mx.sym.broadcast_add( forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1), name=op_name) check_fw(op_ex, [data1_np, data2_np], [res_np]) # Comparison: Backward expression res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype) data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad]) def finite_diff_binary_op( name, forward_mxnet_call, shape, input1_low, input1_high, input2_low, input2_high, rtol, atol, num_eps): # 
Finite difference tests are done in float64 dtype = np.float64 check_grad = lambda sym, location:\ check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol, atol=atol, dtype=dtype) data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype) data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype) data1 = mx.symbol.Variable('data1', dtype=dtype) data2 = mx.symbol.Variable('data2', dtype=dtype) op_name = 'binary_op={}, dtype={}'.format(name, dtype) op_ex = mx.sym.broadcast_add( forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1), name=op_name) check_grad(op_ex, [data1_np, data2_np]) # Tests for unary operators (basic mathematical functions): # - Forward: Comparison to NumPy (several dtype) # - Backward: Comparison to NumPy (several dtype) # - Finite difference tests (only dtype = float64) def test_binary_math_operators(): shape=(9, 10) dtype_l = [np.float64, np.float32, np.float16] rtol_l = [1e-7, 1e-6, 1e-2] atol_l = [1e-7, 1e-6, 1e-2] rtol_fd = 1e-5 atol_fd = 1e-6 num_eps = 1e-6 binary_ops = { 'hypot' : [lambda x, y: mx.sym.hypot(x, y), lambda x, y: np.hypot(x, y), lambda x, y: x / np.hypot(x, y), lambda x, y: y / np.hypot(x, y), -5.0, 5.0, -5.0, 5.0], 'pow': [lambda x, y: mx.sym.pow(x, y), lambda x, y: np.power(x, y), lambda x, y: np.power(x, y - 1.) * y, lambda x, y: np.power(x, y) * np.log(x), 0.2, 5.0, -4.0, 4.0], 'power': [lambda x, y: mx.sym.power(x, y), lambda x, y: np.power(x, y), lambda x, y: np.power(x, y - 1.) * y, lambda x, y: np.power(x, y) * np.log(x), 0.2, 5.0, -4.0, 4.0] } # Loop over operators for name, op in binary_ops.items(): # Loop over dtype's for ind in range(len(dtype_l)): dtype = dtype_l[ind] compare_forw_backw_binary_op( name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6], op[7], rtol_l[ind], atol_l[ind], dtype) # Finite difference testing finite_diff_binary_op( name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd, num_eps) @pytest.mark.serial def test_slice(): def test_slice_forward_backward(a, index): a_np = a.asnumpy() begin = [] end = [] step = [] for slice_i in index: begin.append(slice_i.start) end.append(slice_i.stop) step.append(slice_i.step) b = mx.nd.slice(a, begin=begin, end=end, step=step) b_np = a_np[index] assert same(b.asnumpy(), b_np) data = mx.sym.Variable('data') slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step) expected_in_grad = np.zeros_like(a_np) expected_in_grad[index] = b_np check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad]) shape = (16, 14, 17, 20) arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape) index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)), (slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)), (slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)), (slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)), (slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))] for index in index_list: test_slice_forward_backward(arr, index) # check numeric gradient in_data = np.arange(36).reshape(2, 2, 3, 3) data = mx.sym.Variable('data') slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1]) check_numeric_gradient(slice_sym, [in_data]) def test_slice_partial_infer(): def check_slice_partial_infer(data, begin, end, step, expected_out_shape): out = mx.sym.slice(data, begin=begin, end=end, step=step) assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1] def 
check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape): out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end) assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1] var1 = mx.sym.var(name="data", shape=(0, 20)) check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10)) check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5)) check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7)) check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3)) var1 = mx.sym.var(name="data", shape=(10, 0)) check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0)) check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0)) with mx.np_shape(): var1 = mx.sym.var(name="data", shape=(-1, 20)) check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10)) check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5)) check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7)) check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3)) var1 = mx.sym.var(name='data', shape=(10, -1)) check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1)) check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1)) def test_float16_min_max(): """Test for issue: https://github.com/apache/incubator-mxnet/issues/9007""" a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16') assert a.dtype == np.float16 assert np.finfo('float16').min == mx.nd.min(a).asscalar() assert np.finfo('float16').max == mx.nd.max(a).asscalar() @mx.use_np_shape def test_zero_size_min_max(): def min(): a = mx.nd.zeros(shape=(5, 0)) a.min() def max(): a = mx.nd.zeros(shape=(5, 0)) a.max() pytest.raises(MXNetError, min) pytest.raises(MXNetError, max) def test_squeeze_op(): def check_squeeze_op(shape, axis=None): data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape) if axis is None: out = mx.nd.squeeze(data).asnumpy() out_expected = np.squeeze(data.asnumpy()) else: out = mx.nd.squeeze(data, axis=axis).asnumpy() out_expected = np.squeeze(data.asnumpy(), axis=axis) if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,) out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))])) assert same(out, out_expected) # check forward check_squeeze_op((1, 5, 1, 3, 1), 0) check_squeeze_op((1, 5, 1, 3, 1), 2) check_squeeze_op((1, 5, 1, 3, 1), 4) check_squeeze_op((1, 5, 1, 3, 1), (0, 4)) check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4)) check_squeeze_op((1, 5, 1, 3, 1)) check_squeeze_op((1, 1, 1, 1)) # check gradient data = mx.symbol.Variable('data') shape = (1, 2, 1, 3, 1) data_tmp = np.ones(shape) test = mx.sym.squeeze(data) check_numeric_gradient(test, [data_tmp]) test = mx.sym.squeeze(data, axis=2) check_numeric_gradient(test, [data_tmp]) test = mx.sym.squeeze(data, axis=(2, 4)) check_numeric_gradient(test, [data_tmp]) @pytest.mark.serial def test_adaptive_avg_pool_op(): def py_adaptive_avg_pool(x, height, width): # 2D per frame adaptive avg pool def adaptive_avg_pool_frame(x, y): isizeH, isizeW = x.shape osizeH, osizeW = y.shape for oh in range(osizeH): istartH = 
int(np.floor(1.0 * (oh * isizeH) / osizeH)) iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH)) kH = iendH - istartH for ow in range(osizeW): istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW)) iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW)) kW = iendW - istartW xsum = 0 for ih in range(kH): for iw in range(kW): xsum += x[istartH+ih][istartW+iw] y[oh][ow] = xsum / kH / kW B,C,_,_ = x.shape y = np.empty([B,C,height, width], dtype=x.dtype) for b in range(B): for c in range(C): adaptive_avg_pool_frame(x[b][c], y[b][c]) return y def check_adaptive_avg_pool_op(shape, output_height, output_width=None): x = mx.nd.random.uniform(shape=shape) if output_width is None: y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height) npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height) else: y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width)) npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width) assert_almost_equal(y.asnumpy(), npy) shape = (2, 2, 10, 10) for i in range(1, 11): check_adaptive_avg_pool_op(shape, i) for j in range(1, 11): check_adaptive_avg_pool_op(shape, i, j) def test_bilinear_resize_op(): def py_bilinear_resize(x, outputHeight, outputWidth): batch, channel, inputHeight, inputWidth = x.shape if outputHeight == inputHeight and outputWidth == inputWidth: return x y = np.empty([batch, channel, outputHeight, outputWidth]) rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0 rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0 for h2 in range(outputHeight): h1r = 1.0 * h2 * rheight h1 = int(np.floor(h1r)) h1lambda = h1r - h1 h1p = 1 if h1 < (inputHeight - 1) else 0 for w2 in range(outputWidth): w1r = 1.0 * w2 * rwidth w1 = int(np.floor(w1r)) w1lambda = w1r - w1 w1p = 1 if w1 < (inputWidth - 1) else 0 for b in range(batch): for c in range(channel): y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \ w1lambda*x[b][c][h1][w1+w1p]) + \ h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \ w1lambda*x[b][c][h1+h1p][w1+w1p]) return y def py_bilinear_resize_backward(x, incoming_grads, mode='size'): data1 = np.zeros_like(x) data2 = incoming_grads batchsize = data1.shape[0] channels = data1.shape[1] height1 = data1.shape[2] width1 = data1.shape[3] height2 = data2.shape[2] width2 = data2.shape[3] rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0 rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0 # special case: just copy if height1 == height2 and width1 == width2: data1 += data2 return [data1] for h2 in range(0, height2): for w2 in range(0, width2): h1r = rheight * h2 h1 = int(h1r) h1p = 1 if (h1 < height1 - 1) else 0 h1lambda = h1r - h1 h0lambda = 1 - h1lambda # w1r = rwidth * w2 w1 = int(w1r) w1p = 1 if (w1 < width1 - 1) else 0 w1lambda = w1r - w1 w0lambda = 1 - w1lambda # for n in range(0, batchsize): for c in range(0, channels): d2val = data2[n][c][h2][w2] data1[n][c][h1][w1] += h0lambda * w0lambda * d2val data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val if mode == 'like': return data1, np.zeros_like(incoming_grads) return [data1] def check_bilinear_resize_op(shape, height, width): x = mx.nd.random.uniform(shape=shape) y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width) assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width)) x_scale = width / shape[-1] y_scale = 
height / shape[-2] y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale) assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width)) def check_bilinear_resize_align_corners_op(): img_shape = [1, 1, 3, 2] data = [64, 32, 32, 64, 50, 100] target_height = 6 target_width = 4 expected_data = {} # align_corners = False expected_data[0] = [ 64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000, 36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000 ] # align_corners = True expected_data[1] = [ 64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600, 35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000 ] x = np.array(data, dtype=np.float32).reshape(img_shape) x_nd = mx.nd.array(x) y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width)) y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False) assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3) y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width)) y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True) assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3) def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None): x = mx.nd.random.uniform(shape=shape) original_h = shape[2] original_w = shape[3] if mode == 'odd_scale': assert scale_height is not None and scale_width is not None new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \ int((original_h - 1) * scale_height) + 1 new_w = int(original_w * scale_width) if (original_w % 2) == 0 \ else int((original_w - 1) * scale_width) + 1 y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height, scale_width=scale_width, mode='odd_scale') elif mode == 'to_even_down': new_h = original_h if (original_h % 2) == 0 else original_h - 1 new_w = original_w if (original_w % 2) == 0 else original_w - 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down') elif mode == 'to_even_up': new_h = original_h if (original_h % 2) == 0 else original_h + 1 new_w = original_w if (original_w % 2) == 0 else original_w + 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up') elif mode == 'to_odd_down': new_h = original_h if (original_h % 2) == 1 else original_h - 1 new_w = original_w if (original_w % 2) == 1 else original_w - 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down') elif mode == 'to_odd_up': new_h = original_h if (original_h % 2) == 1 else original_h + 1 new_w = original_w if (original_w % 2) == 1 else original_w + 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up') elif mode == 'like': x_1 = mx.nd.random.uniform(shape=shape_1) new_h = x_1.shape[2] new_w = x_1.shape[3] y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like') new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int') new_shape_got = np.array(y.shape, dtype='int') data_sym = mx.sym.var('data') data_np = x.asnumpy() expected = py_bilinear_resize(data_np, new_h, new_w) out_grads = np.ones([shape[0], shape[1], new_h, new_w]) expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode) assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. 
{} vs {}".format( str(new_shape_desired.tolist()), str(new_shape_got.tolist()))) assert_almost_equal(y.asnumpy(), expected, 1e-3, 0) if mode != 'like': resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode) check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5) check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5) check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4) else: data_sym_like = mx.sym.var('data_like') resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode) date_np_like = x_1.asnumpy() check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5) check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5) check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4) shape = (2, 2, 10, 10) check_bilinear_resize_op(shape, 5, 5) check_bilinear_resize_op(shape, 10, 10) check_bilinear_resize_op(shape, 15, 15) check_bilinear_resize_op(shape, 3, 7) check_bilinear_resize_op(shape, 13, 17) shape = (2, 2, 20, 20) check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale') check_bilinear_resize_modes_op(shape, mode='to_even_down') check_bilinear_resize_modes_op(shape, mode='to_even_up') check_bilinear_resize_modes_op(shape, mode='to_odd_down') check_bilinear_resize_modes_op(shape, mode='to_odd_up') shape = (2, 2, 21, 21) check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale') check_bilinear_resize_modes_op(shape, mode='to_even_down') check_bilinear_resize_modes_op(shape, mode='to_even_up') check_bilinear_resize_modes_op(shape, mode='to_odd_down') check_bilinear_resize_modes_op(shape, mode='to_odd_up') shape_0 = (2, 2, 21, 21) shape_1 = (2, 2, 10, 10) check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like') check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like') check_bilinear_resize_align_corners_op() def test_multi_proposal_op(): # paramters feature_stride = 16 scales = (8, 16, 32) ratios = (0.5, 1, 2) rpn_pre_nms_top_n = 12000 rpn_post_nms_top_n = 2000 threshold = 0.7 rpn_min_size = 16 batch_size = 20 feat_len = (1000 + 15) // 16 H, W = feat_len, feat_len num_anchors = len(scales) * len(ratios) count_anchors = H * W * num_anchors ''' cls_prob: (batch_size, 2 * num_anchors, H, W) bbox_pred: (batch_size, 4 * num_anchors, H, W) im_info: (batch_size, 3) ''' cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32) bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32) im_info = mx.nd.empty((batch_size, 3), dtype = np.float32) cls_prob = mx.nd.array(np.random.random(cls_prob.shape)) bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape)) for i in range(batch_size): im_size = np.random.randint(100, feat_len * feature_stride, size = (2,)) im_scale = np.random.randint(70, 100) / 100.0 im_info[i, :] = [im_size[0], im_size[1], im_scale] def get_sub(arr, i): new_shape = list(arr.shape) new_shape[0] = 1 res = 
arr[i].reshape(new_shape) return res def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n): single_proposal = [] single_score = [] for i in range(batch_size): rois, score = mx.nd.contrib.Proposal( cls_prob = get_sub(cls_prob, i), bbox_pred = get_sub(bbox_pred, i), im_info = get_sub(im_info, i), feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = threshold, rpn_min_size = rpn_min_size, output_score = True) single_proposal.append(rois) single_score.append(score) multi_proposal, multi_score = mx.nd.contrib.MultiProposal( cls_prob = cls_prob, bbox_pred = bbox_pred, im_info = im_info, feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = threshold, rpn_min_size = rpn_min_size, output_score = True) single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape) single_score = mx.nd.stack(*single_score).reshape(multi_score.shape) single_proposal_np = single_proposal.asnumpy() multi_proposal_np = multi_proposal.asnumpy() single_score_np = single_score.asnumpy() multi_score_np = multi_score.asnumpy() # check rois x1,y1,x2,y2 assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:]) # check rois batch_idx for i in range(batch_size): start = i * rpn_post_nms_top_n end = start + rpn_post_nms_top_n assert (multi_proposal_np[start:end, 0] == i).all() # check score assert np.allclose(single_score_np, multi_score_np) def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n): im_info_sym = mx.sym.Variable('im_info') cls_prob_sym = mx.sym.Variable('cls_prob') bbox_pred_sym = mx.sym.Variable('bbox_pred') sym = mx.sym.contrib.MultiProposal( cls_prob = cls_prob_sym, bbox_pred = bbox_pred_sym, im_info = im_info_sym, feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = threshold, rpn_min_size = rpn_min_size, output_score = False) location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()] expected = [np.zeros_like(e) for e in location] out_grads = [np.ones((rpn_post_nms_top_n, 5))] check_symbolic_backward(sym, location, out_grads, expected) check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n) check_forward(rpn_pre_nms_top_n, 1500) check_forward(1000, 500) check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n) def test_quadratic_function(): def f(x, a, b, c): return a * x**2 + b * x + c a = np.random.random_sample() b = np.random.random_sample() c = np.random.random_sample() data = mx.symbol.Variable('data') quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c) for dtype in [np.float16, np.float32, np.float64]: tol = 1e-2 if dtype is np.float16 else 1e-5 for ndim in range(1, 6): shape = rand_shape_nd(ndim, 5) data_np = np.random.randn(*shape).astype(dtype) expected = f(data_np, a, b, c) backward_expected = 2 * a * data_np + b # check imperative forward output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c) assert_almost_equal(output, expected, rtol=tol, atol=tol) # check forward check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol) # check backward check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)], [backward_expected], rtol=tol, atol=tol) # check backward using finite difference check_numeric_gradient(quad_sym, [data_np], atol=0.001) def allclose_function(contexts): def getRandom(base, percent = 
1.): return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100) title = 'exp' for ctx in contexts: title += ' cpu' if ctx == mx.cpu() else ' gpu' title += ' nElem shape' num_ctx = len(contexts) result = [False, False] for dtype in [np.float16, np.float32, np.float64]: rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5) atol = getRandom(1e-4 if dtype is np.float16 else 1e-7) print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol)) print(title) for ndim in range(1, 10): shape = rand_shape_nd(ndim, 8) a_np = np.random.randn(*shape).astype(dtype) b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype) expected = np.allclose(a_np, b_np, rtol, atol) for n, ctx in enumerate(contexts): a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx) b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx) output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol) result[n] = output.asnumpy() == 1 if expected != result[n]: # Preparing the output of elements of the array, which are considered as "not close" AND # corresponding elements of comparison CPU/GPU/Python vectors, which are considered as "close" v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU' if expected: v_cmp = 'Python' a_b = a_ctx.asnumpy() b_b = b_ctx.asnumpy() a_g = np.asarray(a_np) b_g = np.asarray(b_np) else: v_cmp = v_ctx v_ctx = 'Python' a_b = np.asarray(a_np) b_b = np.asarray(b_np) a_g = a_ctx.asnumpy() b_g = b_ctx.asnumpy() print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp)) frmt = " a[{0:d}]: b[{0:d}]:" \ " abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):" # Define the indices of all violations and corresponding values of coordinates bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b) a_values = [a_b[bad_indexes], a_g[bad_indexes]] b_values = [b_b[bad_indexes], b_g[bad_indexes]] idx = np.asarray(np.where(bad_indexes == True)) idx = idx.reshape(1, idx.size) idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten() for i in range(len(a_values[0])): flat_idx = idx_flat[i] print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx)) print(frmt.format(flat_idx)) for j in range(2): diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i]) print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff)) if num_ctx == 1: print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape)) else: print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape)) if expected != result[0] or num_ctx > 1 and expected != result[1]: assert False @pytest.mark.serial def test_allclose_function(): allclose_function([default_context()]) def test_histogram(): def f(x, bins=10, range=None): return np.histogram(x, bins, range=range) for ndim in range(1, 6): shape = rand_shape_nd(ndim) x = rand_ndarray(shape, stype='default', dtype=np.float64) mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64) np_bins = mx_bins.asnumpy() bin_cnt = random.randint(2, 10) bin_range = (-2.5, 2.5) mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range) np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range) assert_almost_equal(mx_bins1, np_bins1) assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5) mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins) np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins) assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5) assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, 
atol=1e-5) data = mx.sym.Variable("data") bins = mx.sym.Variable("bins") histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range) histo2 = mx.sym.histogram(a=data, bins=bins) executor1 = histo1._bind(ctx=default_context(), args={"data" : x}) executor1.forward(is_train=False) assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False) executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins}) executor2.forward(is_train=False) assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False) @pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915") def test_activation(): shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)] dtype_l = [np.float64, np.float32, np.float16] rtol_l = [1e-7, 1e-6, 1e-2] atol_l = [1e-7, 1e-6, 1e-2] rtol_fd = 1e-5 atol_fd = 1e-6 num_eps = 1e-6 unary_ops = { 'relu': [lambda x: mx.sym.Activation(x, act_type='relu'), lambda x: np.maximum(x, 0.), lambda x: 1. * (x > 0.), -5.0, 5.0], 'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'), lambda x: 1. / (np.exp(-x) + 1.), lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.), -3.0, 3.0], 'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, -4.0, 4.0], 'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'), lambda x: np.log(1. + np.exp(x)), lambda x: 1. - 1 / (1 + np.exp(x)), -3.0, 3.0], 'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'), lambda x: x / (1. + np.abs(x)), lambda x: 1. / np.square(1. + np.abs(x)), -3.0, 3.0], } # Loop over operators for name, op in unary_ops.items(): # Loop over shapes for shape in shapes: # Loop over dtype's for ind in range(len(dtype_l)): dtype = dtype_l[ind] rtol = rtol_l[ind] atol = atol_l[ind] compare_forw_backw_unary_op( name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol, dtype) # Finite difference testing finite_diff_unary_op( name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps) @pytest.mark.serial def test_ravel(): # be aware that check_symbolic_forward will use float type internally # for the arrays and that limits the representable flat index range. # Taking dim==4 and a range of [0,..,100] for the data can already # cause precision issues and break this test. for dim in [1, 2, 3, 4]: data = np.random.randint(50, size=(dim, 500)) shape = tuple(np.add(np.amax(data, axis=1), [1])) a = mx.sym.Variable('a') ravel_npy = np.ravel_multi_index(data, shape) b = mx.sym.ravel_multi_index(a, shape=shape) check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy]) c = mx.sym.unravel_index(a, shape=shape) check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data]) # Test with leading dimension set to -1. 
shape2 = shape shape2 = (-1,)+shape[1:] b = mx.sym.ravel_multi_index(a, shape=shape2) check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy]) c = mx.sym.unravel_index(a, shape=shape2) check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data]) def test_unravel_index(): unravel_shape = (2, 10) unravel_size = np.prod(unravel_shape) for shape in [(10,), (2, 10), (3, 4, 5)]: a = np.random.randint(0, unravel_size, size=shape) b = np.stack(np.unravel_index(a, shape=unravel_shape), 0) a_mx = mx.nd.array(a) b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape) assert_array_equal(b, b_mx.asnumpy()) def test_context_num_gpus(): try: # Note: the test is run both on GPU and CPU hosts, so that we can not assert # on a specific number here. assert mx.context.num_gpus() >= 0 except mx.MXNetError as e: # Note: On a CPU only host CUDA sometimes is not able to determine the number # of GPUs if str(e).find("CUDA") == -1: raise e @pytest.mark.serial def test_op_roi_align(): T = np.float32 def assert_same_dtype(dtype_a, dtype_b): ''' Assert whether the two data type are the same Parameters ---------- dtype_a, dtype_b: type Input data types to compare ''' assert dtype_a == dtype_b,\ TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b)) def bilinear_interpolate(bottom, height, width, y, x): if y < -1.0 or y > height or x < -1.0 or x > width: return T(0.0), [] x = T(max(0.0, x)) y = T(max(0.0, y)) x_low = int(x) y_low = int(y) if x_low >= width - 1: x_low = x_high = width - 1 x = T(x_low) else: x_high = x_low + 1 if y_low >= height - 1: y_low = y_high = height - 1 y = T(y_low) else: y_high = y_low + 1 ly = y - T(y_low) lx = x - T(x_low) hy = T(1.0) - ly hx = T(1.0) - lx v1 = bottom[y_low, x_low] v2 = bottom[y_low, x_high] v3 = bottom[y_high, x_low] v4 = bottom[y_high, x_high] w1 = hy * hx w2 = hy * lx w3 = ly * hx w4 = ly * lx assert_same_dtype(w1.dtype, T) assert_same_dtype(w2.dtype, T) assert_same_dtype(w3.dtype, T) assert_same_dtype(w4.dtype, T) val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4 assert_same_dtype(val.dtype, T) grad = [(y_low, x_low, w1), (y_low, x_high, w2), (y_high, x_low, w3), (y_high, x_high, w4) ] return val, grad def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio, position_sensitive, dy): N, C, H, W = data.shape R = rois.shape[0] PH, PW = pooled_size assert rois.ndim == 2,\ ValueError( 'The ndim of rois should be 2 rather than %d' % rois.ndim) assert rois.shape[1] == 5,\ ValueError( 'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1]) assert_same_dtype(data.dtype, T) assert_same_dtype(rois.dtype, T) C_out = C // PH // PW if position_sensitive else C out = np.zeros((R, C_out, PH, PW), dtype=T) dx = np.zeros_like(data) drois = np.zeros_like(rois) for r in range(R): batch_ind = int(rois[r, 0]) sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale) roi_w = T(max(ew - sw, 1.0)) roi_h = T(max(eh - sh, 1.0)) bin_h = roi_h / T(PH) bin_w = roi_w / T(PW) bdata = data[batch_ind] if sampling_ratio > 0: roi_bin_grid_h = roi_bin_grid_w = sampling_ratio else: roi_bin_grid_h = int(np.ceil(roi_h / T(PH))) roi_bin_grid_w = int(np.ceil(roi_w / T(PW))) count = T(roi_bin_grid_h * roi_bin_grid_w) for c in range(C_out): for ph in range(PH): for pw in range(PW): val = T(0.0) c_in = c * PH * PW + ph * PW + pw if position_sensitive else c for iy in range(roi_bin_grid_h): y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \ bin_h / T(roi_bin_grid_h) for ix in range(roi_bin_grid_w): x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \ bin_w / 
T(roi_bin_grid_w) v, g = bilinear_interpolate( bdata[c_in], H, W, y, x) assert_same_dtype(v.dtype, T) val += v # compute grad for qy, qx, qw in g: assert_same_dtype(qw.dtype, T) dx[batch_ind, c_in, qy, qx] += dy[r, c, ph, pw] * qw / count out[r, c, ph, pw] = val / count assert_same_dtype(out.dtype, T) return out, [dx, drois] def test_roi_align_value(sampling_ratio=0, position_sensitive=False): ctx = default_context() dtype = np.float32 dlen = 224 N, C, H, W = 5, 3, 16, 16 R = 7 pooled_size = (3, 4) C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C spatial_scale = H * 1.0 / dlen data = mx.nd.array( np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype) center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx) pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1) rois = mx.nd.concat(batch_ind, pos, dim=1) data.attach_grad() rois.attach_grad() with mx.autograd.record(): output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size, spatial_scale=spatial_scale, sample_ratio=sampling_ratio, position_sensitive=position_sensitive) C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C dy = mx.nd.random.uniform(-1, 1, (R, C_out) + pooled_size, ctx=ctx, dtype=dtype) output.backward(dy) real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size, spatial_scale, sampling_ratio, position_sensitive, dy.asnumpy()) assert_almost_equal(output, real_output, atol=1e-3) assert_almost_equal(data.grad, dx, atol=1e-3) assert_almost_equal(rois.grad, drois, atol=1e-3) # modified from test_roipooling() def test_roi_align_autograd(sampling_ratio=0): ctx = default_context() data = mx.symbol.Variable(name='data') rois = mx.symbol.Variable(name='rois') test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1, sample_ratio=sampling_ratio) x1 = np.random.rand(4, 1, 12, 12).astype('float64') x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2]], dtype='float64') check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data': 'write', 'rois': 'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx) check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data': 'add', 'rois': 'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx) test_roi_align_value() test_roi_align_value(sampling_ratio=2) test_roi_align_value(position_sensitive=True) test_roi_align_autograd() def test_op_rroi_align(): T = np.float32 def assert_same_dtype(dtype_a, dtype_b): ''' Assert whether the two data type are the same Parameters ---------- dtype_a, dtype_b: type Input data types to compare ''' assert dtype_a == dtype_b,\ TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b)) def bilinear_interpolate(bottom, height, width, y, x): if y < -1.0 or y > height or x < -1.0 or x > width: return T(0.0) x = T(max(0.0, x)) y = T(max(0.0, y)) x_low = int(x) y_low = int(y) if x_low >= width - 1: x_low = x_high = width - 1 x = T(x_low) else: x_high = x_low + 1 if y_low >= height - 1: y_low = y_high = height - 1 y = T(y_low) else: y_high = y_low + 1 ly = y - T(y_low) lx = x - T(x_low) hy = T(1.0) - ly hx = T(1.0) - lx v1 = bottom[y_low, x_low] v2 = bottom[y_low, x_high] v3 = bottom[y_high, x_low] v4 = bottom[y_high, x_high] w1 = hy * hx w2 = hy * lx w3 = ly * hx w4 = ly * lx assert_same_dtype(w1.dtype, T) 
assert_same_dtype(w2.dtype, T) assert_same_dtype(w3.dtype, T) assert_same_dtype(w4.dtype, T) val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4 assert_same_dtype(val.dtype, T) return val def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio): N, C, H, W = data.shape R = rois.shape[0] PH, PW = pooled_size assert rois.ndim == 2,\ ValueError( 'The ndim of rois should be 2 rather than %d' % rois.ndim) assert rois.shape[1] == 6,\ ValueError( 'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1]) assert_same_dtype(data.dtype, T) assert_same_dtype(rois.dtype, T) out = np.zeros((R, C, PH, PW), dtype=T) for r in range(R): batch_ind = int(rois[r, 0]) roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale) roi_theta = T(rois[r,5] * np.pi / 180.0) roi_w = T(max(roi_w, 1.0)) roi_h = T(max(roi_h, 1.0)) bin_h = roi_h / T(PH) bin_w = roi_w / T(PW) bdata = data[batch_ind] if sampling_ratio > 0: roi_bin_grid_h = roi_bin_grid_w = sampling_ratio else: roi_bin_grid_h = int(np.ceil(roi_h / T(PH))) roi_bin_grid_w = int(np.ceil(roi_w / T(PW))) count = T(roi_bin_grid_h * roi_bin_grid_w) roi_start_h = T(-roi_h / 2.0) roi_start_w = T(-roi_w / 2.0) for c in range(C): for ph in range(PH): for pw in range(PW): val = T(0.0) for iy in range(roi_bin_grid_h): yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \ bin_h / T(roi_bin_grid_h) for ix in range(roi_bin_grid_w): xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \ bin_w / T(roi_bin_grid_w) x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h v = bilinear_interpolate( bdata[c], H, W, y, x) assert_same_dtype(v.dtype, T) val += v out[r, c, ph, pw] = val / count assert_same_dtype(out.dtype, T) return out def test_rroi_align_value(sampling_ratio=-1): ctx = default_context() if ctx.device_type == 'gpu': print('skipped testing rroi align for gpu since it is not supported yet') return dtype = np.float32 dlen = 224 N, C, H, W = 5, 3, 16, 16 R = 7 pooled_size = (3, 4) spatial_scale = H * 1.0 / dlen data = mx.nd.array( np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype) center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype) batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx) pos = mx.nd.concat(center_xy, wh, theta, dim=1) rois = mx.nd.concat(batch_ind, pos, dim=1) output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio) real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size, spatial_scale, sampling_ratio) assert_almost_equal(output.asnumpy(), real_output, atol=1e-3) test_rroi_align_value() test_rroi_align_value(sampling_ratio=2) def test_diag(): # Test 2d input h = np.random.randint(2,9) w = np.random.randint(2,9) a_np = np.random.random((h, w)).astype(np.float32) a = mx.nd.array(a_np).astype('float32') for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]: assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k)) # invalid k k = max(h,w) + 1 assertRaises(MXNetError, mx.nd.diag, a, k=k) # Test 2d backward, k=0 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data) check_numeric_gradient(diag_sym, [a_np]) # Test 2d backward, k=1 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=1) 
check_numeric_gradient(diag_sym, [a_np]) # Test 2d backward, k=-1 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=-1) check_numeric_gradient(diag_sym, [a_np]) # test 1d input d = np.random.randint(2,9) a_np = np.random.random((d)) a = mx.nd.array(a_np) # k is random k = np.random.randint(-d,d) assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k)) # Test 2d backward, k=0 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data) check_numeric_gradient(diag_sym, [a_np]) # Test 2d backward, k=1 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=1) check_numeric_gradient(diag_sym, [a_np]) # Test 2d backward, k=-1 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=-1) check_numeric_gradient(diag_sym, [a_np]) # Test 4d input x1 = np.random.randint(3,9) x2 = np.random.randint(3,9) x3 = np.random.randint(3,9) x4 = np.random.randint(3,9) a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32) a = mx.nd.array(a_np).astype('float32') # k = 0, axis1=0, axis2=1 r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1) assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1)) # k = 1, axis1=1, axis2=0 r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0) assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0)) # k = -1 axis1=1, axis3=3 r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3) assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3)) # k = 2, axis1=-2, axis2=0 r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0) assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0)) # Test 4d backward, k=0, axis1=3, axis2=0 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0) check_numeric_gradient(diag_sym, [a_np]) # Test 4d backward, k=1, axis1=1, axis2=2 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2) check_numeric_gradient(diag_sym, [a_np]) # Test 4d backward, k=-1, axis1=2, axis2=0 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0) check_numeric_gradient(diag_sym, [a_np]) # Test 4d backward, k=-2, axis1=1, axis2=-1 data = mx.sym.Variable('data') diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1) check_numeric_gradient(diag_sym, [a_np]) @pytest.mark.serial def test_depthtospace(): def f(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) return y block = random.randint(2, 4) rand_mul1 = random.randint(1, 4) n = random.randint(1, 5) c = block * block * rand_mul1 h = random.randint(1, 5) w = random.randint(1, 5) shape_inp = (n, c, h, w) data = rand_ndarray(shape_inp, 'default') data_np = data.asnumpy() expected = f(data_np, block) output = mx.nd.depth_to_space(data, block) assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3) shape_out = (n, c // (block ** 2), h * block, w * block) data = mx.sym.Variable('data') dts_sym = mx.sym.depth_to_space(data, block) check_numeric_gradient(dts_sym, [np.ones(shape_inp)]) check_symbolic_forward(dts_sym, [data_np], [expected]) check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)]) def test_invalid_depth_dim(): invalid_shape_inp = (n, block - 1, h, w) data = rand_ndarray(invalid_shape_inp, 'default') assertRaises(MXNetError, mx.nd.depth_to_space, data, block) def test_invalid_space_dim(): invalid_shape_inp = (n, 
block ** 2, 0, block + 1) data = rand_ndarray(invalid_shape_inp, 'default') assertRaises(MXNetError, mx.nd.depth_to_space, data, block) def test_invalid_block_size(): block = 0 invalid_shape_inp = (n , c, h, w) data = rand_ndarray(invalid_shape_inp, 'default') assertRaises(MXNetError, mx.nd.depth_to_space, data, block) test_invalid_depth_dim() test_invalid_space_dim() test_invalid_block_size() @pytest.mark.serial def test_spacetodepth(): def f(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize]) tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4]) y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize]) return y block = random.randint(2, 4) rand_mul1 = random.randint(1, 4) rand_mul2 = random.randint(1, 4) n = random.randint(1, 5) c = random.randint(1, 5) h = block * rand_mul1 w = block * rand_mul2 shape_inp = (n, c, h, w) data = rand_ndarray(shape_inp, 'default') data_np = data.asnumpy() expected = f(data_np, block) output = mx.nd.space_to_depth(data, block) assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3) shape_out = (n, c * (block ** 2), h // block, w // block) data = mx.sym.Variable('data') dts_sym = mx.sym.space_to_depth(data, block) check_numeric_gradient(dts_sym, [np.ones(shape_inp)]) check_symbolic_forward(dts_sym, [data_np], [expected]) check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)]) def test_invalid_space_dim(): invalid_shape_inp = (n , c, block - 1, w) data = rand_ndarray(invalid_shape_inp, 'default') assertRaises(MXNetError, mx.nd.space_to_depth, data, block) def test_invalid_block_size(): block = 0 invalid_shape_inp = (n, c, h, w) data = rand_ndarray(invalid_shape_inp, 'default') assertRaises(MXNetError, mx.nd.space_to_depth, data, block) def test_invalid_depth_dim(): invalid_shape_inp = (n, 0, h, w) data = rand_ndarray(invalid_shape_inp, 'default') assertRaises(MXNetError, mx.nd.space_to_depth, data, block) test_invalid_space_dim() test_invalid_block_size() test_invalid_depth_dim() def test_softmax_cross_entropy(): def f_sm_ce(data, label): return np.sum(-np.log(data) * label) data = mx.sym.Variable('data') label = mx.sym.Variable('label') sym = mx.sym.softmax_cross_entropy(data=data, label=label) num_labels = random.randint(100, 200) batch_size = random.randint(100, 200) np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy() np_sm = np_softmax(np_data) np_label = np.random.randint(0, num_labels, (batch_size, )) np_one_hot_label = np.zeros((batch_size, num_labels)) np_one_hot_label[np.arange(batch_size), np_label] = 1. 
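    # f_sm_ce sums -log(softmax) weighted by the one-hot labels over the whole batch, so the expected output below is a single scalar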
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5) def test_split_v2(): dim = random.randint(2, 6) shape = rand_shape_nd(dim) axis = random.randint(-dim, dim-1) axis_size = shape[axis] samples = random.randint(0, axis_size - 1) indices = sorted(random.sample([i for i in range(1, axis_size)], samples)) indices = tuple(indices) mx_data = rand_ndarray(shape) np_data = mx_data.asnumpy() np_out = np.split(np_data, indices_or_sections=indices, axis=axis) data = mx.sym.Variable("data") sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis) check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5) out_grad = [np.ones(arr.shape) for arr in np_out] check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)]) def test_moments(): dim = random.randint(2, 5) shape = rand_shape_nd(dim, dim=5) axes = [i for i in range(dim)] test_dims = random.sample(axes, random.randint(1, dim)) test_axes = tuple(sorted(test_dims)) np_a = np.random.uniform(-1.0, 1.0, shape) a = mx.nd.array(np_a) for keepdims in [True, False]: eps = 1e-3 np_a[abs(np_a) < eps] = 2 * eps np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims) np_var = np.var(np_a, axis=test_axes, keepdims=keepdims) mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes) N = np_a.size / np_mean.size mx_sym = mx.sym.Variable("data") mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims) mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1]) if len(np_mean.shape) == 0: np_mean = np_mean.reshape(mx_mean.shape) np_var = np_var.reshape(mx_var.shape) assert np_mean.shape == mx_mean.shape assert np_var.shape == mx_var.shape check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5) check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4) def test_invalid_kernel_size(): invalid_kernel_size = 28 assert_exception( mx.nd.Correlation, MXNetError, mx.nd.array(np.random.rand(1, 1, 28, 28)), mx.nd.array(np.random.rand(1, 1, 28, 28)), kernel_size=invalid_kernel_size) def test_valid_kernel_size(): valid_kernel_size = 9 mx.nd.Correlation( mx.nd.array(np.random.rand(1, 1, 28, 28)), mx.nd.array(np.random.rand(1, 1, 28, 28)), kernel_size=valid_kernel_size) def test_valid_max_pooling_pad_type_same(): import math input_data = mx.nd.array(np.random.rand(1,1,10)) stride = 2 kernel = 2 output_data=mx.nd.Pooling( input_data, kernel=kernel, stride=stride, pad=(0,0,0), pool_type='max', name='pooling', pooling_convention="same") assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2]) def test_invalid_max_pooling_pad_type_same(): import math input_data = mx.nd.array(np.random.rand(1,1,10)) stride = 2 kernel = 2 pad = 2 assert_exception( mx.nd.Pooling, MXNetError, input_data, stride=stride, kernel=kernel, pad=pad, pool_type='max', name='pooling', pooling_convention="same") @pytest.mark.serial def test_image_normalize(): # Part 1 - Test 3D input with 3D mean/std shape_3d = (3, 28, 28) mean = (0, 1, 2) std = (3, 2, 1) data_in_3d = mx.nd.random.uniform(0, 1, shape_3d) data_expected_3d = data_in_3d.asnumpy() data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0 data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0 data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0 data = mx.symbol.Variable('data') img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std) # check forward 
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_3d = np.ones(shape_3d) grad_expected_3d[:][:][0] = 1 / 3.0 grad_expected_3d[:][:][1] = 1 / 2.0 grad_expected_3d[:][:][2] = 1 / 1.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)], expected=[grad_expected_3d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001) # Part 2 - Test 4D input with 3D mean/std shape_4d = (2, 3, 28, 28) data_in_4d = mx.nd.random.uniform(0, 1, shape_4d) data_expected_4d = data_in_4d.asnumpy() data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0 data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0 data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0 data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0 data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0 data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0 # check forward check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_4d = np.ones(shape_4d) grad_expected_4d[0][:][:][0] = 1 / 3.0 grad_expected_4d[0][:][:][1] = 1 / 2.0 grad_expected_4d[0][:][:][2] = 1 / 1.0 grad_expected_4d[1][:][:][0] = 1 / 3.0 grad_expected_4d[1][:][:][1] = 1 / 2.0 grad_expected_4d[1][:][:][2] = 1 / 1.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)], expected=[grad_expected_4d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001) # Part 3 - Test 3D input with scalar mean/std shape_3d = (3, 28, 28) mean = 1.0 std = 2.0 data_in_3d = mx.nd.random.uniform(0, 1, shape_3d) data_expected_3d = data_in_3d.asnumpy() data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0 data = mx.symbol.Variable('data') img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std) # check forward check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_3d = np.ones(shape_3d) grad_expected_3d[:][:][:] = 1 / 2.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)], expected=[grad_expected_3d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001) # Part 4 - Test 4D input with scalar mean/std shape_4d = (2, 3, 28, 28) data_in_4d = mx.nd.random.uniform(0, 1, shape_4d) data_expected_4d = data_in_4d.asnumpy() data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0 # check forward check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_4d = np.ones(shape_4d) grad_expected_4d[:][:][:][:] = 1 / 2.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)], expected=[grad_expected_4d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001) @pytest.mark.serial def test_index_array(): def test_index_array_default(): for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]: data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data) input_array = np.ones(shape) 
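            # build the expected result with np.mgrid: one coordinate grid per axis, stacked along a new trailing axis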
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)] expected = np.stack(mgrid, axis=-1) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) @mx.use_np_shape def test_index_array_default_zero_dim(): data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data) input_array = np.ones(()) expected = np.zeros((0,)) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) @mx.use_np_shape def test_index_array_default_zero_size(): data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data) input_array = np.ones((0, 0, 0)) expected = np.zeros((0, 0, 0, 3)) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) def test_index_array_select_axes(): shape = (5, 7, 11, 13, 17, 19) for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]: data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data, axes=axes) input_array = np.ones(shape) mgrid = np.mgrid[tuple(slice(0, x) for x in shape)] expected = np.stack(mgrid, axis=-1)[..., axes] check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) @mx.use_np_shape def test_index_array_select_axes_zero_size(): data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data, axes=(2, 1)) input_array = np.ones((0, 0, 0, 0)) expected = np.zeros((0, 0, 2)) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) test_index_array_default() test_index_array_default_zero_dim() test_index_array_default_zero_size() test_index_array_select_axes() test_index_array_select_axes_zero_size() def test_scalar_tensor_creation(): assertRaises(MXNetError, mx.nd.zeros, shape=()) assertRaises(MXNetError, mx.nd.ones, shape=()) with mx.np_shape(): data_mx = mx.nd.ones(shape=()) data_np = np.ones((), dtype=data_mx.dtype) assert same(data_mx.asnumpy(), data_np) def test_zero_size_tensor_creation(): assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0)) assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0)) with mx.np_shape(): data_mx = mx.nd.ones(shape=(0, 1, 0, 4)) data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype) assert same(data_mx.asnumpy(), data_np) def test_concat_with_zero_size_tensor(): with mx.np_shape(): data1 = mx.nd.ones((0, 8, 12)) data2 = mx.nd.ones((3, 8, 12)) data3 = mx.nd.ones((0, 8, 12)) ret = mx.nd.Concat(data1, data2, data3, dim=0) assert ret.shape == (3, 8, 12) data1 = mx.nd.ones((0, 3, 10)) data2 = mx.nd.ones((0, 4, 10)) data3 = mx.nd.ones((0, 5, 10)) ret = mx.nd.Concat(data1, data2, data3, dim=1) assert ret.shape == (0, 12, 10) def test_np_shape_decorator(): @mx.use_np_shape def check_scalar_one(): """Generate scalar one tensor""" return mx.nd.ones(shape=()) assert check_scalar_one.__name__ == "check_scalar_one" assert check_scalar_one.__doc__ == "Generate scalar one tensor" assert check_scalar_one().shape == () for active in [True, False]: with mx.np_shape(active=active): assert check_scalar_one.__name__ == "check_scalar_one" assert check_scalar_one.__doc__ == "Generate scalar one tensor" assert 
check_scalar_one().shape == () @mx.use_np_shape def check_concat(shape1, shape2, axis): data1 = mx.nd.ones(shape1) data2 = mx.nd.ones(shape2) ret = mx.nd.Concat(data1, data2, dim=axis) expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis) assert ret.shape == expected_ret.shape check_concat((0, 3, 4), (5, 3, 4), 0) check_concat((8, 0, 5), (8, 7, 5), 1) check_concat((8, 0, 0), (8, 0, 0), 2) for active in [True, False]: check_concat((0, 3, 4), (5, 3, 4), 0) check_concat((8, 0, 5), (8, 7, 5), 1) check_concat((8, 0, 0), (8, 0, 0), 2) def test_add_n(): data_shape = (2, 2) input_num = 5 data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)] rslt = mx.nd.zeros(shape=data_shape) for i in range(input_num): rslt += data[i] add_n_rslt = mx.nd.add_n(*data, out=data[0]) assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5) def test_get_all_registered_operators(): ops = get_all_registered_operators() assert isinstance(ops, list) assert len(ops) > 0 assert 'Activation' in ops def test_get_operator_arguments(): operator_arguments = get_operator_arguments('Activation') assert isinstance(operator_arguments, OperatorArguments) assert operator_arguments.names == ['data', 'act_type'] assert operator_arguments.types \ == ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"] assert operator_arguments.narg == 2 def test_transpose_infer_shape_back(): o1 = mx.sym.ones(shape=[2,3]) o2 = mx.sym.ones(shape=[-1,-1]) t = mx.sym.transpose(o2) b = o1 + t x = b._bind(mx.cpu(), args={}) y = x.forward() assert(y[0].shape == (2,3)) def test_transpose_infer_shape_mixed(): o1 = mx.sym.ones(shape=[2,-1]) o2 = mx.sym.ones(shape=[3,-1]) t = mx.sym.transpose(o2) b = o1 + t x = b._bind(mx.cpu(), args={}) y = x.forward() assert(y[0].shape == (2,3)) def test_sample_normal_default_shape(): # Test case from https://github.com/apache/incubator-mxnet/issues/16135 s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5])) assert s.shape == (1,) s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=()) assert s.shape == (1,) s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1) assert s.shape == (1, 1) s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,)) assert s.shape == (1, 1) def test_large_tensor_disabled_err_msg(): LARGE_X = 4300000000 MEDIUM_X = 1000000000 SMALL_Y = 1 shape = (2, LARGE_X) def check_nd_array(): x = np.arange(0, LARGE_X) assertRaises(MXNetError, mx.nd.array, x) def check_nd_ones(): assertRaises(MXNetError, mx.nd.ones, shape) def check_nd_zeros(): assertRaises(MXNetError, mx.nd.zeros, shape) def check_nd_full(): val = 1 assertRaises(Exception, mx.nd.full, shape, val) def check_nd_arange(): start = 0 stop = LARGE_X assertRaises(Exception, mx.nd.arange, start, stop) def check_nd_random(): shape = (2, LARGE_X) def check_random_exp(): lam = 4 assertRaises(MXNetError, mx.nd.random_exponential, lam, shape) def check_random_gamma(): alpha = 9 beta = 0.5 assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape) def check_random_normal(): loc = 0 scale = 1 assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape) def check_random_poisson(): lam = 4 assertRaises(MXNetError, mx.nd.random_poisson, alpha, lam, shape) def check_random_randint(): low = 0 high = 1000000 assertRaises(MXNetError, mx.nd.random_randint, low, high, shape) def check_random_uniform(): low = 0 hight = 1 assertRaises(MXNetError, mx.nd.random_uniform, alpha, 
beta, shape) def check_multihead_attention_selfatt(dtype): def convert_weight(F, q_weight, k_weight, v_weight, num_heads): q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True) k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True) v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True) all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2) all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True) return all_weights def convert_bias(F, q_bias, k_bias, v_bias, num_heads): q_bias = F.reshape(q_bias, shape=(num_heads, -1)) k_bias = F.reshape(k_bias, shape=(num_heads, -1)) v_bias = F.reshape(v_bias, shape=(num_heads, -1)) all_bias = F.stack(q_bias, k_bias, v_bias, axis=1) all_bias = F.reshape(all_bias, shape=(-1,)) return all_bias batch_size = 2 qkv_length = 7 # length of a sequence qkv_dim = 9 # dimension of encoding num_heads = 3 # number of attention head head_dim = 5 # head size out_dim = 13 * num_heads qkv_units = num_heads * head_dim arg_params = { 'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype), 'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype), 'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype), 'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype), 'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype), } qkv = mx.sym.Variable('qkv') sonde = mx.sym.Variable('sonde') q_weight = mx.sym.Variable('q_weight') k_weight = mx.sym.Variable('k_weight') v_weight = mx.sym.Variable('v_weight') q_bias = mx.sym.Variable('q_bias') k_bias = mx.sym.Variable('k_bias') v_bias = mx.sym.Variable('v_bias') out_weight = mx.sym.Variable('out_weight') out_bias = mx.sym.Variable('out_bias') qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads) qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads) qkv = mx.sym.transpose(qkv, axes=(1, 0, 2)) qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False, num_hidden=qkv_units * 3, no_bias=False) att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk( qkv_proj, heads=num_heads) att_score = att_score + sonde weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt( qkv_proj, att_score, heads=num_heads) output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False, num_hidden=out_dim, no_bias=False) output = mx.sym.transpose(output, axes=(1, 0, 2)) output = mx.sym.Group([output, att_score]) executor = output._simple_bind(ctx=default_context(), qkv=(batch_size, qkv_length, qkv_dim), q_weight=(qkv_units, qkv_dim), q_bias=(qkv_units,), k_weight=(qkv_units, qkv_dim), k_bias=(qkv_units,), v_weight=(qkv_units, qkv_dim), v_bias=(qkv_units,), type_dict={'qkv': dtype, 'q_weight': dtype, 'k_weight': dtype, 'v_weight': dtype, 'q_bias': dtype, 'k_bias': dtype, 'v_bias': dtype, 'sonde': dtype}, grad_req='write') executor.copy_params_from(arg_params, {}) executor.arg_dict['sonde'][:] = 0. 
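    # 'sonde' only adds zeros to the attention scores here, so it does not change the forward result; wait_to_read() blocks until the in-place write has completed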
executor.arg_dict['sonde'].wait_to_read() executor.forward(is_train=True) output_shape = executor.outputs[0].shape output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1 output_opti = executor.outputs[0].asnumpy() att_score_opti = executor.outputs[1].asnumpy() executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)]) grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()} qkv = mx.sym.Variable('qkv') sonde = mx.sym.Variable('sonde') q_weight = mx.sym.Variable('q_weight') k_weight = mx.sym.Variable('k_weight') v_weight = mx.sym.Variable('v_weight') q_bias = mx.sym.Variable('q_bias') k_bias = mx.sym.Variable('k_bias') v_bias = mx.sym.Variable('v_bias') out_weight = mx.sym.Variable('out_weight') out_bias = mx.sym.Variable('out_bias') q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False, num_hidden=qkv_units, no_bias=False) k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False, num_hidden=qkv_units, no_bias=False) v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False, num_hidden=qkv_units, no_bias=False) q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1)) q = mx.sym.transpose(q, axes=(0, 2, 1, 3)) q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True) k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1)) k = mx.sym.transpose(k, axes=(0, 2, 1, 3)) k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True) q = mx.sym.contrib.div_sqrt_dim(q) att_score = mx.sym.batch_dot(q, k, transpose_b=True) att_score = att_score + sonde v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1)) v = mx.sym.transpose(v, axes=(0, 2, 1, 3)) v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True) weighted_value = mx.sym.batch_dot(att_score, v) weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0), reverse=True) weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3)) weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1)) output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False, num_hidden=out_dim, no_bias=False) output = mx.sym.Group([output, att_score]) executor = output._simple_bind(ctx=default_context(), qkv=(batch_size, qkv_length, qkv_dim), type_dict={'qkv': dtype}, grad_req='write') executor.copy_params_from(arg_params, {}) executor.arg_dict['sonde'][:] = 0. 
executor.arg_dict['sonde'].wait_to_read() executor.forward(is_train=True) output_orig = executor.outputs[0].asnumpy() att_score_orig = executor.outputs[1].asnumpy() executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)]) grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()} assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3) assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3) for k in grads_opti.keys(): assert(grads_orig[k].dtype == grads_opti[k].dtype) assert(grads_orig[k].shape == grads_opti[k].shape) assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3) @assert_raises_cuda_not_satisfied(min_version='9.1') @pytest.mark.serial def test_multihead_attention_selfatt(): dtypes = ['float32'] if default_context().device_type == 'gpu': dtypes += ['float16'] for dtype in dtypes: check_multihead_attention_selfatt(dtype=dtype) def check_multihead_attention_encdec(dtype): def convert_weight(F, k_weight, v_weight, num_heads): k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True) v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True) all_weights = F.concat(k_weight, v_weight, dim=-2) all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True) return all_weights def convert_bias(F, k_bias, v_bias, num_heads): k_bias = F.reshape(k_bias, shape=(num_heads, -1)) v_bias = F.reshape(v_bias, shape=(num_heads, -1)) all_bias = F.stack(k_bias, v_bias, axis=1) all_bias = F.reshape(all_bias, shape=(-1,)) return all_bias batch_size = 2 qkv_length = 7 # length of a sequence qkv_dim = 9 # dimension of encoding num_heads = 3 # number of attention head head_dim = 5 # head size out_dim = 13 * num_heads qkv_units = num_heads * head_dim arg_params = { 'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype), 'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype), 'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype), 'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype), 'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype), 'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype), } q = mx.sym.Variable('q') kv = mx.sym.Variable('kv') sonde = mx.sym.Variable('sonde') q_weight = mx.sym.Variable('q_weight') k_weight = mx.sym.Variable('k_weight') v_weight = mx.sym.Variable('v_weight') q_bias = mx.sym.Variable('q_bias') k_bias = mx.sym.Variable('k_bias') v_bias = mx.sym.Variable('v_bias') out_weight = mx.sym.Variable('out_weight') out_bias = mx.sym.Variable('out_bias') kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads) kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads) kv = mx.sym.transpose(kv, axes=(1, 0, 2)) kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False, num_hidden=qkv_units * 2, no_bias=False) q = mx.sym.transpose(q, axes=(1, 0, 2)) q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False, 
num_hidden=qkv_units, no_bias=False) att_score = mx.sym.contrib.interleaved_matmul_encdec_qk( q_proj, kv_proj, heads=num_heads) att_score = att_score + sonde weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt( kv_proj, att_score, heads=num_heads) output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False, num_hidden=out_dim, no_bias=False) output = mx.sym.transpose(output, axes=(1, 0, 2)) output = mx.sym.Group([output, att_score]) executor = output._simple_bind(ctx=default_context(), q=(batch_size, qkv_length, qkv_dim), kv=(batch_size, qkv_length, qkv_dim), q_weight=(qkv_units, qkv_dim), q_bias=(qkv_units,), k_weight=(qkv_units, qkv_dim), k_bias=(qkv_units,), v_weight=(qkv_units, qkv_dim), v_bias=(qkv_units,), out_weight=(out_dim, qkv_units), out_bias=(out_dim,), type_dict={'q': dtype, 'kv': dtype, 'q_weight': dtype, 'q_bias': dtype, 'k_weight': dtype, 'k_bias': dtype, 'v_weight': dtype, 'v_bias': dtype, 'out_weight': dtype, 'out_bias': dtype, }, grad_req='write') executor.copy_params_from(arg_params, {}) executor.arg_dict['sonde'][:] = 0. executor.arg_dict['sonde'].wait_to_read() executor.forward(is_train=True) output_shape = executor.outputs[0].shape output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1 output_opti = executor.outputs[0].asnumpy() att_score_opti = executor.outputs[1].asnumpy() executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)]) grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()} q = mx.sym.Variable('q') kv = mx.sym.Variable('kv') sonde = mx.sym.Variable('sonde') q_weight = mx.sym.Variable('q_weight') k_weight = mx.sym.Variable('k_weight') v_weight = mx.sym.Variable('v_weight') q_bias = mx.sym.Variable('q_bias') k_bias = mx.sym.Variable('k_bias') v_bias = mx.sym.Variable('v_bias') out_weight = mx.sym.Variable('out_weight') out_bias = mx.sym.Variable('out_bias') q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False, num_hidden=qkv_units, no_bias=False) k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False, num_hidden=qkv_units, no_bias=False) v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False, num_hidden=qkv_units, no_bias=False) q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1)) q = mx.sym.transpose(q, axes=(0, 2, 1, 3)) q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True) k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1)) k = mx.sym.transpose(k, axes=(0, 2, 1, 3)) k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True) q = mx.sym.contrib.div_sqrt_dim(q) att_score = mx.sym.batch_dot(q, k, transpose_b=True) att_score = att_score + sonde v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1)) v = mx.sym.transpose(v, axes=(0, 2, 1, 3)) v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True) weighted_value = mx.sym.batch_dot(att_score, v) weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0), reverse=True) weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3)) weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1)) output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False, num_hidden=out_dim, no_bias=False) output = mx.sym.Group([output, att_score]) executor = output._simple_bind(ctx=default_context(), q=(batch_size, qkv_length, qkv_dim), kv=(batch_size, qkv_length, qkv_dim), type_dict={'q': dtype, 'kv': dtype}, grad_req='write') executor.copy_params_from(arg_params, {}) executor.arg_dict['sonde'][:] = 0. 
executor.arg_dict['sonde'].wait_to_read() executor.forward(is_train=True) output_orig = executor.outputs[0].asnumpy() att_score_orig = executor.outputs[1].asnumpy() executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)]) grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()} assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3) assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3) for k in grads_opti.keys(): assert(grads_orig[k].dtype == grads_opti[k].dtype) assert(grads_orig[k].shape == grads_opti[k].shape) assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3) @assert_raises_cuda_not_satisfied(min_version='9.1') @pytest.mark.serial def test_multihead_attention_encdec(): dtypes = ['float32'] if default_context().device_type == 'gpu': dtypes += ['float16'] for dtype in dtypes: check_multihead_attention_encdec(dtype=dtype) @pytest.mark.serial def test_im2col_col2im(): def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0): pad_size = spatial + 2 * pad dilated_kernel = dilate * (kernel - 1) + 1 return (pad_size - dilated_kernel) // stride + 1 def build_kwargs(kernel, stride=1, dilate=1, pad=0): return {'kernel': (kernel, kernel), 'stride': (stride, stride), 'dilate': (dilate, dilate), 'pad': (pad, pad)} # use im2col to compute convolution def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0): batch_size = input_shape[0] channel = input_shape[1] kwargs = build_kwargs(kernel, stride, dilate, pad) data = mx.nd.uniform(shape=input_shape) col = mx.nd.im2col(data, **kwargs) w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel)) c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1)) hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad) wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad) c1 = c1.reshape((batch_size, num_filter, hos, wos)) c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs) assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5) test_conv_compute( input_shape = (5, 3, 30, 20), num_filter = 10, kernel = 3 ) test_conv_compute( input_shape = (5, 3, 30, 20), num_filter = 10, kernel = 3, stride = 2 ) test_conv_compute( input_shape = (5, 3, 30, 20), num_filter = 10, kernel = 3, stride = 2, dilate = 2 ) test_conv_compute( input_shape = (5, 3, 30, 20), num_filter = 10, kernel = 3, stride = 2, dilate = 2, pad = 1 ) # use composite of im2col and col2im to reconstruct image def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0): batch_size = input_shape[0] channel = input_shape[1] kwargs = build_kwargs(kernel, stride, dilate, pad) data = mx.nd.uniform(shape=input_shape) col = mx.nd.im2col(data, **kwargs) im1 = mx.nd.col2im(col, input_shape[2:], **kwargs) im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5) test_reconstruct( input_shape = (5, 3, 30, 20), kernel = 3 ) test_reconstruct( input_shape = (5, 3, 30, 20), kernel = 3, stride = 2 ) test_reconstruct( input_shape = (5, 3, 30, 20), kernel = 3, stride = 2, dilate = 2 ) test_reconstruct( input_shape = (5, 3, 30, 20), kernel = 3, stride = 2, dilate = 2, pad = 1 ) # test gradient # the grad of im2col is col2im, and vice versa def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0): # im2col data = mx.sym.Variable('data') kwargs = build_kwargs(kernel, stride, 
dilate, pad) sym = mx.sym.im2col(data, **kwargs) im = mx.nd.uniform(shape=input_shape) col = mx.nd.im2col(im, **kwargs) col_shape = col.shape expected = mx.nd.col2im(col, input_shape[2:], **kwargs) check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()]) # col2im data = mx.sym.Variable('data') sym = mx.sym.col2im(data, input_shape[2:], **kwargs) col = mx.nd.uniform(shape=col_shape) im = mx.nd.col2im(col, input_shape[2:], **kwargs) expected = mx.nd.im2col(im, **kwargs) check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()]) test_grad( input_shape = (5, 3, 30, 20), kernel = 3 ) test_grad( input_shape = (5, 3, 30, 20), kernel = 3, stride = 2 ) test_grad( input_shape = (5, 3, 30, 20), kernel = 3, stride = 2, dilate = 2 ) test_grad( input_shape = (5, 3, 30, 20), kernel = 3, stride = 2, dilate = 2, pad = 1 ) def test_elemwise_sum_for_gradient_accumulation(): for nrepeat in range(1, 10): stored_grad = dict() for grad_req in ['write', 'add']: a = mx.nd.array([1]) b = mx.nd.array([2]) if grad_req == 'write': a.attach_grad(grad_req='write') elif grad_req == 'add': a.attach_grad(grad_req='add') a.grad[:] = 0 with mx.autograd.record(): for _ in range(nrepeat): b = b * a b.backward() stored_grad[grad_req] = a.grad.asscalar() assert stored_grad['write'] == stored_grad['add'] assert stored_grad['write'] == 2 * nrepeat def test_elementwise_ops_on_misaligned_input(): a = mx.nd.array([1,2,3,4], dtype='float16') b = mx.nd.array([1,2,3,4], dtype='float16') c = a[1:3] d = b[1:3] # Note: testing just elemwise_add since all elemwise_ops # share the implementation mx.nd.elemwise_add(c, d, out=c) mx.nd.waitall() a = mx.nd.array([1,2,3,4], dtype='float16') b = mx.nd.array([1,2,3,4], dtype='float16') c = a[0:3] d = b[0:3] mx.nd.elemwise_add(c, d, out=c) mx.nd.waitall() assert a[3].asscalar() == 4.0 @pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64']) @pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10]) @pytest.mark.parametrize('both_ways', [False, True]) def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways): shape = list(rand_shape_2d()) + [lead_dim] small_shape = [shape[0], 1, lead_dim] if both_ways: # Broadcast in both ways [1, K, L] x [M, 1, L] big_shape = [1, shape[1], lead_dim] else: big_shape = shape size = np.product(shape) small_size = np.product(small_shape) big_size = np.product(big_shape) a = mx.nd.arange(5000) b = mx.nd.arange(5000) e = mx.nd.arange(5000) c = a[1:big_size + 1].reshape(big_shape) d = b[1:small_size + 1].reshape(small_shape) f = e[1:size + 1].reshape(shape) mx.nd.broadcast_add(c, d, out=f) expected = c.asnumpy() + d.asnumpy() mx.nd.waitall() assert_almost_equal(f, expected) @pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64']) @pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10]) @pytest.mark.parametrize('both_ways', [False, True]) def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways): shape = list(rand_shape_2d()) + [lead_dim] small_shape = [shape[0], shape[1], 1] if both_ways: # Broadcast in both ways [1, K, L] x [M, 1, 1] big_shape = [1, shape[1], lead_dim] else: big_shape = shape size = np.product(shape) small_size = np.product(small_shape) big_size = np.product(big_shape) a = mx.nd.arange(5000) b = mx.nd.arange(5000) e = mx.nd.arange(5000) c = a[1:big_size + 1].reshape(big_shape) d = b[1:small_size + 1].reshape(small_shape) f = e[1:size + 1].reshape(shape) mx.nd.broadcast_add(c, d, out=f) expected = c.asnumpy() + d.asnumpy() mx.nd.waitall() 
assert_almost_equal(f, expected)<|fim▁end|>
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from setuptools import setup, find_packages

import os

<|fim▁hole|>

def reqs(*f):
    return list(filter(None, [strip_comments(l) for l in open(
        os.path.join(os.getcwd(), *f)).readlines()]))


def get_version(version_tuple):
    if not isinstance(version_tuple[-1], int):
        return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
    return '.'.join(map(str, version_tuple))

init = os.path.join(os.path.dirname(__file__), 'src', 'gmaps', '__init__.py')

version_line = list(filter(lambda l: l.startswith('VERSION'), open(init)))[0]

VERSION = get_version(eval(version_line.split('=')[-1]))

INSTALL_REQUIRES = reqs('requirements.txt')
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()

PACKAGES = find_packages('src')
PACKAGE_DIR = {'': 'src'}

setup(
    name='python-gmaps',
    version=VERSION,
    author='Michał Jaworski',
    author_email='[email protected]',
    description='Google Maps API client',
    long_description=README,
    packages=PACKAGES,
    package_dir=PACKAGE_DIR,
    url='https://github.com/swistakm/python-gmaps',
    include_package_data=True,
    install_requires=INSTALL_REQUIRES,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
)<|fim▁end|>
def strip_comments(l):
    return l.split('#', 1)[0].strip()
<|file_name|>ODLv21Listener.py<|end_file_name|><|fim▁begin|># Generated from java-escape by ANTLR 4.5 from antlr4 import * # This class defines a complete listener for a parse tree produced by ODLv21Parser. class ODLv21Listener(ParseTreeListener): # Enter a parse tree produced by ODLv21Parser#label. def enterLabel(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#label. def exitLabel(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#statement. def enterStatement(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#statement. def exitStatement(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#assignment_stmt.<|fim▁hole|> pass # Exit a parse tree produced by ODLv21Parser#assignment_stmt. def exitAssignment_stmt(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#pointer_stmt. def enterPointer_stmt(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#pointer_stmt. def exitPointer_stmt(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#object_stmt. def enterObject_stmt(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#object_stmt. def exitObject_stmt(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#group_stmt. def enterGroup_stmt(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#group_stmt. def exitGroup_stmt(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#value. def enterValue(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#value. def exitValue(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#date_time_value. def enterDate_time_value(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#date_time_value. def exitDate_time_value(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#sequence_value. def enterSequence_value(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#sequence_value. def exitSequence_value(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#sequence_1D. def enterSequence_1D(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#sequence_1D. def exitSequence_1D(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#sequence_2D. def enterSequence_2D(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#sequence_2D. def exitSequence_2D(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#set_value. def enterSet_value(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#set_value. def exitSet_value(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#ScalarInteger. def enterScalarInteger(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#ScalarInteger. def exitScalarInteger(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#ScalarBasedInteger. def enterScalarBasedInteger(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#ScalarBasedInteger. def exitScalarBasedInteger(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#ScalarFloat. def enterScalarFloat(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#ScalarFloat. def exitScalarFloat(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#ScalarScaledReal. def enterScalarScaledReal(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#ScalarScaledReal. def exitScalarScaledReal(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#ScalarIdentifier. def enterScalarIdentifier(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#ScalarIdentifier. 
def exitScalarIdentifier(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#ScalarSymbol. def enterScalarSymbol(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#ScalarSymbol. def exitScalarSymbol(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#ScalarString. def enterScalarString(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#ScalarString. def exitScalarString(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#units_expression. def enterUnits_expression(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#units_expression. def exitUnits_expression(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#units_factor. def enterUnits_factor(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#units_factor. def exitUnits_factor(self, ctx): pass # Enter a parse tree produced by ODLv21Parser#namespace_identifier. def enterNamespace_identifier(self, ctx): pass # Exit a parse tree produced by ODLv21Parser#namespace_identifier. def exitNamespace_identifier(self, ctx): pass<|fim▁end|>
def enterAssignment_stmt(self, ctx):
<|file_name|>alerter.py<|end_file_name|><|fim▁begin|>from amon.apps.alerts.checkers.system import system_alerts from amon.apps.alerts.checkers.process import process_alerts from amon.apps.alerts.checkers.plugin import plugin_alerts from amon.apps.alerts.checkers.healthcheck import healthcheck_alert_checker from amon.apps.alerts.models import alerts_model from amon.apps.plugins.models import plugin_model from amon.apps.processes.models import process_model from amon.utils.dates import unix_utc_now class Alerter(object): def check_tags(self, server=None, rule=None): valid_rule = True server_tags = server.get('tags', []) server_tags = [str(t) for t in server_tags] tags = rule.get('tags', []) tags = [str(t) for t in tags] # Check tags first if len(server_tags) > 0 and len(tags) > 0: valid_rule = set(tags).issubset(server_tags) return valid_rule class ServerAlerter(Alerter): def check(self, data, server): alerts = False account_id = server.get('account_id', None) # System alerts rules = alerts_model.get_alerts(type='system', server=server) if rules: alerts = system_alerts.check(data=data, rules=rules, server=server) if alerts: alerts_model.save_system_occurence(alerts, server_id=server['_id']) # Global rules global_rules = alerts_model.get_global_alerts(account_id=account_id) if global_rules: alerts = system_alerts.check(data=data, rules=global_rules, server=server) if alerts: alerts_model.save_system_occurence(alerts, server_id=server['_id']) return alerts # For the test suite class ProcessAlerter(Alerter): def check_rule_and_save(self, process_data_dict=None, rule=None, process_id=None, server_id=None): process_data = next((item for item in process_data_dict if item["p"] == process_id), None) if process_data: alert = process_alerts.check(process_data, rule) if alert: alerts_model.save_occurence(alert, server_id=server_id) def check(self, data, server): process_data_dict = data.get('data', None) rules = alerts_model.get_alerts(type='process', server=server)<|fim▁hole|> self.check_rule_and_save(process_id=process_id, rule=rule, process_data_dict=process_data_dict, server_id=server['_id']) # Global alerts rules = alerts_model.get_alerts(type='process_global') if len(rules) + len(process_data_dict) > 0: all_processes = process_model.get_all_for_server(server['_id']) for rule in rules: valid_rule = self.check_tags(server=server, rule=rule) if valid_rule: process_name = rule.get('process') process_id = None # Check if this server has a process with this name for p in all_processes.clone(): if p.get('name') == process_name: process_id = p.get('_id') if process_id: self.check_rule_and_save(process_id=process_id, rule=rule, process_data_dict=process_data_dict, server_id=server['_id']) class PluginAlerter(Alerter): def check(self, data=None, plugin=None, server=None): plugin_data = data.get('gauges', None) rules = alerts_model.get_alerts_for_plugin(plugin=plugin) if len(rules) > 0: for rule in rules: alert = plugin_alerts.check(data=plugin_data, rule=rule) if alert: alerts_model.save_occurence(alert) # Global alerts rules = alerts_model.get_alerts(type='plugin_global') if len(rules) > 0: all_plugins = plugin_model.get_for_server(server_id=server['_id']) for rule in rules: valid_rule = self.check_tags(server=server, rule=rule) if valid_rule: plugin_name = rule.get('plugin') plugin_id = None # Check if this server has a plugin with this name for p in all_plugins.clone(): if p.get('name') == plugin_name: plugin_id = p.get('_id') if plugin_id: alert = plugin_alerts.check(data=plugin_data, rule=rule) if 
alert: alerts_model.save_occurence(alert, server_id=server['_id']) class UptimeAlerter(object): def check(self, data, server): process_data_dict = data.get('data', None) rules = alerts_model.get_alerts(type='uptime', server=server) if len(rules) + len(process_data_dict) > 0: for rule in rules: process_id = rule['process'] process_data = next((item for item in process_data_dict if item["p"] == process_id), None) # Process is down if not process_data: alerts_model.save_uptime_occurence(rule, data=process_data) class NotSendingDataAlerter(object): def check(self): time_now = unix_utc_now() alerts = alerts_model.get_alerts_not_sending_data() for alert in alerts: period = alert.get('period') for server in alert.get('server_data'): last_check = server.get('last_check') # Skip all the servers with no agent installed if last_check != None: since_last_check = time_now - last_check # 65 seconds, 60 seconds sleep, 5 seconds to collect if since_last_check > (period + 10): # Trigger alert, add 10 seconds buffer alert['server'] = server alerts_model.save_notsendingdata_occurence(alert=alert) class HealthCheckAlerter(object): def check(self, data=None, server=None): alerts = alerts_model.get_alerts(type='health_check') for alert in alerts: # Data is list for d in data: trigger = healthcheck_alert_checker.check(data=d, rule=alert) # Will scan all the data, check for relevancy and then check the specific entry if trigger: alerts_model.save_healtcheck_occurence(trigger=trigger, server_id=server['_id']) server_alerter = ServerAlerter() process_alerter = ProcessAlerter() uptime_alerter = UptimeAlerter() plugin_alerter = PluginAlerter() health_check_alerter = HealthCheckAlerter() notsendingdata_alerter = NotSendingDataAlerter()<|fim▁end|>
        if len(rules) + len(process_data_dict) > 0:
            for rule in rules:
                process_id = rule['process']
<|file_name|>MethodNative_string_isEmpty.java<|end_file_name|><|fim▁begin|>package org.bds.lang.nativeMethods.string;

import java.io.File;
import java.util.ArrayList;
import java.util.Collections;

import org.bds.lang.Parameters;
import org.bds.lang.Type;
import org.bds.lang.TypeList;
import org.bds.lang.nativeMethods.MethodNative;
import org.bds.run.BdsThread;<|fim▁hole|>
import org.bds.util.Gpr;

public class MethodNative_string_isEmpty extends MethodNative {
	public MethodNative_string_isEmpty() {
		super();
	}

	@Override
	protected void initMethod() {
		functionName = "isEmpty";
		classType = Type.STRING;
		returnType = Type.BOOL;

		String argNames[] = { "this" };
		Type argTypes[] = { Type.STRING };
		parameters = Parameters.get(argTypes, argNames);

		addNativeMethodToClassScope();
	}

	@Override
	protected Object runMethodNative(BdsThread csThread, Object objThis) {
		return objThis.toString().isEmpty();
	}
}<|fim▁end|>
import org.bds.task.Task;
<|file_name|>sapi51.rs<|end_file_name|><|fim▁begin|>// Copyright © 2017 winapi-rs developers // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. //! SAPI 5.1 definitions use ctypes::{c_char, c_float, c_long, c_short, c_void}; use shared::guiddef::{CLSID, GUID, IID, REFCLSID, REFGUID, REFIID}; use shared::minwindef::{ BOOL, BYTE, DWORD, FILETIME, HKEY, HMODULE, LPARAM, UINT, ULONG, USHORT, WORD, WPARAM }; use shared::mmreg::WAVEFORMATEX; use shared::windef::HWND; use shared::wtypes::{BSTR, VARIANT_BOOL}; use shared::wtypesbase::{ CLSCTX_INPROC_HANDLER, CLSCTX_INPROC_SERVER, CLSCTX_LOCAL_SERVER, CLSCTX_REMOTE_SERVER }; use um::oaidl::{DISPID_NEWENUM, DISPID_VALUE, IDispatch, IDispatchVtbl, VARIANT}; use um::objidlbase::{IStream, IStreamVtbl, STREAM_SEEK_CUR, STREAM_SEEK_END, STREAM_SEEK_SET}; use um::servprov::{IServiceProvider, IServiceProviderVtbl}; use um::unknwnbase::{IUnknown, IUnknownVtbl}; use um::winnt::{HANDLE, HRESULT, LONG, LONGLONG, LPCWSTR, LPWSTR, ULONGLONG, WCHAR}; ENUM!{enum SPDATAKEYLOCATION { SPDKL_DefaultLocation = 0, SPDKL_CurrentUser = 1, SPDKL_LocalMachine = 2, SPDKL_CurrentConfig = 5, }} pub const SPDUI_EngineProperties: &'static str = "EngineProperties"; pub const SPDUI_AddRemoveWord: &'static str = "AddRemoveWord"; pub const SPDUI_UserTraining: &'static str = "UserTraining"; pub const SPDUI_MicTraining: &'static str = "MicTraining"; pub const SPDUI_RecoProfileProperties: &'static str = "RecoProfileProperties"; pub const SPDUI_AudioProperties: &'static str = "AudioProperties"; pub const SPDUI_AudioVolume: &'static str = "AudioVolume"; pub const SPDUI_UserEnrollment: &'static str = "UserEnrollment"; pub const SPDUI_ShareData: &'static str = "ShareData"; pub const SPDUI_Tutorial: &'static str = "Tutorial"; ENUM!{enum SPSTREAMFORMAT { SPSF_Default = -1i32 as u32, SPSF_NoAssignedFormat = 0, SPSF_Text = 1, SPSF_NonStandardFormat = 2, SPSF_ExtendedAudioFormat = 3, SPSF_8kHz8BitMono = 4, SPSF_8kHz8BitStereo = 5, SPSF_8kHz16BitMono = 6, SPSF_8kHz16BitStereo = 7, SPSF_11kHz8BitMono = 8, SPSF_11kHz8BitStereo = 9, SPSF_11kHz16BitMono = 10, SPSF_11kHz16BitStereo = 11, SPSF_12kHz8BitMono = 12, SPSF_12kHz8BitStereo = 13, SPSF_12kHz16BitMono = 14, SPSF_12kHz16BitStereo = 15, SPSF_16kHz8BitMono = 16, SPSF_16kHz8BitStereo = 17, SPSF_16kHz16BitMono = 18, SPSF_16kHz16BitStereo = 19, SPSF_22kHz8BitMono = 20, SPSF_22kHz8BitStereo = 21, SPSF_22kHz16BitMono = 22, SPSF_22kHz16BitStereo = 23, SPSF_24kHz8BitMono = 24, SPSF_24kHz8BitStereo = 25, SPSF_24kHz16BitMono = 26, SPSF_24kHz16BitStereo = 27, SPSF_32kHz8BitMono = 28, SPSF_32kHz8BitStereo = 29, SPSF_32kHz16BitMono = 30, SPSF_32kHz16BitStereo = 31, SPSF_44kHz8BitMono = 32, SPSF_44kHz8BitStereo = 33, SPSF_44kHz16BitMono = 34, SPSF_44kHz16BitStereo = 35, SPSF_48kHz8BitMono = 36, SPSF_48kHz8BitStereo = 37, SPSF_48kHz16BitMono = 38, SPSF_48kHz16BitStereo = 39, SPSF_TrueSpeech_8kHz1BitMono = 40, SPSF_CCITT_ALaw_8kHzMono = 41, SPSF_CCITT_ALaw_8kHzStereo = 42, SPSF_CCITT_ALaw_11kHzMono = 43, SPSF_CCITT_ALaw_11kHzStereo = 44, SPSF_CCITT_ALaw_22kHzMono = 45, SPSF_CCITT_ALaw_22kHzStereo = 46, SPSF_CCITT_ALaw_44kHzMono = 47, SPSF_CCITT_ALaw_44kHzStereo = 48, SPSF_CCITT_uLaw_8kHzMono = 49, SPSF_CCITT_uLaw_8kHzStereo = 50, SPSF_CCITT_uLaw_11kHzMono = 51, 
SPSF_CCITT_uLaw_11kHzStereo = 52, SPSF_CCITT_uLaw_22kHzMono = 53, SPSF_CCITT_uLaw_22kHzStereo = 54, SPSF_CCITT_uLaw_44kHzMono = 55, SPSF_CCITT_uLaw_44kHzStereo = 56, SPSF_ADPCM_8kHzMono = 57, SPSF_ADPCM_8kHzStereo = 58, SPSF_ADPCM_11kHzMono = 59, SPSF_ADPCM_11kHzStereo = 60, SPSF_ADPCM_22kHzMono = 61, SPSF_ADPCM_22kHzStereo = 62, SPSF_ADPCM_44kHzMono = 63, SPSF_ADPCM_44kHzStereo = 64, SPSF_GSM610_8kHzMono = 65, SPSF_GSM610_11kHzMono = 66, SPSF_GSM610_22kHzMono = 67, SPSF_GSM610_44kHzMono = 68, SPSF_NUM_FORMATS = 69, }} extern { pub static SPDFID_Text: GUID; pub static SPDFID_WaveFormatEx: GUID; } pub const SPREG_USER_ROOT: &'static str = "HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Speech"; pub const SPREG_LOCAL_MACHINE_ROOT: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech"; pub const SPCAT_AUDIOOUT: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AudioOutput"; pub const SPCAT_AUDIOIN: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AudioInput"; pub const SPCAT_VOICES: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices"; pub const SPCAT_RECOGNIZERS: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Recognizers"; pub const SPCAT_APPLEXICONS: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AppLexicons"; pub const SPCAT_PHONECONVERTERS: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\PhoneConverters"; pub const SPCAT_TEXTNORMALIZERS: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\TextNormalizers"; pub const SPCAT_RECOPROFILES: &'static str = "HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Speech\\RecoProfiles"; pub const SPMMSYS_AUDIO_IN_TOKEN_ID: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AudioInput\\TokenEnums\\MMAudioIn\\"; pub const SPMMSYS_AUDIO_OUT_TOKEN_ID: &'static str = "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AudioOutput\\TokenEnums\\MMAudioOut\\"; pub const SPCURRENT_USER_LEXICON_TOKEN_ID: &'static str = "HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Speech\\CurrentUserLexicon"; pub const SPCURRENT_USER_SHORTCUT_TOKEN_ID: &'static str = "HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Speech\\CurrentUserShortcut"; pub const SPTOKENVALUE_CLSID: &'static str = "CLSID"; pub const SPTOKENKEY_FILES: &'static str = "Files"; pub const SPTOKENKEY_UI: &'static str = "UI"; pub const SPTOKENKEY_ATTRIBUTES: &'static str = "Attributes"; pub const SPVOICECATEGORY_TTSRATE: &'static str = "DefaultTTSRate"; pub const SPPROP_RESOURCE_USAGE: &'static str = "ResourceUsage"; pub const SPPROP_HIGH_CONFIDENCE_THRESHOLD: &'static str = "HighConfidenceThreshold"; pub const SPPROP_NORMAL_CONFIDENCE_THRESHOLD: &'static str = "NormalConfidenceThreshold"; pub const SPPROP_LOW_CONFIDENCE_THRESHOLD: &'static str = "LowConfidenceThreshold"; pub const SPPROP_RESPONSE_SPEED: &'static str = "ResponseSpeed"; pub const SPPROP_COMPLEX_RESPONSE_SPEED: &'static str = "ComplexResponseSpeed"; pub const SPPROP_ADAPTATION_ON: &'static str = "AdaptationOn"; pub const SPPROP_PERSISTED_BACKGROUND_ADAPTATION: &'static str = "PersistedBackgroundAdaptation"; pub const SPPROP_PERSISTED_LANGUAGE_MODEL_ADAPTATION: &'static str = "PersistedLanguageModelAdaptation"; pub const SPPROP_UX_IS_LISTENING: &'static str = "UXIsListening"; pub const SPTOPIC_SPELLING: &'static str = "Spelling"; pub const SPWILDCARD: &'static str = "..."; pub const SPDICTATION: &'static str = "*"; pub const SPINFDICTATION: &'static str = "*+"; pub const SP_LOW_CONFIDENCE: c_char = -1; pub const SP_NORMAL_CONFIDENCE: 
c_char = 0; pub const SP_HIGH_CONFIDENCE: c_char = 1; pub const DEFAULT_WEIGHT: c_float = 1.0; pub const SP_MAX_WORD_LENGTH: ULONG = 128; pub const SP_MAX_PRON_LENGTH: ULONG = 384; RIDL!(#[uuid(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)] interface ISpNotifyCallback(ISpNotifyCallbackVtbl) { fn NotifyCallback( wParam: WPARAM, lParam: LPARAM, ) -> HRESULT, }); FN!( stdcall SPNOTIFYCALLBACK(wParam: WPARAM, lParam: LPARAM, ) -> ()); RIDL!(#[uuid(0x5eff4aef, 0x8487, 0x11d2, 0x96, 0x1c, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0x28)] interface ISpNotifySource(ISpNotifySourceVtbl): IUnknown(IUnknownVtbl) { fn SetNotifySink( pNotifySink: *mut ISpNotifySink, ) -> HRESULT, fn SetNotifyWindowMessage( hWnd: HWND, Msg: UINT, wParam: WPARAM, lParam: LPARAM, ) -> HRESULT, fn SetNotifyCallbackFunction( pfnCallback: SPNOTIFYCALLBACK, wParam: WPARAM, lParam: LPARAM, ) -> HRESULT, fn SetNotifyCallbackInterface( pSpCallback: *mut ISpNotifyCallback, wParam: WPARAM, lParam: LPARAM, ) -> HRESULT, fn SetNotifyWin32Event() -> HRESULT, fn WaitForNotifyEvent( dwMilliseconds: DWORD, ) -> HRESULT, fn GetNotifyEventHandle() -> HANDLE, }); RIDL!(#[uuid(0x259684dc, 0x37c3, 0x11d2, 0x96, 0x03, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0x28)] interface ISpNotifySink(ISpNotifySinkVtbl): IUnknown(IUnknownVtbl) { fn Notify() -> HRESULT, }); RIDL!(#[uuid(0xaca16614, 0x5d3d, 0x11d2, 0x96, 0x0e, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0x28)] interface ISpNotifyTranslator(ISpNotifyTranslatorVtbl): ISpNotifySink(ISpNotifySinkVtbl) { fn InitWindowMessage( hWnd: HWND, Msg: UINT, wParam: WPARAM, lParam: LPARAM, ) -> HRESULT, fn InitCallback( pfnCallback: SPNOTIFYCALLBACK, wParam: WPARAM, lParam: LPARAM, ) -> HRESULT, fn InitSpNotifyCallback( pSpCallback: *mut ISpNotifyCallback, wParam: WPARAM, lParam: LPARAM, ) -> HRESULT, fn InitWin32Event( hEvent: HANDLE, fCloseHandleOnRelease: BOOL, ) -> HRESULT, fn Wait( dwMilliseconds: DWORD, ) -> HRESULT, fn GetEventHandle() -> HANDLE, }); RIDL!(#[uuid(0x14056581, 0xe16c, 0x11d2, 0xbb, 0x90, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0xc0)] interface ISpDataKey(ISpDataKeyVtbl): IUnknown(IUnknownVtbl) { fn SetData( pszValueName: LPCWSTR, cbData: ULONG, pData: *const BYTE, ) -> HRESULT, fn GetData( pszValueName: LPCWSTR, pcbData: *mut ULONG, pData: *mut BYTE, ) -> HRESULT, fn SetStringValue( pszValueName: LPCWSTR, pszValue: LPCWSTR, ) -> HRESULT, fn GetStringValue( pszValueName: LPCWSTR, ppszValue: *mut LPWSTR, ) -> HRESULT, fn SetDWORD( pszValueName: LPCWSTR, dwValue: DWORD, ) -> HRESULT, fn GetDWORD( pszValueName: LPCWSTR, pdwValue: *mut DWORD, ) -> HRESULT, fn OpenKey( pszSubKeyName: LPCWSTR, ppSubKey: *mut *mut ISpDataKey, ) -> HRESULT, fn CreateKey( pszSubKey: LPCWSTR, ppSubKey: *mut *mut ISpDataKey, ) -> HRESULT, fn DeleteKey( pszSubKey: LPCWSTR, ) -> HRESULT, fn DeleteValue( pszValueName: LPCWSTR, ) -> HRESULT, fn EnumKeys( Index: ULONG, ppszSubKeyName: *mut LPWSTR, ) -> HRESULT, fn EnumValues( Index: ULONG, ppszValueName: *mut LPWSTR, ) -> HRESULT, }); RIDL!(#[uuid(0x92a66e2b, 0xc830, 0x4149, 0x83, 0xdf, 0x6f, 0xc2, 0xba, 0x1e, 0x7a, 0x5b)] interface ISpRegDataKey(ISpRegDataKeyVtbl): ISpDataKey(ISpDataKeyVtbl) { fn SetKey( hkey: HKEY, fReadOnly: BOOL, ) -> HRESULT, }); RIDL!(#[uuid(0x2d3d3845, 0x39af, 0x4850, 0xbb, 0xf9, 0x40, 0xb4, 0x97, 0x80, 0x01, 0x1d)] interface ISpObjectTokenCategory(ISpObjectTokenCategoryVtbl): ISpDataKey(ISpDataKeyVtbl) { fn SetId( pszCategoryId: LPCWSTR, fCreateIfNotExist: BOOL, ) -> HRESULT, fn GetId( ppszCoMemCategoryId: *mut LPWSTR, ) -> HRESULT, fn GetDataKey( spdkl: 
SPDATAKEYLOCATION, pppDataKey: *mut *mut ISpDataKey, ) -> HRESULT, fn EnumTokens( pzsReqAttribs: LPCWSTR, pszOptAttribs: LPCWSTR, ppEnum: *mut *mut IEnumSpObjectTokens, ) -> HRESULT, fn SetDefaultTokenId( pszTokenId: LPCWSTR, ) -> HRESULT, fn GetDefaultTokenId( ppszCoMemTokenId: *mut LPWSTR, ) -> HRESULT, }); RIDL!(#[uuid(0x14056589, 0xe16c, 0x11d2, 0xbb, 0x90, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0xc0)] interface ISpObjectToken(ISpObjectTokenVtbl): ISpDataKey(ISpDataKeyVtbl) { fn SetId( pszCategoryId: LPCWSTR, pszTokenId: LPCWSTR, fCreateIfNotExist: BOOL, ) -> HRESULT, fn GetId( ppszCoMemTokenId: *mut LPWSTR, ) -> HRESULT, fn GetCategory( ppTokenCategory: *mut *mut ISpObjectTokenCategory, ) -> HRESULT, fn CreateInstance( pUnkOuter: *mut IUnknown, dwClsContext: DWORD, riid: REFIID, ppvObject: *mut *mut c_void, ) -> HRESULT, fn GetStorageFileName( clsidCaller: REFCLSID, pszValueName: LPCWSTR, pszFileNameSpecifier: LPCWSTR, nFolder: ULONG, ppszFilePath: *mut LPWSTR, ) -> HRESULT, fn RemoveStorageFileName( pszKeyName: LPCWSTR, fDeleteFile: BOOL, ) -> HRESULT, fn Remove( pclsidCaller: *const CLSID, ) -> HRESULT, fn IsUISupported( pszTypeOfUI: LPCWSTR, pvExtraData: *mut c_void, cbExtraData: ULONG, punkObject: *mut IUnknown, pfSupported: *mut BOOL, ) -> HRESULT, fn DisplayUI( hwndParent: HWND, pszTitle: LPCWSTR, pszTypeOfUI: LPCWSTR, pvExtraData: *mut c_void, cbExtraData: ULONG, punkObject: *mut IUnknown, ) -> HRESULT, fn MatchesAttributes( pszAttributes: LPCWSTR, pfMatches: *mut BOOL, ) -> HRESULT, }); RIDL!(#[uuid(0xb8aab0cf, 0x346f, 0x49d8, 0x94, 0x99, 0xc8, 0xb0, 0x3f, 0x16, 0x1d, 0x51)] interface ISpObjectTokenInit(ISpObjectTokenInitVtbl): ISpObjectToken(ISpObjectTokenVtbl) { fn InitFromDataKey( pszCategoryId: LPCWSTR, pszTokenId: LPCWSTR, pDataKey: *mut ISpDataKey, ) -> HRESULT, }); RIDL!(#[uuid(0x06b64f9e, 0x7fda, 0x11d2, 0xb4, 0xf2, 0x00, 0xc0, 0x4f, 0x79, 0x73, 0x96)] interface IEnumSpObjectTokens(IEnumSpObjectTokensVtbl): IUnknown(IUnknownVtbl) { fn Next( celt: ULONG, pelt: *mut *mut ISpObjectToken, pceltFetched: *mut ULONG, ) -> HRESULT, fn Skip( celt: ULONG, ) -> HRESULT, fn Reset() -> HRESULT, fn Clone( ppEnum: *mut *mut IEnumSpObjectTokens, ) -> HRESULT, fn Item( Index: ULONG, ppToken: *mut *mut ISpObjectToken, ) -> HRESULT, fn GetCount( pCount: *mut ULONG, ) -> HRESULT, }); RIDL!(#[uuid(0x5b559f40, 0xe952, 0x11d2, 0xbb, 0x91, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0xc0)] interface ISpObjectWithToken(ISpObjectWithTokenVtbl): IUnknown(IUnknownVtbl) { fn SetObjectToken( pToken: *mut ISpObjectToken, ) -> HRESULT, fn GetObjectToken( ppToken: *mut *mut ISpObjectToken, ) -> HRESULT, }); RIDL!(#[uuid(0x93384e18, 0x5014, 0x43d5, 0xad, 0xbb, 0xa7, 0x8e, 0x05, 0x59, 0x26, 0xbd)] interface ISpResourceManager(ISpResourceManagerVtbl): IServiceProvider(IServiceProviderVtbl) { fn SetObject( guidServiceId: REFGUID, pUnkObject: *mut IUnknown, ) -> HRESULT, fn GetObject( guidServiceId: REFGUID, ObjectCLSID: REFCLSID, ObjectIID: REFIID, fReleaseWhenLastExternalRefReleased: BOOL, ppObject: *mut *mut c_void, ) -> HRESULT, }); ENUM!{enum SPEVENTLPARAMTYPE { SPET_LPARAM_IS_UNDEFINED = 0, SPET_LPARAM_IS_TOKEN, SPET_LPARAM_IS_OBJECT, SPET_LPARAM_IS_POINTER, SPET_LPARAM_IS_STRING, }} ENUM!{enum SPEVENTENUM { SPEI_UNDEFINED = 0, SPEI_START_INPUT_STREAM = 1, SPEI_END_INPUT_STREAM = 2, SPEI_VOICE_CHANGE = 3, SPEI_TTS_BOOKMARK = 4, SPEI_WORD_BOUNDARY = 5, SPEI_PHONEME = 6, SPEI_SENTENCE_BOUNDARY = 7, SPEI_VISEME = 8, SPEI_TTS_AUDIO_LEVEL = 9, SPEI_TTS_PRIVATE = 15, SPEI_MIN_TTS = 1, SPEI_MAX_TTS = 15, SPEI_END_SR_STREAM = 34, 
SPEI_SOUND_START = 35, SPEI_SOUND_END = 36, SPEI_PHRASE_START = 37, SPEI_RECOGNITION = 38, SPEI_HYPOTHESIS = 39, SPEI_SR_BOOKMARK = 40, SPEI_PROPERTY_NUM_CHANGE = 41, SPEI_PROPERTY_STRING_CHANGE = 42, SPEI_FALSE_RECOGNITION = 43, SPEI_INTERFERENCE = 44, SPEI_REQUEST_UI = 45, SPEI_RECO_STATE_CHANGE = 46, SPEI_ADAPTATION = 47, SPEI_START_SR_STREAM = 48, SPEI_RECO_OTHER_CONTEXT = 49, SPEI_SR_AUDIO_LEVEL = 50, SPEI_SR_PRIVATE = 52, SPEI_MIN_SR = 34, SPEI_MAX_SR = 52, SPEI_RESERVED1 = 30, SPEI_RESERVED2 = 33, SPEI_RESERVED3 = 63, }} pub const SPFEI_FLAGCHECK: ULONGLONG = (1 << SPEI_RESERVED1) | (1 << SPEI_RESERVED2); pub const SPFEI_ALL_TTS_EVENTS: ULONGLONG = 0x000000000000FFFE | SPFEI_FLAGCHECK; pub const SPFEI_ALL_SR_EVENTS: ULONGLONG = 0x003FFFFC00000000 | SPFEI_FLAGCHECK; pub const SPFEI_ALL_EVENTS: ULONGLONG = 0xEFFFFFFFFFFFFFFF; #[inline] pub fn SPFEI( SPEI_ord: SPEVENTENUM, ) -> ULONGLONG { (1 << SPEI_ord) | SPFEI_FLAGCHECK } STRUCT!{struct SPEVENT { bitfields: DWORD, ulStreamNum: ULONG, ullAudioStreamOffset: ULONGLONG, wParam: WPARAM, lParam: LPARAM, }} BITFIELD!{SPEVENT bitfields: SPEVENTENUM [ eEventId set_eEventId[0..16], ]} BITFIELD!{SPEVENT bitfields: SPEVENTLPARAMTYPE [ elParamType set_elParamType[16..32], ]} STRUCT!{struct SPSERIALIZEDEVENT { bitfields: DWORD, ulStreamNum: ULONG, ullAudioStreamOffset: ULONGLONG, SerializedwParam: ULONG, SerializedlParam: LONG, }} BITFIELD!{SPSERIALIZEDEVENT bitfields: SPEVENTENUM [ eEventId set_eEventId[0..16], ]} BITFIELD!{SPSERIALIZEDEVENT bitfields: SPEVENTLPARAMTYPE [ elParamType set_elParamType[16..32], ]} STRUCT!{struct SPSERIALIZEDEVENT64 { bitfields: DWORD, ulStreamNum: ULONG, ullAudioStreamOffset: ULONGLONG, SerializedwParam: ULONGLONG, SerializedlParam: LONGLONG, }} BITFIELD!{SPSERIALIZEDEVENT64 bitfields: SPEVENTENUM [ eEventId set_eEventId[0..16], ]} BITFIELD!{SPSERIALIZEDEVENT64 bitfields: SPEVENTLPARAMTYPE [ elParamType set_elParamType[16..32], ]} ENUM!{enum SPINTERFERENCE { SPINTERFERENCE_NONE = 0, SPINTERFERENCE_NOISE, SPINTERFERENCE_NOSIGNAL, SPINTERFERENCE_TOOLOUD, SPINTERFERENCE_TOOQUIET, SPINTERFERENCE_TOOFAST, SPINTERFERENCE_TOOSLOW, SPINTERFERENCE_LATENCY_WARNING, SPINTERFERENCE_LATENCY_TRUNCATE_BEGIN , SPINTERFERENCE_LATENCY_TRUNCATE_END, }} ENUM!{enum SPENDSRSTREAMFLAGS { SPESF_NONE = 0, SPESF_STREAM_RELEASED = 1 << 0, SPESF_EMULATED = 1 << 1, }} ENUM!{enum SPVFEATURE { SPVFEATURE_STRESSED = 1 << 0, SPVFEATURE_EMPHASIS = 1 << 1, }} ENUM!{enum SPVISEMES { SP_VISEME_0 = 0, SP_VISEME_1, SP_VISEME_2, SP_VISEME_3, SP_VISEME_4, SP_VISEME_5, SP_VISEME_6, SP_VISEME_7, SP_VISEME_8, SP_VISEME_9, SP_VISEME_10, SP_VISEME_11, SP_VISEME_12, SP_VISEME_13, SP_VISEME_14, SP_VISEME_15, SP_VISEME_16, SP_VISEME_17, SP_VISEME_18, SP_VISEME_19, SP_VISEME_20, SP_VISEME_21, }} STRUCT!{struct SPEVENTSOURCEINFO { ullEventInterest: ULONGLONG, ullQueuedInterest: ULONGLONG, ulCount: ULONG, }} RIDL!(#[uuid(0xbe7a9cce, 0x5f9e, 0x11d2, 0x96, 0x0f, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0x28)] interface ISpEventSource(ISpEventSourceVtbl): ISpNotifySource(ISpNotifySourceVtbl) { fn SetInterest( ullEventInterest: ULONGLONG, ullQueuedInterest: ULONGLONG, ) -> HRESULT, fn GetEvents( ulCount: ULONG, pEventArray: *mut SPEVENT, pulFetched: *mut ULONG, ) -> HRESULT, fn GetInfo( pInfo: *mut SPEVENTSOURCEINFO, ) -> HRESULT, }); RIDL!(#[uuid(0xbe7a9cc9, 0x5f9e, 0x11d2, 0x96, 0x0f, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0x28)] interface ISpEventSink(ISpEventSinkVtbl): IUnknown(IUnknownVtbl) { fn AddEvents( pEventArray: *const SPEVENT, ulCount: ULONG, ) -> HRESULT, fn GetEventInterest( 
pullEventInterest: *mut ULONGLONG, ) -> HRESULT, }); RIDL!(#[uuid(0xbed530be, 0x2606, 0x4f4d, 0xa1, 0xc0, 0x54, 0xc5, 0xcd, 0xa5, 0x56, 0x6f)] interface ISpStreamFormat(ISpStreamFormatVtbl): IStream(IStreamVtbl) { fn GetFormat( pguidFormatId: *mut GUID, ppCoMemWaveFormatEx: *mut *mut WAVEFORMATEX, ) -> HRESULT, }); ENUM!{enum SPFILEMODE { SPFM_OPEN_READONLY, SPFM_OPEN_READWRITE, SPFM_CREATE, SPFM_CREATE_ALWAYS, SPFM_NUM_MODES, }} RIDL!(#[uuid(0x12e3cca9, 0x7518, 0x44c5, 0xa5, 0xe7, 0xba, 0x5a, 0x79, 0xcb, 0x92, 0x9e)] interface ISpStream(ISpStreamVtbl): ISpStreamFormat(ISpStreamFormatVtbl) { fn SetBaseStream( pStream: *mut IStream, rguidFormat: REFGUID, pWaveFormatEx: *const WAVEFORMATEX, ) -> HRESULT, fn GetBaseStream( ppStream: *mut *mut IStream, ) -> HRESULT, fn BindToFile( pszFileName: LPCWSTR, eMode: SPFILEMODE, pFormatId: *const GUID, pWaveFormatEx: *const WAVEFORMATEX, ullEventInterest: ULONGLONG, ) -> HRESULT, fn Close() -> HRESULT, }); RIDL!(#[uuid(0x678a932c, 0xea71, 0x4446, 0x9b, 0x41, 0x78, 0xfd, 0xa6, 0x28, 0x0a, 0x29)] interface ISpStreamFormatConverter(ISpStreamFormatConverterVtbl): ISpStreamFormat(ISpStreamFormatVtbl) { fn SetBaseStream( pStream: *mut ISpStreamFormat, fSetFormatToBaseStreamFormat: BOOL, fWriteToBaseStream: BOOL, ) -> HRESULT, fn GetBaseStream( ppStream: *mut *mut ISpStreamFormat, ) -> HRESULT, fn SetFormat( rguidFormatIdOfConvertedStream: REFGUID, pWaveFormatExOfConvertedStream: *const WAVEFORMATEX, ) -> HRESULT, fn ResetSeekPosition() -> HRESULT, fn ScaleConvertedToBaseOffset( ullOffsetConvertedStream: ULONGLONG, pullOffsetBaseStream: *mut ULONGLONG, ) -> HRESULT, fn ScaleBaseToConvertedOffset( ullOffsetBaseStream: ULONGLONG, pullOffsetConvertedStream: *mut ULONGLONG, ) -> HRESULT, }); ENUM!{enum SPAUDIOSTATE { SPAS_CLOSED, SPAS_STOP, SPAS_PAUSE, SPAS_RUN, }} STRUCT!{struct SPAUDIOSTATUS { cbFreeBuffSpace: c_long, cbNonBlockingIO: ULONG, State: SPAUDIOSTATE, CurSeekPos: ULONGLONG, CurDevicePos: ULONGLONG, dwAudioLevel: DWORD, dwReserved2: DWORD, }} STRUCT!{struct SPAUDIOBUFFERINFO { ulMsMinNotification: ULONG, ulMsBufferSize: ULONG, ulMsEventBias: ULONG, }} RIDL!(#[uuid(0xc05c768f, 0xfae8, 0x4ec2, 0x8e, 0x07, 0x33, 0x83, 0x21, 0xc1, 0x24, 0x52)] interface ISpAudio(ISpAudioVtbl): ISpStreamFormat(ISpStreamFormatVtbl) { fn SetState( NewState: SPAUDIOSTATE, ullReserved: ULONGLONG, ) -> HRESULT, fn SetFormat( rguidFmtId: REFGUID, pWaveFormatEx: *const WAVEFORMATEX, ) -> HRESULT, fn GetStatus( pStatus: *mut SPAUDIOSTATUS, ) -> HRESULT, fn SetBufferInfo( pBuffInfo: *const SPAUDIOBUFFERINFO, ) -> HRESULT, fn GetBufferInfo( pBuffInfo: *mut SPAUDIOBUFFERINFO, ) -> HRESULT, fn GetDefaultFormat( pFormatId: *mut GUID, ppCoMemWaveFormatEx: *mut *mut WAVEFORMATEX, ) -> HRESULT, fn EventHandle() -> HANDLE, fn GetVolumeLevel( pLevel: *mut ULONG, ) -> HRESULT, fn SetVolumeLevel( Level: ULONG, ) -> HRESULT, fn GetBufferNotifySize( pcbSize: *mut ULONG, ) -> HRESULT, fn SetBufferNotifySize( cbSize: ULONG, ) -> HRESULT, }); RIDL!(#[uuid(0x15806f6e, 0x1d70, 0x4b48, 0x98, 0xe6, 0x3b, 0x1a, 0x00, 0x75, 0x09, 0xab)] interface ISpMMSysAudio(ISpMMSysAudioVtbl): ISpAudio(ISpAudioVtbl) { fn GetDeviceId( puDeviceId: *mut UINT, ) -> HRESULT, fn SetDeviceId( uDeviceId: UINT, ) -> HRESULT, fn GetMMHandle( pHandle: *mut *mut c_void, ) -> HRESULT, fn GetLineId( puLineId: *mut UINT, ) -> HRESULT, fn SetLineId( uLineId: UINT, ) -> HRESULT, }); RIDL!(#[uuid(0x10f63bce, 0x201a, 0x11d3, 0xac, 0x70, 0x00, 0xc0, 0x4f, 0x8e, 0xe6, 0xc0)] interface ISpTranscript(ISpTranscriptVtbl): 
IUnknown(IUnknownVtbl) { fn GetTranscript( ppszTranscript: *mut LPWSTR, ) -> HRESULT, fn AppendTranscript( pszTranscript: LPCWSTR, ) -> HRESULT, }); ENUM!{enum SPDISPLAYATTRIBUTES { SPAF_ONE_TRAILING_SPACE = 0x2, SPAF_TWO_TRAILING_SPACES = 0x4, SPAF_CONSUME_LEADING_SPACES = 0x8, SPAF_ALL = 0xf, }} pub type SPPHONEID = WCHAR; pub type PSPPHONEID = LPWSTR; pub type PCSPPHONEID = LPCWSTR; STRUCT!{struct SPPHRASEELEMENT { ulAudioTimeOffset: ULONG, ulAudioSizeTime: ULONG, ulAudioStreamOffset: ULONG, ulAudioSizeBytes: ULONG, ulRetainedStreamOffset: ULONG, ulRetainedSizeBytes: ULONG, pszDisplayText: LPCWSTR, pszLexicalForm: LPCWSTR, pszPronunciation: *const SPPHONEID, bDisplayAttributes: BYTE, RequiredConfidence: c_char, ActualConfidence: c_char, Reserved: BYTE, SREngineConfidence: c_float, }} STRUCT!{struct SPPHRASERULE { pszName: LPCWSTR, ulId: ULONG, ulFirstElement: ULONG, ulCountOfElements: ULONG, pNextSibling: *const SPPHRASERULE, pFirstChild: *const SPPHRASERULE, SREngineConfidence: c_float, Confidence: c_char, }} ENUM!{enum SPPHRASEPROPERTYUNIONTYPE { SPPPUT_UNUSED = 0, SPPPUT_ARRAY_INDEX, }} STRUCT!{struct SPPHRASEPROPERTY { pszName: LPCWSTR, bType: BYTE, bReserved: BYTE, usArrayIndex: u16, pszValue: LPCWSTR, vValue: VARIANT, ulFirstElement: ULONG, ulCountOfElements: ULONG, pNextSibling: *const SPPHRASEPROPERTY, pFirstChild: *const SPPHRASEPROPERTY, SREngineConfidence: c_float, Confidence: c_char, }} UNION!(SPPHRASEPROPERTY, bType, ulId, ulId_mut, ULONG); STRUCT!{struct SPPHRASEREPLACEMENT { bDisplayAttributes: BYTE, pszReplacementText: LPCWSTR, ulFirstElement: ULONG, ulCountOfElements: ULONG, }} STRUCT!{struct SPPHRASE { cbSize: ULONG, LangID: WORD, wHomophoneGroupId: WORD, ullGrammarID: ULONGLONG, ftStartTime: ULONGLONG, ullAudioStreamPosition: ULONGLONG, ulAudioSizeBytes: ULONG, ulRetainedSizeBytes: ULONG, ulAudioSizeTime: ULONG, Rule: SPPHRASERULE, pProperties: *const SPPHRASEPROPERTY, pElements: *const SPPHRASEELEMENT, cReplacements: ULONG, pReplacements: *const SPPHRASEREPLACEMENT, SREngineID: GUID, ulSREnginePrivateDataSize: ULONG, pSREnginePrivateData: *const BYTE, }} STRUCT!{struct SPSERIALIZEDPHRASE { ulSerializedSize: ULONG, }} ENUM!{enum SPVALUETYPE { SPDF_PROPERTY = 0x1, SPDF_REPLACEMENT = 0x2, SPDF_RULE = 0x4, SPDF_DISPLAYTEXT = 0x8, SPDF_LEXICALFORM = 0x10, SPDF_PRONUNCIATION = 0x20, SPDF_AUDIO = 0x40, SPDF_ALTERNATES = 0x80, SPDF_ALL = 0xff, }} STRUCT!{struct SPBINARYGRAMMAR { ulTotalSerializedSize: ULONG, }} ENUM!{enum SPPHRASERNG { SPPR_ALL_ELEMENTS = -1i32 as u32, }} pub const SP_GETWHOLEPHRASE: SPPHRASERNG = SPPR_ALL_ELEMENTS; pub const SPRR_ALL_ELEMENTS: SPPHRASERNG = SPPR_ALL_ELEMENTS; DECLARE_HANDLE!(SPSTATEHANDLE, SPSTATEHANDLE__); ENUM!{enum SPRECOEVENTFLAGS { SPREF_AutoPause = 1 << 0, SPREF_Emulated = 1 << 1, }} ENUM!{enum SPPARTOFSPEECH { SPPS_NotOverriden = -1i32 as u32, SPPS_Unknown = 0, SPPS_Noun = 0x1000, SPPS_Verb = 0x2000, SPPS_Modifier = 0x3000, SPPS_Function = 0x4000, SPPS_Interjection = 0x5000, }} ENUM!{enum SPLEXICONTYPE { eLEXTYPE_USER = 1 << 0, eLEXTYPE_APP = 1 << 1, eLEXTYPE_VENDORLEXICON = 1 << 2, eLEXTYPE_LETTERTOSOUND = 1 << 3, eLEXTYPE_MORPHOLOGY = 1 << 4, eLEXTYPE_RESERVED4 = 1 << 5, eLEXTYPE_USER_SHORTCUT = 1 << 6, eLEXTYPE_RESERVED6 = 1 << 7, eLEXTYPE_RESERVED7 = 1 << 8, eLEXTYPE_RESERVED8 = 1 << 9, eLEXTYPE_RESERVED9 = 1 << 10, eLEXTYPE_RESERVED10 = 1 << 11, eLEXTYPE_PRIVATE1 = 1 << 12, eLEXTYPE_PRIVATE2 = 1 << 13, eLEXTYPE_PRIVATE3 = 1 << 14, eLEXTYPE_PRIVATE4 = 1 << 15, eLEXTYPE_PRIVATE5 = 1 << 16, eLEXTYPE_PRIVATE6 = 1 << 17, 
eLEXTYPE_PRIVATE7 = 1 << 18, eLEXTYPE_PRIVATE8 = 1 << 19, eLEXTYPE_PRIVATE9 = 1 << 20, eLEXTYPE_PRIVATE10 = 1 << 21, eLEXTYPE_PRIVATE11 = 1 << 22, eLEXTYPE_PRIVATE12 = 1 << 23, eLEXTYPE_PRIVATE13 = 1 << 24, eLEXTYPE_PRIVATE14 = 1 << 25, eLEXTYPE_PRIVATE15 = 1 << 26, eLEXTYPE_PRIVATE16 = 1 << 27, eLEXTYPE_PRIVATE17 = 1 << 28, eLEXTYPE_PRIVATE18 = 1 << 29, eLEXTYPE_PRIVATE19 = 1 << 30, eLEXTYPE_PRIVATE20 = 1 << 31, }} ENUM!{enum SPWORDTYPE { eWORDTYPE_ADDED = 1 << 0, eWORDTYPE_DELETED = 1 << 1, }} STRUCT!{struct SPWORDPRONUNCIATION { pNextWordPronunciation: *mut SPWORDPRONUNCIATION, eLexiconType: SPLEXICONTYPE, LangID: WORD, wPronunciationFlags: WORD, ePartOfSpeech: SPPARTOFSPEECH, szPronunciation: [SPPHONEID; 1], }} STRUCT!{struct SPWORDPRONUNCIATIONLIST { ulSize: ULONG, pvBuffer: *mut BYTE, pFirstWordPronunciation: *mut SPWORDPRONUNCIATION, }} STRUCT!{struct SPWORD { pNextWord: *mut SPWORD, LangID: WORD, wReserved: WORD, eWordType: SPWORDTYPE, pszWord: LPWSTR, pFirstWordPronunciation: *mut SPWORDPRONUNCIATION, }} STRUCT!{struct SPWORDLIST { ulSize: ULONG, pvBuffer: *mut BYTE, pFirstWord: *mut SPWORD, }} RIDL!(#[uuid(0xda41a7c2, 0x5383, 0x4db2, 0x91, 0x6b, 0x6c, 0x17, 0x19, 0xe3, 0xdb, 0x58)] interface ISpLexicon(ISpLexiconVtbl): IUnknown(IUnknownVtbl) { fn GetPronunciations( pszWord: LPCWSTR,<|fim▁hole|> dwFlags: DWORD, pWordPronunciationList: *mut SPWORDPRONUNCIATIONLIST, ) -> HRESULT, fn AddPronunciation( pszWord: LPCWSTR, LangID: WORD, ePartOfSpeech: SPPARTOFSPEECH, pszPronunciation: PCSPPHONEID, ) -> HRESULT, fn RemovePronunciation( pszWord: LPCWSTR, LangID: WORD, ePartOfSpeech: SPPARTOFSPEECH, pszPronunciation: PCSPPHONEID, ) -> HRESULT, fn GetGeneration( pdwGeneration: *mut DWORD, ) -> HRESULT, fn GetGenerationChange( dwFlags: DWORD, pdwGeneration: *mut DWORD, pWordList: *mut SPWORDLIST, ) -> HRESULT, fn GetWords( dwFlags: DWORD, pdwGeneration: *mut DWORD, pdwCookie: *mut DWORD, pWordList: *mut SPWORDLIST, ) -> HRESULT, }); RIDL!(#[uuid(0x8565572f, 0xc094, 0x41cc, 0xb5, 0x6e, 0x10, 0xbd, 0x9c, 0x3f, 0xf0, 0x44)] interface ISpContainerLexicon(ISpContainerLexiconVtbl): ISpLexicon(ISpLexiconVtbl) { fn AddLexicon( pAddLexicon: *mut ISpLexicon, dwFlags: DWORD, ) -> HRESULT, }); RIDL!(#[uuid(0x8445c581, 0x0cac, 0x4a38, 0xab, 0xfe, 0x9b, 0x2c, 0xe2, 0x82, 0x64, 0x55)] interface ISpPhoneConverter(ISpPhoneConverterVtbl): ISpObjectWithToken(ISpObjectWithTokenVtbl) { fn PhoneToId( pszPhone: LPCWSTR, pId: *mut SPPHONEID, ) -> HRESULT, fn IdToPhone( pId: PCSPPHONEID, pszPhone: *mut WCHAR, ) -> HRESULT, }); STRUCT!{struct SPVPITCH { MiddleAdj: c_long, RangeAdj: c_long, }} ENUM!{enum SPVACTIONS { SPVA_Speak = 0, SPVA_Silence, SPVA_Pronounce, SPVA_Bookmark, SPVA_SpellOut, SPVA_Section, SPVA_ParseUnknownTag, }} STRUCT!{struct SPVCONTEXT { pCategory: LPCWSTR, pBefore: LPCWSTR, pAfter: LPCWSTR, }} STRUCT!{struct SPVSTATE { eAction: SPVACTIONS, LangID: WORD, wReserved: WORD, EmphAdj: c_long, RateAdj: c_long, Volume: ULONG, PitchAdj: SPVPITCH, SilenceMSecs: ULONG, pPhoneIds: *mut SPPHONEID, ePartOfSpeech: SPPARTOFSPEECH, Context: SPVCONTEXT, }} ENUM!{enum SPRUNSTATE { SPRS_DONE = 1 << 0, SPRS_IS_SPEAKING = 1 << 1, }} ENUM!{enum SPVLIMITS { SPMIN_VOLUME = 0, SPMAX_VOLUME = 100, SPMIN_RATE = -10i32 as u32, SPMAX_RATE = 10, }} ENUM!{enum SPVPRIORITY { SPVPRI_NORMAL = 0, SPVPRI_ALERT = 1 << 0, SPVPRI_OVER = 1 << 1, }} STRUCT!{struct SPVOICESTATUS { ulCurrentStream: ULONG, ulLastStreamQueued: ULONG, hrLastResult: HRESULT, dwRunningState: DWORD, ulInputWordPos: ULONG, ulInputWordLen: ULONG, ulInputSentPos: 
ULONG, ulInputSentLen: ULONG, lBookmarkId: LONG, PhonemeId: SPPHONEID, VisemeId: SPVISEMES, dwReserved1: DWORD, dwReserved2: DWORD, }} ENUM!{enum SPEAKFLAGS { SPF_DEFAULT = 0, SPF_ASYNC = 1 << 0, SPF_PURGEBEFORESPEAK = 1 << 1, SPF_IS_FILENAME = 1 << 2, SPF_IS_XML = 1 << 3, SPF_IS_NOT_XML = 1 << 4, SPF_PERSIST_XML = 1 << 5, SPF_NLP_SPEAK_PUNC = 1 << 6, SPF_NLP_MASK = SPF_NLP_SPEAK_PUNC, SPF_VOICE_MASK = SPF_ASYNC | SPF_PURGEBEFORESPEAK | SPF_IS_FILENAME | SPF_IS_XML | SPF_IS_NOT_XML | SPF_NLP_MASK | SPF_PERSIST_XML, SPF_UNUSED_FLAGS = !SPF_VOICE_MASK, }} RIDL!(#[uuid(0x6c44df74, 0x72b9, 0x4992, 0xa1, 0xec, 0xef, 0x99, 0x6e, 0x04, 0x22, 0xd4)] interface ISpVoice(ISpVoiceVtbl): ISpEventSource(ISpEventSourceVtbl) { fn SetOutput( pUnkOutput: *mut IUnknown, fAllowFormatChanges: BOOL, ) -> HRESULT, fn GetOutputObjectToken( ppObjectToken: *mut *mut ISpObjectToken, ) -> HRESULT, fn GetOutputStream( ppStream: *mut *mut ISpStreamFormat, ) -> HRESULT, fn Pause() -> HRESULT, fn Resume() -> HRESULT, fn SetVoice( pToken: *mut ISpObjectToken, ) -> HRESULT, fn GetVoice( ppToken: *mut *mut ISpObjectToken, ) -> HRESULT, fn Speak( pwcs: LPCWSTR, dwFlags: DWORD, pulStreamNumber: *mut ULONG, ) -> HRESULT, fn SpeakStream( pStream: *mut IStream, dwFlags: DWORD, pulStreamNumber: *mut ULONG, ) -> HRESULT, fn GetStatus( pStatus: *mut SPVOICESTATUS, ppszLastBookmark: *mut LPWSTR, ) -> HRESULT, fn Skip( pItemType: LPCWSTR, lNumItems: c_long, pulNumSkipped: *mut ULONG, ) -> HRESULT, fn SetPriority( ePriority: SPVPRIORITY, ) -> HRESULT, fn GetPriority( pePriority: *mut SPVPRIORITY, ) -> HRESULT, fn SetAlertBoundary( eBoundary: SPEVENTENUM, ) -> HRESULT, fn GetAlertBoundary( peBoundary: *mut SPEVENTENUM, ) -> HRESULT, fn SetRate( RateAdjust: c_long, ) -> HRESULT, fn GetRate( pRateAdjust: *mut c_long, ) -> HRESULT, fn SetVolume( usVolume: USHORT, ) -> HRESULT, fn GetVolume( pusVolume: *mut USHORT, ) -> HRESULT, fn WaitUntilDone( msTimeout: ULONG, ) -> HRESULT, fn SetSyncSpeakTimeout( msTimeout: ULONG, ) -> HRESULT, fn GetSyncSpeakTimeout( pmsTimeout: *mut ULONG, ) -> HRESULT, fn SpeakCompleteEvent() -> HANDLE, fn IsUISupported( pszTypeOfUI: LPCWSTR, pvExtraData: *mut c_void, cbExtraData: ULONG, pfSupported: *mut BOOL, ) -> HRESULT, fn DisplayUI( hwndParent: HWND, pszTitle: LPCWSTR, pszTypeOfUI: LPCWSTR, pvExtraData: *mut c_void, cbExtraData: ULONG, ) -> HRESULT, }); RIDL!(#[uuid(0x1a5c0354, 0xb621, 0x4b5a, 0x87, 0x91, 0xd3, 0x06, 0xed, 0x37, 0x9e, 0x53)] interface ISpPhrase(ISpPhraseVtbl): IUnknown(IUnknownVtbl) { fn GetPhrase( ppCoMemPhrase: *mut *mut SPPHRASE, ) -> HRESULT, fn GetSerializedPhrase( ppCoMemPhrase: *mut *mut SPSERIALIZEDPHRASE, ) -> HRESULT, fn GetText( ulStart: ULONG, ulCount: ULONG, fUseTextReplacements: BOOL, ppszCoMemText: *mut LPWSTR, pbDisplayAttributes: *mut BYTE, ) -> HRESULT, fn Discard( dwValueTypes: DWORD, ) -> HRESULT, }); RIDL!(#[uuid(0x8fcebc98, 0x4e49, 0x4067, 0x9c, 0x6c, 0xd8, 0x6a, 0x0e, 0x09, 0x2e, 0x3d)] interface ISpPhraseAlt(ISpPhraseAltVtbl): ISpPhrase(ISpPhraseVtbl) { fn GetAltInfo( pParent: *mut *mut ISpPhrase, pulStartElementInParent: *mut ULONG, pcElementsInParent: *mut ULONG, pcElementsInAlt: *mut ULONG, ) -> HRESULT, fn Commit() -> HRESULT, }); STRUCT!{struct SPRECORESULTTIMES { ftStreamTime: FILETIME, ullLength: ULONGLONG, dwTickCount: DWORD, ullStart: ULONGLONG, }} STRUCT!{struct SPSERIALIZEDRESULT { ulSerializedSize: ULONG, }} RIDL!(#[uuid(0x20b053be, 0xe235, 0x43cd, 0x9a, 0x2a, 0x8d, 0x17, 0xa4, 0x8b, 0x78, 0x42)] interface ISpRecoResult(ISpRecoResultVtbl): 
ISpPhrase(ISpPhraseVtbl) { fn GetResultTimes( pTimes: *mut SPRECORESULTTIMES, ) -> HRESULT, fn GetAlternates( ulStartElement: ULONG, cElements: ULONG, ulRequestCount: ULONG, ppPhrases: *mut *mut ISpPhraseAlt, pcPhrasesReturned: *mut ULONG, ) -> HRESULT, fn GetAudio( ulStartElement: ULONG, cElements: ULONG, ppStream: *mut *mut ISpStreamFormat, ) -> HRESULT, fn SpeakAudio( ulStartElement: ULONG, cElements: ULONG, dwFlags: DWORD, pulStreamNumber: *mut ULONG, ) -> HRESULT, fn Serialize( ppCoMemSerializedResult: *mut *mut SPSERIALIZEDRESULT, ) -> HRESULT, fn ScaleAudio( pAudioFormatId: *const GUID, pWaveFormatEx: *const WAVEFORMATEX, ) -> HRESULT, fn GetRecoContext( ppRecoContext: *mut *mut ISpRecoContext, ) -> HRESULT, }); STRUCT!{struct SPTEXTSELECTIONINFO { ulStartActiveOffset: ULONG, cchActiveChars: ULONG, ulStartSelection: ULONG, cchSelection: ULONG, }} ENUM!{enum SPWORDPRONOUNCEABLE { SPWP_UNKNOWN_WORD_UNPRONOUNCEABLE = 0, SPWP_UNKNOWN_WORD_PRONOUNCEABLE = 1, SPWP_KNOWN_WORD_PRONOUNCEABLE = 2, }} ENUM!{enum SPGRAMMARSTATE { SPGS_DISABLED = 0, SPGS_ENABLED = 1, SPGS_EXCLUSIVE = 3, }} ENUM!{enum SPCONTEXTSTATE { SPCS_DISABLED = 0, SPCS_ENABLED = 1, }} ENUM!{enum SPRULESTATE { SPRS_INACTIVE = 0, SPRS_ACTIVE = 1, SPRS_ACTIVE_WITH_AUTO_PAUSE = 3, }} pub const SP_STREAMPOS_ASAP: ULONGLONG = 0; pub const SP_STREAMPOS_REALTIME: ULONGLONG = -1i64 as u64; pub const SPRULETRANS_TEXTBUFFER: SPSTATEHANDLE = -1isize as SPSTATEHANDLE; pub const SPRULETRANS_WILDCARD: SPSTATEHANDLE = -2isize as SPSTATEHANDLE; pub const SPRULETRANS_DICTATION: SPSTATEHANDLE = -3isize as SPSTATEHANDLE; ENUM!{enum SPGRAMMARWORDTYPE { SPWT_DISPLAY, SPWT_LEXICAL, SPWT_PRONUNCIATION, SPWT_LEXICAL_NO_SPECIAL_CHARS, }} STRUCT!{struct SPPROPERTYINFO { pszName: LPCWSTR, ulId: ULONG, pszValue: LPCWSTR, vValue: VARIANT, }} ENUM!{enum SPCFGRULEATTRIBUTES { SPRAF_TopLevel = 1 << 0, SPRAF_Active = 1 << 1, SPRAF_Export = 1 << 2, SPRAF_Import = 1 << 3, SPRAF_Interpreter = 1 << 4, SPRAF_Dynamic = 1 << 5, SPRAF_AutoPause = 1 << 16, }} RIDL!(#[uuid(0x8137828f, 0x591a, 0x4a42, 0xbe, 0x58, 0x49, 0xea, 0x7e, 0xba, 0xac, 0x68)] interface ISpGrammarBuilder(ISpGrammarBuilderVtbl): IUnknown(IUnknownVtbl) { fn ResetGrammar( NewLanguage: WORD, ) -> HRESULT, fn GetRule( pszRuleName: LPCWSTR, dwRuleId: DWORD, dwAttributes: DWORD, fCreateIfNotExist: BOOL, phInitialState: *mut SPSTATEHANDLE, ) -> HRESULT, fn ClearRule( hState: SPSTATEHANDLE, ) -> HRESULT, fn CreateNewState( hState: SPSTATEHANDLE, phState: *mut SPSTATEHANDLE, ) -> HRESULT, fn AddWordTransition( hFromState: SPSTATEHANDLE, hToState: SPSTATEHANDLE, psz: LPCWSTR, pszSeparators: LPCWSTR, eWordType: SPGRAMMARWORDTYPE, Weight: c_float, pPropInfo: *const SPPROPERTYINFO, ) -> HRESULT, fn AddRuleTransition( hFromState: SPSTATEHANDLE, hToState: SPSTATEHANDLE, hRule: SPSTATEHANDLE, Weight: c_float, pPropInfo: *const SPPROPERTYINFO, ) -> HRESULT, fn AddResource( hRuleState: SPSTATEHANDLE, pszResourceName: LPCWSTR, pszResourceValue: LPCWSTR, ) -> HRESULT, fn Commit( dwReserved: DWORD, ) -> HRESULT, }); ENUM!{enum SPLOADOPTIONS { SPLO_STATIC = 0, SPLO_DYNAMIC = 1, }} RIDL!(#[uuid(0x2177db29, 0x7f45, 0x47d0, 0x85, 0x54, 0x06, 0x7e, 0x91, 0xc8, 0x05, 0x02)] interface ISpRecoGrammar(ISpRecoGrammarVtbl): ISpGrammarBuilder(ISpGrammarBuilderVtbl) { fn GetGrammarId( pullGrammarId: *mut ULONGLONG, ) -> HRESULT, fn GetRecoContext( ppRecoCtxt: *mut *mut ISpRecoContext, ) -> HRESULT, fn LoadCmdFromFile( pszFileName: LPCWSTR, Options: SPLOADOPTIONS, ) -> HRESULT, fn LoadCmdFromObject( rcid: REFCLSID, pszGrammarName: 
LPCWSTR, Options: SPLOADOPTIONS, ) -> HRESULT, fn LoadCmdFromResource( hModule: HMODULE, pszResourceName: LPCWSTR, pszResourceType: LPCWSTR, wLanguage: WORD, Options: SPLOADOPTIONS, ) -> HRESULT, fn LoadCmdFromMemory( pGrammar: *const SPBINARYGRAMMAR, Options: SPLOADOPTIONS, ) -> HRESULT, fn LoadCmdFromProprietaryGrammar( rguidParam: REFGUID, pszStringParam: LPCWSTR, pvDataPrarm: *const c_void, cbDataSize: ULONG, Options: SPLOADOPTIONS, ) -> HRESULT, fn SetRuleState( pszName: LPCWSTR, pReserved: *mut c_void, NewState: SPRULESTATE, ) -> HRESULT, fn SetRuleIdState( ulRuleId: ULONG, NewState: SPRULESTATE, ) -> HRESULT, fn LoadDictation( pszTopicName: LPCWSTR, Options: SPLOADOPTIONS, ) -> HRESULT, fn UnloadDictation() -> HRESULT, fn SetDictationState( NewState: SPRULESTATE, ) -> HRESULT, fn SetWordSequenceData( pText: *const WCHAR, cchText: ULONG, pInfo: *const SPTEXTSELECTIONINFO, ) -> HRESULT, fn SetTextSelection( pInfo: *const SPTEXTSELECTIONINFO, ) -> HRESULT, fn IsPronounceable( pszWord: LPCWSTR, pWordPronounceable: *mut SPWORDPRONOUNCEABLE, ) -> HRESULT, fn SetGrammarState( eGrammarState: SPGRAMMARSTATE, ) -> HRESULT, fn SaveCmd( pStream: *mut IStream, ppszCoMemErrorText: *mut LPWSTR, ) -> HRESULT, fn GetGrammarState( peGrammarState: *mut SPGRAMMARSTATE, ) -> HRESULT, }); STRUCT!{struct SPRECOCONTEXTSTATUS { eInterference: SPINTERFERENCE, szRequestTypeOfUI: [WCHAR; 255], dwReserved1: DWORD, dwReserved2: DWORD, }} ENUM!{enum SPBOOKMARKOPTIONS { SPBO_NONE = 0, SPBO_PAUSE = 1 << 0, }} ENUM!{enum SPAUDIOOPTIONS { SPAO_NONE = 0, SPAO_RETAIN_AUDIO = 1 << 0, }} RIDL!(#[uuid(0xf740a62f, 0x7c15, 0x489e, 0x82, 0x34, 0x94, 0x0a, 0x33, 0xd9, 0x27, 0x2d)] interface ISpRecoContext(ISpRecoContextVtbl): ISpEventSource(ISpEventSourceVtbl) { fn GetRecognizer( ppRecognizer: *mut *mut ISpRecognizer, ) -> HRESULT, fn CreateGrammer( ullGrammarId: ULONGLONG, ppGrammar: *mut *mut ISpRecoGrammar, ) -> HRESULT, fn GetStatus( pState: *mut SPRECOCONTEXTSTATUS, ) -> HRESULT, fn GetMaxAlternates( pcAlternates: *mut ULONG, ) -> HRESULT, fn SetMaxAlternates( cAlternates: ULONG, ) -> HRESULT, fn SetAudioOptions( Options: SPAUDIOOPTIONS, pAudioFormatId: *const GUID, pWaveFormatEx: *const WAVEFORMATEX, ) -> HRESULT, fn GetAudioOptions( pOptions: *mut SPAUDIOOPTIONS, pAudioFormatId: *mut GUID, ppCoMemWFEX: *mut *mut WAVEFORMATEX, ) -> HRESULT, fn DeserializeResult( pSerializedResult: *const SPSERIALIZEDRESULT, ppResult: *mut *mut ISpRecoResult, ) -> HRESULT, fn Bookmark( Options: SPBOOKMARKOPTIONS, ullStreamPosition: ULONGLONG, lparamEvent: LPARAM, ) -> HRESULT, fn SetAdaptionData( pAdaptionData: LPCWSTR, cch: ULONG, ) -> HRESULT, fn Pause( dwReserved: DWORD, ) -> HRESULT, fn Resume( dwReserved: DWORD, ) -> HRESULT, fn SetVoice( pVoice: *mut ISpVoice, fAllowFormatChanges: BOOL, ) -> HRESULT, fn GetVoice( ppVoice: *mut *mut ISpVoice, ) -> HRESULT, fn SetVoicePurgeEvent( ullEventIntereset: ULONGLONG, ) -> HRESULT, fn GetVoicePurgeEvent( pullEventIntereset: *mut ULONGLONG, ) -> HRESULT, fn SetContextState( eContextState: SPCONTEXTSTATE, ) -> HRESULT, fn GetContextState( peContextState: *mut SPCONTEXTSTATE, ) -> HRESULT, }); RIDL!(#[uuid(0x5b4fb971, 0xb115, 0x4de1, 0xad, 0x97, 0xe4, 0x82, 0xe3, 0xbf, 0x6e, 0xe4)] interface ISpProperties(ISpPropertiesVtbl): IUnknown(IUnknownVtbl) { fn SetPropertyNum( pName: LPCWSTR, lValue: LONG, ) -> HRESULT, fn GetPropertyNum( pName: LPCWSTR, plValue: *mut LONG, ) -> HRESULT, fn SetPropertyString( pName: LPCWSTR, pValue: LPCWSTR, ) -> HRESULT, fn GetPropertyString( pName: LPCWSTR, 
ppCoMemValue: *mut LPWSTR, ) -> HRESULT, }); pub const SP_MAX_LANGIDS: usize = 20; STRUCT!{struct SPRECOGNIZERSTATUS { AudioStatus: SPAUDIOSTATUS, ullRecognitionStreamPos: ULONGLONG, ulStreamNumber: ULONG, ulNumActive: ULONG, clsidEngine: CLSID, cLangIDs: ULONG, aLangID: [WORD; SP_MAX_LANGIDS], ullRecognitionStreamTime: ULONGLONG, }} ENUM!{enum SPWAVEFORMATTYPE { SPWF_INPUT, SPWF_SRENGINE, }} pub type SPSTREAMFORMATTYPE = SPWAVEFORMATTYPE; ENUM!{enum SPRECOSTATE { SPRST_INACTIVE, SPRST_ACTIVE, SPRST_ACTIVE_ALWAYS, SPRST_INACTIVE_WITH_PURGE, SPRST_NUM_STATES, }} RIDL!(#[uuid(0xc2b5f241, 0xdaa0, 0x4507, 0x9e, 0x16, 0x5a, 0x1e, 0xaa, 0x2b, 0x7a, 0x5c)] interface ISpRecognizer(ISpRecognizerVtbl): ISpProperties(ISpPropertiesVtbl) { fn SetRecognizer( pRecognizer: *mut ISpObjectToken, ) -> HRESULT, fn GetRecognizer( ppRecognizer: *mut *mut ISpObjectToken, ) -> HRESULT, fn SetInput( pUnkInput: *mut IUnknown, fAllowFormatChanges: BOOL, ) -> HRESULT, fn GetInputObjectToken( ppToken: *mut *mut ISpObjectToken, ) -> HRESULT, fn GetInputStream( ppStream: *mut *mut ISpStreamFormat, ) -> HRESULT, fn CreateRecoContext( ppNewCtxt: *mut *mut ISpRecoContext, ) -> HRESULT, fn GetRecoProfile( ppToken: *mut *mut ISpObjectToken, ) -> HRESULT, fn SetRecoProfile( pToken: *mut ISpObjectToken, ) -> HRESULT, fn IsSharedInstance() -> HRESULT, fn GetRecoState( pState: *mut SPRECOSTATE, ) -> HRESULT, fn SetRecoState( NewState: SPRECOSTATE, ) -> HRESULT, fn GetStatus( pStatus: *mut SPRECOGNIZERSTATUS, ) -> HRESULT, fn GetFormat( WaveFormatType: SPSTREAMFORMATTYPE, pFormatId: *mut GUID, ppCoMemWFEX: *mut WAVEFORMATEX, ) -> HRESULT, fn IsUISupported( pszTypeOfUI: LPCWSTR, pvExtraData: *mut c_void, cbExtraData: ULONG, pfSupported: *mut BOOL, ) -> HRESULT, fn DisplayUI( hwndParent: HWND, pszTitle: LPCWSTR, pszTypeOfUI: LPCWSTR, pvExtraData: *mut c_void, cbExtraData: ULONG, ) -> HRESULT, fn EmulateRecognition( pPhrase: *mut ISpPhrase, ) -> HRESULT, }); pub type SpeechLanguageId = c_long; ENUM!{enum DISPID_SpeechDataKey { DISPID_SDKSetBinaryValue = 1, DISPID_SDKGetBinaryValue, DISPID_SDKSetStringValue, DISPID_SDKGetStringValue, DISPID_SDKSetLongValue, DISPID_SDKGetlongValue, DISPID_SDKOpenKey, DISPID_SDKCreateKey, DISPID_SDKDeleteKey, DISPID_SDKDeleteValue, DISPID_SDKEnumKeys, DISPID_SDKEnumValues, }} ENUM!{enum DISPID_SpeechObjectToken { DISPID_SOTId = 1, DISPID_SOTDataKey, DISPID_SOTCategory, DISPID_SOTGetDescription, DISPID_SOTSetId, DISPID_SOTGetAttribute, DISPID_SOTCreateInstance, DISPID_SOTRemove, DISPID_SOTGetStorageFileName, DISPID_SOTRemoveStorageFileName, DISPID_SOTIsUISupported, DISPID_SOTDisplayUI, DISPID_SOTMatchesAttributes, }} ENUM!{enum SpeechDataKeyLocation { SDKLDefaultLocation = SPDKL_DefaultLocation, SDKLCurrentUser = SPDKL_CurrentUser, SDKLLocalMachine = SPDKL_LocalMachine, SDKLCurrentConfig = SPDKL_CurrentConfig, }} ENUM!{enum SpeechTokenContext { STCInprocServer = CLSCTX_INPROC_SERVER, STCInprocHandler = CLSCTX_INPROC_HANDLER, STCLocalServer = CLSCTX_LOCAL_SERVER, STCRemoteServer = CLSCTX_REMOTE_SERVER, STCAll = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER, }} ENUM!{enum SpeechTokenShellFolder { STSF_AppData = 0x1a, STSF_LocalAppData = 0x1c, STSF_CommonAppData = 0x23, STSF_FlagCreate = 0x8000, }} ENUM!{enum DISPID_SpeechObjectTokens { DISPID_SOTsCount = 1, DISPID_SOTsItem = DISPID_VALUE as u32, DISPID_SOTs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechObjectTokenCategory { DISPID_SOTCId = 1, DISPID_SOTCDefault, DISPID_SOTCSetId, 
DISPID_SOTCGetDataKey, DISPID_SOTCEnumerateTokens, }} ENUM!{enum SpeechAudioFormatType { SAFTDefault = -1i32 as u32, SAFTNoAssignedFormat = 0, SAFTText = 1, SAFTNonStandardFormat = 2, SAFTExtendedAudioFormat = 3, SAFT8kHz8BitMono = 4, SAFT8kHz8BitStereo = 5, SAFT8kHz16BitMono = 6, SAFT8kHz16BitStereo = 7, SAFT11kHz8BitMono = 8, SAFT11kHz8BitStereo = 9, SAFT11kHz16BitMono = 10, SAFT11kHz16BitStereo = 11, SAFT12kHz8BitMono = 12, SAFT12kHz8BitStereo = 13, SAFT12kHz16BitMono = 14, SAFT12kHz16BitStereo = 15, SAFT16kHz8BitMono = 16, SAFT16kHz8BitStereo = 17, SAFT16kHz16BitMono = 18, SAFT16kHz16BitStereo = 19, SAFT22kHz8BitMono = 20, SAFT22kHz8BitStereo = 21, SAFT22kHz16BitMono = 22, SAFT22kHz16BitStereo = 23, SAFT24kHz8BitMono = 24, SAFT24kHz8BitStereo = 25, SAFT24kHz16BitMono = 26, SAFT24kHz16BitStereo = 27, SAFT32kHz8BitMono = 28, SAFT32kHz8BitStereo = 29, SAFT32kHz16BitMono = 30, SAFT32kHz16BitStereo = 31, SAFT44kHz8BitMono = 32, SAFT44kHz8BitStereo = 33, SAFT44kHz16BitMono = 34, SAFT44kHz16BitStereo = 35, SAFT48kHz8BitMono = 36, SAFT48kHz8BitStereo = 37, SAFT48kHz16BitMono = 38, SAFT48kHz16BitStereo = 39, SAFTTrueSpeech_8kHz1BitMono = 40, SAFTCCITT_ALaw_8kHzMono = 41, SAFTCCITT_ALaw_8kHzStereo = 42, SAFTCCITT_ALaw_11kHzMono = 43, SAFTCCITT_ALaw_11kHzStereo = 44, SAFTCCITT_ALaw_22kHzMono = 45, SAFTCCITT_ALaw_22kHzStereo = 46, SAFTCCITT_ALaw_44kHzMono = 47, SAFTCCITT_ALaw_44kHzStereo = 48, SAFTCCITT_uLaw_8kHzMono = 49, SAFTCCITT_uLaw_8kHzStereo = 50, SAFTCCITT_uLaw_11kHzMono = 51, SAFTCCITT_uLaw_11kHzStereo = 52, SAFTCCITT_uLaw_22kHzMono = 53, SAFTCCITT_uLaw_22kHzStereo = 54, SAFTCCITT_uLaw_44kHzMono = 55, SAFTCCITT_uLaw_44kHzStereo = 56, SAFTADPCM_8kHzMono = 57, SAFTADPCM_8kHzStereo = 58, SAFTADPCM_11kHzMono = 59, SAFTADPCM_11kHzStereo = 60, SAFTADPCM_22kHzMono = 61, SAFTADPCM_22kHzStereo = 62, SAFTADPCM_44kHzMono = 63, SAFTADPCM_44kHzStereo = 64, SAFTGSM610_8kHzMono = 65, SAFTGSM610_11kHzMono = 66, SAFTGSM610_22kHzMono = 67, SAFTGSM610_44kHzMono = 68, }} ENUM!{enum DISPID_SpeechAudioFormat { DISPID_SAFType = 1, DISPID_SAFGuid, DISPID_SAFGetWaveFormatEx, DISPID_SAFSetWaveFormatEx, }} ENUM!{enum DISPID_SpeechBaseStream { DISPID_SBSFormat = 1, DISPID_SBSRead, DISPID_SBSWrite, DISPID_SBSSeek, }} ENUM!{enum SpeechStreamSeekPositionType { SSSPTRelativeToStart = STREAM_SEEK_SET, SSSPTRelativeToCurrentPosition = STREAM_SEEK_CUR, SSSPTRelativeToEnd = STREAM_SEEK_END, }} ENUM!{enum DISPID_SpeechAudio { DISPID_SAStatus = 200, DISPID_SABufferInfo, DISPID_SADefaultFormat, DISPID_SAVolume, DISPID_SABufferNotifySize, DISPID_SAEventHandle, DISPID_SASetState, }} ENUM!{enum SpeechAudioState { SASClosed = SPAS_CLOSED, SASStop = SPAS_STOP, SASPause = SPAS_PAUSE, SASRun = SPAS_RUN, }} ENUM!{enum DISPID_SpeechMMSysAudio { DISPID_SMSADeviceId = 300, DISPID_SMSALineId, DISPID_SMSAMMHandle, }} ENUM!{enum DISPID_SpeechFileStream { DISPID_SFSOpen = 100, DISPID_SFSClose, }} ENUM!{enum SpeechStreamFileMode { SSFMOpenForRead = SPFM_OPEN_READONLY, SSFMOpenReadWrite = SPFM_OPEN_READWRITE, SSFMCreate = SPFM_CREATE, SSFMCreateForWrite = SPFM_CREATE_ALWAYS, }} ENUM!{enum DISPID_SpeechCustomStream { DISPID_SCSBaseStream = 100, }} ENUM!{enum DISPID_SpeechMemoryStream { DISPID_SMSSetData = 100, DISPID_SMSGetData, }} ENUM!{enum DISPID_SpeechAudioStatus { DISPID_SASFreeBufferSpace = 1, DISPID_SASNonBlockingIO, DISPID_SASState, DISPID_SASCurrentSeekPosition, DISPID_SASCurrentDevicePosition, }} ENUM!{enum DISPID_SpeechAudioBufferInfo { DISPID_SABIMinNotification = 1, DISPID_SABIBufferSize, DISPID_SABIEventBias, }} ENUM!{enum 
DISPID_SpeechWaveFormatEx { DISPID_SWFEFormatTag = 1, DISPID_SWFEChannels, DISPID_SWFESamplesPerSec, DISPID_SWFEAvgBytesPerSec, DISPID_SWFEBlockAlign, DISPID_SWFEBitsPerSample, DISPID_SWFEExtraData, }} ENUM!{enum DISPID_SpeechVoice { DISPID_SVStatus = 1, DISPID_SVVoice, DISPID_SVAudioOutput, DISPID_SVAudioOutputStream, DISPID_SVRate, DISPID_SVVolume, DISPID_SVAllowAudioOuputFormatChangesOnNextSet, DISPID_SVEventInterests, DISPID_SVPriority, DISPID_SVAlertBoundary, DISPID_SVSyncronousSpeakTimeout, DISPID_SVSpeak, DISPID_SVSpeakStream, DISPID_SVPause, DISPID_SVResume, DISPID_SVSkip, DISPID_SVGetVoices, DISPID_SVGetAudioOutputs, DISPID_SVWaitUntilDone, DISPID_SVSpeakCompleteEvent, DISPID_SVIsUISupported, DISPID_SVDisplayUI, }} ENUM!{enum SpeechVoicePriority { SVPNormal = SPVPRI_NORMAL, SVPAlert = SPVPRI_ALERT, SVPOver = SPVPRI_OVER, }} ENUM!{enum SpeechVoiceSpeakFlags { SVSFDefault = SPF_DEFAULT, SVSFlagsAsync = SPF_ASYNC, SVSFPurgeBeforeSpeak = SPF_PURGEBEFORESPEAK, SVSFIsFilename = SPF_IS_FILENAME, SVSFIsXML = SPF_IS_XML, SVSFIsNotXML = SPF_IS_NOT_XML, SVSFPersistXML = SPF_PERSIST_XML, SVSFNLPSpeakPunc = SPF_NLP_SPEAK_PUNC, SVSFNLPMask = SPF_NLP_MASK, SVSFVoiceMask = SPF_VOICE_MASK as u32, SVSFUnusedFlags = SPF_UNUSED_FLAGS as u32, }} ENUM!{enum SpeechVoiceEvents { SVEStartInputStream = 1 << 1, SVEEndInputStream = 1 << 2, SVEVoiceChange = 1 << 3, SVEBookmark = 1 << 4, SVEWordBoundary = 1 << 5, SVEPhoneme = 1 << 6, SVESentenceBoundary = 1 << 7, SVEViseme = 1 << 8, SVEAudioLevel = 1 << 9, SVEPrivate = 1 << 15, SVEAllEvents = 0x83fe, }} ENUM!{enum DISPID_SpeechVoiceStatus { DISPID_SVSCurrentStreamNumber = 1, DISPID_SVSLastStreamNumberQueued, DISPID_SVSLastResult, DISPID_SVSRunningState, DISPID_SVSInputWordPosition, DISPID_SVSInputWordLength, DISPID_SVSInputSentencePosition, DISPID_SVSInputSentenceLength, DISPID_SVSLastBookmark, DISPID_SVSLastBookmarkId, DISPID_SVSPhonemeId, DISPID_SVSVisemeId, }} ENUM!{enum SpeechRunState { SRSEDone = SPRS_DONE, SRSEIsSpeaking = SPRS_IS_SPEAKING, }} ENUM!{enum SpeechVisemeType { SVP_0 = 0, SVP_1, SVP_2, SVP_3, SVP_4, SVP_5, SVP_6, SVP_7, SVP_8, SVP_9, SVP_10, SVP_11, SVP_12, SVP_13, SVP_14, SVP_15, SVP_16, SVP_17, SVP_18, SVP_19, SVP_20, SVP_21, }} ENUM!{enum SpeechVisemeFeature { SVF_None = 0, SVF_Stressed = SPVFEATURE_STRESSED, SVF_Emphasis = SPVFEATURE_EMPHASIS, }} ENUM!{enum DISPID_SpeechVoiceEvent { DISPID_SVEStreamStart = 1, DISPID_SVEStreamEnd, DISPID_SVEVoiceChange, DISPID_SVEBookmark, DISPID_SVEWord, DISPID_SVEPhoneme, DISPID_SVESentenceBoundary, DISPID_SVEViseme, DISPID_SVEAudioLevel, DISPID_SVEEnginePrivate, }} ENUM!{enum DISPID_SpeechRecognizer { DISPID_SRRecognizer = 1, DISPID_SRAllowAudioInputFormatChangesOnNextSet, DISPID_SRAudioInput, DISPID_SRAudioInputStream, DISPID_SRIsShared, DISPID_SRState, DISPID_SRStatus, DISPID_SRProfile, DISPID_SREmulateRecognition, DISPID_SRCreateRecoContext, DISPID_SRGetFormat, DISPID_SRSetPropertyNumber, DISPID_SRGetPropertyNumber, DISPID_SRSetPropertyString, DISPID_SRGetPropertyString, DISPID_SRIsUISupported, DISPID_SRDisplayUI, DISPID_SRGetRecognizers, DISPID_SVGetAudioInputs, DISPID_SVGetProfiles, }} ENUM!{enum SpeechRecognizerState { SRSInactive = SPRST_INACTIVE, SRSActive = SPRST_ACTIVE, SRSActiveAlways = SPRST_ACTIVE_ALWAYS, SRSInactiveWithPurge = SPRST_INACTIVE_WITH_PURGE, }} ENUM!{enum SpeechDisplayAttributes { SDA_No_Trailing_Space = 0, SDA_One_Trailing_Space = SPAF_ONE_TRAILING_SPACE, SDA_Two_Trailing_Spaces = SPAF_TWO_TRAILING_SPACES, SDA_Consume_Leading_Spaces = SPAF_CONSUME_LEADING_SPACES, }} ENUM!{enum 
SpeechFormatType { SFTInput = SPWF_INPUT, SFTSREngine = SPWF_SRENGINE, }} ENUM!{enum DISPID_SpeechRecognizerStatus { DISPID_SRSAudioStatus = 1, DISPID_SRSCurrentStreamPosition, DISPID_SRSCurrentStreamNumber, DISPID_SRSNumberOfActiveRules, DISPID_SRSClsidEngine, DISPID_SRSSupportedLanguages, }} ENUM!{enum DISPID_SpeechRecoContext { DISPID_SRCRecognizer = 1, DISPID_SRCAudioInInterferenceStatus, DISPID_SRCRequestedUIType, DISPID_SRCVoice, DISPID_SRAllowVoiceFormatMatchingOnNextSet, DISPID_SRCVoicePurgeEvent, DISPID_SRCEventInterests, DISPID_SRCCmdMaxAlternates, DISPID_SRCState, DISPID_SRCRetainedAudio, DISPID_SRCRetainedAudioFormat, DISPID_SRCPause, DISPID_SRCResume, DISPID_SRCCreateGrammar, DISPID_SRCCreateResultFromMemory, DISPID_SRCBookmark, DISPID_SRCSetAdaptationData, }} ENUM!{enum SpeechRetainedAudioOptions { SRAONone = SPAO_NONE, SRAORetainAudio = SPAO_RETAIN_AUDIO, }} ENUM!{enum SpeechBookmarkOptions { SBONone = SPBO_NONE, SBOPause = SPBO_PAUSE, }} ENUM!{enum SpeechInterference { SINone = SPINTERFERENCE_NONE, SINoise = SPINTERFERENCE_NOISE, SINoSignal = SPINTERFERENCE_NOSIGNAL, SITooLoud = SPINTERFERENCE_TOOLOUD, SITooQuiet = SPINTERFERENCE_TOOQUIET, SITooFast = SPINTERFERENCE_TOOFAST, SITooSlow = SPINTERFERENCE_TOOSLOW, }} ENUM!{enum SpeechRecoEvents { SREStreamEnd = 1 << 0, SRESoundStart = 1 << 1, SRESoundEnd = 1 << 2, SREPhraseStart = 1 << 3, SRERecognition = 1 << 4, SREHypothesis = 1 << 5, SREBookmark = 1 << 6, SREPropertyNumChange = 1 << 7, SREPropertyStringChange = 1 << 8, SREFalseRecognition = 1 << 9, SREInterference = 1 << 10, SRERequestUI = 1 << 11, SREStateChange = 1 << 12, SREAdaptation = 1 << 13, SREStreamStart = 1 << 14, SRERecoOtherContext = 1 << 15, SREAudioLevel = 1 << 16, SREPrivate = 1 << 18, SREAllEvents = 0x5ffff, }} ENUM!{enum SpeechRecoContextState { SRCS_Disabled = SPCS_DISABLED, SRCS_Enabled = SPCS_ENABLED, }} ENUM!{enum DISPIDSPRG { DISPID_SRGId = 1, DISPID_SRGRecoContext, DISPID_SRGState, DISPID_SRGRules, DISPID_SRGReset, DISPID_SRGCommit, DISPID_SRGCmdLoadFromFile, DISPID_SRGCmdLoadFromObject, DISPID_SRGCmdLoadFromResource, DISPID_SRGCmdLoadFromMemory, DISPID_SRGCmdLoadFromProprietaryGrammar, DISPID_SRGCmdSetRuleState, DISPID_SRGCmdSetRuleIdState, DISPID_SRGDictationLoad, DISPID_SRGDictationUnload, DISPID_SRGDictationSetState, DISPID_SRGSetWordSequenceData, DISPID_SRGSetTextSelection, DISPID_SRGIsPronounceable, }} ENUM!{enum SpeechLoadOption { SLOStatic = SPLO_STATIC, SLODynamic = SPLO_DYNAMIC, }} ENUM!{enum SpeechWordPronounceable { SWPUnknownWordUnpronounceable = SPWP_UNKNOWN_WORD_UNPRONOUNCEABLE, SWPUnknownWordPronounceable = SPWP_UNKNOWN_WORD_PRONOUNCEABLE, SWPKnownWordPronounceable = SPWP_KNOWN_WORD_PRONOUNCEABLE, }} ENUM!{enum SpeechGrammarState { SGSEnabled = SPGS_ENABLED, SGSDisabled = SPGS_DISABLED, SGSExclusive = SPGS_EXCLUSIVE, }} ENUM!{enum SpeechRuleState { SGDSInactive = SPRS_INACTIVE, SGDSActive = SPRS_ACTIVE, SGDSActiveWithAutoPause = SPRS_ACTIVE_WITH_AUTO_PAUSE, }} ENUM!{enum SpeechRuleAttributes { SRATopLevel = SPRAF_TopLevel, SRADefaultToActive = SPRAF_Active, SRAExport = SPRAF_Export, SRAImport = SPRAF_Import, SRAInterpreter = SPRAF_Interpreter, SRADynamic = SPRAF_Dynamic, }} ENUM!{enum SpeechGrammarWordType { SGDisplay = SPWT_DISPLAY, SGLexical = SPWT_LEXICAL, SGPronounciation = SPWT_PRONUNCIATION, }} ENUM!{enum DISPID_SpeechRecoContextEvents { DISPID_SRCEStartStream = 1, DISPID_SRCEEndStream, DISPID_SRCEBookmark, DISPID_SRCESoundStart, DISPID_SRCESoundEnd, DISPID_SRCEPhraseStart, DISPID_SRCERecognition, DISPID_SRCEHypothesis, 
DISPID_SRCEPropertyNumberChange, DISPID_SRCEPropertyStringChange, DISPID_SRCEFalseRecognition, DISPID_SRCEInterference, DISPID_SRCERequestUI, DISPID_SRCERecognizerStateChange, DISPID_SRCEAdaptation, DISPID_SRCERecognitionForOtherContext, DISPID_SRCEAudioLevel, DISPID_SRCEEnginePrivate, }} ENUM!{enum SpeechRecognitionType { SRTStandard = 0, SRTAutopause = SPREF_AutoPause, SRTEmulated = SPREF_Emulated, }} ENUM!{enum DISPID_SpeechGrammarRule { DISPID_SGRAttributes = 1, DISPID_SGRInitialState, DISPID_SGRName, DISPID_SGRId, DISPID_SGRClear, DISPID_SGRAddResource, DISPID_SGRAddState, }} ENUM!{enum DISPID_SpeechGrammarRules { DISPID_SGRsCount = 1, DISPID_SGRsDynamic, DISPID_SGRsAdd, DISPID_SGRsCommit, DISPID_SGRsCommitAndSave, DISPID_SGRsFindRule, DISPID_SGRsItem = DISPID_VALUE as u32, DISPID_SGRs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechGrammarRuleState { DISPID_SGRSRule = 1, DISPID_SGRSTransitions, DISPID_SGRSAddWordTransition, DISPID_SGRSAddRuleTransition, DISPID_SGRSAddSpecialTransition, }} ENUM!{enum SpeechSpecialTransitionType { SSTTWildcard = 1, SSTTDictation, SSTTTextBuffer, }} ENUM!{enum DISPID_SpeechGrammarRuleStateTransitions { DISPID_SGRSTsCount = 1, DISPID_SGRSTsItem = DISPID_VALUE as u32, DISPID_SGRSTs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechGrammarRuleStateTransition { DISPID_SGRSTType = 1, DISPID_SGRSTText, DISPID_SGRSTRule, DISPID_SGRSTWeight, DISPID_SGRSTPropertyName, DISPID_SGRSTPropertyId, DISPID_SGRSTPropertyValue, DISPID_SGRSTNextState, }} ENUM!{enum SpeechGrammarRuleStateTransitionType { SGRSTTEpsilon = 0, SGRSTTWord, SGRSTTRule, SGRSTTDictation, SGRSTTWildcard, SGRSTTTextBuffer, }} ENUM!{enum DISPIDSPTSI { DISPIDSPTSI_ActiveOffset = 1, DISPIDSPTSI_ActiveLength, DISPIDSPTSI_SelectionOffset, DISPIDSPTSI_SelectionLength, }} ENUM!{enum DISPID_SpeechRecoResult { DISPID_SRRRecoContext = 1, DISPID_SRRTimes, DISPID_SRRAudioFormat, DISPID_SRRPhraseInfo, DISPID_SRRAlternates, DISPID_SRRAudio, DISPID_SRRSpeakAudio, DISPID_SRRSaveToMemory, DISPID_SRRDiscardResultInfo, }} ENUM!{enum SpeechDiscardType { SDTProperty = SPDF_PROPERTY, SDTReplacement = SPDF_REPLACEMENT, SDTRule = SPDF_RULE, SDTDisplayText = SPDF_DISPLAYTEXT, SDTLexicalForm = SPDF_LEXICALFORM, SDTPronunciation = SPDF_PRONUNCIATION, SDTAudio = SPDF_AUDIO, SDTAlternates = SPDF_ALTERNATES, SDTAll = SPDF_ALL, }} ENUM!{enum DISPID_SpeechPhraseBuilder { DISPID_SPPBRestorePhraseFromMemory = 1, }} ENUM!{enum DISPID_SpeechRecoResultTimes { DISPID_SRRTStreamTime = 1, DISPID_SRRTLength, DISPID_SRRTTickCount, DISPID_SRRTOffsetFromStart, }} ENUM!{enum DISPID_SpeechPhraseAlternate { DISPID_SPARecoResult = 1, DISPID_SPAStartElementInResult, DISPID_SPANumberOfElementsInResult, DISPID_SPAPhraseInfo, DISPID_SPACommit, }} ENUM!{enum DISPID_SpeechPhraseAlternates { DISPID_SPAsCount = 1, DISPID_SPAsItem = DISPID_VALUE as u32, DISPID_SPAs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechPhraseInfo { DISPID_SPILanguageId = 1, DISPID_SPIGrammarId, DISPID_SPIStartTime, DISPID_SPIAudioStreamPosition, DISPID_SPIAudioSizeBytes, DISPID_SPIRetainedSizeBytes, DISPID_SPIAudioSizeTime, DISPID_SPIRule, DISPID_SPIProperties, DISPID_SPIElements, DISPID_SPIReplacements, DISPID_SPIEngineId, DISPID_SPIEnginePrivateData, DISPID_SPISaveToMemory, DISPID_SPIGetText, DISPID_SPIGetDisplayAttributes, }} ENUM!{enum DISPID_SpeechPhraseElement { DISPID_SPEAudioTimeOffset = 1, DISPID_SPEAudioSizeTime, DISPID_SPEAudioStreamOffset, DISPID_SPEAudioSizeBytes, DISPID_SPERetainedStreamOffset, DISPID_SPERetainedSizeBytes, 
DISPID_SPEDisplayText, DISPID_SPELexicalForm, DISPID_SPEPronunciation, DISPID_SPEDisplayAttributes, DISPID_SPERequiredConfidence, DISPID_SPEActualConfidence, DISPID_SPEEngineConfidence, }} ENUM!{enum SpeechEngineConfidence { SECLowConfidence = -1i32 as u32, SECNormalConfidence = 0, SECHighConfidence = 1, }} ENUM!{enum DISPID_SpeechPhraseElements { DISPID_SPEsCount = 1, DISPID_SPEsItem = DISPID_VALUE as u32, DISPID_SPEs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechPhraseReplacement { DISPID_SPRDisplayAttributes = 1, DISPID_SPRText, DISPID_SPRFirstElement, DISPID_SPRNumberOfElements, }} ENUM!{enum DISPID_SpeechPhraseReplacements { DISPID_SPRsCount = 1, DISPID_SPRsItem = DISPID_VALUE as u32, DISPID_SPRs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechPhraseProperty { DISPID_SPPName = 1, DISPID_SPPId, DISPID_SPPValue, DISPID_SPPFirstElement, DISPID_SPPNumberOfElements, DISPID_SPPEngineConfidence, DISPID_SPPConfidence, DISPID_SPPParent, DISPID_SPPChildren, }} ENUM!{enum DISPID_SpeechPhraseProperties { DISPID_SPPsCount = 1, DISPID_SPPsItem = DISPID_VALUE as u32, DISPID_SPPs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechPhraseRule { DISPID_SPRuleName = 1, DISPID_SPRuleId, DISPID_SPRuleFirstElement, DISPID_SPRuleNumberOfElements, DISPID_SPRuleParent, DISPID_SPRuleChildren, DISPID_SPRuleConfidence, DISPID_SPRuleEngineConfidence, }} ENUM!{enum DISPID_SpeechPhraseRules { DISPID_SPRulesCount = 1, DISPID_SPRulesItem = DISPID_VALUE as u32, DISPID_SPRules_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechLexicon { DISPID_SLGenerationId = 1, DISPID_SLGetWords, DISPID_SLAddPronunciation, DISPID_SLAddPronunciationByPhoneIds, DISPID_SLRemovePronunciation, DISPID_SLRemovePronunciationByPhoneIds, DISPID_SLGetPronunciations, DISPID_SLGetGenerationChange, }} ENUM!{enum SpeechLexiconType { SLTUser = eLEXTYPE_USER, SLTApp = eLEXTYPE_APP, }} ENUM!{enum SpeechPartOfSpeech { SPSNotOverriden = SPPS_NotOverriden, SPSUnknown = SPPS_Unknown, SPSNoun = SPPS_Noun, SPSVerb = SPPS_Verb, SPSModifier = SPPS_Modifier, SPSFunction = SPPS_Function, SPSInterjection = SPPS_Interjection, }} ENUM!{enum DISPID_SpeechLexiconWords { DISPID_SLWsCount = 1, DISPID_SLWsItem = DISPID_VALUE as u32, DISPID_SLWs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum SpeechWordType { SWTAdded = eWORDTYPE_ADDED, SWTDeleted = eWORDTYPE_DELETED, }} ENUM!{enum DISPID_SpeechLexiconWord { DISPID_SLWLangId = 1, DISPID_SLWType, DISPID_SLWWord, DISPID_SLWPronunciations, }} ENUM!{enum DISPID_SpeechLexiconProns { DISPID_SLPsCount = 1, DISPID_SLPsItem = DISPID_VALUE as u32, DISPID_SLPs_NewEnum = DISPID_NEWENUM as u32, }} ENUM!{enum DISPID_SpeechLexiconPronunciation { DISPID_SLPType = 1, DISPID_SLPLangId, DISPID_SLPPartOfSpeech, DISPID_SLPPhoneIds, DISPID_SLPSymbolic, }} ENUM!{enum DISPID_SpeechPhoneConverter { DISPID_SPCLangId = 1, DISPID_SPCPhoneToId, DISPID_SPCIdToPhone, }} extern { pub static LIBID_SpeechLib: IID; } RIDL!(#[uuid(0xce17c09b, 0x4efa, 0x44d5, 0xa4, 0xc9, 0x59, 0xd9, 0x58, 0x5a, 0xb0, 0xcd)] interface ISpeechDataKey(ISpeechDataKeyVtbl): IDispatch(IDispatchVtbl) { fn SetBinaryValue( ValueName: BSTR, Value: VARIANT, ) -> HRESULT, fn GetBinaryValue( ValueName: BSTR, Value: *mut VARIANT, ) -> HRESULT, fn SetStringValue( ValueName: BSTR, Value: BSTR, ) -> HRESULT, fn GetStringValue( ValueName: BSTR, Value: *mut BSTR, ) -> HRESULT, fn SetLongValue( ValueName: BSTR, Value: c_long, ) -> HRESULT, fn GetLongValue( ValueName: BSTR, Value: *mut c_long, ) -> HRESULT, fn OpenKey( SubKeyName: BSTR, SubKey: *mut 
*mut ISpeechDataKey, ) -> HRESULT, fn CreateKey( SubKeyName: BSTR, SubKey: *mut *mut ISpeechDataKey, ) -> HRESULT, fn DeleteKey( SubKeyName: BSTR, ) -> HRESULT, fn DeleteValue( ValueName: BSTR, ) -> HRESULT, fn EnumKeys( Index: c_long, SubKeyName: *mut BSTR, ) -> HRESULT, fn EnumValues( Index: c_long, ValueName: *mut BSTR, ) -> HRESULT, }); RIDL!(#[uuid(0xc74a3adc, 0xb727, 0x4500, 0xa8, 0x4a, 0xb5, 0x26, 0x72, 0x1c, 0x8b, 0x8c)] interface ISpeechObjectToken(ISpeechObjectTokenVtbl): IDispatch(IDispatchVtbl) { fn get_Id( ObjectId: *mut BSTR, ) -> HRESULT, fn get_DataKey( DataKey: *mut *mut ISpeechDataKey, ) -> HRESULT, fn get_Category( Category: *mut *mut ISpeechObjectTokenCategory, ) -> HRESULT, fn GetDescription( Locale: c_long, Description: *mut BSTR, ) -> HRESULT, fn SetId( Id: BSTR, CategoryId: BSTR, CreateIfNotExist: VARIANT_BOOL, ) -> HRESULT, fn GetAttribute( AttributeName: BSTR, AttributeValue: *mut BSTR, ) -> HRESULT, fn CreateInstance( pUnkOuter: *mut IUnknown, ClsContext: SpeechTokenContext, Object: *mut *mut IUnknown, ) -> HRESULT, fn Remove( ObjectStorageCLSID: BSTR, ) -> HRESULT, fn GetStorageFileName( ObjectStorageCLSID: BSTR, KeyName: BSTR, FileName: BSTR, Folder: BSTR, FilePath: *mut BSTR, ) -> HRESULT, fn RemoveStorageFileName( ObjectStorageCLSID: BSTR, KeyName: BSTR, DeleteFile: VARIANT_BOOL, ) -> HRESULT, fn IsUISupported( TypeOfUI: BSTR, ExtraData: *const VARIANT, Object: *mut IUnknown, Supported: *mut VARIANT_BOOL, ) -> HRESULT, fn DisplayUI( hWnd: c_long, Title: BSTR, TypeOfUI: BSTR, ExtraData: *const VARIANT, Object: *mut IUnknown, ) -> HRESULT, fn MatchesAttributes( Attributes: BSTR, Matches: *mut VARIANT_BOOL, ) -> HRESULT, }); RIDL!(#[uuid(0x9285b776, 0x2e7b, 0x4bc0, 0xb5, 0x3e, 0x58, 0x0e, 0xb6, 0xfa, 0x96, 0x7f)] interface ISpeechObjectTokens(ISpeechObjectTokensVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, Token: *mut *mut ISpeechObjectToken, ) -> HRESULT, fn get__NewEnum( ppEnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0xca7eac50, 0x2d01, 0x4145, 0x86, 0xd4, 0x5a, 0xe7, 0xd7, 0x0f, 0x44, 0x69)] interface ISpeechObjectTokenCategory(ISpeechObjectTokenCategoryVtbl): IDispatch(IDispatchVtbl) { fn get_Id( Id: *mut BSTR, ) -> HRESULT, fn put_Default( TokenId: BSTR, ) -> HRESULT, fn get_Default( TokenId: *mut BSTR, ) -> HRESULT, fn SetId( Id: BSTR, CreateIfNotExist: VARIANT_BOOL, ) -> HRESULT, fn GetDataKey( Location: SpeechDataKeyLocation, DataKey: *mut *mut ISpeechDataKey, ) -> HRESULT, fn EnumerateTokens( RequiredAttributes: BSTR, OptionalAttributes: BSTR, Tokens: *mut *mut ISpeechObjectTokens, ) -> HRESULT, }); RIDL!(#[uuid(0x11b103d8, 0x1142, 0x4edf, 0xa0, 0x93, 0x82, 0xfb, 0x39, 0x15, 0xf8, 0xcc)] interface ISpeechAudioBufferInfo(ISpeechAudioBufferInfoVtbl): IDispatch(IDispatchVtbl) { fn get_MinNotification( MinNotification: *mut c_long, ) -> HRESULT, fn put_MinNotification( MinNotification: c_long, ) -> HRESULT, fn get_BufferSize( BufferSize: *mut c_long, ) -> HRESULT, fn put_BufferSize( BufferSize: c_long, ) -> HRESULT, fn get_EventBias( EventBias: *mut c_long, ) -> HRESULT, fn put_EventBias( EventBias: c_long, ) -> HRESULT, }); RIDL!(#[uuid(0xc62d9c91, 0x7458, 0x47f6, 0x86, 0x2d, 0x1e, 0xf8, 0x6f, 0xb0, 0xb2, 0x78)] interface ISpeechAudioStatus(ISpeechAudioStatusVtbl): IDispatch(IDispatchVtbl) { fn get_FreeBufferSpace( FreeBufferSpace: *mut c_long, ) -> HRESULT, fn get_NonBlockingIO( NonBlockingIO: *mut c_long, ) -> HRESULT, fn get_State( State: *mut SpeechAudioState, ) -> HRESULT, 
fn get_CurrentSeekPosition( CurrentSeekPosition: *mut VARIANT, ) -> HRESULT, fn get_CurrentDevicePosition( CurrentDevicePosition: *mut VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0xe6e9c590, 0x3e18, 0x40e3, 0x82, 0x99, 0x06, 0x1f, 0x98, 0xbd, 0xe7, 0xc7)] interface ISpeechAudioFormat(ISpeechAudioFormatVtbl): IDispatch(IDispatchVtbl) { fn get_Type( AudioFormat: *mut SpeechAudioFormatType, ) -> HRESULT, fn put_Type( AudioFormat: SpeechAudioFormatType, ) -> HRESULT, fn get_Guid( Guid: *mut BSTR, ) -> HRESULT, fn put_Guid( Guid: BSTR, ) -> HRESULT, fn GetWaveFormatEx( SpeechWaveFormatEx: *mut *mut ISpeechWaveFormatEx, ) -> HRESULT, fn SetWaveFormatEx( SpeechWaveFormatEx: *mut ISpeechWaveFormatEx, ) -> HRESULT, }); RIDL!(#[uuid(0x7a1ef0d5, 0x1581, 0x4741, 0x88, 0xe4, 0x20, 0x9a, 0x49, 0xf1, 0x1a, 0x10)] interface ISpeechWaveFormatEx(ISpeechWaveFormatExVtbl): IDispatch(IDispatchVtbl) { fn get_FormatTag( FormatTag: *mut c_short, ) -> HRESULT, fn put_FormatTag( FormatTag: c_short, ) -> HRESULT, fn get_Channels( Channels: *mut c_short, ) -> HRESULT, fn put_Channels( Channels: c_short, ) -> HRESULT, fn get_SamplesPerSec( SamplesPerSec: *mut c_long, ) -> HRESULT, fn put_SamplesPerSec( SamplesPerSec: c_long, ) -> HRESULT, fn get_AvgBytesPerSec( AvgBytesPerSec: *mut c_long, ) -> HRESULT, fn put_AvgBytesPerSec( AvgBytesPerSec: c_long, ) -> HRESULT, fn get_BlockAlign( BlockAlign: *mut c_short, ) -> HRESULT, fn put_BlockAlign( BlockAlign: c_short, ) -> HRESULT, fn get_BitsPerSample( BitsPerSample: *mut c_short, ) -> HRESULT, fn put_BitsPerSample( BitsPerSample: c_short, ) -> HRESULT, fn get_ExtraData( ExtraData: *mut VARIANT, ) -> HRESULT, fn put_ExtraData( ExtraData: VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0x6450336f, 0x7d49, 0x4ced, 0x80, 0x97, 0x49, 0xd6, 0xde, 0xe3, 0x72, 0x94)] interface ISpeechBaseStream(ISpeechBaseStreamVtbl): IDispatch(IDispatchVtbl) { fn get_Format( AudioFormat: *mut *mut ISpeechAudioFormat, ) -> HRESULT, fn putref_Format( AudioFormat: *mut ISpeechAudioFormat, ) -> HRESULT, fn Read( Buffer: *mut VARIANT, NumberOfBytes: c_long, BytesRead: *mut c_long, ) -> HRESULT, fn Write( Buffer: VARIANT, BytesWritten: *mut c_long, ) -> HRESULT, fn Seek( Position: VARIANT, Origin: SpeechStreamSeekPositionType, NewPosition: *mut VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0xaf67f125, 0xab39, 0x4e93, 0xb4, 0xa2, 0xcc, 0x2e, 0x66, 0xe1, 0x82, 0xa7)] interface ISpeechFileStream(ISpeechFileStreamVtbl): ISpeechBaseStream(ISpeechBaseStreamVtbl) { fn Open( FileName: BSTR, FileMode: SpeechStreamFileMode, DoEvents: VARIANT_BOOL, ) -> HRESULT, fn Close() -> HRESULT, }); RIDL!(#[uuid(0xeeb14b68, 0x808b, 0x4abe, 0xa5, 0xea, 0xb5, 0x1d, 0xa7, 0x58, 0x80, 0x08)] interface ISpeechMemoryStream(ISpeechMemoryStreamVtbl): ISpeechBaseStream(ISpeechBaseStreamVtbl) { fn SetData( Data: VARIANT, ) -> HRESULT, fn GetData( pData: *mut VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0x1a9e9f4f, 0x104f, 0x4db8, 0xa1, 0x15, 0xef, 0xd7, 0xfd, 0x0c, 0x97, 0xae)] interface ISpeechCustomStream(ISpeechCustomStreamVtbl): ISpeechBaseStream(ISpeechBaseStreamVtbl) { fn get_BaseStream( ppUnkStream: *mut *mut IUnknown, ) -> HRESULT, fn putref_BaseStream( pUnkStream: *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0xcff8e175, 0x019e, 0x11d3, 0xa0, 0x8e, 0x00, 0xc0, 0x4f, 0x8e, 0xf9, 0xb5)] interface ISpeechAudio(ISpeechAudioVtbl): ISpeechBaseStream(ISpeechBaseStreamVtbl) { fn get_Status( Status: *mut *mut ISpeechAudioStatus, ) -> HRESULT, fn get_BufferInfo( BufferInfo: *mut *mut ISpeechAudioBufferInfo, ) -> HRESULT, fn get_DefaultFormat( StreamFormat: 
*mut *mut ISpeechAudioFormat, ) -> HRESULT, fn get_Volume( Volume: *mut c_long, ) -> HRESULT, fn put_Volume( Volume: c_long, ) -> HRESULT, fn get_BufferNotifySize( BufferNotifySize: *mut c_long, ) -> HRESULT, fn put_BufferNotifySize( BufferNotifySize: c_long, ) -> HRESULT, fn get_EventHandle( EventHandle: *mut c_long, ) -> HRESULT, fn SetState( State: SpeechAudioState, ) -> HRESULT, }); RIDL!(#[uuid(0x3c76af6d, 0x1fd7, 0x4831, 0x81, 0xd1, 0x3b, 0x71, 0xd5, 0xa1, 0x3c, 0x44)] interface ISpeechMMSysAudio(ISpeechMMSysAudioVtbl): ISpeechAudio(ISpeechAudioVtbl) { fn get_DeviceId( DeviceId: *mut c_long, ) -> HRESULT, fn put_DeviceId( DeviceId: c_long, ) -> HRESULT, fn get_LineId( LineId: *mut c_long, ) -> HRESULT, fn put_LineId( LineId: c_long, ) -> HRESULT, fn get_MMHandle( Handle: *mut c_long, ) -> HRESULT, }); RIDL!(#[uuid(0x269316d8, 0x57bd, 0x11d2, 0x9e, 0xee, 0x00, 0xc0, 0x4f, 0x79, 0x73, 0x96)] interface ISpeechVoice(ISpeechVoiceVtbl): IDispatch(IDispatchVtbl) { fn get_Status( Status: *mut *mut ISpeechVoiceStatus, ) -> HRESULT, fn get_Voice( Voice: *mut *mut ISpeechObjectToken, ) -> HRESULT, fn putref_Voice( Voice: *mut ISpeechObjectToken, ) -> HRESULT, fn get_AudioOutput( AudioOutput: *mut *mut ISpeechObjectToken, ) -> HRESULT, fn putref_AudioOutput( AudioOutput: *mut ISpeechObjectToken, ) -> HRESULT, fn get_AudioOutputStream( AudioOutputStream: *mut *mut ISpeechBaseStream, ) -> HRESULT, fn putref_AudioOutputStream( AudioOutputStream: *mut ISpeechBaseStream, ) -> HRESULT, fn get_Rate( Rate: *mut c_long, ) -> HRESULT, fn put_Rate( Rate: c_long, ) -> HRESULT, fn get_Volume( Volume: *mut c_long, ) -> HRESULT, fn put_Volume( Volume: c_long, ) -> HRESULT, fn put_AllowAudioOutputFormatChangesOnNextSet( Allow: VARIANT_BOOL, ) -> HRESULT, fn get_AllowAudioOutputFormatChangesOnNextSet( Allow: *mut VARIANT_BOOL, ) -> HRESULT, fn get_EventInterests( EventInterestFlags: *mut SpeechVoiceEvents, ) -> HRESULT, fn put_EventInterests( EventInterestFlags: SpeechVoiceEvents, ) -> HRESULT, fn put_Priority( Priority: SpeechVoicePriority, ) -> HRESULT, fn get_Priority( Priority: *mut SpeechVoicePriority, ) -> HRESULT, fn put_AlertBoundary( Boundary: SpeechVoiceEvents, ) -> HRESULT, fn get_AlertBoundary( Boundary: *mut SpeechVoiceEvents, ) -> HRESULT, fn put_SynchronousSpeakTimeout( msTimeout: c_long, ) -> HRESULT, fn get_SynchronousSpeakTimeout( msTimeOut: *mut c_long, ) -> HRESULT, fn Speak( Text: BSTR, Flags: SpeechVoiceSpeakFlags, StreamNumber: *mut c_long, ) -> HRESULT, fn SpeakStream( Stream: *mut ISpeechBaseStream, Flags: SpeechVoiceSpeakFlags, StreamNumber: *mut c_long, ) -> HRESULT, fn Pause() -> HRESULT, fn Resume() -> HRESULT, fn Skip( Type: BSTR, NumItems: c_long, NumSkipped: c_long, ) -> HRESULT, fn GetVoices( RequiredAttributes: BSTR, OptionalAttributes: BSTR, ObjectTokens: *mut *mut ISpeechObjectTokens, ) -> HRESULT, fn GetAudioOutputs( RequiredAttributes: BSTR, OptionalAttributes: BSTR, ObjectTokens: *mut *mut ISpeechObjectTokens, ) -> HRESULT, fn WaitUntilDone( msTimeout: c_long, Done: *mut VARIANT_BOOL, ) -> HRESULT, fn SpeakCompleteEvent( Handle: *mut c_long, ) -> HRESULT, fn IsUISupported( TypeOfUI: BSTR, ExtraData: *const VARIANT, Supported: *mut VARIANT_BOOL, ) -> HRESULT, fn DisplayUI( hWndParent: c_long, Title: BSTR, TypeOfUI: BSTR, ExtraData: *const VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0x8be47b07, 0x57f6, 0x11d2, 0x9e, 0xee, 0x00, 0xc0, 0x4f, 0x79, 0x73, 0x96)] interface ISpeechVoiceStatus(ISpeechVoiceStatusVtbl): IDispatch(IDispatchVtbl) { fn get_CurrentStreamNumber( StreamNumber: 
*mut c_long, ) -> HRESULT, fn get_LastStreamNumberQueued( StreamNumber: *mut c_long, ) -> HRESULT, fn get_LastHResult( HResult: *mut c_long, ) -> HRESULT, fn get_RunningState( State: *mut SpeechRunState, ) -> HRESULT, fn get_InputWordPosition( Position: *mut c_long, ) -> HRESULT, fn get_InputWordLength( Length: *mut c_long, ) -> HRESULT, fn get_InputSentencePosition( Position: *mut c_long, ) -> HRESULT, fn get_InputSentenceLength( Length: *mut c_long, ) -> HRESULT, fn get_LastBookmark( Bookmark: *mut BSTR, ) -> HRESULT, fn get_LastBookmarkId( BookmarkId: *mut c_long, ) -> HRESULT, fn get_PhonemeId( PhoneId: *mut c_short, ) -> HRESULT, fn get_VisemeId( VisemeId: *mut c_short, ) -> HRESULT, }); RIDL!(#[uuid(0xa372acd1, 0x3bef, 0x4bbd, 0x8f, 0xfb, 0xcb, 0x3e, 0x2b, 0x41, 0x6a, 0xf8)] interface _ISpeechVoiceEvents(_ISpeechVoiceEventsVtbl): IDispatch(IDispatchVtbl) { }); RIDL!(#[uuid(0x2d5f1c0c, 0xbd75, 0x4b08, 0x94, 0x78, 0x3b, 0x11, 0xfe, 0xa2, 0x58, 0x6c)] interface ISpeechRecognizer(ISpeechRecognizerVtbl): IDispatch(IDispatchVtbl) { fn putref_Recognizer( Recognizer: *mut ISpeechObjectToken, ) -> HRESULT, fn get_Recognizer( Recognizer: *mut *mut ISpeechObjectToken, ) -> HRESULT, fn put_AllowAudioInputFormatChangesOnNextSet( Allow: VARIANT_BOOL, ) -> HRESULT, fn get_AllowAudioInputFormatChangesOnNextSet( Allow: *mut VARIANT_BOOL, ) -> HRESULT, fn putref_AudioInput( AudioInput: *mut ISpeechObjectToken, ) -> HRESULT, fn get_AudioInput( AudioInput: *mut *mut ISpeechObjectToken, ) -> HRESULT, fn putref_AudioInputStream( AudioInputStream: *mut ISpeechBaseStream, ) -> HRESULT, fn get_AudioInputStream( AudioInputStream: *mut *mut ISpeechBaseStream, ) -> HRESULT, fn get_IsShared( Shared: *mut VARIANT_BOOL, ) -> HRESULT, fn put_State( State: SpeechRecognizerState, ) -> HRESULT, fn get_State( State: *mut SpeechRecognizerState, ) -> HRESULT, fn get_Status( Status: *mut *mut ISpeechRecognizerStatus, ) -> HRESULT, fn putref_Profile( Profile: *mut ISpeechObjectToken, ) -> HRESULT, fn get_Profile( Profile: *mut *mut ISpeechObjectToken, ) -> HRESULT, fn EmulateRecognition( TextElements: VARIANT, ElementDisplayAttributes: *mut VARIANT, LanguageId: c_long, ) -> HRESULT, fn CreateRecoContext( NewContext: *mut *mut ISpeechRecoContext, ) -> HRESULT, fn GetFormat( Type: SpeechFormatType, Format: *mut *mut ISpeechAudioFormat, ) -> HRESULT, fn SetPropertyNumber( Name: BSTR, Value: c_long, Supported: *mut VARIANT_BOOL, ) -> HRESULT, fn GetPropertyNumber( Name: BSTR, Value: *mut c_long, Supported: *mut VARIANT_BOOL, ) -> HRESULT, fn SetPropertyString( Name: BSTR, Value: BSTR, Supported: *mut VARIANT_BOOL, ) -> HRESULT, fn GetPropertyString( Name: BSTR, Value: *mut BSTR, Supported: *mut VARIANT_BOOL, ) -> HRESULT, fn IsUISupported( TypeOfUI: BSTR, ExtraData: *const VARIANT, Supported: *mut VARIANT_BOOL, ) -> HRESULT, fn DisplayUI( hWndParent: c_long, Title: BSTR, TypeOfUI: BSTR, ExtraData: *const VARIANT, ) -> HRESULT, fn GetRecognizers( RequiredAttributes: BSTR, OptionalAttributes: BSTR, ObjectTokens: *mut *mut ISpeechObjectTokens, ) -> HRESULT, fn GetAudioInputs( RequiredAttributes: BSTR, OptionalAttributes: BSTR, ObjectTokens: *mut *mut ISpeechObjectTokens, ) -> HRESULT, fn GetProfiles( RequiredAttributes: BSTR, OptionalAttributes: BSTR, ObjectTokens: *mut *mut ISpeechObjectTokens, ) -> HRESULT, }); RIDL!(#[uuid(0xbff9e781, 0x53ec, 0x484e, 0xbb, 0x8a, 0x0e, 0x1b, 0x55, 0x51, 0xe3, 0x5c)] interface ISpeechRecognizerStatus(ISpeechRecognizerStatusVtbl): IDispatch(IDispatchVtbl) { fn get_AudioStatus( AudioStatus: *mut 
*mut ISpeechAudioStatus, ) -> HRESULT, fn get_CurrentStreamPosition( pCurrentStreamPos: *mut VARIANT, ) -> HRESULT, fn get_CurrentStreamNumber( StreamNumber: *mut c_long, ) -> HRESULT, fn get_NumberOfActiveRules( NumberOfActiveRules: *mut c_long, ) -> HRESULT, fn get_ClsidEngine( ClsidEngine: *mut BSTR, ) -> HRESULT, fn get_SupportedLanguages( SupportedLanguages: *mut VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0x580aa49d, 0x7e1e, 0x4809, 0xb8, 0xe2, 0x57, 0xda, 0x80, 0x61, 0x04, 0xb8)] interface ISpeechRecoContext(ISpeechRecoContextVtbl): IDispatch(IDispatchVtbl) { fn get_Recognizer( Recognizer: *mut *mut ISpeechRecognizer, ) -> HRESULT, fn get_AudioInputInterferenceStatus( Interference: *mut SpeechInterference, ) -> HRESULT, fn get_RequestedUIType( UIType: *mut BSTR, ) -> HRESULT, fn putref_Voice( Voice: *mut ISpeechVoice, ) -> HRESULT, fn get_Voice( Voice: *mut *mut ISpeechVoice, ) -> HRESULT, fn put_AllowVoiceFormatMatchingOnNextSet( Allow: VARIANT_BOOL, ) -> HRESULT, fn get_AllowVoiceFormatMatchingOnNextSet( Allow: *mut VARIANT_BOOL, ) -> HRESULT, fn put_VoicePurgeEvent( EventInterest: SpeechRecoEvents, ) -> HRESULT, fn get_VoicePurgeEvent( EventInterest: *mut SpeechRecoEvents, ) -> HRESULT, fn put_EventInterests( EventInterest: SpeechRecoEvents, ) -> HRESULT, fn get_EventInterests( EventInterest: *mut SpeechRecoEvents, ) -> HRESULT, fn put_CmdMaxAlternates( MaxAlternates: c_long, ) -> HRESULT, fn get_CmdMaxAlternates( MaxAlternates: *mut c_long, ) -> HRESULT, fn put_State( State: SpeechRecoContextState, ) -> HRESULT, fn get_State( State: *mut SpeechRecoContextState, ) -> HRESULT, fn put_RetainedAudio( Option: SpeechRetainedAudioOptions, ) -> HRESULT, fn get_RetainedAudio( Option: *mut SpeechRetainedAudioOptions, ) -> HRESULT, fn putref_RetainedAudioFormat( Format: *mut ISpeechAudioFormat, ) -> HRESULT, fn get_RetainedAudioFormat( Format: *mut *mut ISpeechAudioFormat, ) -> HRESULT, fn Pause() -> HRESULT, fn Resume() -> HRESULT, fn CreateGrammar( GrammarId: VARIANT, Grammar: *mut *mut ISpeechRecoGrammar, ) -> HRESULT, fn CreateResultFromMemory( ResultBlock: *mut VARIANT, Result: *mut *mut ISpeechRecoResult, ) -> HRESULT, fn Bookmark( Options: SpeechBookmarkOptions, StreamPos: VARIANT, BookmarkId: VARIANT, ) -> HRESULT, fn SetAdaptationData( AdaptationString: BSTR, ) -> HRESULT, }); RIDL!(#[uuid(0xb6d6f79f, 0x2158, 0x4e50, 0xb5, 0xbc, 0x9a, 0x9c, 0xcd, 0x85, 0x2a, 0x09)] interface ISpeechRecoGrammar(ISpeechRecoGrammarVtbl): IDispatch(IDispatchVtbl) { fn get_Id( Id: *mut VARIANT, ) -> HRESULT, fn get_RecoContext( RecoContext: *mut *mut ISpeechRecoContext, ) -> HRESULT, fn put_State( State: SpeechGrammarState, ) -> HRESULT, fn get_State( State: *mut SpeechGrammarState, ) -> HRESULT, fn get_Rules( Rules: *mut *mut ISpeechGrammarRules, ) -> HRESULT, fn Reset( NewLanguage: SpeechLanguageId, ) -> HRESULT, fn CmdLoadFromFile( FileName: BSTR, LoadOption: SpeechLoadOption, ) -> HRESULT, fn CmdLoadFromObject( ClassId: BSTR, GrammarName: BSTR, LoadOption: SpeechLoadOption, ) -> HRESULT, fn CmdLoadFromResource( hModule: c_long, ResourceName: VARIANT, ResourceType: VARIANT, LanguageId: SpeechLanguageId, LoadOption: SpeechLoadOption, ) -> HRESULT, fn CmdLoadFromMemory( GrammarData: VARIANT, LoadOption: SpeechLoadOption, ) -> HRESULT, fn CmdLoadFromProprietaryGrammar( ProprietaryGuid: BSTR, PriorietaryString: BSTR, ProprietaryData: VARIANT, LoadOption: SpeechLoadOption, ) -> HRESULT, fn CmdSetRuleState( Name: BSTR, State: SpeechRuleState, ) -> HRESULT, fn CmdSetRuleIdState( RuleId: c_long, State: 
SpeechRuleState, ) -> HRESULT, fn DictationLoad( TopicName: BSTR, LoadOption: SpeechLoadOption, ) -> HRESULT, fn DictationUnload() -> HRESULT, fn DictationSetState( State: SpeechRuleState, ) -> HRESULT, fn SetWordSequenceData( Text: BSTR, TextLength: c_long, Info: *mut ISpeechTextSelectionInformation, ) -> HRESULT, fn SetTextSelection( Info: *mut ISpeechTextSelectionInformation, ) -> HRESULT, fn IsPronounceable( Word: BSTR, WordPronounceable: *mut SpeechWordPronounceable, ) -> HRESULT, }); RIDL!(#[uuid(0x7b8fcb42, 0x0e9d, 0x4f00, 0xa0, 0x48, 0x7b, 0x04, 0xd6, 0x17, 0x9d, 0x3d)] interface _ISpeechRecoContextEvents(_ISpeechRecoContextEventsVtbl): IDispatch(IDispatchVtbl) { }); RIDL!(#[uuid(0xafe719cf, 0x5dd1, 0x44f2, 0x99, 0x9c, 0x7a, 0x39, 0x9f, 0x1c, 0xfc, 0xcc)] interface ISpeechGrammarRule(ISpeechGrammarRuleVtbl): IDispatch(IDispatchVtbl) { fn get_Attributes( Attributes: *mut SpeechRuleAttributes, ) -> HRESULT, fn get_InitialState( State: *mut *mut ISpeechGrammarRuleState, ) -> HRESULT, fn get_Name( Name: *mut BSTR, ) -> HRESULT, fn get_Id( Id: *mut c_long, ) -> HRESULT, fn Clear() -> HRESULT, fn AddResource( ResourceName: BSTR, ResourceValue: BSTR, ) -> HRESULT, fn AddState( State: *mut *mut ISpeechGrammarRuleState, ) -> HRESULT, }); RIDL!(#[uuid(0x6ffa3b44, 0xfc2d, 0x40d1, 0x8a, 0xfc, 0x32, 0x91, 0x1c, 0x7f, 0x1a, 0xd1)] interface ISpeechGrammarRules(ISpeechGrammarRulesVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn FindRule( RuleNameOrId: VARIANT, Rule: *mut *mut ISpeechGrammarRule, ) -> HRESULT, fn Item( Index: c_long, Rule: *mut *mut ISpeechGrammarRule, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, fn get_Dynamic( Dynamic: *mut VARIANT_BOOL, ) -> HRESULT, fn Add( RuleName: BSTR, Attributes: SpeechRuleAttributes, RuleId: c_long, Rule: *mut *mut ISpeechGrammarRule, ) -> HRESULT, fn Commit() -> HRESULT, fn CommitAndSave( ErrorText: *mut BSTR, SaveStream: *mut VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0xd4286f2c, 0xee67, 0x45ae, 0xb9, 0x28, 0x28, 0xd6, 0x95, 0x36, 0x2e, 0xda)] interface ISpeechGrammarRuleState(ISpeechGrammarRuleStateVtbl): IDispatch(IDispatchVtbl) { fn get_Rule( Rule: *mut *mut ISpeechGrammarRule, ) -> HRESULT, fn get_Transitions( Transitions: *mut *mut ISpeechGrammarRuleStateTransitions, ) -> HRESULT, fn AddWordTransition( DestState: *mut ISpeechGrammarRuleState, Words: BSTR, Separators: BSTR, Type: SpeechGrammarWordType, PropertyName: BSTR, PropertyId: c_long, PropertyValue: *mut VARIANT, Weight: c_float, ) -> HRESULT, fn AddRuleTransition( DestinationState: *mut ISpeechGrammarRuleState, Rule: *mut ISpeechGrammarRule, PropertyName: BSTR, PropertyId: c_long, PropertyValue: *mut VARIANT, Weight: c_float, ) -> HRESULT, fn AddSpecialTransition( DestinationState: *mut ISpeechGrammarRuleState, Type: SpeechSpecialTransitionType, PropertyName: BSTR, PropertyId: c_long, PropertyValue: *mut VARIANT, Weight: c_float, ) -> HRESULT, }); RIDL!(#[uuid(0xcafd1db1, 0x41d1, 0x4a06, 0x98, 0x63, 0xe2, 0xe8, 0x1d, 0xa1, 0x7a, 0x9a)] interface ISpeechGrammarRuleStateTransition(ISpeechGrammarRuleStateTransitionVtbl): IDispatch(IDispatchVtbl) { fn get_Type( Type: *mut SpeechGrammarRuleStateTransitionType, ) -> HRESULT, fn get_Text( Text: *mut BSTR, ) -> HRESULT, fn get_Rule( Rule: *mut *mut ISpeechGrammarRule, ) -> HRESULT, fn get_Weight( Weight: *mut VARIANT, ) -> HRESULT, fn get_PropertyName( PropertyName: *mut BSTR, ) -> HRESULT, fn get_PropertyId( PropertyId: *mut c_long, ) -> HRESULT, fn get_PropertyValue( PropertyValue: 
*mut VARIANT, ) -> HRESULT, fn get_NextState( NextState: *mut *mut ISpeechGrammarRuleState, ) -> HRESULT, }); RIDL!(#[uuid(0xeabce657, 0x75bc, 0x44a2, 0xaa, 0x7f, 0xc5, 0x64, 0x76, 0x74, 0x29, 0x63)] interface ISpeechGrammarRuleStateTransitions(ISpeechGrammarRuleStateTransitionsVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, Transition: *mut *mut ISpeechGrammarRuleStateTransition, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0x3b9c7e7a, 0x6eee, 0x4ded, 0x90, 0x92, 0x11, 0x65, 0x72, 0x79, 0xad, 0xbe)] interface ISpeechTextSelectionInformation(ISpeechTextSelectionInformationVtbl): IDispatch(IDispatchVtbl) { fn put_ActiveOffset( ActiveOffset: c_long, ) -> HRESULT, fn get_ActiveOffset( ActiveOffset: *mut c_long, ) -> HRESULT, fn put_ActiveLength( ActiveLength: c_long, ) -> HRESULT, fn get_ActiveLength( ActiveLength: *mut c_long, ) -> HRESULT, fn put_SelectionOffset( SelectionOffset: c_long, ) -> HRESULT, fn get_SelectionOffset( SelectionOffset: *mut c_long, ) -> HRESULT, fn put_SelectionLength( SelectionLength: c_long, ) -> HRESULT, fn get_SelectionLength( SelectionLength: *mut c_long, ) -> HRESULT, }); RIDL!(#[uuid(0xed2879cf, 0xced9, 0x4ee6, 0xa5, 0x34, 0xde, 0x01, 0x91, 0xd5, 0x46, 0x8d)] interface ISpeechRecoResult(ISpeechRecoResultVtbl): IDispatch(IDispatchVtbl) { fn get_RecoContext( RecoContext: *mut *mut ISpeechRecoContext, ) -> HRESULT, fn get_Times( Times: *mut *mut ISpeechRecoResultTimes, ) -> HRESULT, fn putref_AudioFormat( Format: *mut ISpeechAudioFormat, ) -> HRESULT, fn get_AudioFormat( Format: *mut *mut ISpeechAudioFormat, ) -> HRESULT, fn get_PhraseInfo( PhraseInfo: *mut *mut ISpeechPhraseInfo, ) -> HRESULT, fn Alternates( RequestCount: c_long, StartElement: c_long, Elements: c_long, Alternates: *mut *mut ISpeechPhraseAlternates, ) -> HRESULT, fn Audio( StartElement: c_long, Elements: c_long, Stream: *mut *mut ISpeechMemoryStream, ) -> HRESULT, fn SpeakAudio( StartElement: c_long, Elements: c_long, Flags: SpeechVoiceSpeakFlags, StreamNumber: *mut c_long, ) -> HRESULT, fn SaveToMemory( ResultBlock: *mut VARIANT, ) -> HRESULT, fn DiscardResultInfo( ValueTypes: SpeechDiscardType, ) -> HRESULT, }); RIDL!(#[uuid(0x62b3b8fb, 0xf6e7, 0x41be, 0xbd, 0xcb, 0x05, 0x6b, 0x1c, 0x29, 0xef, 0xc0)] interface ISpeechRecoResultTimes(ISpeechRecoResultTimesVtbl): IDispatch(IDispatchVtbl) { fn get_StreamTime( Time: *mut VARIANT, ) -> HRESULT, fn get_Length( Length: *mut VARIANT, ) -> HRESULT, fn get_TickCount( TickCount: *mut c_long, ) -> HRESULT, fn get_OffsetFromStart( OffsetFromStart: *mut VARIANT, ) -> HRESULT, }); RIDL!(#[uuid(0x27864a2a, 0x2b9f, 0x4cb8, 0x92, 0xd3, 0x0d, 0x27, 0x22, 0xfd, 0x1e, 0x73)] interface ISpeechPhraseAlternate(ISpeechPhraseAlternateVtbl): IDispatch(IDispatchVtbl) { fn get_RecoResult( RecoResult: *mut *mut ISpeechRecoResult, ) -> HRESULT, fn get_StartElementInResult( StartElement: *mut c_long, ) -> HRESULT, fn get_NumberOfElementsInResult( NumberOfElements: *mut c_long, ) -> HRESULT, fn get_PhraseInfo( PhraseInfo: *mut *mut ISpeechPhraseInfo, ) -> HRESULT, fn Commit() -> HRESULT, }); RIDL!(#[uuid(0xb238b6d5, 0xf276, 0x4c3d, 0xa6, 0xc1, 0x29, 0x74, 0x80, 0x1c, 0x3c, 0xc2)] interface ISpeechPhraseAlternates(ISpeechPhraseAlternatesVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, PhraseAlternate: *mut *mut ISpeechPhraseAlternate, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); 
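// ---------------------------------------------------------------------------
// Editor's note (not part of the original bindings): the raw COM interfaces
// declared above -- e.g. ISpVoice, together with CLSID_SpVoice exported near
// the end of this module -- are driven through the usual COM bootstrap.  The
// fragment below is a minimal, hypothetical sketch of synchronous
// text-to-speech; it assumes that CoInitializeEx / CoCreateInstance /
// CoUninitialize, CLSCTX_ALL, COINIT_APARTMENTTHREADED, c_void, std::ptr and
// an IID constant for ISpVoice (written here as IID_ISpVoice) are available
// from the companion ole32 / combaseapi bindings; none of those names are
// defined in this file.
//
//     unsafe {
//         // Enter a single-threaded apartment before creating SAPI objects.
//         CoInitializeEx(ptr::null_mut(), COINIT_APARTMENTTHREADED);
//
//         // Instantiate the shared TTS voice and ask for its ISpVoice interface.
//         let mut voice: *mut ISpVoice = ptr::null_mut();
//         CoCreateInstance(&CLSID_SpVoice, ptr::null_mut(), CLSCTX_ALL,
//                          &IID_ISpVoice, // assumed IID constant, see note above
//                          &mut voice as *mut *mut ISpVoice as *mut *mut c_void);
//
//         // ISpVoice::Speak expects a NUL-terminated UTF-16 string.
//         let text: Vec<u16> = "hello from the SAPI bindings"
//             .encode_utf16().chain(Some(0)).collect();
//
//         // SPF_DEFAULT (0) speaks synchronously; the stream number is not needed.
//         (*voice).Speak(text.as_ptr(), SPF_DEFAULT as DWORD, ptr::null_mut());
//
//         (*voice).Release();
//         CoUninitialize();
//     }
// ---------------------------------------------------------------------------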
RIDL!(#[uuid(0x961559cf, 0x4e67, 0x4662, 0x8b, 0xf0, 0xd9, 0x3f, 0x1f, 0xcd, 0x61, 0xb3)] interface ISpeechPhraseInfo(ISpeechPhraseInfoVtbl): IDispatch(IDispatchVtbl) { fn get_LanguageId( LanguageId: *mut c_long, ) -> HRESULT, fn get_GrammarId( GrammarId: *mut VARIANT, ) -> HRESULT, fn get_StartTime( StartTime: *mut VARIANT, ) -> HRESULT, fn get_AudioStreamPosition( AudioStreamPosition: *mut VARIANT, ) -> HRESULT, fn get_AudioSizeBytes( pAudioSizeBytes: *mut c_long, ) -> HRESULT, fn get_RetainedSizeBytes( RetainedSizeBytes: *mut c_long, ) -> HRESULT, fn get_AudioSizeTime( AudioSizeTime: *mut c_long, ) -> HRESULT, fn get_Rule( Rule: *mut *mut ISpeechPhraseRule, ) -> HRESULT, fn get_Properties( Properties: *mut *mut ISpeechPhraseProperties, ) -> HRESULT, fn get_Elements( Elements: *mut *mut ISpeechPhraseElements, ) -> HRESULT, fn get_Replacements( Replacements: *mut *mut ISpeechPhraseReplacements, ) -> HRESULT, fn get_EngineId( EngineIdGuid: *mut BSTR, ) -> HRESULT, fn get_EnginePrivateData( PrivateData: *mut VARIANT, ) -> HRESULT, fn SaveToMemory( PhraseBlock: *mut VARIANT, ) -> HRESULT, fn GetText( StartElement: c_long, Elements: c_long, UseReplacements: VARIANT_BOOL, Text: *mut BSTR, ) -> HRESULT, fn GetDisplayAttributes( StartElement: c_long, Elements: c_long, UseReplacements: VARIANT_BOOL, DisplayAttributes: *mut SpeechDisplayAttributes, ) -> HRESULT, }); RIDL!(#[uuid(0xe6176f96, 0xe373, 0x4801, 0xb2, 0x23, 0x3b, 0x62, 0xc0, 0x68, 0xc0, 0xb4)] interface ISpeechPhraseElement(ISpeechPhraseElementVtbl): IDispatch(IDispatchVtbl) { fn get_AudioTimeOffset( AudioTimeOffset: *mut c_long, ) -> HRESULT, fn get_AudioSizeTime( AudioSizeTime: *mut c_long, ) -> HRESULT, fn get_AudioStreamOffset( AudioStreamOffset: *mut c_long, ) -> HRESULT, fn get_AudioSizeBytes( AudioSizeBytes: *mut c_long, ) -> HRESULT, fn get_RetainedStreamOffset( RetainedStreamOffset: *mut c_long, ) -> HRESULT, fn get_RetainedSizeBytes( RetainedSizeBytes: *mut c_long, ) -> HRESULT, fn get_DisplayText( DisplayText: *mut BSTR, ) -> HRESULT, fn get_LexicalForm( LexicalForm: *mut BSTR, ) -> HRESULT, fn get_Pronunciation( Pronunciation: *mut VARIANT, ) -> HRESULT, fn get_DisplayAttributes( DisplayAttributes: *mut SpeechDisplayAttributes, ) -> HRESULT, fn get_RequiredConfidence( RequiredConfidence: *mut SpeechEngineConfidence, ) -> HRESULT, fn get_ActualConfidence( ActualConfidence: *mut SpeechEngineConfidence, ) -> HRESULT, fn get_EngineConfidence( EngineConfident: *mut c_float, ) -> HRESULT, }); RIDL!(#[uuid(0x0626b328, 0x3478, 0x467d, 0xa0, 0xb3, 0xd0, 0x85, 0x3b, 0x93, 0xdd, 0xa3)] interface ISpeechPhraseElements(ISpeechPhraseElementsVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, Element: *mut *mut ISpeechPhraseElement, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0x2890a410, 0x53a7, 0x4fb5, 0x94, 0xec, 0x06, 0xd4, 0x99, 0x8e, 0x3d, 0x02)] interface ISpeechPhraseReplacement(ISpeechPhraseReplacementVtbl): IDispatch(IDispatchVtbl) { fn get_DisplayAttributes( DisplayAttributes: *mut SpeechDisplayAttributes, ) -> HRESULT, fn get_Text( Text: *mut BSTR, ) -> HRESULT, fn get_FirstElement( FirstElement: *mut c_long, ) -> HRESULT, fn get_NumberOfElements( NumberOfElements: *mut c_long, ) -> HRESULT, }); RIDL!(#[uuid(0x38bc662f, 0x2257, 0x4525, 0x95, 0x9e, 0x20, 0x69, 0xd2, 0x59, 0x6c, 0x05)] interface ISpeechPhraseReplacements(ISpeechPhraseReplacementsVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, 
fn Item( Index: c_long, Reps: *mut *mut ISpeechPhraseReplacement, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0xce563d48, 0x961e, 0x4732, 0xa2, 0xe1, 0x37, 0x8a, 0x42, 0xb4, 0x30, 0xbe)] interface ISpeechPhraseProperty(ISpeechPhrasePropertyVtbl): IDispatch(IDispatchVtbl) { fn get_Name( Name: *mut BSTR, ) -> HRESULT, fn get_Id( Id: *mut c_long, ) -> HRESULT, fn get_Value( Value: *mut VARIANT, ) -> HRESULT, fn get_FirstElement( FirstElement: *mut c_long, ) -> HRESULT, fn get_NumberOfElements( NumberOfElements: *mut c_long, ) -> HRESULT, fn get_EngineConfidence( Confidence: *mut c_float, ) -> HRESULT, fn get_Confidence( Confidence: *mut SpeechEngineConfidence, ) -> HRESULT, fn get_Parent( ParentProperty: *mut *mut ISpeechPhraseProperty, ) -> HRESULT, fn get_Children( Children: *mut *mut ISpeechPhraseProperties, ) -> HRESULT, }); RIDL!(#[uuid(0x08166b47, 0x102e, 0x4b23, 0xa5, 0x99, 0xbd, 0xb9, 0x8d, 0xbf, 0xd1, 0xf4)] interface ISpeechPhraseProperties(ISpeechPhrasePropertiesVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, Property: *mut *mut ISpeechPhraseProperty, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0xa7bfe112, 0xa4a0, 0x48d9, 0xb6, 0x02, 0xc3, 0x13, 0x84, 0x3f, 0x69, 0x64)] interface ISpeechPhraseRule(ISpeechPhraseRuleVtbl): IDispatch(IDispatchVtbl) { fn get_Name( Name: *mut BSTR, ) -> HRESULT, fn get_Id( Id: *mut c_long, ) -> HRESULT, fn get_FirstElement( FirstElement: *mut c_long, ) -> HRESULT, fn get_NumberOfElements( NumberOfElements: *mut c_long, ) -> HRESULT, fn get_Parent( Parent: *mut *mut ISpeechPhraseRule, ) -> HRESULT, fn get_Children( Children: *mut *mut ISpeechPhraseRules, ) -> HRESULT, fn get_Confidence( ActualConfidence: *mut SpeechEngineConfidence, ) -> HRESULT, fn get_EngineConfidence( Confidence: *mut c_float, ) -> HRESULT, }); RIDL!(#[uuid(0x9047d593, 0x01dd, 0x4b72, 0x81, 0xa3, 0xe4, 0xa0, 0xca, 0x69, 0xf4, 0x07)] interface ISpeechPhraseRules(ISpeechPhraseRulesVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, Rule: *mut *mut ISpeechPhraseRule, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0x3da7627a, 0xc7ae, 0x4b23, 0x87, 0x08, 0x63, 0x8c, 0x50, 0x36, 0x2c, 0x25)] interface ISpeechLexicon(ISpeechLexiconVtbl): IDispatch(IDispatchVtbl) { fn get_GenerationId( GenerationId: *mut c_long, ) -> HRESULT, fn GetWords( Flags: SpeechLexiconType, GenerationID: *mut c_long, Words: *mut *mut ISpeechLexiconWords, ) -> HRESULT, fn AddPronunciation( bstrWord: BSTR, LangId: SpeechLanguageId, PartOfSpeech: SpeechPartOfSpeech, bstrPronunciation: BSTR, ) -> HRESULT, fn AddPronunciationByPhoneIds( bstrWord: BSTR, LangId: SpeechLanguageId, PartOfSpeech: SpeechPartOfSpeech, PhoneIds: *mut VARIANT, ) -> HRESULT, fn RemovePronunciation( bstrWord: BSTR, LangId: SpeechLanguageId, PartOfSpeech: SpeechPartOfSpeech, bstrPronunciation: BSTR, ) -> HRESULT, fn RemovePronunciationByPhoneIds( bstrWord: BSTR, LangId: SpeechLanguageId, PartOfSpeech: SpeechPartOfSpeech, PhoneIds: *mut VARIANT, ) -> HRESULT, fn GetPronunciations( bstrWord: BSTR, LangId: SpeechLanguageId, TypeFlags: SpeechLexiconType, ppPronunciations: *mut *mut ISpeechLexiconPronunciations, ) -> HRESULT, fn GetGenerationChange( GenerationID: *mut c_long, ppWords: *mut *mut ISpeechLexiconWords, ) -> HRESULT, }); RIDL!(#[uuid(0x8d199862, 0x415e, 0x47d5, 0xac, 0x4f, 0xfa, 0xa6, 
0x08, 0xb4, 0x24, 0xe6)] interface ISpeechLexiconWords(ISpeechLexiconWordsVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, Word: *mut *mut ISpeechLexiconWord, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0x4e5b933c, 0xc9be, 0x48ed, 0x88, 0x42, 0x1e, 0xe5, 0x1b, 0xb1, 0xd4, 0xff)] interface ISpeechLexiconWord(ISpeechLexiconWordVtbl): IDispatch(IDispatchVtbl) { fn get_LangId( LangId: *mut SpeechLanguageId, ) -> HRESULT, fn get_Type( WordType: *mut SpeechWordType, ) -> HRESULT, fn get_Word( Word: *mut BSTR, ) -> HRESULT, fn get_Pronunciations( Pronunciations: *mut *mut ISpeechLexiconPronunciations, ) -> HRESULT, }); RIDL!(#[uuid(0x72829128, 0x5682, 0x4704, 0xa0, 0xd4, 0x3e, 0x2b, 0xb6, 0xf2, 0xea, 0xd3)] interface ISpeechLexiconPronunciations(ISpeechLexiconPronunciationsVtbl): IDispatch(IDispatchVtbl) { fn get_Count( Count: *mut c_long, ) -> HRESULT, fn Item( Index: c_long, Pronunciation: *mut *mut ISpeechLexiconPronunciation, ) -> HRESULT, fn get__NewEnum( EnumVARIANT: *mut *mut IUnknown, ) -> HRESULT, }); RIDL!(#[uuid(0x95252c5d, 0x9e43, 0x4f4a, 0x98, 0x99, 0x48, 0xee, 0x73, 0x35, 0x2f, 0x9f)] interface ISpeechLexiconPronunciation(ISpeechLexiconPronunciationVtbl): IDispatch(IDispatchVtbl) { fn get_Type( LexiconType: *mut SpeechLexiconType, ) -> HRESULT, fn get_LangId( LangId: *mut SpeechLanguageId, ) -> HRESULT, fn get_PartOfSpeech( PartOfSpeech: *mut SpeechPartOfSpeech, ) -> HRESULT, fn get_PhoneIds( PhoneIds: *mut VARIANT, ) -> HRESULT, fn get_Symbolic( Symbolic: *mut BSTR, ) -> HRESULT, }); pub const Speech_Default_Weight: c_float = DEFAULT_WEIGHT; pub const Speech_Max_Word_Length: LONG = SP_MAX_WORD_LENGTH as i32; pub const Speech_Max_Pron_Length: LONG = SP_MAX_PRON_LENGTH as i32; pub const Speech_StreamPos_Asap: LONG = SP_STREAMPOS_ASAP as i32; pub const Speech_StreamPos_RealTime: LONG = SP_STREAMPOS_REALTIME as i32; pub const SpeechAllElements: LONG = SPPR_ALL_ELEMENTS as i32; RIDL!(#[uuid(0x3b151836, 0xdf3a, 0x4e0a, 0x84, 0x6c, 0xd2, 0xad, 0xc9, 0x33, 0x43, 0x33)] interface ISpeechPhraseInfoBuilder(ISpeechPhraseInfoBuilderVtbl): IDispatch(IDispatchVtbl) { fn RestorePhraseFromMemory( PhraseInMemory: *mut VARIANT, PhraseInfo: *mut *mut ISpeechPhraseInfo, ) -> HRESULT, }); RIDL!(#[uuid(0xc3e4f353, 0x433f, 0x43d6, 0x89, 0xa1, 0x6a, 0x62, 0xa7, 0x05, 0x4c, 0x3d)] interface ISpeechPhoneConverter(ISpeechPhoneConverterVtbl): IDispatch(IDispatchVtbl) { fn get_LanguageId( LanguageId: *mut SpeechLanguageId, ) -> HRESULT, fn put_LanguageId( LanguageId: SpeechLanguageId, ) -> HRESULT, fn PhoneToId( Phonemes: BSTR, IdArray: *mut VARIANT, ) -> HRESULT, fn IdToPhone( IdArray: VARIANT, Phonemes: *mut BSTR, ) -> HRESULT, }); extern { pub static CLSID_SpNotifyTranslator: CLSID; pub static CLSID_SpObjectTokenCategory: CLSID; pub static CLSID_SpObjectToken: CLSID; pub static CLSID_SpResourceManager: CLSID; pub static CLSID_SpStreamFormatConverter: CLSID; pub static CLSID_SpMMAudioEnum: CLSID; pub static CLSID_SpMMAudioIn: CLSID; pub static CLSID_SpMMAudioOut: CLSID; pub static CLSID_SpStream: CLSID; pub static CLSID_SpVoice: CLSID; pub static CLSID_SpSharedRecoContext: CLSID; pub static CLSID_SpInprocRecognizer: CLSID; pub static CLSID_SpSharedRecognizer: CLSID; pub static CLSID_SpLexicon: CLSID; pub static CLSID_SpUnCompressedLexicon: CLSID; pub static CLSID_SpCompressedLexicon: CLSID; pub static CLSID_SpPhoneConverter: CLSID; pub static CLSID_SpNullPhoneConverter: CLSID; pub static 
CLSID_SpTextSelectionInformation: CLSID; pub static CLSID_SpPhraseInfoBuilder: CLSID; pub static CLSID_SpAudioFormat: CLSID; pub static CLSID_SpWaveFormatEx: CLSID; pub static CLSID_SpInProcRecoContext: CLSID; pub static CLSID_SpCustomStream: CLSID; pub static CLSID_SpFileStream: CLSID; pub static CLSID_SpMemoryStream: CLSID; }<|fim▁end|>
LangID: WORD,
<|file_name|>test_settings.py<|end_file_name|><|fim▁begin|>import os import django TEST_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tests') COMPRESS_CACHE_BACKEND = 'locmem://' DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', } } INSTALLED_APPS = [ 'compressor', 'coffin', 'jingo', ] STATIC_URL = '/static/' STATIC_ROOT = os.path.join(TEST_DIR, 'static') TEMPLATE_DIRS = ( # Specifically choose a name that will not be considered # by app_directories loader, to make sure each test uses # a specific template without considering the others. os.path.join(TEST_DIR, 'test_templates'), ) if django.VERSION[:2] < (1, 6): TEST_RUNNER = 'discover_runner.DiscoverRunner' <|fim▁hole|>)<|fim▁end|>
SECRET_KEY = "iufoj=mibkpdz*%bob952x(%49rqgv8gg45k36kjcg76&-y5=!" PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
<|file_name|>fMatrices.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3 #-*- coding: utf-8 -*- def absMat(matrice): '''Renvoie la matrice avec la valeur absolue de tous ses coefficients. Renvoie False si échec. - matrice - complexe ''' if type(matrice) is not list: raise Exception("absMat : Pas une matrice.") #~ return False if not len(matrice): raise Exception("absMat : La matrice est une liste vide.") #~ return False M = [] for i in matrice: L = [] if type(i) is not list: raise Exception("absMat : Les éléments de la liste ne sont pas des listes.") #~ return False for j in i: try: e = abs(j) except: raise Exception("absMat : Impossible de calculer la valeur absolue.") #~ return False L.append(e) M.append(L) return M def ones(entier=1): ''' ''' try: entier = int(entier) except: raise Exception("ones : Pas un réel") L = [1 for i in range(entier)] M = [] for i in range(entier): M.append(L) return M def zeros(entier = 1): ''' ''' try: entier = int(entier) except: raise Exception("zeros : Pas un réel") L = [0 for i in range(entier)] M = [] for i in range(entier): M.append(L) return M def idMat(entier = 1): ''' ''' try: entier = int(entier) except: raise Exception("idMat : Pas un réel") e = 0 M = [] for i in range(entier): L = [1 if i == e else 0 for i in range(entier)] M.append(L) e +=1 return M print(idMat(5)) def det2Mat(matrice): '''Renvoie le déterminant d'une matrice 2*2. False en cas d'échec - matrice - complexe - 2*2 ''' try: a = matrice[0][0] b = matrice[0][1] c = matrice[1][0] d = matrice[1][1] e = a*d - b*c return e except: raise Exception("det2Mat : le calcul du déterminant a échoué") #~ return False def isMatrice(matrice): '''renvoie True s'il s'agit d'une matrice ''' if type(matrice) is not list: raise Exception("isMatrice : Pas une liste.") #~ return False for i in matrice: if type(i) is not list: raise Exception("isMatrice : Pas une liste de liste") #~ return False return True def newMatrix(matrice): '''renvoie une copie de la liste de liste envoyée False en cas d'échec ''' if not isMatrice(matrice): raise Exception("newMatrice : Pas une matrice") #~ return False return [i[:] for i in matrice] def isListCpx(vecteur): ''' ''' if type(vecteur) is not list: raise Exception("isListCpx : Pas un vecteur/list") for i in vecteur: try: abs(i) except: raise Exception("isListCpx : élément non complexe.") #~ return False return True def pdVect(v1, v2): ''' ''' if type(v1) is not list or type(v2) is not list: raise Exception("pdVect : pas un vecteur/list") #~ return False if len(v1) != len(v2): raise Exception("pdVect : Les vecteurs n'ont pas la même taille") #~ return False if not isListCpx(v1) or not isListCpx(v2): raise Exception("pdVect : Un élément n'est pas un vecteur/list") #~ return False s = 0 for i in v1: for j in v2: s += i*j return s def det3Mat(matrice): '''Renvoie le déterminant d'une matrice 3*3. False en cas d'échec - matrice - complexe - 3*3 ''' try: a = matrice[0][0] b = matrice[0][1] c = matrice[0][2] d = matrice[1][0] e = matrice[1][1] f = matrice[1][2] g = matrice[2][0] h = matrice[2][1] i = matrice[2][2] r = a*e*i + d*h*c + g*b*f - (g*e*c + a*h*f + d*b*i) return r except: raise Exception("det3Mat : Le calcul du déterminant a échoué.") #~ return False def transposeMat2d(matrice): '''renvoie la matrice transposée. False en cas d'échec - matrice - rectangulaire ? 
''' if isMatrice(matrice): return [list(i) for i in zip(*matrice)] else: raise Exception("transposeMat2d : Pas une matrice.") #~ return False def isMatRect(matrice): '''renvoie True si la matrice est rectangulaire - matrice ''' if not isMatrice(matrice): raise Exception("isMatRect : Pas une matrice.") #~ return False l = len(matrice[0]) for i in matrice: if len(i) != l: return False return True def isMatCarree(matrice): '''renvoie True si la matrice est carrée - matrice ''' if not isMatrice(matrice): raise Exception("isMatCarree : Pas une matrice") #~ return False l = len(matrice[0]) for i in matrice: if len(i) != l: return False return l == len(matrice) def isMatReel(matrice): '''renvoie True si tous les éléments lde la matrice sont réels - matrice ''' if not isMatrice(matrice): return False for i in matrice: for j in i: try: float(j) except: return False return True def isMatComplexe(matrice): '''renvoie True si tous les éléments de la matrice sont complexes - matrice ''' if not isMatrice(matrice): return False for i in matrice: for j in i: try: abs(j) except: return False return True def hilbertMat(matrice): '''renvoie la transposée conjuguée d'une matrice - matrice - complexe - rectangulaire ''' if not isMatrice(matrice): return False if not isMatComplexe(matrice): return False if not isMatRect(matrice): return False M = [] for i in matrice: L = [] for j in i: if type(j) is complex: #~ L.append(complex(j.real, -j.imag)) L.append(j.conjugate()) else: L.append(j) M.append(L) R = transposeMat2d(M) return R def translatMat(matrice, complexe): ''' ''' if not isMatComplexe(matrice): raise Exception("translateMat : Pas une matrice complexe.") try: abs(complexe) except: raise Exception("translatMat : Pas un complexe.") M = [] for i in matrice: L = [] for j in i: L.append(j+complexe) M.append(L) return M def sumMat(matrice): '''renvoie la somme des termes de la matrice - matrice - complexe ''' if not isMatComplexe(matrice): return False s = 0 for i in matrice: for j in i: s += j return s def isInMat(e, matrice): ''' retourne True si l'élément est dans la matrice - matrice ''' r = False if not isMatrice(matrice): return False for i in matrice: for j in i: if j == e: return True return False def tailleMat(matrice): ''' retourne la taille de la matrice - matrice - rectangulaire ''' if not isMatRect(matrice): return False i = len(matrice) # nombre de lignes p = len(matrice[0]) # nombre de colonnes return (i, p) def isMatSym(matrice): ''' True si la matrice est symétrique - matrice - carrée ''' if not isMatCarree(matrice): return False return matrice == transposeMat2d(matrice) def nbFoisMat(e, matrice): '''retourne le nombre d'occurences de 'e'. - matrice ''' if not isMatrice(matrice): return False n = 0 for i in matrice: for j in i: if j == e: n+=1 return n def nbEleMat(matrice): '''renvoie le nombre d'éléments - matrice ''' if not isMatrice(matrice): n = 0 for i in matrice: for j in i: n += 1 return n def traceMatrix(matrice): ''' ''' if not isMatCarree(matrice): return False n, p = tailleMat(matrice) s = 0 for i in range(n): s += matrice[i][i] return s def sizeOfPdMat(mat1, mat2): ''' ''' if not isMatRect(mat1) or not isMatRect(mat2): return False a, b = tailleMat(mat1) c, d = tailleMat(mat2) return (a, d) def subMatrix(matrice, ligne, colonne): ''' ligne et colonne : 0 .. 
(n-1) ''' try: ligne = int(ligne)<|fim▁hole|> except: return 1 if not isMatRect(matrice): return matrice i, j = tailleMat(matrice) if ligne >= i or colonne >= j: return 3 M = [] #~ X = [] #~ for i in matrice: #~ X.append(i[:]) X = newMatrix(matrice) for i, line in enumerate(X): if i != ligne: line.pop(colonne) M.append(line) return M def detMat(matrice): '''renvoie le déterinant de la matrice - matrice - rectangulaire - complexe ''' if not isMatCarree(matrice): return False n, p = tailleMat(matrice) del p if n == 2: return det2Mat(matrice) elif n == 3: return det3Mat(matrice) else: s = 0 for i, line in enumerate(matrice): #~ print(s) s += line[i]*((-1)**i)*detMat(subMatrix(matrice, 0, i)) return s def isInversible(matrice): ''' ''' if detMat(matrice): return True else: return False def coFacteur(matrice, ligne, colonne): ''' ''' return ((-1)**(ligne+colonne))*detMat(subMatrix(matrice, ligne, colonne)) def coMatrice(matrice): ''' ''' if not isMatCarree(matrice): return False M = [] for i, line in enumerate(matrice): L = [] for j, column in enumerate(line): L.append(coFacteur(matrice, i, j)) M.append(L) return M def pdMat(mat1, mat2): ''' ''' if not sizeOfPdMat(mat1, mat2): return False M1 = newMatrix(mat1) M2 = newMatrix(mat2) M2 = transposeMat2d(M2) M = [] for i in M1: L = [] for j in M2: L.append(pdVect(i, j)) M.append(L) return M def homothetieMat(matrice, complexe): ''' ''' if not isMatrice(matrice): raise Exception("homothetieMat : L'objet reçu n'est pas une matrice.") try: abs(complexe) except: raise Exception("homothetieMat : Pas un complexe.") M = [] for i in matrice: L = [] for j in i: L.append(j*complexe) M.append(L) return M def inverseMat(matrice): ''' ''' if not isInversible(matrice): raise Exception("inverseMat : La matrice n'est pas inversible") M = newMatrix(matrice) return homothetieMat(transposeMat2d(coMatrice(M)), (detMat(M))**(-1)) a = [ [complex(1, 1), 2, 3], [4, 5, 6], [7, 8, 9] ] b = [ [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [4, 0, 0, 1, 0], [0, 0, 0, 0, 1] ] c = [ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1] ] #~ print(detMat(b)) #~ d = coMatrice(b) #~ print(d) #~ print(pdMat(b, b)) #~ print(inverseMat(b))<|fim▁end|>
colonne = int(colonne)
<|file_name|>quota_admission.go<|end_file_name|><|fim▁begin|>package imageapis import ( "fmt" "time" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kutilerrors "k8s.io/apimachinery/pkg/util/errors" kapi "k8s.io/kubernetes/pkg/apis/core" g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" imageapi "github.com/openshift/origin/pkg/image/apis/image" imagesutil "github.com/openshift/origin/test/extended/images" exutil "github.com/openshift/origin/test/extended/util" testutil "github.com/openshift/origin/test/util" ) const ( imageSize = 100 quotaName = "isquota" waitTimeout = time.Second * 600 ) var _ = g.Describe("[Feature:ImageQuota][registry][Serial][Suite:openshift/registry/serial][local] Image resource quota", func() { defer g.GinkgoRecover() var oc = exutil.NewCLI("resourcequota-admission", exutil.KubeConfigPath()) g.JustBeforeEach(func() { g.By("Waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) // needs to be run at the of of each It; cannot be run in AfterEach which is run after the project // is destroyed tearDown := func(oc *exutil.CLI) { g.By(fmt.Sprintf("Deleting quota %s", quotaName)) oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Delete(quotaName, nil) deleteTestImagesAndStreams(oc) } g.It(fmt.Sprintf("should deny a push of built image exceeding %s quota", imageapi.ResourceImageStreams), func() { defer tearDown(oc) dClient, err := testutil.NewDockerClient() o.Expect(err).NotTo(o.HaveOccurred()) outSink := g.GinkgoWriter quota := kapi.ResourceList{ imageapi.ResourceImageStreams: resource.MustParse("0"), } _, err = createResourceQuota(oc, quota) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding quota %v", quota)) _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "refused", imageSize, 1, outSink, false, true) o.Expect(err).NotTo(o.HaveOccurred()) quota, err = bumpQuota(oc, imageapi.ResourceImageStreams, 1) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below quota %v", quota)) _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "tag1", imageSize, 1, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) used, err := waitForResourceQuotaSync(oc, quotaName, quota) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(assertQuotasEqual(used, quota)).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image to existing image stream %v", quota)) _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "tag2", imageSize, 1, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding quota %v", quota)) _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "second", "refused", imageSize, 1, outSink, false, true) quota, err = bumpQuota(oc, imageapi.ResourceImageStreams, 2) o.Expect(err).NotTo(o.HaveOccurred()) used, err = waitForResourceQuotaSync(oc, quotaName, used) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below quota %v", quota)) _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "second", "tag1", imageSize, 1, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) used, err = waitForResourceQuotaSync(oc, quotaName, quota) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(assertQuotasEqual(used, quota)).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push 
image exceeding quota %v", quota)) _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "third", "refused", imageSize, 1, outSink, false, true) o.Expect(err).NotTo(o.HaveOccurred()) g.By("deleting first image stream") err = oc.ImageClient().Image().ImageStreams(oc.Namespace()).Delete("first", nil) o.Expect(err).NotTo(o.HaveOccurred()) used, err = exutil.WaitForResourceQuotaSync( oc.InternalKubeClient().Core().ResourceQuotas(oc.Namespace()), quotaName, kapi.ResourceList{imageapi.ResourceImageStreams: resource.MustParse("1")}, true, waitTimeout, ) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(assertQuotasEqual(used, kapi.ResourceList{imageapi.ResourceImageStreams: resource.MustParse("1")})).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below quota %v", quota)) _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "third", "tag", imageSize, 1, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) used, err = waitForResourceQuotaSync(oc, quotaName, quota) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(assertQuotasEqual(used, quota)).NotTo(o.HaveOccurred()) }) }) // createResourceQuota creates a resource quota with given hard limits in a current namespace and waits until // a first usage refresh func createResourceQuota(oc *exutil.CLI, hard kapi.ResourceList) (*kapi.ResourceQuota, error) { rq := &kapi.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{ Name: quotaName, }, Spec: kapi.ResourceQuotaSpec{ Hard: hard, }, } g.By(fmt.Sprintf("creating resource quota with a limit %v", hard)) rq, err := oc.InternalAdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Create(rq) if err != nil { return nil, err } err = waitForLimitSync(oc, hard) return rq, err } // assertQuotasEqual compares two quota sets and returns an error with proper description when they don't match func assertQuotasEqual(a, b kapi.ResourceList) error { errs := []error{} if len(a) != len(b) { errs = append(errs, fmt.Errorf("number of items does not match (%d != %d)", len(a), len(b))) } for k, av := range a { if bv, exists := b[k]; exists { if av.Cmp(bv) != 0 { errs = append(errs, fmt.Errorf("a[%s] != b[%s] (%s != %s)", k, k, av.String(), bv.String())) } } else { errs = append(errs, fmt.Errorf("resource %q not present in b", k)) } } for k := range b { if _, exists := a[k]; !exists { errs = append(errs, fmt.Errorf("resource %q not present in a", k)) } } return kutilerrors.NewAggregate(errs) } // bumpQuota modifies hard spec of quota object with the given value. It returns modified hard spec. 
func bumpQuota(oc *exutil.CLI, resourceName kapi.ResourceName, value int64) (kapi.ResourceList, error) { g.By(fmt.Sprintf("bump the quota to %s=%d", resourceName, value)) rq, err := oc.InternalAdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Get(quotaName, metav1.GetOptions{}) if err != nil { return nil, err } rq.Spec.Hard[resourceName] = *resource.NewQuantity(value, resource.DecimalSI)<|fim▁hole|> err = waitForLimitSync(oc, rq.Spec.Hard) if err != nil { return nil, err } return rq.Spec.Hard, nil } // waitForResourceQuotaSync waits until a usage of a quota reaches given limit with a short timeout func waitForResourceQuotaSync(oc *exutil.CLI, name string, expectedResources kapi.ResourceList) (kapi.ResourceList, error) { g.By(fmt.Sprintf("waiting for resource quota %s to get updated", name)) used, err := exutil.WaitForResourceQuotaSync( oc.InternalKubeClient().Core().ResourceQuotas(oc.Namespace()), quotaName, expectedResources, false, waitTimeout, ) if err != nil { return nil, err } return used, nil } // waitForLimitSync waits until a usage of a quota reaches given limit with a short timeout func waitForLimitSync(oc *exutil.CLI, hardLimit kapi.ResourceList) error { g.By(fmt.Sprintf("waiting for resource quota %s to get updated", quotaName)) return testutil.WaitForResourceQuotaLimitSync( oc.InternalKubeClient().Core().ResourceQuotas(oc.Namespace()), quotaName, hardLimit, waitTimeout) } // deleteTestImagesAndStreams deletes test images built in current and shared // namespaces. It also deletes shared projects. func deleteTestImagesAndStreams(oc *exutil.CLI) { for _, projectName := range []string{ oc.Namespace() + "-s2", oc.Namespace() + "-s1", oc.Namespace() + "-shared", oc.Namespace(), } { g.By(fmt.Sprintf("Deleting images and image streams in project %q", projectName)) iss, err := oc.AdminImageClient().Image().ImageStreams(projectName).List(metav1.ListOptions{}) if err != nil { continue } for _, is := range iss.Items { for _, history := range is.Status.Tags { for i := range history.Items { oc.AdminImageClient().Image().Images().Delete(history.Items[i].Image, nil) } } for _, tagRef := range is.Spec.Tags { switch tagRef.From.Kind { case "ImageStreamImage": _, id, err := imageapi.ParseImageStreamImageName(tagRef.From.Name) if err != nil { continue } oc.AdminImageClient().Image().Images().Delete(id, nil) } } } // let the extended framework take care of the current namespace if projectName != oc.Namespace() { g.By(fmt.Sprintf("Deleting project %q", projectName)) oc.AdminProjectClient().Project().Projects().Delete(projectName, nil) } } }<|fim▁end|>
_, err = oc.InternalAdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Update(rq) if err != nil { return nil, err }
<|file_name|>angular2.webpack.config.js<|end_file_name|><|fim▁begin|>const path = require( 'path' ); const pkg = require( './package.json' ); const webpack = require( 'laxar-infrastructure' ).webpack( { context: __dirname, resolve: { extensions: [ '.js', '.jsx', '.ts', '.tsx' ] }, module: { rules: [ { test: /\.tsx?$/, exclude: /node_modules\/.*\/spec\//, loader: 'ts-loader' }, { test: /\.jsx?$/, exclude: path.resolve( __dirname, 'node_modules' ), loader: 'babel-loader' }, { test: /\.spec.js$/, exclude: path.resolve( __dirname, 'node_modules' ), loader: 'laxar-mocks/spec-loader' } ] } } ); module.exports = [ webpack.library(), webpack.browserSpec( [ `./spec/${pkg.name}.spec.js` ] )<|fim▁hole|><|fim▁end|>
];
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from setuptools import setup, find_packages import os import glob import sys #VERSION="2.1dev4" VERSION="2.6dev5" # Taken from kennethreitz/requests/setup.py package_directory = os.path.realpath(os.path.dirname(__file__)) def get_file_contents(file_path): """Get the context of the file using full path name.""" content = "" try: full_path = os.path.join(package_directory, file_path) content = open(full_path, 'r').read() except: print >> sys.stderr, "### could not open file %r" % file_path return content setup( name='privacyIDEA', version=VERSION, description='privacyIDEA: identity, multifactor authentication (OTP), ' 'authorization, audit', author='privacyidea.org', license='AGPLv3', author_email='[email protected]', url='http://www.privacyidea.org', keywords='OTP, two factor authentication, management, security', packages=find_packages(), scripts=['pi-manage.py', 'tools/privacyidea-convert-token', 'tools/privacyidea-create-pwidresolver-user', 'tools/privacyidea-create-sqlidresolver-user', 'tools/privacyidea-pip-update', 'tools/privacyidea-create-certificate', 'tools/privacyidea-fix-access-rights', 'tools/privacyidea-create-ad-users', 'tools/privacyidea-fetchssh.sh', 'tools/privacyidea-create-userdb.sh' ], extras_require={ 'dev': ["Sphinx>=1.3.1", "sphinxcontrib-httpdomain>=1.3.0"], 'test': ["coverage>=3.7.1", "mock>=1.0.1", "nose>=1.3.4", "responses>=0.4.0", "six>=1.8.0"], }, install_requires=["Flask>=0.10.1", "Flask-Cache>=0.13.1", "Flask-Migrate>=1.2.0", "Flask-SQLAlchemy>=2.0", "Flask-Script>=2.0.5", "Jinja2>=2.7.3", "Mako>=0.9.1", "MarkupSafe>=0.23", "MySQL-python>=1.2.5", "Pillow>=2.6.1", "PyJWT>=1.3.0", "PyYAML>=3.11", "Pygments>=2.0.2", "SQLAlchemy>=1.0.5", "Werkzeug>=0.10.4", "alembic>=0.6.7", "argparse>=1.2.1", "bcrypt>=1.1.0", "beautifulsoup4>=4.3.2", "cffi>=0.8.6", "configobj>=5.0.6", "docutils>=0.12", "funcparserlib>=0.3.6", "itsdangerous>=0.24", "ldap3>=0.9.8.4", "netaddr>=0.7.12", "passlib>=1.6.2", "pyasn1>=0.1.7", "pyOpenSSL>=0.15.1", "pycparser>=2.10", "pycrypto>=2.6.1", "pyrad>=2.0", "pyusb>=1.0.0b2", "qrcode>=5.1", "requests>=2.7.0", "sqlsoup>=0.9.0", "wsgiref>=0.1.2" ], include_package_data=True, data_files=[('etc/privacyidea/', ['deploy/apache/privacyideaapp.wsgi', 'deploy/privacyidea/dictionary', 'deploy/privacyidea/enckey', 'deploy/privacyidea/private.pem', 'deploy/privacyidea/public.pem']), ('share/man/man1', ["tools/privacyidea-convert-token.1", "tools/privacyidea-create-pwidresolver-user.1", "tools/privacyidea-create-sqlidresolver-user.1", "tools/privacyidea-pip-update.1", "tools/privacyidea-create-certificate.1", "tools/privacyidea-fix-access-rights.1"<|fim▁hole|> ]), ('lib/privacyidea/authmodules/FreeRADIUS', ["authmodules/FreeRADIUS/LICENSE", "authmodules/FreeRADIUS/privacyidea_radius.pm"]), ('lib/privacyidea/authmodules/OTRS', ["authmodules/OTRS/privacyIDEA.pm"]), ('lib/privacyidea/migrations', ["migrations/alembic.ini", "migrations/env.py", "migrations/README", "migrations/script.py.mako"]), ('lib/privacyidea/migrations/versions', ["migrations/versions/2551ee982544_.py", "migrations/versions/4f32a4e1bf33_.py", "migrations/versions/2181294eed0b_.py", "migrations/versions/e5cbeb7c177_.py", "migrations/versions/4d9178fa8336_.py", "migrations/versions/20969b4cbf06_.py"]) ], classifiers=["Framework :: Flask", "License :: OSI Approved :: " "GNU Affero General Public License v3", "Programming Language :: Python", "Development Status :: 5 - Production/Stable", "Topic :: Internet", "Topic :: 
Security", "Topic :: System ::" " Systems Administration :: Authentication/Directory" ], #message_extractors={'privacyidea': [ # ('**.py', 'python', None), # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]}, zip_safe=False, long_description=get_file_contents('README.md') )<|fim▁end|>
<|file_name|>TransportSearchCountAction.java<|end_file_name|><|fim▁begin|>/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY<|fim▁hole|> * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.action.search.type; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.action.SearchServiceListener; import org.elasticsearch.search.action.SearchServiceTransportAction; import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.FetchSearchResultProvider; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.threadpool.ThreadPool; import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId; /** * */ public class TransportSearchCountAction extends TransportSearchTypeAction { @Inject public TransportSearchCountAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController, ActionFilters actionFilters) { super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters); } @Override protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) { new AsyncAction(searchRequest, listener).start(); } private class AsyncAction extends BaseAsyncAction<QuerySearchResultProvider> { private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) { super(request, listener); } @Override protected String firstPhaseName() { return "query"; } @Override protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<QuerySearchResultProvider> listener) { searchService.sendExecuteQuery(node, request, listener); } @Override protected void moveToSecondPhase() throws Exception { // no need to sort, since we know we have no hits back final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray<? 
extends FetchSearchResultProvider>) AtomicArray.empty()); String scrollId = null; if (request.scroll() != null) { scrollId = buildScrollId(request.searchType(), firstResults, null); } listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures())); } } }<|fim▁end|>
<|file_name|>label.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2015, The Rust-GNOME Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT> <|fim▁hole|> use ffi; use glib::{to_bool, to_gboolean}; use Justification; use cast::GTK_LABEL; pub trait LabelTrait: ::WidgetTrait { fn set_label(&self, text: &str) -> () { unsafe { ffi::gtk_label_set_label(GTK_LABEL(self.unwrap_widget()), text.to_glib_none().0) } } fn set_text(&self, text: &str) -> () { unsafe { ffi::gtk_label_set_text(GTK_LABEL(self.unwrap_widget()), text.to_glib_none().0) } } fn set_justify(&self, jtype: Justification) -> () { unsafe { ffi::gtk_label_set_justify(GTK_LABEL(self.unwrap_widget()), jtype); } } fn set_markup(&self, text: &str) -> () { unsafe { ffi::gtk_label_set_markup(GTK_LABEL(self.unwrap_widget()), text.to_glib_none().0) } } fn set_markup_with_mnemonic(&self, text: &str) -> () { unsafe { ffi::gtk_label_set_markup_with_mnemonic(GTK_LABEL(self.unwrap_widget()), text.to_glib_none().0) } } fn set_pattern(&self, text: &str) -> () { unsafe { ffi::gtk_label_set_pattern(GTK_LABEL(self.unwrap_widget()), text.to_glib_none().0) } } fn set_text_with_mnemonic(&self, text: &str) -> () { unsafe { ffi::gtk_label_set_text_with_mnemonic(GTK_LABEL(self.unwrap_widget()), text.to_glib_none().0); } } fn set_width_chars(&self, n_chars: i32) -> () { unsafe { ffi::gtk_label_set_width_chars(GTK_LABEL(self.unwrap_widget()), n_chars as c_int); } } fn set_max_width_chars(&self, n_chars: i32) -> () { unsafe { ffi::gtk_label_set_max_width_chars(GTK_LABEL(self.unwrap_widget()), n_chars as c_int); } } fn set_line_wrap(&self, wrap: bool) -> () { unsafe { ffi::gtk_label_set_line_wrap(GTK_LABEL(self.unwrap_widget()), to_gboolean(wrap)); } } fn get_line_wrap(&self) -> bool { unsafe { to_bool(ffi::gtk_label_get_line_wrap(GTK_LABEL(self.unwrap_widget()))) } } #[cfg(feature = "gtk_3_10")] fn set_lines(&self, lines: i32) -> () { unsafe { ffi::gtk_label_set_lines(GTK_LABEL(self.unwrap_widget()), lines as c_int); } } #[cfg(feature = "gtk_3_10")] fn get_lines(&self) -> i32 { unsafe { ffi::gtk_label_get_lines(GTK_LABEL(self.unwrap_widget())) as c_int } } fn get_layout_offsets(&self) -> (i32, i32) { let x = 0; let y = 0; unsafe { ffi::gtk_label_get_layout_offsets(GTK_LABEL(self.unwrap_widget()), &x, &y); } (x, y) } fn get_mnemonic_keyval(&self) -> u32 { unsafe { ffi::gtk_label_get_mnemonic_keyval(GTK_LABEL(self.unwrap_widget())) as u32 } } fn set_selectable(&self, selectable: bool) -> () { unsafe { ffi::gtk_label_set_selectable(GTK_LABEL(self.unwrap_widget()), to_gboolean(selectable)); } } fn get_selectable(&self) -> bool { unsafe { to_bool(ffi::gtk_label_get_selectable(GTK_LABEL(self.unwrap_widget()))) } } fn set_use_markup(&self, use_markup: bool) -> () { unsafe { ffi::gtk_label_set_use_markup(GTK_LABEL(self.unwrap_widget()), to_gboolean(use_markup)); } } fn get_use_markup(&self) -> bool { unsafe { to_bool(ffi::gtk_label_get_use_markup(GTK_LABEL(self.unwrap_widget()))) } } fn set_use_underline(&self, use_underline: bool) -> () { unsafe { ffi::gtk_label_set_use_underline(GTK_LABEL(self.unwrap_widget()), to_gboolean(use_underline)); } } fn get_use_underline(&self) -> bool { unsafe { to_bool(ffi::gtk_label_get_use_underline(GTK_LABEL(self.unwrap_widget()))) } } fn set_single_line_mode(&self, single_line_mode: bool) -> () { unsafe { ffi::gtk_label_set_single_line_mode(GTK_LABEL(self.unwrap_widget()), 
to_gboolean(single_line_mode)); } } fn get_single_line_mode(&self) -> bool { unsafe { to_bool(ffi::gtk_label_get_single_line_mode(GTK_LABEL(self.unwrap_widget()))) } } fn set_track_visited_links(&self, track_visited_links: bool) -> () { unsafe { ffi::gtk_label_set_track_visited_links(GTK_LABEL(self.unwrap_widget()), to_gboolean(track_visited_links)); } } fn get_track_visited_links(&self) -> bool { unsafe { to_bool(ffi::gtk_label_get_track_visited_links(GTK_LABEL(self.unwrap_widget()))) } } fn get_text(&self) -> Option<String> { unsafe { from_glib_none(ffi::gtk_label_get_text(GTK_LABEL(self.unwrap_widget()))) } } fn get_label(&self) -> Option<String> { unsafe { from_glib_none(ffi::gtk_label_get_label(GTK_LABEL(self.unwrap_widget()))) } } fn get_current_uri(&self) -> Option<String> { unsafe { from_glib_none(ffi::gtk_label_get_current_uri(GTK_LABEL(self.unwrap_widget()))) } } fn select_region(&self, start_offset: i32, end_offset: i32) -> () { unsafe { ffi::gtk_label_select_region(GTK_LABEL(self.unwrap_widget()), start_offset as c_int, end_offset as c_int); } } fn get_justify(&self) -> Justification { unsafe { ffi::gtk_label_get_justify(GTK_LABEL(self.unwrap_widget())) } } fn get_width_chars(&self) -> i32 { unsafe { ffi::gtk_label_get_width_chars(GTK_LABEL(self.unwrap_widget())) as i32 } } fn get_max_width_chars(&self) -> i32 { unsafe { ffi::gtk_label_get_max_width_chars(GTK_LABEL(self.unwrap_widget())) as i32 } } fn get_selection_bounds(&self) -> (i32, i32) { let x = 0; let y = 0; unsafe { ffi::gtk_label_get_selection_bounds(GTK_LABEL(self.unwrap_widget()), &x, &y); } (x, y) } fn set_angle(&self, angle: f64) -> () { unsafe { ffi::gtk_label_set_angle(GTK_LABEL(self.unwrap_widget()), angle as c_double); } } fn get_angle(&self) -> f64 { unsafe { ffi::gtk_label_get_angle(GTK_LABEL(self.unwrap_widget())) as f64 } } }<|fim▁end|>
use libc::{c_int, c_double}; use glib::translate::{from_glib_none, ToGlibPtr};
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>""" Django settings for central_service project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) PROJECT_PATH = os.path.dirname(os.path.realpath(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'd$^6$7ybljkbz@b#7j&4cz_46dhe$=uiqnxuz+h3yoyj6u$$fk' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True # Application definition INSTALLED_APPS = ( 'django_admin_bootstrapped', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'services', 'service_pages', 'rest_framework', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'central_service.urls' WSGI_APPLICATION = 'central_service.wsgi.application' # Templates TEMPLATE_DIRS = ( os.path.join(PROJECT_PATH, 'templates'), ) # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Cache CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-snowflake', } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' # Used in production to define where collectstatic stores stuff STATIC_ROOT = os.path.join(PROJECT_PATH, '../static') ADMIN_MEDIA_PREFIX = '/static/admin/' # STATICFILES_FINDERS = ( # 'django.contrib.staticfiles.finders.FileSystemFinder', # 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # ) # Used in development to force django to serve static files STATICFILES_DIRS = [ os.path.join(PROJECT_PATH, "static"), ] LOGIN_URL = '/login/' LOGIN_REDIRECT_URL = '/' REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. 'DEFAULT_PERMISSION_CLASSES': [<|fim▁hole|>}<|fim▁end|>
'rest_framework.permissions.IsAdminUser', ]
<|file_name|>model.py<|end_file_name|><|fim▁begin|># The content of this file was generated using the Python profile of libCellML 0.2.0. from enum import Enum from math import * __version__ = "0.3.0" LIBCELLML_VERSION = "0.2.0" STATE_COUNT = 1 VARIABLE_COUNT = 2 class VariableType(Enum): VARIABLE_OF_INTEGRATION = 1 STATE = 2 CONSTANT = 3 COMPUTED_CONSTANT = 4 ALGEBRAIC = 5 VOI_INFO = {"name": "time", "units": "second", "component": "my_component", "type": VariableType.VARIABLE_OF_INTEGRATION} STATE_INFO = [ {"name": "x", "units": "dimensionless", "component": "my_component", "type": VariableType.STATE} ] VARIABLE_INFO = [ {"name": "b", "units": "second", "component": "my_component", "type": VariableType.ALGEBRAIC}, {"name": "a", "units": "second", "component": "my_component", "type": VariableType.ALGEBRAIC} ] def create_states_array(): return [nan]*STATE_COUNT def create_variables_array(): return [nan]*VARIABLE_COUNT def initialise_states_and_constants(states, variables): states[0] = 0.0 def compute_computed_constants(variables): pass def compute_rates(voi, states, rates, variables): rates[0] = 1.0 def compute_variables(voi, states, rates, variables): variables[0] = 2.0*voi<|fim▁hole|><|fim▁end|>
variables[1] = 3.0*variables[0]
<|file_name|>statemachine.py<|end_file_name|><|fim▁begin|># $Id: statemachine.py 6314 2010-04-26 10:04:17Z milde $ # Author: David Goodger <[email protected]> # Copyright: This module has been placed in the public domain. """ A finite state machine specialized for regular-expression-based text filters, this module defines the following classes: - `StateMachine`, a state machine - `State`, a state superclass - `StateMachineWS`, a whitespace-sensitive version of `StateMachine` - `StateWS`, a state superclass for use with `StateMachineWS` - `SearchStateMachine`, uses `re.search()` instead of `re.match()` - `SearchStateMachineWS`, uses `re.search()` instead of `re.match()` - `ViewList`, extends standard Python lists. - `StringList`, string-specific ViewList. Exception classes: - `StateMachineError` - `UnknownStateError` - `DuplicateStateError` - `UnknownTransitionError` - `DuplicateTransitionError` - `TransitionPatternNotFound` - `TransitionMethodNotFound` - `UnexpectedIndentationError` - `TransitionCorrection`: Raised to switch to another transition. - `StateCorrection`: Raised to switch to another state & transition. Functions: - `string2lines()`: split a multi-line string into a list of one-line strings How To Use This Module ====================== (See the individual classes, methods, and attributes for details.) 1. Import it: ``import statemachine`` or ``from statemachine import ...``. You will also need to ``import re``. 2. Derive a subclass of `State` (or `StateWS`) for each state in your state machine:: class MyState(statemachine.State): Within the state's class definition: a) Include a pattern for each transition, in `State.patterns`:: patterns = {'atransition': r'pattern', ...} b) Include a list of initial transitions to be set up automatically, in `State.initial_transitions`:: initial_transitions = ['atransition', ...] c) Define a method for each transition, with the same name as the transition pattern:: def atransition(self, match, context, next_state): # do something result = [...] # a list return context, next_state, result # context, next_state may be altered Transition methods may raise an `EOFError` to cut processing short. d) You may wish to override the `State.bof()` and/or `State.eof()` implicit transition methods, which handle the beginning- and end-of-file. e) In order to handle nested processing, you may wish to override the attributes `State.nested_sm` and/or `State.nested_sm_kwargs`. If you are using `StateWS` as a base class, in order to handle nested indented blocks, you may wish to: - override the attributes `StateWS.indent_sm`, `StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or `StateWS.known_indent_sm_kwargs`; - override the `StateWS.blank()` method; and/or - override or extend the `StateWS.indent()`, `StateWS.known_indent()`, and/or `StateWS.firstknown_indent()` methods. 3. Create a state machine object:: sm = StateMachine(state_classes=[MyState, ...], initial_state='MyState') 4. Obtain the input text, which needs to be converted into a tab-free list of one-line strings. For example, to read text from a file called 'inputfile':: input_string = open('inputfile').read() input_lines = statemachine.string2lines(input_string) 5. Run the state machine on the input text and collect the results, a list:: results = sm.run(input_lines) 6. 
Remove any lingering circular references:: sm.unlink() """ __docformat__ = 'restructuredtext' import sys import re import types import unicodedata class StateMachine: """ A finite state machine for text filters using regular expressions. The input is provided in the form of a list of one-line strings (no newlines). States are subclasses of the `State` class. Transitions consist of regular expression patterns and transition methods, and are defined in each state. The state machine is started with the `run()` method, which returns the results of processing in a list. """ def __init__(self, state_classes, initial_state, debug=0): """ Initialize a `StateMachine` object; add state objects. Parameters: - `state_classes`: a list of `State` (sub)classes. - `initial_state`: a string, the class name of the initial state. - `debug`: a boolean; produce verbose output if true (nonzero). """ self.input_lines = None """`StringList` of input lines (without newlines). Filled by `self.run()`.""" self.input_offset = 0 """Offset of `self.input_lines` from the beginning of the file.""" self.line = None """Current input line.""" self.line_offset = -1 """Current input line offset from beginning of `self.input_lines`.""" self.debug = debug """Debugging mode on/off.""" self.initial_state = initial_state """The name of the initial state (key to `self.states`).""" self.current_state = initial_state """The name of the current state (key to `self.states`).""" self.states = {} """Mapping of {state_name: State_object}.""" self.add_states(state_classes) self.observers = [] """List of bound methods or functions to call whenever the current line changes. Observers are called with one argument, ``self``. Cleared at the end of `run()`.""" def unlink(self): """Remove circular references to objects no longer required.""" for state in self.states.values(): state.unlink() self.states = None def run(self, input_lines, input_offset=0, context=None, input_source=None, initial_state=None): """ Run the state machine on `input_lines`. Return results (a list). Reset `self.line_offset` and `self.current_state`. Run the beginning-of-file transition. Input one line at a time and check for a matching transition. If a match is found, call the transition method and possibly change the state. Store the context returned by the transition method to be passed on to the next transition matched. Accumulate the results returned by the transition methods in a list. Run the end-of-file transition. Finally, return the accumulated results. Parameters: - `input_lines`: a list of strings without newlines, or `StringList`. - `input_offset`: the line offset of `input_lines` from the beginning of the file. - `context`: application-specific storage. - `input_source`: name or path of source of `input_lines`. - `initial_state`: name of initial state. 
""" self.runtime_init() if isinstance(input_lines, StringList): self.input_lines = input_lines else: self.input_lines = StringList(input_lines, source=input_source) self.input_offset = input_offset self.line_offset = -1 self.current_state = initial_state or self.initial_state if self.debug: print >>sys.stderr, ( '\nStateMachine.run: input_lines (line_offset=%s):\n| %s' % (self.line_offset, '\n| '.join(self.input_lines))) transitions = None results = [] state = self.get_state() try: if self.debug: print >>sys.stderr, ('\nStateMachine.run: bof transition') context, result = state.bof(context) results.extend(result) while 1: try: try: self.next_line() if self.debug: source, offset = self.input_lines.info( self.line_offset) print >>sys.stderr, ( '\nStateMachine.run: line (source=%r, ' 'offset=%r):\n| %s' % (source, offset, self.line)) context, next_state, result = self.check_line( context, state, transitions) except EOFError: if self.debug: print >>sys.stderr, ( '\nStateMachine.run: %s.eof transition' % state.__class__.__name__) result = state.eof(context) results.extend(result) break else: results.extend(result) except TransitionCorrection, exception: self.previous_line() # back up for another try transitions = (exception.args[0],) if self.debug: print >>sys.stderr, ( '\nStateMachine.run: TransitionCorrection to ' 'state "%s", transition %s.' % (state.__class__.__name__, transitions[0])) continue except StateCorrection, exception: self.previous_line() # back up for another try next_state = exception.args[0] if len(exception.args) == 1: transitions = None else: transitions = (exception.args[1],) if self.debug: print >>sys.stderr, ( '\nStateMachine.run: StateCorrection to state ' '"%s", transition %s.' % (next_state, transitions[0])) else: transitions = None state = self.get_state(next_state) except: if self.debug: self.error() raise self.observers = [] return results def get_state(self, next_state=None): """ Return current state object; set it first if `next_state` given. Parameter `next_state`: a string, the name of the next state. Exception: `UnknownStateError` raised if `next_state` unknown. """ if next_state: if self.debug and next_state != self.current_state: print >>sys.stderr, \ ('\nStateMachine.get_state: Changing state from ' '"%s" to "%s" (input line %s).' 
% (self.current_state, next_state, self.abs_line_number())) self.current_state = next_state try: return self.states[self.current_state] except KeyError: raise UnknownStateError(self.current_state) def next_line(self, n=1): """Load `self.line` with the `n`'th next line and return it.""" try: try: self.line_offset += n self.line = self.input_lines[self.line_offset] except IndexError: self.line = None raise EOFError return self.line finally: self.notify_observers() def is_next_line_blank(self): """Return 1 if the next line is blank or non-existant.""" try: return not self.input_lines[self.line_offset + 1].strip() except IndexError: return 1 def at_eof(self): """Return 1 if the input is at or past end-of-file.""" return self.line_offset >= len(self.input_lines) - 1 def at_bof(self): """Return 1 if the input is at or before beginning-of-file.""" return self.line_offset <= 0 def previous_line(self, n=1): """Load `self.line` with the `n`'th previous line and return it.""" self.line_offset -= n if self.line_offset < 0: self.line = None else: self.line = self.input_lines[self.line_offset] self.notify_observers() return self.line def goto_line(self, line_offset): """Jump to absolute line offset `line_offset`, load and return it.""" try: try: self.line_offset = line_offset - self.input_offset self.line = self.input_lines[self.line_offset] except IndexError: self.line = None raise EOFError return self.line finally: self.notify_observers() def get_source(self, line_offset): """Return source of line at absolute line offset `line_offset`.""" return self.input_lines.source(line_offset - self.input_offset) def abs_line_offset(self): """Return line offset of current line, from beginning of file.""" return self.line_offset + self.input_offset def abs_line_number(self): """Return line number of current line (counting from 1).""" return self.line_offset + self.input_offset + 1 def get_source_and_line(self, lineno=None): """Return (source, line) tuple for current or given line number. Looks up the source and line number in the `self.input_lines` StringList instance to count for included source files. If the optional argument `lineno` is given, convert it from an absolute line number to the corresponding (source, line) pair. """ if lineno is None: offset = self.line_offset else: offset = lineno - self.input_offset - 1 try: src, srcoffset = self.input_lines.info(offset) srcline = srcoffset + 1 except (TypeError): # line is None if index is "Just past the end" src, line = self.get_source_and_line(offset + self.input_offset) return src, line + 1 except (IndexError): # `offset` is off the list src, srcline = None, None # raise AssertionError('cannot find line %d in %s lines' % # (offset, len(self.input_lines))) # # list(self.input_lines.lines()))) # assert offset == srcoffset, str(self.input_lines) # print "get_source_and_line(%s):" % lineno, # print offset + 1, '->', src, srcline # print self.input_lines return (src, srcline) def insert_input(self, input_lines, source): self.input_lines.insert(self.line_offset + 1, '', source='internal padding after '+source, offset=len(input_lines)) self.input_lines.insert(self.line_offset + 1, '', source='internal padding before '+source, offset=-1) self.input_lines.insert(self.line_offset + 2, StringList(input_lines, source)) def get_text_block(self, flush_left=0): """ Return a contiguous block of text. If `flush_left` is true, raise `UnexpectedIndentationError` if an indented line is encountered before the text block ends (with a blank line). 
""" try: block = self.input_lines.get_text_block(self.line_offset, flush_left) self.next_line(len(block) - 1) return block except UnexpectedIndentationError, error: block, source, lineno = error.args self.next_line(len(block) - 1) # advance to last line of block raise def check_line(self, context, state, transitions=None): """ Examine one line of input for a transition match & execute its method. Parameters: - `context`: application-dependent storage. - `state`: a `State` object, the current state. - `transitions`: an optional ordered list of transition names to try, instead of ``state.transition_order``. Return the values returned by the transition method: - context: possibly modified from the parameter `context`; - next state name (`State` subclass name); - the result output of the transition, a list. When there is no match, ``state.no_match()`` is called and its return value is returned. """ if transitions is None: transitions = state.transition_order state_correction = None if self.debug: print >>sys.stderr, ( '\nStateMachine.check_line: state="%s", transitions=%r.' % (state.__class__.__name__, transitions)) for name in transitions: pattern, method, next_state = state.transitions[name] match = pattern.match(self.line) if match: if self.debug: print >>sys.stderr, ( '\nStateMachine.check_line: Matched transition ' '"%s" in state "%s".' % (name, state.__class__.__name__)) return method(match, context, next_state) else: if self.debug: print >>sys.stderr, ( '\nStateMachine.check_line: No match in state "%s".' % state.__class__.__name__) return state.no_match(context, transitions) def add_state(self, state_class): """ Initialize & add a `state_class` (`State` subclass) object. Exception: `DuplicateStateError` raised if `state_class` was already added. """ statename = state_class.__name__ if statename in self.states: raise DuplicateStateError(statename) self.states[statename] = state_class(self, self.debug) def add_states(self, state_classes): """ Add `state_classes` (a list of `State` subclasses). """ for state_class in state_classes: self.add_state(state_class) def runtime_init(self): """ Initialize `self.states`. """ for state in self.states.values(): state.runtime_init() def error(self): """Report error details.""" type, value, module, line, function = _exception_data() print >>sys.stderr, '%s: %s' % (type, value) print >>sys.stderr, 'input line %s' % (self.abs_line_number()) print >>sys.stderr, ('module %s, line %s, function %s' % (module, line, function)) def attach_observer(self, observer): """ The `observer` parameter is a function or bound method which takes two arguments, the source and offset of the current line. """ self.observers.append(observer) def detach_observer(self, observer): self.observers.remove(observer) def notify_observers(self): for observer in self.observers: try: info = self.input_lines.info(self.line_offset) except IndexError: info = (None, None) observer(*info) class State: """ State superclass. Contains a list of transitions, and transition methods. Transition methods all have the same signature. They take 3 parameters: - An `re` match object. ``match.string`` contains the matched input line, ``match.start()`` gives the start index of the match, and ``match.end()`` gives the end index. - A context object, whose meaning is application-defined (initial value ``None``). It can be used to store any information required by the state machine, and the retured context is passed on to the next transition method unchanged. 
- The name of the next state, a string, taken from the transitions list; normally it is returned unchanged, but it may be altered by the transition method if necessary. Transition methods all return a 3-tuple: - A context object, as (potentially) modified by the transition method. - The next state name (a return value of ``None`` means no state change). - The processing result, a list, which is accumulated by the state machine. Transition methods may raise an `EOFError` to cut processing short. There are two implicit transitions, and corresponding transition methods are defined: `bof()` handles the beginning-of-file, and `eof()` handles the end-of-file. These methods have non-standard signatures and return values. `bof()` returns the initial context and results, and may be used to return a header string, or do any other processing needed. `eof()` should handle any remaining context and wrap things up; it returns the final processing result. Typical applications need only subclass `State` (or a subclass), set the `patterns` and `initial_transitions` class attributes, and provide corresponding transition methods. The default object initialization will take care of constructing the list of transitions. """ patterns = None """ {Name: pattern} mapping, used by `make_transition()`. Each pattern may be a string or a compiled `re` pattern. Override in subclasses. """ initial_transitions = None """ A list of transitions to initialize when a `State` is instantiated. Each entry is either a transition name string, or a (transition name, next state name) pair. See `make_transitions()`. Override in subclasses. """ nested_sm = None """ The `StateMachine` class for handling nested processing. If left as ``None``, `nested_sm` defaults to the class of the state's controlling state machine. Override it in subclasses to avoid the default. """ nested_sm_kwargs = None """ Keyword arguments dictionary, passed to the `nested_sm` constructor. Two keys must have entries in the dictionary: - Key 'state_classes' must be set to a list of `State` classes. - Key 'initial_state' must be set to the name of the initial state class. If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the class of the current state, and 'initial_state' defaults to the name of the class of the current state. Override in subclasses to avoid the defaults. """ def __init__(self, state_machine, debug=0): """ Initialize a `State` object; make & add initial transitions. Parameters: - `statemachine`: the controlling `StateMachine` object. - `debug`: a boolean; produce verbose output if true (nonzero). """ self.transition_order = [] """A list of transition names in search order.""" self.transitions = {} """ A mapping of transition names to 3-tuples containing (compiled_pattern, transition_method, next_state_name). Initialized as an instance attribute dynamically (instead of as a class attribute) because it may make forward references to patterns and methods in this or other classes. """ self.add_initial_transitions() self.state_machine = state_machine """A reference to the controlling `StateMachine` object.""" self.debug = debug """Debugging mode on/off.""" if self.nested_sm is None: self.nested_sm = self.state_machine.__class__ if self.nested_sm_kwargs is None: self.nested_sm_kwargs = {'state_classes': [self.__class__], 'initial_state': self.__class__.__name__} def runtime_init(self): """ Initialize this `State` before running the state machine; called from `self.state_machine.run()`. 
""" pass def unlink(self): """Remove circular references to objects no longer required.""" self.state_machine = None def add_initial_transitions(self): """Make and add transitions listed in `self.initial_transitions`.""" if self.initial_transitions: names, transitions = self.make_transitions( self.initial_transitions) self.add_transitions(names, transitions) def add_transitions(self, names, transitions): """ Add a list of transitions to the start of the transition list. Parameters: - `names`: a list of transition names. - `transitions`: a mapping of names to transition tuples. Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`. """ for name in names: if name in self.transitions: raise DuplicateTransitionError(name) if name not in transitions: raise UnknownTransitionError(name) self.transition_order[:0] = names self.transitions.update(transitions) def add_transition(self, name, transition): """ Add a transition to the start of the transition list. Parameter `transition`: a ready-made transition 3-tuple. Exception: `DuplicateTransitionError`. """ if name in self.transitions: raise DuplicateTransitionError(name) self.transition_order[:0] = [name] self.transitions[name] = transition def remove_transition(self, name): """ Remove a transition by `name`. Exception: `UnknownTransitionError`. """ try: del self.transitions[name] self.transition_order.remove(name) except: raise UnknownTransitionError(name) def make_transition(self, name, next_state=None): """ Make & return a transition tuple based on `name`. This is a convenience function to simplify transition creation. Parameters: - `name`: a string, the name of the transition pattern & method. This `State` object must have a method called '`name`', and a dictionary `self.patterns` containing a key '`name`'. - `next_state`: a string, the name of the next `State` object for this transition. A value of ``None`` (or absent) implies no state change (i.e., continue with the same state). Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`. """ if next_state is None: next_state = self.__class__.__name__ try: pattern = self.patterns[name] if not hasattr(pattern, 'match'): pattern = re.compile(pattern) except KeyError: raise TransitionPatternNotFound( '%s.patterns[%r]' % (self.__class__.__name__, name)) try: method = getattr(self, name) except AttributeError: raise TransitionMethodNotFound( '%s.%s' % (self.__class__.__name__, name)) return (pattern, method, next_state) def make_transitions(self, name_list): """ Return a list of transition names and a transition mapping. Parameter `name_list`: a list, where each entry is either a transition name string, or a 1- or 2-tuple (transition name, optional next state name). """ stringtype = type('') names = [] transitions = {} for namestate in name_list: if type(namestate) is stringtype: transitions[namestate] = self.make_transition(namestate) names.append(namestate) else: transitions[namestate[0]] = self.make_transition(*namestate) names.append(namestate[0]) return names, transitions def no_match(self, context, transitions): """ Called when there is no match from `StateMachine.check_line()`. Return the same values returned by transition methods: - context: unchanged; - next state name: ``None``; - empty result list. Override in subclasses to catch this event. """ return context, None, [] def bof(self, context): """ Handle beginning-of-file. Return unchanged `context`, empty result. Override in subclasses. Parameter `context`: application-defined storage. 
""" return context, [] def eof(self, context): """ Handle end-of-file. Return empty result. Override in subclasses. Parameter `context`: application-defined storage. """ return [] def nop(self, match, context, next_state): """ A "do nothing" transition method. Return unchanged `context` & `next_state`, empty result. Useful for simple state changes (actionless transitions). """ return context, next_state, [] class StateMachineWS(StateMachine): """ `StateMachine` subclass specialized for whitespace recognition. There are three methods provided for extracting indented text blocks: - `get_indented()`: use when the indent is unknown. - `get_known_indented()`: use when the indent is known for all lines. - `get_first_known_indented()`: use when only the first line's indent is known. """ def get_indented(self, until_blank=0, strip_indent=1): """ Return a block of indented lines of text, and info. Extract an indented block where the indent is unknown for all lines. :Parameters: - `until_blank`: Stop collecting at the first blank line if true (1). - `strip_indent`: Strip common leading indent if true (1, default). :Return: - the indented block (a list of lines of text), - its indent, - its first line offset from BOF, and - whether or not it finished with a blank line. """ offset = self.abs_line_offset() indented, indent, blank_finish = self.input_lines.get_indented( self.line_offset, until_blank, strip_indent) if indented: self.next_line(len(indented) - 1) # advance to last indented line while indented and not indented[0].strip(): indented.trim_start() offset += 1 return indented, indent, offset, blank_finish def get_known_indented(self, indent, until_blank=0, strip_indent=1): """ Return an indented block and info. Extract an indented block where the indent is known for all lines. Starting with the current line, extract the entire text block with at least `indent` indentation (which must be whitespace, except for the first line). :Parameters: - `indent`: The number of indent columns/characters. - `until_blank`: Stop collecting at the first blank line if true (1). - `strip_indent`: Strip `indent` characters of indentation if true (1, default). :Return: - the indented block, - its first line offset from BOF, and - whether or not it finished with a blank line. """ offset = self.abs_line_offset() indented, indent, blank_finish = self.input_lines.get_indented( self.line_offset, until_blank, strip_indent, block_indent=indent) self.next_line(len(indented) - 1) # advance to last indented line while indented and not indented[0].strip(): indented.trim_start() offset += 1 return indented, offset, blank_finish def get_first_known_indented(self, indent, until_blank=0, strip_indent=1, strip_top=1): """ Return an indented block and info. Extract an indented block where the indent is known for the first line and unknown for all other lines. :Parameters: - `indent`: The first line's indent (# of columns/characters). - `until_blank`: Stop collecting at the first blank line if true (1). - `strip_indent`: Strip `indent` characters of indentation if true (1, default). - `strip_top`: Strip blank lines from the beginning of the block. :Return: - the indented block, - its indent, - its first line offset from BOF, and - whether or not it finished with a blank line. 
""" offset = self.abs_line_offset() indented, indent, blank_finish = self.input_lines.get_indented( self.line_offset, until_blank, strip_indent, first_indent=indent) self.next_line(len(indented) - 1) # advance to last indented line if strip_top: while indented and not indented[0].strip(): indented.trim_start() offset += 1 return indented, indent, offset, blank_finish class StateWS(State): """ State superclass specialized for whitespace (blank lines & indents). Use this class with `StateMachineWS`. The transitions 'blank' (for blank lines) and 'indent' (for indented text blocks) are added automatically, before any other transitions. The transition method `blank()` handles blank lines and `indent()` handles nested indented blocks. Indented blocks trigger a new state machine to be created by `indent()` and run. The class of the state machine to be created is in `indent_sm`, and the constructor keyword arguments are in the dictionary `indent_sm_kwargs`. The methods `known_indent()` and `firstknown_indent()` are provided for indented blocks where the indent (all lines' and first line's only, respectively) is known to the transition method, along with the attributes `known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method is triggered automatically. """ indent_sm = None """ The `StateMachine` class handling indented text blocks. If left as ``None``, `indent_sm` defaults to the value of `State.nested_sm`. Override it in subclasses to avoid the default. """ indent_sm_kwargs = None """ Keyword arguments dictionary, passed to the `indent_sm` constructor. If left as ``None``, `indent_sm_kwargs` defaults to the value of `State.nested_sm_kwargs`. Override it in subclasses to avoid the default. """ known_indent_sm = None """ The `StateMachine` class handling known-indented text blocks. If left as ``None``, `known_indent_sm` defaults to the value of `indent_sm`. Override it in subclasses to avoid the default. """ known_indent_sm_kwargs = None """ Keyword arguments dictionary, passed to the `known_indent_sm` constructor. If left as ``None``, `known_indent_sm_kwargs` defaults to the value of `indent_sm_kwargs`. Override it in subclasses to avoid the default. """ ws_patterns = {'blank': ' *$', 'indent': ' +'} """Patterns for default whitespace transitions. May be overridden in subclasses.""" ws_initial_transitions = ('blank', 'indent') """Default initial whitespace transitions, added before those listed in `State.initial_transitions`. May be overridden in subclasses.""" def __init__(self, state_machine, debug=0): """ Initialize a `StateSM` object; extends `State.__init__()`. Check for indent state machine attributes, set defaults if not set. """ State.__init__(self, state_machine, debug) if self.indent_sm is None: self.indent_sm = self.nested_sm if self.indent_sm_kwargs is None: self.indent_sm_kwargs = self.nested_sm_kwargs if self.known_indent_sm is None: self.known_indent_sm = self.indent_sm if self.known_indent_sm_kwargs is None: self.known_indent_sm_kwargs = self.indent_sm_kwargs def add_initial_transitions(self): """ Add whitespace-specific transitions before those defined in subclass. Extends `State.add_initial_transitions()`. """ State.add_initial_transitions(self) if self.patterns is None: self.patterns = {} self.patterns.update(self.ws_patterns) names, transitions = self.make_transitions( self.ws_initial_transitions) self.add_transitions(names, transitions) def blank(self, match, context, next_state): """Handle blank lines. Does nothing. 
Override in subclasses.""" return self.nop(match, context, next_state) def indent(self, match, context, next_state): """ Handle an indented text block. Extend or override in subclasses. Recursively run the registered state machine for indented blocks (`self.indent_sm`). """ indented, indent, line_offset, blank_finish = \ self.state_machine.get_indented() sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results def known_indent(self, match, context, next_state): """ Handle a known-indent text block. Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``. """ indented, line_offset, blank_finish = \ self.state_machine.get_known_indented(match.end()) sm = self.known_indent_sm(debug=self.debug, **self.known_indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results def first_known_indent(self, match, context, next_state): """ Handle an indented text block (first line's indent known). Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``. """ indented, line_offset, blank_finish = \ self.state_machine.get_first_known_indented(match.end()) sm = self.known_indent_sm(debug=self.debug, **self.known_indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results class _SearchOverride: """ Mix-in class to override `StateMachine` regular expression behavior. Changes regular expression matching, from the default `re.match()` (succeeds only if the pattern matches at the start of `self.line`) to `re.search()` (succeeds if the pattern matches anywhere in `self.line`). When subclassing a `StateMachine`, list this class **first** in the inheritance list of the class definition. """ def match(self, pattern): """ Return the result of a regular expression search. Overrides `StateMachine.match()`. Parameter `pattern`: `re` compiled regular expression. """ return pattern.search(self.line) class SearchStateMachine(_SearchOverride, StateMachine): """`StateMachine` which uses `re.search()` instead of `re.match()`.""" pass class SearchStateMachineWS(_SearchOverride, StateMachineWS): """`StateMachineWS` which uses `re.search()` instead of `re.match()`.""" pass class ViewList: """ List with extended functionality: slices of ViewList objects are child lists, linked to their parents. Changes made to a child list also affect the parent list. A child list is effectively a "view" (in the SQL sense) of the parent list. Changes to parent lists, however, do *not* affect active child lists. If a parent list is changed, any active child lists should be recreated. The start and end of the slice can be trimmed using the `trim_start()` and `trim_end()` methods, without affecting the parent list. The link between child and parent lists can be broken by calling `disconnect()` on the child list. Also, ViewList objects keep track of the source & offset of each item. This information is accessible via the `source()`, `offset()`, and `info()` methods. 
""" def __init__(self, initlist=None, source=None, items=None, parent=None, parent_offset=None): self.data = [] """The actual list of data, flattened from various sources.""" self.items = [] """A list of (source, offset) pairs, same length as `self.data`: the source of each line and the offset of each line from the beginning of its source.""" self.parent = parent """The parent list.""" self.parent_offset = parent_offset """Offset of this list from the beginning of the parent list.""" if isinstance(initlist, ViewList): self.data = initlist.data[:] self.items = initlist.items[:] elif initlist is not None: self.data = list(initlist) if items: self.items = items else: self.items = [(source, i) for i in range(len(initlist))] assert len(self.data) == len(self.items), 'data mismatch' def __str__(self): return str(self.data) def __repr__(self): return '%s(%s, items=%s)' % (self.__class__.__name__, self.data, self.items) def __lt__(self, other): return self.data < self.__cast(other) def __le__(self, other): return self.data <= self.__cast(other) def __eq__(self, other): return self.data == self.__cast(other) def __ne__(self, other): return self.data != self.__cast(other) def __gt__(self, other): return self.data > self.__cast(other) def __ge__(self, other): return self.data >= self.__cast(other) def __cmp__(self, other): return cmp(self.data, self.__cast(other)) def __cast(self, other): if isinstance(other, ViewList): return other.data else: return other def __contains__(self, item): return item in self.data def __len__(self): return len(self.data) # The __getitem__()/__setitem__() methods check whether the index # is a slice first, since indexing a native list with a slice object # just works. def __getitem__(self, i): if isinstance(i, types.SliceType): assert i.step in (None, 1), 'cannot handle slice with stride' return self.__class__(self.data[i.start:i.stop], items=self.items[i.start:i.stop], parent=self, parent_offset=i.start or 0) else: return self.data[i] def __setitem__(self, i, item): if isinstance(i, types.SliceType): assert i.step in (None, 1), 'cannot handle slice with stride' if not isinstance(item, ViewList): raise TypeError('assigning non-ViewList to ViewList slice') self.data[i.start:i.stop] = item.data self.items[i.start:i.stop] = item.items assert len(self.data) == len(self.items), 'data mismatch' if self.parent: self.parent[(i.start or 0) + self.parent_offset : (i.stop or len(self)) + self.parent_offset] = item else: self.data[i] = item if self.parent: self.parent[i + self.parent_offset] = item def __delitem__(self, i): try: del self.data[i] del self.items[i] if self.parent: del self.parent[i + self.parent_offset] except TypeError: assert i.step is None, 'cannot handle slice with stride' del self.data[i.start:i.stop] del self.items[i.start:i.stop] if self.parent: del self.parent[(i.start or 0) + self.parent_offset : (i.stop or len(self)) + self.parent_offset] def __add__(self, other): if isinstance(other, ViewList): return self.__class__(self.data + other.data, items=(self.items + other.items)) else: raise TypeError('adding non-ViewList to a ViewList') def __radd__(self, other): if isinstance(other, ViewList): return self.__class__(other.data + self.data, items=(other.items + self.items)) else: raise TypeError('adding ViewList to a non-ViewList') def __iadd__(self, other): if isinstance(other, ViewList): self.data += other.data else: raise TypeError('argument to += must be a ViewList') return self def __mul__(self, n): return self.__class__(self.data * n, items=(self.items * n)) 
__rmul__ = __mul__ def __imul__(self, n): self.data *= n self.items *= n return self def extend(self, other): if not isinstance(other, ViewList): raise TypeError('extending a ViewList with a non-ViewList') if self.parent: self.parent.insert(len(self.data) + self.parent_offset, other) self.data.extend(other.data) self.items.extend(other.items) def append(self, item, source=None, offset=0): if source is None: self.extend(item) else: if self.parent: self.parent.insert(len(self.data) + self.parent_offset, item, source, offset) self.data.append(item) self.items.append((source, offset)) def insert(self, i, item, source=None, offset=0): if source is None: if not isinstance(item, ViewList): raise TypeError('inserting non-ViewList with no source given') self.data[i:i] = item.data self.items[i:i] = item.items if self.parent: index = (len(self.data) + i) % len(self.data) self.parent.insert(index + self.parent_offset, item) else: self.data.insert(i, item) self.items.insert(i, (source, offset)) if self.parent: index = (len(self.data) + i) % len(self.data) self.parent.insert(index + self.parent_offset, item, source, offset) def pop(self, i=-1): if self.parent: index = (len(self.data) + i) % len(self.data) self.parent.pop(index + self.parent_offset) self.items.pop(i) return self.data.pop(i) def trim_start(self, n=1): """ Remove items from the start of the list, without touching the parent. """ if n > len(self.data): raise IndexError("Size of trim too large; can't trim %s items " "from a list of size %s." % (n, len(self.data))) elif n < 0: raise IndexError('Trim size must be >= 0.') del self.data[:n] del self.items[:n] if self.parent: self.parent_offset += n def trim_end(self, n=1): """ Remove items from the end of the list, without touching the parent. """ if n > len(self.data): raise IndexError("Size of trim too large; can't trim %s items " "from a list of size %s." % (n, len(self.data))) elif n < 0: raise IndexError('Trim size must be >= 0.') del self.data[-n:] del self.items[-n:] def remove(self, item): index = self.index(item) del self[index] def count(self, item): return self.data.count(item) def index(self, item): return self.data.index(item) def reverse(self): self.data.reverse() self.items.reverse() self.parent = None def sort(self, *args): tmp = zip(self.data, self.items) tmp.sort(*args) self.data = [entry[0] for entry in tmp] self.items = [entry[1] for entry in tmp] self.parent = None def info(self, i): """Return source & offset for index `i`.""" try: return self.items[i] except IndexError: if i == len(self.data): # Just past the end return self.items[i - 1][0], None else: raise def source(self, i): """Return source for index `i`.""" return self.info(i)[0] def offset(self, i): """Return offset for index `i`.""" return self.info(i)[1] def disconnect(self): """Break link between this list and parent list.""" self.parent = None def xitems(self): """Return iterator yielding (source, offset, value) tuples.""" for (value, (source, offset)) in zip(self.data, self.items): yield (source, offset, value) def pprint(self): """Print the list in `grep` format (`source:offset:value` lines)""" for line in self.xitems(): print "%s:%d:%s" % line class StringList(ViewList): """A `ViewList` with string-specific methods.""" def trim_left(self, length, start=0, end=sys.maxint): """ Trim `length` characters off the beginning of each item, in-place, from index `start` to `end`. No whitespace-checking is done on the trimmed text. Does not affect slice parent. 
""" self.data[start:end] = [line[length:] for line in self.data[start:end]] def get_text_block(self, start, flush_left=0): """ Return a contiguous block of text. If `flush_left` is true, raise `UnexpectedIndentationError` if an indented line is encountered before the text block ends (with a blank line). """ end = start last = len(self.data) while end < last: line = self.data[end] if not line.strip(): break if flush_left and (line[0] == ' '): source, offset = self.info(end) raise UnexpectedIndentationError(self[start:end], source, offset + 1) end += 1 return self[start:end] def get_indented(self, start=0, until_blank=0, strip_indent=1, block_indent=None, first_indent=None): """ Extract and return a StringList of indented lines of text. Collect all lines with indentation, determine the minimum indentation, remove the minimum indentation from all indented lines (unless `strip_indent` is false), and return them. All lines up to but not including the first unindented line will be returned. :Parameters: - `start`: The index of the first line to examine. - `until_blank`: Stop collecting at the first blank line if true. - `strip_indent`: Strip common leading indent if true (default). - `block_indent`: The indent of the entire block, if known. - `first_indent`: The indent of the first line, if known. :Return: - a StringList of indented lines with mininum indent removed; - the amount of the indent; - a boolean: did the indented block finish with a blank line or EOF? """ indent = block_indent # start with None if unknown end = start if block_indent is not None and first_indent is None: first_indent = block_indent if first_indent is not None: end += 1 last = len(self.data) while end < last: line = self.data[end] if line and (line[0] != ' ' or (block_indent is not None and line[:block_indent].strip())): # Line not indented or insufficiently indented. # Block finished properly iff the last indented line blank: blank_finish = ((end > start) and not self.data[end - 1].strip()) break stripped = line.lstrip() if not stripped: # blank line if until_blank: blank_finish = 1 break elif block_indent is None: line_indent = len(line) - len(stripped) if indent is None: indent = line_indent else: indent = min(indent, line_indent) end += 1 else: blank_finish = 1 # block ends at end of lines block = self[start:end] if first_indent is not None and block: block.data[0] = block.data[0][first_indent:] if indent and strip_indent: block.trim_left(indent, start=(first_indent is not None)) return block, indent or 0, blank_finish def get_2D_block(self, top, left, bottom, right, strip_indent=1): block = self[top:bottom] indent = right for i in range(len(block.data)): block.data[i] = line = block.data[i][left:right].rstrip() if line: indent = min(indent, len(line) - len(line.lstrip())) if strip_indent and 0 < indent < right: block.data = [line[indent:] for line in block.data] return block <|fim▁hole|> def pad_double_width(self, pad_char): """ Pad all double-width characters in self by appending `pad_char` to each. For East Asian language support. 
""" if hasattr(unicodedata, 'east_asian_width'): east_asian_width = unicodedata.east_asian_width else: return # new in Python 2.4 for i in range(len(self.data)): line = self.data[i] if isinstance(line, unicode): new = [] for char in line: new.append(char) if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width new.append(pad_char) self.data[i] = ''.join(new) def replace(self, old, new): """Replace all occurrences of substring `old` with `new`.""" for i in range(len(self.data)): self.data[i] = self.data[i].replace(old, new) class StateMachineError(Exception): pass class UnknownStateError(StateMachineError): pass class DuplicateStateError(StateMachineError): pass class UnknownTransitionError(StateMachineError): pass class DuplicateTransitionError(StateMachineError): pass class TransitionPatternNotFound(StateMachineError): pass class TransitionMethodNotFound(StateMachineError): pass class UnexpectedIndentationError(StateMachineError): pass class TransitionCorrection(Exception): """ Raise from within a transition method to switch to another transition. Raise with one argument, the new transition name. """ class StateCorrection(Exception): """ Raise from within a transition method to switch to another state. Raise with one or two arguments: new state name, and an optional new transition name. """ def string2lines(astring, tab_width=8, convert_whitespace=0, whitespace=re.compile('[\v\f]')): """ Return a list of one-line strings with tabs expanded, no newlines, and trailing whitespace stripped. Each tab is expanded with between 1 and `tab_width` spaces, so that the next character's index becomes a multiple of `tab_width` (8 by default). Parameters: - `astring`: a multi-line string. - `tab_width`: the number of columns between tab stops. - `convert_whitespace`: convert form feeds and vertical tabs to spaces? """ if convert_whitespace: astring = whitespace.sub(' ', astring) return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()] def _exception_data(): """ Return exception information: - the exception's class name; - the exception object; - the name of the file containing the offending code; - the line number of the offending code; - the function name of the offending code. """ type, value, traceback = sys.exc_info() while traceback.tb_next: traceback = traceback.tb_next code = traceback.tb_frame.f_code return (type.__name__, value, code.co_filename, traceback.tb_lineno, code.co_name)<|fim▁end|>
<|file_name|>test_constants.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import unittest from wechatpy.constants import WeChatErrorCode class WeChatErrorCodeTestCase(unittest.TestCase): """ensure python compatibility""" def test_error_code(self): self.assertEqual(-1000, WeChatErrorCode.SYSTEM_ERROR.value) self.assertEqual(42001, WeChatErrorCode.EXPIRED_ACCESS_TOKEN.value) self.assertEqual(48001, WeChatErrorCode.UNAUTHORIZED_API.value)<|fim▁hole|> def test_enum(self): self.assertEqual(WeChatErrorCode.SYSTEM_BUSY, WeChatErrorCode(-1))<|fim▁end|>
<|file_name|>animate-path-animation-qQ-tT-inverse.js<|end_file_name|><|fim▁begin|>description("Test path animation where coordinate modes of start and end differ. You should see PASS messages"); createSVGTestCase(); // Setup test document var path = createSVGElement("path"); path.setAttribute("id", "path"); path.setAttribute("d", "M -30 -30 q 30 0 30 30 t -30 30 Z"); path.setAttribute("fill", "green"); path.setAttribute("onclick", "executeTest()"); path.setAttribute("transform", "translate(50, 50)"); var animate = createSVGElement("animate"); animate.setAttribute("id", "animation"); animate.setAttribute("attributeName", "d"); animate.setAttribute("from", "M -30 -30 q 30 0 30 30 t -30 30 Z"); animate.setAttribute("to", "M -30 -30 Q 30 -30 30 0 T -30 30 Z"); animate.setAttribute("begin", "click"); animate.setAttribute("dur", "4s"); path.appendChild(animate); rootSVGElement.appendChild(path); // Setup animation test function sample1() { // Check initial/end conditions shouldBeEqualToString("path.getAttribute('d')", "M -30 -30 q 30 0 30 30 t -30 30 Z"); } function sample2() { shouldBeEqualToString("path.getAttribute('d')", "M -30 -30 q 37.5 0 37.5 30 t -37.5 30 Z"); } function sample3() { shouldBeEqualToString("path.getAttribute('d')", "M -30 -30 Q 22.5 -30 22.5 0 T -30 30 Z"); } function sample4() { shouldBeEqualToString("path.getAttribute('d')", "M -30 -30 Q 29.9925 -30 29.9925 0 T -30 30 Z"); } function executeTest() { const expectedValues = [ // [animationId, time, sampleCallback] ["animation", 0.0, sample1], ["animation", 1.0, sample2], ["animation", 3.0, sample3], ["animation", 3.999, sample4], ["animation", 4.001, sample1] ]; runAnimationTest(expectedValues); } window.clickX = 40;<|fim▁hole|><|fim▁end|>
window.clickY = 70; var successfullyParsed = true;
<|file_name|>populate_auto.py<|end_file_name|><|fim▁begin|>import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'esite.settings') import django django.setup() from auto.models import Car # def add_car(make, model, km, year, color, eng, drive,trans, icolor): c = Car.objects.get_or_create(make=make, model=model, kilometers=km, year=year, color=color, engine_size=eng, drivetrain=drive, transmition=trans, interanl_color=icolor) def populate(): # car = Car(make='Acura',model='TL', kilometers=74673, year=2012, color='White', engine_size=3.7, drivetrain='AWD', transmition='MA') add_car('Acura', 'TL', 74673, 2012, 'White', 3.7, 'AWD','MA','White') add_car('Volkswagen', 'Touareg', 5344, 2015, 'Silver', 3.6, 'AWD','AU','White') if __name__ == '__main__': print "Starting Car population script..." populate() # def populate(): # python_cat = add_cat('Python') # # add_page(cat=python_cat, # title="Official Python Tutorial",<|fim▁hole|># url="http://docs.python.org/2/tutorial/") # # add_page(cat=python_cat, # title="How to Think like a Computer Scientist", # url="http://www.greenteapress.com/thinkpython/") # # add_page(cat=python_cat, # title="Learn Python in 10 Minutes", # url="http://www.korokithakis.net/tutorials/python/") # # django_cat = add_cat("Django") # # add_page(cat=django_cat, # title="Official Django Tutorial", # url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/") # # add_page(cat=django_cat, # title="Django Rocks", # url="http://www.djangorocks.com/") # # add_page(cat=django_cat, # title="How to Tango with Django", # url="http://www.tangowithdjango.com/") # # frame_cat = add_cat("Other Frameworks") # # add_page(cat=frame_cat, # title="Bottle", # url="http://bottlepy.org/docs/dev/") # # add_page(cat=frame_cat, # title="Flask", # url="http://flask.pocoo.org") # # # Print out what we have added to the user. # for c in Category.objects.all(): # for p in Page.objects.filter(category=c): # print "- {0} - {1}".format(str(c), str(p)) # # def add_page(cat, title, url, views=0): # p = Page.objects.get_or_create(category=cat, title=title)[0] # p.url=url # p.views=views # p.save() # return p # # def add_cat(name): # c = Category.objects.get_or_create(name=name)[0] # return c # Start execution here! # if __name__ == '__main__': # print "Starting Rango population script..." # populate()<|fim▁end|>
<|file_name|>Test.js<|end_file_name|><|fim▁begin|>describe("Test", function () { function testRegex(regexText, text, expectedResult, allTextMatched = true) { let textArray; if (Array.isArray(text)) { textArray = text; } else { textArray = [text]; } const regex = parser.compile(regexText); textArray.forEach(function (txt) { const match = regex.match(txt); expect( allTextMatched ? match.matches : match.allMatchersMatched).toBe(expectedResult); }); } describe("Start End", function () { describe("Matches", function () { it("Both", function () { testRegex('^ab$', ['ab'], true); }); it("Start", function () { testRegex('^ab', ['ab', 'abc'], true, false); }); it("End", function () { testRegex('ab$', ['ab'], true); testRegex('.*ab$', ['cab'], true); }); }); describe("Mismatches", function () { it("Both", function () { testRegex('^ab$', ['a', 'b', 'ac', 'd'], false);<|fim▁hole|> }); it("Start", function () { testRegex('^ab', ['a', 'b', 'ac', 'bc'], false); }); it("End", function () { testRegex('ab$', ['acb', 'a', 'ba'], false); testRegex('.*ab$', ['caxb'], false); }); }); }); describe("Literals", function () { describe("Matches", function () { it("empty string", function () { testRegex('', '', true); }); it("single character", function () { testRegex('a', 'a', true); }); it("multiple characters", function () { testRegex('abc', 'abc', true); }); }); describe("Mismatches", function () { it("empty string", function () { testRegex('', 'a', false); }); it("single character", function () { testRegex('a', ['x', '', 'aa'], false); }); it("multiple characters", function () { testRegex('abc', ['abcd', 'ab', 'Abc'], false); testRegex('zabc', 'abc', false); }); }); }); describe("Dot", function () { describe("Matches", function () { it("single dot", function () { testRegex('.', ['a', '.'], true); testRegex('a.', 'ax', true); testRegex('.b', 'xb', true); }); it("multiple dots", function () { testRegex('..', 'ab', true); testRegex('.x.x.', 'xxxxx', true); }); }); describe("Mismatches", function () { it("single dot", function () { testRegex('.', '', false); testRegex('a.', 'a', false); testRegex('.a', 'a', false); }); it("multiple dots", function () { testRegex('..', 'abc', false); testRegex('a.b.c', 'abc', false); }); }); }); describe("Inclusive Character Specifications", function () { describe("Matches", function () { it("literals", function () { testRegex('[a]', 'a', true); testRegex('[aaa]', 'a', true); testRegex('[abc]', ['a', 'b', 'c'], true); }); it("ranges", function () { testRegex('[a-a]', 'a', true); testRegex('[a-b]', 'a', true); testRegex('[a-b]', 'b', true); testRegex('[a-z]', 'q', true); testRegex('[a-cx-z]', ['a', 'b', 'c', 'x', 'y', 'z'], true); }); it("combinations", function () { testRegex('[ag-imv-xz]', ['a', 'g', 'h', 'i', 'm', 'v', 'w', 'x', 'z'], true); }); it("multiples", function () { testRegex('[a]b[c-f]', ['abc', 'abd', 'abe', 'abf'], true); }); it("meta-characters", function () { testRegex('[-+]', ['+', '-'], true); testRegex('[\-\+\\\\]', ['+', '-','\\'], true); }); }); describe("Mismatches", function () { it("literals", function () { testRegex('[a]', 'b', false); testRegex('[aaa]', 'b', false); testRegex('[abc]', 'd', false); }); it("ranges", function () { testRegex('[a-a]', 'b', false); testRegex('[a-b]', 'B', false); testRegex('[a-cx-z]', 'd', false); }); it("combinations", function () { testRegex('[ag-imv-xz]', ['b', 'f', 'j', 'l', 'n', 'u', 'y'], false); }); }); }); describe("Exclusive Character Specifications", function () { describe("Matches", function () { it("literals", function 
() { testRegex('[^a]', 'b', true); testRegex('[^aaa]', 'b', true); testRegex('[^abc]', 'd', true); }); it("ranges", function () { testRegex('[^a-a]', 'b', true); testRegex('[^a-b]', 'B', true); testRegex('[^a-cx-z]', 'd', true); }); it("combinations", function () { testRegex('[^ag-imv-xz]', ['b', 'f', 'j', 'l', 'n', 'u', 'y'], true); }); }); describe("Mismatches", function () { it("literals", function () { testRegex('[^a]', 'a', false); testRegex('[^aaa]', 'a', false); testRegex('[^abc]', ['a', 'b', 'c'], false); }); it("ranges", function () { testRegex('[^a-a]', 'a', false); testRegex('[^a-b]', 'a', false); testRegex('[^a-b]', 'b', false); testRegex('[^a-z]', 'q', false); testRegex('[^a-cx-z]', ['a', 'b', 'c', 'x', 'y', 'z'], false); }); it("combinations", function () { testRegex('[^ag-imv-xz]', ['a', 'g', 'h', 'i', 'm', 'v', 'w', 'x', 'z'], false); }); it("multiples", function () { testRegex('[^a]b[c-f]', ['abc', 'abd', 'abe', 'abf'], false); testRegex('[a]b[^c-f]', ['abc', 'abd', 'abe', 'abf'], false); }); }); }); describe("test", function() { it("Matches", function () { testRegex('[a-c]*', '', true); }) }) describe("Exact Number Of", function () { it("Matches", function () { testRegex('a{0}', '', true); testRegex('a{1}', 'a', true); testRegex('a{3}', 'aaa', true); testRegex('.{2}', 'ab', true); testRegex('[abc]{2}', ['aa', 'bc', 'cc'], true); }); it("Mismatches", function () { testRegex('a{0}', 'a', false); testRegex('a{1}', ['', 'aa', 'b', 'ab', 'ba'], false); testRegex('a{3}', ['', 'a', 'aa', 'aaaa', 'aba'], false); testRegex('.{2}', 'abc', false); testRegex('.{2}', 'a', false); testRegex('[abc]{2}', 'ad', false); }); }); describe("Number Range Of", function () { it("Matches", function () { testRegex('a{0,0}', '', true); testRegex('.{3,3}', 'abc', true); testRegex('a{0,2}', '', true); testRegex('[abc]{2,5}', ['ab', 'cca', 'abca', 'cccca'], true); }); it("Mismatches", function () { testRegex('a{0,0}', 'a', false); testRegex('.{3,3}', ['ab', 'aaaa'], false); testRegex('a{0,2}', 'aaa', false); testRegex('[abc]{2,5}', ['a', 'aaabbb', 'ax', 'xbc'], false); }); }); describe("Number Or More Of", function () { it("Matches", function () { testRegex('a{0,}', ['', 'a', 'aaa'], true); testRegex('a{1,}', ['a', 'aa'], true); testRegex('a{3,}', ['aaa', 'aaaaaaaa'], true); testRegex('.{2,}', ['ab', 'xx', 'abc'], true); testRegex('[abc]{2,}', ['aa', 'bc', 'cc', 'aaa', 'bbcc'], true); }); it("Mismatches", function () { testRegex('a{1,}', ['', 'b', 'ab', 'ba'], false); testRegex('a{3,}', ['', 'a', 'aa', 'aab', 'baaa'], false); testRegex('.{2,}', ['', 'a'], false); testRegex('[abc]{2,}', ['a', 'ad', 'dabc'], false); }); }); describe("Zero Or More", function () { it("Matches", function () { testRegex('a*', ['', 'a', 'aa', 'aaa'], true); testRegex('[a-c]*', ['', 'a', 'b', 'c', 'ab', 'ccc', 'bcabca'], true); }); it("Mismatches", function () { testRegex('a*', ['b', 'aaab', 'baaa'], false); testRegex('[a-c]*', ['d', 'abcd'], false); }); }); describe("One Or More", function () { it("Matches", function () { testRegex('a+', ['a', 'aa', 'aaa'], true); testRegex('[a-c]+', ['a', 'b', 'c', 'ab', 'ccc', 'bcabca'], true); }); it("Mismatches", function () { testRegex('a+', ['', 'aab', 'aabaa'], false); testRegex('[a-c]+', ['', 'd', 'aaad', 'bcadbca'], false); }); }); describe("Zero Or One", function () { it("Matches", function () { testRegex('a?', ['', 'a'], true); testRegex('[a-c]?', ['', 'a', 'b', 'c'], true); }); it("Mismatches", function () { testRegex('a?', ['b', 'aa'], false); testRegex('[a-c]?', ['d', 'aa', 
'abc'], false); }); }); describe("Combinations", function () { it("Matches", function () { testRegex('a[b-d]{3,5}e{2,}f?g*h+', ['abcdeefgh', 'abbbbbeeeeeh', 'adddeefgggghhhh'], true); }); it("Mismatches", function () { testRegex('a[b-d]{3,5}e{2,}f?g*h+', ['bbbeefgh', 'abbeeh', 'abbbeh', 'abcdeef'], false); }); }); describe("Alternatives", function () { it("Matches", function () { testRegex('a|b', ['a', 'b'], true); testRegex('a||b', ['', 'a', 'b'], true); testRegex('a+|b+', ['a', 'aaa', 'b', 'bb'], true); testRegex('a*|b*|c*', ['', 'a', 'aaa', 'b', 'bb', 'c', 'cccc'], true); }); it("Mismatches", function () { testRegex('a|b', ['', 'c', 'aa', 'ab'], false); }); }); describe("Brackets", function () { it("Matches", function () { testRegex('(a)', 'a', true); testRegex('(ab)*', ['', 'ab', 'ababab'], true); testRegex('(ab|cd)*', ['', 'ab', 'ababab', 'cd', 'cdcd', 'abcdababcdab'], true); testRegex('(ab(c|d)+e)*', ['abce', 'abde', 'abcde', 'abccddcce'], true); }); it("Mismatches", function () { testRegex('(a)', ['', 'aa', 'b', '(a)'], false); testRegex('(ab)*', ['a', 'b', 'aba', 'abbb'], false); testRegex('(ab|cd)*', ['a', 'b', 'c', 'd', 'ac', 'abd'], false); testRegex('(ab(c|d)+e)*', ['abe', 'abcd'], false); }); }); describe("Capturing Groups", function () { it("Matches", function () { testRegex('(a)=\\1', 'a=a', true); testRegex('(a)\\1\\1b', 'aaab', true); testRegex('(a)(b)(c) \\1\\2\\3', 'abc abc', true); testRegex('(a)(b)(c) \\3\\2\\1', 'abc cba', true); testRegex('(a|b)\\1', ['aa', 'bb'], true); testRegex('(.)\\1', ['aa', 'bb', 'cc'], true); testRegex('(a*)\\1', ['', 'aa', 'aaaa'], true); testRegex('(a+)\\1', ['aa', 'aaaa'], true); testRegex('(A+) (B+) \\1 \\2', ['A B A B', 'AAA BB AAA BB'], true); }); it("Mismatches", function () { testRegex('(a)\\1', ['', 'ab', 'a', 'a\\1', 'aaa'], false); testRegex('(a|b)\\1', ['ab', 'ba'], false); testRegex('(.)\\1', ['', 'a'], false); testRegex('(a*)\\1', ['a', 'aaa', 'aaaaa'], false); testRegex('(a+)\\1', ['', 'a', 'aaa'], false); testRegex('(A+) (B+) \\1 \\2', ['A B AA B', 'AAA BB AAA B'], false); }); }); describe("Whitespace", function () { it("Matches", function () { testRegex('\\s', ' ', true); testRegex('\\s+', ' \t\r\n\f', true); testRegex('\\s*', '', true); }); it("Mismatches", function () { testRegex('\\s', ['', 'a', '\\s', ' '], false); }); }); describe("Non-Whitespace", function () { it("Matches", function () { testRegex('\\S', 'a', true); }); it("Mismatches", function () { testRegex('\\S', [' ', '\n', '\t', '\r', '\f'], false); }); }); describe("Digit Alias", function () { it("Matches", function () { testRegex('\\d', ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], true); }); it("Mismatches", function () { testRegex('\\d', ['00', 'a', '.', ' '], false); }); }); describe("Non-Digit Alias", function () { it("Matches", function () { testRegex('\\D', ['a', '.', ' '], true); }); it("Mismatches", function () { testRegex('\\D', ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], false); }); }); function expectError(regexText, consumed){ expect(function(){ parser.compile(regexText) }).toThrow('Unable to parse regex, consumed "' + consumed + '"') } describe("Invalid Regexes", function(){ it("Throw Errors", function(){ expectError('*', ''); expectError('abc[]', 'abc'); expectError('abc[xyz', 'abc'); expectError('+++', ''); expectError('a(', 'a'); expectError('abc[^]', 'abc'); expectError('a{z}', 'a'); expectError('a{1,x}', 'a'); }) }); });<|fim▁end|>
<|file_name|>baseFilter.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for ag-grid-community v20.2.0 // Project: http://www.ag-grid.com/ // Definitions by: Niall Crosby <https://github.com/ag-grid/> import { Component } from "../widgets/component"; import { IFilterOptionDef, IDoesFilterPassParams, IFilterComp, IFilterParams } from "../interfaces/iFilter"; import { GridOptionsWrapper } from "../gridOptionsWrapper"; import { FloatingFilterChange } from "./floatingFilter"; import { INumberFilterParams, ITextFilterParams } from "./textFilter"; export interface Comparator<T> { (left: T, right: T): number; } export declare enum FilterConditionType { MAIN = 0, CONDITION = 1 } export interface CombinedFilter<T> { operator: string; condition1: T; condition2: T; } /** * T(ype) The type of this filter. ie in DateFilter T=Date * P(arams) The params that this filter can take * M(model getModel/setModel) The object that this filter serializes to * F Floating filter params * * Contains common logic to ALL filters.. Translation, apply and clear button * get/setModel context wiring.... */ export declare abstract class BaseFilter<T, P extends IFilterParams, M> extends Component implements IFilterComp { static EMPTY: string; static EQUALS: string; static NOT_EQUAL: string; static LESS_THAN: string; static LESS_THAN_OR_EQUAL: string; static GREATER_THAN: string; static GREATER_THAN_OR_EQUAL: string; static IN_RANGE: string; static CONTAINS: string; static NOT_CONTAINS: string; static STARTS_WITH: string; static ENDS_WITH: string; private newRowsActionKeep; customFilterOptions: { [name: string]: IFilterOptionDef; }; filterParams: P; clearActive: boolean; applyActive: boolean; defaultFilter: string; selectedFilter: string; selectedFilterCondition: string; private eButtonsPanel; private eFilterBodyWrapper; private eApplyButton; private eClearButton; private eConditionWrapper; conditionValue: string; gridOptionsWrapper: GridOptionsWrapper; init(params: P): void; onClearButton(): void; abstract customInit(): void; abstract isFilterActive(): boolean; abstract modelFromFloatingFilter(from: string): M; abstract doesFilterPass(params: IDoesFilterPassParams): boolean; abstract bodyTemplate(type: FilterConditionType): string; abstract resetState(resetConditionFilterOnly?: boolean): void; abstract serialize(type: FilterConditionType): M; abstract parse(toParse: M, type: FilterConditionType): void; abstract refreshFilterBodyUi(type: FilterConditionType): void; abstract initialiseFilterBodyUi(type: FilterConditionType): void; abstract isFilterConditionActive(type: FilterConditionType): boolean; floatingFilter(from: string): void; onNewRowsLoaded(): void; getModel(): M | CombinedFilter<M>;<|fim▁hole|> private doOnFilterChanged; onFilterChanged(applyNow?: boolean): void; private redrawCondition; private refreshOperatorUi; onFloatingFilterChanged(change: FloatingFilterChange): boolean; generateFilterHeader(type: FilterConditionType): string; private generateTemplate; acceptsBooleanLogic(): boolean; wrapCondition(mainCondition: string): string; private createConditionTemplate; private createConditionBody; translate(toTranslate: string): string; getDebounceMs(filterParams: ITextFilterParams | INumberFilterParams): number; doesFilterHaveHiddenInput(filterType: string): boolean; } /** * Every filter with a dropdown where the user can specify a comparing type against the filter values */ export declare abstract class ComparableBaseFilter<T, P extends IComparableFilterParams, M> extends BaseFilter<T, P, M> { private 
eTypeSelector; private eTypeConditionSelector; private suppressAndOrCondition; abstract getApplicableFilterTypes(): string[]; abstract filterValues(type: FilterConditionType): T | T[]; abstract individualFilterPasses(params: IDoesFilterPassParams, type: FilterConditionType): boolean; doesFilterPass(params: IDoesFilterPassParams): boolean; init(params: P): void; customInit(): void; acceptsBooleanLogic(): boolean; generateFilterHeader(type: FilterConditionType): string; initialiseFilterBodyUi(type: FilterConditionType): void; abstract getDefaultType(): string; private onFilterTypeChanged; isFilterActive(): boolean; setFilterType(filterType: string, type: FilterConditionType): void; isFilterConditionActive(type: FilterConditionType): boolean; } export interface NullComparator { equals?: boolean; lessThan?: boolean; greaterThan?: boolean; } export interface IComparableFilterParams extends IFilterParams { suppressAndOrCondition: boolean; } export interface IScalarFilterParams extends IComparableFilterParams { inRangeInclusive?: boolean; nullComparator?: NullComparator; } /** * Comparable filter with scalar underlying values (ie numbers and dates. Strings are not scalar so have to extend * ComparableBaseFilter) */ export declare abstract class ScalarBaseFilter<T, P extends IScalarFilterParams, M> extends ComparableBaseFilter<T, P, M> { static readonly DEFAULT_NULL_COMPARATOR: NullComparator; abstract comparator(): Comparator<T>; private nullComparator; getDefaultType(): string; private translateNull; individualFilterPasses(params: IDoesFilterPassParams, type: FilterConditionType): boolean; private doIndividualFilterPasses; }<|fim▁end|>
getNullableModel(): M | CombinedFilter<M>; setModel(model: M | CombinedFilter<M>): void;
<|file_name|>update-downloads.rs<|end_file_name|><|fim▁begin|>#![deny(warnings)] extern crate cargo_registry; extern crate postgres; extern crate semver; extern crate time; use std::env; use std::collections::HashMap; use time::Duration; use cargo_registry::{VersionDownload, Version, Model}; static LIMIT: i64 = 1000; #[allow(dead_code)] // dead in tests fn main() { let daemon = env::args().nth(1).as_ref().map(|s| &s[..]) == Some("daemon"); let sleep = env::args().nth(2).map(|s| s.parse::<u32>().unwrap()); loop { let conn = postgres::Connection::connect(&env("DATABASE_URL")[..], &postgres::SslMode::None).unwrap(); update(&conn).unwrap(); drop(conn); if daemon { std::thread::sleep_ms(sleep.unwrap() * 1000); } else { break } } } fn env(s: &str) -> String { match env::var(s).ok() { Some(s) => s, None => panic!("must have `{}` defined", s), } } fn update(conn: &postgres::GenericConnection) -> postgres::Result<()> { let mut max = 0; loop { let tx = try!(conn.transaction()); { let stmt = try!(tx.prepare("SELECT * FROM version_downloads \ WHERE processed = FALSE AND id > $1 ORDER BY id ASC LIMIT $2")); let mut rows = try!(stmt.query(&[&max, &LIMIT])); match try!(collect(&tx, &mut rows)) { None => break, Some(m) => max = m, } } tx.set_commit(); try!(tx.finish()); } Ok(()) } fn collect(tx: &postgres::Transaction, rows: &mut postgres::Rows) -> postgres::Result<Option<i32>> { // Anything older than 24 hours ago will be frozen and will not be queried // against again. let cutoff = time::now_utc().to_timespec(); let cutoff = cutoff + Duration::days(-1); let mut map = HashMap::new(); for row in rows.iter() { let download: VersionDownload = Model::from_row(&row); assert!(map.insert(download.id, download).is_none()); } println!("updating {} versions (cutoff {})", map.len(), time::at(cutoff).rfc822()); if map.len() == 0 { return Ok(None) } let mut max = 0; let mut total = 0; for (id, download) in map.iter() { if *id > max { max = *id; } if download.date > cutoff && download.counted == download.downloads { continue<|fim▁hole|> } let amt = download.downloads - download.counted; let crate_id = Version::find(tx, download.version_id).unwrap().crate_id; // Update the total number of version downloads try!(tx.execute("UPDATE versions SET downloads = downloads + $1 WHERE id = $2", &[&amt, &download.version_id])); // Update the total number of crate downloads try!(tx.execute("UPDATE crates SET downloads = downloads + $1 WHERE id = $2", &[&amt, &crate_id])); // Update the total number of crate downloads for today let cnt = try!(tx.execute("UPDATE crate_downloads SET downloads = downloads + $2 WHERE crate_id = $1 AND date = date($3)", &[&crate_id, &amt, &download.date])); if cnt == 0 { try!(tx.execute("INSERT INTO crate_downloads (crate_id, downloads, date) VALUES ($1, $2, $3)", &[&crate_id, &amt, &download.date])); } // Flag this row as having been processed if we're passed the cutoff, // and unconditionally increment the number of counted downloads. try!(tx.execute("UPDATE version_downloads SET processed = $2, counted = counted + $3 WHERE id = $1", &[id, &(download.date < cutoff), &amt])); total += amt as i64; } // After everything else is done, update the global counter of total // downloads. 
try!(tx.execute("UPDATE metadata SET total_downloads = total_downloads + $1", &[&total])); Ok(Some(max)) } #[cfg(test)] mod test { use std::collections::HashMap; use postgres; use semver; use cargo_registry::{Version, Crate, User}; fn conn() -> postgres::Connection { postgres::Connection::connect(&::env("TEST_DATABASE_URL")[..], &postgres::SslMode::None).unwrap() } fn user(conn: &postgres::Transaction) -> User{ User::find_or_insert(conn, "login", None, None, None, "access_token", "api_token").unwrap() } fn crate_downloads(tx: &postgres::Transaction, id: i32, expected: usize) { let stmt = tx.prepare("SELECT * FROM crate_downloads WHERE crate_id = $1").unwrap(); let dl: i32 = stmt.query(&[&id]).unwrap().iter() .next().unwrap().get("downloads"); assert_eq!(dl, expected as i32); } #[test] fn increment() { let conn = conn(); let tx = conn.transaction().unwrap(); let user = user(&tx); let krate = Crate::find_or_insert(&tx, "foo", user.id, &None, &None, &None, &None, &[], &None, &None, &None).unwrap(); let version = Version::insert(&tx, krate.id, &semver::Version::parse("1.0.0").unwrap(), &HashMap::new(), &[]).unwrap(); tx.execute("INSERT INTO version_downloads \ (version_id, downloads, counted, date, processed) VALUES ($1, 1, 0, current_date, false)", &[&version.id]).unwrap(); tx.execute("INSERT INTO version_downloads \ (version_id, downloads, counted, date, processed) VALUES ($1, 1, 0, current_date, true)", &[&version.id]).unwrap(); ::update(&tx).unwrap(); assert_eq!(Version::find(&tx, version.id).unwrap().downloads, 1); assert_eq!(Crate::find(&tx, krate.id).unwrap().downloads, 1); crate_downloads(&tx, krate.id, 1); ::update(&tx).unwrap(); assert_eq!(Version::find(&tx, version.id).unwrap().downloads, 1); } #[test] fn set_processed_true() { let conn = conn(); let tx = conn.transaction().unwrap(); let user = user(&tx); let krate = Crate::find_or_insert(&tx, "foo", user.id, &None, &None, &None, &None, &[], &None, &None, &None).unwrap(); let version = Version::insert(&tx, krate.id, &semver::Version::parse("1.0.0").unwrap(), &HashMap::new(), &[]).unwrap(); tx.execute("INSERT INTO version_downloads \ (version_id, downloads, counted, date, processed) VALUES ($1, 2, 2, current_date - interval '2 days', false)", &[&version.id]).unwrap(); ::update(&tx).unwrap(); let stmt = tx.prepare("SELECT processed FROM version_downloads WHERE version_id = $1").unwrap(); let processed: bool = stmt.query(&[&version.id]).unwrap().iter() .next().unwrap().get("processed"); assert!(processed); } #[test] fn increment_a_little() { let conn = conn(); let tx = conn.transaction().unwrap(); let user = user(&tx); let krate = Crate::find_or_insert(&tx, "foo", user.id, &None, &None, &None, &None, &[], &None, &None, &None).unwrap(); let version = Version::insert(&tx, krate.id, &semver::Version::parse("1.0.0").unwrap(), &HashMap::new(), &[]).unwrap(); tx.execute("INSERT INTO version_downloads \ (version_id, downloads, counted, date, processed) VALUES ($1, 2, 1, current_date, false)", &[&version.id]).unwrap(); tx.execute("INSERT INTO version_downloads \ (version_id, downloads, counted, date, processed) VALUES ($1, 1, 0, current_date, false)", &[&version.id]).unwrap(); ::update(&tx).unwrap(); assert_eq!(Version::find(&tx, version.id).unwrap().downloads, 2); assert_eq!(Crate::find(&tx, krate.id).unwrap().downloads, 2); crate_downloads(&tx, krate.id, 2); ::update(&tx).unwrap(); assert_eq!(Version::find(&tx, version.id).unwrap().downloads, 2); } }<|fim▁end|>
<|file_name|>ObjectReference2.cpp<|end_file_name|><|fim▁begin|>/* * ObjectReference2.cpp * * Created on: Oct 16, 2015 * Author: kfulks */ #include "../../Concept/Reference/ObjectReference2.h" #include <iostream> namespace gitux { using namespace std; ObjectReference2::ObjectReference2() : myRef() { // TODO Auto-generated constructor stub cout << "ObjectReference2 constructor" << endl; } ObjectReference2::~ObjectReference2() { // TODO Auto-generated destructor stub cout << "ObjectReference2 destructor" << endl; } void ObjectReference2::SetReference(int &testRef) { myRef = &testRef; } void ObjectReference2::ChangeReference(int &testRef) { testRef = 2; } void ObjectReference2::PrintReference() {<|fim▁hole|>} /* namespace gitux */<|fim▁end|>
cout << "ObjectReference2 value is " << *myRef << endl; }
<|file_name|>HttpClientExample.java<|end_file_name|><|fim▁begin|>package components; import org.apache.http.HttpResponse; import org.apache.http.NameValuePair; import org.apache.http.client.HttpClient; import org.apache.http.client.entity.UrlEncodedFormEntity; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.impl.client.DefaultHttpClient; import org.apache.http.message.BasicNameValuePair; import java.io.BufferedReader; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.List; import java.util.Map; public class HttpClientExample { private final String USER_AGENT = "Mozilla/5.0"; public String sendGet(String url) throws Exception { HttpClient client = new DefaultHttpClient(); HttpGet request = new HttpGet(url); request.addHeader("User-Agent", USER_AGENT); HttpResponse response = client.execute(request); BufferedReader rd = new BufferedReader(new InputStreamReader(response.getEntity().getContent())); StringBuffer result = new StringBuffer(); String line = ""; while ((line = rd.readLine()) != null) { result.append(line); } System.out.println(result.toString()); return result.toString(); } public String sendPost(String url, Map<String, String> param) throws Exception { HttpClient client = new DefaultHttpClient();<|fim▁hole|> for (Map.Entry<String, String> entry : param.entrySet()) { urlParameters.add(new BasicNameValuePair(entry.getKey(), entry.getValue())); } post.setEntity(new UrlEncodedFormEntity(urlParameters)); HttpResponse response = client.execute(post); BufferedReader rd = new BufferedReader(new InputStreamReader(response.getEntity().getContent())); StringBuffer result = new StringBuffer(); String line = ""; while ((line = rd.readLine()) != null) { result.append(line); } System.out.println(result.toString()); return result.toString(); } }<|fim▁end|>
HttpPost post = new HttpPost(url); post.setHeader("User-Agent", USER_AGENT); List<NameValuePair> urlParameters = new ArrayList<NameValuePair>();
<|file_name|>snake_agent.py<|end_file_name|><|fim▁begin|># Example agent for the 'lost Wumpus' task. The agent moves across the board in a snake-like pattern. import random from action import Action # do not change the class name class Agent: # do not change the constructor signature; here the agent receives all the information about the environment def __init__(self, p, pj, pn, height, width, areaMap): self.times_moved = 0 self.direction = Action.LEFT 
<|fim▁hole|>

 # this is how the object's variables can be stored self.p = p self.pj = pj self.pn = pn self.height = height self.width = width self.map = areaMap # in this example the histogram is filled so that a gradient appears on the board self.hist = [] for y in range(self.height): self.hist.append([]) for x in range(self.width): self.hist[y].append(float(y + x) / (self.width + self.height - 2)) # add the rest of the agent initialization here return # do not change the method signature; here the agent observes the world # sensor takes the value True when the agent has the feeling of standing in a pit def sense(self, sensor): pass # do not change the method signature; here the agent decides which way to move, # the function MUST return one of the values [Action.UP, Action.DOWN, Action.LEFT, Action.RIGHT] def move(self): if self.times_moved < self.width - 1: self.times_moved += 1 return self.direction else: self.times_moved = 0 self.direction = Action.RIGHT if self.direction == Action.LEFT else Action.LEFT return Action.DOWN # do not change the method signature; here the agent exposes its histogram (the one from the # histogram filter), it must be an array (list of lists, tuple of tuples...) with the same dimensions as # the board; fetching the value agent.histogram()[y][x] returns the probability of standing on the field # in row y and column x def histogram(self): return self.hist<|fim▁end|>
<|file_name|>function_statement.hh<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2019 ScyllaDB */ /* * This file is part of Scylla. * * Scylla is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Scylla is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Scylla. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #include "cql3/statements/schema_altering_statement.hh" #include "cql3/functions/user_function.hh" namespace cql3 { namespace statements { class function_statement : public schema_altering_statement { protected: virtual future<> check_access(service::storage_proxy& proxy, const service::client_state& state) const override; virtual void prepare_keyspace(const service::client_state& state) override; functions::function_name _name; std::vector<shared_ptr<cql3_type::raw>> _raw_arg_types;<|fim▁hole|> const functions::function& func, bool created); function_statement(functions::function_name name, std::vector<shared_ptr<cql3_type::raw>> raw_arg_types); void create_arg_types(service::storage_proxy& proxy) const; data_type prepare_type(service::storage_proxy& proxy, cql3_type::raw &t) const; }; // common logic for creating UDF and UDA class create_function_statement_base : public function_statement { protected: virtual void validate(service::storage_proxy& proxy, const service::client_state& state) const override; virtual void create(service::storage_proxy& proxy, functions::function* old) const = 0; bool _or_replace; bool _if_not_exists; create_function_statement_base(functions::function_name name, std::vector<shared_ptr<cql3_type::raw>> raw_arg_types, bool or_replace, bool if_not_exists); }; // common logic for dropping UDF and UDA class drop_function_statement_base : public function_statement { protected: virtual void validate(service::storage_proxy&, const service::client_state& state) const override; bool _args_present; bool _if_exists; mutable shared_ptr<functions::function> _func{}; drop_function_statement_base(functions::function_name name, std::vector<shared_ptr<cql3_type::raw>> arg_types, bool args_present, bool if_exists); }; } }<|fim▁end|>
mutable std::vector<data_type> _arg_types; static shared_ptr<cql_transport::event::schema_change> create_schema_change(
<|file_name|>train.py<|end_file_name|><|fim▁begin|>''' Training file with functions for 1) Taking in the inputs 2) Defining the model 3) Reading the input and generating batches 4) Defining the loss, learning rate and optimization functions 5) Running multiple epochs on training and testing ''' import argparse from read_input import * from model import * import tensorflow as tf import time def run_epoch(session, model, train_op, data, max_batches, args): ''' Run the model under given session for max_batches based on args :param model: model on which the operations take place :param session: session for tensorflow :param train_op: training output variable name, pass as tf.no_op() for validation and testing :param data: train, validation or testing data :param max_batches: maximum number of batches that can be called :param args: arguments provided by user in main :return: perplexity ''' # to run a session you need the list of tensors/graph nodes and the feed dict # for us its the cost, final_state, and optimizer # you feed in the (x,y) pairs, and you also propagate the state across the batches state = np.zeros((args.batch_size,model.lstm_layer.state_size)) tot_cost = 0.0 start_time = time.time() iters = 0 for i in range(max_batches): x, y = data.next() cur_cost, curr_state, _ = session.run([model.cost,model.final_state,train_op], feed_dict={model.input_layer: x, model.targets: y, model.initial_state: state}) tot_cost += cur_cost state = curr_state iters += args.batch_len if i % (max_batches//50) == 0: print 'iteration %.3f perplexity: %.3f speed: %.0f wps' %\ (i, np.exp(tot_cost/iters), iters*args.batch_size/(time.time()-start_time)) return np.exp(tot_cost/iters) # TODO: Add model saving and loading def main(): # parse arguments parser = argparse.ArgumentParser() parser.add_argument('--filename', type=str, default='./data/tinyshakespeare/input.txt', help='data location for all data') parser.add_argument('--split_ratio', type =list, default=[0.9,0.05,0.05], help='split ratio for train, validation and test') parser.add_argument('--batch_size', type=int, default=1, help='batch size for data') parser.add_argument('--batch_len', type=int, default=1, help='number of time steps to unroll') parser.add_argument('--cell', type=str, default='lstm', help='the cell type to use, currently only LSTM') parser.add_argument('--num_layers', type=int, default=1, help='depth of hidden units in the model') parser.add_argument('--hidden_units', type=int, default=32, help='number of hidden units in the cell') parser.add_argument('--num_epochs', type=int, default=50, help='max number of epochs to run the training') parser.add_argument('--lr_rate', type=float, default=2e-5, help='learning rate') parser.add_argument('--lr_decay', type=float, default=0.97, help='learning rate decay') parser.add_argument('--drop_prob', type=float, default=0, help='optimization function to be used') parser.add_argument('--grad_clip', type=float, default=5.0, help='clip gradients at this value') parser.add_argument('--stateful', type=bool, default=True, help='save at every batches') args = parser.parse_args() # load data if args.filename[-3:] == 'zip': data = load_zip_data(args.filename) elif args.filename[-3:] == 'txt': data = load_csv_file(args.filename) else: raise NotImplementedError("File extension not supported") train, val ,test = train_test_split(data, args.split_ratio) batch_train = BatchGenerator(train,args.batch_size,args.batch_len) batch_train.create_batches() max_batches_train = batch_train.epoch_size # New chars seen in 
test time will have a problem args.data_dim = batch_train.vocab_size batch_val = BatchGenerator(val,args.batch_size,args.batch_len) batch_val.create_batches() max_batches_val = batch_val.epoch_size batch_test = BatchGenerator(test,args.batch_size,args.batch_len) batch_test.create_batches() max_batches_test = batch_test.epoch_size print max_batches_train, max_batches_val, max_batches_test # Initialize session and graph with tf.Graph().as_default(), tf.Session() as session: initializer = tf.random_uniform_initializer(-0.1,0.1) with tf.variable_scope("model",reuse=None,initializer=initializer): train_model = Model(args, is_training=True, is_inference=False) with tf.variable_scope("model",reuse=True,initializer=initializer): val_model = Model(args, is_training=False, is_inference=False) test_model = Model(args, is_training=False, is_inference=False) tf.initialize_all_variables().run() <|fim▁hole|> for i in range(args.num_epochs): # TODO: Add parameter for max_max_epochs lr_decay = args.lr_decay ** max(i-10.0,0.0) train_model.assign_lr(session, args.lr_rate*lr_decay) # run a complete epoch and return appropriate variables train_perplexity = run_epoch(session, train_model, train_model.train_op, batch_train, max_batches_train, args) print 'Epoch %d, Train Perplexity: %.3f' %(i+1, train_perplexity) val_perplexity = run_epoch(session, val_model, tf.no_op(), batch_val, max_batches_val, args) print 'Epoch %d, Val Perplexity: %.3f' %(i+1, val_perplexity) test_perplexity = run_epoch(session, test_model, tf.no_op(), batch_test, max_batches_test, args) print 'Test Perplexity: %.3f' % test_perplexity if __name__ == "__main__": main()<|fim▁end|>
<|file_name|>static-methods-crate.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. //<|fim▁hole|>// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #[link(name = "static_methods_crate", vers = "0.1")]; #[crate_type = "lib"]; use std::int; pub trait read { fn readMaybe(s: ~str) -> Option<Self>; } impl read for int { fn readMaybe(s: ~str) -> Option<int> { from_str::<int>(s) } } impl read for bool { fn readMaybe(s: ~str) -> Option<bool> { match s { ~"true" => Some(true), ~"false" => Some(false), _ => None } } } pub fn read<T:read>(s: ~str) -> T { match read::readMaybe(s) { Some(x) => x, _ => fail2!("read failed!") } }<|fim▁end|>
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<|file_name|>source.js<|end_file_name|><|fim▁begin|>'use strict'; module.exports = Source; const inherits = require('util').inherits; const Stream = require('../stream'); const Chunk = require('../chunk'); const Compose = require('../through/compose'); const Break = require('../through/break'); const Filter = require('../through/filter'); const Map = require('../through/map'); const Take = require('../through/take'); const Each = require('../feed/each'); const Value = require('../feed/value'); module.exports = Source; inherits(Source, Stream); function Source(){ Stream.call(this); this.async = false; } Source.prototype.iterator = function iterator() { return new this.constructor.Iterator(this); }; Source.prototype.pipe = function pipe(feed) { return feed.feed([this]); }; Source.prototype.break = function breake(fn, async){ return this.pipe(new Break(fn, async)); };<|fim▁hole|>}; Source.prototype.map = function map(fn, async){ return this.pipe(new Map(fn, async)); }; Source.prototype.take = function take(max){ return this.pipe(new Take(max)); }; Source.prototype.each = function each(fn){ return this.pipe(new Each(fn)); }; Source.prototype.value = function value(async){ return this.pipe(new Value(async)); };<|fim▁end|>
Source.prototype.filter = function filter(fn, async){ return this.pipe(new Filter(fn, async));
<|file_name|>ixio.py<|end_file_name|><|fim▁begin|>import logging import re from datetime import datetime from typing import List, Any, Dict, Union, Pattern from pastehunter.common import base62_decode, base62_encode from pastehunter.inputs.base_input import BasePasteSite logger = logging.getLogger('pastehunter') class IxDotIoSite(BasePasteSite): # Yeah, yeah, I know, no regex for HTML parsing... # If we end up doing a lot more of this, then maybe we'll use beautifulsoup or something. # Capturing groups: # 1. Paste ID # 2. Timestamp _ITEM_ID_RE: Pattern = re.compile('<div class="t">[\\sa-zA-Z0-9]+' '<a href="/(.*?)">\\[r][^\r\n]+' '\\s+@ (.*?)[\r\n]') def __init__(self, conf): self.conf = conf self.site = "ix.io" url_main = "http://" + self.site self.url_recent = url_main + "/user/" self.view_pattern = url_main + "/{}/" self.raw_pattern = url_main + "/{}" self.url = None def remap_raw_item(self, raw_item: [str, Dict]) -> Dict[str, Any]: pid = raw_item['pid'] paste_data = { # at a 'filename': str(pid), 'confname': 'ixio', 'pastesite': self.site, 'pasteid': pid, } # Timezone is UTC/Zulu date = datetime.strptime(raw_item['date'], '%a %b %d %H:%M:%S %Y').isoformat() paste_data['@timestamp'] = date encoded_pid = self.get_paste_id(paste_data) paste_data['scrape_url'] = self.raw_pattern.format(encoded_pid) return paste_data def get_paste_for_id(self, paste_id: Any) -> str: self.make_request(self.raw_pattern.format(paste_id)) def get_paste_id(self, paste_obj: Dict[str, Any]) -> str: decoded = paste_obj.get('pasteid') return base62_encode(decoded) def get_recent_items(self, input_history: List[str]): history = [] paste_list = [] try: recent_page = self.make_request(self.url_recent) item_data = self.get_data_for_page(recent_page.text) for val in item_data: # Track paste ids to prevent dupes pid = val['pid'] history.append(pid) if pid in input_history: continue paste_data = self.remap_raw_item(val) paste_list.append(paste_data) return paste_list, history except Exception as e: logger.error("Unable to parse ixio items: {0}".format(e)) return paste_list, history def get_data_for_page(self, page_data: str) -> List[Dict[str, Union[int, str]]]: page: List[Dict[str, Union[int, str]]] = [] last_item_id = -1 regex_matches = self._ITEM_ID_RE.findall(page_data) # We are going to reverse the order because ix pages are structured newest -> oldest, and this makes it simpler. regex_matches.reverse() for encoded_id, created_at in regex_matches: # Okay so the logic here is a bit tricky. Basically, ix's all user page only returns anonymous pastes # BUT! We can infer the paste ids that aren't present by filling in the blanks, because ix IDs are # incremental. So first, we base62 decode the value so we can use it as an int item_id = base62_decode(encoded_id) # Then, we check if we've seen another value. If this is our first, we can skip a lot of this logic. # (we probably don't want to go back and grab every ix paste historically for most use cases) if last_item_id == -1: page.append({'pid': item_id, 'date': created_at}) last_item_id = item_id # If there has been a delta, let's traverse it. 
elif item_id - last_item_id > 1: # We've already hit last_item_id so we skip that and fill in the delta for i in range(last_item_id + 1, item_id + 1): # Copy the created date as a best guess page.append({'pid': i, 'date': created_at}) last_item_id = item_id else: # If there's no delta, just add this normally<|fim▁hole|>
 return page def recent_pastes(conf, input_history): site = IxDotIoSite(conf) # populate vars from config return site.get_recent_items(input_history)<|fim▁end|>
page.append({'pid': item_id, 'date': created_at}) last_item_id = item_id
<|file_name|>action_handler.js<|end_file_name|><|fim▁begin|>/** @module ember @submodule ember-runtime */ import Ember from 'ember-metal/core'; import { Mixin } from 'ember-metal/mixin'; import { get } from 'ember-metal/property_get'; import { deprecateProperty } from 'ember-metal/deprecate_property'; /** `Ember.ActionHandler` is available on some familiar classes including `Ember.Route`, `Ember.View`, `Ember.Component`, and `Ember.Controller`. (Internally the mixin is used by `Ember.CoreView`, `Ember.ControllerMixin`, and `Ember.Route` and available to the above classes through inheritance.) @class ActionHandler @namespace Ember @private */ var ActionHandler = Mixin.create({ mergedProperties: ['actions'], /** The collection of functions, keyed by name, available on this `ActionHandler` as action targets. These functions will be invoked when a matching `{{action}}` is triggered from within a template and the application's current route is this route. Actions can also be invoked from other parts of your application via `ActionHandler#send`. The `actions` hash will inherit action handlers from the `actions` hash defined on extended parent classes or mixins rather than just replace the entire hash, e.g.: ```js App.CanDisplayBanner = Ember.Mixin.create({ actions: { displayBanner: function(msg) { // ... } } }); App.WelcomeRoute = Ember.Route.extend(App.CanDisplayBanner, { actions: { playMusic: function() { // ... } } }); // `WelcomeRoute`, when active, will be able to respond // to both actions, since the actions hash is merged rather // then replaced when extending mixins / parent classes. this.send('displayBanner'); this.send('playMusic'); ``` Within a Controller, Route, View or Component's action handler, the value of the `this` context is the Controller, Route, View or Component object: ```js App.SongRoute = Ember.Route.extend({ actions: { myAction: function() { this.controllerFor("song"); this.transitionTo("other.route"); ... } } }); ``` It is also possible to call `this._super.apply(this, arguments)` from within an action handler if it overrides a handler defined on a parent class or mixin: Take for example the following routes: ```js App.DebugRoute = Ember.Mixin.create({ actions: { debugRouteInformation: function() { console.debug("trololo"); } } }); App.AnnoyingDebugRoute = Ember.Route.extend(App.DebugRoute, { actions: { debugRouteInformation: function() { // also call the debugRouteInformation of mixed in App.DebugRoute this._super.apply(this, arguments); // show additional annoyance window.alert(...); } } }); ``` ## Bubbling By default, an action will stop bubbling once a handler defined on the `actions` hash handles it. To continue bubbling the action, you must return `true` from the handler: ```js App.Router.map(function() { this.route("album", function() { this.route("song"); }); }); App.AlbumRoute = Ember.Route.extend({ actions: { startPlaying: function() { } } }); App.AlbumSongRoute = Ember.Route.extend({ actions: { startPlaying: function() { // ... if (actionShouldAlsoBeTriggeredOnParentRoute) { return true; } } } }); ``` @property actions @type Object @default null @public */ /** Triggers a named action on the `ActionHandler`. Any parameters supplied after the `actionName` string will be passed as arguments to the action target function. If the `ActionHandler` has its `target` property set, actions may bubble to the `target`. Bubbling happens when an `actionName` can not be found in the `ActionHandler`'s `actions` hash or if the action target function returns `true`. 
Example ```js App.WelcomeRoute = Ember.Route.extend({ actions: { playTheme: function() { this.send('playMusic', 'theme.mp3'); }, playMusic: function(track) { // ...<|fim▁hole|> }); ``` @method send @param {String} actionName The action to trigger @param {*} context a context to send with the action @public */ send(actionName, ...args) { var target; if (this.actions && this.actions[actionName]) { var shouldBubble = this.actions[actionName].apply(this, args) === true; if (!shouldBubble) { return; } } if (target = get(this, 'target')) { Ember.assert('The `target` for ' + this + ' (' + target + ') does not have a `send` method', typeof target.send === 'function'); target.send(...arguments); } } }); export default ActionHandler; export function deprecateUnderscoreActions(factory) { deprecateProperty(factory.prototype, '_actions', 'actions', { id: 'ember-runtime.action-handler-_actions', until: '3.0.0' }); }<|fim▁end|>
} }
<|file_name|>utility-classes.js<|end_file_name|><|fim▁begin|>(function webpackUniversalModuleDefinition(root, factory) { if(typeof exports === 'object' && typeof module === 'object') module.exports = factory(); else if(typeof define === 'function' && define.amd) define([], factory); else { var a = factory(); for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i]; } })(this, function() { return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) /******/ return installedModules[moduleId].exports; /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ exports: {}, /******/ id: moduleId, /******/ loaded: false /******/ }; /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ // Flag the module as loaded /******/ module.loaded = true; /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; /******/ // Load entry module and return exports /******/ return __webpack_require__(0); /******/ })<|fim▁hole|>/***/ 0: /***/ function(module, exports, __webpack_require__) { __webpack_require__(228); /***/ }, /***/ 228: /***/ function(module, exports, __webpack_require__) { module.exports = __webpack_require__(229); /***/ }, /***/ 229: /***/ function(module, exports, __webpack_require__) { /** * @name Utility Classes * @collection core * @example-file ./examples.html */ __webpack_require__(230); /***/ }, /***/ 230: /***/ function(module, exports) { // removed by extract-text-webpack-plugin /***/ } /******/ }) }); ;<|fim▁end|>
/************************************************************************/ /******/ ({
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>"""Unittests that do not require the server to be running an common tests of responses. The TestCase here just calls the functions that provide the logic to the ws views with DummyRequest objects to mock a real request. The functions starting with `check_...` are called with UnitTest.TestCase instance as the first arg and the response. These functions are used within the unit tests in this file, but also in the `ws-tests` calls that perform the tests through http. """ import os import unittest from pyramid import testing from phylesystem_api.utility import fill_app_settings, umbrella_from_request from phylesystem_api.views import import_nexson_from_crossref_metadata def get_app_settings_for_testing(settings): """Fills the settings of a DummyRequest, with info from the development.ini This allows the dummy requests to mock a real request wrt configuration-dependent settings.""" from peyotl.utility.imports import SafeConfigParser cfg = SafeConfigParser() devini_path = os.path.abspath(os.path.join('..', 'development.ini')) if not os.path.isfile(devini_path): raise RuntimeError('Expecting a INI file at "{}" to run tests'.format(devini_path)) cfg.read(devini_path) settings['repo_parent'] = cfg.get('app:main', 'repo_parent') fill_app_settings(settings=settings) def gen_versioned_dummy_request(): """Adds a version number (3) to the request to mimic the matching based on URL in the real app. """ req = testing.DummyRequest() get_app_settings_for_testing(req.registry.settings) req.matchdict['api_version'] = 'v3' return req def check_index_response(test_case, response): """Verifies the existene of expected keys in the response to an index call. 'documentation_url', 'description', and 'source_url' keys must be in the response. """ for k in ['documentation_url', 'description', 'source_url']: test_case.assertIn(k, response) def check_render_markdown_response(test_case, response): """Check of `response` to a `render_markdown` call.""" expected = '<p>hi from <a href="http://phylo.bio.ku.edu" target="_blank">' \ 'http://phylo.bio.ku.edu</a> and ' \ '<a href="https://github.com/orgs/OpenTreeOfLife/dashboard" target="_blank">' \ 'https://github.com/orgs/OpenTreeOfLife/dashboard</a></p>' test_case.assertEquals(response.body, expected) def check_study_list_and_config_response(test_case, sl_response, config_response, from_generic_config): """Checks of responses from study_list, config, and the generic config calls.""" nsis = sum([i['number of documents'] for i in config_response['shards']]) test_case.assertEquals(nsis, len(sl_response)) test_case.assertEquals(from_generic_config, config_response) def check_unmerged_response(test_case, ub): """Check of `ub` response from an `unmerged_branches` call""" test_case.assertTrue('master' not in ub) def check_config_response(test_case, cfg): """Check of `cfg` response from a `config` call""" test_case.assertSetEqual(set(cfg.keys()), {"initialization", "shards", "number_of_shards"}) def check_external_url_response(test_case, doc_id, resp): """Simple check of an `external_url` `resp` response for `doc_id`. `doc_id` and `url` fields of the response are checked.""" test_case.assertEquals(resp.get('doc_id'), doc_id) test_case.assertTrue(resp.get('url', '').endswith('{}.json'.format(doc_id))) def check_push_failure_response(test_case, resp): """Check of the `resp` response of a `push_failure` method call to verify it has the right keys. 
""" test_case.assertSetEqual(set(resp.keys()), {"doc_type", "errors", "pushes_succeeding"}) test_case.assertTrue(resp["pushes_succeeding"]) render_test_input = 'hi from <a href="http://phylo.bio.ku.edu" target="new">' \ 'http://phylo.bio.ku.edu</a> and ' \ 'https://github.com/orgs/OpenTreeOfLife/dashboard' class ViewTests(unittest.TestCase): """UnitTest of the functions that underlie the ws views.""" def setUp(self): """Calls pyramid testing.setUp""" self.config = testing.setUp() def tearDown(self): """Calls pyramid testing.tearDown""" testing.tearDown() def test_index(self): """Test of index view""" request = gen_versioned_dummy_request() from phylesystem_api.views import index check_index_response(self, index(request)) def test_render_markdown(self): """Test of render_markdown view""" request = testing.DummyRequest(post={'src': render_test_input}) from phylesystem_api.views import render_markdown check_render_markdown_response(self, render_markdown(request)) def test_study_list_and_config(self): """Test of study_list and phylesystem_config views""" request = gen_versioned_dummy_request() from phylesystem_api.views import study_list sl = study_list(request) request = gen_versioned_dummy_request() from phylesystem_api.views import phylesystem_config x = phylesystem_config(request) request = gen_versioned_dummy_request() request.matchdict['resource_type'] = 'study' from phylesystem_api.views import generic_config y = generic_config(request) check_study_list_and_config_response(self, sl, x, y) if not sl: return from phylesystem_api.views import external_url doc_id = sl[0] request.matchdict['doc_id'] = doc_id e = external_url(request) check_external_url_response(self, doc_id, e) def test_unmerged(self): """Test of unmerged_branches view""" request = gen_versioned_dummy_request() request.matchdict['resource_type'] = 'study' from phylesystem_api.views import unmerged_branches check_unmerged_response(self, unmerged_branches(request)) def test_config(self): """Test of generic_config view""" request = gen_versioned_dummy_request() from phylesystem_api.views import phylesystem_config, generic_config r2 = phylesystem_config(request) check_config_response(self, r2) request.matchdict['resource_type'] = 'study' r = generic_config(request) check_config_response(self, r) self.assertDictEqual(r, r2) request.matchdict['resource_type'] = 'amendment' ra = generic_config(request) check_config_response(self, ra) self.assertNotEqual(ra, r) def test_push_failure_state(self): """Test of push_failure view""" request = gen_versioned_dummy_request() request.matchdict['resource_type'] = 'collection' from phylesystem_api.views import push_failure<|fim▁hole|> def test_doi_import(self): """Make sure that fetching from DOI generates a valid study shell.""" doi = "10.3732/ajb.0800060" document = import_nexson_from_crossref_metadata(doi=doi, ref_string=None, include_cc0=None) request = gen_versioned_dummy_request() request.matchdict['resource_type'] = 'study' umbrella = umbrella_from_request(request) errors = umbrella.validate_and_convert_doc(document, {})[1] self.assertEquals(len(errors), 0) if __name__ == '__main__': unittest.main()<|fim▁end|>
pf = push_failure(request) check_push_failure_response(self, pf)