prompt: large_string (lengths 70–991k)
completion: large_string (lengths 0–1.02k)
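Each record pairs a `prompt` (one source file with a span cut out and replaced by the `<|fim▁hole|>` marker, with the file name and body delimited by `<|file_name|>`, `<|fim▁begin|>`, and `<|fim▁end|>`) with the `completion` that fills that span. A minimal sketch of loading the data and stitching a record back together follows; it assumes the Hugging Face `datasets` library, and the dataset path `org/fim-code-completions` is a hypothetical placeholder for the real repository name.

```python
from datasets import load_dataset

# FIM markers exactly as they appear in the records; "\u2581" is the
# LOWER ONE EIGHTH BLOCK character used inside the marker names.
FIM_BEGIN = "<|fim\u2581begin|>"
FIM_HOLE = "<|fim\u2581hole|>"
FIM_END = "<|fim\u2581end|>"

# Hypothetical dataset path -- substitute the real repository name.
ds = load_dataset("org/fim-code-completions", split="train")

def reassemble(example: dict) -> str:
    """Stitch a record's completion back into the prompt's FIM hole."""
    body = example["prompt"].split(FIM_BEGIN, 1)[-1]  # drop the file-name header
    prefix, _, suffix = body.partition(FIM_HOLE)
    return prefix + example["completion"] + suffix.replace(FIM_END, "")

print(reassemble(ds[0])[:200])
```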
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # scikit-learn documentation build configuration file, created by # sphinx-quickstart on Fri Jan 8 09:13:42 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from __future__ import print_function import sys import os import warnings # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.insert(0, os.path.abspath('sphinxext')) from github_link import make_linkcode_resolve import sphinx_gallery import chainladder as cl # this is needed for some reason... # see https://github.com/numpy/numpydoc/issues/69 # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'numpydoc', 'sphinx_gallery.gen_gallery', 'sphinx.ext.githubpages', 'nbsphinx', 'sphinx.ext.mathjax', 'sphinx.ext.autosummary', 'sphinx_gallery.load_style', 'IPython.sphinxext.ipython_console_highlighting'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # this is needed for some reason... # see https://github.com/numpy/numpydoc/issues/69 numpydoc_class_members_toctree = False # For maths, use mathjax by default and svg if NO_MATHJAX env variable is set # (useful for viewing the doc offline) if os.environ.get('NO_MATHJAX'): extensions.append('sphinx.ext.imgmath') imgmath_image_format = 'svg' else: extensions.append('sphinx.ext.mathjax') mathjax_path = ('https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/' 'MathJax.js?config=TeX-AMS_SVG') autodoc_default_flags = ['members', 'inherited-members'] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # generate autosummary even if no references autosummary_generate = True # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'ChainLadder' copyright = '2017, John Bogaardt' author = 'John Bogaardt' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = cl.__version__ # The full version, including alpha/beta/rc tags. release = cl.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #<|fim▁hole|> # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'templates', 'includes', 'themes', '**.ipynb_checkpoints'] # The reST default role (used for this markup: `text`) to use for all # documents. 
default_role = 'any' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'scikit-learn' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {'oldversion': False, 'collapsiblesidebar': True, 'google_analytics': False, 'surveybanner': False, 'sprintbanner': False} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['themes'] # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'chainladder' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static/images'] # -- Options for HTMLHelp output ------------------------------------------ # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = False # Output file base name for HTML help builder. htmlhelp_basename = 'chainladderdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 'preamble': r""" \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm} \usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10} """ } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [('index', 'user_guide.tex', 'scikit-learn user guide', 'scikit-learn developers', 'manual'), ] # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False trim_doctests_flags = True # intersphinx configuration intersphinx_mapping = { 'python': ('https://docs.python.org/{.major}'.format( sys.version_info), None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None), 'matplotlib': ('https://matplotlib.org/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'joblib': ('https://joblib.readthedocs.io/en/latest/', None), } sphinx_gallery_conf = { 'doc_module': 'chainladder', 'backreferences_dir': os.path.join('modules', 'generated'), 'reference_url': {'chainladder': None}, 'capture_repr': () } # The following dictionary contains the information used to create the # thumbnails for the front page of the scikit-learn home page. 
# key: first image in set # values: (number of plot in set, height of thumbnail) carousel_thumbs = {'sphx_glr_plot_asvanced_triangle_001.png': 600, 'sphx_glr_plot_ave_analysis_001.png': 372, } def make_carousel_thumbs(app, exception): """produces the final resized carousel images""" if exception is not None: return print('Preparing carousel images') image_dir = os.path.join(app.builder.outdir, '_images') for glr_plot, max_width in carousel_thumbs.items(): image = os.path.join(image_dir, glr_plot) if os.path.exists(image): c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png') sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190) # Config for sphinx_issues issues_uri = 'https://github.com/casact/chainladder-python/issues/{issue}' issues_github_path = 'chainladder-python/chainladder' issues_user_uri = 'https://github.com/{user}' def setup(app): # to hide/show the prompt in code examples: app.add_js_file('js/copybutton.js') app.add_js_file('js/extra.js') app.connect('build-finished', make_carousel_thumbs) # The following is used by sphinx.ext.linkcode to provide links to github linkcode_resolve = make_linkcode_resolve('chainladder', u'https://github.com/casact/' 'chainladder-python/blob/{revision}/' '{package}/{path}#L{lineno}') warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib", message='Matplotlib is currently using agg, which is a' ' non-GUI backend, so cannot show the figure.')<|fim▁end|>
# This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. #language = None
<|file_name|>OutdatedTaskConfiguration.java<|end_file_name|><|fim▁begin|>package com.liferay.micro.maintainance.configuration; import aQute.bnd.annotation.metatype.Meta; /** * @author Rimi Saadou * @author Laszlo Hudak */ @Meta.OCD( id = "com.liferay.micro.maintainance.configuration.OutdatedTaskConfiguration" ) public interface OutdatedTaskConfiguration { @Meta.AD(deflt = "14", required = false) public int votingPeriodDays(); @Meta.AD(deflt = "5", required = false) public int requiredVotingPercentage(); @Meta.AD(deflt = "5", required = false) public int requiredYesVotesPercentage();<|fim▁hole|><|fim▁end|>
}
<|file_name|>generate-chains.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # Copyright (c) 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Valid certificate chain where the target certificate contains a public key with a 512-bit modulus (weak).""" import sys sys.path += ['../..'] import gencerts # Self-signed root certificate. root = gencerts.create_self_signed_root_certificate('Root')<|fim▁hole|> # Target certificate. target = gencerts.create_end_entity_certificate('Target', intermediate) target.set_key(gencerts.get_or_generate_rsa_key( 512, gencerts.create_key_path(target.name))) chain = [target, intermediate, root] gencerts.write_chain(__doc__, chain, 'chain.pem')<|fim▁end|>
# Intermediate intermediate = gencerts.create_intermediate_certificate('Intermediate', root)
<|file_name|>model_change.js<|end_file_name|><|fim▁begin|>Ext.define('User', {<|fim▁hole|> extend: 'Ext.data.Model', fields: [ {name: 'name', type: 'string', convert: function(value,record) { return record.get('name')+' the barbarian'); } }, {name: 'age', type: 'int'}, {name: 'phone', type: 'string'}, {name: 'alive', type: 'boolean', defaultValue: true} ], });<|fim▁end|>
<|file_name|>borrowck-freeze-frozen-mut.rs<|end_file_name|><|fim▁begin|>// run-pass // Test that a `&mut` inside of an `&` is freezable. struct MutSlice<'a, T:'a> { data: &'a mut [T] } fn get<'a, T>(ms: &'a MutSlice<'a, T>, index: usize) -> &'a T { &ms.data[index] } pub fn main() { let mut data = [1, 2, 3]; { let slice = MutSlice { data: &mut data }; slice.data[0] += 4; let index0 = get(&slice, 0);<|fim▁hole|> assert_eq!(*index2, 3); } assert_eq!(data[0], 5); assert_eq!(data[1], 2); assert_eq!(data[2], 3); }<|fim▁end|>
let index1 = get(&slice, 1); let index2 = get(&slice, 2); assert_eq!(*index0, 5); assert_eq!(*index1, 2);
<|file_name|>case-insensitive.test.js<|end_file_name|><|fim▁begin|>'use strict' const t = require('tap') const test = t.test const FindMyWay = require('../') test('case insensitive static routes of level 1', t => { t.plan(1) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/woo', (req, res, params) => { t.pass('we should be here') }) findMyWay.lookup({ method: 'GET', url: '/WOO', headers: {} }, null) }) test('case insensitive static routes of level 2', t => { t.plan(1) const findMyWay = FindMyWay({<|fim▁hole|> t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/woo', (req, res, params) => { t.pass('we should be here') }) findMyWay.lookup({ method: 'GET', url: '/FoO/WOO', headers: {} }, null) }) test('case insensitive static routes of level 3', t => { t.plan(1) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/bar/woo', (req, res, params) => { t.pass('we should be here') }) findMyWay.lookup({ method: 'GET', url: '/Foo/bAR/WoO', headers: {} }, null) }) test('parametric case insensitive', t => { t.plan(1) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/:param', (req, res, params) => { t.equal(params.param, 'bAR') }) findMyWay.lookup({ method: 'GET', url: '/Foo/bAR', headers: {} }, null) }) test('parametric case insensitive with a static part', t => { t.plan(1) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/my-:param', (req, res, params) => { t.equal(params.param, 'bAR') }) findMyWay.lookup({ method: 'GET', url: '/Foo/MY-bAR', headers: {} }, null) }) test('parametric case insensitive with capital letter', t => { t.plan(1) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/:Param', (req, res, params) => { t.equal(params.Param, 'bAR') }) findMyWay.lookup({ method: 'GET', url: '/Foo/bAR', headers: {} }, null) }) test('case insensitive with capital letter in static path with param', t => { t.plan(1) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/Foo/bar/:param', (req, res, params) => { t.equal(params.param, 'baZ') }) findMyWay.lookup({ method: 'GET', url: '/foo/bar/baZ', headers: {} }, null) }) test('case insensitive with multiple paths containing capital letter in static path with param', t => { /* * This is a reproduction of the issue documented at * https://github.com/delvedor/find-my-way/issues/96. 
*/ t.plan(2) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/Foo/bar/:param', (req, res, params) => { t.equal(params.param, 'baZ') }) findMyWay.on('GET', '/Foo/baz/:param', (req, res, params) => { t.equal(params.param, 'baR') }) findMyWay.lookup({ method: 'GET', url: '/foo/bar/baZ', headers: {} }, null) findMyWay.lookup({ method: 'GET', url: '/foo/baz/baR', headers: {} }, null) }) test('case insensitive with multiple mixed-case params within same slash couple', t => { t.plan(2) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/:param1-:param2', (req, res, params) => { t.equal(params.param1, 'My') t.equal(params.param2, 'bAR') }) findMyWay.lookup({ method: 'GET', url: '/FOO/My-bAR', headers: {} }, null) }) test('case insensitive with multiple mixed-case params', t => { t.plan(2) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/:param1/:param2', (req, res, params) => { t.equal(params.param1, 'My') t.equal(params.param2, 'bAR') }) findMyWay.lookup({ method: 'GET', url: '/FOO/My/bAR', headers: {} }, null) }) test('case insensitive with wildcard', t => { t.plan(1) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('GET', '/foo/*', (req, res, params) => { t.equal(params['*'], 'baR') }) findMyWay.lookup({ method: 'GET', url: '/FOO/baR', headers: {} }, null) }) test('parametric case insensitive with multiple routes', t => { t.plan(6) const findMyWay = FindMyWay({ caseSensitive: false, defaultRoute: (req, res) => { t.fail('Should not be defaultRoute') } }) findMyWay.on('POST', '/foo/:param/Static/:userId/Save', (req, res, params) => { t.equal(params.param, 'bAR') t.equal(params.userId, 'one') }) findMyWay.on('POST', '/foo/:param/Static/:userId/Update', (req, res, params) => { t.equal(params.param, 'Bar') t.equal(params.userId, 'two') }) findMyWay.on('POST', '/foo/:param/Static/:userId/CANCEL', (req, res, params) => { t.equal(params.param, 'bAR') t.equal(params.userId, 'THREE') }) findMyWay.lookup({ method: 'POST', url: '/foo/bAR/static/one/SAVE', headers: {} }, null) findMyWay.lookup({ method: 'POST', url: '/fOO/Bar/Static/two/update', headers: {} }, null) findMyWay.lookup({ method: 'POST', url: '/Foo/bAR/STATIC/THREE/cAnCeL', headers: {} }, null) })<|fim▁end|>
caseSensitive: false, defaultRoute: (req, res) => {
<|file_name|>GUIdispatcher.py<|end_file_name|><|fim▁begin|>""" Graphical user interface functionalities for the SampleResource Aggregate Manager. @date: Jun 12, 2013 @author: CarolinaFernandez """ from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect, HttpResponse from django.shortcuts import get_object_or_404 from expedient.clearinghouse.aggregate.models import Aggregate from expedient.clearinghouse.slice.models import Slice from expedient.common.messaging.models import DatedMessage from expedient.common.utils.plugins.plugincommunicator import * from expedient.common.utils.plugins.resources.link import Link from expedient.common.utils.plugins.resources.node import Node from expedient.common.utils.views import generic_crud from sample_resource.controller.resource import SampleResource as SampleResourceController from sample_resource.forms.SampleResource import SampleResource as SampleResourceModelForm from sample_resource.models import SampleResource as SampleResourceModel,\ SampleResourceAggregate as SampleResourceAggregateModel import copy import logging import xmlrpclib def create_resource(request, slice_id, agg_id): """Show a page that allows user to add a SampleResource to the aggregate.""" if request.method == "POST": # Shows error message when aggregate unreachable, disable SampleResource creation and get back to slice detail page agg = Aggregate.objects.get(id = agg_id) if agg.check_status() == False: DatedMessage.objects.post_message_to_user( "SampleResource Aggregate '%s' is not available" % agg.name, request.user, msg_type=DatedMessage.TYPE_ERROR,) return HttpResponseRedirect(reverse("slice_detail", args=[slice_id])) if 'create_resource' in request.POST: return HttpResponseRedirect(reverse("sample_resource_resource_crud", args=[slice_id, agg_id])) else: return HttpResponseRedirect(reverse("slice_detail", args=[slice_id])) def resource_crud(request, slice_id, agg_id, resource_id = None): """ Show a page that allows user to create/edit SampleResource's to the Aggregate. """ slice = get_object_or_404(Slice, id = slice_id) aggregate = Aggregate.objects.get(id = agg_id) error_crud = "" def pre_save(instance, created): """ Fills SampleResource instance prior to its saving. Used within the scope of the generic_crud method. """ instance = SampleResourceController.fill(instance, slice, agg_id, resource_id) try: return generic_crud(request, obj_id=resource_id, model=SampleResourceModel, form_class=SampleResourceModelForm, template="sample_resource_resource_crud.html", redirect=lambda inst: reverse("slice_detail", args=[slice_id]), extra_context={"agg": aggregate, "slice": slice, "exception": error_crud, "breadcrumbs": ( ("Home", reverse("home")), ("Project %s" % slice.project.name, reverse("project_detail", args=[slice.project.id])), ("Slice %s" % slice.name, reverse("slice_detail", args=[slice_id])), ("%s SampleResource" % "Update" if resource_id else "Create", reverse("sample_resource_resource_crud", args=[slice_id, agg_id])),) }, extra_form_params={}, template_object_name="object", pre_save=pre_save, post_save=None, success_msg=None) except ValidationError as e: # Django exception message handling is different to Python's... error_crud = ";".join(e.messages) except Exception as e: print "[WARNING] Could not create resource in plugin 'sample_resource'. 
Details: %s" % str(e) DatedMessage.objects.post_message_to_user( "SampleResource might have been created, but some problem ocurred: %s" % str(e), request.user, msg_type=DatedMessage.TYPE_ERROR) return HttpResponseRedirect(reverse("slice_detail", args=[slice_id])) def manage_resource(request, resource_id, action_type): """ Manages the actions executed over SampleResource's. """ if action_type == "delete": SampleResourceController.delete(resource_id) # Go to manage resources again return HttpResponse("") ### # Topology to show in the Expedient # def get_sr_list(slice): return SampleResourceModel.objects.filter(slice_id = slice.uuid) def get_sr_aggregates(slice): sr_aggs = [] try: sr_aggs = slice.aggregates.filter(leaf_name=SampleResourceAggregateModel.__name__.lower()) except: pass return sr_aggs def get_node_description(node): description = "<strong>Sample Resource: " + node.name + "</strong><br/><br/>" description += "&#149; Temperature: %s (&#176;%s)" % (str(node.get_temperature()), str(node.get_temperature_scale())) connections = "" node_connections = node.get_connections() for i, connection in enumerate(node_connections): connections += connection.name if i < len(node_connections)-1: connections += ", " description += "<br/>&#149; Connected to: %s" % str(connections) return description def get_nodes_links(slice, chosen_group=None): nodes = [] links = [] sr_aggs = get_sr_aggregates(slice) # Getting image for the nodes # FIXME: avoid to ask the user for the complete name of the method here! he should NOT know it try: image_url = reverse('img_media_sample_resource', args=("sensor-tiny.png",)) except: image_url = 'sensor-tiny.png' # For every SampleResource AM for i, sr_agg in enumerate(sr_aggs): sr_agg = sr_agg.sampleresourceaggregate # Iterates over every SampleResource contained within the slice for sr in sr_agg.get_resources(): sr = sr.sampleresource<|fim▁hole|> name = sr.name, value = sr.id, aggregate = sr.aggregate, type = "Sample resource", description = get_node_description(sr), image = image_url) ) for connection in sr.get_connections(): # Two-ways link links.append( Link( target = str(sr.id), source = str(connection.id), value = "rsc_id_%s-rsc_id_%s" % (connection.id, sr.id) ), ) links.append( Link( target = str(sr.id), source = str(connection.id), value = "rsc_id_%s-rsc_id_%s" % (sr.id, connection.id) ), ) return [nodes, links] #from expedient.common.utils.plugins.plugininterface import PluginInterface # #class Plugin(PluginInterface): # @staticmethod def get_ui_data(slice): """ Hook method. Use this very same name so Expedient can get the resources for every plugin. """ ui_context = dict() try: ui_context['sr_list'] = get_sr_list(slice) ui_context['sr_aggs'] = get_sr_aggregates(slice) ui_context['nodes'], ui_context['links'] = get_nodes_links(slice) except Exception as e: print "[ERROR] Problem loading UI data for plugin 'sample_resource'. Details: %s" % str(e) return ui_context<|fim▁end|>
nodes.append(Node( # Users shall not be given the choice of group/island; otherwise collisions may arise
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models from django.core.urlresolvers import reverse from django.conf import settings import misaka from groups.models import Group # Create your models here. # POSTS MODELS.PY from django.contrib.auth import get_user_model User = get_user_model() class Post(models.Model): user = models.ForeignKey(User, related_name='posts') created_at = models.DateTimeField(auto_now=True) message = models.TextField() message_html = models.TextField(editable=False) group = models.ForeignKey(Group, related_name='posts', null=True, blank=True) def __str__(self): return self.message def save(self, *args, **kwargs): self.message_html = misaka.html(self.message) super().save(*args, **kwargs) def get_absolute_url(self): return reverse('posts:single', kwargs={'username': self.user.username, 'pk': self.pk}) class Meta:<|fim▁hole|><|fim▁end|>
ordering = ['-created_at'] unique_together = ['user', 'message']
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use kaze::*; use rand::Rng; use structopt::StructOpt; /// This tool generates lock-style state machines that output an `unlocked` flag /// after receiving a specific sequence of input symbols. It can configurably /// generate the length of the unlock sequence, the width of the interface, and /// the probability of it inserting backtracking transitions. #[derive(StructOpt)] struct Options { /// The number of states between the initial and unlocked state. #[structopt(long, default_value = "32")] states: u32, /// The width of the registers and ports making up the lock. #[structopt(long, default_value = "32")] width: u32, } fn main() -> std::io::Result<()> { let options = Options::from_args(); let generator = Generator { states: options.states, width: options.width, }; let mut context = Context::new(); let lock = generator.generate(&mut context); verilog::generate(&lock, std::io::stdout()) } struct Generator { states: u32, width: u32, } const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 } fn log_2(x: u32) -> u32 { num_bits::<u32>() as u32 - x.leading_zeros() } impl Generator { fn generate<'a>(&'a self, c: &'a mut Context<'a>) -> &'a Module { let mut rng = rand::thread_rng(); // compute width of state register let state_reg_width = log_2(self.states - 1u32); // create lock module with a single state register and trigger input let lock = c.module("lock"); let input = lock.input("code", self.width); let state = lock.reg("state", state_reg_width); state.default_value(0u32); // define lock state transitions let mut next = state.value; for i in 0..(self.states - 1u32) { let trigger_value = rng.gen_range(1u64, 2u64.pow(self.width)); let from = lock.lit(i, state_reg_width); let to = lock.lit(i + 1u32, state_reg_width); let trigger = lock.lit(trigger_value, self.width);<|fim▁hole|> next = (state.value.eq(from) & input.eq(trigger)).mux(to, next); } state.drive_next(next); // define lock outputs lock.output("unlocked", state.value.eq(lock.lit(self.states - 1u32, state_reg_width))); lock.output("state", state.value); // return HDL lock } }<|fim▁end|>
<|file_name|>JavaElementRequestor.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright (c) 2000, 2013 IBM Corporation and others. * * This program and the accompanying materials * are made available under the terms of the Eclipse Public License 2.0 * which accompanies this distribution, and is available at * https://www.eclipse.org/legal/epl-2.0/ * * SPDX-License-Identifier: EPL-2.0 * * Contributors: * IBM Corporation - initial API and implementation *******************************************************************************/ package org.eclipse.jdt.internal.core; import java.util.ArrayList; import org.eclipse.jdt.core.IField; import org.eclipse.jdt.core.IInitializer; import org.eclipse.jdt.core.IMethod; import org.eclipse.jdt.core.IModuleDescription; import org.eclipse.jdt.core.IPackageFragment; import org.eclipse.jdt.core.IType; /** * @see IJavaElementRequestor */ @SuppressWarnings({ "rawtypes", "unchecked" }) public class JavaElementRequestor implements IJavaElementRequestor { /** * True if this requestor no longer wants to receive * results from its <code>IRequestorNameLookup</code>. */ protected boolean canceled= false; /** * A collection of the resulting fields, or <code>null</code> * if no field results have been received. */ protected ArrayList fields= null; /** * A collection of the resulting initializers, or <code>null</code> * if no initializer results have been received. */ protected ArrayList initializers= null; /** * A collection of the resulting member types, or <code>null</code> * if no member type results have been received. */ protected ArrayList memberTypes= null; /** * A collection of the resulting methods, or <code>null</code> * if no method results have been received. */ protected ArrayList methods= null; /** * A collection of the resulting package fragments, or <code>null</code> * if no package fragment results have been received. */ protected ArrayList packageFragments= null; /** * A collection of the resulting types, or <code>null</code> * if no type results have been received. 
*/ protected ArrayList types= null; /** * A collection of the resulting modules, or <code>null</code> * if no module results have been received */ protected ArrayList<IModuleDescription> modules = null; /** * Empty arrays used for efficiency */ protected static final IField[] EMPTY_FIELD_ARRAY= new IField[0]; protected static final IInitializer[] EMPTY_INITIALIZER_ARRAY= new IInitializer[0]; protected static final IType[] EMPTY_TYPE_ARRAY= new IType[0]; protected static final IPackageFragment[] EMPTY_PACKAGE_FRAGMENT_ARRAY= new IPackageFragment[0]; protected static final IMethod[] EMPTY_METHOD_ARRAY= new IMethod[0]; protected static final IModuleDescription[] EMPTY_MODULE_ARRAY= new IModuleDescription[0]; /** * @see IJavaElementRequestor */ @Override public void acceptField(IField field) { if (this.fields == null) { this.fields= new ArrayList(); } this.fields.add(field); } /** * @see IJavaElementRequestor */ @Override public void acceptInitializer(IInitializer initializer) { if (this.initializers == null) { this.initializers= new ArrayList(); } this.initializers.add(initializer); } /** * @see IJavaElementRequestor */ @Override public void acceptMemberType(IType type) { if (this.memberTypes == null) { this.memberTypes= new ArrayList(); } this.memberTypes.add(type); } /** * @see IJavaElementRequestor */ @Override public void acceptMethod(IMethod method) { if (this.methods == null) { this.methods = new ArrayList(); } this.methods.add(method); } /** * @see IJavaElementRequestor */ @Override public void acceptPackageFragment(IPackageFragment packageFragment) { if (this.packageFragments== null) { this.packageFragments= new ArrayList(); } this.packageFragments.add(packageFragment); } /** * @see IJavaElementRequestor */ @Override public void acceptType(IType type) { if (this.types == null) { this.types= new ArrayList(); } this.types.add(type); } /** * @see IJavaElementRequestor */ @Override public void acceptModule(IModuleDescription module) { if (this.modules == null) { this.modules= new ArrayList(); } this.modules.add(module); } /** * @see IJavaElementRequestor */ public IField[] getFields() { if (this.fields == null) { return EMPTY_FIELD_ARRAY; } int size = this.fields.size(); IField[] results = new IField[size]; this.fields.toArray(results); return results; } /** * @see IJavaElementRequestor */ public IInitializer[] getInitializers() { if (this.initializers == null) { return EMPTY_INITIALIZER_ARRAY; } int size = this.initializers.size(); IInitializer[] results = new IInitializer[size]; this.initializers.toArray(results); return results; } /** * @see IJavaElementRequestor */ public IType[] getMemberTypes() { if (this.memberTypes == null) { return EMPTY_TYPE_ARRAY; } int size = this.memberTypes.size(); IType[] results = new IType[size]; this.memberTypes.toArray(results); return results; } /** * @see IJavaElementRequestor */ public IMethod[] getMethods() { if (this.methods == null) { return EMPTY_METHOD_ARRAY; } int size = this.methods.size(); IMethod[] results = new IMethod[size]; this.methods.toArray(results); return results; } /** * @see IJavaElementRequestor */ public IPackageFragment[] getPackageFragments() { if (this.packageFragments== null) { return EMPTY_PACKAGE_FRAGMENT_ARRAY; } int size = this.packageFragments.size(); IPackageFragment[] results = new IPackageFragment[size]; this.packageFragments.toArray(results); return results; } /** * @see IJavaElementRequestor */ public IType[] getTypes() { if (this.types== null) { return EMPTY_TYPE_ARRAY; } int size = this.types.size(); IType[] 
results = new IType[size];<|fim▁hole|> return results; } /** * @see IJavaElementRequestor */ public IModuleDescription[] getModules() { if (this.modules == null) { return EMPTY_MODULE_ARRAY; } int size = this.modules.size(); IModuleDescription[] results = new IModuleDescription[size]; this.modules.toArray(results); return results; } /** * @see IJavaElementRequestor */ @Override public boolean isCanceled() { return this.canceled; } /** * Reset the state of this requestor. */ public void reset() { this.canceled = false; this.fields = null; this.initializers = null; this.memberTypes = null; this.methods = null; this.packageFragments = null; this.types = null; } /** * Sets the #isCanceled state of this requestor to true or false. */ public void setCanceled(boolean b) { this.canceled= b; } }<|fim▁end|>
this.types.toArray(results);
<|file_name|>syncSchemaDocs.js<|end_file_name|><|fim▁begin|>// run `node syncSchemaDocs.js` from the lib folder to update/add the schema docs that we specify here // note the relative path to nano module from where this doc is stored. // currently this script requires the context docs to already exist in couchdb // if creating/defining a brand new schema doc - use futon or curl to create an otherwise empty doc with the needed "_id" first. var nano = require('../node_modules/nano')('http://127.0.0.1:5984'); var db = nano.use('patterns'); // specifies the blank json to be sent when GETing /patterns/new var newPatternSchema = { "_id": "patternSchema", "doctype": "schema", "int_id": null, "name": "", "pic": {"b64encoding": "", "filename": "" }, "author": [{ "ORCID": "", "name": "" }], "context": "", "problem": "", "force": [{ "name": "", "description": "", "pic": { "b64encoding": "", "filename": "" } } ], "solution": "", "rationale": "", "diagram": { "b64encoding": "", "filename": "" }, "evidence": [ {} ], }; var alpaca = { "_id": "alpaca", "schema": { "title": "Create a new Pattern!", "type": "object", "properties": { "title": { "type": "string", "title": "Title" }, "image": { "type": "string", "title": "Select an image to upload..." } } } }; //schema for vaidating POST to new or PUT to /prototype var validationSchema = { "_id": "newPatternValidationSchema", "doctype": "schema", "$schema": "http://json-schema.org/schema#", "title": "New pattern validation schema", "type": "object", "items": { "type": "object", "properties": { "doctype":{ "type": "string" },<|fim▁hole|> "name": { "type": "string" }, "pic": { "type": "object" }, "author": { "type": "array" }, "context": { "type": "string" }, "problem": { "type": "string" }, "force": { "type": "array" }, "solution": { "type": "string" }, "rationale": { "type": "string" }, "diagram": { "type": "object" }, "evidence": { "type": "array" } }, "required": ["doctype", "name", "pic", "author", "context", "problem", "force", "solution", "rationale", "diagram", "evidence"] } }; var schemaDocs = [newPatternSchema, validationSchema, alpaca]; //note this function uses an Immediately Invoked Function expression to // allow async call-back funtions to close properly within the // for loop. see // http://stackoverflow.com/questions/750486/javascript-closure-inside-loops-simple-practical-example/19323214#19323214 // http://learn.jquery.com/javascript-101/functions/#immediately-invoked-function-expression-iife // http://en.wikipedia.org/wiki/Immediately-invoked_function_expression function syncDocs() { for (var x = 0; x < schemaDocs.length; x++) { //IIFE - anon function will be called on each iteration of the for loop // we pass in the value of for loop x as index within the anon funct (function(index){ //we copy the contents of the JSON objects specified above into the temp var doc here var doc = JSON.parse(JSON.stringify(schemaDocs[index])); //retreive the doc from couch db db.get(doc['_id'], function(err, body){ if(!err){ //if OK, set/create temp doc "_rev" field to match current db rev doc['_rev'] = body['_rev']; //write the doc db.insert(doc, function(err, body){ console.log(body); }) } else{ // if the db.get fails console.log(err); } //console.log("doc id is "+doc['_id']+" and doc rev is set to "+doc['_rev']); }) })(x); // we send the for loop iterator x to the (IIFE) anon function above, where it is defined as 'index' // see IIFE links above } } syncDocs();<|fim▁end|>
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>#[cfg(any(target_os = "linux", target_os = "android"))] pub mod epoll; #[cfg(any(target_os = "macos", target_os = "ios", target_os = "freebsd", target_os = "openbsd"))] pub mod event; // TODO: switch from feature flags to conditional builds #[cfg(feature = "eventfd")] pub mod eventfd; #[cfg(not(any(target_os = "ios", target_os = "freebsd")))] pub mod ioctl; pub mod signal;<|fim▁hole|>pub mod stat; #[cfg(any(target_os = "linux", target_os = "android"))] pub mod syscall; #[cfg(not(target_os = "ios"))] pub mod termios; #[cfg(any(target_os = "linux", target_os = "android"))] pub mod utsname; pub mod wait; pub mod mman; pub mod uio; pub mod time;<|fim▁end|>
pub mod socket;
<|file_name|>QAInfo.java<|end_file_name|><|fim▁begin|>/* $Id: QAInfo.java,v 1.6 2007/12/04 13:22:01 mke Exp $ * $Revision: 1.6 $ * $Date: 2007/12/04 13:22:01 $ * $Author: mke $ * * The SB Util Library. * Copyright (C) 2005-2007 The State and University Library of Denmark * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details.<|fim▁hole|> */ package dk.statsbiblioteket.util.qa; import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; /** * Annotation containing all information relevant to extracting QA reports. */ @Documented @Retention(RetentionPolicy.RUNTIME) public @interface QAInfo { /** * Java doc needed. */ String JAVADOCS_NEEDED = "Javadocs needed"; /** * Code not finished. */ String UNFINISHED_CODE = "Unfinished code"; /** * Code isn't working properly. */ String FAULTY_CODE = "Faulty code"; /** * Code is messy. */ String MESSY_CODE = "Messy code"; /** * Enumeration describing the state of the QA process this class, method, * or field is in. */ public enum State { /** * Default state. Never use this manually. */ UNDEFINED, /** * No review should be performed. This is normally used when code is * under active development. */ IN_DEVELOPMENT, /** * The code should be reviewed and unit tests performed. */ QA_NEEDED, /** * Reviews and unit tests has been made and passed for this code. * The code is judged to be satisfiable. This annotation should be * changed as soon as the code is changed again. */ QA_OK } /** * Enumeration describing the possible QA levels a class, method, or field * can have. */ public enum Level { /** * Default level. Never use this manually. */ UNDEFINED, /** * The code is of utmost importance and should be thoroughly reviewed * and unit tested. */ PEDANTIC, /** * The code is important or complex and extra care should be taken when * reviewing and unit testing. */ FINE, /** * The code is standard and should be reviewed and unit tested * normally. */ NORMAL, /** * The code does not need reviewing or unit testing. */ NOT_NEEDED } /** * A free form string naming the author. For clarity use the same author * format as in {@link #reviewers}. * It is suggested to use the {@code Author} keyword for CVS controlled * code. * This annotation should name the primary responsibly party for this * piece of code. In most cases it will be the original author of the * document, but if the file receives heavy editing by other parties, they * may end up being more appropriate for the listed author. * @return the author. */ String author() default ""; /** * The current revision of the annotated element. Mostly for use on classes. * It is suggested to use the CVS {@code Id} keyword for CVS controlled * repositories. * @return the revision. */ String revision() default ""; /** * Free form string describing the deadline. * @return the deadline. */ String deadline() default ""; /** * Developers responsible for reviewing this class or method. * Fx <code>{"mke", "te"}</code> - use same convention as * {@link #author}. 
* It is advised to keep a list of all reviewers here, with the last * one in the list being the last person to review the code. This way it * will be easy to construct a simple audit trail for the code. * @return a list of reviewers. */ String[] reviewers() default {}; // Note use of array /** * A freeform comment that can be included in QA reports. * @return the comment. */ String comment() default ""; /** * The {@link Level} of the annotated element. * @return the severity level. */ Level level() default Level.UNDEFINED; /** * The {@link State} of the annotated element. * @return the state. */ State state() default State.UNDEFINED; }<|fim▁end|>
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
<|file_name|>iItemsChangedEventArgs.ts<|end_file_name|><|fim▁begin|>export interface IItemsChangedEventArgs<T> { added: T[], removed: T[]<|fim▁hole|><|fim▁end|>
}
<|file_name|>timer.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ AsRawDescriptor, FakeClock, FromRawDescriptor, IntoRawDescriptor, RawDescriptor, Result, }; use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; use std::sync::Arc; use std::time::Duration; use sync::Mutex; use sys_util::{FakeTimerFd, TimerFd}; /// See [TimerFd](sys_util::TimerFd) for struct- and method-level /// documentation. pub struct Timer(pub TimerFd); impl Timer { pub fn new() -> Result<Timer> { TimerFd::new().map(Timer) } } /// See [FakeTimerFd](sys_util::FakeTimerFd) for struct- and method-level /// documentation. pub struct FakeTimer(FakeTimerFd); impl FakeTimer { pub fn new(clock: Arc<Mutex<FakeClock>>) -> Self { FakeTimer(FakeTimerFd::new(clock)) } } macro_rules! build_timer { ($timer:ident, $inner:ident) => { impl $timer { pub fn reset(&mut self, dur: Duration, interval: Option<Duration>) -> Result<()> { self.0.reset(dur, interval) } pub fn wait(&mut self) -> Result<()> { self.0.wait().map(|_| ()) } pub fn is_armed(&self) -> Result<bool> { self.0.is_armed() } pub fn clear(&mut self) -> Result<()> { self.0.clear() } pub fn resolution() -> Result<Duration> { $inner::resolution() } } impl AsRawDescriptor for $timer { fn as_raw_descriptor(&self) -> RawDescriptor { self.0.as_raw_fd() } } impl IntoRawDescriptor for $timer { fn into_raw_descriptor(self) -> RawDescriptor { self.0.into_raw_fd() } } }; }<|fim▁hole|> build_timer!(Timer, TimerFd); build_timer!(FakeTimer, FakeTimerFd); impl FromRawDescriptor for Timer { unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self { Timer(TimerFd::from_raw_fd(descriptor)) } }<|fim▁end|>
<|file_name|>database.go<|end_file_name|><|fim▁begin|>package documentdb // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "github.com/Azure/go-autorest/tracing" "net/http" ) // DatabaseClient is the azure Cosmos DB Database Service Resource Provider REST API type DatabaseClient struct { BaseClient } // NewDatabaseClient creates an instance of the DatabaseClient client. func NewDatabaseClient(subscriptionID string) DatabaseClient { return NewDatabaseClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewDatabaseClientWithBaseURI creates an instance of the DatabaseClient client using a custom endpoint. Use this // when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewDatabaseClientWithBaseURI(baseURI string, subscriptionID string) DatabaseClient { return DatabaseClient{NewWithBaseURI(baseURI, subscriptionID)} } // ListMetricDefinitions retrieves metric definitions for the given database. // Parameters: // resourceGroupName - name of an Azure resource group. // accountName - cosmos DB database account name. // databaseRid - cosmos DB database rid. func (client DatabaseClient) ListMetricDefinitions(ctx context.Context, resourceGroupName string, accountName string, databaseRid string) (result MetricDefinitionsListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseClient.ListMetricDefinitions") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}, {Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil { return result, validation.NewError("documentdb.DatabaseClient", "ListMetricDefinitions", err.Error()) } req, err := client.ListMetricDefinitionsPreparer(ctx, resourceGroupName, accountName, databaseRid) if err != nil { err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListMetricDefinitions", nil, "Failure preparing request") return } resp, err := client.ListMetricDefinitionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListMetricDefinitions", resp, "Failure sending request") return } result, err = client.ListMetricDefinitionsResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListMetricDefinitions", resp, "Failure responding to request") return } return } // ListMetricDefinitionsPreparer prepares the 
ListMetricDefinitions request. func (client DatabaseClient) ListMetricDefinitionsPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseRid string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "databaseRid": autorest.Encode("path", databaseRid), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2015-04-08" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/metricDefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListMetricDefinitionsSender sends the ListMetricDefinitions request. The method will close the // http.Response Body if it receives an error. func (client DatabaseClient) ListMetricDefinitionsSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListMetricDefinitionsResponder handles the response to the ListMetricDefinitions request. The method always // closes the http.Response Body. func (client DatabaseClient) ListMetricDefinitionsResponder(resp *http.Response) (result MetricDefinitionsListResult, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ListMetrics retrieves the metrics determined by the given filter for the given database account and database. // Parameters: // resourceGroupName - name of an Azure resource group. // accountName - cosmos DB database account name. // databaseRid - cosmos DB database rid. // filter - an OData filter expression that describes a subset of metrics to return. The parameters that can be // filtered are name.value (name of the metric, can have an or of multiple names), startTime, endTime, and // timeGrain. The supported operator is eq. 
func (client DatabaseClient) ListMetrics(ctx context.Context, resourceGroupName string, accountName string, databaseRid string, filter string) (result MetricListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseClient.ListMetrics") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}, {Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil { return result, validation.NewError("documentdb.DatabaseClient", "ListMetrics", err.Error()) } req, err := client.ListMetricsPreparer(ctx, resourceGroupName, accountName, databaseRid, filter) if err != nil { err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListMetrics", nil, "Failure preparing request") return } resp, err := client.ListMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListMetrics", resp, "Failure sending request") return } result, err = client.ListMetricsResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListMetrics", resp, "Failure responding to request") return } return } // ListMetricsPreparer prepares the ListMetrics request. func (client DatabaseClient) ListMetricsPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseRid string, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "databaseRid": autorest.Encode("path", databaseRid), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2015-04-08" queryParameters := map[string]interface{}{ "$filter": autorest.Encode("query", filter), "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListMetricsSender sends the ListMetrics request. The method will close the // http.Response Body if it receives an error. func (client DatabaseClient) ListMetricsSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListMetricsResponder handles the response to the ListMetrics request. The method always // closes the http.Response Body. 
func (client DatabaseClient) ListMetricsResponder(resp *http.Response) (result MetricListResult, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ListUsages retrieves the usages (most recent data) for the given database. // Parameters: // resourceGroupName - name of an Azure resource group. // accountName - cosmos DB database account name. // databaseRid - cosmos DB database rid. // filter - an OData filter expression that describes a subset of usages to return. The supported parameter is // name.value (name of the metric, can have an or of multiple names). func (client DatabaseClient) ListUsages(ctx context.Context, resourceGroupName string, accountName string, databaseRid string, filter string) (result UsagesResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseClient.ListUsages") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}, {Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil { return result, validation.NewError("documentdb.DatabaseClient", "ListUsages", err.Error()) } req, err := client.ListUsagesPreparer(ctx, resourceGroupName, accountName, databaseRid, filter) if err != nil { err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListUsages", nil, "Failure preparing request") return } resp, err := client.ListUsagesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListUsages", resp, "Failure sending request") return } result, err = client.ListUsagesResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "documentdb.DatabaseClient", "ListUsages", resp, "Failure responding to request") return } return } // ListUsagesPreparer prepares the ListUsages request. 
func (client DatabaseClient) ListUsagesPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseRid string, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "databaseRid": autorest.Encode("path", databaseRid), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2015-04-08" queryParameters := map[string]interface{}{ "api-version": APIVersion, } if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListUsagesSender sends the ListUsages request. The method will close the // http.Response Body if it receives an error. func (client DatabaseClient) ListUsagesSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client))<|fim▁hole|>// ListUsagesResponder handles the response to the ListUsages request. The method always // closes the http.Response Body. func (client DatabaseClient) ListUsagesResponder(resp *http.Response) (result UsagesResult, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }<|fim▁end|>
}
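The Go sample above is generated around autorest's three-stage pipeline: a Preparer builds the request from path and query parameters, a Sender transmits it under the retry policy, and a Responder errors on non-200 status, unmarshals the JSON, and closes the body. A rough sketch of the same split in Python using the requests library; the function names and URL template here only mirror the sample and are not part of any SDK:

import requests

API_VERSION = "2015-04-08"  # same api-version constant as in the sample

def prepare_list_usages(base_uri, subscription_id, group, account, database_rid, filter_expr=None):
    # Stage 1 (Preparer): assemble the URL and query parameters only.
    url = ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB"
           "/databaseAccounts/{}/databases/{}/usages").format(
               base_uri, subscription_id, group, account, database_rid)
    params = {"api-version": API_VERSION}
    if filter_expr:
        params["$filter"] = filter_expr
    return requests.Request("GET", url, params=params).prepare()

def send(prepared, session=None):
    # Stage 2 (Sender): transmit; a retry policy would wrap this call.
    return (session or requests.Session()).send(prepared)

def respond(resp):
    # Stage 3 (Responder): error unless 200, unmarshal JSON, close the body.
    resp.raise_for_status()
    try:
        return resp.json()
    finally:
        resp.close()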
<|file_name|>appconfig.py<|end_file_name|><|fim▁begin|>from django.apps import AppConfig class MailinglistsConfig(AppConfig): name = 'apps.mailinglists'<|fim▁hole|><|fim▁end|>
verbose_name = 'Mailinglists'
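For context on the Django sample above: an AppConfig subclass only takes effect once the settings module points at it. A minimal wiring sketch; the dotted path is an assumption based on the sample's name attribute and a conventional apps.py location:

# settings.py (sketch)
INSTALLED_APPS = [
    "django.contrib.admin",
    "apps.mailinglists.apps.MailinglistsConfig",  # assumed module layout
]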
<|file_name|>run.py<|end_file_name|><|fim▁begin|><|fim▁hole|> rcblog.main()<|fim▁end|>
import rcblog if __name__ == '__main__':
<|file_name|>ubcScraper.py<|end_file_name|><|fim▁begin|>import requests from bs4 import BeautifulSoup import sys import os import pandas import re <|fim▁hole|> bookLinks = "http://www.ubcpress.ca/search/" outputDir = "UBC_Output" def main(): r = requests.get(targetURL) soup = BeautifulSoup(r.content, "html.parser") # make a list book_urls = [] # get titles and links for link in soup.find_all("a"): if "title_book.asp" in link.get("href"): book_urls.append(bookLinks + link.get("href")) if not os.path.isdir(outputDir): os.mkdir(outputDir) os.chdir(outputDir) booksDict = { "title" : [], "authors" : [], "summary" : [], "subjects" : [], "authorBio" : [], "date" : [], "ISBN" : [], } print("Found {} urls".format(len(book_urls))) for i, url in enumerate(book_urls): print("On url index {}".format(i)) r = requests.get(url) soup = BeautifulSoup(r.content, "html.parser") print("Getting: {}".format(url)) title = soup.find("span", {"class" : "booktitle"}).text print("Found: '{}'".format(title)) print("Writing '{}/{}.html'".format(outputDir, title)) with open("{}.html".format(title.replace('/','')), 'wb') as f: for chunk in r.iter_content(1024): f.write(chunk) booksDict['title'].append(title) booksDict['authors'].append([a.text.strip() for a in soup.find_all("a", {"href" : "#author"})]) mainBodyText = soup.find("td", {"width" : "545", "colspan":"3"}).find("span" , {"class" : "regtext"}) regex = re.match(r"""(.*)About the Author\(s\)(.*)Table of Contents""", mainBodyText.text, flags = re.DOTALL) if regex is None: regex = re.match(r"""(.*)About the Author\(s\)(.*)""", mainBodyText.text, flags = re.DOTALL) booksDict['summary'].append(regex.group(1).strip()) booksDict["authorBio"].append(regex.group(2).strip().split('\n ')) booksDict["authorBio"][-1] = [s.strip() for s in booksDict["authorBio"][-1]] subjectsLst = [] for sub in mainBodyText.find_all("a"): try: if "subject_list.asp?SubjID=" in sub.get("href"): subjectsLst.append(sub.text) except TypeError: pass booksDict["subjects"].append(subjectsLst) newstext = soup.find("span", {"class" : "newstext"}).text regex = re.search(r"Release Date: (.*)(ISBN: \d*)", newstext) try: booksDict['date'].append(regex.group(1)) booksDict['ISBN'].append(regex.group(2)) except AttributeError: booksDict['date'].append(None) booksDict['ISBN'].append(None) os.chdir('..') pandas.DataFrame(booksDict).to_csv("UBCscrape.csv") if __name__ == "__main__": main()<|fim▁end|>
targetURL = "http://www.ubcpress.ca/search/subject_list.asp?SubjID=45"
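The scraper above leans on a two-step regex fallback: it first tries to split the page body on both "About the Author(s)" and "Table of Contents", then retries without the latter anchor for book pages that have no table of contents. The parse, isolated as a sketch with made-up input:

import re

def split_body(text):
    # Prefer the stricter pattern; fall back when no table of contents exists.
    m = re.match(r"(.*)About the Author\(s\)(.*)Table of Contents", text, flags=re.DOTALL)
    if m is None:
        m = re.match(r"(.*)About the Author\(s\)(.*)", text, flags=re.DOTALL)
    return m.group(1).strip(), m.group(2).strip()

summary, bio = split_body(
    "A short summary. About the Author(s) Jane Doe teaches at UBC. Table of Contents 1. Intro")
assert summary == "A short summary."
assert bio == "Jane Doe teaches at UBC."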
<|file_name|>package.ts<|end_file_name|><|fim▁begin|>import { resolve, join } from "path"; const { version } = require('../package.json'); const root = resolve(__dirname, '..'); export type PkgEnv = { name: string; version: string; root: string; buildCtx: { src: string; config: string; output: { [key: string]: any }; entryPoints: { [key: string]: any }; styles: { name: string; path: string; }[]; assets: { src: string; dest: string; glob: string; ignore?: string }[]; target: 'es5' | 'es2015'; enviromentModules: { prod: string; dev: string }[]; extractCss: boolean; optimizeCss: boolean; inlineSource: boolean; }; };<|fim▁hole|> root, version, buildCtx: { src: join(root, 'src'), output: { parent: join(root, 'build'), app: join(root, 'build', 'ux') }, config: 'tsconfig.app.json', target: 'es2015', entryPoints: { main: './main.ts', polyfill: './polyfill.ts' }, styles: [], assets: [{ src: './assets/images/icon/tab', glob: '*.ico', dest: 'assets/images/icon/tab' }], enviromentModules: [], extractCss: false, optimizeCss: false, inlineSource: false } } as PkgEnv;<|fim▁end|>
export default { name: 'studio90srls',
<|file_name|>Connector.java<|end_file_name|><|fim▁begin|>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package config; import interfaces.*; import java.sql.*; import java.util.logging.*; import javax.swing.*; /** * * @author Luis G */ public class Connector { public Connector() { } <|fim▁hole|> Connection conn = null; Statement stmt = null; boolean isNull = false; try { connect(); conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/escuela", "root", ""); stmt = conn.createStatement(); rs = stmt.executeQuery(query); for (int i = 0; rs.next(); i++) { callback.callback(rs, i); } stmt.close(); conn.close(); } catch (SQLException ex) { Logger.getLogger(Connector.class.getName()).log(Level.SEVERE, null, ex); } return isNull; } protected ResultSet getData(String query) { ResultSet rs = null; Connection conn = null; Statement stmt = null; try { connect(); conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/escuela", "root", ""); stmt = conn.createStatement(); rs = stmt.executeQuery(query); } catch (Exception e) { System.out.println(e); } return rs; } protected int executeQuery(String query) { int id = -1; try { connect(); Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/escuela", "root", ""); Statement stmt = conn.createStatement(); id = stmt.executeUpdate(query, Statement.RETURN_GENERATED_KEYS); ResultSet rs = stmt.getGeneratedKeys(); if (rs.next()) { id = rs.getInt(1); } stmt.close(); conn.close(); } catch (SQLException e) { Logger.getLogger(Connector.class.getName()).log(Level.SEVERE, null, e); switch (e.getErrorCode()) { case 1062: JOptionPane.showMessageDialog(null, "Ese correo ya esta registrado", "error", 0); break; case 1054: JOptionPane.showMessageDialog(null, "El registro no existe", "error", 0); break; default: JOptionPane.showMessageDialog(null, "A ocurrido un error " + e, "error", 0); System.out.println(e); break; } } return id; } private void connect() { try { Class.forName("com.mysql.jdbc.Driver"); } catch (Exception e) { } } }<|fim▁end|>
protected boolean getData(String query, Callback callback) { ResultSet rs = null;
<|file_name|>factory.py<|end_file_name|><|fim▁begin|>""" This is part of the MSS Python's module. Source: https://github.com/BoboTiG/python-mss """ import platform from typing import TYPE_CHECKING from .exception import ScreenShotError if TYPE_CHECKING: from typing import Any # noqa from .base import MSSBase # noqa <|fim▁hole|>def mss(**kwargs): # type: (Any) -> MSSBase """ Factory returning a proper MSS class instance. It detects the platform we are running on and chooses the most adapted mss_class to take screenshots. It then proxies its arguments to the class for instantiation. """ # pylint: disable=import-outside-toplevel os_ = platform.system().lower() if os_ == "darwin": from . import darwin return darwin.MSS(**kwargs) if os_ == "linux": from . import linux return linux.MSS(**kwargs) if os_ == "windows": from . import windows return windows.MSS(**kwargs) raise ScreenShotError("System {!r} not (yet?) implemented.".format(os_))<|fim▁end|>
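The factory above hides the platform dispatch behind a single call. Typical usage, sketched against the published mss package API (the monitor indexing and the grab method come from that library's documented surface, not from this file):

import mss

with mss.mss() as sct:            # factory picks the darwin/linux/windows backend
    monitor = sct.monitors[1]     # first physical monitor; index 0 is the union of all
    shot = sct.grab(monitor)      # raw BGRA pixels plus size metadata
    print(shot.size)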
<|file_name|>hdfs_space_metric_server.py<|end_file_name|><|fim▁begin|>import asyncore, socket, logging, time, asynchat, os from hdfs_space_common import get_tree_from_cache, get_child_node, TreeNode FORMAT = '%(asctime)-15s: %(levelname)s %(module)s - %(funcName)s: %(message)s' logging.basicConfig(format=FORMAT, level=logging.WARNING) class ChatHandler(asynchat.async_chat): def __init__(self, sock): asynchat.async_chat.__init__(self, sock = sock) self.ibuffer = [] self.obuffer = '' self.set_terminator("\n") def collect_incoming_data(self, data): self.ibuffer.append(data) logging.info('Received data "%s"' % data) def found_terminator(self): self.handle_request() def handle_request(self): data = self.ibuffer.pop(0) #Data should be like:<|fim▁hole|>
 command = data.split(":")[0] if command == 'metric': metric_args = data.split(":")[1].split('|') hdfs_path = metric_args[0] if len(metric_args) > 0 else "/" user_name = metric_args[1] if len(metric_args) > 1 else "ALL" metric = metric_args[2] if len(metric_args) > 2 else "size" logging.debug('metric_args: %s' % metric_args) logging.debug('hdfs_path: %s' % hdfs_path) logging.debug('user_name: %s' % user_name) logging.debug('metric: %s' % metric) result = 0 if user_name == "ALL" and metric == 'size': logging.warning('Rather than using this script try command "hdfs dfs -du /"') elif user_name == "ALL" and metric == 'amount': logging.info('Calculating the metric') result = get_child_node(file_tree, hdfs_path).get_amount_for_all() else: if metric == "size": logging.info('Calculating the metric') result = get_child_node(file_tree, hdfs_path).get_size_by_user(user_name) elif metric == "amount": logging.info('Calculating the metric') result = get_child_node(file_tree, hdfs_path).get_amount_by_user(user_name) else: logging.warning("The metric %s not implemented yet" % metric) logging.info('The result is ready: %s. Pushing it to back' % result) self.push(str(result)) return elif command == 'db': file_path = data.split(":")[1] if os.path.exists(file_path): global file_tree file_tree = get_tree_from_cache(file_path) os.rename(file_path,MetricServer.db_path) logging.info('File %s renamed to %s' % (file_path, MetricServer.db_path)) self.push('OK') else: logging.warning('File %s could not be found. Doing nothing' % file_path) self.push('FAIL') else: logging.warning("The command %s not implemented yet" % command) self.push('FAIL') class MetricServer(asyncore.dispatcher): sock_path = '/tmp/hdfs_space.sock' db_path = '/tmp/hdfs_space.data' def __init__(self): asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM) self.set_reuse_addr() self.bind(self.sock_path) logging.info('Starting metric-server') self.listen(5) global file_tree try: file_tree = get_tree_from_cache(self.db_path) except KeyError as e: file_tree = TreeNode('') def handle_accept(self): pair = self.accept() if pair is not None: sock, addr = pair logging.info('Incoming connection') handler = ChatHandler(sock) def handle_close(self): self.close() logging.info('The socket is closed') def handle_expt(self): logging.info("OOB detected for %s" % self) if __name__ == '__main__': file_tree = None server = MetricServer() try: asyncore.loop() finally: if os.path.exists(server.sock_path): os.unlink(server.sock_path)<|fim▁end|>
#metric:path|user|size # OR #db:new_path
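A matching client sketch for the asynchat server above, following its line protocol: commands are newline-terminated, metric requests take the form metric:<path>|<user>|<size or amount>, and the server pushes back the bare result string. The socket path matches MetricServer.sock_path:

import socket

def query_metric(hdfs_path="/", user="ALL", metric="amount",
                 sock_path="/tmp/hdfs_space.sock"):
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(sock_path)
    try:
        s.sendall("metric:{}|{}|{}\n".format(hdfs_path, user, metric).encode())
        return s.recv(4096).decode()   # server pushes str(result) with no terminator
    finally:
        s.close()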
<|file_name|>account.go<|end_file_name|><|fim▁begin|>package tradier <|fim▁hole|>// from the Tradier API. type AccountService service func (s *AccountService) accountRequest(uri string) (*Account, *Response, error) { req, err := s.client.NewRequest("GET", uri, nil) if err != nil { return nil, nil, err } a := &Account{} resp, err := s.client.Do(req, a) if err != nil { return nil, resp, err } return a, resp, nil }<|fim▁end|>
// AccountService handles routes related to account inquiry
<|file_name|>MenuState.java<|end_file_name|><|fim▁begin|>package states; import java.awt.Color; import java.awt.Font; import java.awt.Graphics; import java.awt.image.BufferedImage;<|fim▁hole|>import database.LoadGame; import game.Game; import graphics.ButtonAction; import graphics.Text; import graphics.UIButton; import graphics.UIList; import graphics.UIScrollScreen; import loader.ImageLoader; public class MenuState extends State { /* Menu screen state it is the initial screen of the game it control the new button and the load button*/ //Main Menu private UIList menuButtons; private BufferedImage menuBackground; //Load Buttons Menu (second Screen) private UIScrollScreen loadScreen; private BufferedImage subMenuBackground; private boolean loadScreenMenu; //Selected Game Menu (Third Screen) private UIList loadSubMenu; private BufferedImage gameSelectedBackground; private boolean gameSelected; public MenuState(Game game) { super(game); State.loadMenuState = true; } @Override public UIList getUIButtons() { /*Control of active buttons*/ if (!gameSelected) { return menuButtons; } else { return loadSubMenu; } } @Override public UIScrollScreen getScreen() { /*control if scroll buttons are active*/ if (loadScreenMenu) return loadScreen; else return null; } @Override public void tick() { // If ESC is clicked on the menu screen then the game closes if(State.loadMenuState) { //loadMenuState is true then init menu screen initMenuScreen(); State.loadMenuState = false; } if (game.getKeyboard().mESC == true) { //If esc was pressed if (loadScreenMenu) { //Release loadScreen memory loadScreenMenu = false; loadScreen.getButtons().clear(); loadScreen = null; subMenuBackground = null; } else if(gameSelected) { //Release memory of the screen after choose a saved game gameSelected = false; loadSubMenu.getButtons().clear(); loadSubMenu = null; gameSelectedBackground = null; } else { // If esc was clicked on menu then close game game.stop(); } game.getKeyboard().mESC = false; } if(State.loadGame || State.newGame) // If load or new game true then it will change to gameState so release menu memory and changes state { menuButtons.getButtons().clear(); menuButtons = null; menuBackground = null; State.setCurrentState(game.getGameState()); } } @Override public void render(Graphics graph) { if(State.loadMenuState) // Make sure that only render after menu was loaded return; // Draw the menu background image and render the UI buttons graph.drawImage(menuBackground, 0, 0, game.getWidth(), game.getHeight(), null); menuButtons.render(graph); if (loadScreenMenu) { //Draw subMenu background and render buttons graph.drawImage(subMenuBackground, 0, 0, game.getWidth(), game.getHeight(), null); loadScreen.render(graph); } else if (gameSelected) { //Draw gameSelected background and render buttons graph.drawImage(gameSelectedBackground, 0, 0, game.getWidth(), game.getHeight(), null); loadSubMenu.render(graph); } } private void initMenuScreen() { /*Initialize the screen and buttons of the first menu screen*/ menuBackground = ImageLoader.loadImage("/background/menu_backgroud.png"); try { initMenuButtons(); } catch (Exception e) { e.printStackTrace(); } } private void initLoadScreen() { /*Initialize the screen and buttons of the second menu screen (list of saved games)*/ subMenuBackground = ImageLoader.loadImage("/background/submenu_background.png"); initLoadScreenButtons(); } private void initGameSelectedScreen() { /*Initialize the screen and of the third menu screen (game selected)*/ gameSelectedBackground = 
ImageLoader.loadImage("/background/load_submenu_background.png"); initGameSelectedButtons(); } private void initGameSelectedButtons() { /*Init buttons of the selected game load, delete and cancel*/ BufferedImage loadSaveButton[] = new BufferedImage[2]; BufferedImage deleteSaveButton[] = new BufferedImage[2]; BufferedImage cancelButton[] = new BufferedImage[2]; loadSubMenu = new UIList(); loadSaveButton[0] = ImageLoader.loadImage("/button/load_submenu_d.png"); loadSaveButton[1] = ImageLoader.loadImage("/button/load_submenu_s.png"); int buttonWidth = (int) (loadSaveButton[0].getWidth() * game.getScale()); int buttonHeight = (int) (loadSaveButton[0].getHeight() * game.getScale()); //Load a saved game loadSubMenu.getButtons().add(new UIButton((int) (50 * game.getScale()), (int)(300 * game.getScale()), buttonWidth, buttonHeight, loadSaveButton, -1, new ButtonAction() { @Override public void action() { State.loadGame = true; // Tells gameState to load a game game.getKeyboard().mESC = true; // Set esc true to release memory from this screen (GameSelected screen) } })); deleteSaveButton[0] = ImageLoader.loadImage("/button/delete_submenu_d.png"); deleteSaveButton[1] = ImageLoader.loadImage("/button/delete_submenu_s.png"); //Delete a saved game loadSubMenu.getButtons().add(new UIButton((int)(50 * game.getScale()), (int)(430 * game.getScale()), buttonWidth, buttonHeight, deleteSaveButton, -1, new ButtonAction() { @Override public void action() { try { DeleteGame.Delete(State.savedGames.get(lastButtonIndex).split(" ")[0]); //Get the name of the button pressed and removes from database } catch (Exception e) { e.printStackTrace(); } State.savedGames.clear(); //Clear database name loaded State.savedGames = null; game.getKeyboard().mESC = true; //Release memory from this screen (GameSelected screen) } })); cancelButton[0] = ImageLoader.loadImage("/button/cancel_submenu_d.png"); cancelButton[1] = ImageLoader.loadImage("/button/cancel_submenu_s.png"); //Cancel operation and goes back to the first menu screen loadSubMenu.getButtons().add(new UIButton((int)(50 * game.getScale()), (int)(550 * game.getScale()), buttonWidth, buttonHeight, cancelButton, -1, new ButtonAction() { @Override public void action() { State.savedGames.clear(); //Clear database name loaded State.savedGames = null; game.getKeyboard().mESC = true; //Release memory from this screen (GameSelected screen) } })); } private void initLoadScreenButtons() { /*Initialize all load screen buttons*/ BufferedImage loadScreenImage = ImageLoader.loadImage("/background/scrollScreen.png"); BufferedImage loadButton[] = new BufferedImage[2]; int scrollSpeed = 10; //Init load screen loadScreen = new UIScrollScreen(loadScreenImage, (int)(31 * game.getScale()), (int)(132 * game.getScale()), (int)(loadScreenImage.getWidth() * game.getScale()), (int)(loadScreenImage.getHeight() * game.getScale()), scrollSpeed); loadButton[0] = ImageLoader.loadImage("/button/submenu_button_d.png"); loadButton[1] = ImageLoader.loadImage("/button/submenu_button_s.png"); float buttonWidth = loadButton[0].getWidth() * game.getScale(); float buttonHeight = loadButton[0].getHeight() * game.getScale(); Font font = new Font("Castellar", Font.PLAIN, (int)(25 * game.getScale())); for (int i = 0, accumulator = (int) loadScreen.getScreen().getY(); (int) i < savedGames.size(); i++) { //Accumulator controls the button position on the screen String split[] = savedGames.get(i).split(" "); //split the name that came from the database float buttonX = (float) (loadScreen.getScreen().getX() + 3); Text 
text[] = new Text[2]; //Initialize both colors of the text and create the visible buttons text[0] = new Text("SaveGame " + (i+1) + " - " + split[split.length - 1], font, Color.black, (int) (buttonX - (25 * game.getScale()) + buttonWidth/4), accumulator + (int) (buttonHeight / 2)); text[1] = new Text("SaveGame " + (i+1) + " - " + split[split.length - 1], font, Color.white, (int) (buttonX - (25 * game.getScale()) + buttonWidth/4), accumulator + (int) (buttonHeight / 2)); loadScreen.getButtons().add(new UIButton((int) buttonX, accumulator, (int) (buttonWidth), (int) buttonHeight, loadButton, i, text, new ButtonAction() { public void action() { initGameSelectedScreen(); //Initialize gameSelect screen and buttons gameSelected = true; game.getKeyboard().mESC = true; // Select true to free memory used by the loadScreen } })); accumulator += (buttonHeight); } } private void initMenuButtons() throws Exception{ // Resize the button depending of the scale attribute of the game class BufferedImage[] buttonNewGame = new BufferedImage[2]; BufferedImage[] buttonLoadGame = new BufferedImage[2]; buttonNewGame[0] = ImageLoader.loadImage("/button/new_game.png"); buttonNewGame[1] = ImageLoader.loadImage("/button/new_game_b.png"); buttonLoadGame[0] = ImageLoader.loadImage("/button/load_game.png"); buttonLoadGame[1] = ImageLoader.loadImage("/button/load_game_b.png"); menuButtons = new UIList(); /* * Creates the load button and add to the UI button list, the first two * parameters has the position of the button on the screen it uses the * game.width to centralize the button and the game.height to control * the y position on the screen for every button a Button action is * defined when passing the argument, this way is possible to program * the button when creating it */ float buttonWidth = buttonLoadGame[0].getWidth() * game.getScale(); float buttonHeight = buttonLoadGame[0].getHeight() * game.getScale(); menuButtons.getButtons().add(new UIButton((int) ((game.getWidth() / 2) - (buttonWidth / 2)), (int) ((game.getHeight() - game.getHeight() / 3) + buttonHeight), (int) (buttonWidth), (int) buttonHeight, buttonLoadGame, -1, new ButtonAction() { public void action() { savedGames = new ArrayList<>(); try { savedGames = LoadGame.loadNames(); } catch (Exception e) { e.printStackTrace(); } initLoadScreen(); loadScreenMenu = true; } })); /* * Creates the game button and add to the UI button list, the first two * parameters has the position of the button on the screen it uses the * game.width to centralize the button and the game.height to control * the y position on the screen for every button a Button action is * defined when passing the argument, this way is possible to program * the button when creating it */ // Resize the button depending of the scale attribute of the game class buttonWidth = buttonNewGame[0].getWidth() * game.getScale(); buttonHeight = buttonNewGame[0].getHeight() * game.getScale(); menuButtons.getButtons() .add(new UIButton((int) ((game.getWidth() / 2) - (buttonWidth / 2)), (int) ((game.getHeight() - game.getHeight() / 3)), (int) (buttonWidth), (int) (buttonHeight), buttonNewGame, -1, new ButtonAction() { public void action() { State.newGame = true; } })); } }<|fim▁end|>
import java.util.ArrayList; import database.DeleteGame;
<|file_name|>debug.py<|end_file_name|><|fim▁begin|>from .. import x64dbg class HardwareType: HardwareAccess = x64dbg.HardwareAccess HardwareWrite = x64dbg.HardwareWrite HardwareExecute = x64dbg.HardwareExecute def Wait(): x64dbg.Wait() def Run(): x64dbg.Run() def Stop(): x64dbg.Stop() def StepIn(): x64dbg.StepIn()<|fim▁hole|>def StepOver(): x64dbg.StepOver() def StepOut(): x64dbg.StepOut() def SetBreakpoint(address): return x64dbg.SetBreakpoint(address) def DeleteBreakpoint(address): return x64dbg.DeleteBreakpoint(address) def SetHardwareBreakpoint(address, type = HardwareType.HardwareExecute): return x64dbg.SetHardwareBreakpoint(address, type) def DeleteHardwareBreakpoint(address): return x64dbg.DeleteHardwareBreakpoint(address)<|fim▁end|>
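The module above is a thin pass-through to the x64dbg Python bindings; a plausible composition into a script, with the call order and address purely illustrative:

import debug  # the wrapper module above; import path depends on the package layout

ENTRY = 0x00401000  # placeholder address

if debug.SetBreakpoint(ENTRY):
    debug.Run()                    # resume the debuggee
    debug.Wait()                   # block until the debugger stops again
    debug.StepOver()
    debug.DeleteBreakpoint(ENTRY)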
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>extern crate meowth; <|fim▁hole|> fn main() { let mut interpreter = Interpreter::new(); loop { print!("meowth :: "); let _ = stdout().flush(); let mut input = String::new(); match stdin().read_line(&mut input) { Ok(_) => { if input == String::from("exit\n") { break; } let expr_result = interpreter.eval(&input); match expr_result { Ok(exp) => println!(" => {}", exp), Err(err) => println!("Error: {}", err), } }, Err(e) => print!("error: {}", e) } let _ = stdout().flush(); } }<|fim▁end|>
use meowth::interpreter::Interpreter; use std::io::{Write, stdout, stdin};
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|>""" COMMAND-LINE SPECIFIC STUFF ============================================================================= """ import markdown import sys import optparse import logging from logging import DEBUG, INFO, CRITICAL logger = logging.getLogger('MARKDOWN') def parse_options(): """ Define and parse `optparse` options for command-line usage. """ usage = """%prog [options] [INPUTFILE] (STDIN is assumed if no INPUTFILE is given)""" desc = "A Python implementation of John Gruber's Markdown. " \ "http://www.freewisdom.org/projects/python-markdown/" ver = "%%prog %s" % markdown.version parser = optparse.OptionParser(usage=usage, description=desc, version=ver) parser.add_option("-f", "--file", dest="filename", default=sys.stdout, help="Write output to OUTPUT_FILE. Defaults to STDOUT.", metavar="OUTPUT_FILE") parser.add_option("-e", "--encoding", dest="encoding", help="Encoding for input and output files.",) parser.add_option("-q", "--quiet", default = CRITICAL, action="store_const", const=CRITICAL+10, dest="verbose", help="Suppress all warnings.") parser.add_option("-v", "--verbose", action="store_const", const=INFO, dest="verbose", help="Print all warnings.") parser.add_option("-s", "--safe", dest="safe", default=False, metavar="SAFE_MODE", help="'replace', 'remove' or 'escape' HTML tags in input") parser.add_option("-o", "--output_format", dest="output_format", default='xhtml1', metavar="OUTPUT_FORMAT", help="'xhtml1' (default), 'html4' or 'html5'.") parser.add_option("--noisy", action="store_const", const=DEBUG, dest="verbose", help="Print debug messages.") parser.add_option("-x", "--extension", action="append", dest="extensions", help = "Load extension EXTENSION.", metavar="EXTENSION") parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol", action='store_false', default=True, help="Observe number of first item of ordered lists.") (options, args) = parser.parse_args() if len(args) == 0: input_file = sys.stdin else: input_file = args[0] if not options.extensions: options.extensions = [] return {'input': input_file, 'output': options.filename,<|fim▁hole|> 'lazy_ol': options.lazy_ol}, options.verbose def run(): """Run Markdown from the command line.""" # Parse options and adjust logging level if necessary options, logging_level = parse_options() if not options: sys.exit(2) logger.setLevel(logging_level) logger.addHandler(logging.StreamHandler()) # Run markdown.markdownFromFile(**options) if __name__ == '__main__': # Support running module as a commandline command. # Python 2.5 & 2.6 do: `python -m markdown.__main__ [options] [args]`. # Python 2.7 & 3.x do: `python -m markdown [options] [args]`. run()<|fim▁end|>
'safe_mode': options.safe, 'extensions': options.extensions, 'encoding': options.encoding, 'output_format': options.output_format,
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import absolute_import import os import zipfile DEV_DATA_PATH = os.path.join( os.path.dirname(__file__), '..', 'dev_data', ) def data_path(*args): """ Returns a path to dev data """ return os.path.join(DEV_DATA_PATH, *args) def words100k(): zip_name = data_path('words100k.txt.zip')<|fim▁hole|> return txt.splitlines()<|fim▁end|>
zf = zipfile.ZipFile(zip_name) txt = zf.open(zf.namelist()[0]).read().decode('utf8')
<|file_name|>fb.go<|end_file_name|><|fim▁begin|>// Copyright 2019-2019 the u-root Authors. All rights reserved // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fb import ( "fmt" "image" "os" "github.com/orangecms/go-framebuffer/framebuffer" ) const fbdev = "/dev/fb0" func DrawOnBufAt( buf []byte, img image.Image, posx int, posy int, stride int, bpp int, ) { for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { r, g, b, a := img.At(x, y).RGBA() offset := bpp * ((posy+y)*stride + posx + x) // framebuffer is BGR(A) buf[offset+0] = byte(b) buf[offset+1] = byte(g) buf[offset+2] = byte(r) if bpp >= 4 { buf[offset+3] = byte(a) } } } } // FbInit initializes a framebuffer by querying ioctls and returns the width and // height in pixels, the stride, and the bytes per pixel func FbInit() (int, int, int, int, error) { fbo, err := framebuffer.Init(fbdev) if err != nil { return 0, 0, 0, 0, err } width, height := fbo.Size() stride := fbo.Stride() bpp := fbo.Bpp() fmt.Fprintf(os.Stdout, "Framebuffer resolution: %v %v %v %v\n", width, height, stride, bpp) return width, height, stride, bpp, nil } func DrawImageAt(img image.Image, posx int, posy int) error { width, height, stride, bpp, err := FbInit() if err != nil { fmt.Fprintf(os.Stderr, "Framebuffer init error: %v\n", err) // fallback values, probably a bad assumption width, height, stride, bpp = 1920, 1080, 1920*4, 4 } buf := make([]byte, width*height*bpp) DrawOnBufAt(buf, img, posx, posy, stride, bpp) err = os.WriteFile(fbdev, buf, 0o600) if err != nil { return fmt.Errorf("Error writing to framebuffer: %v", err) } return nil } func DrawScaledOnBufAt( buf []byte, img image.Image, posx int, posy int, factor int, stride int, bpp int, ) { for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { r, g, b, a := img.At(x, y).RGBA() for sx := 1; sx <= factor; sx++ {<|fim▁hole|>
 buf[offset+1] = byte(g) buf[offset+2] = byte(r) if bpp == 4 { buf[offset+3] = byte(a) } } } } } func DrawScaledImageAt(img image.Image, posx int, posy int, factor int) error { width, height, stride, bpp, err := FbInit() if err != nil { fmt.Fprintf(os.Stderr, "Framebuffer init error: %v\n", err) } buf := make([]byte, width*height*bpp) DrawScaledOnBufAt(buf, img, posx, posy, factor, stride, bpp) err = os.WriteFile(fbdev, buf, 0o600) if err != nil { return fmt.Errorf("Error writing to framebuffer: %v", err) } return nil }<|fim▁end|>
for sy := 1; sy <= factor; sy++ { offset := bpp * ((posy+y*factor+sy)*stride + posx + x*factor + sx) buf[offset+0] = byte(b)
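The indexing in the fb.go sample rewards a worked check: a pixel at (x, y) sits bpp * (y*stride + x) bytes into the buffer, with stride counted in pixels per row (which is what the formula assumes; the sample's fallback of 1920*4 treats it as bytes and is flagged there as a bad assumption). In Python:

def pixel_offset(x, y, stride, bpp):
    # Whole rows first, then columns within the row, times bytes per pixel.
    return bpp * (y * stride + x)

stride, bpp = 1920, 4                                  # illustrative BGRA setup
assert pixel_offset(0, 0, stride, bpp) == 0
assert pixel_offset(1, 0, stride, bpp) == 4            # next pixel: +bpp bytes
assert pixel_offset(0, 1, stride, bpp) == 4 * 1920     # next row: +bpp*stride bytes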
<|file_name|>perf_http.py<|end_file_name|><|fim▁begin|># # This file is part of Gruvi. Gruvi is free software available under the # terms of the MIT license. See the file "LICENSE" that was provided # together with this source file for the licensing terms. # # Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a # complete list. from __future__ import absolute_import, print_function, division <|fim▁hole|>from gruvi.http import HttpProtocol, HttpServer, HttpClient from support import PerformanceTest, MockTransport def hello_app(environ, start_response): headers = [('Content-Type', 'text/plain')] start_response('200 OK', headers) return [b'Hello!'] class PerfHttp(PerformanceTest): def perf_parsing_speed(self): transport = MockTransport() protocol = HttpProtocol() transport.start(protocol) r = b'HTTP/1.1 200 OK\r\nContent-Length: 10000\r\n\r\n' r += b'x' * 10000 reqs = 4 * r nbytes = 0 t0 = t1 = time.time() while t1 - t0 < 1.0: protocol.data_received(reqs) del protocol._queue._heap[:] nbytes += len(reqs) t1 = time.time() speed = nbytes / (t1 - t0) / (1024 * 1024) self.add_result(speed) def perf_server_throughput(self): server = HttpServer(hello_app) server.listen(('localhost', 0)) addr = server.addresses[0] client = HttpClient() client.connect(addr) nrequests = 0 pipeline = 10 t0 = t1 = time.time() while t1 - t0 < 1.0: for i in range(pipeline): client.request('GET', '/') for i in range(pipeline): resp = client.getresponse() self.assertEqual(resp.body.read(), b'Hello!') nrequests += pipeline t1 = time.time() throughput = nrequests / (t1 - t0) self.add_result(throughput) server.close() client.close() if __name__ == '__main__': unittest.defaultTestLoader.testMethodPrefix = 'perf' unittest.main()<|fim▁end|>
import time import unittest
<|file_name|>Template.js<|end_file_name|><|fim▁begin|>"use strict"; var Template = function (options) { this._pageTitle = ''; this._titleSeparator = options.title_separator; this._siteTitle = options.site_title; this._req = null; this._res = null; }; Template.prototype.bindMiddleware = function(req, res) { this._req = req; this._res = res; }; Template.prototype.setPageTitle = function(pageTitle) { this._pageTitle = pageTitle; }; Template.prototype.setSiteTitle = function(siteTitle) {<|fim▁hole|> Template.prototype.setTitleSeparator = function(separator) { this._titleSeparator = separator; }; Template.prototype.getTitle = function() { if (this._pageTitle !== '') { return this._pageTitle + ' ' + this._titleSeparator + ' ' + this._siteTitle; } else { return this._siteTitle; } }; Template.prototype.getPageTitle = function() { return this._pageTitle; }; Template.prototype.getSiteTitle = function() { return this._siteTitle; }; Template.prototype.render = function(path, params) { this._res.render('partials/' + path, params); }; module.exports = Template;<|fim▁end|>
this._siteTitle = siteTitle; };
<|file_name|>CreateUseCaseResultJsonUnmarshaller.java<|end_file_name|><|fim▁begin|>/* * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.connect.model.transform; import java.math.*; import javax.annotation.Generated; import com.amazonaws.services.connect.model.*; import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*; import com.amazonaws.transform.*; import com.fasterxml.jackson.core.JsonToken; import static com.fasterxml.jackson.core.JsonToken.*; /** * CreateUseCaseResult JSON Unmarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class CreateUseCaseResultJsonUnmarshaller implements Unmarshaller<CreateUseCaseResult, JsonUnmarshallerContext> { public CreateUseCaseResult unmarshall(JsonUnmarshallerContext context) throws Exception { CreateUseCaseResult createUseCaseResult = new CreateUseCaseResult(); int originalDepth = context.getCurrentDepth(); String currentParentElement = context.getCurrentParentElement(); int targetDepth = originalDepth + 1; JsonToken token = context.getCurrentToken(); if (token == null) token = context.nextToken(); if (token == VALUE_NULL) { return createUseCaseResult; } while (true) { if (token == null) break; if (token == FIELD_NAME || token == START_OBJECT) {<|fim▁hole|> context.nextToken(); createUseCaseResult.setUseCaseId(context.getUnmarshaller(String.class).unmarshall(context)); } if (context.testExpression("UseCaseArn", targetDepth)) { context.nextToken(); createUseCaseResult.setUseCaseArn(context.getUnmarshaller(String.class).unmarshall(context)); } } else if (token == END_ARRAY || token == END_OBJECT) { if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) { if (context.getCurrentDepth() <= originalDepth) break; } } token = context.nextToken(); } return createUseCaseResult; } private static CreateUseCaseResultJsonUnmarshaller instance; public static CreateUseCaseResultJsonUnmarshaller getInstance() { if (instance == null) instance = new CreateUseCaseResultJsonUnmarshaller(); return instance; } }<|fim▁end|>
if (context.testExpression("UseCaseId", targetDepth)) {
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url from testapp.api import PersonResource from django.contrib import admin admin.autodiscover() person_resource = PersonResource() <|fim▁hole|>urlpatterns = patterns('', # Examples: # url(r'^$', 'testapp.views.home', name='home'), # url(r'^blog/', include('blog.urls')), url(r'^admin/', include(admin.site.urls)), (r'^api/', include(person_resource.urls)) )<|fim▁end|>
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#![deny( missing_debug_implementations, missing_copy_implementations, trivial_casts, trivial_numeric_casts, unused_import_braces, unused_qualifications, unsafe_code, dead_code, unused_results, )] extern crate clap; extern crate rand; extern crate time; extern crate ctrlc; extern crate serde; extern crate serde_json; extern crate websocket; mod options; pub mod math; pub mod message; pub mod server; use websocket::Client; use websocket::client::request::Url; use std::sync::{Arc, RwLock}; use std::sync::mpsc::channel; use server::{listen, start_game_loop}; pub use options::Options; fn main() { let opts = Options::parse(); let cont = Arc::new(RwLock::new(true)); { let host = opts.host.clone(); let port = opts.port;<|fim▁hole|> let cont = cont.clone(); ctrlc::set_handler(move || { println!("Ctrl+C received, terminating..."); *cont.write().unwrap() = false; let _ = Client::connect(Url::parse(&format!("ws://{}:{}", host, port)[..]).unwrap()); }); } // Create the channel which will allow the game loop to recieve messages. let (tx, rx) = channel(); let game_loop_handle = start_game_loop(rx, &cont); listen(&opts.host, opts.port, tx, &cont); if let Err(error) = game_loop_handle.join() { println!("Game loop thread failed: {:?}", error); } }<|fim▁end|>
<|file_name|>noexporttypeexe.rs<|end_file_name|><|fim▁begin|>// aux-build:noexporttypelib.rs extern crate noexporttypelib;<|fim▁hole|> // because the def_id associated with the type was // not convertible to a path. let x: isize = noexporttypelib::foo(); //~^ ERROR mismatched types //~| expected type `isize` //~| found enum `Option<isize>` //~| expected `isize`, found enum `Option` }<|fim▁end|>
fn main() { // Here, the type returned by foo() is not exported. // This used to cause internal errors when serializing
<|file_name|>tts.py<|end_file_name|><|fim▁begin|>"""Support for the cloud for text to speech service.""" from hass_nabucasa import Cloud from hass_nabucasa.voice import MAP_VOICE, VoiceError import voluptuous as vol from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider <|fim▁hole|>CONF_GENDER = "gender" SUPPORT_LANGUAGES = list({key[0] for key in MAP_VOICE}) def validate_lang(value): """Validate chosen gender or language.""" lang = value.get(CONF_LANG) if lang is None: return value gender = value.get(CONF_GENDER) if gender is None: gender = value[CONF_GENDER] = next( (chk_gender for chk_lang, chk_gender in MAP_VOICE if chk_lang == lang), None ) if (lang, gender) not in MAP_VOICE: raise vol.Invalid("Unsupported language and gender specified.") return value PLATFORM_SCHEMA = vol.All( PLATFORM_SCHEMA.extend( { vol.Optional(CONF_LANG): str, vol.Optional(CONF_GENDER): str, } ), validate_lang, ) async def async_get_engine(hass, config, discovery_info=None): """Set up Cloud speech component.""" cloud: Cloud = hass.data[DOMAIN] if discovery_info is not None: language = None gender = None else: language = config[CONF_LANG] gender = config[CONF_GENDER] return CloudProvider(cloud, language, gender) class CloudProvider(Provider): """NabuCasa Cloud speech API provider.""" def __init__(self, cloud: Cloud, language: str, gender: str) -> None: """Initialize cloud provider.""" self.cloud = cloud self.name = "Cloud" self._language = language self._gender = gender if self._language is not None: return self._language, self._gender = cloud.client.prefs.tts_default_voice cloud.client.prefs.async_listen_updates(self._sync_prefs) async def _sync_prefs(self, prefs): """Sync preferences.""" self._language, self._gender = prefs.tts_default_voice @property def default_language(self): """Return the default language.""" return self._language @property def supported_languages(self): """Return list of supported languages.""" return SUPPORT_LANGUAGES @property def supported_options(self): """Return list of supported options like voice, emotion.""" return [CONF_GENDER] @property def default_options(self): """Return a dict include default options.""" return {CONF_GENDER: self._gender} async def async_get_tts_audio(self, message, language, options=None): """Load TTS from NabuCasa Cloud.""" # Process TTS try: data = await self.cloud.voice.process_tts( message, language, gender=options[CONF_GENDER] ) except VoiceError: return (None, None) return ("mp3", data)<|fim▁end|>
from .const import DOMAIN
<|file_name|>ua.js<|end_file_name|><|fim▁begin|>(function ($) { $.Redactor.opts.langs['ua'] = { html: 'Код', video: 'Відео', image: 'Зображення', table: 'Таблиця', link: 'Посилання', link_insert: 'Вставити посилання ...', link_edit: 'Edit link', unlink: 'Видалити посилання', formatting: 'Стилі', paragraph: 'Звичайний текст', quote: 'Цитата', code: 'Код', header1: 'Заголовок 1', header2: 'Заголовок 2', header3: 'Заголовок 3', header4: 'Заголовок 4', bold: 'Жирний', italic: 'Похилий', fontcolor: 'Колір тексту', backcolor: 'Заливка тексту', unorderedlist: 'Звичайний список', orderedlist: 'Нумерований список', outdent: 'Зменшити відступ', indent: 'Збільшити відступ',<|fim▁hole|> insert_table: 'Вставити таблицю', insert_row_above: 'Додати рядок зверху', insert_row_below: 'Додати рядок знизу', insert_column_left: 'Додати стовпець ліворуч', insert_column_right: 'Додати стовпець праворуч', delete_column: 'Видалити стовпець', delete_row: 'Видалити рядок', delete_table: 'Видалити таблицю', rows: 'Рядки', columns: 'Стовпці', add_head: 'Додати заголовок', delete_head: 'Видалити заголовок', title: 'Підказка', image_view: 'Завантажити зображення', image_position: 'Обтікання текстом', none: 'ні', left: 'ліворуч', right: 'праворуч', image_web_link: 'Посилання на зображення', text: 'Текст', mailto: 'Ел. пошта', web: 'URL', video_html_code: 'Код відео ролика', file: 'Файл', upload: 'Завантажити', download: 'Завантажити', choose: 'Вибрати', or_choose: 'Або виберіть', drop_file_here: 'Перетягніть файл сюди', align_left: 'По лівому краю', align_center: 'По центру', align_right: 'По правому краю', align_justify: 'Вирівняти текст по ширині', horizontalrule: 'Горизонтальная лінійка', fullscreen: 'На весь екран', deleted: 'Закреслений', anchor: 'Anchor', link_new_tab: 'Open link in new tab', underline: 'Underline', alignment: 'Alignment', filename: 'Name (optional)' }; })( jQuery );<|fim▁end|>
cancel: 'Скасувати', insert: 'Вставити', save: 'Зберегти', _delete: 'Видалити',
<|file_name|>build_project.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import argparse from os import path as os_path import demo_project as demo import traceback def set_host_url_arg(): parser.add_argument('--host', required=True, help='the url for the Materials Commons server') def set_datapath_arg(): parser.add_argument('--datapath', required=True, help='the path to the directory containing the files used by the build') def set_apikey_arg(): parser.add_argument('--apikey', required=True, help='rapikey for the user building the demo project') parser = argparse.ArgumentParser(description='Build Demo Project.') set_host_url_arg() set_datapath_arg() set_apikey_arg() args = parser.parse_args() <|fim▁hole|>host = args.host path = os_path.abspath(args.datapath) key = args.apikey # log_messages # print "Running script to build demo project: " # print " host = " + host + ", " # print " key = " + key + ", " # print " path = " + path try: builder = demo.DemoProject(host, path, key) # a basic get request that makes no changes; will fail if there is a problem with the host or key flag = builder.does_project_exist() project = builder.build_project() if flag: print "Refreshed project with name = " + project.name else: print "Built project with name = " + project.name except Exception as err: traceback.print_exc() print 'Error: ', err<|fim▁end|>
<|file_name|>mock_alarm.rs<|end_file_name|><|fim▁begin|>// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. pub struct MockAlarm { current_time: core::cell::Cell<kernel::hil::time::Ticks32>, setpoint: core::cell::Cell<Option<kernel::hil::time::Ticks32>>, } impl MockAlarm { pub fn new() -> MockAlarm { MockAlarm { current_time: core::cell::Cell::new(0.into()), setpoint: core::cell::Cell::new(Some(0.into())), } } pub fn set_time(&self, new_time: kernel::hil::time::Ticks32) { self.current_time.set(new_time); } } impl kernel::hil::time::Time for MockAlarm { type Frequency = h1::timels::Freq256Khz; type Ticks = kernel::hil::time::Ticks32;<|fim▁hole|> fn now(&self) -> Self::Ticks { self.current_time.get() } } impl<'a> kernel::hil::time::Alarm<'a> for MockAlarm { fn set_alarm(&self, reference: Self::Ticks, dt: Self::Ticks) { use kernel::hil::time::Ticks; self.setpoint.set(Some(reference.wrapping_add(dt))); } fn get_alarm(&self) -> Self::Ticks { self.setpoint.get().unwrap_or(0.into()) } // Ignored -- the test should manually trigger the client. fn set_alarm_client(&'a self, _client: &'a dyn kernel::hil::time::AlarmClient) {} fn is_armed(&self) -> bool { self.setpoint.get().is_some() } fn disarm(&self) -> kernel::ReturnCode { self.setpoint.set(None); kernel::ReturnCode::SUCCESS } fn minimum_dt(&self) -> Self::Ticks { 1.into() } }<|fim▁end|>
<|file_name|>shipping.py<|end_file_name|><|fim▁begin|>from django.shortcuts import redirect from django.template.response import TemplateResponse from ..forms import AnonymousUserShippingForm, ShippingAddressesForm from ...userprofile.forms import get_address_form from ...userprofile.models import Address from ...teamstore.utils import get_team def anonymous_user_shipping_address_view(request, checkout): team = get_team(request.session['team']) if team.group_shipping: address_form, preview = get_address_form( request.POST or None, country_code=request.country.code, autocomplete_type='shipping', initial={'country': request.country.code}, instance=team.shipping_address) else: address_form, preview = get_address_form( request.POST or None, country_code=request.country.code, autocomplete_type='shipping', initial={'country': request.country.code}, instance=checkout.shipping_address) user_form = AnonymousUserShippingForm( not preview and request.POST or None, initial={'email': checkout.email} if not preview else request.POST.dict()) if team.group_shipping and user_form.is_valid(): checkout.shipping_address = team.shipping_address checkout.email = user_form.cleaned_data['email'] return redirect('checkout:shipping-method') elif all([user_form.is_valid(), address_form.is_valid()]): checkout.shipping_address = address_form.instance checkout.email = user_form.cleaned_data['email'] return redirect('checkout:shipping-method') return TemplateResponse( request, 'checkout/shipping_address.html', context={ 'address_form': address_form, 'user_form': user_form, 'group_shipping': team.group_shipping, 'checkout': checkout}) def user_shipping_address_view(request, checkout): data = request.POST or None additional_addresses = request.user.addresses.all() checkout.email = request.user.email shipping_address = checkout.shipping_address if shipping_address is not None and shipping_address.id: address_form, preview = get_address_form( data, country_code=request.country.code, initial={'country': request.country})<|fim▁hole|> address_form, preview = get_address_form( data, country_code=shipping_address.country.code, instance=shipping_address) addresses_form = ShippingAddressesForm( data, additional_addresses=additional_addresses) else: address_form, preview = get_address_form( data, initial={'country': request.country}, country_code=request.country.code) addresses_form = ShippingAddressesForm( data, additional_addresses=additional_addresses) if addresses_form.is_valid() and not preview: if addresses_form.cleaned_data['address'] != ShippingAddressesForm.NEW_ADDRESS: address_id = addresses_form.cleaned_data['address'] checkout.shipping_address = Address.objects.get(id=address_id) return redirect('checkout:shipping-method') elif address_form.is_valid(): checkout.shipping_address = address_form.instance return redirect('checkout:shipping-method') return TemplateResponse( request, 'checkout/shipping_address.html', context={ 'address_form': address_form, 'user_form': addresses_form, 'checkout': checkout, 'additional_addresses': additional_addresses})<|fim▁end|>
addresses_form = ShippingAddressesForm( data, additional_addresses=additional_addresses, initial={'address': shipping_address.id}) elif shipping_address:
<|file_name|>app.js<|end_file_name|><|fim▁begin|>// File: chapter14/appUnderTest/app/scripts/app.js angular.module('fifaApp', ['ngRoute']) .config(function($routeProvider) { $routeProvider.when('/', { templateUrl: 'views/team_list.html', controller: 'TeamListCtrl as teamListCtrl' }) .when('/login', { templateUrl: 'views/login.html', }) .when('/team/:code', { templateUrl: 'views/team_details.html', controller:'TeamDetailsCtrl as teamDetailsCtrl',<|fim▁hole|> return UserService.session().then( function(success) {}, function(err) { $location.path('/login'); return $q.reject(err); }); }] } }); $routeProvider.otherwise({ redirectTo: '/' }); });<|fim▁end|>
resolve: { auth: ['$q', '$location', 'UserService', function($q, $location, UserService) {
<|file_name|>find_active_cell_around_point_01.cc<|end_file_name|><|fim▁begin|>// --------------------------------------------------------------------- // // Copyright (C) 2009 - 2014 by the deal.II authors // // This file is part of the deal.II library. // // The deal.II library is free software; you can use it, redistribute // it, and/or modify it under the terms of the GNU Lesser General // Public License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // The full text of the license can be found in the file LICENSE at // the top level of the deal.II distribution. // // --------------------------------------------------------------------- // make sure only one processor finds a locally-owned cell around a point #include "../tests.h" #include "coarse_grid_common.h" #include <deal.II/base/logstream.h> #include <deal.II/base/tensor.h> #include <deal.II/grid/tria.h> #include <deal.II/distributed/tria.h> #include <deal.II/grid/tria_accessor.h> #include <deal.II/grid/grid_generator.h> #include <deal.II/grid/grid_out.h> #include <deal.II/grid/grid_tools.h> #include <deal.II/base/utilities.h> #include <fstream> template<int dim> void test() { unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD); if (true) { if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0) deallog << "hyper_cube" << std::endl; parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD); GridGenerator::hyper_cube(tr); tr.refine_global(2); // choose a point that is guaranteed to lie in the domain but not // at the interface between cells Point<dim> p; for (unsigned int d=0; d<dim; ++d) p[d] = 1./3; typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = GridTools::find_active_cell_around_point (tr, p); const unsigned int n_locally_owned = Utilities::MPI::sum (cell->is_locally_owned() ? 1 : 0, MPI_COMM_WORLD); const unsigned int n_locally_owned_or_ghost = Utilities::MPI::sum (!cell->is_artificial() ? 1 : 0, MPI_COMM_WORLD); if (myid == 0) deallog << "Locally owned: " << n_locally_owned << std::endl << "Locally owned or ghost: " << n_locally_owned_or_ghost << std::endl; } } int main(int argc, char *argv[]) { Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD); deallog.push(Utilities::int_to_string(myid)); if (myid == 0) { std::ofstream logfile("output"); deallog.attach(logfile);<|fim▁hole|> deallog.push("2d"); test<2>(); deallog.pop(); deallog.push("3d"); test<3>(); deallog.pop(); } else { test<2>(); test<3>(); } }<|fim▁end|>
deallog.depth_console(0); deallog.threshold_double(1.e-10);
<|file_name|>color-button.hpp<|end_file_name|><|fim▁begin|>/* * This file is part of BlendInt (a Blender-like Interface Library in * OpenGL). * * BlendInt (a Blender-like Interface Library in OpenGL) is free * software: you can redistribute it and/or modify it under the terms * of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your * option) any later version. * * BlendInt (a Blender-like Interface Library in OpenGL) is * distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General * Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BlendInt. If not, see * <http://www.gnu.org/licenses/>. * * Contributor(s): Freeman Zhang <[email protected]> */ #pragma once #include <blendint/opengl/gl-buffer.hpp> #include <blendint/core/color.hpp> #include <blendint/gui/abstract-button.hpp> #include <blendint/gui/color-selector.hpp> namespace BlendInt { /** * @brief The most common button class * * @ingroup blendint_gui_widgets_buttons */ class ColorButton: public AbstractButton { DISALLOW_COPY_AND_ASSIGN(ColorButton); public: ColorButton (); virtual ~ColorButton (); void SetColor (const Color& color); virtual bool IsExpandX () const override; virtual Size GetPreferredSize () const override; protected: virtual void PerformSizeUpdate (const AbstractView* source, const AbstractView* target, int width, int height) final; virtual void PerformRoundTypeUpdate (int round_type) final; virtual void PerformRoundRadiusUpdate (float radius) final; virtual void PerformHoverIn (AbstractWindow* context) final; virtual void PerformHoverOut (AbstractWindow* context) final; virtual Response Draw (AbstractWindow* context) final; private: void InitializeColorButton (); void OnClick (); void OnSelectorDestroyed (AbstractFrame* sender); <|fim▁hole|> GLBuffer<ARRAY_BUFFER, 2> vbo_; Color color0_; Color color1_; ColorSelector* selector_; }; }<|fim▁end|>
GLuint vao_[2];
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import epics from './src/metricsEpics'; import { reducer } from './src/metricsReducer'; import * as selector from './src/metricsSelectors'; import * as constants from './src/metricsConstants'; import Metrics from './src/metricsPage';<|fim▁hole|> export default { name: 'Metrics', page: Metrics, menu: MetricsMenu, store: { name: 'metrics', epics, reducer, selector, constants } };<|fim▁end|>
import { MetricsMenu } from './src/metricsMenu';
<|file_name|>pcg32_tests.rs<|end_file_name|><|fim▁begin|>extern crate pcg_rand; extern crate rand; use pcg_rand::seeds::PcgSeeder; use pcg_rand::Pcg32; use rand::{distributions::Alphanumeric, thread_rng, Rng, SeedableRng}; const NUM_TESTS: usize = 1000; #[test] fn pcg32_unseeded() { let mut ra: Pcg32 = Pcg32::new_unseeded(); let mut rb: Pcg32 = Pcg32::new_unseeded(); assert_eq!( ra.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>(), rb.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>() ); } #[test] fn pcg32_seed_match() { for _ in 0..NUM_TESTS { let seed: u64 = thread_rng().gen(); let seq: u64 = thread_rng().gen(); let s = PcgSeeder::seed_with_stream(seed, seq); let mut ra: Pcg32 = SeedableRng::from_seed(s.clone()); let mut rb: Pcg32 = SeedableRng::from_seed(s); assert_eq!( ra.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>(), rb.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>() ); } } #[test] fn pcg32_seq_diff() { for _ in 0..NUM_TESTS { //Test a bad case same seed with just slightly different //sequences. Because sequences have to be odd only sequences that are 2 apart //are for sure going to be different. let seed: u64 = thread_rng().gen();<|fim▁hole|> ra.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>() != rb.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>() ); } } #[test] fn pcg32_seed_diff() { for _ in 0..NUM_TESTS { //Test a bad case same seed with just slightly different //seeds let seed: u64 = thread_rng().gen(); let seq: u64 = thread_rng().gen(); let mut ra: Pcg32 = Pcg32::from_seed(PcgSeeder::seed_with_stream(seed, seq)); let mut rb: Pcg32 = Pcg32::from_seed(PcgSeeder::seed_with_stream(seed + 1, seq)); assert!( ra.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>() != rb.sample_iter(&Alphanumeric).take(100).collect::<Vec<_>>() ); } }<|fim▁end|>
let seq: u64 = thread_rng().gen(); let mut ra: Pcg32 = Pcg32::from_seed(PcgSeeder::seed_with_stream(seed, seq)); let mut rb: Pcg32 = Pcg32::from_seed(PcgSeeder::seed_with_stream(seed, seq + 2)); assert!(
<|file_name|>darwin.py<|end_file_name|><|fim▁begin|>"""engine.SCons.Platform.darwin Platform-specific initialization for Mac OS X systems. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Platform.Platform() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY<|fim▁hole|># WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Platform/darwin.py 5023 2010/06/14 22:05:46 scons" import posix def generate(env): posix.generate(env) env['SHLIBSUFFIX'] = '.dylib' env['ENV']['PATH'] = env['ENV']['PATH'] + ':/sw/bin' # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:<|fim▁end|>
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
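The darwin.py row appends the Fink directory /sw/bin to PATH unconditionally. A hedged Python sketch of the same platform-override pattern, with the small design change of only adding the directory when it exists; the env layout mimics the SCons-style dict shown above:

import os

def extend_path(env: dict, extra: str = "/sw/bin") -> None:
    # Append an extra search directory, but only if it is present on disk.
    if os.path.isdir(extra):
        env["ENV"]["PATH"] = env["ENV"]["PATH"] + os.pathsep + extra

env = {"ENV": {"PATH": "/usr/bin:/bin"}}
extend_path(env)  # harmless no-op on systems without Fink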
<|file_name|>04.1.Cameras.c++<|end_file_name|><|fim▁begin|>/* * * Copyright (C) 2000 Silicon Graphics, Inc. All Rights Reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * Further, this software is distributed without any warranty that it is * free of the rightful claim of any third person regarding infringement * or the like. Any license provided herein, whether implied or * otherwise, applies only to this software file. Patent licenses, if * any, provided herein do not apply to combinations of this program with * other software, or any other product whatsoever. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, * Mountain View, CA 94043, or: * * http://www.sgi.com * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan/ * */ /*-------------------------------------------------------------- * This is an example from the Inventor Mentor, * chapter 4, example 1. * * Camera example. * A blinker node is used to switch between three * different views of the same scene. The cameras are * switched once per second. *------------------------------------------------------------*/ #include <cstdlib> #include <Inventor/SbLinear.h> #include <Inventor/SoDB.h> #include <Inventor/SoInput.h> #include <Inventor/Xt/SoXt.h> #include <Inventor/Xt/SoXtRenderArea.h> #include <Inventor/nodes/SoBlinker.h> #include <Inventor/nodes/SoDirectionalLight.h> #include <Inventor/nodes/SoMaterial.h> #include <Inventor/nodes/SoOrthographicCamera.h> #include <Inventor/nodes/SoPerspectiveCamera.h> #include <Inventor/nodes/SoSeparator.h> #include <Inventor/nodes/SoTransform.h> int main(int, char **argv) { // Initialize Inventor and Xt Widget myWindow = SoXt::init(argv[0]); if (myWindow == NULL) exit(1); SoSeparator *root = new SoSeparator; root->ref(); // Create a blinker node and put it in the scene. A blinker // switches between its children at timed intervals. SoBlinker *myBlinker = new SoBlinker; root->addChild(myBlinker); // Create three cameras. Their positions will be set later. // This is because the viewAll method depends on the size // of the render area, which has not been created yet. 
SoOrthographicCamera *orthoViewAll =new SoOrthographicCamera; SoPerspectiveCamera *perspViewAll = new SoPerspectiveCamera; SoPerspectiveCamera *perspOffCenter =new SoPerspectiveCamera; myBlinker->addChild(orthoViewAll); myBlinker->addChild(perspViewAll); myBlinker->addChild(perspOffCenter); // Create a light root->addChild(new SoDirectionalLight); // Read the object from a file and add to the scene SoInput myInput; if (!myInput.openFile("/usr/share/src/Inventor/examples/data/parkbench.iv")) exit (1); SoSeparator *fileContents = SoDB::readAll(&myInput); if (fileContents == NULL) exit (1); SoMaterial *myMaterial = new SoMaterial; myMaterial->diffuseColor.setValue(0.8, 0.23, 0.03); root->addChild(myMaterial); root->addChild(fileContents); SoXtRenderArea *myRenderArea = new SoXtRenderArea(myWindow);<|fim▁hole|> // Establish camera positions. // First do a viewAll on all three cameras. // Then modify the position of the off-center camera. SbViewportRegion myRegion(myRenderArea->getSize()); orthoViewAll->viewAll(root, myRegion); perspViewAll->viewAll(root, myRegion); perspOffCenter->viewAll(root, myRegion); SbVec3f initialPos; initialPos = perspOffCenter->position.getValue(); float x, y, z; initialPos.getValue(x,y,z); perspOffCenter->position.setValue(x+x/2., y+y/2., z+z/4.); myRenderArea->setSceneGraph(root); myRenderArea->setTitle("Cameras"); myRenderArea->show(); SoXt::show(myWindow); SoXt::mainLoop(); }<|fim▁end|>
<|file_name|>connection.js<|end_file_name|><|fim▁begin|>/** * Module dependencies. */ var Connection = require('../../connection') , mongo = require('mongodb') , Server = mongo.Server , ReplSetServers = mongo.ReplSetServers; /** * Connection for mongodb-native driver * * @api private */ function NativeConnection() { Connection.apply(this, arguments); }; /** * Inherits from Connection. */ NativeConnection.prototype.__proto__ = Connection.prototype; <|fim▁hole|> * Opens the connection. * * Example server options: * auto_reconnect (default: false) * poolSize (default: 1) * * Example db options: * pk - custom primary key factory to generate `_id` values * * Some of these may break Mongoose. Use at your own risk. You have been warned. * * @param {Function} callback * @api private */ NativeConnection.prototype.doOpen = function (fn) { var server; if (!this.db) { server = new mongo.Server(this.host, Number(this.port), this.options.server); this.db = new mongo.Db(this.name, server, this.options.db); } this.db.open(fn); return this; }; /** * Opens a set connection * * See description of doOpen for server options. In this case options.replset * is also passed to ReplSetServers. Some additional options there are * * reconnectWait (default: 1000) * retries (default: 30) * rs_name (default: false) * read_secondary (default: false) Are reads allowed from secondaries? * * @param {Function} fn * @api private */ NativeConnection.prototype.doOpenSet = function (fn) { if (!this.db) { var servers = [] , ports = this.port , self = this this.host.forEach(function (host, i) { servers.push(new mongo.Server(host, Number(ports[i]), self.options.server)); }); var server = new ReplSetServers(servers, this.options.replset); this.db = new mongo.Db(this.name, server, this.options.db); } this.db.open(fn); return this; }; /** * Closes the connection * * @param {Function} callback * @api private */ NativeConnection.prototype.doClose = function (fn) { this.db.close(); if (fn) fn(); return this; } /** * Module exports. */ module.exports = NativeConnection;<|fim▁end|>
/**
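The connection.js row documents replica-set options (rs_name, read_secondary, reconnectWait, and so on) for the legacy mongodb-native driver. As a rough modern counterpart, a PyMongo sketch; the host names and the replica-set name are placeholders, not values taken from the file:

from pymongo import MongoClient

client = MongoClient(
    "mongodb://db1.example.com:27017,db2.example.com:27017/mydb",
    replicaSet="rs0",                     # plays the role of rs_name
    readPreference="secondaryPreferred",  # plays the role of read_secondary
)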
<|file_name|>hrtb-perfect-forwarding.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test a case where you have an impl of `Foo<X>` for all `X` that // is being applied to `for<'a> Foo<&'a mut X>`. Issue #19730. trait Foo<X> { fn foo(&mut self, x: X) { } } trait Bar<X> { fn bar(&mut self, x: X) { } } impl<'a,X,F> Foo<X> for &'a mut F where F : Foo<X> + Bar<X> { } impl<'a,X,F> Bar<X> for &'a mut F where F : Bar<X> { } fn no_hrtb<'b,T>(mut t: T) where T : Bar<&'b isize> { // OK -- `T : Bar<&'b isize>`, and thus the impl above ensures that // `&mut T : Bar<&'b isize>`. no_hrtb(&mut t); } fn bar_hrtb<T>(mut t: T) where T : for<'b> Bar<&'b isize> { // OK -- `T : for<'b> Bar<&'b isize>`, and thus the impl above // ensures that `&mut T : for<'b> Bar<&'b isize>`. This is an // example of a "perfect forwarding" impl. bar_hrtb(&mut t); } fn foo_hrtb_bar_not<'b,T>(mut t: T) where T : for<'a> Foo<&'a isize> + Bar<&'b isize> { // Not OK -- The forwarding impl for `Foo` requires that `Bar` also // be implemented. Thus to satisfy `&mut T : for<'a> Foo<&'a // isize>`, we require `T : for<'a> Bar<&'a isize>`, but the where // clause only specifies `T : Bar<&'b isize>`. foo_hrtb_bar_not(&mut t); //~ ERROR `for<'a> Bar<&'a isize>` is not implemented for the type `T` } fn foo_hrtb_bar_hrtb<T>(mut t: T) where T : for<'a> Foo<&'a isize> + for<'b> Bar<&'b isize> { // OK -- now we have `T : for<'b> Bar&'b isize>`.<|fim▁hole|> foo_hrtb_bar_hrtb(&mut t); } fn main() { }<|fim▁end|>
<|file_name|>wrappedAndRecursiveConstraints.js<|end_file_name|><|fim▁begin|><|fim▁hole|> constructor(public data: T) { } foo<U extends T>(x: U) { return x; } } interface Foo extends Date { foo: string; } var y: Foo = null; var c = new C(y); var r = c.foo(y); //// [wrappedAndRecursiveConstraints.js] // no errors expected var C = /** @class */ (function () { function C(data) { this.data = data; } C.prototype.foo = function (x) { return x; }; return C; }()); var y = null; var c = new C(y); var r = c.foo(y);<|fim▁end|>
//// [wrappedAndRecursiveConstraints.ts] // no errors expected class C<T extends Date> {
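The wrappedAndRecursiveConstraints row exercises a generic class whose type parameter is bounded (T extends Date). The same shape in Python's typing module, as a sketch; the nested `U extends T` method bound has no direct Python equivalent, so it is flattened to T here:

from datetime import datetime
from typing import Generic, TypeVar

T = TypeVar("T", bound=datetime)  # mirrors `T extends Date`

class C(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data

    def foo(self, x: T) -> T:  # flattened from foo<U extends T>(x: U)
        return x

c = C(datetime.now())
r = c.foo(c.data)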
<|file_name|>character.js<|end_file_name|><|fim▁begin|>var mongoose = require('mongoose'); var characterSchema = new mongoose.Schema({ name: String, userID: { type: mongoose.Schema.Types.ObjectId, ref: 'User' }, inventarID: { type: mongoose.Schema.Types.ObjectId, ref: 'Inventar' }, gender: String, skincolor: String, hair: Number, haircolor: String, gear: { head: { type: mongoose.Schema.Types.ObjectId, ref: 'Item' }, body: { type: mongoose.Schema.Types.ObjectId, ref: 'Item' }, legs: { type: mongoose.Schema.Types.ObjectId, ref: 'Item' } }, costume: { head: { type: mongoose.Schema.Types.ObjectId, ref: 'Item' }, body: { type: mongoose.Schema.Types.ObjectId, ref: 'Item' }, legs: { type: mongoose.Schema.Types.ObjectId, ref: 'Item' } }, hp: Number, weapon: { type: mongoose.Schema.Types.ObjectId, ref: 'Item' }, deaths: Number, kills: Number, rounds: Number, created: { type: Date, default: Date.now } }); // Updates a character from the database. characterSchema.methods.update = function(_id, data, callback){ this.findById(_id, function(err, character){ if(err) return callback(err); if(character){ updateCharacter(character, data, err); if(err) return callback(err); else return callback(null); } }); } // Deletes a character from the database. characterSchema.methods.delete = function(name, callback){ this.findById(name, function(err, character){ if(err) return callback(err); if(character) character.remove(function(err){ return callback(err); }); else return callback("Could not be removed"); }); } // Helper function to update every single field in the database if it's in the data-object. function updateCharacter(character, data, callback){ if("name" in data) character.name = data.name; if("userID" in data) character.userID = data.userID; if("inventarID" in data) character.inventarID = data.inventarID; if("gender" in data) character.gender = data.gender; if("skincolor" in data) character.skincolor = data.skincolor; if("hair" in data) character.hair = data.hair; if("haircolor" in data) character.haircolor = data.haircolor; if("hp" in data) character.hp = data.hp; if("weapon" in data) character.weapon = data.weapon; if("deaths" in data) character.deaths = data.deaths; if("kills" in data) character.kills = data.kills; if("rounds" in data) character.rounds = data.rounds; if("gear" in data){ if("head" in data.gear) character.gear.head = data.gear.head; if("body" in data.gear) character.gear.body = data.gear.body;<|fim▁hole|> } if("costume" in data){ if("head" in data.costume) character.costume.head = data.costume.head; if("body" in data.costume) character.costume.body = data.costume.body; if("legs" in data.costume) character.costume.legs = data.costume.legs; } character.save(function(err){ if(err) return callback(err) else return callback(null); }); } module.exports = mongoose.model("Character", characterSchema);<|fim▁end|>
if("legs" in data.gear) character.gear.legs = data.gear.legs;
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod counts; mod hashing; pub mod mash; pub mod scaled; use needletail::parser::SequenceRecord; use serde::{Deserialize, Serialize}; use crate::bail; use crate::errors::FinchResult; use crate::filtering::FilterParams; use crate::serialization::Sketch; pub use hashing::ItemHash; #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] pub struct KmerCount { pub hash: ItemHash, pub kmer: Vec<u8>, pub count: u32, pub extra_count: u32, pub label: Option<Vec<u8>>, } pub trait SketchScheme { fn process(&mut self, seq: SequenceRecord); fn total_bases_and_kmers(&self) -> (u64, u64); fn to_vec(&self) -> Vec<KmerCount>; fn parameters(&self) -> SketchParams; fn to_sketch(&self) -> Sketch { // TODO: maybe this should be the primary teardown method for // sketching and sketch_stream should wrap it? // TODO: this doesn't really use filtering // TODO: also the pass-through for the post-filtering trimming is // weird for SketchParams::Mash let (seq_length, num_valid_kmers) = self.total_bases_and_kmers(); let hashes = self.to_vec(); Sketch { name: "".to_string(), seq_length, num_valid_kmers, comment: "".to_string(), hashes, filter_params: FilterParams::default(), sketch_params: self.parameters(), } } } #[derive(Clone, Debug, PartialEq)] pub enum SketchParams { Mash { kmers_to_sketch: usize, final_size: usize, no_strict: bool, kmer_length: u8, hash_seed: u64, }, Scaled { kmers_to_sketch: usize, kmer_length: u8, scale: f64, hash_seed: u64, }, AllCounts { kmer_length: u8, }, } impl Default for SketchParams { fn default() -> Self { SketchParams::Mash { kmers_to_sketch: 1000, final_size: 1000, no_strict: false, kmer_length: 21, hash_seed: 0, } } } impl SketchParams { pub fn create_sketcher(&self) -> Box<dyn SketchScheme> { match self { SketchParams::Mash { kmers_to_sketch, kmer_length, hash_seed, .. } => Box::new(mash::MashSketcher::new( *kmers_to_sketch, *kmer_length, *hash_seed, )), SketchParams::Scaled { kmers_to_sketch, kmer_length, scale, hash_seed, } => Box::new(scaled::ScaledSketcher::new( *kmers_to_sketch, *scale, *kmer_length, *hash_seed, )), SketchParams::AllCounts { kmer_length } => { Box::new(counts::AllCountsSketcher::new(*kmer_length)) } }<|fim▁hole|> pub fn process_post_filter(&self, kmers: &mut Vec<KmerCount>, name: &str) -> FinchResult<()> { if let SketchParams::Mash { final_size, no_strict, .. } = self { kmers.truncate(*final_size); if !no_strict && kmers.len() < *final_size { bail!("{} had too few kmers ({}) to sketch", name, kmers.len(),); } } Ok(()) } pub fn k(&self) -> u8 { match self { SketchParams::Mash { kmer_length, .. } => *kmer_length, SketchParams::Scaled { kmer_length, .. } => *kmer_length, SketchParams::AllCounts { kmer_length, .. } => *kmer_length, } } pub fn hash_info(&self) -> (&str, u16, u64, Option<f64>) { match self { SketchParams::Mash { hash_seed, .. } => ("MurmurHash3_x64_128", 64, *hash_seed, None), SketchParams::Scaled { hash_seed, scale, .. } => ("MurmurHash3_x64_128", 64, *hash_seed, Some(*scale)), SketchParams::AllCounts { .. } => ("None", 0, 0, None), } } pub fn expected_size(&self) -> usize { match self { SketchParams::Mash { final_size, .. } => *final_size, SketchParams::Scaled { kmers_to_sketch, .. } => *kmers_to_sketch, SketchParams::AllCounts { kmer_length, .. 
} => 4usize.pow(u32::from(*kmer_length)), } } pub fn from_sketches(sketches: &[Sketch]) -> FinchResult<Self> { let first_params = sketches[0].sketch_params.clone(); for (ix, sketch) in sketches.iter().enumerate().skip(1) { let params = &sketch.sketch_params; if let Some((mismatched_param, v1, v2)) = first_params.check_compatibility(&params) { bail!( "First sketch has {} {}, but sketch {} has {0} {}", mismatched_param, v1, ix + 1, v2, ); } // TODO: harmonize scaled/non-scaled sketches? // TODO: harminize sketch sizes? // TODO: do something with no_strict and final_size } Ok(first_params) } /// Return any sketch parameter difference that would make comparisons /// between sketches generated by these parameter sets not work. /// /// Note this doesn't actually check the enum variants themselves, but it /// should still break if there are different variants because the hash /// types should be different. pub fn check_compatibility(&self, other: &SketchParams) -> Option<(&str, String, String)> { if self.k() != other.k() { return Some(("k", self.k().to_string(), other.k().to_string())); } if self.hash_info().0 != other.hash_info().0 { return Some(( "hash type", self.hash_info().0.to_string(), other.hash_info().0.to_string(), )); } if self.hash_info().1 != other.hash_info().1 { return Some(( "hash bits", self.hash_info().1.to_string(), other.hash_info().1.to_string(), )); } if self.hash_info().2 != other.hash_info().2 { return Some(( "hash seed", self.hash_info().2.to_string(), other.hash_info().2.to_string(), )); } None } }<|fim▁end|>
}
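The mod.rs row's check_compatibility only lets two sketches be compared when k, hash type, hash bits and hash seed all agree, returning the first mismatch it finds. A Python restatement of that rule, assuming a flat parameter mapping rather than the Rust enum:

def check_compatibility(a: dict, b: dict):
    """Return (parameter, value_a, value_b) for the first mismatch, else None."""
    for name in ("k", "hash_type", "hash_bits", "hash_seed"):
        if a[name] != b[name]:
            return name, a[name], b[name]
    return None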
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InBrowserEditor.settings") from django.core.management import execute_from_command_line <|fim▁hole|><|fim▁end|>
execute_from_command_line(sys.argv)
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import os import pytest BASE_DIR = os.path.abspath(os.path.dirname(__file__)) SITE_DIR = os.path.join(BASE_DIR, "site") @pytest.fixture def site_dir(): return SITE_DIR @pytest.fixture def output_exist(): return lambda path: os.path.exists(os.path.join(SITE_DIR, "deploy", path)) @pytest.fixture(autouse=True) def chdir(): from catsup.options import g <|fim▁hole|> g.cwdpath = SITE_DIR<|fim▁end|>
os.chdir(SITE_DIR)
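The conftest.py row defines site_dir and output_exist fixtures plus an autouse chdir. A sketch of how a test module would consume them; the deploy/index.html artefact is hypothetical:

def test_build_produces_index(site_dir, output_exist):
    # chdir is autouse, so the working directory is already the site dir.
    assert output_exist("index.html")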
<|file_name|>diagramsTest.js<|end_file_name|><|fim▁begin|>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ function drawSctDiagram(concept, parentDiv) { parentDiv.svg({settings: {width: '600px', height: '500px'}}); var svg = parentDiv.svg('get'); loadDefs(svg); rect1 = drawSctBox(parentDiv, 10, 10, "<span class='sct-box-id'>12676007<br></span>Fracture of radius", "sct-defined-concept"); circle1 = drawEquivalentNode(svg, 120,130); drawSubsumedByNode(svg, 120,230); drawSubsumesNode(svg, 120,330); drawSctBox(parentDiv, 100, 400, "&lt;slot&gt;", "sct-slot"); connectElements(svg, rect1, circle1, 'center', 'left'); circle2 = drawConjunctionNode(svg, 200, 130); connectElements(svg, circle1, circle2, 'right', 'left'); rect2 = drawSctBox(parentDiv, 250, 100, "<span class='sct-box-id'>65966004<br></span>Fracture of forearm", "sct-defined-concept"); connectElements(svg, circle2, rect2, 'bottom', 'left', 'ClearTriangle'); rect3 = drawSctBox(parentDiv, 250, 200, "<span class='sct-box-id'>429353004<br></span>Injury of radius", "sct-defined-concept");<|fim▁hole|> circle4 = drawConjunctionNode(svg, 300, 330); connectElements(svg, circle3, circle4, 'right', 'left'); rect4 = drawSctBox(parentDiv, 350, 300, "<span class='sct-box-id'>116676008<br></span>Associated morphology", "sct-attribute"); connectElements(svg, circle4, rect4, 'right', 'left'); rect5 = drawSctBox(parentDiv, 550, 300, "<span class='sct-box-id'>72704001<br></span>Fracture", "sct-primitive-concept"); connectElements(svg, rect4, rect5, 'right', 'left'); rect6 = drawSctBox(parentDiv, 350, 400, "<span class='sct-box-id'>363698007<br></span>Finding site", "sct-attribute"); connectElements(svg, circle4, rect6, 'bottom', 'left'); rect7 = drawSctBox(parentDiv, 550, 400, "<span class='sct-box-id'>62413002<br></span>Bone structure of radius", "sct-primitive-concept"); connectElements(svg, rect6, rect7, 'right', 'left'); } function toggleIds() { $('.sct-box-id').toggle(); }<|fim▁end|>
connectElements(svg, circle2, rect3, 'bottom', 'left', 'ClearTriangle'); circle3 = drawAttributeGroupNode(svg, 250, 330); connectElements(svg, circle2, circle3, 'bottom', 'left');
<|file_name|>doc.go<|end_file_name|><|fim▁begin|>// Package models has the structs that are used by the application. // // Copyright (c) 2016 VMware // Author: Luis M. Valerio ([email protected]) //<|fim▁hole|>// License: MIT // /* Package approval monitor managers approvals for the lab. */ package models<|fim▁end|>
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('datasets', parent_package, top_path) config.add_subpackage('volumes')<|fim▁hole|> return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())<|fim▁end|>
config.add_subpackage('transforms')
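The setup.py row uses the numpy.distutils Configuration pattern, where each add_subpackage target must ship its own setup.py. Data files can ride along the same way; the 'tests' directory below is an assumption for illustration, not taken from the original:

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('datasets', parent_package, top_path)
    config.add_subpackage('volumes')
    config.add_subpackage('transforms')
    config.add_data_dir('tests')  # bundle non-code files with the package
    return config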
<|file_name|>index.js<|end_file_name|><|fim▁begin|>module.exports = [ require('./ngdoc'), require('./name'), require('./area'), require('./module'), require('./id'), require('./restrict'), require('./eventType'), require('./example'), require('./element'), require('./fullName'), require('./priority'), require('./title'), require('./parent'),<|fim▁hole|><|fim▁end|>
require('./packageName'), require('./scope') ];
<|file_name|>worldclock_lt.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?> <!DOCTYPE TS> <TS version="2.1" language="lt"> <context> <name>LXQtWorldClock</name> <message> <location filename="../lxqtworldclock.cpp" line="241"/> <source>&apos;&lt;b&gt;&apos;HH:mm:ss&apos;&lt;/b&gt;&lt;br/&gt;&lt;font size=&quot;-2&quot;&gt;&apos;ddd, d MMM yyyy&apos;&lt;br/&gt;&apos;TT&apos;&lt;/font&gt;&apos;</source> <translation>&apos;&lt;b&gt;&apos;HH:mm:ss&apos;&lt;/b&gt;&lt;br/&gt;&lt;font size=&quot;-2&quot;&gt;&apos;ddd, d MMM yyyy&apos;&lt;br/&gt;&apos;TT&apos;&lt;/font&gt;&apos;</translation> </message> </context> <context> <name>LXQtWorldClockConfiguration</name> <message> <location filename="../lxqtworldclockconfiguration.ui" line="14"/> <source>World Clock Settings</source> <translation>Pasaulio laikrodžių nustatymai</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="24"/> <source>Display &amp;format</source> <translation>Rodymo &amp;formatas</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="30"/> <source>&amp;Time</source> <translation>&amp;Laikas</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="39"/> <source>F&amp;ormat:</source> <translation>F&amp;ormatas:</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="50"/> <location filename="../lxqtworldclockconfiguration.ui" line="176"/> <location filename="../lxqtworldclockconfiguration.ui" line="272"/> <source>Short</source> <translation>Trumpas</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="55"/> <location filename="../lxqtworldclockconfiguration.ui" line="181"/> <location filename="../lxqtworldclockconfiguration.ui" line="277"/> <source>Long</source> <translation>Ilgas</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="60"/> <location filename="../lxqtworldclockconfiguration.ui" line="287"/> <source>Custom</source> <translation>Tinkintas</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="89"/> <source>Sho&amp;w seconds</source> <translation>R&amp;odyti sekundes</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="96"/> <source>Pad &amp;hour with zero</source> <translation>Papildyti &amp;valandą nuliu</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="116"/> <source>T&amp;ime zone</source> <translation>La&amp;iko juosta</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="128"/> <source>&amp;Position:</source> <translation>&amp;Pozicija:</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="138"/> <source>For&amp;mat:</source> <translation>For&amp;matas:</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="149"/> <location filename="../lxqtworldclockconfiguration.ui" line="238"/> <source>Below</source> <translation>Žemiau</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="154"/> <location filename="../lxqtworldclockconfiguration.ui" line="243"/> <source>Above</source> <translation>Aukščiau</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="159"/> <location filename="../lxqtworldclockconfiguration.ui" 
line="248"/> <source>Before</source> <translation>Prieš</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="164"/> <location filename="../lxqtworldclockconfiguration.ui" line="253"/> <source>After</source> <translation>Po</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="186"/> <source>Offset from UTC</source> <translation>Poslinkis nuo UTC (Suderintojo pasaulinio laiko)</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="191"/> <source>Abbreviation</source> <translation>Santrumpa</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="431"/> <source>IANA id</source> <translation>IANA id</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="201"/> <location filename="../lxqtworldclockconfiguration.ui" line="436"/> <location filename="../lxqtworldclockconfiguration.cpp" line="579"/> <source>Custom name</source> <translation>Tinkintas pavadinimas</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="103"/> <source>&amp;Use 12-hour format</source> <translation>Na&amp;udoti 12 valandų formatą</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="196"/> <source>Location identifier</source> <translation>Vietos identifikatorius</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="212"/> <source>&amp;Date</source> <translation>&amp;Data</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="227"/> <source>Po&amp;sition:</source> <translation>Po&amp;zicija:</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="261"/> <source>Fo&amp;rmat:</source> <translation>Fo&amp;rmatas:</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="282"/> <source>ISO 8601</source> <translation>ISO 8601</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="316"/> <source>Show &amp;year</source> <translation>Rodyti &amp;metus</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="323"/> <source>Show day of wee&amp;k</source> <translation>Rodyti savaitės &amp;dieną</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="330"/> <source>Pad d&amp;ay with zero</source> <translation>Papildyti &amp;dieną nuliu</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="337"/> <source>&amp;Long month and day of week names</source> <translation>I&amp;lgi mėnesių ir savaitės dienų pavadinimai</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="350"/> <source>Ad&amp;vanced manual format</source> <translation>Iš&amp;plėstinis rankinis formatas</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="377"/> <source>&amp;Customise ...</source> <translation>&amp;Tinkinti ...</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="403"/> <source>Time &amp;zones</source> <translation>Laiko &amp;juostos</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="446"/> <source>&amp;Add ...</source> <translation>&amp;Pridėti ...</translation> </message> <message> 
<location filename="../lxqtworldclockconfiguration.ui" line="456"/> <source>&amp;Remove</source> <translation>Ša&amp;linti</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="466"/> <source>Set as &amp;default</source> <translation>Nustatyti &amp;numatytąja</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="476"/> <source>&amp;Edit custom name ...</source> <translation>&amp;Taisyti tinkintą pavadinimą ...</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="486"/> <source>Move &amp;up</source> <translation>Pa&amp;kelti</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="496"/> <source>Move do&amp;wn</source> <translation>&amp;Nuleisti</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="519"/> <source>&amp;General</source> <translation>&amp;Bendra</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.ui" line="525"/> <source>Auto&amp;rotate when the panel is vertical</source> <translation>Automatiškai pasuk&amp;ti, kai skydelis yra vertikalus</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.cpp" line="103"/> <source>&apos;&lt;b&gt;&apos;HH:mm:ss&apos;&lt;/b&gt;&lt;br/&gt;&lt;font size=&quot;-2&quot;&gt;&apos;ddd, d MMM yyyy&apos;&lt;br/&gt;&apos;TT&apos;&lt;/font&gt;&apos;</source> <translation>&apos;&lt;b&gt;&apos;HH:mm:ss&apos;&lt;/b&gt;&lt;br/&gt;&lt;font size=&quot;-2&quot;&gt;&apos;yyyy MMM d, ddd&apos;&lt;br/&gt;&apos;TT&apos;&lt;/font&gt;&apos;</translation> </message> <message> <location filename="../lxqtworldclockconfiguration.cpp" line="578"/> <source>Input custom time zone name</source> <translation>Įrašykite tinkintą laiko juostos pavadinimą</translation> </message> </context> <context> <name>LXQtWorldClockConfigurationManualFormat</name> <message> <location filename="../lxqtworldclockconfigurationmanualformat.ui" line="14"/> <source>World Clock Manual Format Configuration</source> <translation>Pasaulio laikrodžių rankinė formato konfigūracija</translation> </message> <message> <location filename="../lxqtworldclockconfigurationmanualformat.ui" line="83"/> <source>&lt;h1&gt;Custom Date/Time Format Syntax&lt;/h1&gt; &lt;p&gt;A date pattern is a string of characters, where specific strings of characters are replaced with date and time data from a calendar when formatting or used to generate data for a calendar when parsing.&lt;/p&gt; &lt;p&gt;The Date Field Symbol Table below contains the characters used in patterns to show the appropriate formats for a given locale, such as yyyy for the year. Characters may be used multiple times. For example, if y is used for the year, &apos;yy&apos; might produce &apos;99&apos;, whereas &apos;yyyy&apos; produces &apos;1999&apos;. For most numerical fields, the number of characters specifies the field width. For example, if h is the hour, &apos;h&apos; might produce &apos;5&apos;, but &apos;hh&apos; produces &apos;05&apos;. For some characters, the count specifies whether an abbreviated or full form should be used, but may have other choices, as given below.&lt;/p&gt; &lt;p&gt;Two single quotes represents a literal single quote, either inside or outside single quotes. Text within single quotes is not interpreted in any way (except for two adjacent single quotes). 
Otherwise all ASCII letter from a to z and A to Z are reserved as syntax characters, and require quoting if they are to represent literal characters. In addition, certain ASCII punctuation characters may become variable in the future (eg &quot;:&quot; being interpreted as the time separator and &apos;/&apos; as a date separator, and replaced by respective locale-sensitive characters in display).&lt;br /&gt;&lt;/p&gt; &lt;table border=&quot;1&quot; width=&quot;100%&quot; cellpadding=&quot;4&quot; cellspacing=&quot;0&quot;&gt; &lt;tr&gt;&lt;th width=&quot;20%&quot;&gt;Code&lt;/th&gt;&lt;th&gt;Meaning&lt;/th&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;d&lt;/td&gt;&lt;td&gt;the day as number without a leading zero (1 to 31)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;dd&lt;/td&gt;&lt;td&gt;the day as number with a leading zero (01 to 31)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;ddd&lt;/td&gt;&lt;td&gt;the abbreviated localized day name (e.g. &apos;Mon&apos; to &apos;Sun&apos;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;dddd&lt;/td&gt;&lt;td&gt;the long localized day name (e.g. &apos;Monday&apos; to &apos;Sunday&apos;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;M&lt;/td&gt;&lt;td&gt;the month as number without a leading zero (1-12)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;MM&lt;/td&gt;&lt;td&gt;the month as number with a leading zero (01-12)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;MMM&lt;/td&gt;&lt;td&gt;the abbreviated localized month name (e.g. &apos;Jan&apos; to &apos;Dec&apos;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;MMMM&lt;/td&gt;&lt;td&gt;the long localized month name (e.g. &apos;January&apos; to &apos;December&apos;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;yy&lt;/td&gt;&lt;td&gt;the year as two digit number (00-99)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;yyyy&lt;/td&gt;&lt;td&gt;the year as four digit number&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;h&lt;/td&gt;&lt;td&gt;the hour without a leading zero (0 to 23 or 1 to 12 if AM/PM display)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;hh&lt;/td&gt;&lt;td&gt;the hour with a leading zero (00 to 23 or 01 to 12 if AM/PM display)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;H&lt;/td&gt;&lt;td&gt;the hour without a leading zero (0 to 23, even with AM/PM display)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;HH&lt;/td&gt;&lt;td&gt;the hour with a leading zero (00 to 23, even with AM/PM display)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;m&lt;/td&gt;&lt;td&gt;the minute without a leading zero (0 to 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;mm&lt;/td&gt;&lt;td&gt;the minute with a leading zero (00 to 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;s&lt;/td&gt;&lt;td&gt;the second without a leading zero (0 to 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;ss&lt;/td&gt;&lt;td&gt;the second with a leading zero (00 to 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;z&lt;/td&gt;&lt;td&gt;the milliseconds without leading zeroes (0 to 999)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;zzz&lt;/td&gt;&lt;td&gt;the milliseconds with leading zeroes (000 to 999)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;AP &lt;i&gt;or&lt;/i&gt; A&lt;/td&gt;&lt;td&gt;use AM/PM display. &lt;b&gt;A/AP&lt;/b&gt; will be replaced by either &quot;AM&quot; or &quot;PM&quot;.&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;ap &lt;i&gt;or&lt;/i&gt; a&lt;/td&gt;&lt;td&gt;use am/pm display. &lt;b&gt;a/ap&lt;/b&gt; will be replaced by either &quot;am&quot; or &quot;pm&quot;.&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;t&lt;/td&gt;&lt;td&gt;the timezone (e.g. 
&quot;CEST&quot;)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;T&lt;/td&gt;&lt;td&gt;the offset from UTC&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TT&lt;/td&gt;&lt;td&gt;the timezone IANA id&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTT&lt;/td&gt;&lt;td&gt;the timezone abbreviation&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTTT&lt;/td&gt;&lt;td&gt;the timezone short display name&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTTTT&lt;/td&gt;&lt;td&gt;the timezone long display name&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTTTTT&lt;/td&gt;&lt;td&gt;the timezone custom name. You can change it the &apos;Time zones&apos; tab of the configuration window&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt; &lt;br /&gt;&lt;b&gt;Notes:&lt;/b&gt; &lt;ul&gt;&lt;li&gt;Any characters in the pattern that are not in the ranges of [&apos;a&apos;..&apos;z&apos;] and [&apos;A&apos;..&apos;Z&apos;] will be treated as quoted text. For instance, characters like &apos;:&apos;, &apos;.&apos;, &apos; &apos;, &apos;#&apos; and &apos;@&apos; will appear in the resulting time text even they are not enclosed within single quotes. The single quote is used to &apos;escape&apos; letters. Two single quotes in a row, whether inside or outside a quoted sequence, represent a &apos;real&apos; single quote.&lt;/li&gt;&lt;li&gt;Minimal update interval is 1 second. If z or zzz is configured time is shown with the milliseconds fraction, but not updated on millisecond basis (avoiding big performance hit).&lt;/li&gt;&lt;ul&gt; </source> <translation>&lt;h1&gt;Tinkintos datos/Laiko formato sintaksė&lt;/h1&gt; &lt;p&gt;Datos šablonas yra simbolių eilutė, kurioje tam tikros simbolių eilutės, formatuojant, yra pakeičiamos datos ir laiko duomenimis iš kalendoriaus arba analizuojant, yra naudojamos kalendoriaus duomenų kūrimui.&lt;/p&gt; &lt;p&gt;Žemiau esančioje datų laukelio simbolių lentelėje yra simboliai, kurie naudojami šablonuose, siekiant rodyti atitinkamus nurodytos lokalės formatus, tokie simboliai kaip pvz., yyyy, kurie yra skirti metams. Simboliai gali būti naudojami kelis kartus. Pavyzdžiui, jeigu y yra naudojama metams, &quot;yy&quot; gali atvaizduoti &quot;99&quot;, tuo tarpu &quot;yyyy&quot; atvaizduos &quot;1999&quot;. Daugelyje skaitmeninių laukelių, simbolių skaičius nustato laukelio plotį. Pavyzdžiui, jeigu h yra valanda, tai &quot;h&quot; gali atvaizduoti &quot;5&quot;, tačiau &quot;hh&quot; atvaizduos &quot;05&quot;. Kai kuriems simboliams skaičius nustato, ar turėtų būti naudojama pilna ar sutrumpinta forma, tačiau gali būti ir kiti pasirinkimai, kaip tai yra nurodyta žemiau.&lt;/p&gt; &lt;p&gt;Dvi kabutės, nesvarbu ar viengubos kabutės viduje ar išorėje, atvaizduoja viengubą kabutę. Viengubų kabučių viduje esantis tekstas niekaip nėra interpretuojamas (išskyrus dvi viengubas kabutes viena šalia kitos). Kita vertus visos ASCII raidės nuo a iki z ir nuo A iki Z yra rezervuotos kaip sintaksės simboliai, ir yra reikalaujama, kad jos būtų kabutėse, jeigu norima jas atvaizduoti kaip įprastas raides. 
Be to, tam tikri ASCII punktuacijos simboliai ateityje gali tapti kintamaisiais (pvz., &quot;:&quot; gali būti interpretuojamas kaip laiko skirtuvas, o &quot;/&quot; kaip datos skirtuvas, ir rodinyje gali būti pakeisti atitinkamais lokalės simboliais).&lt;br /&gt;&lt;/p&gt; &lt;table border=&quot;1&quot; width=&quot;100%&quot; cellpadding=&quot;4&quot; cellspacing=&quot;0&quot;&gt; &lt;tr&gt;&lt;th width=&quot;20%&quot;&gt;Kodo&lt;/th&gt;&lt;th&gt;reikšmė&lt;/th&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;d&lt;/td&gt;&lt;td&gt;diena kaip skaičius be priekinio nulio (1 iki 31)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;dd&lt;/td&gt;&lt;td&gt;diena kaip skaičius su priekiniu nuliu (01 iki 31)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;ddd&lt;/td&gt;&lt;td&gt;sutrumpintas lokalizuotas dienos pavadinimas (pvz., nuo &quot;Pir&quot; iki &quot;Sek&quot;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;dddd&lt;/td&gt;&lt;td&gt;ilgas lokalizuotas dienos pavadinimas (pvz., nuo &quot;Pirmadienis&quot; iki &quot;Sekmadienis&quot;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;M&lt;/td&gt;&lt;td&gt;mėnesis kaip skaičius be priekinio nulio (1-12)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;MM&lt;/td&gt;&lt;td&gt;mėnesis kaip skaičius su priekiniu nuliu (01-12)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;MMM&lt;/td&gt;&lt;td&gt;sutrumpintas lokalizuotas mėnesio pavadinimas (pvz., nuo &quot;Sau&quot; iki &quot;Gru&quot;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;MMMM&lt;/td&gt;&lt;td&gt;ilgas lokalizuotas mėnesio pavadinimas (pvz., nuo &quot;Sausis&quot; iki &quot;Gruodis&quot;).&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;yy&lt;/td&gt;&lt;td&gt;metai kaip dviejų skaitmenų skaičius (00-99)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;yyyy&lt;/td&gt;&lt;td&gt;metai kaip keturių skaitmenų skaičius&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;h&lt;/td&gt;&lt;td&gt;valanda be priekinio nulio (nuo 0 iki 23 arba nuo 1 iki 12, jeigu yra rodoma AM/PM formatu)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;hh&lt;/td&gt;&lt;td&gt;valanda su priekiniu nuliu (nuo 00 iki 23 arba nuo 01 iki 12, jeigu yra rodoma AM/PM formatu)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;H&lt;/td&gt;&lt;td&gt;valanda be priekinio nulio (nuo 0 iki 23, netgi jeigu yra rodoma AM/PM formatu)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;HH&lt;/td&gt;&lt;td&gt;valanda su priekiniu nuliu (nuo 00 iki 23, netgi jeigu yra rodoma AM/PM formatu)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;m&lt;/td&gt;&lt;td&gt;minutė be priekinio nulio (nuo 0 iki 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;mm&lt;/td&gt;&lt;td&gt;minutė su priekiniu nuliu (nuo 00 iki 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;s&lt;/td&gt;&lt;td&gt;sekundė be priekinio nulio (nuo 0 iki 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;ss&lt;/td&gt;&lt;td&gt;sekundė su priekiniu nuliu (nuo 00 iki 59)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;z&lt;/td&gt;&lt;td&gt;milisekundės be priekinių nulių (nuo 0 iki 999)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;zzz&lt;/td&gt;&lt;td&gt;milisekundės su priekiniais nuliais (nuo 000 iki 999)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;AP &lt;i&gt;arba&lt;/i&gt; A&lt;/td&gt;&lt;td&gt;naudoti AM/PM rodymą. &lt;b&gt;A/AP&lt;/b&gt; bus pakeista į &quot;AM&quot; arba &quot;PM&quot;.&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;ap &lt;i&gt;arba&lt;/i&gt; a&lt;/td&gt;&lt;td&gt;naudoti am/pm rodymą. 
&lt;b&gt;a/ap&lt;/b&gt; bus pakeista į &quot;am&quot; arba &quot;pm&quot;.&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;t&lt;/td&gt;&lt;td&gt;laiko juosta (pavyzdžiui, &quot;CEST&quot;)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;T&lt;/td&gt;&lt;td&gt;poslinkis nuo UTC (Suderintojo pasaulinio laiko)&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TT&lt;/td&gt;&lt;td&gt;laiko juosta IANA id&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTT&lt;/td&gt;&lt;td&gt;laiko juostos santrumpa&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTTT&lt;/td&gt;&lt;td&gt;trumpasis laiko juostos pavadinimas&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTTTT&lt;/td&gt;&lt;td&gt;ilgasis laiko juostos pavadinimas&lt;/td&gt;&lt;/tr&gt; &lt;tr&gt;&lt;td&gt;TTTTTT&lt;/td&gt;&lt;td&gt;tinkintas laiko juostos pavadinimas. Jūs galite jį pakeisti konfigūracijos lango &quot;Laiko juostų&quot; kortelėje&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt; &lt;br /&gt;&lt;b&gt;Pastabos:&lt;/b&gt; &lt;ul&gt;&lt;li&gt;Bet kurie šablone esantys simboliai, kurie nėra rėžyje [&quot;a&quot;..&quot;z&quot;] ir [&quot;A&quot;..&quot;Z&quot;] bus laikomi cituotu tekstu. Pavyzdžiui, tokie simboliai kaip &quot;:&quot;, &quot;.&quot;, &quot; &quot;, &quot;#&quot; ir &quot;@&quot; bus rodomi galutiniame laiko tekste netgi tuo atveju, jeigu jie nebus viengubose kabutėse. Viengubos kabutės yra naudojamos raidžių &quot;ištrūkimui&quot;. Dvi viengubos kabutės iš eilės, nesvarbu ar cituojamos sekos viduje ar išorėje, atvaizduoja &quot;tikrąsias&quot; viengubas kabutes.&lt;/li&gt;&lt;li&gt;Mažiausias atnaujinimo intervalas yra 1 sekundė. Jeigu laiko atvaizdavimui yra naudojama z ar zzz, tuomet laikas bus rodomas su milisekundėmis, tačiau jis nebus atnaujinamas kas milisekundę (tam, kad būtų išvengta našumo sumažėjimo).&lt;/li&gt;&lt;ul&gt; </translation> </message> </context> <context> <name>LXQtWorldClockConfigurationTimeZones</name> <message> <location filename="../lxqtworldclockconfigurationtimezones.ui" line="14"/> <source>World Clock Time Zones</source> <translation>Pasaulio laikrodžių laiko juostos</translation> </message> <message><|fim▁hole|> <translation>Laiko juosta</translation> </message> <message> <location filename="../lxqtworldclockconfigurationtimezones.ui" line="41"/> <source>Name</source> <translation>Pavadinimas</translation> </message> <message> <location filename="../lxqtworldclockconfigurationtimezones.ui" line="46"/> <source>Comment</source> <translation>Komentaras</translation> </message> <message> <location filename="../lxqtworldclockconfigurationtimezones.ui" line="51"/> <source>Country</source> <translation>Šalis</translation> </message> <message> <location filename="../lxqtworldclockconfigurationtimezones.cpp" line="117"/> <source>UTC</source> <translation>UTC (Suderintasis pasaulinis laikas)</translation> </message> <message> <location filename="../lxqtworldclockconfigurationtimezones.cpp" line="120"/> <location filename="../lxqtworldclockconfigurationtimezones.cpp" line="128"/> <source>Other</source> <translation>Kita</translation> </message> <message> <location filename="../lxqtworldclockconfigurationtimezones.cpp" line="129"/> <source>Local timezone</source> <translation>Vietinė laiko juosta</translation> </message> </context> </TS><|fim▁end|>
<location filename="../lxqtworldclockconfigurationtimezones.ui" line="36"/> <source>Time zone</source>
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>/* MIT License Copyright (c) 2022 Looker Data Sciences, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<|fim▁hole|> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ export * from './Slider'<|fim▁end|>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
export { default } from './ElectronOriginalWordmark'
<|file_name|>log-service.ts<|end_file_name|><|fim▁begin|>module app.log { export class LogRow { message: string; level: string; constructor(message: string, level: string) { this.message = message; this.level = level; } } export class LogService { logRows: Array<LogRow>; loggerUrl: string; static $inject = ['logConfig']; constructor(private logConfig: ILogConfig) { this.logRows = []; this.loggerUrl = 'api/log'; } sendLog(messageIn, levelIn): ng.IPromise<any> { //return this.$http.post(this.logConfig.loggerUrl, // { // message: messageIn, // level: levelIn, // source: this.GetFileName(), // }); return null; } log(msg, color) { color = color || "black"; var bgc = "White"; switch (color) { case "success": color = "Black"; bgc = "LimeGreen"; break; case "info": color = "DodgerBlue"; bgc = "Turquoise"; break; case "error": color = "Red"; bgc = "White"; break; case "start": color = "OliveDrab"; bgc = "PaleGreen"; break; case "warning": color = "Tomato"; bgc = "Black"; break; case "end": color = "Orchid"; bgc = "MediumVioletRed"; break; default: color = color; } if (typeof msg == "object") { console.log(msg); } else if (typeof color == "object") { console.log("%c" + msg, "color: PowderBlue;font-weight:bold; background-color: RoyalBlue;"); console.log(color); } else { console.log("%c" + msg, "color:" + color + ";font-weight:bold; background-color: " + bgc + ";"); } } navigatedTo(message) { this.sendLog(message, "NavigatedTo"); } debug(message) { this.sendLog(message, "Debug"); } info(message) { this.sendLog(message, "Info"); } success(message) { this.sendLog(message, "Info"); } error(message) { if (typeof message === "string") { this.sendLog(message, "Error"); } if (typeof message === "object") { if (message.hasOwnProperty("data")) { if (message.data.hasOwnProperty("ExceptionMessage")) { this.sendLog(message.data.ExceptionMessage, "Error"); } } } } warning(message) { this.sendLog(message, "Warn"); } fatal(message) {<|fim▁hole|> this.sendLog(message, "Fatal"); } GetFileName() { var url = document.location.href; return url; } } } angular.module('app.log').service("logService", app.log.LogService);<|fim▁end|>
<|file_name|>dimension1.cc<|end_file_name|><|fim▁begin|>/* Test Product<NNC_Polyhedron, Grid>::space_dimension() and Product<NNC_Polyhedron, Grid>::affine_dimension(). Copyright (C) 2001-2010 Roberto Bagnara <[email protected]> Copyright (C) 2010-2017 BUGSENG srl (http://bugseng.com) This file is part of the Parma Polyhedra Library (PPL). The PPL is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. The PPL is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307, USA. For the most up-to-date information see the Parma Polyhedra Library site: http://bugseng.com/products/ppl/ . */ #include "ppl_test.hh"<|fim▁hole|>typedef Grid DOMAIN2; typedef Domain_Product<DOMAIN1x, DOMAIN2x>::Constraints_Product Product; namespace { // space_dimension() bool test01() { Variable A(0); Variable E(4); Constraint_System cs(A + E < 9); Product prp(5); prp.refine_with_constraints(cs); bool ok = (prp.space_dimension() == 5); print_congruences(prp, "*** prp congruences ***"); print_constraints(prp, "*** prp constraints ***"); return ok; } // affine_dimension() bool test02() { Variable A(0); Variable B(1); Variable C(2); Product prp(3); prp.refine_with_constraint(A - C >= 9); prp.refine_with_constraint(A - C <= 9); prp.refine_with_constraint(B >= 2); bool ok = (prp.affine_dimension() == 2); prp.refine_with_constraint(C == 4); prp.refine_with_constraint(B == 2); ok &= (ok && prp.affine_dimension() == 0); print_congruences(prp, "*** prp congruences ***"); print_constraints(prp, "*** prp constraints ***"); return ok; } } // namespace BEGIN_MAIN DO_TEST(test01); DO_TEST(test02); END_MAIN<|fim▁end|>
#include "partially_reduced_product_test.hh" typedef NNC_Polyhedron DOMAIN1;
<|file_name|>ja.js<|end_file_name|><|fim▁begin|>/* Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.plugins.setLang( 'removeformat', 'ja', {<|fim▁hole|><|fim▁end|>
toolbar: 'フォーマット削除' });
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from stst.features.features_sequence import * from stst.features.features_pos import * from stst.features.features_ngram import * from stst.features.features_bow import * from stst.features.features_dependency import * from stst.features.features_align import * from stst.features.features_embedding import * from stst.features.features_tree_kernels import * from stst.features.features_wn import *<|fim▁hole|>from stst.features.features_mt import * from stst.features.features_nn import * from stst.features.features_negative import *<|fim▁end|>
<|file_name|>format_pharmaciens.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import pandas as pd import sys from builtins import str as text from utils import find_zipcode, str2date header_mapping = { 'origin': 'ORIGIN', 'company_name': 'LABO', 'lastname_firstname': 'BENEF_PS_QUALITE_NOM_PRENOM', 'address': 'BENEF_PS_ADR', 'job': 'BENEF_PS_QUALIFICATION', 'rpps': 'BENEF_PS_RPPS', 'value': 'DECL_AVANT_MONTANT', 'date': 'DECL_AVANT_DATE', 'kind': 'DECL_AVANT_NATURE', 'BENEF_PS_CODEPOSTAL': 'BENEF_PS_CODEPOSTAL'<|fim▁hole|> input_filename = sys.argv[1] output_filename = sys.argv[2] df = pd.read_csv(input_filename, encoding='utf-8') df['lastname_firstname'] = df['name'] + ' ' + df['firstname'] df['origin'] = 'Pharmacien' df['date'] = df['date'].apply(str2date) df['BENEF_PS_CODEPOSTAL'] = df['address'].apply(find_zipcode) for origin, target in header_mapping.items(): df[target] = df[origin] df[target] = df[target].apply(text).apply(lambda s: s.replace(',', '- ').replace('"', '')) df[list(header_mapping.values())].to_csv(output_filename, index=False, encoding='utf-8')<|fim▁end|>
}
<|file_name|>test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- # File: test.py # # Copyright 2018 Costas Tyfoxylos # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import argparse import logging import json import os from bootstrap import bootstrap from library import execute_command, tempdir # This is the main prefix used for logging LOGGER_BASENAME = '''_CI.test''' LOGGER = logging.getLogger(LOGGER_BASENAME) LOGGER.addHandler(logging.NullHandler()) def get_arguments(): parser = argparse.ArgumentParser(description='Accepts stages for testing') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--lint', help='Test the lint stage of the template', action='store_true') group.add_argument('--test', help='Test the test stage of the template', action='store_true') group.add_argument('--build', help='Test the build stage of the template', action='store_true') group.add_argument('--document', help='Test the document stage of the template', action='store_true') args = parser.parse_args() return args def _test(stage): from cookiecutter.main import cookiecutter template = os.path.abspath('.') context = os.path.abspath('cookiecutter.json') with tempdir(): cookiecutter(template, extra_context=json.loads(open(context).read()), no_input=True) os.chdir(os.listdir('.')[0]) del os.environ['PIPENV_PIPFILE'] return execute_command(os.path.join('_CI', 'scripts', f'{stage}.py')) def test(stage): emojize = bootstrap() exit_code = _test(stage) success = not exit_code if success: LOGGER.info('%s Tested stage "%s" successfully! %s', emojize(':white_heavy_check_mark:'), stage, emojize(':thumbs_up:')) else: LOGGER.error('%s Errors found testing stage "%s"! %s', emojize(':cross_mark:'), stage, emojize(':crying_face:')) raise SystemExit(exit_code) if __name__ == '__main__': args = get_arguments() stage = next((argument for argument in ('lint', 'test', 'build', 'document')<|fim▁hole|><|fim▁end|>
if getattr(args, argument)), None) test(stage)
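The test.py row imports execute_command and tempdir from a local library module that is not shown. A minimal guess at what tempdir provides (create a scratch directory, enter it, clean up afterwards); the real implementation may differ:

import os, shutil, tempfile
from contextlib import contextmanager

@contextmanager
def tempdir():
    path = tempfile.mkdtemp()
    old = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        os.chdir(old)
        shutil.rmtree(path, ignore_errors=True)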
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use amethyst::{ core::transform::TransformBundle, input::{InputBundle, StringBindings}, prelude::*, renderer::{ plugins::{RenderFlat2D, RenderToWindow}, types::DefaultBackend, RenderingBundle, }, utils::application_root_dir, }; mod pong; mod systems; use crate::pong::Pong; fn main() -> amethyst::Result<()> { amethyst::start_logger(Default::default()); let app_root = application_root_dir()?; let display_config_path = app_root.join("config").join("display.ron"); let binding_path = app_root.join("config").join("bindings.ron"); let input_bundle = InputBundle::<StringBindings>::new() .with_bindings_from_file(binding_path)?; let game_data = GameDataBuilder::default() .with_bundle( RenderingBundle::<DefaultBackend>::new() .with_plugin( RenderToWindow::from_config_path(display_config_path)? .with_clear([0.0, 0.0, 0.0, 1.0]), ) .with_plugin(RenderFlat2D::default()), )? .with_bundle(TransformBundle::new())? .with_bundle(input_bundle)? .with(systems::PaddleSystem, "paddle_system", &["input_system"]); let assets_dir = app_root.join("assets"); let mut game = Application::new(assets_dir, Pong, game_data)?; game.run(); Ok(()) } /* struct MyState; impl SimpleState for MyState { fn on_start(&mut self, _data: StateData<'_, GameData<'_, '_>>) {} } fn main() -> amethyst::Result<()> { amethyst::start_logger(Default::default()); let app_root = application_root_dir()?; let assets_dir = app_root.join("assets"); let config_dir = app_root.join("config"); let display_config_path = config_dir.join("display.ron"); let game_data = GameDataBuilder::default() .with_bundle( RenderingBundle::<DefaultBackend>::new() .with_plugin( RenderToWindow::from_config_path(display_config_path)? .with_clear([0.34, 0.36, 0.52, 1.0]), )<|fim▁hole|> )? .with_bundle(TransformBundle::new())?; let mut game = Application::new(assets_dir, MyState, game_data)?; game.run(); Ok(()) } */<|fim▁end|>
.with_plugin(RenderFlat2D::default()),
<|file_name|>log.go<|end_file_name|><|fim▁begin|>package log import ( "fmt" "os" ) type Exit struct { Code int Closure func() } func HandleExit() { if e := recover(); e != nil { if exit, ok := e.(Exit); ok { exit.Closure() os.Exit(exit.Code) } panic(e) } } func Error(msg string, closure func()) { fmt.Printf("\033[1;31m[ERROR]\033[0m %s\n", msg) panic(Exit{1, closure}) } func Warn(msg string) { fmt.Printf("\033[1;33m[WARNING]\033[0m %s\n", msg) } func Succ(msg string) { fmt.Printf("\033[1;32m[DONE]\033[0m %s\n", msg)<|fim▁hole|><|fim▁end|>
}
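Editor's note: log.go above funnels every fatal path through panic(Exit{code, closure}) so that a deferred HandleExit can run the cleanup closure before the process exits. A rough Python analogue of that cleanup-then-exit shape, for illustration only (this is not part of the Go repo and assumes nothing about its callers):

import sys

def error(msg, cleanup):
    # mirrors log.Error: report, run the closure, then exit non-zero
    print(f"[ERROR] {msg}")
    cleanup()      # like exit.Closure() being run inside HandleExit
    sys.exit(1)    # like os.Exit(exit.Code)

error("something failed", lambda: print("cleaning up"))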
<|file_name|>topic-viewer-backend-api.service.spec.ts<|end_file_name|><|fim▁begin|>// Copyright 2018 The Oppia Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @fileoverview Unit tests for TopicViewerBackendApiService. */ import { HttpClientTestingModule, HttpTestingController } from '@angular/common/http/testing'; import { TestBed, fakeAsync, flushMicrotasks } from '@angular/core/testing'; import { ReadOnlyTopic, ReadOnlyTopicBackendDict, ReadOnlyTopicObjectFactory } from 'domain/topic_viewer/read-only-topic-object.factory'; import { TopicViewerBackendApiService } from 'domain/topic_viewer/topic-viewer-backend-api.service'; describe('Topic viewer backend API service', () => { let topicViewerBackendApiService: TopicViewerBackendApiService; let httpTestingController: HttpTestingController; let sampleDataResultsObjects: ReadOnlyTopic; let sampleDataResults: ReadOnlyTopicBackendDict; let readOnlyTopicObjectFactory: ReadOnlyTopicObjectFactory; beforeEach(() => { readOnlyTopicObjectFactory = new ReadOnlyTopicObjectFactory(); TestBed.configureTestingModule({ imports: [HttpClientTestingModule], }); httpTestingController = TestBed.inject(HttpTestingController); topicViewerBackendApiService = TestBed.inject(TopicViewerBackendApiService); readOnlyTopicObjectFactory = TestBed.inject(ReadOnlyTopicObjectFactory); let nodeDict = { id: 'node_1', thumbnail_filename: 'image.png', title: 'Title 1', description: 'Description 1', prerequisite_skill_ids: ['skill_1'], acquired_skill_ids: ['skill_2'], destination_node_ids: ['node_2'], outline: 'Outline', exploration_id: null, outline_is_finalized: false, thumbnail_bg_color: '#a33f40' }; // Sample topic object returnable from the backend. 
sampleDataResults = { topic_name: 'topic_name', topic_id: 'topic_id', topic_description: 'Topic description', canonical_story_dicts: [{ id: '0', title: 'Story Title', description: 'Story Description', node_titles: ['Chapter 1'], thumbnail_filename: 'image.svg', thumbnail_bg_color: '#F8BF74', story_is_published: true, url_fragment: 'story-title', completed_node_titles: ['Chapter 1'], all_node_dicts: [nodeDict] }], additional_story_dicts: [{ id: '1', title: 'Story Title', description: 'Story Description', node_titles: ['Chapter 1'], thumbnail_filename: 'image.svg', thumbnail_bg_color: '#F8BF74', story_is_published: true, completed_node_titles: ['Chapter 1'], url_fragment: 'story-title-one', all_node_dicts: [nodeDict] }], uncategorized_skill_ids: ['skill_id_1'], subtopics: [{ skill_ids: ['skill_id_2'], id: 1, title: 'subtopic_name', thumbnail_filename: 'image.svg', thumbnail_bg_color: '#F8BF74', url_fragment: 'subtopic-name' }], degrees_of_mastery: { skill_id_1: 0.5, skill_id_2: 0.3 }, skill_descriptions: { skill_id_1: 'Skill Description 1', skill_id_2: 'Skill Description 2' }, practice_tab_is_displayed: false, meta_tag_content: 'Topic meta tag content', page_title_fragment_for_web: 'topic page title'<|fim▁hole|> }; sampleDataResultsObjects = readOnlyTopicObjectFactory.createFromBackendDict( sampleDataResults); }); afterEach(() => { httpTestingController.verify(); }); it('should successfully fetch an existing topic from the backend', fakeAsync(() => { const successHandler = jasmine.createSpy('success'); const failHandler = jasmine.createSpy('fail'); topicViewerBackendApiService.fetchTopicDataAsync('0', 'staging').then( successHandler, failHandler); const req = httpTestingController.expectOne( '/topic_data_handler/staging/0'); expect(req.request.method).toEqual('GET'); req.flush(sampleDataResults); flushMicrotasks(); expect(successHandler).toHaveBeenCalledWith(sampleDataResultsObjects); expect(failHandler).not.toHaveBeenCalled(); }) ); it('should use rejection handler if backend request failed', fakeAsync(() => { const successHandler = jasmine.createSpy('success'); const failHandler = jasmine.createSpy('fail'); topicViewerBackendApiService.fetchTopicDataAsync('0', 'staging').then( successHandler, failHandler); const req = httpTestingController.expectOne( '/topic_data_handler/staging/0'); expect(req.request.method).toEqual('GET'); req.flush({ error: 'Error fetching topic 0.' }, { status: 500, statusText: 'Error fetching topic 0.' }); flushMicrotasks(); expect(successHandler).not.toHaveBeenCalled(); expect(failHandler).toHaveBeenCalledWith('Error fetching topic 0.'); }) ); });<|fim▁end|>
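Editor's note: the Angular spec above follows the usual arrange/act/assert cycle for mocked HTTP: issue the call, assert exactly one matching request, flush a canned body, then verify nothing is outstanding. The same shape with a hand-rolled fake HTTP layer in Python (all names below are hypothetical illustrations, not from the Oppia codebase):

class FakeHttp:
    """Stands in for HttpTestingController: records calls, returns a canned body."""
    def __init__(self, canned):
        self.canned = canned
        self.calls = []

    def get(self, url):
        self.calls.append(url)
        return self.canned

def fetch_topic_data(http, topic_id, classroom):
    return http.get(f'/topic_data_handler/{classroom}/{topic_id}')

http = FakeHttp({'topic_name': 'topic_name'})
assert fetch_topic_data(http, '0', 'staging') == {'topic_name': 'topic_name'}
assert http.calls == ['/topic_data_handler/staging/0']  # like expectOne(...)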
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use byteorder::{BigEndian, WriteBytesExt}; use db; use std::cmp; use std::io::Cursor; pub mod bloomfilter; #[macro_export] macro_rules! retry_bound { ($k:expr) => { if $k < 9 { // special case since formula yields a very loose upper bound for k < 9 $k as u32 // we can always retrieve k elements in k rounds } else { 3 * (($k as f64).ln() / ($k as f64).ln().ln()).ceil() as u32 } }; ($k:expr, $d:expr) => { if $k < 3 { // special case since formula yields a very loose upper bound for k < 3 $k as u32 } else { ((($k as f64).ln().ln() / ($d as f64).ln()) + 1.0).ceil() as u32 } }; } #[macro_export] macro_rules! some_or_random { ($res:expr, $rng:expr, $len:expr) => { if let Some(idx) = $res { idx } else { $rng.next_u64() % ($len as u64) } }; } // Below is unsafe #[inline] pub fn label_cmp(l1: &[u8], l2: &[u8]) -> cmp::Ordering { unsafe { (&*(l1 as *const [u8] as *const [u64; 4])).cmp(&*(l2 as *const [u8] as *const [u64; 4])) } } #[inline] pub fn tree_height(num: u64) -> u32 { ((num + 1) as f64).log2().ceil() as u32 } #[inline] pub fn get_index(labels: &[Vec<u8>], label: &[u8]) -> Option<u64> { match labels.binary_search_by(|probe| label_cmp(&probe[..], label)) { Ok(i) => Some(i as u64), Err(_) => None,<|fim▁hole|>#[inline] pub fn get_idx_bloom(bloom: &bloomfilter::Bloom, label: &[u8], num: u64) -> Option<u64> { for i in 0..(num as usize) { if bloom.check((i, label)) { return Some(i as u64); } } None } // Returns number of elements in collection for given collection_idx (this assumes hybrid 2 or 4) pub fn collection_len(bucket_len: u64, collection_idx: u32, num_collections: u32) -> u64 { if num_collections == 1 { bucket_len } else if num_collections == 2 { // hybrid 2 match collection_idx { 0 => (bucket_len as f64 / 2f64).ceil() as u64, 1 => bucket_len / 2, _ => panic!("Invalid collection idx"), } } else if num_collections == 4 { // hybrid 4 match collection_idx { 0 => ((bucket_len as f64 / 2f64).ceil() / 2f64).ceil() as u64, 1 => ((bucket_len as f64 / 2f64).ceil() / 2f64).floor() as u64, 2 => ((bucket_len as f64 / 2f64).floor() / 2f64).ceil() as u64, 3 => bucket_len / 4, _ => panic!("Invalid collection idx"), } } else { panic!("Invalid num collections"); } } // Returns the indices of collections that contain a meaningful label #[inline] pub fn label_collections(scheme: db::OptScheme) -> Vec<usize> { match scheme { db::OptScheme::Normal | db::OptScheme::Aliasing => vec![0], db::OptScheme::Hybrid2 => vec![0, 1], // labels are in collections 0 and 1 db::OptScheme::Hybrid4 => vec![0, 1, 2, 3], // labels are in collections 0, 1, 2, and 3 } } #[inline] pub fn label_marker(index: usize, buckets: usize) -> Vec<u8> { assert!(index < buckets); let max = u32::max_value(); let mut limit = max / buckets as u32; limit *= (index as u32) + 1; let mut a = Cursor::new(Vec::with_capacity(4)); a.write_u32::<BigEndian>(limit).unwrap(); a.into_inner() } #[inline] pub fn bucket_idx(label: &[u8], partitions: &[Vec<u8>]) -> usize { for (i, partition) in partitions.iter().enumerate() { if label <= &partition[..] { return i; } } 0 } #[inline] pub fn get_alpha(num: u64) -> u64 { if db::CIPHER_SIZE <= 240 { if num < 8 { 1 } else if num < 2048 { 8 } else if num < 65536 { 32 } else { 64 } } else if db::CIPHER_SIZE <= 1024 { if num < 8 { 1 } else if num < 32768 { 8 } else if num < 131072 { 16 } else { 32 } } else if num < 32768 { 1 } else { 8 } }<|fim▁end|>
} }
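Editor's note: the retry_bound! macro in the mod.rs record is pure arithmetic, so it transcribes directly. A Python rendering of exactly the two branches the macro computes, with the same small-k special cases and nothing added beyond its own formulas:

import math

def retry_bound(k, d=None):
    if d is None:      # one-argument form of the macro
        if k < 9:      # special-cased: the formula is a loose bound for small k
            return k
        return 3 * math.ceil(math.log(k) / math.log(math.log(k)))
    if k < 3:          # two-argument form, special-cased for small k
        return k
    return math.ceil(math.log(math.log(k)) / math.log(d) + 1.0)

print(retry_bound(100))     # bound for retrieving 100 elements
print(retry_bound(100, 4))  # bound with branching factor d = 4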
<|file_name|>range_put.js<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright 2013-2014 Aerospike, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ /******************************************************************************* * * node range_put --start <start> --end <end> --skip <skip> * * Write records with given key range. * * Examples: * * Write records with keys in range 1-100 * * node range_put --start 1 --end 100 * * Read records with keys in range 1-100, skipping every 5th key (in sequence) * * node range_get --start 1 --end 100 --skip 5 * * Write records with keys in range 900-1000 * * node range_put --start 900 * ******************************************************************************/ var fs = require('fs'); var aerospike = require('aerospike'); var yargs = require('yargs'); var Policy = aerospike.policy; var Status = aerospike.status; /******************************************************************************* * * Options parsing * ******************************************************************************/ var argp = yargs .usage("$0 [options]") .options({ help: { boolean: true, describe: "Display this message." }, host: { alias: "h", default: "127.0.0.1", describe: "Aerospike database address." }, port: { alias: "p", default: 3000, describe: "Aerospike database port." }, timeout: { alias: "t", default: 10, describe: "Timeout in milliseconds." }, 'log-level': { alias: "l", default: aerospike.log.INFO, describe: "Log level [0-5]" }, 'log-file': { default: undefined, describe: "Path to a file send log messages to." }, namespace: { alias: "n", default: "test", describe: "Namespace for the keys." }, set: { alias: "s", default: "demo", describe: "Set for the keys." }, start: { default: 1, describe: "Start value for the key range." }, end: { default: 1000, describe: "End value for the key range." }, skip: { default: 0, describe: "Skip every n keys." } }); var argv = argp.argv; if ( argv.help === true ) { argp.showHelp(); process.exit(0); } /******************************************************************************* * * Configure the client. * ******************************************************************************/ config = { // the hosts to attempt to connect with. hosts: [ { addr: argv.host, port: argv.port } ], // log configuration log: { level: argv['log-level'], file: argv['log-file'] ? fs.openSync(argv['log-file'], "a") : 2 }, // default policies policies: { timeout: argv.timeout } }; /******************************************************************************* * * Perform the operation * ******************************************************************************/ aerospike.client(config).connect(function (err, client) { if ( err.code != Status.AEROSPIKE_OK ) { console.error("Error: Aerospike server connection error. 
", err.message); process.exit(1); }<|fim▁hole|> // function put_done(client, start, end, skip) { var total = end - start + 1; var done = 0; var success = 0; var failure = 0; var skipped = 0; var timeLabel = "range_put @ " + total; console.time(timeLabel); return function(err, key, skippy) { if ( skippy === true ) { console.log("SKIP - ", key); skipped++; } else { switch ( err.code ) { case Status.AEROSPIKE_OK: console.log("OK - ", key); success++; break; default: console.log("ERR - ", err, key); failure++; } } done++; if ( done >= total ) { console.timeEnd(timeLabel); console.log(); console.log("RANGE: start=%d end=%d skip=%d)", start, end, skip); console.log("RESULTS: (%d completed, %d success, %d failed, %d skipped)", done, success, failure, skipped); console.log(); client.close(); } } } function put_start(client, start, end, skip) { var done = put_done(client, start, end, skip); var i = start, s = 0; for (; i <= end; i++ ) { var key = { ns: argv.namespace, set: argv.set, key: i }; if ( skip !== 0 && ++s >= skip ) { s = 0; done(null, key, true); continue; } var record = { k: i, s: "abc", i: i * 1000 + 123, b: new Buffer([0xa, 0xb, 0xc]) }; var metadata = { ttl: 10000, gen: 0 }; client.put(key, record, metadata, done); } } put_start(client, argv.start, argv.end, argv.skip); });<|fim▁end|>
// // Perform the operation
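Editor's note: put_done() in the record above is a closure-based completion counter: every asynchronous write lands in one callback that tallies success/failure/skip and prints a single summary once the batch drains. The same factory shape in Python (a sketch of the pattern only, not the aerospike client API):

def make_done(total):
    state = {'done': 0, 'success': 0, 'failure': 0, 'skipped': 0}

    def done(err=None, key=None, skipped=False):
        if skipped:
            state['skipped'] += 1
        elif err is None:
            state['success'] += 1
        else:
            state['failure'] += 1
        state['done'] += 1
        if state['done'] >= total:        # batch drained: report exactly once
            print("RESULTS:", state)

    return done

cb = make_done(total=3)
cb(key=1)                  # OK
cb(key=2, skipped=True)    # skipped
cb(err="timeout", key=3)   # failure; this final call triggers the summary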
<|file_name|>grideditor.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function, division import copy import os import re import urwid from mitmproxy import filt from mitmproxy import script from mitmproxy import utils from mitmproxy.console import common from mitmproxy.console import signals from netlib.http import cookies from netlib.http import user_agents FOOTER = [ ('heading_key', "enter"), ":edit ", ('heading_key', "q"), ":back ", ] FOOTER_EDITING = [ ('heading_key', "esc"), ":stop editing ", ] class TextColumn: subeditor = None def __init__(self, heading): self.heading = heading def text(self, obj): return SEscaped(obj or "") def blank(self): return "" def keypress(self, key, editor): if key == "r": if editor.walker.get_current_value() is not None: signals.status_prompt_path.send( self, prompt = "Read file", callback = editor.read_file ) elif key == "R": if editor.walker.get_current_value() is not None: signals.status_prompt_path.send( editor, prompt = "Read unescaped file", callback = editor.read_file, args = (True,) ) elif key == "e": o = editor.walker.get_current_value() if o is not None: n = editor.master.spawn_editor(o.encode("string-escape")) n = utils.clean_hanging_newline(n) editor.walker.set_current_value(n, False) editor.walker._modified() elif key in ["enter"]: editor.walker.start_edit() else: return key class SubgridColumn: def __init__(self, heading, subeditor): self.heading = heading self.subeditor = subeditor def text(self, obj): p = cookies._format_pairs(obj, sep="\n") return urwid.Text(p) def blank(self): return [] def keypress(self, key, editor): if key in "rRe": signals.status_message.send( self, message = "Press enter to edit this field.", expire = 1000 ) return elif key in ["enter"]: editor.master.view_grideditor( self.subeditor( editor.master, editor.walker.get_current_value(), editor.set_subeditor_value, editor.walker.focus, editor.walker.focus_col ) ) else: return key class SEscaped(urwid.WidgetWrap): def __init__(self, txt): txt = txt.encode("string-escape") w = urwid.Text(txt, wrap="any") urwid.WidgetWrap.__init__(self, w) def get_text(self): return self._w.get_text()[0] def keypress(self, size, key): return key def selectable(self): return True class SEdit(urwid.WidgetWrap): def __init__(self, txt): txt = txt.encode("string-escape") w = urwid.Edit(edit_text=txt, wrap="any", multiline=True) w = urwid.AttrWrap(w, "editfield") urwid.WidgetWrap.__init__(self, w) def get_text(self): return self._w.get_text()[0].strip() def selectable(self): return True class GridRow(urwid.WidgetWrap): def __init__(self, focused, editing, editor, values): self.focused, self.editing, self.editor = focused, editing, editor errors = values[1] self.fields = [] for i, v in enumerate(values[0]): if focused == i and editing: self.editing = SEdit(v) self.fields.append(self.editing) else: w = self.editor.columns[i].text(v) if focused == i: if i in errors: w = urwid.AttrWrap(w, "focusfield_error") else: w = urwid.AttrWrap(w, "focusfield") elif i in errors: w = urwid.AttrWrap(w, "field_error") self.fields.append(w) fspecs = self.fields[:] if len(self.fields) > 1:<|fim▁hole|> dividechars = 2 ) if focused is not None: w.set_focus_column(focused) urwid.WidgetWrap.__init__(self, w) def get_edit_value(self): return self.editing.get_text() def keypress(self, s, k): if self.editing: w = self._w.column_widths(s)[self.focused] k = self.editing.keypress((w,), k) return k def selectable(self): return True class GridWalker(urwid.ListWalker): """ Stores rows as a list of 
(rows, errors) tuples, where rows is a list and errors is a set with an entry of each offset in rows that is an error. """ def __init__(self, lst, editor): self.lst = [(i, set([])) for i in lst] self.editor = editor self.focus = 0 self.focus_col = 0 self.editing = False def _modified(self): self.editor.show_empty_msg() return urwid.ListWalker._modified(self) def add_value(self, lst): self.lst.append((lst[:], set([]))) self._modified() def get_current_value(self): if self.lst: return self.lst[self.focus][0][self.focus_col] def set_current_value(self, val, unescaped): if not unescaped: try: val = val.decode("string-escape") except ValueError: signals.status_message.send( self, message = "Invalid Python-style string encoding.", expire = 1000 ) return errors = self.lst[self.focus][1] emsg = self.editor.is_error(self.focus_col, val) if emsg: signals.status_message.send(message = emsg, expire = 1) errors.add(self.focus_col) else: errors.discard(self.focus_col) self.set_value(val, self.focus, self.focus_col, errors) def set_value(self, val, focus, focus_col, errors=None): if not errors: errors = set([]) row = list(self.lst[focus][0]) row[focus_col] = val self.lst[focus] = [tuple(row), errors] self._modified() def delete_focus(self): if self.lst: del self.lst[self.focus] self.focus = min(len(self.lst) - 1, self.focus) self._modified() def _insert(self, pos): self.focus = pos self.lst.insert( self.focus, [ [c.blank() for c in self.editor.columns], set([]) ] ) self.focus_col = 0 self.start_edit() def insert(self): return self._insert(self.focus) def add(self): return self._insert(min(self.focus + 1, len(self.lst))) def start_edit(self): col = self.editor.columns[self.focus_col] if self.lst and not col.subeditor: self.editing = GridRow( self.focus_col, True, self.editor, self.lst[self.focus] ) self.editor.master.loop.widget.footer.update(FOOTER_EDITING) self._modified() def stop_edit(self): if self.editing: self.editor.master.loop.widget.footer.update(FOOTER) self.set_current_value(self.editing.get_edit_value(), False) self.editing = False self._modified() def left(self): self.focus_col = max(self.focus_col - 1, 0) self._modified() def right(self): self.focus_col = min(self.focus_col + 1, len(self.editor.columns) - 1) self._modified() def tab_next(self): self.stop_edit() if self.focus_col < len(self.editor.columns) - 1: self.focus_col += 1 elif self.focus != len(self.lst) - 1: self.focus_col = 0 self.focus += 1 self._modified() def get_focus(self): if self.editing: return self.editing, self.focus elif self.lst: return GridRow( self.focus_col, False, self.editor, self.lst[self.focus] ), self.focus else: return None, None def set_focus(self, focus): self.stop_edit() self.focus = focus self._modified() def get_next(self, pos): if pos + 1 >= len(self.lst): return None, None return GridRow(None, False, self.editor, self.lst[pos + 1]), pos + 1 def get_prev(self, pos): if pos - 1 < 0: return None, None return GridRow(None, False, self.editor, self.lst[pos - 1]), pos - 1 class GridListBox(urwid.ListBox): def __init__(self, lw): urwid.ListBox.__init__(self, lw) FIRST_WIDTH_MAX = 40 FIRST_WIDTH_MIN = 20 class GridEditor(urwid.WidgetWrap): title = None columns = None def __init__(self, master, value, callback, *cb_args, **cb_kwargs): value = self.data_in(copy.deepcopy(value)) self.master, self.value, self.callback = master, value, callback self.cb_args, self.cb_kwargs = cb_args, cb_kwargs first_width = 20 if value: for r in value: assert len(r) == len(self.columns) first_width = max(len(r), first_width) 
self.first_width = min(first_width, FIRST_WIDTH_MAX) title = urwid.Text(self.title) title = urwid.Padding(title, align="left", width=("relative", 100)) title = urwid.AttrWrap(title, "heading") headings = [] for i, col in enumerate(self.columns): c = urwid.Text(col.heading) if i == 0 and len(self.columns) > 1: headings.append(("fixed", first_width + 2, c)) else: headings.append(c) h = urwid.Columns( headings, dividechars = 2 ) h = urwid.AttrWrap(h, "heading") self.walker = GridWalker(self.value, self) self.lb = GridListBox(self.walker) self._w = urwid.Frame( self.lb, header = urwid.Pile([title, h]) ) self.master.loop.widget.footer.update("") self.show_empty_msg() def show_empty_msg(self): if self.walker.lst: self._w.set_footer(None) else: self._w.set_footer( urwid.Text( [ ("highlight", "No values. Press "), ("key", "a"), ("highlight", " to add some."), ] ) ) def encode(self, s): if not self.encoding: return s try: return s.encode(self.encoding) except ValueError: return None def read_file(self, p, unescaped=False): if p: try: p = os.path.expanduser(p) d = file(p, "rb").read() self.walker.set_current_value(d, unescaped) self.walker._modified() except IOError as v: return str(v) def set_subeditor_value(self, val, focus, focus_col): self.walker.set_value(val, focus, focus_col) def keypress(self, size, key): if self.walker.editing: if key in ["esc"]: self.walker.stop_edit() elif key == "tab": pf, pfc = self.walker.focus, self.walker.focus_col self.walker.tab_next() if self.walker.focus == pf and self.walker.focus_col != pfc: self.walker.start_edit() else: self._w.keypress(size, key) return None key = common.shortcuts(key) column = self.columns[self.walker.focus_col] if key in ["q", "esc"]: res = [] for i in self.walker.lst: if not i[1] and any([x for x in i[0]]): res.append(i[0]) self.callback(self.data_out(res), *self.cb_args, **self.cb_kwargs) signals.pop_view_state.send(self) elif key == "g": self.walker.set_focus(0) elif key == "G": self.walker.set_focus(len(self.walker.lst) - 1) elif key in ["h", "left"]: self.walker.left() elif key in ["l", "right"]: self.walker.right() elif key == "tab": self.walker.tab_next() elif key == "a": self.walker.add() elif key == "A": self.walker.insert() elif key == "d": self.walker.delete_focus() elif column.keypress(key, self) and not self.handle_key(key): return self._w.keypress(size, key) def data_out(self, data): """ Called on raw list data, before data is returned through the callback. """ return data def data_in(self, data): """ Called to prepare provided data. """ return data def is_error(self, col, val): """ Return False, or a string error message. 
""" return False def handle_key(self, key): return False def make_help(self): text = [] text.append(urwid.Text([("text", "Editor control:\n")])) keys = [ ("A", "insert row before cursor"), ("a", "add row after cursor"), ("d", "delete row"), ("e", "spawn external editor on current field"), ("q", "save changes and exit editor"), ("r", "read value from file"), ("R", "read unescaped value from file"), ("esc", "save changes and exit editor"), ("tab", "next field"), ("enter", "edit field"), ] text.extend( common.format_keyvals(keys, key="key", val="text", indent=4) ) text.append( urwid.Text( [ "\n", ("text", "Values are escaped Python-style strings.\n"), ] ) ) return text class QueryEditor(GridEditor): title = "Editing query" columns = [ TextColumn("Key"), TextColumn("Value") ] class HeaderEditor(GridEditor): title = "Editing headers" columns = [ TextColumn("Key"), TextColumn("Value") ] def make_help(self): h = GridEditor.make_help(self) text = [] text.append(urwid.Text([("text", "Special keys:\n")])) keys = [ ("U", "add User-Agent header"), ] text.extend( common.format_keyvals(keys, key="key", val="text", indent=4) ) text.append(urwid.Text([("text", "\n")])) text.extend(h) return text def set_user_agent(self, k): ua = user_agents.get_by_shortcut(k) if ua: self.walker.add_value( [ "User-Agent", ua[2] ] ) def handle_key(self, key): if key == "U": signals.status_prompt_onekey.send( prompt = "Add User-Agent header:", keys = [(i[0], i[1]) for i in user_agents.UASTRINGS], callback = self.set_user_agent, ) return True class URLEncodedFormEditor(GridEditor): title = "Editing URL-encoded form" columns = [ TextColumn("Key"), TextColumn("Value") ] class ReplaceEditor(GridEditor): title = "Editing replacement patterns" columns = [ TextColumn("Filter"), TextColumn("Regex"), TextColumn("Replacement"), ] def is_error(self, col, val): if col == 0: if not filt.parse(val): return "Invalid filter specification." elif col == 1: try: re.compile(val) except re.error: return "Invalid regular expression." 
return False class SetHeadersEditor(GridEditor): title = "Editing header set patterns" columns = [ TextColumn("Filter"), TextColumn("Header"), TextColumn("Value"), ] def is_error(self, col, val): if col == 0: if not filt.parse(val): return "Invalid filter specification" return False def make_help(self): h = GridEditor.make_help(self) text = [] text.append(urwid.Text([("text", "Special keys:\n")])) keys = [ ("U", "add User-Agent header"), ] text.extend( common.format_keyvals(keys, key="key", val="text", indent=4) ) text.append(urwid.Text([("text", "\n")])) text.extend(h) return text def set_user_agent(self, k): ua = user_agents.get_by_shortcut(k) if ua: self.walker.add_value( [ ".*", "User-Agent", ua[2] ] ) def handle_key(self, key): if key == "U": signals.status_prompt_onekey.send( prompt = "Add User-Agent header:", keys = [(i[0], i[1]) for i in user_agents.UASTRINGS], callback = self.set_user_agent, ) return True class PathEditor(GridEditor): title = "Editing URL path components" columns = [ TextColumn("Component"), ] def data_in(self, data): return [[i] for i in data] def data_out(self, data): return [i[0] for i in data] class ScriptEditor(GridEditor): title = "Editing scripts" columns = [ TextColumn("Command"), ] def is_error(self, col, val): try: script.Script.parse_command(val) except script.ScriptException as e: return str(e) class HostPatternEditor(GridEditor): title = "Editing host patterns" columns = [ TextColumn("Regex (matched on hostname:port / ip:port)") ] def is_error(self, col, val): try: re.compile(val, re.IGNORECASE) except re.error as e: return "Invalid regex: %s" % str(e) def data_in(self, data): return [[i] for i in data] def data_out(self, data): return [i[0] for i in data] class CookieEditor(GridEditor): title = "Editing request Cookie header" columns = [ TextColumn("Name"), TextColumn("Value"), ] class CookieAttributeEditor(GridEditor): title = "Editing Set-Cookie attributes" columns = [ TextColumn("Name"), TextColumn("Value"), ] def data_out(self, data): ret = [] for i in data: if not i[1]: ret.append([i[0], None]) else: ret.append(i) return ret class SetCookieEditor(GridEditor): title = "Editing response SetCookie header" columns = [ TextColumn("Name"), TextColumn("Value"), SubgridColumn("Attributes", CookieAttributeEditor), ] def data_in(self, data): flattened = [] for key, (value, attrs) in data: flattened.append([key, value, attrs.items(multi=True)]) return flattened def data_out(self, data): vals = [] for key, value, attrs in data: vals.append( [ key, (value, attrs) ] ) return vals<|fim▁end|>
fspecs[0] = ("fixed", self.editor.first_width + 2, fspecs[0]) w = urwid.Columns( fspecs,
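Editor's note: one portability point about the grideditor.py record above: it round-trips field values through the "string-escape" codec, which exists only on Python 2. On Python 3 the closest equivalent round-trip uses unicode_escape, as this small verified snippet shows:

# Python 3 replacement for the Python 2-only "string-escape" codec used above.
escaped = "a\tb\n".encode("unicode_escape").decode("ascii")   # -> 'a\\tb\\n'
restored = escaped.encode("ascii").decode("unicode_escape")   # -> 'a\tb\n'
assert restored == "a\tb\n"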
<|file_name|>base.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright 2016 Google Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.<|fim▁hole|> * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import {SliceViewChunkSource, SliceViewChunkSpecification, SliceViewChunkSpecificationBaseOptions, SliceViewChunkSpecificationOptions, SliceViewSourceOptions} from 'neuroglancer/sliceview/base'; import {getCombinedTransform} from 'neuroglancer/sliceview/base'; export enum VectorGraphicsType { LINE, POINT } export interface RenderLayer { sources: VectorGraphicsChunkSource[][]|null; } export interface VectorGraphicsChunkSpecificationSourceOptions { vectorGraphicsSourceOptions: VectorGraphicsSourceOptions; } export interface VectorGraphicsSourceOptions extends SliceViewSourceOptions {} export interface VectorGraphicsChunkSource extends SliceViewChunkSource { spec: VectorGraphicsChunkSpecification; } export type VectorGraphicsChunkSpecificationOptions = SliceViewChunkSpecificationOptions; export interface VectorGraphicsChunkSpecificationDefaultChunkSizeOptions extends SliceViewChunkSpecificationBaseOptions {} /** * Specifies a chunk layout and voxel size. */ export class VectorGraphicsChunkSpecification extends SliceViewChunkSpecification { constructor(options: VectorGraphicsChunkSpecificationOptions) { super(options); } static make(options: VectorGraphicsChunkSpecificationOptions& {vectorGraphicsSourceOptions: VectorGraphicsSourceOptions}) { return new VectorGraphicsChunkSpecification(Object.assign( {}, options, {transform: getCombinedTransform(options.transform, options.vectorGraphicsSourceOptions)})); } static fromObject(msg: any) { return new VectorGraphicsChunkSpecification(msg); } toObject(): SliceViewChunkSpecificationOptions { return super.toObject(); } } export const VECTOR_GRAPHICS_RPC_ID = 'vectorgraphics'; export const VECTOR_GRAPHICS_RENDERLAYER_RPC_ID = 'vectorgraphics/RenderLayer';<|fim▁end|>
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0
<|file_name|>part2.rs<|end_file_name|><|fim▁begin|>// adventofcode - day 7 // part 2 use std::io::prelude::*; use std::fs::File; use std::collections::HashMap; use std::cell::RefCell; enum Gate { // Strings are the names of the input wire(s) Not(String), And(String, String), FixedAnd(String), // for ANDs with a '1' Or(String, String), Lshift(String, u16), Rshift(String, u16), Redirect(String), Const(u16), } impl Gate { // long list of functions which parse the input and create the right gates fn new(desc: &mut &str) -> Gate { if desc.contains("NOT") { Gate::new_not(desc) } else if desc.contains("AND") { if desc.starts_with("1"){ Gate::new_fixed_and(desc) } else { Gate::new_and(desc) } } else if desc.contains("OR") { Gate::new_or(desc) } else if desc.contains("RSHIFT") { Gate::new_rshift(desc) } else if desc.contains("LSHIFT") { Gate::new_lshift(desc) } else { Gate::new_redirect_or_const(desc) } } fn new_not(desc: &mut &str) -> Gate { // skip the "NOT " let tmp = &desc[4..]; let x = tmp.split(" -> ").collect::<Vec<&str>>(); *desc = x[1]; Gate::Not(x[0].to_string()) } fn new_fixed_and(desc: &mut &str) -> Gate { // skip the "1 AND " let tmp = &desc[6..]; let x = tmp.split(" -> ").collect::<Vec<&str>>(); *desc = x[1]; Gate::FixedAnd(x[0].to_string()) } fn new_and(desc: &mut &str) -> Gate { let x = desc.split(" AND ") .flat_map(|s| s.split(" -> ")) .collect::<Vec<&str>>(); *desc = x[2]; Gate::And(x[0].to_string(), x[1].to_string()) } fn new_or(desc: &mut &str) -> Gate { let x = desc.split(" OR ") .flat_map(|s| s.split(" -> ")) .collect::<Vec<&str>>(); *desc = x[2]; Gate::Or(x[0].to_string(), x[1].to_string()) } fn new_rshift(desc: &mut &str) -> Gate { let x = desc.split(" RSHIFT ") .flat_map(|s| s.split(" -> ")) .collect::<Vec<&str>>(); *desc = x[2]; Gate::Rshift(x[0].to_string(), x[1].parse::<u16>().unwrap()) } fn new_lshift(desc: &mut &str) -> Gate { let x = desc.split(" LSHIFT ") .flat_map(|s| s.split(" -> ")) .collect::<Vec<&str>>(); *desc = x[2]; Gate::Lshift(x[0].to_string(), x[1].parse::<u16>().unwrap()) } fn new_redirect_or_const(desc: &mut &str) -> Gate { let x = desc.split(" -> ").collect::<Vec<&str>>(); *desc = x[1]; match x[0].parse::<u16>() { Ok(x) => Gate::Const(x), Err(_) => Gate::Redirect(x[0].to_string()) } } // compute the value of this gate, depending on its type fn compute_value(&mut self, wires: &HashMap<String, RefCell<Wire>>) -> u16 { match *self { Gate::Not(ref wname) => { let x = wname.to_string(); //println!("Trying to borrow {}", x); let mut wire = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wname), }; ! wire.get_value(&wires) }, Gate::And(ref wn1, ref wn2) => { let tmp; // we need to create an extra block here, so that w1 has a // shorter lifetime and dies right after the block again. 
// as a result, it is free again and available for a new // recursive borrow { let x = wn1.to_string(); //println!("Trying to borrow {}", x); let mut w1 = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wn1), }; tmp = w1.get_value(&wires); } let x = wn2.to_string(); //println!("Trying to borrow {}", x); let mut w2 = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wn2), }; tmp & w2.get_value(&wires) }, Gate::FixedAnd(ref wname) => { let x = wname.to_string(); //println!("Trying to borrow {}", x); let mut wire = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wname), }; wire.get_value(&wires) & 1 }, Gate::Or(ref wn1, ref wn2) => { let tmp; { let x = wn1.to_string(); //println!("Trying to borrow {}", x); let mut w1 = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wn1), }; tmp = w1.get_value(&wires); } let x = wn2.to_string(); //println!("Trying to borrow {}", x); let mut w2 = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wn2), }; tmp | w2.get_value(&wires) }, Gate::Lshift(ref wname, bits) => {<|fim▁hole|> //println!("Trying to borrow {}", x); let mut wire = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wname), }; wire.get_value(&wires) << bits }, Gate::Rshift(ref wname, bits) => { let x = wname.to_string(); //println!("Trying to borrow {}", x); let mut wire = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wname), }; wire.get_value(&wires) >> bits }, Gate::Redirect(ref wname) => { let x = wname.to_string(); //println!("Trying to borrow {}", x); let mut wire = match wires.get(&x) { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"{}\" found!", wname), }; wire.get_value(&wires) }, Gate::Const(value) => { value }, } } } // each Wire stores: // - its name (which is also its key in the hashmap) // - the gate this wire is connected to // - value of this gate (if it's already computed) struct Wire { #[allow(dead_code)] id: String, gate: Gate, value: Option<u16>, } impl Wire { fn new(desc: &mut &str) -> Wire { Wire{ gate: Gate::new( desc), value: None, id: desc.to_string()} } fn get_value(&mut self, list: &HashMap<String, RefCell<Wire>>) -> u16 { match self.value { Some(val) => val, None => { let val = self.gate.compute_value(list); self.value = Some(val); val } } } } fn main(){ println!("Advent of Code - day 7 | part 2"); // import data let data = import_data(); let wires = create_hashmap_from_data(&data); let mut a = match wires.get("a") { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"a\" found!"), }; let value = a.get_value(&wires); println!("Value of a: {}. 
Now setting b to that value.", value); // part 2 specific code let mut wires2 = create_hashmap_from_data(&data); // extra block, so that tmp dies again { wires2.remove("b"); let tmp = RefCell::new( Wire{id: "b".to_string(), gate: Gate::Const(value), value: Some(value) } ); wires2.insert("b".to_string(), tmp); } let mut a = match wires2.get("a") { Some(elem) => elem.borrow_mut(), None => panic!("No wire with name \"a\" found!"), }; println!("Value of a: {}", a.get_value(&wires2)); } fn create_hashmap_from_data(data: &String) -> HashMap<String, RefCell<Wire>> { let mut wires = HashMap::new(); for mut line in data.lines(){ let wire = Wire::new(&mut line); let x = RefCell::new(wire); // line now contains the id of the wire -> use it as key for the hashmap wires.insert(line.to_string(), x); } wires } // This function simply imports the data set from a file called input.txt fn import_data() -> String { let mut file = match File::open("../../inputs/07.txt") { Ok(f) => f, Err(e) => panic!("file error: {}", e), }; let mut data = String::new(); match file.read_to_string(&mut data){ Ok(_) => {}, Err(e) => panic!("file error: {}", e), }; data }<|fim▁end|>
let x = wname.to_string();
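Editor's note: Wire::get_value in the record above caches each computed signal, so every wire is evaluated once even though gates reference each other recursively. The same memoized evaluation fits in a few lines of Python (toy gate table below, not the real puzzle input):

import functools

GATES = {'x': ('CONST', 123), 'y': ('CONST', 456),
         'd': ('AND', 'x', 'y'), 'a': ('NOT', 'd')}

@functools.lru_cache(maxsize=None)   # plays the role of Wire.value caching
def value(wire):
    op, *args = GATES[wire]
    if op == 'CONST':
        return args[0] & 0xFFFF      # signals are 16-bit, as in the puzzle
    if op == 'NOT':
        return ~value(args[0]) & 0xFFFF
    if op == 'AND':
        return value(args[0]) & value(args[1])
    raise ValueError(op)

print(value('a'))  # 65463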
<|file_name|>topic_vote.rs<|end_file_name|><|fim▁begin|>use mysql::from_row; use mysql::error::Error::MySqlError; use common::utils::*;<|fim▁hole|>pub fn is_voted(user_id: &str, topic_id: &str) -> bool { let mut result = SQL_POOL.prep_exec(r#" SELECT count(id) FROM topic_vote WHERE user_id = ? AND topic_id = ? "#, (user_id, topic_id)).unwrap(); let row_wrapper = result.next(); if row_wrapper.is_none() { return false; } let row = row_wrapper.unwrap().unwrap(); let (count, ) = from_row::<(u8, )>(row); if count == 0 { false } else { true } } pub fn is_agreed(user_id: &str, topic_id: &str) -> bool { let mut result = SQL_POOL.prep_exec(r#" SELECT count(id) FROM topic_vote WHERE user_id = ? AND topic_id = ? AND state = 1 "#, (user_id, topic_id)).unwrap(); let row_wrapper = result.next(); if row_wrapper.is_none() { return false; } let row = row_wrapper.unwrap().unwrap(); let (count, ) = from_row::<(u8, )>(row); if count == 0 { false } else { true } } pub fn is_disagreed(user_id: &str, topic_id: &str) -> bool { let mut result = SQL_POOL.prep_exec(r#" SELECT count(id) FROM topic_vote WHERE user_id = ? AND topic_id = ? AND state = -1 "#, (user_id, topic_id)).unwrap(); let row_wrapper = result.next(); if row_wrapper.is_none() { return false; } let row = row_wrapper.unwrap().unwrap(); let (count, ) = from_row::<(u8, )>(row); if count == 0 { false } else { true } } pub fn create_topic_vote(user_id: &str, topic_id: &str, state: &str) -> Option<u8> { let create_time = gen_datetime().to_string(); let mut stmt = SQL_POOL.prepare(r#" INSERT INTO topic_vote (user_id, topic_id, state, create_time, update_time) VALUES (?, ?, ?, ?, ?) "#).unwrap(); let result = stmt.execute((user_id, topic_id, state, &*create_time, &*create_time)); if let Err(MySqlError(ref err)) = result { println!("{:?}", err.message); return None; } Some(1) } pub fn update_topic_vote(user_id: &str, topic_id: &str, state: &str) -> Option<u8> { let update_time = gen_datetime().to_string(); let mut stmt = SQL_POOL.prepare(r#" UPDATE topic_vote SET state = ?, update_time = ? WHERE user_id = ? AND topic_id = ? "#).unwrap(); let result = stmt.execute((state, &*update_time, user_id, topic_id)); if let Err(MySqlError(ref err)) = result { println!("{:?}", err.message); return None; } Some(1) } pub fn delete_topic_vote(user_id: &str, topic_id: &str) -> Option<u8> { let mut stmt = SQL_POOL.prepare(r#" DELETE FROM topic_vote WHERE user_id = ? AND topic_id = ? "#).unwrap(); let result = stmt.execute((user_id, topic_id)); if let Err(MySqlError(ref err)) = result { println!("{:?}", err.message); return None; } Some(1) }<|fim▁end|>
use common::lazy_static::SQL_POOL;
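Editor's note: is_voted, is_agreed and is_disagreed in the record above differ only in their WHERE clause; each runs a COUNT and tests it against zero (the repeated `if count == 0 { false } else { true }` is just `count != 0`). A compact way to express that shared shape, sketched with a generic DB-API cursor rather than the repo's mysql wrapper:

def _count_is_positive(cursor, sql, params):
    cursor.execute(sql, params)
    (count,) = cursor.fetchone()
    return count != 0   # one helper replaces the three copy-pasted checks

def is_agreed(cursor, user_id, topic_id):
    return _count_is_positive(
        cursor,
        "SELECT count(id) FROM topic_vote"
        " WHERE user_id = %s AND topic_id = %s AND state = 1",
        (user_id, topic_id),
    )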
<|file_name|>eventos.js<|end_file_name|><|fim▁begin|>function closeObject(id) { document.getElementById(id).style.display = 'none'; } function openObject(id) { document.getElementById(id).style.display = 'block'; } function openClose(id){ if (document.getElementById(id).style.display == 'block'){ console.log('intenta cerrar'); closeObject(id); }else{ console.log('intenta abrir'); openObject(id); } } $(document).ready(function () { $('#datetimepicker1').datetimepicker({ format: 'DD/MM/YYYY HH:mm' }); $(".botonRectificarEvento").on("click", function () { $.ajax({ type: 'POST', data: { id: $(this).attr("data-id") }, url: Routing.generate('eventos_rectificacion_nueva',null,true), context: document.body }) .done(function (datos) { if(datos.estado) { $('#tituloPopUp').html('Rectificar Evento'); $("#contenidoPopUp").html(datos.vista); $('.modal-dialog').removeClass('modal-lg'); $('#piePopUp').addClass('show'); $('#ventanaPopUp').modal('show'); } }); }); $("#botonGuardarPopUp").on("click", function () { var datosFormulario = $("#formularioRectificarEvento").serializeArray(); var urlFormulario = Routing.generate('eventos_rectificacion_crear',null,true); $.ajax( { url : urlFormulario, type: "POST", data : datosFormulario, success: function(datos) { if(datos.estado){ if(datos.rectificado){ $('#ventanaPopUp').modal('hide'); $( location ).attr("href", Routing.generate('eventos',null,true)); } else { $("#contenidoPopUp").html(datos.html); } } }, error: function() { alert('error'); } }); return false; }); // Visualizar detalles $('.botonMostrarDetallesEvento').on('click',function(){ $.ajax({ type: 'GET', url: Routing.generate('eventos_detalle',{ id: $(this).attr('data-id') },true), context: document.body }) <|fim▁hole|> .done(function (html) { $('#tituloPopUp').html('Detalle eventos'); $("#contenidoPopUp").html(html); $('.modal-dialog').addClass('modal-lg'); $('#piePopUp').addClass('hide'); $('#piePopUp').removeClass('show'); $('#ventanaPopUp').modal('show'); }); }); //Agregar nuevo detalle $('#ventanaPopUp').on("click",'#botonAgregarDetalle',function(){ var datosFormulario = $("#formularioAgregarDetalle").serializeArray(); var urlFormulario = Routing.generate('eventos_detalle_crear',null,true); $.ajax( { url : urlFormulario, type: "POST", data : datosFormulario, success: function(datos) { $("#contenidoPopUp").html(datos.html); } }); return false; }); $('#ventanaPopUp').on('mouseover','#dp-detalle',function(){ $('#dp-detalle').datetimepicker({ format: 'DD/MM/YYYY HH:mm' }); }); $('#ventanaPopUp').on('mouseover','#dp-rectificar',function(){ $('#dp-rectificar').datetimepicker({ format: 'DD/MM/YYYY HH:mm' }); }); $(".estado-switch").bootstrapSwitch(); $('#botonFormularioRegistro').on('click',function(){ if(! 
$('#botonFormularioRegistro').hasClass('active')) { $(':input','#formularioBusqueda') .not(':button, :submit, :reset, :hidden') .val('') .removeAttr('checked') .removeAttr('selected'); $('#formularioBusqueda').append('<input type="hidden" name="seccion" value="registro" />'); $('#formularioBusqueda').submit(); } // $('#botonFormularioBusqueda').removeClass('active'); // $(this).addClass('active'); // $('#divFormularioRegistro').removeClass('hide'); // $('#divFormularioRegistro').addClass('show'); // $('#divFormularioBusqueda').removeClass('show'); // $('#divFormularioBusqueda').addClass('hide'); }); $('#botonFormularioBusqueda').on('click',function(){ $('#botonFormularioRegistro').removeClass('active'); $(this).addClass('active'); $('#divFormularioBusqueda').removeClass('hide'); $('#divFormularioBusqueda').addClass('show'); $('#divFormularioRegistro').removeClass('show'); $('#divFormularioRegistro').addClass('hide'); }); $('#dtp-fecha-desde').datetimepicker({ format: 'DD/MM/YYYY HH:mm' }); $('#dtp-fecha-hasta').datetimepicker({ format: 'DD/MM/YYYY HH:mm' }); });<|fim▁end|>
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for nadbproj project. import os.path DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'nadbproj', # Or path to database file if using sqlite3. 'USER': 'nadbproj', # Not used with sqlite3. 'PASSWORD': 'nadbproj', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # URL prefix for admin static files -- CSS, JavaScript and images. # Make sure to use a trailing slash. # Examples: "http://foo.com/static/admin/", "/static/admin/". ADMIN_MEDIA_PREFIX = '/static/admin/' # Additional locations of static files STATICFILES_DIRS = ( os.path.join(os.path.dirname(__file__), 'staticfiles'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'zx!582g59qwpwdnds)8b$pm(v-03jgpiq1e1(ix&iyvw*)$_yi' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'nadb-sample-site.urls' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(os.path.dirname(__file__), 'templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles',<|fim▁hole|> 'django.contrib.admin', 'django.contrib.comments', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }<|fim▁end|>
'nadb', 'django.contrib.markup',
<|file_name|>Logger.cpp<|end_file_name|><|fim▁begin|>/* * Logger.cpp * * Created on: 2013. 9. 16. * Author: ilvsusie */ #include "Logger.h" #include "Poco/Logger.h" #include "Poco/SimpleFileChannel.h" #include "Poco/AutoPtr.h" #include "Poco/Message.h" #include "Poco/PatternFormatter.h" #include "Poco/FormattingChannel.h" using Poco::Channel; using Poco::Formatter; using Poco::SimpleFileChannel; using Poco::AutoPtr; using Poco::PatternFormatter; using Poco::FormattingChannel; namespace nanolat { Logger gLogger; Logger::Logger() { AutoPtr<SimpleFileChannel> pChannel(new SimpleFileChannel); pChannel->setProperty("path", "nanolat.log"); pChannel->setProperty("rotation", "10 M"); //"%d-%m-%Y %H:%M:%S: %t" AutoPtr<Formatter> formatter(new PatternFormatter("%d-%m-%Y %H:%M:%S %s: %t")); AutoPtr<Channel> formattingChannel(new FormattingChannel(formatter, pChannel));<|fim▁hole|> logger_ = & Poco::Logger::get("SoTopless"); #if defined(NDEBUG) logger_->setLevel(Poco::Message::PRIO_INFORMATION); #else // Debug mode -> PRIO_TRACE logger_->setLevel(Poco::Message::PRIO_TRACE); #endif } Logger::~Logger() { // TODO Auto-generated destructor stub } } /* namespace nanolat */<|fim▁end|>
Poco::Logger::root().setChannel(formattingChannel);
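Editor's note: the Logger.cpp record chains SimpleFileChannel (10 MB rotation) through a PatternFormatter into the root logger, then picks TRACE or INFORMATION depending on NDEBUG. For comparison only, a rough equivalent with Python's stdlib logging (this is an illustration, not part of nanolat):

import logging
from logging.handlers import RotatingFileHandler

handler = RotatingFileHandler("nanolat.log", maxBytes=10 * 1024 * 1024, backupCount=1)
handler.setFormatter(logging.Formatter("%(asctime)s %(name)s: %(message)s"))

logger = logging.getLogger("SoTopless")
logger.addHandler(handler)
# __debug__ is False under `python -O`, loosely mirroring the NDEBUG switch above
logger.setLevel(logging.DEBUG if __debug__ else logging.INFO)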
<|file_name|>dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for DeleteConversationDataset # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-dialogflow # [START dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync] from google.cloud import dialogflow_v2<|fim▁hole|> def sample_delete_conversation_dataset(): # Create a client client = dialogflow_v2.ConversationDatasetsClient() # Initialize request argument(s) request = dialogflow_v2.DeleteConversationDatasetRequest( name="name_value", ) # Make the request operation = client.delete_conversation_dataset(request=request) print("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) # [END dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]<|fim▁end|>
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|>''' Usage: manager puzzle_load (--file <filename>) [--url <url>] manager puzzle_del (--name <name>) [--url <url>] manager puzzles [--url <url>] manager puzzleboards_clear [--url <url>] manager puzzleboard_consume [--async-url <url>] (--name <name>) [--size <size>] manager puzzleboard_pop (--name <name>) [--url <url>] Options: --async-url <url> The url to the async function endpoint [default: http://localhost:8080/async-function/huntwordsapi] --url <url> The url to the function [default: http://localhost:8080/function/huntwordsapi]<|fim▁hole|> --file <filename> The filename from which to read the words; one per line --name <name> The puzzle name to give the dictionary of words --size <size> The length of a side of the grid on which to place words [default: 15] -h, --help Print this help text and exit --version Print the version and exit ''' from docopt import docopt from .commands_puzzleboard import command_puzzleboards_clear, command_puzzleboard_consume, command_puzzleboard_pop from .commands_puzzle import command_puzzle_load, command_puzzles # Command pattern verbs = { 'puzzle_load': command_puzzle_load, 'puzzles': command_puzzles, 'puzzleboards_clear': command_puzzleboards_clear, 'puzzleboard_consume': command_puzzleboard_consume, 'puzzleboard_pop': command_puzzleboard_pop } if __name__ == '__main__': opts = docopt(__doc__, version='0.1') command = [v for k, v in verbs.items() if opts[k]][0] command(**opts)<|fim▁end|>
<|file_name|>test_messages.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import json from catmaid.models import Message from .common import CatmaidApiTestCase class MessagesApiTests(CatmaidApiTestCase): def test_read_message_error(self): self.fake_authentication() message_id = 5050 response = self.client.post(f'/messages/{message_id}/mark_read') self.assertEqual(response.status_code, 404) parsed_response = json.loads(response.content.decode('utf-8')) self.assertIn('error', parsed_response) self.assertIn('type', parsed_response) self.assertEquals('Http404', parsed_response['type']) def test_read_message_without_action(self): self.fake_authentication() message_id = 3 response = self.client.post(f'/messages/{message_id}/mark_read') self.assertStatus(response) parsed_response = json.loads(response.content.decode('utf-8')) message = Message.objects.get(id=message_id) self.assertEqual(True, message.read) self.assertTrue(parsed_response.get('success')) def test_read_message_with_action(self): self.fake_authentication() message_id = 1 <|fim▁hole|> message = Message.objects.filter(id=message_id)[0] self.assertEqual(True, message.read) def test_list_messages(self): self.fake_authentication() response = self.client.post( '/messages/list', {}) self.assertStatus(response) parsed_response = json.loads(response.content.decode('utf-8')) def get_message(data, id): msgs = [d for d in data if d['id'] == id] if len(msgs) != 1: raise ValueError("Malformed message data") return msgs[0] expected_result = { '0': { 'action': '', 'id': 3, 'text': 'Contents of message 3.', 'time': '2014-10-05 11:12:01.360422+00:00', 'title': 'Message 3' }, '1': { 'action': 'http://www.example.com/message2', 'id': 2, 'text': 'Contents of message 2.', 'time': '2011-12-20 16:46:01.360422+00:00', 'title': 'Message 2' }, '2': { 'action': 'http://www.example.com/message1', 'id': 1, 'text': 'Contents of message 1.', 'time': '2011-12-19 16:46:01+00:00', 'title': 'Message 1' }, '3': { 'id': -1, 'notification_count': 0 } } # Check result independent from order for mi in ('0','1','2','3'): self.assertEqual(expected_result[mi], parsed_response[mi])<|fim▁end|>
response = self.client.post(f'/messages/{message_id}/mark_read') self.assertEqual(response.status_code, 302)
<|file_name|>context_processors.py<|end_file_name|><|fim▁begin|>from currencies.models import Currency def currencies(request): currencies = Currency.objects.active() if not request.session.get('currency'): try: currency = Currency.objects.get(is_default__exact=True)<|fim▁hole|> return { 'CURRENCIES': currencies, 'CURRENCY': request.session['currency'] }<|fim▁end|>
except Currency.DoesNotExist: currency = None request.session['currency'] = currency
<|file_name|>beforeFrame.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python ### # Copyright (C) 2012 Shrinidhi Rao [email protected] <|fim▁hole|># This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ### import os import sys import time import socket import subprocess import multiprocessing import tempfile taskId = os.environ['rbhus_taskId'] frameId = os.environ['rbhus_frameId'] user = os.environ['rbhus_user'] fileName = os.environ['rbhus_fileName'] minRam = os.environ['rbhus_minRam'] maxRam = os.environ['rbhus_maxRam'] logBase = os.environ['rbhus_logBase'] framePad = os.environ['rbhus_pad'] rThreads = os.environ['rbhus_threads'] pad = os.environ['rbhus_pad'] outDir = os.environ['rbhus_outDir'] outFile = os.environ['rbhus_outName'] afterFrameCmd = os.environ['rbhus_afCmd'] os.system("del /q \"c:\\Users\\blue\\AppData\\Local\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\"") #os.system("mklink \"c:\\Users\\blue\\AppData\\Local\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\" \"X:\\standard\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\"") os.system("copy \"X:\\standard\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\3dsmax.ini\" \"c:\\Users\\blue\\AppData\\Local\\Autodesk\\3dsMax\\2013 - 64bit\\ENU\\\" /y")<|fim▁end|>
#
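The del/copy shell-outs above ignore failures silently; a sketch of the same ini swap using the standard library, which raises on error. Paths are taken verbatim from the script.

# Sketch: replace the os.system del/copy pair with stdlib calls that raise on failure.
import os
import shutil

dst = r"c:\Users\blue\AppData\Local\Autodesk\3dsMax\2013 - 64bit\ENU\3dsmax.ini"
src = r"X:\standard\Autodesk\3dsMax\2013 - 64bit\ENU\3dsmax.ini"

if os.path.exists(dst):
    os.remove(dst)       # equivalent of `del /q`
shutil.copy2(src, dst)   # equivalent of `copy /y`, also preserves metadata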
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import os import asyncio from ._perf_stress_runner import _PerfStressRunner from ._perf_stress_test import PerfStressTest from ._random_stream import RandomStream, WriteStream, get_random_bytes from ._async_random_stream import AsyncRandomStream from ._batch_perf_test import BatchPerfTest from ._event_perf_test import EventPerfTest __all__ = [ "PerfStressTest", "BatchPerfTest", "EventPerfTest", "RandomStream", "WriteStream", "AsyncRandomStream", "get_random_bytes" ] <|fim▁hole|> loop = asyncio.get_event_loop() loop.run_until_complete(main_loop.start()) def run_perfstress_debug_cmd(): main_loop = _PerfStressRunner(debug=True) loop = asyncio.get_event_loop() loop.run_until_complete(main_loop.start()) def run_system_perfstress_tests_cmd(): root_dir = os.path.dirname(os.path.abspath(__file__)) sys_test_dir = os.path.join(root_dir, "system_perfstress") main_loop = _PerfStressRunner(test_folder_path=sys_test_dir, debug=True) loop = asyncio.get_event_loop() loop.run_until_complete(main_loop.start())<|fim▁end|>
def run_perfstress_cmd(): main_loop = _PerfStressRunner()
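For orientation, a sketch of the kind of test class these entry points discover and drive. The import path and the run_sync/run_async hook names follow the usual PerfStressTest convention but are assumptions here, not verified against this framework's exact API.

# Hypothetical example test; hook names and import path are assumptions.
import asyncio
from azure_devtools.perfstress_tests import PerfStressTest  # assumed import path

class SleepTest(PerfStressTest):
    def run_sync(self):
        # one synchronous operation timed per iteration
        pass

    async def run_async(self):
        # one awaitable operation timed per iteration
        await asyncio.sleep(0)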
<|file_name|>test_ls.rs<|end_file_name|><|fim▁begin|>/* * Test Eio module. * * List and process all the rust source files '.rs' in the current dir. * */ extern crate libc; extern crate efl; use libc::{c_char}; use std::c_str::CString; use efl::ecore; use efl::eio; fn _filter_cb(count: &mut int, handler: &eio::EioFile, file: *c_char) -> bool { // Get a &str from the raw *c_char type let cstring = unsafe { CString::new(file, false) }; let f = match cstring.as_str() { None => "", Some(s) => s }; // Let's process only rust source files! if !f.ends_with(".rs") { println!("Filtering file: {}", f); return false } return true; } fn _main_cb(count: &mut int, handler: &eio::EioFile, file: *c_char) { let cstring = unsafe { CString::new(file, false) }; let v = match cstring.as_str() { None => "", Some(s) => s }; // Count processed files *count += 1; println!("Processing file: {} ({})", v, *count); } fn _done_cb(count: &mut int, handler: &eio::EioFile) { println!("Number of processed files: {}", *count); println!("Done!"); ecore::main_loop_quit(); } fn _error_cb(count: &mut int, handler: &eio::EioFile, error: int) { println!("Error!"); ecore::main_loop_quit(); } fn main() { ecore::init(); eio::init(); let count: int = 0; eio::file_ls(".", _filter_cb, _main_cb, _done_cb, _error_cb, &count);<|fim▁hole|> ecore::shutdown(); }<|fim▁end|>
ecore::main_loop_begin(); eio::shutdown();
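For comparison, the filter/main/done callback chain above collapses into a plain loop in a synchronous setting; a rough Python sketch of the same logic (illustrative only, not part of the efl bindings):

# Illustrative Python rendering of the Rust callbacks: filter, process, report.
import os

def list_rs_files(path="."):
    count = 0
    for entry in os.scandir(path):
        if not entry.name.endswith(".rs"):                # _filter_cb
            print(f"Filtering file: {entry.name}")
            continue
        count += 1                                        # _main_cb
        print(f"Processing file: {entry.name} ({count})")
    print(f"Number of processed files: {count}")          # _done_cb

list_rs_files()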
<|file_name|>reset-mode.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// cc #46688 fn surprise(x: i32) { assert_eq!(x, 2); } fn main() { let x = &(1, &2); let (_, &b) = x; surprise(b); }<|fim▁end|>
// run-pass // Test that we "reset" the mode as we pass through a `&` pattern. //
<|file_name|>malformed_json.py<|end_file_name|><|fim▁begin|># Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from collections import deque from proboscis import test from proboscis import asserts from proboscis import after_class from proboscis import before_class from trove.tests.config import CONFIG from trove.tests.api.instances import instance_info from trove.tests.api.instances import VOLUME_SUPPORT from trove.tests.util.users import Requirements from trove.tests.util import assert_contains from trove.tests.util import create_dbaas_client from trove.common.utils import poll_until @test(groups=["dbaas.api.mgmt.malformed_json"]) class MalformedJson(object): @before_class def setUp(self): self.reqs = Requirements(is_admin=False) self.user = CONFIG.users.find_user(self.reqs) self.dbaas = create_dbaas_client(self.user) volume = None if VOLUME_SUPPORT: volume = {"size": 1} self.instance = self.dbaas.instances.create( name="qe_instance", flavor_id=instance_info.dbaas_flavor_href, volume=volume, databases=[{"name": "firstdb", "character_set": "latin2", "collate": "latin2_general_ci"}]) @after_class def tearDown(self): self.dbaas.instances.delete(self.instance) @test def test_bad_instance_data(self):<|fim▁hole|> users = "bar" try: self.dbaas.instances.create("bad_instance", 3, 3, databases=databases, users=users) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s," " exception %s" % (httpCode, e)) databases = "u'foo'" users = "u'bar'" assert_contains( e.message, ["Validation error:", "instance['databases'] %s is not of type 'array'" % databases, "instance['users'] %s is not of type 'array'" % users, "instance['volume'] 3 is not of type 'object'"]) @test def test_bad_database_data(self): _bad_db_data = "{foo}" try: self.dbaas.databases.create(self.instance.id, _bad_db_data) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create database failed with code %s, " "exception %s" % (httpCode, e)) _bad_db_data = "u'{foo}'" asserts.assert_equal(e.message, "Validation error: " "databases %s is not of type 'array'" % _bad_db_data) @test def test_bad_user_data(self): def format_path(values): values = list(values) msg = "%s%s" % (values[0], ''.join(['[%r]' % i for i in values[1:]])) return msg _user = [] _user_name = "F343jasdf" _user.append({"name12": _user_name, "password12": "password"}) try: self.dbaas.users.create(self.instance.id, _user) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create user failed with code %s, " "exception %s" % (httpCode, e)) err_1 = format_path(deque(('users', 0))) assert_contains( e.message, ["Validation error:", "%(err_1)s 'name' is a required property" % {'err_1': 
err_1}, "%(err_1)s 'password' is a required property" % {'err_1': err_1}]) @test def test_bad_resize_instance_data(self): def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.instances.resize_instance(self.instance.id, "bad data") except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Resize instance failed with code %s, " "exception %s" % (httpCode, e)) @test def test_bad_resize_vol_data(self): def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) data = "bad data" try: self.dbaas.instances.resize_volume(self.instance.id, data) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Resize instance failed with code %s, " "exception %s" % (httpCode, e)) data = "u'bad data'" assert_contains( e.message, ["Validation error:", "resize['volume']['size'] %s is not valid under " "any of the given schemas" % data, "%s is not of type 'integer'" % data, "%s does not match '[0-9]+'" % data]) @test def test_bad_change_user_password(self): password = "" users = [{"name": password}] def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.users.change_passwords(self.instance, users) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Change usr/passwd failed with code %s, " "exception %s" % (httpCode, e)) password = "u''" assert_contains( e.message, ["Validation error: users[0] 'password' " "is a required property", "users[0]['name'] %s is too short" % password, "users[0]['name'] %s does not match " "'^.*[0-9a-zA-Z]+.*$'" % password]) @test def test_bad_grant_user_access(self): dbs = [] def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.users.grant(self.instance, self.user, dbs) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Grant user access failed with code %s, " "exception %s" % (httpCode, e)) @test def test_bad_revoke_user_access(self): db = "" def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.users.revoke(self.instance, self.user, db) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 404, "Revoke user access failed w/code %s, " "exception %s" % (httpCode, e)) asserts.assert_equal(e.message, "The resource could not be found.") @test def test_bad_body_flavorid_create_instance(self): flavorId = ["?"] try: self.dbaas.instances.create("test_instance", flavorId, 2) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s, " "exception %s" % (httpCode, e)) flavorId = [u'?'] assert_contains( e.message, ["Validation error:", "instance['flavorRef'] %s is not valid " "under any of the given 
schemas" % flavorId, "%s is not of type 'string'" % flavorId, "%s is not of type 'string'" % flavorId, "%s is not of type 'integer'" % flavorId, "instance['volume'] 2 is not of type 'object'"]) @test def test_bad_body_datastore_create_instance(self): datastore = "*" datastore_version = "*" try: self.dbaas.instances.create("test_instance", 3, {"size": 2}, datastore=datastore, datastore_version=datastore_version) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s, " "exception %s" % (httpCode, e)) assert_contains( e.message, ["Validation error:", "instance['datastore']['type']" " u'%s' does not match" " '^.*[0-9a-zA-Z]+.*$'" % datastore, "instance['datastore']['version'] u'%s' " "does not match '^.*[0-9a-zA-Z]+.*$'" % datastore_version]) @test def test_bad_body_volsize_create_instance(self): volsize = "h3ll0" try: self.dbaas.instances.create("test_instance", "1", volsize) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s, " "exception %s" % (httpCode, e)) volsize = "u'h3ll0'" asserts.assert_equal(e.message, "Validation error: " "instance['volume'] %s is not of " "type 'object'" % volsize)<|fim▁end|>
databases = "foo"
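Each test above re-declares the same _check_instance_status closure before calling poll_until; a small shared helper would remove that repetition. A sketch, assuming poll_until keeps the signature used in these tests (a callable polled until truthy):

# Sketch: one shared ACTIVE-polling helper for the repeated pattern above.
def wait_for_active(dbaas, instance):
    def _is_active():
        return dbaas.instances.get(instance).status == "ACTIVE"
    poll_until(_is_active)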
<|file_name|>IPv4_HTTP.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- ## ## # Author: Peter Manev # # [email protected] # ## ## ## !!! IMPORTANT - LATEST DEV Scapy is needed !!! # REMOVE your current scapy installation !!! # then -> # hg clone http://hg.secdev.org/scapy-com # python setup.py install from scapy.all import * import sys, urllib , os, subprocess, random from itertools import * import Global_Vars class pacifyIpv4Http: def writeIPv4HttpRule(self, sid_id_http, http_method, http_uri_string, \ http_content_all, directory, src_name): ##creating and writing a sid.rules file rule_file = open('%s/%s.rules' % (directory,sid_id_http), 'w+') content_http_uri_string_ready_for_rule = None content_http_uri_string_ready_for_rule = "" if (len(http_uri_string) > 250): content_http_uri_string_array = [http_uri_string[i:i+250] for i in range(0, len(http_uri_string), 250)] for i in content_http_uri_string_array: i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\ replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\ replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|') content_http_uri_string_ready_for_rule = \ content_http_uri_string_ready_for_rule + \ ("content:\"%s\"; http_raw_uri; " % (i)) else: http_uri_string = http_uri_string.replace('|', '|7C|').\ replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\ replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\ replace('\r', '|0d|').replace('\n', '|0a|') content_http_uri_string_ready_for_rule = \ ("content:\"%s\"; http_raw_uri; " % (http_uri_string)) content_all_ready_for_rule = None content_all_ready_for_rule = "" if (len(http_content_all) > 250): content_http_all_array = [http_content_all[i:i+250] for i in range(0, len(http_content_all), 250)] for i in content_http_all_array: i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\ replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\ replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|') content_all_ready_for_rule = \ content_all_ready_for_rule + \ ("content:\"%s\"; " % (i)) else: http_content_all = http_content_all.replace('|', '|7C|').\ replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\ replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\ replace('\r', '|0d|').replace('\n', '|0a|') content_all_ready_for_rule = \ ("content:\"%s\"; " % (http_content_all)) rule_file.write ( \ "alert http any any -> any any (msg:\"HTTP requests tests - sid %s , \ pcap - %s \"; \ content:\"%s\"; http_method; %s %s \ reference:url,%s; sid:%s; rev:1;)" % \ (sid_id_http, sid_id_http, http_method, \ content_http_uri_string_ready_for_rule, \ content_all_ready_for_rule, \ src_name, sid_id_http) ) rule_file.close() def rebuildIPv4HttpSessionExtraTcpSAs(self, packet, results_directory, \ sid_id_http, src_name, repo_name): #We rebuild the http session , however inject some extra SAs session_packets = list() session_packets_fragmented = list() #print packet[TCP][Raw] #print packet[Ether].src ipsrc = packet[IP].src ipdst = packet[IP].dst portsrc = packet[TCP].sport portdst = packet[TCP].dport seq_num = random.randint(1024,(2**32)-1) ack_num = random.randint((2**10),(2**16)) # We make sure ack_num_extra* are never going to be the same numbering # as ack_num ack_num_extra_1 = random.randint((2**22)+1 , (2**32)-1) ack_num_extra_2 = random.randint((2**16)+1,(2**22)-1) syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \ /IP(src=ipsrc, 
dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
    seq=seq_num)

    synack_extra_1 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
    type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
    dport=portsrc, seq=ack_num_extra_1, ack=syn.seq+1)

    synack_extra_2 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
    type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
    dport=portsrc, seq=ack_num_extra_2, ack=syn.seq+1)

    synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
    /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
    seq=ack_num, ack=syn.seq+1)

    p_frag_synack = fragment(synack, fragsize=1 )

    ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
    /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
    seq=syn.seq+1, ack=synack.seq+1)

    ##This is the actual data packet that will be sent, containing the payload
    p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
    /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
    seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]

    ##We need to ACK the packet
    returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
    /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
    seq=p.ack, ack=(p.seq + len(p[Raw])))

    ##Now we build the FIN handshake
    finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
    /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
    seq=returnAck.ack, ack=returnAck.seq)

    finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
    /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
    seq=finAck.ack, ack=finAck.seq+1)

    ##
    # Here we start ordering the stream so that we have 3 SAs. The extra ones are
    # BEFORE the real one. For the purpose of thoroughness we also
    # add cases where the real SA arrives fragmented.
## #write the session - normal session_packets.append(syn) session_packets.append(synack_extra_1) session_packets.append(synack_extra_2) session_packets.append(synack) session_packets.append(ack) session_packets.append(p) session_packets.append(returnAck) session_packets.append(finAck) session_packets.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Real_SA-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets) session_packets[:] = [] #empty the list #write the session but with an ordered fragmented real SA session_packets_fragmented.append(syn) session_packets_fragmented.append(synack_extra_1) session_packets_fragmented.append(synack_extra_2) for p_fragment in p_frag_synack: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack_extra_1) session_packets_fragmented.append(synack_extra_2) for p_fragment in reversed(p_frag_synack): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack_extra_1) session_packets_fragmented.append(synack_extra_2) random.shuffle(p_frag_synack) #shuffle JUST the fragments in the session for p_fragment in p_frag_synack: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list ## # Here we start ordering the stream so that we have 3 SAs. The extra ones are # AFTER the real one. For the purpose of thoroughness we also # add cases where the real SA arrives fragmented. 
## #write the session - normal session_packets.append(syn) session_packets.append(synack) session_packets.append(synack_extra_1) session_packets.append(synack_extra_2) session_packets.append(ack) session_packets.append(p) session_packets.append(returnAck) session_packets.append(finAck) session_packets.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Real_SA-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets) session_packets[:] = [] #empty the list #write the session but with an ordered fragmented real SA session_packets_fragmented.append(syn) for p_fragment in p_frag_synack: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(synack_extra_1) session_packets_fragmented.append(synack_extra_2) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) for p_fragment in reversed(p_frag_synack): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(synack_extra_1) session_packets_fragmented.append(synack_extra_2) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) random.shuffle(p_frag_synack) #shuffle JUST the fragments in the session for p_fragment in p_frag_synack: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(synack_extra_1) session_packets_fragmented.append(synack_extra_2) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list ## # Here we start ordering the stream so that we have 3 SAs. The extra ones are # BEFORE and AFTER the real one. For the purpose of thoroughness we also # add cases where the real SA arrives fragmented. 
## #write the session - normal session_packets.append(syn) session_packets.append(synack_extra_1) session_packets.append(synack) session_packets.append(synack_extra_2) session_packets.append(ack) session_packets.append(p) session_packets.append(returnAck) session_packets.append(finAck) session_packets.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Real_SA-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets) session_packets[:] = [] #empty the list #write the session but with an ordered fragmented real SA session_packets_fragmented.append(syn) session_packets_fragmented.append(synack_extra_1) for p_fragment in p_frag_synack: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(synack_extra_2) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack_extra_1) for p_fragment in reversed(p_frag_synack): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(synack_extra_2) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack_extra_1) random.shuffle(p_frag_synack) #shuffle JUST the fragments in the session for p_fragment in p_frag_synack: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(synack_extra_2) session_packets_fragmented.append(ack) session_packets_fragmented.append(p) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list def rebuildIPv4HttpSession(self, packet, results_directory, sid_id_http, \ src_name, repo_name): session_packets = list() session_packets_fragmented = list() #print packet[TCP][Raw] #print packet[Ether].src ipsrc = packet[IP].src ipdst = packet[IP].dst portsrc = packet[TCP].sport portdst = packet[TCP].dport seq_num = random.randint(1024,(2**32)-1) ack_num = random.randint(1024,(2**32)-1) syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \ /IP(src=ipsrc, 
dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \ seq=seq_num) synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \ seq=ack_num, ack=syn.seq+1) ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1) ##This is the actual data packet that will be send, containing the payload p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw] ##This is the actual data packet that will be sent containing the payload #- fragmented p_frag = fragment(p, fragsize=10 ) ##We need to ACK the packet returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=p.ack, ack=(p.seq + len(p[Raw]))) ##Now we build the Finshake finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \ seq=returnAck.ack, ack=returnAck.seq) finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=finAck.ack, ack=finAck.seq+1) #write the session - normal session_packets.append(syn) session_packets.append(synack) session_packets.append(ack) session_packets.append(p) session_packets.append(returnAck) session_packets.append(finAck) session_packets.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets) session_packets[:] = [] #empty the list #write the session but with an ordered fragmented payload session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in p_frag: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in reversed(p_frag): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) random.shuffle(p_frag) #shuffle JUST the fragments in the session for p_fragment in p_frag: session_packets_fragmented.append(p_fragment) 
session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list def rebuildIPv4HttpSessionDot1Q(self, packet, results_directory, \ sid_id_http, src_name, repo_name): #Dot1Q VLAN tags session_packets = list() session_packets_fragmented = list() ipsrc = packet[IP].src ipdst = packet[IP].dst portsrc = packet[TCP].sport portdst = packet[TCP].dport seq_num = random.randint(1024,(2**32)-1) ack_num = random.randint(1024,(2**32)-1) syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \ seq=seq_num) syn.tags = Dot1Q(vlan=1111) synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \ seq=ack_num, ack=syn.seq+1) synack.tags = Dot1Q(vlan=1111) ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1) ack.tags = Dot1Q(vlan=1111) ##This is the actual data packet that will be send, containing the payload p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw] p.tags = Dot1Q(vlan=1111) ##This is the actual data packet that will be sent containing the payload #- fragmented p_frag = fragment(p, fragsize=10 ) ## This is the same original data packet - but no VLAN tags p_Dot1Q_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw] p_frag_Dot1Q_untagged = fragment(p_Dot1Q_untagged, fragsize=10) # Dot1Q wrong VLAN tag - we change the VLAN tag in the data packet # Everything else is the same and stays the same p_Dot1Q_tagged_wrong = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw] p_Dot1Q_tagged_wrong.tags = Dot1Q(vlan=3333) ##This is the actual data packet that will be sent containing the payload #- fragmented. p_frag_Dot1Q_tagged_wrong = fragment(p_Dot1Q_tagged_wrong, fragsize=10 ) ##This is the data packet. Fromt this data packet we will edit and tweek # the VLAN tags for one or more fragments of the same data packet ! p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw] p_Dot1Q_data_frag.tags = Dot1Q(vlan=1111) # We fragment the data packet, then we will play around with the fragments # VLAN tags p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 ) p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333) # We fragment the data packet , but we make one fragment untagged. 
# VLAN tag missing p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 ) p_frag_Dot1Q_data_frag_missing[3].tags = Untagged() # We fragment the data packet , but we make ONLY one fragment tagged # with the correct VLAN tag p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 ) for frag in p_frag_Dot1Q_data_frag_one_tagged: frag.tags = Untagged() p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1111) #We need to ACK the packet returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=p.ack, ack=(p.seq + len(p[Raw]))) returnAck.tags = Dot1Q(vlan=1111) ##Now we build the Finshake finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \ seq=returnAck.ack, ack=returnAck.seq) finAck.tags = Dot1Q(vlan=1111) finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=finAck.ack, ack=finAck.seq+1) finalAck.tags = Dot1Q(vlan=1111) #write the session - normal session_packets.append(syn) session_packets.append(synack) session_packets.append(ack) session_packets.append(p) session_packets.append(returnAck) session_packets.append(finAck) session_packets.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets) session_packets[:] = [] #empty the list #write the session but with an ordered fragmented payload session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in p_frag: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap"\ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in reversed(p_frag): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap"\ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) random.shuffle(p_frag) #shuffle JUST the fragments in the session for p_fragment in p_frag: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , 
src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list ## # Here we start with the wrong Dot1Q VLAN tags in the data packet # and the creation of the pcaps designed for not alerting # due to changed (fake/hopped) VLAN tag in the same flow ## #write the session - normal session_packets.append(syn) session_packets.append(synack) session_packets.append(ack) session_packets.append(p_Dot1Q_tagged_wrong) session_packets.append(returnAck) session_packets.append(finAck) session_packets.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q_tagged_wrong-%s-tp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets) session_packets[:] = [] #empty the list #write the session but with an ordered fragmented payload session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in p_frag_Dot1Q_tagged_wrong: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_tagged_wrong-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in reversed(p_frag_Dot1Q_tagged_wrong): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) random.shuffle(p_frag_Dot1Q_tagged_wrong) #shuffle JUST the fragments in the session for p_fragment in p_frag_Dot1Q_tagged_wrong: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list ## # Here we start with the missing Dot1Q VLAN tag in the data packet # and the creation of the pcaps designed for not alerting # due to missing VLAN tag in the same flow. 
## #write the session - normal session_packets.append(syn) session_packets.append(synack) session_packets.append(ack) session_packets.append(p_Dot1Q_untagged) session_packets.append(returnAck) session_packets.append(finAck) session_packets.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q_data_tag_missing-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets) session_packets[:] = [] #empty the list #write the session but with an ordered fragmented payload session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in p_frag_Dot1Q_untagged: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_missing-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in reversed(p_frag_Dot1Q_untagged): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) random.shuffle(p_frag_Dot1Q_untagged) #shuffle JUST the fragments in the session for p_fragment in p_frag_Dot1Q_untagged: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list def rebuildIPv4HttpSessionDot1QWrongTagInFragments(self, packet, \ results_directory, sid_id_http, src_name, repo_name): #Dot1Q VLAN tags #Here we will change the VLAN tags on one or more frgaments #of the data packet session_packets = list() session_packets_fragmented = list() ipsrc = packet[IP].src ipdst = packet[IP].dst portsrc = packet[TCP].sport portdst = packet[TCP].dport seq_num = random.randint(1024,(2**32)-1) ack_num = random.randint(1024,(2**32)-1) syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \ seq=seq_num) syn.tags = Dot1Q(vlan=1111) synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \ seq=ack_num, ack=syn.seq+1) synack.tags = Dot1Q(vlan=1111) ack = Ether(src=packet[Ether].src, 
dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1) ack.tags = Dot1Q(vlan=1111) ##This is the actual data packet that will be send, containing the payload p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw] p.tags = Dot1Q(vlan=1111) ##This is the actual data packet that will be sent containing the payload #- fragmented p_frag = fragment(p, fragsize=10 ) ##This is the data packet. Fromt this data packet we will edit and tweek # the VLAN tags for one or more fragments of the same data packet ! p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \ seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw] p_Dot1Q_data_frag.tags = Dot1Q(vlan=1111) # We fragment the data packet, then we will play around with the fragments # VLAN tags - one fragment has the wrong VLAN tag p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 ) p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333) # We fragment the data packet , but we make one fragment untagged. # VLAN tag missing p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 ) p_frag_Dot1Q_data_frag_missing[3].tags = Untagged() # We fragment the data packet , but we make ONLY one fragment tagged # with the correct VLAN tag p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 ) for frag in p_frag_Dot1Q_data_frag_one_tagged: frag.tags = Untagged() p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1111) #We need to ACK the packet returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=p.ack, ack=(p.seq + len(p[Raw]))) returnAck.tags = Dot1Q(vlan=1111) ##Now we build the Finshake finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \ seq=returnAck.ack, ack=returnAck.seq) finAck.tags = Dot1Q(vlan=1111) finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=finAck.ack, ack=finAck.seq+1) finalAck.tags = Dot1Q(vlan=1111) ## # Here we start with chnaging the Dot1Q VLAN tags in the FRAGMENTS # of the data packetand the creation of the pcaps designed for not alerting # due to missing VLAN tag in the fragments of data in the same flow. 
## ## one fragment from the data packet has a missing VLAN tag #write the session but with an ordered fragmented payload session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in p_frag_Dot1Q_data_frag_missing: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in reversed(p_frag_Dot1Q_data_frag_missing): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) random.shuffle(p_frag_Dot1Q_data_frag_missing) #shuffle JUST the fragments in the session for p_fragment in p_frag_Dot1Q_data_frag_missing: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list ## one frgament from the data packet has the wrong VLAN tag #write the session but with an ordered fragmented payload session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in p_frag_Dot1Q_data_frag_wrong: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in reversed(p_frag_Dot1Q_data_frag_wrong): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) 
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) random.shuffle(p_frag_Dot1Q_data_frag_wrong) #shuffle JUST the fragments in the session for p_fragment in p_frag_Dot1Q_data_frag_wrong: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list ## all frgaments from the data packet have no VLAN tags BUT one #write the session but with an ordered fragmented payload session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in p_frag_Dot1Q_data_frag_one_tagged: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) for p_fragment in reversed(p_frag_Dot1Q_data_frag_one_tagged): session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list #write the session but with unordered/unsorted/mixed JUST fragmented #payload packets session_packets_fragmented.append(syn) session_packets_fragmented.append(synack) session_packets_fragmented.append(ack) random.shuffle(p_frag_Dot1Q_data_frag_one_tagged) #shuffle JUST the fragments in the session for p_fragment in p_frag_Dot1Q_data_frag_one_tagged: session_packets_fragmented.append(p_fragment) session_packets_fragmented.append(returnAck) session_packets_fragmented.append(finAck) session_packets_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \ % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_fragmented) session_packets_fragmented[:] = [] #empty the list def rebuildIPv4HttpSessionQinQ(self, packet, results_directory, \ sid_id_http, src_name, repo_name): #Dot1Q double tags (vlans) = QinQ 
        session_packets = list()
        session_packets_fragmented = list()
        ipsrc = packet[IP].src
        ipdst = packet[IP].dst
        portsrc = packet[TCP].sport
        portdst = packet[TCP].dport
        seq_num = random.randint(1024,(2**32)-1)
        ack_num = random.randint(1024,(2**32)-1)

        syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
            seq=seq_num)
        syn.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        syn.tags[Dot1Q].tpid = 0x88a8

        synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
            seq=ack_num, ack=syn.seq+1)
        synack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        synack.tags[Dot1Q].tpid = 0x88a8

        ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)
        ack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        ack.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent, containing the payload
        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        p.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented
        p_frag = fragment(p, fragsize=10)

        ## This is the same original data packet - but no VLAN tags
        p_QinQ_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_frag_QinQ_untagged = fragment(p_QinQ_untagged, fragsize=10)

        # QinQ reversed - we reverse/switch the VLAN tags in the data packet
        # Everything else is the same and stays the same
        p_QinQ_tag_reversed = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_QinQ_tag_reversed.tags = Dot1AD(vlan=4094)/Dot1Q(vlan=666)
        p_QinQ_tag_reversed.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented, QinQ reversed/switched tags
        p_frag_QinQ_tag_reversed = fragment(p_QinQ_tag_reversed, fragsize=10)

        ##We need to ACK the packet
        returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=p.ack, ack=(p.seq + len(p[Raw])))
        returnAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        returnAck.tags[Dot1Q].tpid = 0x88a8

        ##Now we build the FIN handshake
        finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
            seq=returnAck.ack, ack=returnAck.seq)
        finAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        finAck.tags[Dot1Q].tpid = 0x88a8

        finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=finAck.ack, ack=finAck.seq+1)
        finalAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        finalAck.tags[Dot1Q].tpid = 0x88a8

        #write the session - normal
        session_packets.append(syn)
        session_packets.append(synack)
        session_packets.append(ack)
        session_packets.append(p)
        session_packets.append(returnAck)
        session_packets.append(finAck)
        session_packets.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ-%s-tp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets)
        session_packets[:] = [] #empty the list

        #write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag) #shuffle JUST the fragments in the session
        for p_fragment in p_frag:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        ##
        # Here we start with the reversed QinQ VLAN tags
        # and the creation of the pcaps designed for not alerting
        # due to switched (fake) VLAN tags in the same flow
        ##

        #write the session - normal
        session_packets.append(syn)
        session_packets.append(synack)
        session_packets.append(ack)
        session_packets.append(p_QinQ_tag_reversed)
        session_packets.append(returnAck)
        session_packets.append(finAck)
        session_packets.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ_tags_reversed-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets)
        session_packets[:] = [] #empty the list

        #write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_tag_reversed:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_tags_reversed-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_tag_reversed):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_tags_reversed-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_tag_reversed) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_tag_reversed:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_tags_reversed-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        ##
        # Here we start with the missing QinQ VLAN tags in the data packet
        # and the creation of the pcaps designed for not alerting
        # due to missing VLAN tags in the same flow
        ##

        #write the session - normal
        session_packets.append(syn)
        session_packets.append(synack)
        session_packets.append(ack)
        session_packets.append(p_QinQ_untagged)
        session_packets.append(returnAck)
        session_packets.append(finAck)
        session_packets.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets)
        session_packets[:] = [] #empty the list

        #write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_untagged:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_untagged):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_untagged) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_untagged:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

    def rebuildIPv4HttpSessionQinQWrongTagInFragments(self, packet, \
            results_directory, sid_id_http, src_name, repo_name):
        #QinQ VLAN tags - double tags
        #Here we will change the VLAN tags on one or more fragments
        #of the QinQ data packet
        session_packets = list()
        session_packets_fragmented = list()
        ipsrc = packet[IP].src
        ipdst = packet[IP].dst
        portsrc = packet[TCP].sport
        portdst = packet[TCP].dport
        seq_num = random.randint(1024,(2**32)-1)
        ack_num = random.randint(1024,(2**32)-1)

        syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
            seq=seq_num)
        syn.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        syn.tags[Dot1Q].tpid = 0x88a8

        synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
            seq=ack_num, ack=syn.seq+1)
        synack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        synack.tags[Dot1Q].tpid = 0x88a8

        ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)
        ack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        ack.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent, containing the payload
        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        p.tags[Dot1Q].tpid = 0x88a8

        ##This is the data packet. From this data packet we will edit and tweak
        # the VLAN tags (QinQ) for one or more fragments of the same data packet !
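        # NOTE: an illustrative sketch of what the fragment manipulation below
        # relies on. Scapy's fragment() splits the IP payload into pieces of at
        # most `fragsize` bytes - rounded down to a multiple of 8, so
        # fragsize=10 yields 8-byte fragments - and returns them as a plain
        # list, which is what allows retagging a single fragment, e.g.:
        #
        #   frags = fragment(p_QinQ_data_frag, fragsize=10)
        #   frags[3].tags = Dot1AD(vlan=666)/Dot1Q(vlan=777)  # retag only the 4th fragment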
        p_QinQ_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_QinQ_data_frag.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        p_QinQ_data_frag.tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, then we will play around with the
        # fragments' VLAN tags in QinQ
        # Here we change the VLAN tag of the inner Dot1Q layer
        p_frag_QinQ_data_frag_wrong_dot1q = fragment(p_QinQ_data_frag, fragsize=10)
        p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=666)/Dot1Q(vlan=777)
        p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, then we will play around with the
        # fragments' VLAN tags in QinQ
        # Here we change the VLAN tag of the outer 802.1AD layer
        p_frag_QinQ_data_frag_wrong_dot1ad = fragment(p_QinQ_data_frag, fragsize=10)
        p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=4094)
        p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet and make one fragment with both tags
        # having the wrong VLAN IDs
        p_frag_QinQ_data_frag_wrong_both = fragment(p_QinQ_data_frag, fragsize=10)
        p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=444)/Dot1Q(vlan=555)
        p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, but we make one fragment untagged.
        # VLAN tags missing
        p_frag_QinQ_data_frag_missing_tags = fragment(p_QinQ_data_frag, fragsize=10)
        p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()

        ## We fragment the data packet, but we make one fragment with reversed
        # VLAN tags
        p_frag_QinQ_data_frag_reversed_tags = fragment(p_QinQ_data_frag, fragsize=10)
        p_frag_QinQ_data_frag_reversed_tags[3].tags = \
            Dot1AD(vlan=4094)/Dot1Q(vlan=666)
        p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, but we make ONLY one fragment QinQ tagged
        # with the correct VLAN tags
        p_frag_QinQ_data_frag_one_tagged = fragment(p_QinQ_data_frag, fragsize=10)
        for frag in p_frag_QinQ_data_frag_one_tagged:
            frag.tags = Untagged()
        p_frag_QinQ_data_frag_one_tagged[3].tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        p_frag_QinQ_data_frag_one_tagged[3].tags[Dot1Q].tpid = 0x88a8

        ##We need to ACK the packet
        returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=p.ack, ack=(p.seq + len(p[Raw])))
        returnAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        returnAck.tags[Dot1Q].tpid = 0x88a8

        ##Now we build the FIN handshake
        finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
            seq=returnAck.ack, ack=returnAck.seq)
        finAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        finAck.tags[Dot1Q].tpid = 0x88a8

        finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=finAck.ack, ack=finAck.seq+1)
        finalAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
        finalAck.tags[Dot1Q].tpid = 0x88a8

        ##
        # Here we start with changing the QinQ VLAN tags in the FRAGMENTS
        # of the data packet and the creation of the pcaps designed for not
        # alerting due to missing/reversed/nonexisting VLAN tags in the
        # fragments of data in the same flow.
        ##

        ## one fragment from the data packet has a wrong VLAN tag - dot1Q tag.
        # The other tag (dot1AD - S-VLAN/Carrier VLAN) is correct
        # write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1q):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        ## one fragment from the data packet has a wrong VLAN tag - dot1AD tag
        # -> S-VLAN/Carrier VLAN. The other tag (dot1q) is correct
        # write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1ad):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        ## one fragment from the data packet has both VLAN tag IDs wrong
        #write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_wrong_both:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_both):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_wrong_both) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_wrong_both:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        ## one fragment of the data packet has NO VLAN tags
        #write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_missing_tags:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_missing_tags):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_missing_tags) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_missing_tags:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        ## one fragment of the data packet has both VLAN tags switched/reversed
        # write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_reversed_tags):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_reversed_tags) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        ## one fragment of the data packet has both VLAN tags correct.
        # The rest do not.
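        # NOTE: an illustrative way to sanity-check the pcaps this case writes,
        # assuming the tag patch emits real 802.1Q/802.1ad headers on write
        # (`rdpcap` and `haslayer` are vanilla Scapy; the filename is a
        # placeholder):
        #
        #   from scapy.all import rdpcap, Dot1Q
        #   pkts = rdpcap("some_generated_file.pcap")
        #   tagged = [x for x in pkts if x.haslayer(Dot1Q)]
        #   # for this "one tagged fragment" case only a single fragment
        #   # should show up VLAN-tagged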
        # write the session but with an ordered fragmented payload
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_one_tagged:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_one_tagged_fragments-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_one_tagged):
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_one_tagged_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_fragmented.append(syn)
        session_packets_fragmented.append(synack)
        session_packets_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_one_tagged) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_one_tagged:
            session_packets_fragmented.append(p_fragment)
        session_packets_fragmented.append(returnAck)
        session_packets_fragmented.append(finAck)
        session_packets_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_one_tagged_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_fragmented)
        session_packets_fragmented[:] = [] #empty the list

    def rebuildIPv4HttpSeqOverSpill(self, packet, results_directory, \
            sid_id_http, src_name, repo_name):
        #rebuild session with overspilling seq numbers
        # seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
        #seq overspill re-writing
        session_packets_seq_overspill = list()
        session_packets_seq_overspill_fragmented = list()
        ipsrc = packet[IP].src
        ipdst = packet[IP].dst
        portsrc = packet[TCP].sport
        portdst = packet[TCP].dport
        #maximum seq=4294967295
        seq_num = 4294967294
        ack_num = random.randint(1024,(2**32)-1)

        syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
            seq=seq_num)
        synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
            seq=ack_num, ack=syn.seq+1)
        ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)

        ##This is the actual data packet that will be sent, containing the payload
        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented
        p_frag = fragment(p, fragsize=10)

        ##We need to ACK the packet
        #here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
        returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=p.ack, ack=(len(p[Raw]) -1 ))

        ##Now we build the FIN handshake
        finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
            seq=returnAck.ack, ack=returnAck.seq)
        finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=finAck.ack, ack=finAck.seq+1)

        #write the session - normal
        session_packets_seq_overspill.append(syn)
        session_packets_seq_overspill.append(synack)
        session_packets_seq_overspill.append(ack)
        session_packets_seq_overspill.append(p)
        session_packets_seq_overspill.append(returnAck)
        session_packets_seq_overspill.append(finAck)
        session_packets_seq_overspill.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill)
        session_packets_seq_overspill[:] = [] #empty the list

        #write the fragmented packets - ordered
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the mixed fragmented packets
        #shuffle/unsort/unorder/mix JUST the fragmented packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag) #shuffle JUST the fragments in the session
        for p_fragment in p_frag:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

    def rebuildIPv4HttpSeqOverSpillDot1Q(self, packet, results_directory, \
            sid_id_http, src_name, repo_name):
        #Dot1Q - VLAN tags cases.
        #rebuild session with overspilling seq numbers
        # seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
        #seq overspill re-writing
        session_packets_seq_overspill = list()
        session_packets_seq_overspill_fragmented = list()
        ipsrc = packet[IP].src
        ipdst = packet[IP].dst
        portsrc = packet[TCP].sport
        portdst = packet[TCP].dport
        #maximum seq=4294967295
        seq_num = 4294967294
        ack_num = random.randint(1024,(2**32)-1)

        syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
            seq=seq_num)
        syn.tags = Dot1Q(vlan=1155)
        synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
            seq=ack_num, ack=syn.seq+1)
        synack.tags = Dot1Q(vlan=1155)
        ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)
        ack.tags = Dot1Q(vlan=1155)

        ##This is the actual data packet that will be sent, containing the payload
        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p.tags = Dot1Q(vlan=1155)

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented
        p_frag = fragment(p, fragsize=10)

        ## This is the same original data packet - but no VLAN tags
        p_Dot1Q_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_frag_Dot1Q_untagged = fragment(p_Dot1Q_untagged, fragsize=10)

        # Dot1Q wrong VLAN tag - we change the VLAN tag in the data packet
        # Everything else is the same and stays the same
        p_Dot1Q_tagged_wrong = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_Dot1Q_tagged_wrong.tags = Dot1Q(vlan=3355)

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented, with the wrong Dot1Q tag
        p_frag_Dot1Q_tagged_wrong = fragment(p_Dot1Q_tagged_wrong, fragsize=10)

        ##We need to ACK the packet
        #here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
        returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=p.ack, ack=(len(p[Raw]) -1 ))
        returnAck.tags = Dot1Q(vlan=1155)

        ##Now we build the FIN handshake
        finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
            seq=returnAck.ack, ack=returnAck.seq)
        finAck.tags = Dot1Q(vlan=1155)
        finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=finAck.ack, ack=finAck.seq+1)
        finalAck.tags = Dot1Q(vlan=1155)

        #write the session - normal
        session_packets_seq_overspill.append(syn)
        session_packets_seq_overspill.append(synack)
        session_packets_seq_overspill.append(ack)
        session_packets_seq_overspill.append(p)
        session_packets_seq_overspill.append(returnAck)
        session_packets_seq_overspill.append(finAck)
        session_packets_seq_overspill.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill)
        session_packets_seq_overspill[:] = [] #empty the list

        #write the fragmented packets - ordered
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the mixed fragmented packets
        #shuffle/unsort/unorder/mix JUST the fragmented packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag) #shuffle JUST the fragments in the session
        for p_fragment in p_frag:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ##
        # Here we start with the wrong Dot1Q VLAN tags in the data packet
        # and the creation of the pcaps designed for not alerting
        # due to changed (fake/hopped) VLAN tag in the same flow
        ##

        #write the session - normal
        session_packets_seq_overspill.append(syn)
        session_packets_seq_overspill.append(synack)
        session_packets_seq_overspill.append(ack)
        session_packets_seq_overspill.append(p_Dot1Q_tagged_wrong)
        session_packets_seq_overspill.append(returnAck)
        session_packets_seq_overspill.append(finAck)
        session_packets_seq_overspill.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill)
        session_packets_seq_overspill[:] = [] #empty the list

        #write the fragmented packets - ordered
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_Dot1Q_tagged_wrong:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_Dot1Q_tagged_wrong):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the mixed fragmented packets
        #shuffle/unsort/unorder/mix JUST the fragmented packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_Dot1Q_tagged_wrong) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_Dot1Q_tagged_wrong:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ##
        # Here we start with the missing Dot1Q VLAN tag in the data packet
        # and the creation of the pcaps designed for not alerting
        # due to missing VLAN tag in the same flow
        ##

        #write the session - normal
        session_packets_seq_overspill.append(syn)
        session_packets_seq_overspill.append(synack)
        session_packets_seq_overspill.append(ack)
        session_packets_seq_overspill.append(p_Dot1Q_untagged)
        session_packets_seq_overspill.append(returnAck)
        session_packets_seq_overspill.append(finAck)
        session_packets_seq_overspill.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill)
        session_packets_seq_overspill[:] = [] #empty the list

        #write the fragmented packets - ordered
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_Dot1Q_untagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_Dot1Q_untagged):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the mixed fragmented packets
        #shuffle/unsort/unorder/mix JUST the fragmented packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_Dot1Q_untagged) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_Dot1Q_untagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list
    def rebuildIPv4HttpSeqOverSpillDot1QWrongTagInFragments(self, packet, \
            results_directory, sid_id_http, src_name, repo_name):
        #Dot1Q - VLAN tags cases.
        #rebuild session with overspilling seq numbers
        # seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
        #seq overspill re-writing
        session_packets_seq_overspill = list()
        session_packets_seq_overspill_fragmented = list()
        ipsrc = packet[IP].src
        ipdst = packet[IP].dst
        portsrc = packet[TCP].sport
        portdst = packet[TCP].dport
        #maximum seq=4294967295
        seq_num = 4294967294
        ack_num = random.randint(1024,(2**32)-1)

        syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
            seq=seq_num)
        syn.tags = Dot1Q(vlan=1155)
        synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
            seq=ack_num, ack=syn.seq+1)
        synack.tags = Dot1Q(vlan=1155)
        ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)
        ack.tags = Dot1Q(vlan=1155)

        ##This is the actual data packet that will be sent, containing the payload
        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p.tags = Dot1Q(vlan=1155)

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented
        p_frag = fragment(p, fragsize=10)

        ##This is the data packet. From this data packet we will edit and tweak
        # the VLAN tags for one or more fragments of the same data packet !
        p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
            seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_Dot1Q_data_frag.tags = Dot1Q(vlan=1155)

        # We fragment the data packet, then we will play around with the
        # fragments' VLAN tags - one fragment has the wrong VLAN tag
        p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10)
        p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333)

        # We fragment the data packet, but we make one fragment untagged.
        # VLAN tag missing
        p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10)
        p_frag_Dot1Q_data_frag_missing[3].tags = Untagged()

        # We fragment the data packet, but we make ONLY one fragment tagged
        # with the correct VLAN tag
        p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10)
        for frag in p_frag_Dot1Q_data_frag_one_tagged:
            frag.tags = Untagged()
        p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1155)

        ##We need to ACK the packet
        #here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
        returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=p.ack, ack=(len(p[Raw]) -1 ))
        returnAck.tags = Dot1Q(vlan=1155)

        ##Now we build the FIN handshake
        finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800) \
            /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
            seq=returnAck.ack, ack=returnAck.seq)
        finAck.tags = Dot1Q(vlan=1155)
        finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src) \
            /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
            seq=finAck.ack, ack=finAck.seq+1)
        finalAck.tags = Dot1Q(vlan=1155)

        ##
        # Here we start with changing the Dot1Q VLAN tags in the FRAGMENTS
        # of the data packet and the creation of the pcaps designed for not
        # alerting due to missing VLAN tag in the fragments of data in the
        # same flow.
        ##

        ## one fragment from the data packet has a missing VLAN tag
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_Dot1Q_data_frag_missing:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_Dot1Q_data_frag_missing):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_Dot1Q_data_frag_missing) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_Dot1Q_data_frag_missing:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ## one fragment from the data packet has the wrong VLAN tag
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_Dot1Q_data_frag_wrong:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_Dot1Q_data_frag_wrong):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_Dot1Q_data_frag_wrong) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_Dot1Q_data_frag_wrong:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ## all fragments from the data packet have no VLAN tags BUT one
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_Dot1Q_data_frag_one_tagged):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_Dot1Q_data_frag_one_tagged) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
            % (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
            , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

    def rebuildIPv4HttpSeqOverSpillQinQ(self, packet, results_directory, \
            sid_id_http, src_name, repo_name):
        #QinQ - double VLAN tag cases.
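        # NOTE: an illustrative sketch of the wraparound this method exercises.
        # TCP sequence numbers wrap modulo 2**32 (RFC 793), so starting at
        # seq_num = 4294967294 the payload bytes occupy sequence numbers
        # 4294967294, 4294967295, 0, 1, ...:
        #
        #   MOD32 = 2 ** 32
        #   [(4294967294 + i) % MOD32 for i in range(4)]
        #   # -> [4294967294, 4294967295, 0, 1]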
    def rebuildIPv4HttpSeqOverSpillQinQ(self, packet, results_directory, \
    sid_id_http, src_name, repo_name):
        #QinQ - double VLAN tag cases.
        #rebuild session with overspilling seq numbers
        # seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
        #seq overspill re-writing
        session_packets_seq_overspill = list()
        session_packets_seq_overspill_fragmented = list()
        ipsrc = packet[IP].src
        ipdst = packet[IP].dst
        portsrc = packet[TCP].sport
        portdst = packet[TCP].dport
        #maximum seq=4294967295
        seq_num = 4294967294
        ack_num = random.randint(1024,(2**32)-1)

        syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
        seq=seq_num)
        syn.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        syn.tags[Dot1Q].tpid = 0x88a8

        synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
        /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
        seq=ack_num, ack=syn.seq+1)
        synack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        synack.tags[Dot1Q].tpid = 0x88a8

        ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)
        ack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        ack.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent, containing the payload
        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        p.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented
        p_frag = fragment(p, fragsize=10 )

        ## This is the same original data packet - but no VLAN tags
        p_QinQ_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_frag_QinQ_untagged = fragment(p_QinQ_untagged, fragsize=10)

        # QinQ reversed/switched VLAN tags - we swap the two VLAN IDs in the data packet
        # Everything else is the same and stays the same
        p_QinQ_tag_reversed = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_QinQ_tag_reversed.tags = Dot1AD(vlan=4000)/Dot1Q(vlan=777)
        p_QinQ_tag_reversed.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent containing the payload
        #- fragmented, QinQ reversed/switched tags
        p_frag_QinQ_tag_reversed = fragment(p_QinQ_tag_reversed, fragsize=10 )

        ## ONLY Dot1Q VLAN tag - present in the fragments (QinQ expected)
        p_QinQ_tag_only_dot1q = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_QinQ_tag_only_dot1q.tags = Dot1Q(vlan=1234)

        #The actual fragmentation - only one VLAN tag - QinQ expected
        p_frag_QinQ_tag_only_dot1q = fragment(p_QinQ_tag_only_dot1q, fragsize=10 )
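        # Added note: the return ACK below is len(p[Raw]) - 1 because TCP sequence
        # numbers wrap modulo 2**32. The payload starts at seq 4294967295
        # (syn.seq + 1), so (4294967295 + len(p[Raw])) % 2**32 == len(p[Raw]) - 1.
        # Quick sanity check of the arithmetic:
        #   >>> (4294967295 + 100) % 2**32
        #   99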
- "the overspill" returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=p.ack, ack=(len(p[Raw]) -1 )) returnAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000) returnAck.tags[Dot1Q].tpid = 0x88a8 ##Now we build the Finshake finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \ /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \ seq=returnAck.ack, ack=returnAck.seq) finAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000) finAck.tags[Dot1Q].tpid = 0x88a8 finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \ /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \ seq=finAck.ack, ack=finAck.seq+1) finalAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000) finalAck.tags[Dot1Q].tpid = 0x88a8 #write the session - normal session_packets_seq_overspill.append(syn) session_packets_seq_overspill.append(synack) session_packets_seq_overspill.append(ack) session_packets_seq_overspill.append(p) session_packets_seq_overspill.append(returnAck) session_packets_seq_overspill.append(finAck) session_packets_seq_overspill.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ-%s-tp-01.pcap" \ % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_seq_overspill) session_packets_seq_overspill[:] = [] #empty the list #write the fragmented packets - ordered session_packets_seq_overspill_fragmented.append(syn) session_packets_seq_overspill_fragmented.append(synack) session_packets_seq_overspill_fragmented.append(ack) for p_fragment in p_frag: session_packets_seq_overspill_fragmented.append(p_fragment) session_packets_seq_overspill_fragmented.append(returnAck) session_packets_seq_overspill_fragmented.append(finAck) session_packets_seq_overspill_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \ % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_seq_overspill_fragmented) session_packets_seq_overspill_fragmented[:] = [] #empty the list #write the session with reverse fragments order session_packets_seq_overspill_fragmented.append(syn) session_packets_seq_overspill_fragmented.append(synack) session_packets_seq_overspill_fragmented.append(ack) for p_fragment in reversed(p_frag): session_packets_seq_overspill_fragmented.append(p_fragment) session_packets_seq_overspill_fragmented.append(returnAck) session_packets_seq_overspill_fragmented.append(finAck) session_packets_seq_overspill_fragmented.append(finalAck) wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \ % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \ , src_name, repo_name), session_packets_seq_overspill_fragmented) session_packets_seq_overspill_fragmented[:] = [] #empty the list #write mix the fragmented packets #shuffle/unsort/unorder/mix JUST the fragmented packets session_packets_seq_overspill_fragmented.append(syn) session_packets_seq_overspill_fragmented.append(synack) session_packets_seq_overspill_fragmented.append(ack) random.shuffle(p_frag) #shuffle JUST the fragments in the session for p_fragment in p_frag: session_packets_seq_overspill_fragmented.append(p_fragment) session_packets_seq_overspill_fragmented.append(returnAck) session_packets_seq_overspill_fragmented.append(finAck) 
        ##
        # Here we start with the reversed/switched QinQ VLAN tags in the data packet
        # and the creation of the pcaps designed for not alerting
        # due to changed (fake/hopped) VLAN tag in the same flow
        ##
        #write the session - normal
        session_packets_seq_overspill.append(syn)
        session_packets_seq_overspill.append(synack)
        session_packets_seq_overspill.append(ack)
        session_packets_seq_overspill.append(p_QinQ_tag_reversed)
        session_packets_seq_overspill.append(returnAck)
        session_packets_seq_overspill.append(finAck)
        session_packets_seq_overspill.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_tags_reversed-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill)
        session_packets_seq_overspill[:] = [] #empty the list

        #write the fragmented packets - ordered
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_tag_reversed:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_tags_reversed-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_tag_reversed):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_tags_reversed-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write mix the fragmented packets
        #shuffle/unsort/unorder/mix JUST the fragmented packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_tag_reversed) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_tag_reversed:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_tags_reversed-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list
        ##
        # Here we start with the missing QinQ VLAN tag in the data packet
        # and the creation of the pcaps designed for not alerting
        # due to missing VLAN tag in the same flow
        ##
        #write the session - normal
        session_packets_seq_overspill.append(syn)
        session_packets_seq_overspill.append(synack)
        session_packets_seq_overspill.append(ack)
        session_packets_seq_overspill.append(p_QinQ_untagged)
        session_packets_seq_overspill.append(returnAck)
        session_packets_seq_overspill.append(finAck)
        session_packets_seq_overspill.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_data_tag_missing-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill)
        session_packets_seq_overspill[:] = [] #empty the list

        #write the fragmented packets - ordered
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_untagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_tag_missing-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_untagged):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_tag_missing-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write mix the fragmented packets
        #shuffle/unsort/unorder/mix JUST the fragmented packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_untagged) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_untagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_tag_missing-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list
        ##
        # Here we start with only one VLAN tag found in the data packet
        # QinQ VLAN tags expected
        ##
        #write the session - normal
        session_packets_seq_overspill.append(syn)
        session_packets_seq_overspill.append(synack)
        session_packets_seq_overspill.append(ack)
        session_packets_seq_overspill.append(p_QinQ_tag_only_dot1q)
        session_packets_seq_overspill.append(returnAck)
        session_packets_seq_overspill.append(finAck)
        session_packets_seq_overspill.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill)
        session_packets_seq_overspill[:] = [] #empty the list

        #write the fragmented packets - ordered
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_tag_only_dot1q:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_tag_only_dot1q):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write mix the fragmented packets
        #shuffle/unsort/unorder/mix JUST the fragmented packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_tag_only_dot1q) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_tag_only_dot1q:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list
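    # Added roadmap: the method below builds one fragmented copy of the data
    # packet per QinQ tampering case and writes ordered/reversed/shuffled pcaps
    # for each:
    #   wrong_dot1ad  - outer S-VLAN ID changed in one fragment
    #   wrong_dot1q   - inner C-VLAN ID changed in one fragment
    #   only_dot1q    - one fragment carries a single tag instead of two
    #   wrong_both    - both VLAN IDs wrong in one fragment
    #   missing_tags  - one fragment completely untagged
    #   reversed_tags - S-VLAN and C-VLAN IDs swapped in one fragment
    #   one_tagged    - only one fragment keeps the correct QinQ tags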
    def rebuildIPv4HttpSeqOverSpillQinQWrongTagInFragments(self, packet, \
    results_directory, sid_id_http, src_name, repo_name):
        #QinQ - double VLAN tag cases.
        #rebuild session with overspilling seq numbers
        # seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
        #seq overspill re-writing
        session_packets_seq_overspill = list()
        session_packets_seq_overspill_fragmented = list()
        ipsrc = packet[IP].src
        ipdst = packet[IP].dst
        portsrc = packet[TCP].sport
        portdst = packet[TCP].dport
        #maximum seq=4294967295
        seq_num = 4294967294
        ack_num = random.randint(1024,(2**32)-1)

        syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
        seq=seq_num)
        syn.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        syn.tags[Dot1Q].tpid = 0x88a8

        synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
        /IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
        seq=ack_num, ack=syn.seq+1)
        synack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        synack.tags[Dot1Q].tpid = 0x88a8

        ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)
        ack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        ack.tags[Dot1Q].tpid = 0x88a8

        ##This is the actual data packet that will be sent, containing the payload
        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        p.tags[Dot1Q].tpid = 0x88a8

        ##This is the data packet. From this data packet we will edit and tweak
        # the VLAN tags (QinQ) for one or more fragments of the same data packet !
        p_QinQ_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
        seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
        p_QinQ_data_frag.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        p_QinQ_data_frag.tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, then we will play around with the fragments
        # VLAN tags in QinQ
        # Here we change the VLAN tag of the outer 802.1AD layer
        p_frag_QinQ_data_frag_wrong_dot1ad = fragment(p_QinQ_data_frag, fragsize=10 )
        p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=333)/Dot1Q(vlan=4000)
        p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, then we will play around with the fragments
        # VLAN tags in QinQ
        # Here we change the VLAN tag of the inner Dot1Q layer
        p_frag_QinQ_data_frag_wrong_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
        p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=888)
        p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, then we will play around with the fragments
        # VLAN tags in QinQ
        # Here we make one fragment tagged only with one VLAN
        p_frag_QinQ_data_frag_only_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
        p_frag_QinQ_data_frag_only_dot1q[3].tags = Dot1Q(vlan=1234)

        ## We fragment the data packet and make one fragment with both tags
        # having the wrong VLAN IDs
        p_frag_QinQ_data_frag_wrong_both = fragment(p_QinQ_data_frag, fragsize=10 )
        p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=444)/Dot1Q(vlan=555)
        p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, but we make one fragment untagged.
        # VLAN tags missing
        p_frag_QinQ_data_frag_missing_tags = fragment(p_QinQ_data_frag, fragsize=10 )
        p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()

        ## We fragment the data packet, but we make one fragment with reversed
        # VLAN tags
        p_frag_QinQ_data_frag_reversed_tags = fragment(p_QinQ_data_frag, fragsize=10 )
        p_frag_QinQ_data_frag_reversed_tags[3].tags = \
        Dot1AD(vlan=4000)/Dot1Q(vlan=777)
        p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8

        ## We fragment the data packet, but we make ONLY one fragment QinQ tagged
        # with the correct VLAN tags
        p_frag_QinQ_data_frag_one_tagged = fragment(p_QinQ_data_frag, fragsize=10 )
        for frag in p_frag_QinQ_data_frag_one_tagged:
            frag.tags = Untagged()
        p_frag_QinQ_data_frag_one_tagged[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        p_frag_QinQ_data_frag_one_tagged[3].tags[Dot1Q].tpid = 0x88a8
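        # Added note: scapy's fragment() returns a plain list of IP packets, which
        # is what makes the per-fragment retagging above possible. The [3] index
        # assumes the payload is big enough to yield at least four fragments at
        # fragsize=10; a very small payload would raise IndexError here. Sketch:
        #   frags = fragment(p, fragsize=10)  # [frag0, frag1, frag2, frag3, ...]
        #   frags[3].tags = Dot1Q(vlan=1234)  # retag just the fourth fragment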
        ##We need to ACK the packet
        #here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
        returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
        /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
        seq=p.ack, ack=(len(p[Raw]) -1 ))
        returnAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        returnAck.tags[Dot1Q].tpid = 0x88a8

        ##Now we build the FIN handshake
        finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
        seq=returnAck.ack, ack=returnAck.seq)
        finAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        finAck.tags[Dot1Q].tpid = 0x88a8

        finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
        /IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
        seq=finAck.ack, ack=finAck.seq+1)
        finalAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
        finalAck.tags[Dot1Q].tpid = 0x88a8

        ##
        # Here we start with changing the QinQ VLAN tags in the FRAGMENTS
        # of the data packet and the creation of the pcaps designed for not alerting
        # due to missing/reversed/nonexisting VLAN tags in the fragments of
        # data in the same flow.
        ##

        ## one fragment from the data packet has a wrong VLAN tag - dot1Q tag.
        # The other tag (dot1AD - S-VLAN/Carrier VLAN) is correct
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1q):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ## one fragment from the data packet has a wrong VLAN tag - dot1AD tag
        # -> S-VLAN/Carrier VLAN. The other tag (dot1q) is correct
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1ad):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ## We make one fragment with only one VLAN tag (not double)
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_only_dot1q:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list
        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_only_dot1q):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_only_dot1q) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_only_dot1q:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ## one fragment from the data packet has both VLAN tag IDs wrong
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_wrong_both:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_both):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list
        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_wrong_both) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_wrong_both:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ## one fragment of the data packet has NO VLAN tags
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_missing_tags:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_missing_tags):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_missing_tags) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_missing_tags:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list
        ## one fragment of the data packet has both VLAN tags switched/reversed
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_reversed_tags):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_reversed_tags) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        ## one fragment of the data packet has both VLAN tags correct.
        # The rest do not.
        #write the session but with an ordered fragmented payload
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in p_frag_QinQ_data_frag_one_tagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session with reverse fragments order
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        for p_fragment in reversed(p_frag_QinQ_data_frag_one_tagged):
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

        #write the session but with unordered/unsorted/mixed JUST fragmented
        #payload packets
        session_packets_seq_overspill_fragmented.append(syn)
        session_packets_seq_overspill_fragmented.append(synack)
        session_packets_seq_overspill_fragmented.append(ack)
        random.shuffle(p_frag_QinQ_data_frag_one_tagged) #shuffle JUST the fragments in the session
        for p_fragment in p_frag_QinQ_data_frag_one_tagged:
            session_packets_seq_overspill_fragmented.append(p_fragment)
        session_packets_seq_overspill_fragmented.append(returnAck)
        session_packets_seq_overspill_fragmented.append(finAck)
        session_packets_seq_overspill_fragmented.append(finalAck)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), session_packets_seq_overspill_fragmented)
        session_packets_seq_overspill_fragmented[:] = [] #empty the list

    def midstreamIPv4Http(self, fragit, results_directory, sid_id_http, \
    src_name, repo_name):
        #forcing correct recalculation of the checksums
        del fragit[IP].chksum
        del fragit[TCP].chksum

        fragit_done = fragment(fragit, fragsize=10 )

        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered-%s-tp-01.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        fragit_done.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed-%s-tp-01.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(fragit_done)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed-%s-tp-01.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)
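    # Added note: deleting the checksum fields is what forces scapy to recompute
    # them when the packets are written, keeping the rewritten/fragmented packets
    # valid after the headers were modified:
    #   del pkt[IP].chksum   # recalculated on write
    #   del pkt[TCP].chksum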
    def midstreamIPv4HttpDot1Q(self, fragit, results_directory, sid_id_http, \
    src_name, repo_name):
        #Using VLAN Tag - Dot1Q
        #forcing correct recalculation of the checksums
        del fragit[IP].chksum
        del fragit[TCP].chksum
        fragit[Ether].tags = Dot1Q(vlan=2222)

        #one midstream packet in Dot1Q
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Dot1Q-%s-tp-01.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit)

        fragit_done = fragment(fragit, fragsize=10 )

        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        fragit_done.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(fragit_done)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)

    def midstreamIPv4HttpDot1QWrongTagInFragments(self, fragit, results_directory, \
    sid_id_http, src_name, repo_name):
        # Wrongly tagged fragments
        # Using VLAN Tag - Dot1Q
        #forcing correct recalculation of the checksums
        del fragit[IP].chksum
        del fragit[TCP].chksum
        fragit[Ether].tags = Dot1Q(vlan=2222)

        ##
        # one fragment has the wrong VLAN ID tag
        ##
        fragit_done_wrong_dot1q_tag = fragment(fragit, fragsize=10 )
        fragit_done_wrong_dot1q_tag[3].tags = Dot1Q(vlan=2299)

        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done_wrong_dot1q_tag)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        fragit_done_wrong_dot1q_tag.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done_wrong_dot1q_tag)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(fragit_done_wrong_dot1q_tag)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done_wrong_dot1q_tag)

        ##
        # one fragment has no VLAN ID tag
        ##
        fragit_done_no_dot1q_tag = fragment(fragit, fragsize=10 )
        fragit_done_no_dot1q_tag[3].tags = Untagged()

        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done_no_dot1q_tag)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        fragit_done_no_dot1q_tag.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done_no_dot1q_tag)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(fragit_done_no_dot1q_tag)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done_no_dot1q_tag)
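    # Added note: a single 802.1Q tag keeps its default TPID (0x8100), so the
    # Dot1Q cases above only need fragit[Ether].tags = Dot1Q(vlan=2222); the QinQ
    # cases below additionally override the outer TPID to 0x88a8 (802.1ad).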
    def midstreamIPv4HttpQinQ(self, fragit, results_directory, sid_id_http, \
    src_name, repo_name):
        #Using DOUBLE VLAN Tagging - QinQ
        #forcing correct recalculation of the checksums
        del fragit[IP].chksum
        del fragit[TCP].chksum
        fragit.tags = Dot1AD(vlan=3333)/Dot1Q(vlan=1)
        fragit.tags[Dot1Q].tpid = 0x88a8

        #one midstream packet in QinQ
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_QinQ-%s-tp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit)

        fragit_done = fragment(fragit, fragsize=10 )

        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        fragit_done.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(fragit_done)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), fragit_done)
    def midstreamIPv4HttpQinQWrongTagInFragments(self, fragit, \
    results_directory, sid_id_http, src_name, repo_name):
        #Wrongly tagged fragments
        #Using DOUBLE VLAN Tagging - QinQ
        #forcing correct recalculation of the checksums
        del fragit[IP].chksum
        del fragit[TCP].chksum
        fragit.tags = Dot1AD(vlan=3333)/Dot1Q(vlan=1)
        fragit.tags[Dot1Q].tpid = 0x88a8

        ##
        # We fragment the data packet, we change the VLAN tag of
        # the outer 802.1AD layer
        ##
        p_frag_QinQ_data_frag_wrong_dot1ad = fragment(fragit, fragsize=10 )
        p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=1)
        p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8

        ##
        # We fragment the data packet, we change the VLAN tag of
        # the inner Dot1Q layer
        ##
        p_frag_QinQ_data_frag_wrong_dot1q = fragment(fragit, fragsize=10 )
        p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=3333)/Dot1Q(vlan=777)
        p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8

        ##
        # We fragment the data packet, we make one fragment tagged only
        # with one VLAN
        ##
        p_frag_QinQ_data_frag_only_dot1q = fragment(fragit, fragsize=10 )
        p_frag_QinQ_data_frag_only_dot1q[3].tags = Dot1Q(vlan=2345)

        ##
        # We fragment the data packet and make one fragment with both tags
        # having the wrong VLAN IDs
        ##
        p_frag_QinQ_data_frag_wrong_both = fragment(fragit, fragsize=10 )
        p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=111)/Dot1Q(vlan=222)
        p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8

        ##
        # We fragment the data packet, but we make one fragment untagged.
        # VLAN tags missing
        ##
        p_frag_QinQ_data_frag_missing_tags = fragment(fragit, fragsize=10 )
        p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()

        ##
        # We fragment the data packet, but we make one fragment with reversed
        # VLAN tags
        ##
        p_frag_QinQ_data_frag_reversed_tags = fragment(fragit, fragsize=10 )
        p_frag_QinQ_data_frag_reversed_tags[3].tags = Dot1AD(vlan=1)/Dot1Q(vlan=3333)
        p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8

        ##
        # We fragment the data packet, we change the VLAN tag of
        # the outer 802.1AD layer
        ##
        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        p_frag_QinQ_data_frag_wrong_dot1ad.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)
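        # Added note: list.reverse() and random.shuffle() reorder the fragment
        # list in place (hence the "!!!" warnings), so each Mixed pcap is a
        # shuffle of the already-reversed list, not of the original order:
        #   frags.reverse()        # permanent
        #   random.shuffle(frags)  # permanent, applied on top of the reversal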
        ##
        # We fragment the data packet, we change the VLAN tag of
        # the inner Dot1Q layer
        ##
        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        p_frag_QinQ_data_frag_wrong_dot1q.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)

        ##
        # We fragment the data packet, we make one fragment tagged only
        # with one VLAN
        ##
        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)
        #reverse the fragments !!!
        #permanent change to the list of fragments
        p_frag_QinQ_data_frag_only_dot1q.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(p_frag_QinQ_data_frag_only_dot1q)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)

        ##
        # We fragment the data packet and make one fragment with both tags
        # having the wrong VLAN IDs
        ##
        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        p_frag_QinQ_data_frag_wrong_both.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(p_frag_QinQ_data_frag_wrong_both)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)

        ##
        # We fragment the data packet, but we make one fragment untagged.
        # VLAN tags missing
        ##
        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)
        #reverse the fragments !!!
        #permanent change to the list of fragments
        p_frag_QinQ_data_frag_missing_tags.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(p_frag_QinQ_data_frag_missing_tags)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)

        ##
        # We fragment the data packet, but we make one fragment with reversed
        # VLAN tags
        ##
        #write the ordered fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)

        #reverse the fragments !!!
        #permanent change to the list of fragments
        p_frag_QinQ_data_frag_reversed_tags.reverse()
        #write the reversed fragmented payload packets
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)

        #shuffle(unorder/mix) the fragmented payload packets and write
        random.shuffle(p_frag_QinQ_data_frag_reversed_tags)
        wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
        % (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
        , src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)

    def reconstructIPv4HttpPacket(self, packet):
        # here we turn the original HTTP packet into a plain IPv4/TCP packet
        if packet.haslayer(IPv6):
            ipsrc = "1.1.1.1"
            ipdst = "9.9.9.9"
        else:
            ipsrc = packet[IP].src
            ipdst = packet[IP].dst

        p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
        /IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=packet[TCP].sport, \
        dport=packet[TCP].dport, seq=packet.seq, ack=packet.ack)/packet[TCP][Raw]

        return p

    def incrementPcapId(self, action):
        if action == "byOne":
            Global_Vars.pcap_id = Global_Vars.pcap_id+1
            return '{0:03}'.format(Global_Vars.pcap_id)
        elif action == "clear":
            Global_Vars.pcap_id = 0
            return '{0:03}'.format(Global_Vars.pcap_id)
        else:
            sys.exit("Invalid argument for function incrementPcapId()")
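    # Added usage sketch for incrementPcapId (values assumed): the counter lives
    # in Global_Vars.pcap_id and is rendered zero-padded to three digits for the
    # pcap file names.
    #   >>> self.incrementPcapId("byOne")   # pcap_id was 7
    #   '008'
    #   >>> self.incrementPcapId("clear")
    #   '000'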
sid_id_http, self.incrementPcapId("byOne"), \ source_name, repository_name) , ipv4_ready) self.midstreamIPv4Http(ipv4_ready, results_directory, sid_id_http, \ source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Midstream', 'Regular'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['Dot1Q']: self.midstreamIPv4HttpDot1Q(ipv4_ready, results_directory, sid_id_http, \ source_name, repository_name) self.midstreamIPv4HttpDot1QWrongTagInFragments(ipv4_ready, \ results_directory, sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Midstream', 'Dot1Q'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['QinQ']: self.midstreamIPv4HttpQinQ(ipv4_ready, results_directory, \ sid_id_http, source_name, repository_name) self.midstreamIPv4HttpQinQWrongTagInFragments(ipv4_ready, \ results_directory, sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Midstream', 'QinQ'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Session']: self.rebuildIPv4HttpSession(ipv4_ready, results_directory, sid_id_http, \ source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Regular'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['ExtraTcpSA']: self.rebuildIPv4HttpSessionExtraTcpSAs(ipv4_ready, results_directory, \ sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Regular'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Dot1Q']: self.rebuildIPv4HttpSessionDot1Q(ipv4_ready, results_directory, \ sid_id_http, source_name, repository_name) self.rebuildIPv4HttpSessionDot1QWrongTagInFragments(ipv4_ready, \ results_directory, sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Dot1Q'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['QinQ']: self.rebuildIPv4HttpSessionQinQ(ipv4_ready, results_directory, \ sid_id_http, source_name, repository_name) self.rebuildIPv4HttpSessionQinQWrongTagInFragments(ipv4_ready, \ results_directory, sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory,'QinQ'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['SeqOverspill']: self.rebuildIPv4HttpSeqOverSpill(ipv4_ready, results_directory, \ sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Regular'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Dot1Q']: self.rebuildIPv4HttpSeqOverSpillDot1Q(ipv4_ready, results_directory, \ sid_id_http, source_name, repository_name) self.rebuildIPv4HttpSeqOverSpillDot1QWrongTagInFragments(ipv4_ready, \ results_directory, sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory, 'Dot1Q'), source_name) if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['QinQ']: 
self.rebuildIPv4HttpSeqOverSpillQinQ(ipv4_ready, \ results_directory, sid_id_http, source_name, repository_name) self.rebuildIPv4HttpSeqOverSpillQinQWrongTagInFragments(ipv4_ready, \ results_directory, sid_id_http, source_name, repository_name) self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \ os.path.join(results_directory,'QinQ'), source_name) def __init__(self, scapy_load, FN, pcap_id, results_directory, source_name, \ sid_id_http, url_method, url_str, content_all, repository_name): self.scapy_load_to_pass = scapy_load self.FN_to_pass = FN self.pcap_id_to_pass = pcap_id self.results_directory_to_pass = results_directory self.source_name_to_pass = source_name self.sid_id_http_to_pass = sid_id_http self.url_method_to_pass = url_method self.url_str_to_pass = url_str self.content_all_to_pass = content_all self.repository_name_to_pass = repository_name # if HTTP over IPv4 is enabled in yaml if Global_Vars.yaml_options['Protocols']['HTTP']['IPv4']: self.httpReWrite( \ self.scapy_load_to_pass, self.FN_to_pass, self.pcap_id_to_pass, \ self.results_directory_to_pass, self.source_name_to_pass, \ self.sid_id_http_to_pass, self.url_method_to_pass, \ self.url_str_to_pass, self.content_all_to_pass, \ self.repository_name_to_pass )<|fim▁end|>
session_packets_fragmented[:] = [] #empty the list
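# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the sample above. The
# midstream writers above emit every fragment train three ways -- ordered,
# reversed (list.reverse() is in-place), and shuffled (random.shuffle() is
# also in-place). A minimal standalone version of that pattern, assuming
# scapy is installed; the packet contents and output file names are invented.
# ---------------------------------------------------------------------------
from scapy.all import Ether, IP, TCP, fragment, wrpcap
import random

pkt = Ether()/IP(src="1.1.1.1", dst="9.9.9.9")/TCP(sport=1024, dport=80)/("A" * 2000)
frags = fragment(pkt, fragsize=512)   # split the payload into IPv4 fragments
wrpcap("ordered.pcap", frags)         # 1) fragments in original order
frags.reverse()                       # permanent change to the list, as above
wrpcap("reversed.pcap", frags)        # 2) fragments reversed
random.shuffle(frags)                 # unorder/mix, as above
wrpcap("mixed.pcap", frags)           # 3) fragments mixed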
<|file_name|>pr-check.js<|end_file_name|><|fim▁begin|>/**
 * Copyright 2016 The AMP HTML Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS-IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @fileoverview This file is executed by Travis (configured via
 * .travis.yml in the root directory) and is the main driver script
 * for running tests. Execution herein is entirely synchronous, that
 * is, commands are executed one after the other (see the exec
 * function). Should a command fail, this script will then also fail.
 * This script attempts to introduce some granularity for our
 * presubmit checking, via the determineBuildTargets method.
 */
const child_process = require('child_process');
const exec = require('./exec.js').exec;
const execOrDie = require('./exec.js').execOrDie;
const path = require('path');
const minimist = require('minimist');
const util = require('gulp-util');

const gulp = 'node_modules/gulp/bin/gulp.js';
const fileLogPrefix = util.colors.yellow.bold('pr-check.js:');

/**
 * Starts a timer to measure the execution time of the given function.
 * @param {string} functionName
 * @return {DOMHighResTimeStamp}
 */
function startTimer(functionName) {
  const startTime = Date.now();
  console.log(
      '\n' + fileLogPrefix, 'Running', util.colors.cyan(functionName), '...');
  return startTime;
}

/**
 * Stops the timer for the given function and prints the execution time.
 * @param {string} functionName
 * @return {Number}
 */
function stopTimer(functionName, startTime) {
  const endTime = Date.now();
  const executionTime = new Date(endTime - startTime);
  const mins = executionTime.getMinutes();
  const secs = executionTime.getSeconds();
  console.log(
      fileLogPrefix, 'Done running', util.colors.cyan(functionName),
      'Total time:', util.colors.green(mins + 'm ' + secs + 's'));
}

/**
 * Executes the provided command, returning its stdout as an array of lines.
 * This will throw an exception if something goes wrong.
 * @param {string} cmd
 * @return {!Array<string>}
 */
function getStdout(cmd) {
  return child_process.execSync(cmd, {'encoding': 'utf-8'}).trim().split('\n');
}

/**
 * Executes the provided command and times it.
 * @param {string} cmd
 */
function timedExec(cmd) {
  const startTime = startTimer(cmd);
  exec(cmd);
  stopTimer(cmd, startTime);
}

/**
 * Executes the provided command and times it. The program terminates in case of
 * failure.
 * @param {string} cmd
 */
function timedExecOrDie(cmd) {
  const startTime = startTimer(cmd);
  execOrDie(cmd);
  stopTimer(cmd, startTime);
}

/**
 * For a provided commit range identifying a pull request (PR),
 * yields the list of files.
 * @param {string} travisCommitRange
 * @return {!Array<string>}
 */
function filesInPr(travisCommitRange) {
  return getStdout(`git diff --name-only ${travisCommitRange}`);
}

/**
 * Determines whether the given file belongs to the Validator webui,
 * that is, the 'VALIDATOR_WEBUI' target.
 * @param {string} filePath
 * @return {boolean}
 */
function isValidatorWebuiFile(filePath) {
  return filePath.startsWith('validator/webui');
}

/**
 * Determines whether the given file belongs to the build system,
 * that is, the 'BUILD_SYSTEM' target.
 * @param {string} filePath
 * @return {boolean}
 */
function isBuildSystemFile(filePath) {
  return filePath.startsWith('build-system') &&
      // Exclude textproto from build-system since we want it to trigger
      // tests and type check.
      path.extname(filePath) != '.textproto' &&
      // Exclude config files from build-system since we want it to trigger
      // the flag config check.
      !isFlagConfig(filePath);
}

/**
 * Determines whether the given file belongs to the validator,
 * that is, the 'VALIDATOR' target. This assumes (but does not
 * check) that the file is not part of 'VALIDATOR_WEBUI'.
 * @param {string} filePath
 * @return {boolean}
 */
function isValidatorFile(filePath) {
  if (filePath.startsWith('validator/')) return true;

  if (!path.dirname(filePath).endsWith('0.1') &&
      !path.dirname(filePath).endsWith('test')) return false;

  const name = path.basename(filePath);
  return name.startsWith('validator-') &&
      (name.endsWith('.out') || name.endsWith('.html') ||
       name.endsWith('.protoascii'));
}

/**
 * @param {string} filePath
 * @return {boolean}
 */
function isDocFile(filePath) {
  return path.extname(filePath) == '.md';
}

/**
 * Determines if the given file contains flag configurations, by comparing it
 * against the well-known json config filenames for prod and canary.
 * @param {string} filePath
 * @return {boolean}
 */
function isFlagConfig(filePath) {
  const filename = path.basename(filePath);
  return (filename == 'prod-config.json' || filename == 'canary-config.json');
}

/**
 * Determines the targets that will be executed by the main method of
 * this script. The order within this function matters.
 * @param {!Array<string>} filePaths
 * @returns {!Set<string>}
 */
function determineBuildTargets(filePaths) {
  if (filePaths.length == 0) {
    return new Set([
        'BUILD_SYSTEM',
        'VALIDATOR_WEBUI',
        'VALIDATOR',
        'RUNTIME',
        'DOCS',
        'FLAG_CONFIG']);
  }
  const targetSet = new Set();
  for (const p of filePaths) {
    if (isBuildSystemFile(p)) {
      targetSet.add('BUILD_SYSTEM');
    } else if (isValidatorWebuiFile(p)) {
      targetSet.add('VALIDATOR_WEBUI');
    } else if (isValidatorFile(p)) {
      targetSet.add('VALIDATOR');
    } else if (isDocFile(p)) {
      targetSet.add('DOCS');
    } else if (isFlagConfig(p)) {
      targetSet.add('FLAG_CONFIG');
    } else {
      targetSet.add('RUNTIME');
    }
  }
  return targetSet;
}

const command = {
  testBuildSystem: function() {
    timedExecOrDie('npm run ava');
  },
  testDocumentLinks: function(files) {
    let docFiles = files.filter(isDocFile);
    timedExecOrDie(`${gulp} check-links --files ${docFiles.join(',')}`);
  },
  runPreBuildChecks: function() {
    timedExecOrDie(`${gulp} clean`);
    timedExecOrDie(`${gulp} lint`);
  },
  buildRuntime: function() {
    timedExecOrDie(`${gulp} clean`);
    timedExecOrDie(`${gulp} build`);
    timedExecOrDie(`${gulp} dist --fortesting`);
  },
  runDepAndTypeChecks: function() {
    timedExecOrDie(`${gulp} build --css-only`);
    timedExecOrDie(`${gulp} dep-check`);
    timedExecOrDie(`${gulp} check-types`);
  },
  runUnitTests: function() {
    // Unit tests with Travis' default chromium
    timedExecOrDie(`${gulp} test --nobuild --compiled`);
    // All unit tests with an old chrome (best we can do right now to pass tests
    // and not start relying on new features).
    // Disabled because it regressed. Better to run the other saucelabs tests.
// timedExecOrDie( // `${gulp} test --nobuild --saucelabs --oldchrome --compiled`); }, runIntegrationTests: function() { // Integration tests with all saucelabs browsers timedExecOrDie( `${gulp} test --nobuild --saucelabs --integration --compiled`); }, runVisualDiffTests: function() { // This must only be run for push builds, since Travis hides the encrypted // environment variables required by Percy during pull request builds. // For now, this is warning-only. timedExec(`${gulp} visual-diff`); }, presubmit: function() { timedExecOrDie(`${gulp} presubmit`); }, buildValidatorWebUI: function() { timedExecOrDie('cd validator/webui && python build.py'); }, buildValidator: function() { timedExecOrDie('cd validator && python build.py'); }, }; function runAllCommands() { // Run different sets of independent tasks in parallel to reduce build time. if (process.env.BUILD_SHARD == "pre_build_checks") { command.testBuildSystem(); command.runPreBuildChecks(); command.runDepAndTypeChecks(); // Skip testDocumentLinks() during push builds. command.buildValidatorWebUI(); command.buildValidator(); } if (process.env.BUILD_SHARD == "integration_tests") { command.buildRuntime(); command.presubmit(); // Must be run after the runtime is built. command.runVisualDiffTests(); // Only called during push builds. command.runIntegrationTests(); } if (process.env.BUILD_SHARD == "unit_tests") { // Unit tests should need a CSS-only build, but for now, we need a full dist // because some of the tests are integration tests. // TODO(rsimha-amp, 9404): Clean up unit tests and change to css-only build. command.buildRuntime(); command.runUnitTests(); } } /** * The main method for the script execution which much like a C main function * receives the command line arguments and returns an exit status. * @param {!Array<string>} argv * @returns {number} */ function main(argv) { const startTime = startTimer('pr-check.js'); console.log( fileLogPrefix, 'Running build shard', util.colors.cyan(process.env.BUILD_SHARD)); // If $TRAVIS_PULL_REQUEST_SHA is empty then it is a push build and not a PR. if (!process.env.TRAVIS_PULL_REQUEST_SHA) { console.log(fileLogPrefix, 'Running all commands on push build.'); runAllCommands(); stopTimer('pr-check.js', startTime); return 0; } const travisCommitRange = `master...${process.env.TRAVIS_PULL_REQUEST_SHA}`; const files = filesInPr(travisCommitRange); const buildTargets = determineBuildTargets(files); if (buildTargets.has('FLAG_CONFIG')) { files.forEach((file) => { if (!isFlagConfig(file)) { console.log(fileLogPrefix, util.colors.red('ERROR:'), 'PRs may not include *config.json files and non-flag-config ' + 'files. Please make the changes in separate PRs.'); console.log(fileLogPrefix, util.colors.yellow('NOTE:'), 'If you see a long list of unrelated files below, it is likely ' + 'that your private branch is significantly out of sync.'); console.log(fileLogPrefix, 'A sync to upstream/master and a push to origin should clear' + ' this error. If a normal push doesn\'t work, try a force push:'); console.log(util.colors.cyan('\t git fetch upstream master')); console.log(util.colors.cyan('\t git rebase upstream/master')); console.log(util.colors.cyan('\t git push origin --force')); console.log('\nFull list of files in this PR:'); files.forEach((file) => { console.log('\t' + file); }); stopTimer('pr-check.js', startTime); process.exit(1); } }); } //if (files.includes('package.json') ? 
//!files.includes('yarn.lock') : files.includes('yarn.lock')) { //console.error('pr-check.js - any update to package.json or yarn.lock ' + //'must include the other file. Please update through yarn.'); //process.exit(1); //} const sortedBuildTargets = []; for (const t of buildTargets) { sortedBuildTargets.push(t); } sortedBuildTargets.sort(); console.log( fileLogPrefix, 'Detected build targets:', util.colors.cyan(sortedBuildTargets.join(', '))); // Run different sets of independent tasks in parallel to reduce build time. if (process.env.BUILD_SHARD == "pre_build_checks") { if (buildTargets.has('BUILD_SYSTEM')) { command.testBuildSystem(); } if (buildTargets.has('DOCS')) { command.testDocumentLinks(files); } if (buildTargets.has('RUNTIME')) { command.runPreBuildChecks(); command.runDepAndTypeChecks(); } if (buildTargets.has('VALIDATOR_WEBUI')) { command.buildValidatorWebUI(); } if (buildTargets.has('VALIDATOR')) { command.buildValidator(); } } if (process.env.BUILD_SHARD == "integration_tests") { // The integration_tests shard can be skipped for PRs. console.log(fileLogPrefix, 'Skipping integration_tests for PRs'); } if (process.env.BUILD_SHARD == "unit_tests" && buildTargets.has('RUNTIME')) {<|fim▁hole|> // Presubmit needs to run after `gulp dist` as some checks run through // the dist/ folder. // Also presubmit always needs to run even for just docs to check for // copyright at the top. // TODO(rsimha-amp, 9404): Move to integration_tests once it's enabled. command.presubmit(); // Finally, run all unit tests. command.runUnitTests(); } stopTimer('pr-check.js', startTime); return 0; } process.exit(main());<|fim▁end|>
// Unit tests should need a CSS-only build, but for now, we need a full dist // because some of the tests are integration tests. // TODO(rsimha-amp, 9404): Clean up unit tests and change to css-only build. command.buildRuntime();
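// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, separate from the sample above. It
// exercises determineBuildTargets() from the prompt: each classifier is tried
// in order, and any path that nothing claims falls through to RUNTIME. The
// file paths below are invented for illustration.
// ---------------------------------------------------------------------------
const exampleTargets = determineBuildTargets([
  'validator/webui/index.html',  // -> VALIDATOR_WEBUI
  'build-system/tasks/lint.js',  // -> BUILD_SYSTEM
  'README.md',                   // -> DOCS
  'src/amp-core.js',             // -> RUNTIME (fallback)
]);
console.log(Array.from(exampleTargets).sort().join(', '));
// Logs: BUILD_SYSTEM, DOCS, RUNTIME, VALIDATOR_WEBUI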
<|file_name|>create_listener_request_response.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved. // This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. // Code generated. DO NOT EDIT. package loadbalancer import ( "github.com/oracle/oci-go-sdk/v46/common" "net/http" ) // CreateListenerRequest wrapper for the CreateListener operation // // See also // // Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/loadbalancer/CreateListener.go.html to see an example of how to use CreateListenerRequest. type CreateListenerRequest struct { // Details to add a listener. CreateListenerDetails `contributesTo:"body"` // The OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the load balancer on which to add a listener. LoadBalancerId *string `mandatory:"true" contributesTo:"path" name:"loadBalancerId"` // The unique Oracle-assigned identifier for the request. If you need to contact Oracle about a // particular request, please provide the request ID. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // A token that uniquely identifies a request so it can be retried in case of a timeout or // server error without risk of executing that same action again. Retry tokens expire after 24 // hours, but can be invalidated before then due to conflicting operations (e.g., if a resource // has been deleted and purged from the system, then a retry of the original creation request // may be rejected). OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata } func (request CreateListenerRequest) String() string { return common.PointerString(request) } // HTTPRequest implements the OCIRequest interface func (request CreateListenerRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) { return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders) } // BinaryRequestBody implements the OCIRequest interface func (request CreateListenerRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) { return nil, false } // RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy. func (request CreateListenerRequest) RetryPolicy() *common.RetryPolicy { return request.RequestMetadata.RetryPolicy } // CreateListenerResponse wrapper for the CreateListener operation type CreateListenerResponse struct { // The underlying http response RawResponse *http.Response // The OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the work request. OpcWorkRequestId *string `presentIn:"header" name:"opc-work-request-id"`<|fim▁hole|> // Unique Oracle-assigned identifier for the request. If you need to contact Oracle about // a particular request, please provide the request ID. 
OpcRequestId *string `presentIn:"header" name:"opc-request-id"` } func (response CreateListenerResponse) String() string { return common.PointerString(response) } // HTTPResponse implements the OCIResponse interface func (response CreateListenerResponse) HTTPResponse() *http.Response { return response.RawResponse }<|fim▁end|>
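// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, separate from the generated sample
// above. RetryPolicy() simply surfaces whatever policy the caller stored in
// RequestMetadata. The OCID and retry token are invented, and the helpers
// common.String and common.DefaultRetryPolicy are assumed to be available in
// this SDK version.
// ---------------------------------------------------------------------------
func exampleCreateListenerRequest() CreateListenerRequest {
	defaultPolicy := common.DefaultRetryPolicy()
	return CreateListenerRequest{
		LoadBalancerId:  common.String("ocid1.loadbalancer.oc1..exampleuniqueid"),
		OpcRetryToken:   common.String("example-retry-token"),
		RequestMetadata: common.RequestMetadata{RetryPolicy: &defaultPolicy},
	}
}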
<|file_name|>did.rs<|end_file_name|><|fim▁begin|>#[macro_use] mod utils; inject_indy_dependencies!(); extern crate indyrs as indy; extern crate indyrs as api; use crate::utils::{did, pool, ledger}; use crate::utils::constants::*; use crate::utils::types::ResponseType; use crate::utils::Setup; use self::indy::ErrorCode; use crate::api::{INVALID_WALLET_HANDLE, INVALID_POOL_HANDLE}; #[cfg(feature = "local_nodes_pool")] use std::thread; pub const ENCRYPTED_MESSAGE: &'static [u8; 45] = &[187, 227, 10, 29, 46, 178, 12, 179, 197, 69, 171, 70, 228, 204, 52, 22, 199, 54, 62, 13, 115, 5, 216, 66, 20, 131, 121, 29, 251, 224, 253, 201, 75, 73, 225, 237, 219, 133, 35, 217, 131, 135, 232, 129, 32]; pub const SIGNATURE: &'static [u8; 64] = &[20, 191, 100, 213, 101, 12, 197, 198, 203, 49, 89, 220, 205, 192, 224, 221, 97, 77, 220, 190, 90, 60, 142, 23, 16, 240, 189, 129, 45, 148, 245, 8, 102, 95, 95, 249, 100, 89, 41, 227, 213, 25, 100, 1, 232, 188, 245, 235, 186, 21, 52, 176, 236, 11, 99, 70, 155, 159, 89, 215, 197, 239, 138, 5]; mod high_cases { use super::*; mod key_for_did { use super::*; #[test] fn indy_key_for_did_works_for_my_did() { let setup = Setup::wallet(); let (did, verkey) = did::create_and_store_my_did(setup.wallet_handle, Some(MY1_SEED)).unwrap(); let received_verkey = did::key_for_did(-1, setup.wallet_handle, &did).unwrap(); assert_eq!(verkey, received_verkey); } #[test] fn indy_key_for_did_works_for_their_did() { let setup = Setup::wallet(); did::store_their_did_from_parts(setup.wallet_handle, DID, VERKEY).unwrap(); let received_verkey = did::key_for_did(-1, setup.wallet_handle, DID).unwrap(); assert_eq!(VERKEY, received_verkey); } #[test] fn indy_key_for_did_works_for_get_key_from_ledger() { let setup = Setup::wallet_and_pool(); let received_verkey = did::key_for_did(setup.pool_handle, setup.wallet_handle, DID_TRUSTEE).unwrap(); assert_eq!(VERKEY_TRUSTEE.to_string(), received_verkey); } #[test] fn indy_key_for_did_works_for_unknown_did() { let setup = Setup::wallet_and_pool(); let res = did::key_for_did(setup.pool_handle, setup.wallet_handle, DID); assert_code!(ErrorCode::WalletItemNotFound, res); } #[test] fn indy_key_for_did_works_for_fully_qualified_my_did() { let setup = Setup::wallet(); let (did, verkey) = did::create_and_store_my_did_v1(setup.wallet_handle, Some(MY1_SEED)).unwrap(); let received_verkey = did::key_for_did(-1, setup.wallet_handle, &did).unwrap(); assert_eq!(verkey, received_verkey); } #[test] fn indy_key_for_did_works_for_fully_qualified_their_did() { let setup = Setup::wallet(); did::store_their_did_from_parts(setup.wallet_handle, DID_V1, VERKEY).unwrap(); let received_verkey = did::key_for_did(-1, setup.wallet_handle, DID_V1).unwrap(); assert_eq!(VERKEY, received_verkey); } } mod key_for_local_did { use super::*; #[test] fn indy_key_for_local_did_works_for_my_did() { let setup = Setup::did(); let received_verkey = did::key_for_local_did(setup.wallet_handle, &setup.did).unwrap(); assert_eq!(setup.verkey, received_verkey); } #[test] fn indy_key_for_local_did_works_for_their_did() { let setup = Setup::wallet(); did::store_their_did_from_parts(setup.wallet_handle, DID, VERKEY).unwrap(); let received_verkey = did::key_for_local_did(setup.wallet_handle, DID).unwrap(); assert_eq!(VERKEY, received_verkey); } #[test] fn indy_key_for_local_did_works_for_unknown_did() { let setup = Setup::wallet(); let res = did::key_for_local_did(setup.wallet_handle, DID); assert_code!(ErrorCode::WalletItemNotFound, res); } #[test] fn indy_key_for_local_did_works_for_fully_qualified_my_did() 
{ let setup = Setup::did_fully_qualified(); let received_verkey = did::key_for_local_did(setup.wallet_handle, &setup.did).unwrap(); assert_eq!(setup.verkey, received_verkey); } } mod set_endpoint_for_did { use super::*; #[test] fn indy_set_endpoint_for_did_works() { let setup = Setup::wallet(); did::set_endpoint_for_did(setup.wallet_handle, DID, ENDPOINT, VERKEY).unwrap(); } #[test] fn indy_set_endpoint_for_did_works_for_fully_qualified_did() { let setup = Setup::wallet(); did::set_endpoint_for_did(setup.wallet_handle, DID_V1, ENDPOINT, VERKEY).unwrap(); } } mod get_endpoint_for_did { use super::*; #[test] fn indy_get_endpoint_for_did_works() { let setup = Setup::wallet(); did::set_endpoint_for_did(setup.wallet_handle, DID, ENDPOINT, VERKEY).unwrap(); let (endpoint, key) = did::get_endpoint_for_did(setup.wallet_handle, -1, DID).unwrap(); assert_eq!(ENDPOINT, endpoint); assert_eq!(VERKEY, key.unwrap()); } #[test] fn indy_get_endpoint_for_did_works_for_fully_qualified_did() { let setup = Setup::wallet(); did::set_endpoint_for_did(setup.wallet_handle, DID_V1, ENDPOINT, VERKEY).unwrap(); let (endpoint, key) = did::get_endpoint_for_did(setup.wallet_handle, -1, DID_V1).unwrap(); assert_eq!(ENDPOINT, endpoint); assert_eq!(VERKEY, key.unwrap()); } #[test] fn indy_get_endpoint_for_did_works_from_ledger() { let setup = Setup::new_identity(); let attrib_data = json!({"endpoint": {"ha": ENDPOINT, "verkey": VERKEY_TRUSTEE}}).to_string(); let attrib_request = ledger::build_attrib_request(&setup.did, &setup.did, None, Some(&attrib_data), None).unwrap(); ledger::sign_and_submit_request(setup.pool_handle, setup.wallet_handle, &setup.did, &attrib_request).unwrap(); thread::sleep(std::time::Duration::from_secs(1)); let (endpoint, key) = did::get_endpoint_for_did(setup.wallet_handle, setup.pool_handle, &setup.did).unwrap(); assert_eq!(ENDPOINT, endpoint); assert_eq!(VERKEY_TRUSTEE, key.unwrap()); } #[test] fn indy_get_endpoint_for_did_works_from_ledger_for_address_only() { let setup = Setup::new_identity(); let attrib_data = json!({"endpoint": {"ha": ENDPOINT}}).to_string(); let attrib_request = ledger::build_attrib_request(&setup.did, &setup.did, None, Some(&attrib_data), None).unwrap(); ledger::sign_and_submit_request(setup.pool_handle, setup.wallet_handle, &setup.did, &attrib_request).unwrap(); thread::sleep(std::time::Duration::from_secs(1)); let (endpoint, key) = did::get_endpoint_for_did(setup.wallet_handle, setup.pool_handle, &setup.did).unwrap(); assert_eq!(ENDPOINT, endpoint); assert_eq!(None, key); } #[test] fn indy_get_endpoint_for_did_works_for_unknown_did() { let setup = Setup::wallet_and_pool(); let res = did::get_endpoint_for_did(setup.wallet_handle, setup.pool_handle, DID); assert_code!(ErrorCode::CommonInvalidState, res); } #[test] fn indy_get_endpoint_for_did_works_invalid_poll_handle() { let setup = Setup::wallet(); let res = did::get_endpoint_for_did(setup.wallet_handle, INVALID_POOL_HANDLE, DID); assert_code!(ErrorCode::PoolLedgerInvalidPoolHandle, res); } #[test] fn indy_get_endpoint_for_did_works_invalid_wallet_handle() { Setup::empty(); let res = did::get_endpoint_for_did(INVALID_WALLET_HANDLE, -1, DID); assert_code!(ErrorCode::WalletInvalidHandle, res); } } mod set_did_metadata { use super::*; #[test] fn indy_set_did_metadata_works() { let setup = Setup::did(); did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); } #[test] fn indy_set_did_metadata_works_for_fully_qualified_did() { let setup = Setup::did_fully_qualified(); 
did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); } #[test] fn indy_set_did_metadata_works_for_their_did() { let setup = Setup::wallet(); did::store_their_did_from_parts(setup.wallet_handle, DID, VERKEY).unwrap(); did::set_did_metadata(setup.wallet_handle, DID, METADATA).unwrap(); } #[test] fn indy_set_did_metadata_works_for_replace() { let setup = Setup::did(); did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); let metadata = did::get_did_metadata(setup.wallet_handle, &setup.did).unwrap(); assert_eq!(METADATA.to_string(), metadata); let new_metadata = "updated metadata"; did::set_did_metadata(setup.wallet_handle, &setup.did, new_metadata).unwrap(); let updated_metadata = did::get_did_metadata(setup.wallet_handle, &setup.did).unwrap(); assert_eq!(new_metadata, updated_metadata); } #[test] fn indy_set_did_metadata_works_for_empty_string() { let setup = Setup::did(); did::set_did_metadata(setup.wallet_handle, &setup.did, "").unwrap(); } #[test] fn indy_set_did_metadata_works_for_invalid_did() { let setup = Setup::wallet(); let res = did::set_did_metadata(setup.wallet_handle, INVALID_BASE58_DID, METADATA); assert_code!(ErrorCode::CommonInvalidStructure, res); } #[test] fn indy_set_did_metadata_works_for_unknown_did() { let setup = Setup::wallet(); did::set_did_metadata(setup.wallet_handle, &DID, METADATA).unwrap(); } #[test] fn indy_set_did_metadata_works_for_invalid_handle() { Setup::empty(); let res = did::set_did_metadata(INVALID_WALLET_HANDLE, DID_TRUSTEE, METADATA); assert_code!(ErrorCode::WalletInvalidHandle, res); } } mod get_did_metadata { use super::*; #[test] fn indy_get_did_metadata_works() { let setup = Setup::did(); did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); let metadata = did::get_did_metadata(setup.wallet_handle, &setup.did).unwrap(); assert_eq!(METADATA.to_string(), metadata); } #[test] fn indy_get_did_metadata_works_for_fully_qualified_did() { let setup = Setup::did_fully_qualified(); did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); let metadata = did::get_did_metadata(setup.wallet_handle, &setup.did).unwrap(); assert_eq!(METADATA.to_string(), metadata); } #[test] fn indy_get_did_metadata_works_for_their_did() { let setup = Setup::wallet(); did::store_their_did_from_parts(setup.wallet_handle, DID, VERKEY).unwrap(); did::set_did_metadata(setup.wallet_handle, DID, METADATA).unwrap(); let metadata = did::get_did_metadata(setup.wallet_handle, DID).unwrap(); assert_eq!(METADATA.to_string(), metadata); } #[test] fn indy_get_did_metadata_works_for_no_metadata() { let setup = Setup::did(); let res = did::get_did_metadata(setup.wallet_handle, &setup.did); assert_code!(ErrorCode::WalletItemNotFound, res); } #[test] fn indy_get_did_metadata_works_for_unknown_did() { let setup = Setup::wallet(); let res = did::get_did_metadata(setup.wallet_handle, DID); assert_code!(ErrorCode::WalletItemNotFound, res); } } mod get_my_did_metadata { use super::*; #[test] fn indy_get_my_did_metadata_works() { let setup = Setup::did(); did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); did::get_my_did_with_metadata(setup.wallet_handle, &setup.did).unwrap(); } #[test] fn indy_get_my_did_metadata_works_for_fullq_qualified_did() { let setup = Setup::did_fully_qualified(); did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); did::get_my_did_with_metadata(setup.wallet_handle, &setup.did).unwrap(); } #[test] fn indy_get_my_did_metadata_works_for_no_metadata() { 
let setup = Setup::did(); did::get_my_did_with_metadata(setup.wallet_handle, &setup.did).unwrap(); } #[test] fn indy_get_my_did_metadata_works_with_temp_verkey() { let setup = Setup::did(); did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap(); did::get_my_did_with_metadata(setup.wallet_handle, &setup.did).unwrap(); } #[test] fn indy_get_my_did_metadata_works_for_unknown_did() { let setup = Setup::wallet(); let res = did::get_my_did_with_metadata(setup.wallet_handle, DID); assert_code!(ErrorCode::WalletItemNotFound, res); } } mod create_my_did { use super::*; use rust_base58::FromBase58; #[test] fn indy_create_my_did_works_for_empty_json() { let setup = Setup::wallet(); let (my_did, my_verkey) = did::create_my_did(setup.wallet_handle, "{}").unwrap(); assert_eq!(my_did.from_base58().unwrap().len(), 16); assert_eq!(my_verkey.from_base58().unwrap().len(), 32); } #[test] fn indy_create_my_did_works_for_fully_qualified() { let setup = Setup::wallet(); let my_did_json = json!({"method_name": DEFAULT_METHOD_NAME}).to_string(); let (my_did, my_verkey) = did::create_my_did(setup.wallet_handle, &my_did_json).unwrap(); assert!(my_did.starts_with(DEFAULT_PREFIX)); assert_eq!(my_did.replace(DEFAULT_PREFIX, "").from_base58().unwrap().len(), 16); assert_eq!(my_verkey.from_base58().unwrap().len(), 32); } #[test] fn indy_create_my_did_works_for_several_dids_but_different_methods() { let setup = Setup::wallet(); let (my_did_1, my_verkey_1) = did::create_and_store_my_did(setup.wallet_handle, Some(MY1_SEED)).unwrap(); let (my_did_2, my_verkey_2) = did::create_and_store_my_did_v1(setup.wallet_handle, Some(MY1_SEED)).unwrap(); let my_did_json = json!({"method_name": "indy", "seed": MY1_SEED}).to_string(); let (my_did_3, my_verkey_3) = did::create_my_did(setup.wallet_handle, &my_did_json).unwrap(); assert_eq!(my_did_1.from_base58().unwrap().len(), 16); assert!(my_did_2.starts_with(DEFAULT_PREFIX)); assert!(my_did_3.starts_with("did:indy:")); assert_eq!(my_verkey_1, my_verkey_2); assert_eq!(my_verkey_2, my_verkey_3); assert_eq!(my_verkey_1, did::key_for_local_did(setup.wallet_handle, &my_did_1).unwrap()); assert_eq!(my_verkey_2, did::key_for_local_did(setup.wallet_handle, &my_did_2).unwrap()); assert_eq!(my_verkey_3, did::key_for_local_did(setup.wallet_handle, &my_did_3).unwrap()); } #[test] fn indy_create_my_did_works_with_seed() { let setup = Setup::wallet(); let (my_did, my_verkey) = did::create_and_store_my_did(setup.wallet_handle, Some(MY1_SEED)).unwrap(); assert_eq!(my_did, DID_MY1); assert_eq!(my_verkey, VERKEY_MY1); } #[test] fn indy_create_my_did_works_with_hex_seed() { let setup = Setup::wallet(); let (my_did, my_verkey) = did::create_and_store_my_did(setup.wallet_handle, Some("94a823a6387cdd30d8f7687d95710ebab84c6e277b724790a5b221440beb7df6")).unwrap(); assert_eq!(my_did, "HWvjYf77k1dqQAk6sE4gaS"); assert_eq!(my_verkey, "A16wi1xHBu5KT4SqNhZXrKZfoQbXJCbDozgSTJhUgu9x"); } #[test] fn indy_create_my_did_works_for_duplicate() { let setup = Setup::wallet(); let (did, verkey) = did::create_and_store_my_did(setup.wallet_handle, Some(MY1_SEED)).unwrap(); let (dup_did, dup_verkey) = did::create_and_store_my_did(setup.wallet_handle, Some(MY1_SEED)).unwrap(); assert_eq!(did, dup_did); assert_eq!(verkey, dup_verkey); let res = did::create_my_did(setup.wallet_handle, &json!({"did": did}).to_string()); assert_code!(ErrorCode::DidAlreadyExistsError, res); } } mod replace_keys_start { use super::*; #[test] fn 
indy_replace_keys_start_works() { let setup = Setup::did(); let new_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap(); assert_ne!(new_verkey, setup.verkey); } #[test] fn indy_replace_keys_start_works_for_fully_qualified() { let setup = Setup::did_fully_qualified(); let new_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap(); assert_ne!(new_verkey, setup.verkey); } #[test] fn indy_replace_keys_start_works_for_seed() { let setup = Setup::did(); let new_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, r#"{"seed":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}"#).unwrap(); assert_eq!(new_verkey, VERKEY); assert_ne!(setup.verkey, new_verkey); } } mod replace_keys_apply { use super::*; #[test] fn indy_replace_keys_apply_works() {<|fim▁hole|> assert_ne!(new_verkey, setup.verkey); did::replace_keys_apply(setup.wallet_handle, &setup.did).unwrap(); } #[test] fn indy_replace_keys_apply_works_for_fully_qualified() { let setup = Setup::did_fully_qualified(); let new_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap(); assert_ne!(new_verkey, setup.verkey); did::replace_keys_apply(setup.wallet_handle, &setup.did).unwrap(); } #[test] fn indy_replace_keys_apply_works_without_calling_replace_start() { let setup = Setup::did(); let res = did::replace_keys_apply(setup.wallet_handle, &setup.did); assert_code!(ErrorCode::WalletItemNotFound, res); } #[test] fn indy_replace_keys_apply_works_for_unknown_did() { let setup = Setup::wallet(); let res = did::replace_keys_apply(setup.wallet_handle, DID); assert_code!(ErrorCode::WalletItemNotFound, res); } #[test] fn indy_replace_keys_works_for_two_dids_have_same_verkey() { let setup = Setup::wallet(); let (my_did_1, my_verkey_1) = did::create_and_store_my_did(setup.wallet_handle, Some(MY1_SEED)).unwrap(); let (my_did_2, my_verkey_2) = did::create_and_store_my_did_v1(setup.wallet_handle, Some(MY1_SEED)).unwrap(); let _ = did::replace_keys_start(setup.wallet_handle, &my_did_1, "{}").unwrap(); did::replace_keys_apply(setup.wallet_handle, &my_did_1).unwrap(); assert_ne!(my_verkey_1, did::key_for_local_did(setup.wallet_handle, &my_did_1).unwrap()); assert_eq!(my_verkey_2, did::key_for_local_did(setup.wallet_handle, &my_did_2).unwrap()); } } mod store_their_did { use super::*; #[test] fn indy_store_their_did_works_for_did_only() { let setup = Setup::wallet(); let identity_json = json!({"did": DID}).to_string(); did::store_their_did(setup.wallet_handle, &identity_json).unwrap(); } #[test] fn indy_store_their_did_works_for_fully_qualified_did_only() { let setup = Setup::wallet(); let identity_json = json!({"did": DID_V1}).to_string(); did::store_their_did(setup.wallet_handle, &identity_json).unwrap(); } #[test] fn indy_store_their_did_works_for_verkey() { let setup = Setup::wallet(); let identity_json = json!({"did": DID, "verkey": VERKEY}).to_string(); did::store_their_did(setup.wallet_handle, &identity_json).unwrap(); } #[test] fn indy_store_their_did_works_twice() { let setup = Setup::wallet(); let identity_json = json!({"did": DID, "verkey": VERKEY}).to_string(); did::store_their_did(setup.wallet_handle, &identity_json).unwrap(); let identity_json = json!({"did": DID, "verkey": VERKEY_TRUSTEE}).to_string(); did::store_their_did(setup.wallet_handle, &identity_json).unwrap(); let verkey = did::key_for_local_did(setup.wallet_handle, DID).unwrap(); assert_eq!(VERKEY_TRUSTEE, verkey); } } mod replace_keys { use super::*; #[test] fn indy_replace_keys_demo() { // 1. 
Create and open pool // 2. Create and open wallet // 3. Generate did from Trustee seed // 4. Generate my did // 5. Send Nym request to Ledger let setup = Setup::new_identity(); // 6. Start replacing of keys let new_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap(); // 7. Send Nym request to Ledger with new verkey let nym_request = ledger::build_nym_request(&setup.did, &setup.did, Some(&new_verkey), None, None).unwrap(); ledger::sign_and_submit_request(setup.pool_handle, setup.wallet_handle, &setup.did, &nym_request).unwrap(); // 8. Send Schema request before apply replacing of keys let schema_request = ledger::build_schema_request(&setup.did, SCHEMA_DATA).unwrap(); let response = ledger::sign_and_submit_request(setup.pool_handle, setup.wallet_handle, &setup.did, &schema_request).unwrap(); pool::check_response_type(&response, ResponseType::REQNACK); // 9. Apply replacing of keys did::replace_keys_apply(setup.wallet_handle, &setup.did).unwrap(); // 10. Send Schema request ledger::sign_and_submit_request(setup.pool_handle, setup.wallet_handle, &setup.did, &schema_request).unwrap(); } } mod abbreviate_verkey { use super::*; #[test] fn indy_abbreviate_verkey_works_for_abbr_key() { let setup = Setup::did(); let abbr_verkey = did::abbreviate_verkey(&setup.did, &setup.verkey).unwrap(); assert_ne!(setup.verkey, abbr_verkey); } #[test] fn indy_abbreviate_verkey_works_for_abbr_key_for_fully_qualified_did() { let setup = Setup::did_fully_qualified(); let abbr_verkey = did::abbreviate_verkey(&setup.did, &setup.verkey).unwrap(); assert_ne!(setup.verkey, abbr_verkey); } #[test] fn indy_abbreviate_verkey_works_for_not_abbr_key() { let setup = Setup::wallet(); let (did, verkey) = did::create_my_did(setup.wallet_handle, &format!(r#"{{"did":{:?}}}"#, DID_TRUSTEE)).unwrap(); let full_verkey = did::abbreviate_verkey(&did, &verkey).unwrap(); assert_eq!(verkey, full_verkey); } } mod qualify_did { use super::*; const CUSTOM_METHOD: &str = "peer"; #[test] fn qualify_did_for_appending_prefix() { let setup = Setup::new_identity(); let full_qualified_did = did::qualify_did(setup.wallet_handle, &setup.did, DEFAULT_METHOD_NAME).unwrap(); assert_eq!(full_qualified_did, format!("{}{}", DEFAULT_PREFIX, setup.did)); } #[test] fn qualify_did_for_updating_prefix() { let setup = Setup::did(); let full_qualified_did = did::qualify_did(setup.wallet_handle, &setup.did, DEFAULT_METHOD_NAME).unwrap(); let new_full_qualified_did = did::qualify_did(setup.wallet_handle, &full_qualified_did, CUSTOM_METHOD).unwrap(); assert_eq!(new_full_qualified_did, format!("did:{}:{}", CUSTOM_METHOD, setup.did)); } #[test] fn qualify_did_for_keeping_related_entities() { let setup = Setup::new_identity(); // set Metadata did::set_did_metadata(setup.wallet_handle, &setup.did, METADATA).unwrap(); // set Endpoint did::set_endpoint_for_did(setup.wallet_handle, &setup.did, ENDPOINT, VERKEY).unwrap(); // set Temporary Verkey let temp_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap(); // set Pairwise did::store_their_did(setup.wallet_handle, &json!({"did": DID}).to_string()).unwrap(); utils::pairwise::create_pairwise(setup.wallet_handle, DID, &setup.did, None).unwrap(); let identity_json = json!({"did": DID_TRUSTEE, "verkey": VERKEY_TRUSTEE}).to_string(); did::store_their_did(setup.wallet_handle, &identity_json).unwrap(); utils::pairwise::create_pairwise(setup.wallet_handle, DID_TRUSTEE, &setup.did, None).unwrap(); let full_qualified_did = did::qualify_did(setup.wallet_handle, &setup.did, 
DEFAULT_METHOD_NAME).unwrap();
            assert_eq!(full_qualified_did, format!("{}{}", DEFAULT_PREFIX, setup.did));

            { // check key for did
                let res = did::key_for_local_did(setup.wallet_handle, &setup.did);
                assert_code!(ErrorCode::WalletItemNotFound, res);

                let verkey = did::key_for_local_did(setup.wallet_handle, &full_qualified_did).unwrap();
                assert_eq!(setup.verkey, verkey);
            }

            { // check did metadata
                let res = did::get_did_metadata(setup.wallet_handle, &setup.did);
                assert_code!(ErrorCode::WalletItemNotFound, res);

                let meta = did::get_did_metadata(setup.wallet_handle, &full_qualified_did).unwrap();
                assert_eq!(METADATA.to_string(), meta);
            }

            { // check endpoint
                let res = did::get_endpoint_for_did(setup.wallet_handle, setup.pool_handle, &setup.did);
                assert_code!(ErrorCode::CommonInvalidState, res); // TODO: is CommonInvalidState the correct code here, or should it be WalletItemNotFound / LedgerNotFound?

                let (endpoint, verkey) = did::get_endpoint_for_did(setup.wallet_handle, INVALID_POOL_HANDLE, &full_qualified_did).unwrap();
                assert_eq!(ENDPOINT.to_string(), endpoint);
                assert_eq!(VERKEY.to_string(), verkey.unwrap());
            }

            { // check temporary key
                let res = did::get_my_did_with_metadata(setup.wallet_handle, &setup.did);
                assert_code!(ErrorCode::WalletItemNotFound, res);

                let meta = did::get_my_did_with_metadata(setup.wallet_handle, &full_qualified_did).unwrap();
                let meta: serde_json::Value = serde_json::from_str(&meta).unwrap();
                assert_eq!(temp_verkey, meta["tempVerkey"].as_str().unwrap().to_string());
            }

            { // check pairwise 1
                let pairwise = utils::pairwise::get_pairwise(setup.wallet_handle, DID).unwrap();
                let pairwise: serde_json::Value = serde_json::from_str(&pairwise).unwrap();
                assert_eq!(full_qualified_did, pairwise["my_did"].as_str().unwrap().to_string());

                // check pairwise 2
                let pairwise = utils::pairwise::get_pairwise(setup.wallet_handle, DID_TRUSTEE).unwrap();
                let pairwise: serde_json::Value = serde_json::from_str(&pairwise).unwrap();
                assert_eq!(full_qualified_did, pairwise["my_did"].as_str().unwrap().to_string());
            }
        }
    }
}

#[cfg(not(feature = "only_high_cases"))]
mod medium_cases {
    use super::*;

    mod key_for_did {
        use super::*;

        #[test]
        fn indy_key_for_did_works_for_invalid_pool_handle() {
            let setup = Setup::wallet();

            let res = did::key_for_did(INVALID_POOL_HANDLE, setup.wallet_handle, DID_TRUSTEE);
            assert_code!(ErrorCode::PoolLedgerInvalidPoolHandle, res);
        }

        #[test]
        fn indy_key_for_did_works_for_invalid_wallet_handle() {
            Setup::empty();

            let res = did::key_for_did(-1, INVALID_WALLET_HANDLE, DID);
            assert_code!(ErrorCode::WalletInvalidHandle, res);
        }
    }

    mod key_for_local_did {
        use super::*;

        #[test]
        fn indy_key_for_local_did_works_for_invalid_wallet_handle() {
            Setup::empty();

            let res = did::key_for_local_did(INVALID_WALLET_HANDLE, DID_TRUSTEE);
            assert_code!(ErrorCode::WalletInvalidHandle, res);
        }
    }

    mod set_endpoint_for_did {
        use super::*;

        #[test]
        fn indy_set_endpoint_for_did_works_for_replace() {
            let setup = Setup::wallet_and_pool();

            did::set_endpoint_for_did(setup.wallet_handle, DID, ENDPOINT, VERKEY).unwrap();
            let (endpoint, key) = did::get_endpoint_for_did(setup.wallet_handle, setup.pool_handle, DID).unwrap();
            assert_eq!(ENDPOINT, endpoint);
            assert_eq!(VERKEY, key.unwrap());

            let new_endpoint = "10.10.10.1:9710";
            did::set_endpoint_for_did(setup.wallet_handle, DID, new_endpoint, VERKEY_MY2).unwrap();
            let (updated_endpoint, updated_key) = did::get_endpoint_for_did(setup.wallet_handle, setup.pool_handle, DID).unwrap();
            assert_eq!(new_endpoint, updated_endpoint);
            assert_eq!(VERKEY_MY2, updated_key.unwrap());
        }
    }

    mod get_did_metadata {
        use super::*;
#[test] fn indy_get_did_metadata_works_for_empty_string() { let setup = Setup::did(); did::set_did_metadata(setup.wallet_handle, &setup.did, "").unwrap(); let metadata = did::get_did_metadata(setup.wallet_handle, &setup.did).unwrap(); assert_eq!("", metadata); } #[test] fn indy_get_did_metadata_works_for_invalid_handle() { Setup::empty(); let res = did::get_did_metadata(INVALID_WALLET_HANDLE, DID); assert_code!(ErrorCode::WalletInvalidHandle, res); } } mod create_my_did { use super::*; #[test] fn indy_create_my_did_works_as_cid() { let setup = Setup::wallet(); let (my_did, my_verkey) = did::create_my_did(setup.wallet_handle, r#"{"seed":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","cid":true}"#).unwrap(); assert_eq!(my_did, VERKEY); assert_eq!(my_verkey, VERKEY); } #[test] fn indy_create_my_did_works_with_passed_did() { let setup = Setup::wallet(); let (my_did, my_verkey) = did::create_my_did(setup.wallet_handle, &format!(r#"{{"did":"{}","seed":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}}"#, DID)).unwrap(); assert_eq!(my_did, DID); assert_eq!(my_verkey, VERKEY); } #[test] fn indy_create_my_did_works_for_exists_crypto_type() { let setup = Setup::wallet(); did::create_my_did(setup.wallet_handle, r#"{"crypto_type":"ed25519"}"#).unwrap(); } #[test] fn indy_create_my_did_works_for_invalid_wallet_handle() { Setup::empty(); let res = did::create_my_did(INVALID_WALLET_HANDLE, "{}"); assert_code!(ErrorCode::WalletInvalidHandle, res); } } mod replace_keys_start { use super::*; #[test] fn indy_replace_keys_start_works_for_invalid_wallet_handle() { Setup::empty(); let res = did::replace_keys_start(INVALID_WALLET_HANDLE, DID, "{}"); assert_code!(ErrorCode::WalletInvalidHandle, res); } #[test] fn indy_replace_keys_start_works_for_seed() { let setup = Setup::did(); let new_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, r#"{"seed":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}"#).unwrap(); assert_eq!(new_verkey, VERKEY); assert_ne!(setup.verkey, new_verkey); } } mod replace_keys_apply { use super::*; #[test] fn indy_replace_keys_apply_works_for_invalid_wallet_handle() { Setup::empty(); let res = did::replace_keys_apply(INVALID_WALLET_HANDLE, DID); assert_code!(ErrorCode::WalletInvalidHandle, res); } } mod store_their_did { use super::*; #[test] fn indy_store_their_did_works_for_verkey_with_crypto_type() { let setup = Setup::wallet(); let identity_json = json!({"did": DID, "verkey": VERKEY.to_owned() + ":ed25519"}).to_string(); did::store_their_did(setup.wallet_handle, &identity_json).unwrap(); } #[test] fn indy_create_my_did_works_for_invalid_seed() { let setup = Setup::wallet(); let res = did::create_my_did(setup.wallet_handle, r#"{"seed":"seed"}"#); assert_code!(ErrorCode::CommonInvalidStructure, res); } #[test] fn indy_store_their_did_works_for_invalid_wallet_handle() { Setup::empty(); let identity_json = json!({"did": DID}).to_string(); let res = did::store_their_did(INVALID_WALLET_HANDLE, &identity_json); assert_code!(ErrorCode::WalletInvalidHandle, res); } #[test] fn indy_store_their_did_works_for_abbreviated_verkey() { let setup = Setup::wallet(); let identity_json = r#"{"did":"8wZcEriaNLNKtteJvx7f8i", "verkey":"~NcYxiDXkpYi6ov5FcYDi1e"}"#; did::store_their_did(setup.wallet_handle, identity_json).unwrap(); } #[test] fn indy_store_their_did_works_for_abbreviated_verkey_for_fully_qualified() { let setup = Setup::wallet(); let identity_json = r#"{"did":"did:sov:8wZcEriaNLNKtteJvx7f8i", "verkey":"~NcYxiDXkpYi6ov5FcYDi1e"}"#; did::store_their_did(setup.wallet_handle, identity_json).unwrap(); } #[test] fn 
indy_create_my_did_works_for_invalid_json() { let setup = Setup::wallet(); let res = did::create_my_did(setup.wallet_handle, r#"{"seed":123}"#); assert_code!(ErrorCode::CommonInvalidStructure, res); } #[test] fn indy_store_their_did_works_for_invalid_did() { let setup = Setup::wallet(); let identity_json = json!({"did": INVALID_BASE58_DID}).to_string(); let res = did::store_their_did(setup.wallet_handle, &identity_json); assert_code!(ErrorCode::CommonInvalidStructure, res); } #[test] fn indy_store_their_did_works_for_invalid_verkey() { let setup = Setup::wallet(); let identity_json = json!({"did": "did", "verkey":"invalid_base58string"}).to_string(); let res = did::store_their_did(setup.wallet_handle, &identity_json); assert_code!(ErrorCode::CommonInvalidStructure, res); } #[test] fn indy_store_their_did_works_for_verkey_with_invalid_crypto_type() { let setup = Setup::wallet(); let identity_json = json!({"did": DID, "verkey": VERKEY.to_owned() + ":crypto_type"}).to_string(); let res = did::store_their_did(setup.wallet_handle, &identity_json); assert_code!(ErrorCode::UnknownCryptoTypeError, res); } #[test] fn indy_store_my_did_works_for_is_802() { let setup = Setup::wallet(); let identity_json = json!({"did": DID}).to_string(); // 1. Try 'createAndStoreMyDid' operation with say did1 and verkey1 did::create_my_did(setup.wallet_handle, &identity_json).unwrap(); // 2. Repeat above operation (with same did and ver key used in #1) // but this time catch and swallow the exception (it will throw the exception WalletItemAlreadyExistsException) let res = did::create_my_did(setup.wallet_handle, &identity_json); assert_code!(ErrorCode::DidAlreadyExistsError, res); // 3. Then, now if you try 'createAndStoreMyDid' operation // (either with same did and verkey or you can choose different did and verkey), // in IS-802 it fails with error 'Storage error occurred during wallet operation.' let res = did::create_my_did (setup.wallet_handle, &identity_json); assert_code!(ErrorCode::DidAlreadyExistsError, res); } } mod replace_keys { use super::*; #[test] fn indy_replace_keys_without_nym_transaction() { let setup = Setup::wallet_and_pool(); let (my_did, _) = did::create_store_and_publish_my_did_from_trustee(setup.wallet_handle, setup.pool_handle).unwrap(); did::replace_keys_start(setup.wallet_handle, &my_did, "{}").unwrap(); did::replace_keys_apply(setup.wallet_handle, &my_did).unwrap(); let schema_request = ledger::build_schema_request(&my_did, SCHEMA_DATA).unwrap(); let response = ledger::sign_and_submit_request(setup.pool_handle, setup.wallet_handle, &my_did, &schema_request).unwrap(); pool::check_response_type(&response, ResponseType::REQNACK); } } mod abbreviate_verkey { use super::*; #[test] fn indy_abbreviate_verkey_works_for_invalid_did() { let res = did::abbreviate_verkey(INVALID_BASE58_DID, VERKEY_TRUSTEE); assert_code!(ErrorCode::CommonInvalidStructure, res); } #[test] fn indy_abbreviate_verkey_works_for_invalid_verkey() { let res = did::abbreviate_verkey(DID_TRUSTEE, INVALID_BASE58_VERKEY); assert_code!(ErrorCode::CommonInvalidStructure, res); } } }<|fim▁end|>
let setup = Setup::did(); let new_verkey = did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap();
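// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, separate from the sample above. Key
// rotation in these tests is a two-phase protocol: replace_keys_start() only
// stages a temporary verkey, and the wallet keeps answering with the old key
// until replace_keys_apply() commits the rotation. A hypothetical condensed
// test of that lifecycle, using the same Setup/did helpers as this file.
// ---------------------------------------------------------------------------
#[test]
fn replace_keys_lifecycle_sketch() {
    let setup = Setup::did();

    // Phase 1: stage a new key; the old verkey is still the active one.
    let staged = did::replace_keys_start(setup.wallet_handle, &setup.did, "{}").unwrap();
    assert_ne!(staged, setup.verkey);
    assert_eq!(setup.verkey, did::key_for_local_did(setup.wallet_handle, &setup.did).unwrap());

    // Phase 2: commit; the staged verkey becomes the active one.
    did::replace_keys_apply(setup.wallet_handle, &setup.did).unwrap();
    assert_eq!(staged, did::key_for_local_did(setup.wallet_handle, &setup.did).unwrap());
}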
<|file_name|>self_hosted_integration_runtime_node.py<|end_file_name|><|fim▁begin|># coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class SelfHostedIntegrationRuntimeNode(Model): """Properties of Self-hosted integration runtime node. Variables are only populated by the server, and will be ignored when sending a request. :ivar node_name: Name of the integration runtime node. :vartype node_name: str :ivar machine_name: Machine name of the integration runtime node. :vartype machine_name: str :ivar host_service_uri: URI for the host machine of the integration runtime. :vartype host_service_uri: str :ivar status: Status of the integration runtime node. Possible values include: 'NeedRegistration', 'Online', 'Limited', 'Offline', 'Upgrading', 'Initializing', 'InitializeFailed' :vartype status: str or ~azure.mgmt.datafactory.models.SelfHostedIntegrationRuntimeNodeStatus :ivar capabilities: The integration runtime capabilities dictionary :vartype capabilities: dict[str, str] :ivar version_status: Status of the integration runtime node version. :vartype version_status: str :ivar version: Version of the integration runtime node. :vartype version: str :ivar register_time: The time at which the integration runtime node was registered in ISO8601 format. :vartype register_time: datetime :ivar last_connect_time: The most recent time at which the integration runtime was connected in ISO8601 format. :vartype last_connect_time: datetime :ivar expiry_time: The time at which the integration runtime will expire in ISO8601 format. :vartype expiry_time: datetime :ivar last_start_time: The time the node last started up. :vartype last_start_time: datetime :ivar last_stop_time: The integration runtime node last stop time. :vartype last_stop_time: datetime :ivar last_update_result: The result of the last integration runtime node update. Possible values include: 'Succeed', 'Fail' :vartype last_update_result: str or ~azure.mgmt.datafactory.models.IntegrationRuntimeUpdateResult :ivar last_start_update_time: The last time for the integration runtime node update start. :vartype last_start_update_time: datetime :ivar last_end_update_time: The last time for the integration runtime node update end. :vartype last_end_update_time: datetime :ivar is_active_dispatcher: Indicates whether this node is the active dispatcher for integration runtime requests. :vartype is_active_dispatcher: bool :ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration runtime node. :vartype concurrent_jobs_limit: int :ivar max_concurrent_jobs: The maximum concurrent jobs in this integration runtime. 
:vartype max_concurrent_jobs: int """ _validation = { 'node_name': {'readonly': True}, 'machine_name': {'readonly': True}, 'host_service_uri': {'readonly': True}, 'status': {'readonly': True}, 'capabilities': {'readonly': True}, 'version_status': {'readonly': True},<|fim▁hole|> 'last_connect_time': {'readonly': True}, 'expiry_time': {'readonly': True}, 'last_start_time': {'readonly': True}, 'last_stop_time': {'readonly': True}, 'last_update_result': {'readonly': True}, 'last_start_update_time': {'readonly': True}, 'last_end_update_time': {'readonly': True}, 'is_active_dispatcher': {'readonly': True}, 'concurrent_jobs_limit': {'readonly': True}, 'max_concurrent_jobs': {'readonly': True}, } _attribute_map = { 'node_name': {'key': 'nodeName', 'type': 'str'}, 'machine_name': {'key': 'machineName', 'type': 'str'}, 'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'capabilities': {'key': 'capabilities', 'type': '{str}'}, 'version_status': {'key': 'versionStatus', 'type': 'str'}, 'version': {'key': 'version', 'type': 'str'}, 'register_time': {'key': 'registerTime', 'type': 'iso-8601'}, 'last_connect_time': {'key': 'lastConnectTime', 'type': 'iso-8601'}, 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, 'last_start_time': {'key': 'lastStartTime', 'type': 'iso-8601'}, 'last_stop_time': {'key': 'lastStopTime', 'type': 'iso-8601'}, 'last_update_result': {'key': 'lastUpdateResult', 'type': 'str'}, 'last_start_update_time': {'key': 'lastStartUpdateTime', 'type': 'iso-8601'}, 'last_end_update_time': {'key': 'lastEndUpdateTime', 'type': 'iso-8601'}, 'is_active_dispatcher': {'key': 'isActiveDispatcher', 'type': 'bool'}, 'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'}, 'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'}, } def __init__(self): self.node_name = None self.machine_name = None self.host_service_uri = None self.status = None self.capabilities = None self.version_status = None self.version = None self.register_time = None self.last_connect_time = None self.expiry_time = None self.last_start_time = None self.last_stop_time = None self.last_update_result = None self.last_start_update_time = None self.last_end_update_time = None self.is_active_dispatcher = None self.concurrent_jobs_limit = None self.max_concurrent_jobs = None<|fim▁end|>
'version': {'readonly': True}, 'register_time': {'readonly': True},
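The generated model above is driven by two class-level tables: _validation marks every field readonly (matching the docstring's note that these values are only populated by the server and ignored when sending a request), and _attribute_map ties each snake_case Python attribute to its camelCase JSON wire key and msrest type. A minimal sketch of that mapping idea, assuming plain dicts and no msrest dependency; the class, helper, and payload below are illustrative, not part of the Azure SDK:

# Sketch: populate snake_case attributes from a camelCase payload using an
# attribute map like SelfHostedIntegrationRuntimeNode._attribute_map.
ATTRIBUTE_MAP = {
    'node_name': 'nodeName',
    'status': 'status',
    'concurrent_jobs_limit': 'concurrentJobsLimit',
}

class NodeSketch:
    def __init__(self, payload):
        # Missing wire keys simply become None, mirroring the generated
        # __init__, which initializes every attribute to None.
        for attr, wire_key in ATTRIBUTE_MAP.items():
            setattr(self, attr, payload.get(wire_key))

node = NodeSketch({'nodeName': 'node-1', 'status': 'Online', 'concurrentJobsLimit': 4})
print(node.node_name, node.status, node.concurrent_jobs_limit)  # node-1 Online 4

msrest's real Deserializer additionally applies the declared wire types (for example iso-8601 for the *_time fields); the sketch skips that step.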
<|file_name|>player.js<|end_file_name|><|fim▁begin|>/** * JS for the player character. * * * * */ import * as Consts from './consts'; var leftLeg; var rightLeg; var leftArm; var rightArm; const BODY_HEIGHT = 5; const LEG_HEIGHT = 5; const HEAD_HEIGHT = Consts.BLOCK_WIDTH * (3/5); const SKIN_COLORS = [0xFADCAB, 0x9E7245, 0x4F3F2F]; const BASE_MAT = new THREE.MeshLambertMaterial({color: 0xFF0000}); export var Player = function() { THREE.Object3D.call(this); this.position.y += BODY_HEIGHT / 2 + LEG_HEIGHT / 2 + HEAD_HEIGHT / 2 + HEAD_HEIGHT; this.moveLeft = false; this.moveRight = false; this.moveUp = false; this.moveDown = false; this.orientation = "backward"; var scope = this; var legGeo = new THREE.BoxGeometry(Consts.BLOCK_WIDTH / 2, LEG_HEIGHT, Consts.BLOCK_WIDTH / 2); var armGeo = new THREE.BoxGeometry(Consts.BLOCK_WIDTH / 2, BODY_HEIGHT, Consts.BLOCK_WIDTH / 2); // Base mat(s) var redMaterial = new THREE.MeshLambertMaterial({color: 0xFF2E00}); var blueMaterial = new THREE.MeshLambertMaterial({color: 0x23A8FC}); var yellowMaterial = new THREE.MeshLambertMaterial({color: 0xFFD000}); // Skin color mat, only used for head var skinColor = SKIN_COLORS[Math.floor(Math.random() * SKIN_COLORS.length)] var skinMat = new THREE.MeshLambertMaterial({color: skinColor}); // Body material var bodyFrontMat = new THREE.MeshPhongMaterial({color: 0xFFFFFF}); var bodyFrontTexture = new THREE.TextureLoader().load("img/tetratowerbodyfront.png", function(texture) { bodyFrontMat.map = texture; bodyFrontMat.needsUpdate = true; }) var bodyMat = new THREE.MultiMaterial([ redMaterial, redMaterial, redMaterial, redMaterial, bodyFrontMat, bodyFrontMat ]); var armSideMat = new THREE.MeshLambertMaterial({color: 0xFFFFFF}) var armTopMat = new THREE.MeshLambertMaterial({color: 0xFFFFFF}); var armMat = new THREE.MultiMaterial([ armSideMat, armSideMat, armTopMat, armTopMat, armSideMat, armSideMat ]); // Leg material var legSideMat = new THREE.MeshLambertMaterial({color: 0xFFFFFF}) var legMat = new THREE.MultiMaterial([ legSideMat, legSideMat, blueMaterial, blueMaterial, legSideMat, legSideMat ]); var legTexture = new THREE.TextureLoader().load("/img/tetratowerleg.png", function (texture) { legSideMat.map = texture; legSideMat.needsUpdate = true; }); var textureURL; switch (skinColor) { case SKIN_COLORS[0]: textureURL = "/img/tetratowerarm_white.png"; break; case SKIN_COLORS[1]: textureURL = "/img/tetratowerarm_brown.png"; break; case SKIN_COLORS[2]: textureURL = "/img/tetratowerarm_black.png"; break; default:<|fim▁hole|> break; } var armTexture = new THREE.TextureLoader().load(textureURL, function(texture) { armSideMat.map = texture; armSideMat.needsUpdate = true; }); var armTopTexture = new THREE.TextureLoader().load("img/tetratowerarmtop.png", function(texture) { armTopMat.map = texture; armTopMat.needsUpdate = true; }) // Create a body var bodyGeo = new THREE.BoxGeometry(Consts.BLOCK_WIDTH, BODY_HEIGHT, Consts.BLOCK_WIDTH / 2); var body = new THREE.Mesh(bodyGeo, bodyMat); this.add(body); // Create some leggy legs leftLeg = new THREE.Mesh(legGeo, legMat); this.add(leftLeg) leftLeg.translateX(-Consts.BLOCK_WIDTH / 4); leftLeg.translateY(-(LEG_HEIGHT + BODY_HEIGHT) / 2); rightLeg = new THREE.Mesh(legGeo, legMat); this.add(rightLeg); rightLeg.translateX(Consts.BLOCK_WIDTH / 4); rightLeg.translateY(-(LEG_HEIGHT + BODY_HEIGHT) / 2); // Create the arms leftArm = new THREE.Mesh(armGeo, armMat); this.add(leftArm); leftArm.translateX(-(Consts.BLOCK_WIDTH / 4 + Consts.BLOCK_WIDTH / 2)); rightArm = new THREE.Mesh(armGeo, armMat); 
this.add(rightArm); rightArm.translateX((Consts.BLOCK_WIDTH / 4 + Consts.BLOCK_WIDTH / 2)); // Now add a head var headGeo = new THREE.BoxGeometry(Consts.BLOCK_WIDTH * (3/5), Consts.BLOCK_WIDTH * (3/5), Consts.BLOCK_WIDTH * (3/5)); var head = new THREE.Mesh(headGeo, skinMat); this.add(head); head.translateY((BODY_HEIGHT + HEAD_HEIGHT) / 2); // And a fashionable hat var hatBodyGeo = new THREE.BoxGeometry(HEAD_HEIGHT * 1.05, HEAD_HEIGHT * (4/5), HEAD_HEIGHT * 1.05); var hatBody = new THREE.Mesh(hatBodyGeo, yellowMaterial); head.add(hatBody); hatBody.translateY(HEAD_HEIGHT * (4/5)); var hatBrimGeo = new THREE.BoxGeometry(HEAD_HEIGHT * 1.05, HEAD_HEIGHT / 5, HEAD_HEIGHT * 0.525); var hatBrim = new THREE.Mesh(hatBrimGeo, yellowMaterial); head.add(hatBrim); hatBrim.translateZ((HEAD_HEIGHT * 1.05) / 2 + (HEAD_HEIGHT * 0.525 / 2)); hatBrim.translateY(HEAD_HEIGHT / 2); // Add some listeners var onKeyDown = function(event) { switch(event.keyCode) { case 38: // up case 87: // w scope.moveForward = true; break; case 40: // down case 83: // s scope.moveBackward = true; break; case 37: // left case 65: // a scope.moveLeft = true; break; case 39: // right case 68: // d scope.moveRight = true; break; } } var onKeyUp = function(event) { switch(event.keyCode) { case 38: // up case 87: // w scope.moveForward = false; break; case 40: // down case 83: // s scope.moveBackward = false; break; case 37: // left case 65: // a scope.moveLeft = false; break; case 39: // right case 68: // d scope.moveRight = false; break; } } document.addEventListener('keydown', onKeyDown, false); document.addEventListener('keyup', onKeyUp, false); } Player.prototype = new THREE.Object3D(); Player.prototype.constructor = Player; THREE.Object3D.prototype.worldToLocal = function ( vector ) { if ( !this.__inverseMatrixWorld ) this.__inverseMatrixWorld = new THREE.Matrix4(); return vector.applyMatrix4( this.__inverseMatrixWorld.getInverse( this.matrixWorld )); }; THREE.Object3D.prototype.lookAtWorld = function( vector ) { vector = vector.clone(); this.parent.worldToLocal( vector ); this.lookAt( vector ); };<|fim▁end|>
textureURL = "/img/tetratowerarm.png";
<|file_name|>thirteen.rs<|end_file_name|><|fim▁begin|>extern crate num; use self::num::bigint::BigInt; use std::str::FromStr; pub fn run() { let nums: Vec<BigInt> = vec![ BigInt::from_str("37107287533902102798797998220837590246510135740250").unwrap(), BigInt::from_str("46376937677490009712648124896970078050417018260538").unwrap(), BigInt::from_str("74324986199524741059474233309513058123726617309629").unwrap(), BigInt::from_str("91942213363574161572522430563301811072406154908250").unwrap(), BigInt::from_str("23067588207539346171171980310421047513778063246676").unwrap(), BigInt::from_str("89261670696623633820136378418383684178734361726757").unwrap(), BigInt::from_str("28112879812849979408065481931592621691275889832738").unwrap(), BigInt::from_str("44274228917432520321923589422876796487670272189318").unwrap(), BigInt::from_str("47451445736001306439091167216856844588711603153276").unwrap(), BigInt::from_str("70386486105843025439939619828917593665686757934951").unwrap(), BigInt::from_str("62176457141856560629502157223196586755079324193331").unwrap(), BigInt::from_str("64906352462741904929101432445813822663347944758178").unwrap(), BigInt::from_str("92575867718337217661963751590579239728245598838407").unwrap(), BigInt::from_str("58203565325359399008402633568948830189458628227828").unwrap(), BigInt::from_str("80181199384826282014278194139940567587151170094390").unwrap(), BigInt::from_str("35398664372827112653829987240784473053190104293586").unwrap(), BigInt::from_str("86515506006295864861532075273371959191420517255829").unwrap(), BigInt::from_str("71693888707715466499115593487603532921714970056938").unwrap(), BigInt::from_str("54370070576826684624621495650076471787294438377604").unwrap(), BigInt::from_str("53282654108756828443191190634694037855217779295145").unwrap(), BigInt::from_str("36123272525000296071075082563815656710885258350721").unwrap(), BigInt::from_str("45876576172410976447339110607218265236877223636045").unwrap(), BigInt::from_str("17423706905851860660448207621209813287860733969412").unwrap(), BigInt::from_str("81142660418086830619328460811191061556940512689692").unwrap(), BigInt::from_str("51934325451728388641918047049293215058642563049483").unwrap(), BigInt::from_str("62467221648435076201727918039944693004732956340691").unwrap(), BigInt::from_str("15732444386908125794514089057706229429197107928209").unwrap(), BigInt::from_str("55037687525678773091862540744969844508330393682126").unwrap(), BigInt::from_str("18336384825330154686196124348767681297534375946515").unwrap(), BigInt::from_str("80386287592878490201521685554828717201219257766954").unwrap(), BigInt::from_str("78182833757993103614740356856449095527097864797581").unwrap(), BigInt::from_str("16726320100436897842553539920931837441497806860984").unwrap(), BigInt::from_str("48403098129077791799088218795327364475675590848030").unwrap(), BigInt::from_str("87086987551392711854517078544161852424320693150332").unwrap(), BigInt::from_str("59959406895756536782107074926966537676326235447210").unwrap(), BigInt::from_str("69793950679652694742597709739166693763042633987085").unwrap(), BigInt::from_str("41052684708299085211399427365734116182760315001271").unwrap(), BigInt::from_str("65378607361501080857009149939512557028198746004375").unwrap(), BigInt::from_str("35829035317434717326932123578154982629742552737307").unwrap(), BigInt::from_str("94953759765105305946966067683156574377167401875275").unwrap(), BigInt::from_str("88902802571733229619176668713819931811048770190271").unwrap(), 
BigInt::from_str("25267680276078003013678680992525463401061632866526").unwrap(), BigInt::from_str("36270218540497705585629946580636237993140746255962").unwrap(), BigInt::from_str("24074486908231174977792365466257246923322810917141").unwrap(), BigInt::from_str("91430288197103288597806669760892938638285025333403").unwrap(), BigInt::from_str("34413065578016127815921815005561868836468420090470").unwrap(), BigInt::from_str("23053081172816430487623791969842487255036638784583").unwrap(), BigInt::from_str("11487696932154902810424020138335124462181441773470").unwrap(), BigInt::from_str("63783299490636259666498587618221225225512486764533").unwrap(), BigInt::from_str("67720186971698544312419572409913959008952310058822").unwrap(), BigInt::from_str("95548255300263520781532296796249481641953868218774").unwrap(), BigInt::from_str("76085327132285723110424803456124867697064507995236").unwrap(), BigInt::from_str("37774242535411291684276865538926205024910326572967").unwrap(), BigInt::from_str("23701913275725675285653248258265463092207058596522").unwrap(), BigInt::from_str("29798860272258331913126375147341994889534765745501").unwrap(), BigInt::from_str("18495701454879288984856827726077713721403798879715").unwrap(),<|fim▁hole|> BigInt::from_str("29746152185502371307642255121183693803580388584903").unwrap(), BigInt::from_str("41698116222072977186158236678424689157993532961922").unwrap(), BigInt::from_str("62467957194401269043877107275048102390895523597457").unwrap(), BigInt::from_str("23189706772547915061505504953922979530901129967519").unwrap(), BigInt::from_str("86188088225875314529584099251203829009407770775672").unwrap(), BigInt::from_str("11306739708304724483816533873502340845647058077308").unwrap(), BigInt::from_str("82959174767140363198008187129011875491310547126581").unwrap(), BigInt::from_str("97623331044818386269515456334926366572897563400500").unwrap(), BigInt::from_str("42846280183517070527831839425882145521227251250327").unwrap(), BigInt::from_str("55121603546981200581762165212827652751691296897789").unwrap(), BigInt::from_str("32238195734329339946437501907836945765883352399886").unwrap(), BigInt::from_str("75506164965184775180738168837861091527357929701337").unwrap(), BigInt::from_str("62177842752192623401942399639168044983993173312731").unwrap(), BigInt::from_str("32924185707147349566916674687634660915035914677504").unwrap(), BigInt::from_str("99518671430235219628894890102423325116913619626622").unwrap(), BigInt::from_str("73267460800591547471830798392868535206946944540724").unwrap(), BigInt::from_str("76841822524674417161514036427982273348055556214818").unwrap(), BigInt::from_str("97142617910342598647204516893989422179826088076852").unwrap(), BigInt::from_str("87783646182799346313767754307809363333018982642090").unwrap(), BigInt::from_str("10848802521674670883215120185883543223812876952786").unwrap(), BigInt::from_str("71329612474782464538636993009049310363619763878039").unwrap(), BigInt::from_str("62184073572399794223406235393808339651327408011116").unwrap(), BigInt::from_str("66627891981488087797941876876144230030984490851411").unwrap(), BigInt::from_str("60661826293682836764744779239180335110989069790714").unwrap(), BigInt::from_str("85786944089552990653640447425576083659976645795096").unwrap(), BigInt::from_str("66024396409905389607120198219976047599490197230297").unwrap(), BigInt::from_str("64913982680032973156037120041377903785566085089252").unwrap(), BigInt::from_str("16730939319872750275468906903707539413042652315011").unwrap(), 
BigInt::from_str("94809377245048795150954100921645863754710598436791").unwrap(), BigInt::from_str("78639167021187492431995700641917969777599028300699").unwrap(), BigInt::from_str("15368713711936614952811305876380278410754449733078").unwrap(), BigInt::from_str("40789923115535562561142322423255033685442488917353").unwrap(), BigInt::from_str("44889911501440648020369068063960672322193204149535").unwrap(), BigInt::from_str("41503128880339536053299340368006977710650566631954").unwrap(), BigInt::from_str("81234880673210146739058568557934581403627822703280").unwrap(), BigInt::from_str("82616570773948327592232845941706525094512325230608").unwrap(), BigInt::from_str("22918802058777319719839450180888072429661980811197").unwrap(), BigInt::from_str("77158542502016545090413245809786882778948721859617").unwrap(), BigInt::from_str("72107838435069186155435662884062257473692284509516").unwrap(), BigInt::from_str("20849603980134001723930671666823555245252804609722").unwrap(), BigInt::from_str("53503534226472524250874054075591789781264330331690").unwrap(), ]; let zero = BigInt::from_str("0").unwrap(); println!("Sum: {}", nums.iter().fold(zero, |memo, n| memo + n)); }<|fim▁end|>
BigInt::from_str("38298203783031473527721580348144513491373226651381").unwrap(), BigInt::from_str("34829543829199918180278916522431027392251122869539").unwrap(), BigInt::from_str("40957953066405232632538044100059654939159879593635").unwrap(),
<|file_name|>leveldb.go<|end_file_name|><|fim▁begin|>// Copyright 2012 The LevelDB-Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package leveldb provides an ordered key/value store. // // BUG: This package is incomplete. package leveldb import ( "bytes" "fmt" "io" "path/filepath" "sort" "code.google.com/p/leveldb-go/leveldb/db" "code.google.com/p/leveldb-go/leveldb/memdb" "code.google.com/p/leveldb-go/leveldb/record" "code.google.com/p/leveldb-go/leveldb/table" ) // TODO: document DB. type DB struct { dirname string opts *db.Options icmp internalKeyComparer fileLock io.Closer logFile db.File log *record.Writer versions versionSet } var _ db.DB = (*DB)(nil) func (d *DB) Get(key []byte, opts *db.ReadOptions) ([]byte, error) { panic("unimplemented") } func (d *DB) Set(key, value []byte, opts *db.WriteOptions) error { panic("unimplemented") } func (d *DB) Delete(key []byte, opts *db.WriteOptions) error { panic("unimplemented") } func (d *DB) Apply(batch Batch, opts *db.WriteOptions) error { panic("unimplemented") } func (d *DB) Find(key []byte, opts *db.ReadOptions) db.Iterator { panic("unimplemented") } func (d *DB) Close() error { if d.fileLock == nil { return nil } err := d.fileLock.Close() d.fileLock = nil return err } type fileNumAndName struct { num uint64 name string } type fileNumAndNameSlice []fileNumAndName func (p fileNumAndNameSlice) Len() int { return len(p) } func (p fileNumAndNameSlice) Less(i, j int) bool { return p[i].num < p[j].num } func (p fileNumAndNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }<|fim▁hole|>// Open opens a LevelDB whose files live in the given directory. func Open(dirname string, opts *db.Options) (*DB, error) { d := &DB{ dirname: dirname, opts: opts, icmp: internalKeyComparer{opts.GetComparer()}, } fs := opts.GetFileSystem() // Lock the database directory. err := fs.MkdirAll(dirname, 0755) if err != nil { return nil, err } fileLock, err := fs.Lock(dbFilename(dirname, fileTypeLock, 0)) if err != nil { return nil, err } defer func() { if fileLock != nil { fileLock.Close() } }() // TODO: add options for CreateIfMissing and ErrorIfExists, and check them here. // Load the version set. err = d.versions.load(dirname, opts) if err != nil { return nil, err } // Replay any newer log files than the ones named in the manifest. var ve versionEdit ls, err := fs.List(dirname) if err != nil { return nil, err } var logFiles fileNumAndNameSlice for _, filename := range ls { n := logFileNum(filename) if n != 0 && (n >= d.versions.logNumber || n == d.versions.prevLogNumber) { logFiles = append(logFiles, fileNumAndName{n, filename}) } } sort.Sort(logFiles) for _, lf := range logFiles { maxSeqNum, err := d.replayLogFile(&ve, fs, filepath.Join(dirname, lf.name)) if err != nil { return nil, err } d.versions.markFileNumUsed(lf.num) if d.versions.lastSequence < maxSeqNum { d.versions.lastSequence = maxSeqNum } } // Create an empty .log file. ve.logNumber = d.versions.nextFileNum() logFile, err := fs.Create(dbFilename(dirname, fileTypeLog, ve.logNumber)) if err != nil { return nil, err } defer func() { if logFile != nil { logFile.Close() } }() d.log = record.NewWriter(logFile) // Write a new manifest to disk. if err := d.versions.logAndApply(dirname, &ve); err != nil { return nil, err } // TODO: delete obsolete files. // TODO: maybe schedule compaction? 
d.logFile, logFile = logFile, nil d.fileLock, fileLock = fileLock, nil return d, nil } func (d *DB) replayLogFile(ve *versionEdit, fs db.FileSystem, filename string) (maxSeqNum uint64, err error) { file, err := fs.Open(filename) if err != nil { return 0, err } defer file.Close() var ( mem *memdb.MemDB batchBuf = new(bytes.Buffer) ikeyBuf = make(internalKey, 512) rr = record.NewReader(file) ) for { r, err := rr.Next() if err == io.EOF { break } if err != nil { return 0, err } _, err = io.Copy(batchBuf, r) if err != nil { return 0, err } if batchBuf.Len() < batchHeaderLen { return 0, fmt.Errorf("leveldb: corrupt log file %q", filename) } b := Batch{batchBuf.Bytes()} seqNum := b.seqNum() seqNum1 := seqNum + uint64(b.count()) if maxSeqNum < seqNum1 { maxSeqNum = seqNum1 } if mem == nil { mem = memdb.New(&db.Options{ Comparer: d.icmp, }) } t := b.iter() for ; seqNum != seqNum1; seqNum++ { kind, key, value, ok := t.next() if !ok { return 0, fmt.Errorf("leveldb: corrupt log file %q", filename) } // Convert seqNum, kind and key into an internalKey, and add that ikey/value // pair to mem. // // TODO: instead of copying to an intermediate buffer (ikeyBuf), is it worth // adding a SetTwoPartKey(db.TwoPartKey{key0, key1}, value, opts) method to // memdb.MemDB? What effect does that have on the db.Comparer interface? // // The C++ LevelDB code does not need an intermediate copy because its memdb // implementation is a private implementation detail, and copies each internal // key component from the Batch format straight to the skiplist buffer. // // Go's LevelDB considers the memdb functionality to be useful in its own // right, and so leveldb/memdb is a separate package that is usable without // having to import the top-level leveldb package. That extra abstraction // means that we need to copy to an intermediate buffer here, to reconstruct // the complete internal key to pass to the memdb. if n := len(ikeyBuf); n < len(key)+8 { for { n *= 2 if n >= len(key)+8 { break } } ikeyBuf = make(internalKey, n) } ikey := ikeyBuf[:len(key)+8] copy(ikey, key) ikey.encodeTrailer(kind, seqNum) mem.Set(ikey, value, nil) } if len(t) != 0 { return 0, fmt.Errorf("leveldb: corrupt log file %q", filename) } // TODO: if mem is large enough, write it to a level-0 table and set mem = nil. batchBuf.Reset() } if mem != nil && !mem.Empty() { meta, err := d.writeLevel0Table(fs, mem) if err != nil { return 0, err } ve.newFiles = append(ve.newFiles, newFileEntry{level: 0, meta: meta}) } return maxSeqNum, nil } // firstError returns the first non-nil error of err0 and err1, or nil if both // are nil. func firstError(err0, err1 error) error { if err0 != nil { return err0 } return err1 } func (d *DB) writeLevel0Table(fs db.FileSystem, mem *memdb.MemDB) (meta fileMetadata, err error) { meta.fileNum = d.versions.nextFileNum() filename := dbFilename(d.dirname, fileTypeTable, meta.fileNum) // TODO: add meta.fileNum to a set of 'pending outputs' so that a // concurrent sweep of obsolete db files won't delete the fileNum file. // It is the caller's responsibility to remove that fileNum from the // set of pending outputs. 
var ( file db.File tw *table.Writer iter db.Iterator ) defer func() { if iter != nil { err = firstError(err, iter.Close()) } if tw != nil { err = firstError(err, tw.Close()) } if file != nil { err = firstError(err, file.Close()) } if err != nil { fs.Remove(filename) meta = fileMetadata{} } }() file, err = fs.Create(filename) if err != nil { return fileMetadata{}, err } tw = table.NewWriter(file, &db.Options{ Comparer: d.icmp, }) iter = mem.Find(nil, nil) iter.Next() meta.smallest = internalKey(iter.Key()).clone() for { meta.largest = iter.Key() if err1 := tw.Set(meta.largest, iter.Value(), nil); err1 != nil { return fileMetadata{}, err1 } if !iter.Next() { break } } meta.largest = meta.largest.clone() if err1 := iter.Close(); err1 != nil { iter = nil return fileMetadata{}, err1 } iter = nil if err1 := tw.Close(); err1 != nil { tw = nil return fileMetadata{}, err1 } tw = nil // TODO: currently, closing a table.Writer closes its underlying file. // We have to re-open the file to Sync or Stat it, which seems stupid. file, err = fs.Open(filename) if err != nil { return fileMetadata{}, err } if err1 := file.Sync(); err1 != nil { return fileMetadata{}, err1 } if stat, err1 := file.Stat(); err1 != nil { return fileMetadata{}, err1 } else { size := stat.Size() if size < 0 { return fileMetadata{}, fmt.Errorf("leveldb: table file %q has negative size %d", filename, size) } meta.size = uint64(size) } // TODO: compaction stats. return meta, nil }<|fim▁end|>
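In replayLogFile above, each internal key is rebuilt by copying the user key into ikeyBuf and calling ikey.encodeTrailer(kind, seqNum) to append an 8-byte trailer; the long comment explains why that intermediate copy exists at all. A sketch of the trailer packing, assuming LevelDB's standard layout of (sequence << 8) | kind serialized as a little-endian uint64; the helper name and kind constants here are illustrative, not exports of this package:

import struct

def encode_internal_key(user_key: bytes, seq_num: int, kind: int) -> bytes:
    # Trailer packs the 56-bit sequence number above the 8-bit record kind
    # (in LevelDB, kind 0 is a deletion and kind 1 is a set/value record).
    return user_key + struct.pack('<Q', (seq_num << 8) | kind)

ikey = encode_internal_key(b'answer', seq_num=42, kind=1)
print(ikey.hex())  # 616e73776572012a000000000000 -> b'answer' + LE trailer 0x2a01

Ordering these keys by user key ascending and sequence number descending is what lets a memtable shadow older values for the same key with newer ones.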
<|file_name|>qgl_x11.cpp<|end_file_name|><|fim▁begin|>/**************************************************************************** ** ** Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies). ** Contact: http://www.qt-project.org/legal ** ** This file is part of the QtOpenGL module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and Digia. For licensing terms and ** conditions see http://qt.digia.com/licensing. For further information ** use the contact form at http://qt.digia.com/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 2.1 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 2.1 requirements ** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Digia gives you certain additional ** rights. These rights are described in the Digia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 3.0 as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL included in the ** packaging of this file. Please review the following information to ** ensure the GNU General Public License version 3.0 requirements will be ** met: http://www.gnu.org/copyleft/gpl.html. 
** ** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qgl.h" #include "qgl_p.h" #include "qmap.h" #include "qapplication.h" #include "qcolormap.h" #include "qdesktopwidget.h" #include "qpixmap.h" #include "qhash.h" #include "qlibrary.h" #include "qdebug.h" #include <private/qfontengine_ft_p.h> #include <private/qt_x11_p.h> #include <private/qpixmap_x11_p.h> #include <private/qimagepixmapcleanuphooks_p.h> #include <private/qunicodetables_p.h> #ifdef Q_OS_HPUX // for GLXPBuffer #include <private/qglpixelbuffer_p.h> #endif // We always define GLX_EXT_texture_from_pixmap ourselves because // we can't trust system headers to do it properly #define GLX_EXT_texture_from_pixmap 1 #define INT8 dummy_INT8 #define INT32 dummy_INT32 #include <GL/glx.h> #undef INT8 #undef INT32 #include <X11/Xlib.h> #include <X11/Xutil.h> #include <X11/Xos.h> #ifdef Q_OS_VXWORKS # ifdef open # undef open # endif # ifdef getpid # undef getpid # endif #endif // Q_OS_VXWORKS #include <X11/Xatom.h> #if defined(Q_OS_LINUX) || defined(Q_OS_BSD4) #include <dlfcn.h> #endif QT_BEGIN_NAMESPACE extern Drawable qt_x11Handle(const QPaintDevice *pd); extern const QX11Info *qt_x11Info(const QPaintDevice *pd); #ifndef GLX_ARB_multisample #define GLX_SAMPLE_BUFFERS_ARB 100000 #define GLX_SAMPLES_ARB 100001 #endif #ifndef GLX_TEXTURE_2D_BIT_EXT #define GLX_TEXTURE_2D_BIT_EXT 0x00000002 #define GLX_TEXTURE_RECTANGLE_BIT_EXT 0x00000004 #define GLX_BIND_TO_TEXTURE_RGB_EXT 0x20D0 #define GLX_BIND_TO_TEXTURE_RGBA_EXT 0x20D1 #define GLX_BIND_TO_MIPMAP_TEXTURE_EXT 0x20D2 #define GLX_BIND_TO_TEXTURE_TARGETS_EXT 0x20D3 #define GLX_Y_INVERTED_EXT 0x20D4 #define GLX_TEXTURE_FORMAT_EXT 0x20D5 #define GLX_TEXTURE_TARGET_EXT 0x20D6 #define GLX_MIPMAP_TEXTURE_EXT 0x20D7 #define GLX_TEXTURE_FORMAT_NONE_EXT 0x20D8 #define GLX_TEXTURE_FORMAT_RGB_EXT 0x20D9 #define GLX_TEXTURE_FORMAT_RGBA_EXT 0x20DA #define GLX_TEXTURE_2D_EXT 0x20DC #define GLX_TEXTURE_RECTANGLE_EXT 0x20DD #define GLX_FRONT_LEFT_EXT 0x20DE #endif #ifndef GLX_ARB_create_context #define GLX_CONTEXT_DEBUG_BIT_ARB 0x00000001 #define GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002 #define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091 #define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092 #define GLX_CONTEXT_FLAGS_ARB 0x2094 #endif #ifndef GLX_ARB_create_context_profile #define GLX_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001 #define GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB 0x00000002 #define GLX_CONTEXT_PROFILE_MASK_ARB 0x9126 #endif /* The qt_gl_choose_cmap function is internal and used by QGLWidget::setContext() and GLX (not Windows). If the application can't find any sharable colormaps, it must at least create as few colormaps as possible. The dictionary solution below ensures only one colormap is created per visual. Colormaps are also deleted when the application terminates.
*/ struct QCMapEntry { QCMapEntry(); ~QCMapEntry(); Colormap cmap; bool alloc; XStandardColormap scmap; }; QCMapEntry::QCMapEntry() { cmap = 0; alloc = false; scmap.colormap = 0; } QCMapEntry::~QCMapEntry() { if (alloc) XFreeColormap(X11->display, cmap); } typedef QHash<int, QCMapEntry *> CMapEntryHash; typedef QHash<int, QMap<int, QRgb> > GLCMapHash; static bool mesa_gl = false; static bool first_time = true; static void cleanup_cmaps(); struct QGLCMapCleanupHandler { QGLCMapCleanupHandler() { cmap_hash = new CMapEntryHash; qglcmap_hash = new GLCMapHash; } ~QGLCMapCleanupHandler() { delete cmap_hash; delete qglcmap_hash; } CMapEntryHash *cmap_hash; GLCMapHash *qglcmap_hash; }; Q_GLOBAL_STATIC(QGLCMapCleanupHandler, cmap_handler) static void cleanup_cmaps() { CMapEntryHash *hash = cmap_handler()->cmap_hash; QHash<int, QCMapEntry *>::ConstIterator it = hash->constBegin(); while (it != hash->constEnd()) { delete it.value(); ++it; } hash->clear(); cmap_handler()->qglcmap_hash->clear(); } Colormap qt_gl_choose_cmap(Display *dpy, XVisualInfo *vi) { if (first_time) { const char *v = glXQueryServerString(dpy, vi->screen, GLX_VERSION); if (v) mesa_gl = (strstr(v, "Mesa") != 0); first_time = false; } CMapEntryHash *hash = cmap_handler()->cmap_hash; CMapEntryHash::ConstIterator it = hash->constFind((long) vi->visualid + (vi->screen * 256)); if (it != hash->constEnd()) return it.value()->cmap; // found colormap for visual if (vi->visualid == XVisualIDFromVisual((Visual *) QX11Info::appVisual(vi->screen))) { // qDebug("Using x11AppColormap"); return QX11Info::appColormap(vi->screen); } QCMapEntry *x = new QCMapEntry(); XStandardColormap *c; int n, i; // qDebug("Choosing cmap for vID %0x", vi->visualid); if (mesa_gl) { // we're using MesaGL Atom hp_cmaps = XInternAtom(dpy, "_HP_RGB_SMOOTH_MAP_LIST", true); if (hp_cmaps && vi->visual->c_class == TrueColor && vi->depth == 8) { if (XGetRGBColormaps(dpy,RootWindow(dpy,vi->screen),&c,&n, hp_cmaps)) { i = 0; while (i < n && x->cmap == 0) { if (c[i].visualid == vi->visual->visualid) { x->cmap = c[i].colormap; x->scmap = c[i]; //qDebug("Using HP_RGB scmap"); } i++; } XFree((char *)c); } } } if (!x->cmap) { if (XGetRGBColormaps(dpy,RootWindow(dpy,vi->screen),&c,&n, XA_RGB_DEFAULT_MAP)) { for (int i = 0; i < n && x->cmap == 0; ++i) { if (!c[i].red_max || !c[i].green_max || !c[i].blue_max || !c[i].red_mult || !c[i].green_mult || !c[i].blue_mult) continue; // invalid stdcmap if (c[i].visualid == vi->visualid) { x->cmap = c[i].colormap; x->scmap = c[i]; //qDebug("Using RGB_DEFAULT scmap"); } } XFree((char *)c); } } if (!x->cmap) { // no shared cmap found x->cmap = XCreateColormap(dpy, RootWindow(dpy,vi->screen), vi->visual, AllocNone); x->alloc = true; // qDebug("Allocating cmap"); } // colormap hash should be cleanup only when the QApplication dtor is called if (hash->isEmpty()) qAddPostRoutine(cleanup_cmaps); // associate cmap with visualid hash->insert((long) vi->visualid + (vi->screen * 256), x); return x->cmap; } struct QTransColor { VisualID vis; int screen; long color; }; static QVector<QTransColor> trans_colors; static int trans_colors_init = false; static void find_trans_colors() { struct OverlayProp { long visual; long type; long value; long layer; }; trans_colors_init = true; Display* appDisplay = X11->display; int scr; int lastsize = 0; for (scr = 0; scr < ScreenCount(appDisplay); scr++) { QWidget* rootWin = QApplication::desktop()->screen(scr); if (!rootWin) return; // Should not happen Atom overlayVisualsAtom = XInternAtom(appDisplay, 
"SERVER_OVERLAY_VISUALS", True); if (overlayVisualsAtom == XNone) return; // Server has no overlays Atom actualType; int actualFormat; ulong nItems; ulong bytesAfter; unsigned char *retval = 0; int res = XGetWindowProperty(appDisplay, rootWin->winId(), overlayVisualsAtom, 0, 10000, False, overlayVisualsAtom, &actualType, &actualFormat, &nItems, &bytesAfter, &retval); if (res != Success || actualType != overlayVisualsAtom || actualFormat != 32 || nItems < 4 || !retval) return; // Error reading property OverlayProp *overlayProps = (OverlayProp *)retval; int numProps = nItems / 4; trans_colors.resize(lastsize + numProps); int j = lastsize; for (int i = 0; i < numProps; i++) { if (overlayProps[i].type == 1) { trans_colors[j].vis = (VisualID)overlayProps[i].visual; trans_colors[j].screen = scr; trans_colors[j].color = (int)overlayProps[i].value; j++; } } XFree(overlayProps); lastsize = j; trans_colors.resize(lastsize); } } /***************************************************************************** QGLFormat UNIX/GLX-specific code *****************************************************************************/ void* qglx_getProcAddress(const char* procName) { // On systems where the GL driver is pluggable (like Mesa), we have to use // the glXGetProcAddressARB extension to resolve other function pointers as // the symbols wont be in the GL library, but rather in a plugin loaded by // the GL library. typedef void* (*qt_glXGetProcAddressARB)(const char *); static qt_glXGetProcAddressARB glXGetProcAddressARB = 0; static bool triedResolvingGlxGetProcAddress = false; if (!triedResolvingGlxGetProcAddress) { triedResolvingGlxGetProcAddress = true; QGLExtensionMatcher extensions(glXGetClientString(QX11Info::display(), GLX_EXTENSIONS)); if (extensions.match("GLX_ARB_get_proc_address")) { #if defined(Q_OS_LINUX) || defined(Q_OS_BSD4) void *handle = dlopen(NULL, RTLD_LAZY); if (handle) { glXGetProcAddressARB = (qt_glXGetProcAddressARB) dlsym(handle, "glXGetProcAddressARB"); dlclose(handle); } if (!glXGetProcAddressARB) #endif { #if !defined(QT_NO_LIBRARY) extern const QString qt_gl_library_name(); QLibrary lib(qt_gl_library_name()); lib.setLoadHints(QLibrary::ImprovedSearchHeuristics); glXGetProcAddressARB = (qt_glXGetProcAddressARB) lib.resolve("glXGetProcAddressARB"); #endif } } } void *procAddress = 0; if (glXGetProcAddressARB) procAddress = glXGetProcAddressARB(procName); // If glXGetProcAddress didn't work, try looking the symbol up in the GL library #if defined(Q_OS_LINUX) || defined(Q_OS_BSD4) if (!procAddress) { void *handle = dlopen(NULL, RTLD_LAZY); if (handle) { procAddress = dlsym(handle, procName); dlclose(handle); } } #endif #if !defined(QT_NO_LIBRARY) if (!procAddress) { extern const QString qt_gl_library_name(); QLibrary lib(qt_gl_library_name()); lib.setLoadHints(QLibrary::ImprovedSearchHeuristics); procAddress = lib.resolve(procName); } #endif return procAddress; } bool QGLFormat::hasOpenGL() { return glXQueryExtension(X11->display, 0, 0) != 0; } bool QGLFormat::hasOpenGLOverlays() { if (!trans_colors_init) find_trans_colors(); return trans_colors.size() > 0; } static bool buildSpec(int* spec, const QGLFormat& f, QPaintDevice* paintDevice, int bufDepth, bool onlyFBConfig = false) { int i = 0; spec[i++] = GLX_LEVEL; spec[i++] = f.plane(); const QX11Info *xinfo = qt_x11Info(paintDevice); bool useFBConfig = onlyFBConfig; #if defined(GLX_VERSION_1_3) && !defined(QT_NO_XRENDER) && !defined(Q_OS_HPUX) /* HPUX defines GLX_VERSION_1_3 but does not implement the corresponding functions. 
Specifically glXChooseFBConfig and glXGetVisualFromFBConfig are not implemented. */ QWidget* widget = 0; if (paintDevice->devType() == QInternal::Widget) widget = static_cast<QWidget*>(paintDevice); // Only use glXChooseFBConfig for widgets if we're trying to get an ARGB visual if (widget && widget->testAttribute(Qt::WA_TranslucentBackground) && X11->use_xrender) useFBConfig = true; #endif #if defined(GLX_VERSION_1_1) && defined(GLX_EXT_visual_info) static bool useTranspExt = false; static bool useTranspExtChecked = false; if (f.plane() && !useTranspExtChecked && paintDevice) { QGLExtensionMatcher extensions(glXQueryExtensionsString(xinfo->display(), xinfo->screen())); useTranspExt = extensions.match("GLX_EXT_visual_info"); //# (A bit simplistic; that could theoretically be a substring) if (useTranspExt) { QByteArray cstr(glXGetClientString(xinfo->display(), GLX_VENDOR)); useTranspExt = !cstr.contains("Xi Graphics"); // bug workaround if (useTranspExt) { // bug workaround - some systems (e.g. FireGL) refuse to return an overlay // visual if the GLX_TRANSPARENT_TYPE_EXT attribute is specified, even if // the implementation supports transparent overlays int tmpSpec[] = { GLX_LEVEL, f.plane(), GLX_TRANSPARENT_TYPE_EXT, f.rgba() ? GLX_TRANSPARENT_RGB_EXT : GLX_TRANSPARENT_INDEX_EXT, XNone }; XVisualInfo * vinf = glXChooseVisual(xinfo->display(), xinfo->screen(), tmpSpec); if (!vinf) { useTranspExt = false; } } } useTranspExtChecked = true; } if (f.plane() && useTranspExt && !useFBConfig) { // Required to avoid non-transparent overlay visual(!) on some systems spec[i++] = GLX_TRANSPARENT_TYPE_EXT; spec[i++] = f.rgba() ? GLX_TRANSPARENT_RGB_EXT : GLX_TRANSPARENT_INDEX_EXT; } #endif #if defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) // GLX_RENDER_TYPE is only in glx >=1.3 if (useFBConfig) { spec[i++] = GLX_RENDER_TYPE; spec[i++] = f.rgba() ? GLX_RGBA_BIT : GLX_COLOR_INDEX_BIT; } #endif if (f.doubleBuffer()) spec[i++] = GLX_DOUBLEBUFFER; if (useFBConfig) spec[i++] = True; if (f.depth()) { spec[i++] = GLX_DEPTH_SIZE; spec[i++] = f.depthBufferSize() == -1 ? 1 : f.depthBufferSize(); } if (f.stereo()) { spec[i++] = GLX_STEREO; if (useFBConfig) spec[i++] = True; } if (f.stencil()) { spec[i++] = GLX_STENCIL_SIZE; spec[i++] = f.stencilBufferSize() == -1 ? 1 : f.stencilBufferSize(); } if (f.rgba()) { if (!useFBConfig) spec[i++] = GLX_RGBA; spec[i++] = GLX_RED_SIZE; spec[i++] = f.redBufferSize() == -1 ? 1 : f.redBufferSize(); spec[i++] = GLX_GREEN_SIZE; spec[i++] = f.greenBufferSize() == -1 ? 1 : f.greenBufferSize(); spec[i++] = GLX_BLUE_SIZE; spec[i++] = f.blueBufferSize() == -1 ? 1 : f.blueBufferSize(); if (f.alpha()) { spec[i++] = GLX_ALPHA_SIZE; spec[i++] = f.alphaBufferSize() == -1 ? 1 : f.alphaBufferSize(); } if (f.accum()) { spec[i++] = GLX_ACCUM_RED_SIZE; spec[i++] = f.accumBufferSize() == -1 ? 1 : f.accumBufferSize(); spec[i++] = GLX_ACCUM_GREEN_SIZE; spec[i++] = f.accumBufferSize() == -1 ? 1 : f.accumBufferSize(); spec[i++] = GLX_ACCUM_BLUE_SIZE; spec[i++] = f.accumBufferSize() == -1 ? 1 : f.accumBufferSize(); if (f.alpha()) { spec[i++] = GLX_ACCUM_ALPHA_SIZE; spec[i++] = f.accumBufferSize() == -1 ? 1 : f.accumBufferSize(); } } } else { spec[i++] = GLX_BUFFER_SIZE; spec[i++] = bufDepth; } if (f.sampleBuffers()) { spec[i++] = GLX_SAMPLE_BUFFERS_ARB; spec[i++] = 1; spec[i++] = GLX_SAMPLES_ARB; spec[i++] = f.samples() == -1 ?
4 : f.samples(); } #if defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) if (useFBConfig) { spec[i++] = GLX_DRAWABLE_TYPE; switch(paintDevice->devType()) { case QInternal::Pixmap: spec[i++] = GLX_PIXMAP_BIT; break; case QInternal::Pbuffer: spec[i++] = GLX_PBUFFER_BIT; break; default: qWarning("QGLContext: Unknown paint device type %d", paintDevice->devType()); // Fall-through & assume it's a window case QInternal::Widget: spec[i++] = GLX_WINDOW_BIT; break; }; } #endif spec[i] = XNone; return useFBConfig; } /***************************************************************************** QGLContext UNIX/GLX-specific code *****************************************************************************/ bool QGLContext::chooseContext(const QGLContext* shareContext) { Q_D(QGLContext); const QX11Info *xinfo = qt_x11Info(d->paintDevice); Display* disp = xinfo->display(); d->vi = chooseVisual(); if (!d->vi) return false; if (deviceIsPixmap() && (((XVisualInfo*)d->vi)->depth != xinfo->depth() || ((XVisualInfo*)d->vi)->screen != xinfo->screen())) { XFree(d->vi); XVisualInfo appVisInfo; memset(&appVisInfo, 0, sizeof(XVisualInfo)); appVisInfo.visualid = XVisualIDFromVisual((Visual *) xinfo->visual()); appVisInfo.screen = xinfo->screen(); int nvis; d->vi = XGetVisualInfo(disp, VisualIDMask | VisualScreenMask, &appVisInfo, &nvis); if (!d->vi) return false; int useGL; glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_USE_GL, &useGL); if (!useGL) return false; //# Chickening out already... } int res; glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_LEVEL, &res); d->glFormat.setPlane(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_DOUBLEBUFFER, &res); d->glFormat.setDoubleBuffer(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_DEPTH_SIZE, &res); d->glFormat.setDepth(res); if (d->glFormat.depth()) d->glFormat.setDepthBufferSize(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_RGBA, &res); d->glFormat.setRgba(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_RED_SIZE, &res); d->glFormat.setRedBufferSize(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_GREEN_SIZE, &res); d->glFormat.setGreenBufferSize(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_BLUE_SIZE, &res); d->glFormat.setBlueBufferSize(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_ALPHA_SIZE, &res); d->glFormat.setAlpha(res); if (d->glFormat.alpha()) d->glFormat.setAlphaBufferSize(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_ACCUM_RED_SIZE, &res); d->glFormat.setAccum(res); if (d->glFormat.accum()) d->glFormat.setAccumBufferSize(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_STENCIL_SIZE, &res); d->glFormat.setStencil(res); if (d->glFormat.stencil()) d->glFormat.setStencilBufferSize(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_STEREO, &res); d->glFormat.setStereo(res); glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_SAMPLE_BUFFERS_ARB, &res); d->glFormat.setSampleBuffers(res); if (d->glFormat.sampleBuffers()) { glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_SAMPLES_ARB, &res); d->glFormat.setSamples(res); } Bool direct = format().directRendering() ? True : False; if (shareContext && (!shareContext->isValid() || !shareContext->d_func()->cx)) { qWarning("QGLContext::chooseContext(): Cannot share with invalid context"); shareContext = 0; } // 1. Sharing between rgba and color-index will give wrong colors. // 2. Contexts cannot be shared btw. direct/non-direct renderers. // 3. Pixmaps cannot share contexts that are set up for direct rendering. // 4. 
If the contexts are not created on the same screen, they can't be shared if (shareContext && (format().rgba() != shareContext->format().rgba() || (deviceIsPixmap() && glXIsDirect(disp, (GLXContext)shareContext->d_func()->cx)) || (shareContext->d_func()->screen != xinfo->screen()))) { shareContext = 0; } const int major = d->reqFormat.majorVersion(); const int minor = d->reqFormat.minorVersion(); const int profile = d->reqFormat.profile() == QGLFormat::CompatibilityProfile ? GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB : GLX_CONTEXT_CORE_PROFILE_BIT_ARB; d->cx = 0; #if defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) /* HPUX defines GLX_VERSION_1_3 but does not implement the corresponding functions. Specifically glXChooseFBConfig and glXGetVisualFromFBConfig are not implemented. */ if ((major == 3 && minor >= 2) || major > 3) { QGLTemporaryContext *tmpContext = 0; if (!QGLContext::currentContext()) tmpContext = new QGLTemporaryContext; int attributes[] = { GLX_CONTEXT_MAJOR_VERSION_ARB, major, GLX_CONTEXT_MINOR_VERSION_ARB, minor, GLX_CONTEXT_PROFILE_MASK_ARB, profile, 0 }; typedef GLXContext ( * Q_PFNGLXCREATECONTEXTATTRIBSARBPROC) (Display* dpy, GLXFBConfig config, GLXContext share_context, Bool direct, const int *attrib_list); Q_PFNGLXCREATECONTEXTATTRIBSARBPROC glXCreateContextAttribs = (Q_PFNGLXCREATECONTEXTATTRIBSARBPROC) qglx_getProcAddress("glXCreateContextAttribsARB"); if (glXCreateContextAttribs) { int spec[45]; glXGetConfig(disp, (XVisualInfo*)d->vi, GLX_BUFFER_SIZE, &res); buildSpec(spec, format(), d->paintDevice, res, true); GLXFBConfig *configs; int configCount = 0; configs = glXChooseFBConfig(disp, xinfo->screen(), spec, &configCount); if (configs && configCount > 0) { d->cx = glXCreateContextAttribs(disp, configs[0], shareContext ? (GLXContext)shareContext->d_func()->cx : 0, direct, attributes); if (!d->cx && shareContext) { shareContext = 0; d->cx = glXCreateContextAttribs(disp, configs[0], 0, direct, attributes); } d->screen = ((XVisualInfo*)d->vi)->screen; } XFree(configs); } else { qWarning("QGLContext::chooseContext(): OpenGL %d.%d is not supported", major, minor); } if (tmpContext) delete tmpContext; } #else Q_UNUSED(major); Q_UNUSED(minor); Q_UNUSED(profile); #endif if (!d->cx && shareContext) { d->cx = glXCreateContext(disp, (XVisualInfo *)d->vi, (GLXContext)shareContext->d_func()->cx, direct); d->screen = ((XVisualInfo*)d->vi)->screen; } if (!d->cx) { d->cx = glXCreateContext(disp, (XVisualInfo *)d->vi, NULL, direct); d->screen = ((XVisualInfo*)d->vi)->screen; shareContext = 0; } if (shareContext && d->cx) { QGLContext *share = const_cast<QGLContext *>(shareContext); d->sharing = true; share->d_func()->sharing = true; } if (!d->cx) return false; d->glFormat.setDirectRendering(glXIsDirect(disp, (GLXContext)d->cx)); if (deviceIsPixmap()) { #if defined(GLX_MESA_pixmap_colormap) && defined(QGL_USE_MESA_EXT) d->gpm = glXCreateGLXPixmapMESA(disp, (XVisualInfo *)d->vi, qt_x11Handle(d->paintDevice), qt_gl_choose_cmap(disp, (XVisualInfo *)d->vi)); #else d->gpm = (quint32)glXCreateGLXPixmap(disp, (XVisualInfo *)d->vi, qt_x11Handle(d->paintDevice)); #endif if (!d->gpm) return false; } QGLExtensionMatcher extensions(glXQueryExtensionsString(xinfo->display(), xinfo->screen())); if (extensions.match("GLX_SGI_video_sync")) { if (d->glFormat.swapInterval() == -1) d->glFormat.setSwapInterval(0); } else { d->glFormat.setSwapInterval(-1); } return true; } /* See qgl.cpp for qdoc comment. 
*/ void *QGLContext::chooseVisual() { Q_D(QGLContext); static const int bufDepths[] = { 8, 4, 2, 1 }; // Try 16, 12 also? //todo: if pixmap, also make sure that vi->depth == pixmap->depth void* vis = 0; int i = 0; bool fail = false; QGLFormat fmt = format(); bool tryDouble = !fmt.doubleBuffer(); // Some GL impls only have double bool triedDouble = false; bool triedSample = false; if (fmt.sampleBuffers()) fmt.setSampleBuffers(QGLExtensions::glExtensions() & QGLExtensions::SampleBuffers); while(!fail && !(vis = tryVisual(fmt, bufDepths[i]))) { if (!fmt.rgba() && bufDepths[i] > 1) { i++; continue; } if (tryDouble) { fmt.setDoubleBuffer(true); tryDouble = false; triedDouble = true; continue; } else if (triedDouble) { fmt.setDoubleBuffer(false); triedDouble = false; } if (!triedSample && fmt.sampleBuffers()) { fmt.setSampleBuffers(false); triedSample = true; continue; } if (fmt.stereo()) { fmt.setStereo(false); continue; } if (fmt.accum()) { fmt.setAccum(false); continue; } if (fmt.stencil()) { fmt.setStencil(false); continue; } if (fmt.alpha()) { fmt.setAlpha(false); continue; } if (fmt.depth()) { fmt.setDepth(false); continue; } if (fmt.doubleBuffer()) { fmt.setDoubleBuffer(false); continue; } fail = true; } d->glFormat = fmt; return vis; } /* See qgl.cpp for qdoc comment. */ void *QGLContext::tryVisual(const QGLFormat& f, int bufDepth) { Q_D(QGLContext); int spec[45]; const QX11Info *xinfo = qt_x11Info(d->paintDevice); bool useFBConfig = buildSpec(spec, f, d->paintDevice, bufDepth, false); XVisualInfo* chosenVisualInfo = 0; #if defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) while (useFBConfig) { GLXFBConfig *configs; int configCount = 0; configs = glXChooseFBConfig(xinfo->display(), xinfo->screen(), spec, &configCount); if (!configs) break; // fallback to trying glXChooseVisual for (int i = 0; i < configCount; ++i) { XVisualInfo* vi; vi = glXGetVisualFromFBConfig(xinfo->display(), configs[i]); if (!vi) continue; #if !defined(QT_NO_XRENDER) QWidget* w = 0; if (d->paintDevice->devType() == QInternal::Widget) w = static_cast<QWidget*>(d->paintDevice); if (w && w->testAttribute(Qt::WA_TranslucentBackground) && f.alpha()) { // Attempt to find a config whose visual has a proper alpha channel XRenderPictFormat *pictFormat; pictFormat = XRenderFindVisualFormat(xinfo->display(), vi->visual); if (pictFormat && (pictFormat->type == PictTypeDirect) && pictFormat->direct.alphaMask) { // The pict format for the visual matching the FBConfig indicates ARGB if (chosenVisualInfo) XFree(chosenVisualInfo); chosenVisualInfo = vi; break; } } else #endif //QT_NO_XRENDER if (chosenVisualInfo) { // If we've got a visual we can use and we're not trying to find one with a // real alpha channel, we might as well just use the one we've got break; } if (!chosenVisualInfo) chosenVisualInfo = vi; // Have something to fall back to else XFree(vi); } XFree(configs); break; } #endif // defined(GLX_VERSION_1_3) if (!chosenVisualInfo) chosenVisualInfo = glXChooseVisual(xinfo->display(), xinfo->screen(), spec); return chosenVisualInfo; } void QGLContext::reset() { Q_D(QGLContext); if (!d->valid) return; d->cleanup(); const QX11Info *xinfo = qt_x11Info(d->paintDevice); doneCurrent(); if (d->gpm) glXDestroyGLXPixmap(xinfo->display(), (GLXPixmap)d->gpm); d->gpm = 0; glXDestroyContext(xinfo->display(), (GLXContext)d->cx); if (d->vi) XFree(d->vi); d->vi = 0; d->cx = 0; d->crWin = false; d->sharing = false; d->valid = false; d->transpColor = QColor(); d->initDone = false; QGLContextGroup::removeShare(this); } void
QGLContext::makeCurrent() { Q_D(QGLContext); if (!d->valid) { qWarning("QGLContext::makeCurrent(): Cannot make invalid context current."); return; } const QX11Info *xinfo = qt_x11Info(d->paintDevice); bool ok = true; if (d->paintDevice->devType() == QInternal::Pixmap) { ok = glXMakeCurrent(xinfo->display(), (GLXPixmap)d->gpm, (GLXContext)d->cx); } else if (d->paintDevice->devType() == QInternal::Pbuffer) { ok = glXMakeCurrent(xinfo->display(), (GLXPbuffer)d->pbuf, (GLXContext)d->cx); } else if (d->paintDevice->devType() == QInternal::Widget) { ok = glXMakeCurrent(xinfo->display(), ((QWidget *)d->paintDevice)->internalWinId(), (GLXContext)d->cx); } if (!ok) qWarning("QGLContext::makeCurrent(): Failed."); if (ok) QGLContextPrivate::setCurrentContext(this); } void QGLContext::doneCurrent() { Q_D(QGLContext); glXMakeCurrent(qt_x11Info(d->paintDevice)->display(), 0, 0); QGLContextPrivate::setCurrentContext(0); } void QGLContext::swapBuffers() const { Q_D(const QGLContext); if (!d->valid) return; if (!deviceIsPixmap()) { int interval = d->glFormat.swapInterval(); if (interval > 0) { typedef int (*qt_glXGetVideoSyncSGI)(uint *); typedef int (*qt_glXWaitVideoSyncSGI)(int, int, uint *); static qt_glXGetVideoSyncSGI glXGetVideoSyncSGI = 0; static qt_glXWaitVideoSyncSGI glXWaitVideoSyncSGI = 0; static bool resolved = false; if (!resolved) { const QX11Info *xinfo = qt_x11Info(d->paintDevice); QGLExtensionMatcher extensions(glXQueryExtensionsString(xinfo->display(), xinfo->screen())); if (extensions.match("GLX_SGI_video_sync")) { glXGetVideoSyncSGI = (qt_glXGetVideoSyncSGI)qglx_getProcAddress("glXGetVideoSyncSGI"); glXWaitVideoSyncSGI = (qt_glXWaitVideoSyncSGI)qglx_getProcAddress("glXWaitVideoSyncSGI"); } resolved = true; } if (glXGetVideoSyncSGI && glXWaitVideoSyncSGI) { uint counter; if (!glXGetVideoSyncSGI(&counter)) glXWaitVideoSyncSGI(interval + 1, (counter + interval) % (interval + 1), &counter); } } glXSwapBuffers(qt_x11Info(d->paintDevice)->display(), static_cast<QWidget *>(d->paintDevice)->winId()); } } QColor QGLContext::overlayTransparentColor() const { if (isValid()) return Qt::transparent; return QColor(); // Invalid color } static uint qt_transparent_pixel(VisualID id, int screen) { for (int i = 0; i < trans_colors.size(); i++) { if (trans_colors[i].vis == id && trans_colors[i].screen == screen) return trans_colors[i].color; } return 0; } uint QGLContext::colorIndex(const QColor& c) const { Q_D(const QGLContext); int screen = ((XVisualInfo *)d->vi)->screen; QColormap colmap = QColormap::instance(screen); if (isValid()) { if (format().plane() && c == Qt::transparent) { return qt_transparent_pixel(((XVisualInfo *)d->vi)->visualid, ((XVisualInfo *)d->vi)->screen); } if (((XVisualInfo*)d->vi)->visualid == XVisualIDFromVisual((Visual *) QX11Info::appVisual(screen))) return colmap.pixel(c); // We're using QColor's cmap XVisualInfo *info = (XVisualInfo *) d->vi; CMapEntryHash *hash = cmap_handler()->cmap_hash; CMapEntryHash::ConstIterator it = hash->constFind(long(info->visualid) + (info->screen * 256)); QCMapEntry *x = 0; if (it != hash->constEnd()) x = it.value(); if (x && !x->alloc) { // It's a standard colormap int rf = (int)(((float)c.red() * (x->scmap.red_max+1))/256.0); int gf = (int)(((float)c.green() * (x->scmap.green_max+1))/256.0); int bf = (int)(((float)c.blue() * (x->scmap.blue_max+1))/256.0); uint p = x->scmap.base_pixel + (rf * x->scmap.red_mult) + (gf * x->scmap.green_mult) + (bf * x->scmap.blue_mult); return p; } else { QMap<int, QRgb> &cmap = 
(*cmap_handler()->qglcmap_hash)[(long)info->visualid]; // already in the map? QRgb target = c.rgb(); QMap<int, QRgb>::Iterator it = cmap.begin(); for (; it != cmap.end(); ++it) { if ((*it) == target) return it.key(); } // need to alloc color unsigned long plane_mask[2]; unsigned long color_map_entry; if (!XAllocColorCells (QX11Info::display(), x->cmap, true, plane_mask, 0, &color_map_entry, 1)) return colmap.pixel(c); XColor col; col.flags = DoRed | DoGreen | DoBlue; col.pixel = color_map_entry; col.red = (ushort)((qRed(c.rgb()) / 255.0) * 65535.0 + 0.5); col.green = (ushort)((qGreen(c.rgb()) / 255.0) * 65535.0 + 0.5); col.blue = (ushort)((qBlue(c.rgb()) / 255.0) * 65535.0 + 0.5); XStoreColor(QX11Info::display(), x->cmap, &col); cmap.insert(color_map_entry, target); return color_map_entry; } } return 0; } #ifndef QT_NO_FONTCONFIG /*! \internal This is basically a substitute for glxUseXFont() which can only handle XLFD fonts. This version relies on freetype to render the glyphs, but it works with all fonts that fontconfig provides - both antialiased and aliased bitmap and outline fonts. */ static void qgl_use_font(QFontEngineFT *engine, int first, int count, int listBase) { GLfloat color[4]; glGetFloatv(GL_CURRENT_COLOR, color); // save the pixel unpack state GLint gl_swapbytes, gl_lsbfirst, gl_rowlength, gl_skiprows, gl_skippixels, gl_alignment; glGetIntegerv (GL_UNPACK_SWAP_BYTES, &gl_swapbytes); glGetIntegerv (GL_UNPACK_LSB_FIRST, &gl_lsbfirst); glGetIntegerv (GL_UNPACK_ROW_LENGTH, &gl_rowlength); glGetIntegerv (GL_UNPACK_SKIP_ROWS, &gl_skiprows); glGetIntegerv (GL_UNPACK_SKIP_PIXELS, &gl_skippixels); glGetIntegerv (GL_UNPACK_ALIGNMENT, &gl_alignment); glPixelStorei(GL_UNPACK_SWAP_BYTES, GL_FALSE); glPixelStorei(GL_UNPACK_LSB_FIRST, GL_FALSE); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); glPixelStorei(GL_UNPACK_SKIP_ROWS, 0); glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); const bool antialiased = engine->drawAntialiased(); FT_Face face = engine->lockFace(); // start generating font glyphs for (int i = first; i < count; ++i) { int list = listBase + i; GLfloat x0, y0, dx, dy; FT_Error err; err = FT_Load_Glyph(face, FT_Get_Char_Index(face, i), FT_LOAD_DEFAULT); if (err) { qDebug("failed loading glyph %d from font", i); Q_ASSERT(!err); } err = FT_Render_Glyph(face->glyph, (antialiased ? FT_RENDER_MODE_NORMAL : FT_RENDER_MODE_MONO)); if (err) { qDebug("failed rendering glyph %d from font", i); Q_ASSERT(!err); } FT_Bitmap bm = face->glyph->bitmap; x0 = face->glyph->metrics.horiBearingX >> 6; y0 = (face->glyph->metrics.height - face->glyph->metrics.horiBearingY) >> 6; dx = face->glyph->metrics.horiAdvance >> 6; dy = 0; int sz = bm.pitch * bm.rows; uint *aa_glyph = 0; uchar *ua_glyph = 0; if (antialiased) aa_glyph = new uint[sz]; else ua_glyph = new uchar[sz]; // convert to GL format for (int y = 0; y < bm.rows; ++y) { for (int x = 0; x < bm.pitch; ++x) { int c1 = y*bm.pitch + x; int c2 = (bm.rows - y - 1) > 0 ? 
(bm.rows-y-1)*bm.pitch + x : x; if (antialiased) { aa_glyph[c1] = (int(color[0]*255) << 24) | (int(color[1]*255) << 16) | (int(color[2]*255) << 8) | bm.buffer[c2]; } else { ua_glyph[c1] = bm.buffer[c2]; } } } glNewList(list, GL_COMPILE); if (antialiased) { // calling glBitmap() is just a trick to move the current // raster pos, since glGet*() won't work in display lists glBitmap(0, 0, 0, 0, x0, -y0, 0); glDrawPixels(bm.pitch, bm.rows, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, aa_glyph); glBitmap(0, 0, 0, 0, dx-x0, y0, 0); } else { glBitmap(bm.pitch*8, bm.rows, -x0, y0, dx, dy, ua_glyph); } glEndList(); antialiased ? delete[] aa_glyph : delete[] ua_glyph; } engine->unlockFace(); // restore pixel unpack settings glPixelStorei(GL_UNPACK_SWAP_BYTES, gl_swapbytes); glPixelStorei(GL_UNPACK_LSB_FIRST, gl_lsbfirst); glPixelStorei(GL_UNPACK_ROW_LENGTH, gl_rowlength); glPixelStorei(GL_UNPACK_SKIP_ROWS, gl_skiprows); glPixelStorei(GL_UNPACK_SKIP_PIXELS, gl_skippixels); glPixelStorei(GL_UNPACK_ALIGNMENT, gl_alignment); } #endif #undef d void QGLContext::generateFontDisplayLists(const QFont & fnt, int listBase) { QFont f(fnt); QFontEngine *engine = f.d->engineForScript(QUnicodeTables::Common); if (engine->type() == QFontEngine::Multi) engine = static_cast<QFontEngineMulti *>(engine)->engine(0); #ifndef QT_NO_FONTCONFIG if(engine->type() == QFontEngine::Freetype) { qgl_use_font(static_cast<QFontEngineFT *>(engine), 0, 256, listBase); return; } #endif // glXUseXFont() only works with XLFD font structures and a few GL // drivers crash if 0 is passed as the font handle f.setStyleStrategy(QFont::OpenGLCompatible); if (f.handle() && engine->type() == QFontEngine::XLFD) glXUseXFont(static_cast<Font>(f.handle()), 0, 256, listBase); } void *QGLContext::getProcAddress(const QString &proc) const { typedef void *(*qt_glXGetProcAddressARB)(const GLubyte *); static qt_glXGetProcAddressARB glXGetProcAddressARB = 0; static bool resolved = false; if (resolved && !glXGetProcAddressARB) return 0; if (!glXGetProcAddressARB) { QGLExtensionMatcher extensions(glXGetClientString(QX11Info::display(), GLX_EXTENSIONS)); if (extensions.match("GLX_ARB_get_proc_address")) { #if defined(Q_OS_LINUX) || defined(Q_OS_BSD4) void *handle = dlopen(NULL, RTLD_LAZY); if (handle) { glXGetProcAddressARB = (qt_glXGetProcAddressARB) dlsym(handle, "glXGetProcAddressARB"); dlclose(handle); } if (!glXGetProcAddressARB) #endif { #if !defined(QT_NO_LIBRARY) extern const QString qt_gl_library_name(); QLibrary lib(qt_gl_library_name()); lib.setLoadHints(QLibrary::ImprovedSearchHeuristics); glXGetProcAddressARB = (qt_glXGetProcAddressARB) lib.resolve("glXGetProcAddressARB"); #endif } } resolved = true; } if (!glXGetProcAddressARB) return 0; return glXGetProcAddressARB(reinterpret_cast<const GLubyte *>(proc.toLatin1().data())); } /* QGLTemporaryContext implementation */ class QGLTemporaryContextPrivate { public: bool initialized; Window drawable; GLXContext context; GLXDrawable oldDrawable; GLXContext oldContext; }; QGLTemporaryContext::QGLTemporaryContext(bool, QWidget *) : d(new QGLTemporaryContextPrivate) { d->initialized = false; d->oldDrawable = 0; d->oldContext = 0; int screen = 0; int attribs[] = {GLX_RGBA, XNone}; XVisualInfo *vi = glXChooseVisual(X11->display, screen, attribs); if (!vi) { qWarning("QGLTempContext: No GL capable X visuals available."); return; } int useGL; glXGetConfig(X11->display, vi, GLX_USE_GL, &useGL); if (!useGL) { XFree(vi); return; } d->oldDrawable = glXGetCurrentDrawable(); d->oldContext = glXGetCurrentContext(); 
XSetWindowAttributes a; a.colormap = qt_gl_choose_cmap(X11->display, vi); d->drawable = XCreateWindow(X11->display, RootWindow(X11->display, screen), 0, 0, 1, 1, 0, vi->depth, InputOutput, vi->visual, CWColormap, &a); d->context = glXCreateContext(X11->display, vi, 0, True); if (d->context && glXMakeCurrent(X11->display, d->drawable, d->context)) { d->initialized = true; } else { qWarning("QGLTempContext: Unable to create GL context."); XDestroyWindow(X11->display, d->drawable); } XFree(vi); } QGLTemporaryContext::~QGLTemporaryContext() { if (d->initialized) { glXMakeCurrent(X11->display, 0, 0); glXDestroyContext(X11->display, d->context); XDestroyWindow(X11->display, d->drawable); } if (d->oldDrawable && d->oldContext) glXMakeCurrent(X11->display, d->oldDrawable, d->oldContext); } /***************************************************************************** QGLOverlayWidget (Internal overlay class for X11) *****************************************************************************/ class QGLOverlayWidget : public QGLWidget { Q_OBJECT public: QGLOverlayWidget(const QGLFormat& format, QGLWidget* parent, const QGLWidget* shareWidget=0); protected: void initializeGL(); void paintGL(); void resizeGL(int w, int h); bool x11Event(XEvent *e) { return realWidget->x11Event(e); } private: QGLWidget* realWidget; private: Q_DISABLE_COPY(QGLOverlayWidget) }; QGLOverlayWidget::QGLOverlayWidget(const QGLFormat& format, QGLWidget* parent, const QGLWidget* shareWidget) : QGLWidget(format, parent, shareWidget ? shareWidget->d_func()->olw : 0) { setAttribute(Qt::WA_X11OpenGLOverlay); realWidget = parent; } void QGLOverlayWidget::initializeGL() { QColor transparentColor = context()->overlayTransparentColor(); if (transparentColor.isValid()) qglClearColor(transparentColor); else qWarning("QGLOverlayWidget::initializeGL(): Could not get transparent color"); realWidget->initializeOverlayGL(); } void QGLOverlayWidget::resizeGL(int w, int h) { glViewport(0, 0, w, h); realWidget->resizeOverlayGL(w, h); } void QGLOverlayWidget::paintGL() { realWidget->paintOverlayGL(); } #undef Bool QT_BEGIN_INCLUDE_NAMESPACE #include "qgl_x11.moc" QT_END_INCLUDE_NAMESPACE /***************************************************************************** QGLWidget UNIX/GLX-specific code *****************************************************************************/ void QGLWidgetPrivate::init(QGLContext *context, const QGLWidget *shareWidget) { Q_Q(QGLWidget); initContext(context, shareWidget); olw = 0; if (q->isValid() && context->format().hasOverlay()) { QString olwName = q->objectName(); olwName += QLatin1String("-QGL_internal_overlay_widget"); olw = new QGLOverlayWidget(QGLFormat::defaultOverlayFormat(), q, shareWidget); olw->setObjectName(olwName); if (olw->isValid()) { olw->setAutoBufferSwap(false); olw->setFocusProxy(q); } else { delete olw; olw = 0; glcx->d_func()->glFormat.setOverlay(false); } } } bool QGLWidgetPrivate::renderCxPm(QPixmap* pm) { Q_Q(QGLWidget); if (((XVisualInfo*)glcx->d_func()->vi)->depth != pm->depth()) return false; GLXPixmap glPm; #if defined(GLX_MESA_pixmap_colormap) && defined(QGL_USE_MESA_EXT) glPm = glXCreateGLXPixmapMESA(X11->display, (XVisualInfo*)glcx->vi, (Pixmap)pm->handle(), qt_gl_choose_cmap(pm->X11->display, (XVisualInfo*)glcx->vi)); #else glPm = (quint32)glXCreateGLXPixmap(X11->display, (XVisualInfo*)glcx->d_func()->vi, (Pixmap)pm->handle()); #endif if (!glXMakeCurrent(X11->display, glPm, (GLXContext)glcx->d_func()->cx)) { glXDestroyGLXPixmap(X11->display, glPm); return false; } 
glDrawBuffer(GL_FRONT); if (!glcx->initialized()) q->glInit(); q->resizeGL(pm->width(), pm->height()); q->paintGL(); glFlush(); q->makeCurrent(); glXDestroyGLXPixmap(X11->display, glPm); q->resizeGL(q->width(), q->height()); return true; } void QGLWidgetPrivate::cleanupColormaps() { if (!cmap.handle()) { return; } else { XFreeColormap(X11->display, (Colormap) cmap.handle()); cmap.setHandle(0); } } void QGLWidget::setMouseTracking(bool enable) { Q_D(QGLWidget); if (d->olw) d->olw->setMouseTracking(enable); QWidget::setMouseTracking(enable); } void QGLWidget::resizeEvent(QResizeEvent *) { Q_D(QGLWidget); if (!isValid()) return; makeCurrent(); if (!d->glcx->initialized()) glInit(); glXWaitX(); resizeGL(width(), height()); if (d->olw) d->olw->setGeometry(rect()); } const QGLContext* QGLWidget::overlayContext() const { Q_D(const QGLWidget); if (d->olw) return d->olw->context(); else return 0; } void QGLWidget::makeOverlayCurrent() { Q_D(QGLWidget); if (d->olw) d->olw->makeCurrent(); } void QGLWidget::updateOverlayGL() { Q_D(QGLWidget); if (d->olw) d->olw->updateGL(); } /*! \internal Sets a new QGLContext, \a context, for this QGLWidget, using the shared context, \a shareContext. If \a deleteOldContext is true, the original context is deleted; otherwise it is overridden. */ void QGLWidget::setContext(QGLContext *context, const QGLContext* shareContext, bool deleteOldContext) { Q_D(QGLWidget); if (context == 0) { qWarning("QGLWidget::setContext: Cannot set null context"); return; } if (!context->deviceIsPixmap() && context->device() != this) { qWarning("QGLWidget::setContext: Context must refer to this widget"); return; } if (d->glcx) d->glcx->doneCurrent(); QGLContext* oldcx = d->glcx; d->glcx = context; if (parentWidget()) { // force creation of delay-created widgets parentWidget()->winId(); if (parentWidget()->x11Info().screen() != x11Info().screen()) d_func()->xinfo = parentWidget()->d_func()->xinfo; } // If the application has set WA_TranslucentBackground and not explicitly set // the alpha buffer size to zero, modify the format so it has an alpha channel QGLFormat& fmt = d->glcx->d_func()->glFormat; if (testAttribute(Qt::WA_TranslucentBackground) && fmt.alphaBufferSize() == -1) fmt.setAlphaBufferSize(1); bool createFailed = false; if (!d->glcx->isValid()) { if (!d->glcx->create(shareContext ?
shareContext : oldcx)) createFailed = true; } if (createFailed) { if (deleteOldContext) delete oldcx; return; } if (d->glcx->windowCreated() || d->glcx->deviceIsPixmap()) { if (deleteOldContext) delete oldcx; return; } bool visible = isVisible(); if (visible) hide(); XVisualInfo *vi = (XVisualInfo*)d->glcx->d_func()->vi; XSetWindowAttributes a; QColormap colmap = QColormap::instance(vi->screen); a.colormap = qt_gl_choose_cmap(QX11Info::display(), vi); // find best colormap a.background_pixel = colmap.pixel(palette().color(backgroundRole())); a.border_pixel = colmap.pixel(Qt::black); Window p = RootWindow(X11->display, vi->screen); if (parentWidget()) p = parentWidget()->winId(); Window w = XCreateWindow(X11->display, p, x(), y(), width(), height(), 0, vi->depth, InputOutput, vi->visual, CWBackPixel|CWBorderPixel|CWColormap, &a); Window *cmw; Window *cmwret; int count; if (XGetWMColormapWindows(X11->display, window()->winId(), &cmwret, &count)) { cmw = new Window[count+1]; memcpy((char *)cmw, (char *)cmwret, sizeof(Window)*count); XFree((char *)cmwret); int i; for (i=0; i<count; i++) { if (cmw[i] == winId()) { // replace old window cmw[i] = w; break; } } if (i >= count) // append new window cmw[count++] = w; } else { count = 1; cmw = new Window[count]; cmw[0] = w; } #if defined(GLX_MESA_release_buffers) && defined(QGL_USE_MESA_EXT) if (oldcx && oldcx->windowCreated()) glXReleaseBuffersMESA(X11->display, winId()); #endif if (deleteOldContext) delete oldcx; oldcx = 0; if (testAttribute(Qt::WA_WState_Created)) create(w); else d->createWinId(w); XSetWMColormapWindows(X11->display, window()->winId(), cmw, count); delete [] cmw; // calling QWidget::create() will always result in a new paint // engine being created - get rid of it and replace it with our // own if (visible) show(); XFlush(X11->display); d->glcx->setWindowCreated(true); } const QGLColormap & QGLWidget::colormap() const { Q_D(const QGLWidget); return d->cmap; } /*\internal Store color values in the given colormap. */ static void qStoreColors(QWidget * tlw, Colormap cmap, const QGLColormap & cols) { Q_UNUSED(tlw); XColor c; QRgb color; for (int i = 0; i < cols.size(); i++) { color = cols.entryRgb(i); c.pixel = i; c.red = (ushort)((qRed(color) / 255.0) * 65535.0 + 0.5); c.green = (ushort)((qGreen(color) / 255.0) * 65535.0 + 0.5); c.blue = (ushort)((qBlue(color) / 255.0) * 65535.0 + 0.5); c.flags = DoRed | DoGreen | DoBlue; XStoreColor(X11->display, cmap, &c); } } /*\internal Check whether the given visual supports dynamic colormaps or not. 
*/ static bool qCanAllocColors(QWidget * w) { bool validVisual = false; int numVisuals; long mask; XVisualInfo templ; XVisualInfo * visuals; VisualID id = XVisualIDFromVisual((Visual *) w->window()->x11Info().visual()); mask = VisualScreenMask; templ.screen = w->x11Info().screen(); visuals = XGetVisualInfo(X11->display, mask, &templ, &numVisuals); for (int i = 0; i < numVisuals; i++) { if (visuals[i].visualid == id) { switch (visuals[i].c_class) { case TrueColor: case StaticColor: case StaticGray: case XGrayScale: validVisual = false; break; case DirectColor: case PseudoColor:<|fim▁hole|> break; } } XFree(visuals); if (!validVisual) return false; return true; } void QGLWidget::setColormap(const QGLColormap & c) { Q_D(QGLWidget); QWidget * tlw = window(); // must return a valid widget d->cmap = c; if (!d->cmap.handle()) return; if (!qCanAllocColors(this)) { qWarning("QGLWidget::setColormap: Cannot create a read/write " "colormap for this visual"); return; } // If the child GL widget is not of the same visual class as the // toplevel widget we will get in trouble.. Window wid = tlw->winId(); Visual * vis = (Visual *) tlw->x11Info().visual(); VisualID cvId = XVisualIDFromVisual((Visual *) x11Info().visual()); VisualID tvId = XVisualIDFromVisual((Visual *) tlw->x11Info().visual()); if (cvId != tvId) { wid = winId(); vis = (Visual *) x11Info().visual(); } if (!d->cmap.handle()) // allocate a cmap if necessary d->cmap.setHandle(XCreateColormap(X11->display, wid, vis, AllocAll)); qStoreColors(this, (Colormap) d->cmap.handle(), c); XSetWindowColormap(X11->display, wid, (Colormap) d->cmap.handle()); // tell the wm that this window has a special colormap Window * cmw; Window * cmwret; int count; if (XGetWMColormapWindows(X11->display, tlw->winId(), &cmwret, &count)) { cmw = new Window[count+1]; memcpy((char *) cmw, (char *) cmwret, sizeof(Window) * count); XFree((char *) cmwret); int i; for (i = 0; i < count; i++) { if (cmw[i] == winId()) { break; } } if (i >= count) // append new window only if not in the list cmw[count++] = winId(); } else { count = 1; cmw = new Window[count]; cmw[0] = winId(); } XSetWMColormapWindows(X11->display, tlw->winId(), cmw, count); delete [] cmw; } // Solaris defines glXBindTexImageEXT as part of the GL library #if defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) typedef void (*qt_glXBindTexImageEXT)(Display*, GLXDrawable, int, const int*); typedef void (*qt_glXReleaseTexImageEXT)(Display*, GLXDrawable, int); static qt_glXBindTexImageEXT glXBindTexImageEXT = 0; static qt_glXReleaseTexImageEXT glXReleaseTexImageEXT = 0; static bool qt_resolveTextureFromPixmap(QPaintDevice *paintDevice) { static bool resolvedTextureFromPixmap = false; if (!resolvedTextureFromPixmap) { resolvedTextureFromPixmap = true; // Check to see if we have NPOT texture support if ( !(QGLExtensions::glExtensions() & QGLExtensions::NPOTTextures) && !(QGLFormat::openGLVersionFlags() & QGLFormat::OpenGL_Version_2_0)) { return false; // Can't use TFP without NPOT } const QX11Info *xinfo = qt_x11Info(paintDevice); Display *display = xinfo ? xinfo->display() : X11->display; int screen = xinfo ?
xinfo->screen() : X11->defaultScreen; QGLExtensionMatcher serverExtensions(glXQueryExtensionsString(display, screen)); QGLExtensionMatcher clientExtensions(glXGetClientString(display, GLX_EXTENSIONS)); if (serverExtensions.match("GLX_EXT_texture_from_pixmap") && clientExtensions.match("GLX_EXT_texture_from_pixmap")) { glXBindTexImageEXT = (qt_glXBindTexImageEXT) qglx_getProcAddress("glXBindTexImageEXT"); glXReleaseTexImageEXT = (qt_glXReleaseTexImageEXT) qglx_getProcAddress("glXReleaseTexImageEXT"); } } return glXBindTexImageEXT && glXReleaseTexImageEXT; } #endif //defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) QGLTexture *QGLContextPrivate::bindTextureFromNativePixmap(QPixmap *pixmap, const qint64 key, QGLContext::BindOptions options) { #if !defined(GLX_VERSION_1_3) || defined(Q_OS_HPUX) return 0; #else // Check we have GLX 1.3, as it is needed for glXCreatePixmap & glXDestroyPixmap int majorVersion = 0; int minorVersion = 0; glXQueryVersion(X11->display, &majorVersion, &minorVersion); if (majorVersion < 1 || (majorVersion == 1 && minorVersion < 3)) return 0; Q_Q(QGLContext); QX11PixmapData *pixmapData = static_cast<QX11PixmapData*>(pixmap->data_ptr().data()); Q_ASSERT(pixmapData->classId() == QPixmapData::X11Class); // We can't use TFP if the pixmap has a separate X11 mask if (pixmapData->x11_mask) return 0; if (!qt_resolveTextureFromPixmap(paintDevice)) return 0; const QX11Info &x11Info = pixmapData->xinfo; // Store the configs (Can be static because configs aren't dependent on current context) static GLXFBConfig glxRGBPixmapConfig = 0; static bool RGBConfigInverted = false; static GLXFBConfig glxRGBAPixmapConfig = 0; static bool RGBAConfigInverted = false; bool hasAlpha = pixmapData->hasAlphaChannel(); // Check to see if we need a config if ( (hasAlpha && !glxRGBAPixmapConfig) || (!hasAlpha && !glxRGBPixmapConfig) ) { GLXFBConfig *configList = 0; int configCount = 0; int configAttribs[] = { hasAlpha ? GLX_BIND_TO_TEXTURE_RGBA_EXT : GLX_BIND_TO_TEXTURE_RGB_EXT, True, GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT, GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT, // QGLContext::bindTexture() can't return an inverted texture, but QPainter::drawPixmap() can: GLX_Y_INVERTED_EXT, int(options & QGLContext::CanFlipNativePixmapBindOption ? GLX_DONT_CARE : False), XNone }; configList = glXChooseFBConfig(x11Info.display(), x11Info.screen(), configAttribs, &configCount); if (!configList) return 0; int yInv; glXGetFBConfigAttrib(x11Info.display(), configList[0], GLX_Y_INVERTED_EXT, &yInv); if (hasAlpha) { glxRGBAPixmapConfig = configList[0]; RGBAConfigInverted = yInv; } else { glxRGBPixmapConfig = configList[0]; RGBConfigInverted = yInv; } XFree(configList); } // Check to see if the surface is still valid if (pixmapData->gl_surface && hasAlpha != (pixmapData->flags & QX11PixmapData::GlSurfaceCreatedWithAlpha)) { // Surface is invalid! destroyGlSurfaceForPixmap(pixmapData); } // Check to see if we need a surface if (!pixmapData->gl_surface) { GLXPixmap glxPixmap; int pixmapAttribs[] = { GLX_TEXTURE_FORMAT_EXT, hasAlpha ? GLX_TEXTURE_FORMAT_RGBA_EXT : GLX_TEXTURE_FORMAT_RGB_EXT, GLX_TEXTURE_TARGET_EXT, GLX_TEXTURE_2D_EXT, GLX_MIPMAP_TEXTURE_EXT, False, // Maybe needs to be don't care XNone }; // Wrap the X Pixmap into a GLXPixmap: glxPixmap = glXCreatePixmap(x11Info.display(), hasAlpha ? 
glxRGBAPixmapConfig : glxRGBPixmapConfig, pixmapData->handle(), pixmapAttribs); if (!glxPixmap) return 0; pixmapData->gl_surface = (void*)glxPixmap; // Make sure the cleanup hook gets called so we can delete the glx pixmap QImagePixmapCleanupHooks::enableCleanupHooks(pixmapData); } GLuint textureId; glGenTextures(1, &textureId); glBindTexture(GL_TEXTURE_2D, textureId); glXBindTexImageEXT(x11Info.display(), (GLXPixmap)pixmapData->gl_surface, GLX_FRONT_LEFT_EXT, 0); glBindTexture(GL_TEXTURE_2D, textureId); GLuint filtering = (options & QGLContext::LinearFilteringBindOption) ? GL_LINEAR : GL_NEAREST; glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, filtering); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, filtering); if (!((hasAlpha && RGBAConfigInverted) || (!hasAlpha && RGBConfigInverted))) options &= ~QGLContext::InvertedYBindOption; QGLTexture *texture = new QGLTexture(q, textureId, GL_TEXTURE_2D, options); if (texture->options & QGLContext::InvertedYBindOption) pixmapData->flags |= QX11PixmapData::InvertedWhenBoundToTexture; // We assume the cost of bound pixmaps is zero QGLTextureCache::instance()->insert(q, key, texture, 0); return texture; #endif //!defined(GLX_VERSION_1_3) || defined(Q_OS_HPUX) } void QGLContextPrivate::destroyGlSurfaceForPixmap(QPixmapData* pmd) { #if defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) Q_ASSERT(pmd->classId() == QPixmapData::X11Class); QX11PixmapData *pixmapData = static_cast<QX11PixmapData*>(pmd); if (pixmapData->gl_surface) { glXDestroyPixmap(QX11Info::display(), (GLXPixmap)pixmapData->gl_surface); pixmapData->gl_surface = 0; } #endif } void QGLContextPrivate::unbindPixmapFromTexture(QPixmapData* pmd) { #if defined(GLX_VERSION_1_3) && !defined(Q_OS_HPUX) Q_ASSERT(pmd->classId() == QPixmapData::X11Class); Q_ASSERT(QGLContext::currentContext()); QX11PixmapData *pixmapData = static_cast<QX11PixmapData*>(pmd); if (pixmapData->gl_surface) glXReleaseTexImageEXT(QX11Info::display(), (GLXPixmap)pixmapData->gl_surface, GLX_FRONT_LEFT_EXT); #endif } QT_END_NAMESPACE<|fim▁end|>
validVisual = true; break; }
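A note on the record layout: each entry above packs one source file into a fill-in-the-middle (FIM) sample. The prompt runs from <|fim▁begin|> to <|fim▁end|> and contains a single <|fim▁hole|> where a span was masked out; the text after <|fim▁end|> (here, the PseudoColor branch of qCanAllocColors) is the completion that belongs in that hole. A minimal Python sketch of the reassembly, assuming exactly one hole per record — the helper name is made up for illustration:

# Splice a FIM record back into its original source file.
# Assumes the layout shown above: "<prompt><|fim▁end|><completion>"
# with exactly one "<|fim▁hole|>" inside the prompt.
HOLE = "<|fim▁hole|>"  # the block between "fim" and the keyword is U+2581
END = "<|fim▁end|>"

def reassemble(record):
    prompt, completion = record.split(END, 1)
    assert prompt.count(HOLE) == 1, "expected exactly one hole per record"
    return prompt.replace(HOLE, completion)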
<|file_name|>Domain.java<|end_file_name|><|fim▁begin|>/* * Copyright 2013 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package leap.orm.domain; import leap.lang.*; import leap.lang.annotation.Nullable; import leap.lang.enums.Bool; import leap.lang.expression.Expression; import leap.lang.jdbc.JdbcType; import leap.orm.generator.IdGenerator; import java.util.regex.Pattern; public class Domain implements Sourced,Named { private final Object source; private final String name; private final String defaultColumnName; private final JdbcType type; private final Integer length; private final Integer precision; private final Integer scale; private final Boolean nullable; private final String defaultValue; private final Boolean insert; private final Boolean update; private final Expression insertValue; private final Expression updateValue; private final Boolean filterable; private final Boolean sortable; private final Boolean filter; private final Expression filterValue; private final Expression filterIfValue; private final Float sortOrder; private final boolean autoMapping; private final IdGenerator idGenerator; public Domain(Object source, String name, String defaultColumnName, JdbcType type, Integer length, Integer precision, Integer scale, Boolean nullable, String defaultValue, Boolean insert, Expression insertValue, Boolean update, Expression updateValue, Boolean filterable, Boolean sortable, Boolean filter, Expression filterValue,Expression filterIfValue, Float sortOrder, boolean autoMapping, IdGenerator idGenerator) { Args.notEmpty(name,"name"); this.source = source; this.name = name; this.defaultColumnName = defaultColumnName; this.type = type; this.length = length; this.precision = precision; this.scale = scale; this.nullable = nullable; this.defaultValue = defaultValue; this.insert = insert; this.insertValue = insertValue; this.update = update; this.updateValue = updateValue; this.filterable = filterable; this.sortable = sortable; this.filter = filter; this.filterValue = filterValue; this.filterIfValue=filterIfValue; this.sortOrder = sortOrder; this.autoMapping = autoMapping; this.idGenerator = idGenerator; } @Override public Object getSource() { return source; } public String getName() { return name; } public String getDefaultColumnName() { return defaultColumnName; } public JdbcType getType() { return type; } public Integer getLength() { return length; } public Integer getPrecision() { return precision; } public Integer getScale() { return scale; } public Boolean getNullable() { return nullable; } public String getDefaultValue() { return defaultValue; } <|fim▁hole|> } public Boolean getInsert() { return insert; } public Boolean getUpdate() { return update; } public Expression getUpdateValue() { return updateValue; } public Boolean getFilterable() { return filterable; } public Boolean getSortable() { return sortable; } public Boolean getFilter() { return filter; } public Expression getFilterValue() { return filterValue; } public Float getSortOrder() { return 
sortOrder; } public boolean isAutoMapping() { return autoMapping; } public IdGenerator getIdGenerator() { return idGenerator; } @Override public String toString() { return "Domain : " + name; } public Expression getFilterIfValue() { return filterIfValue; } }<|fim▁end|>
public Expression getInsertValue() { return insertValue;
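The Java record shows that the masked span can stop mid-member: the completion ends inside getInsertValue(), and the closing brace that finishes the method sits in the prompt just after the hole, so the file is only syntactically whole after splicing. A cheap post-splice sanity check for brace-delimited languages is curly-brace balance — a heuristic only, since it ignores braces inside string literals and comments:

# Rough well-formedness check for a spliced Java/C++ record.
# Heuristic: does not parse strings or comments, only counts raw braces.
def braces_balanced(source):
    depth = 0
    for ch in source:
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth < 0:  # a '}' appeared before its matching '{'
                return False
    return depth == 0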
<|file_name|>dji2papywizard.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """ Copyright 2016 Alex Cortelyou Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import argparse import xml.etree.ElementTree as ET from datetime import datetime from glob import glob import exifread # pylint: disable=C0103 #arguments parser = argparse.ArgumentParser(description='Extracts DJI image metadata into a Papywizard panohead data file for use with stitching software', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('file', metavar='INPUT', nargs='+', help='image files to process') parser.add_argument('-t', dest='title', default='Untitled Panorama', help='title for panohead file') parser.add_argument('-c', dest='comment', default='Generated by ' + os.path.basename(__file__), help='comment for panohead file') parser.add_argument('-o', dest='output', default='pano.xml', help='filename for output') parser.add_argument('--no-bracket', dest='no_bracket', action='store_true', help='ignore hdr bracketing information') args = parser.parse_args() #constants djiDateFormat = "%Y:%m:%d %H:%M:%S" panDateFormat = "%Y-%m-%d_%Hh%Mm%Ss" tagTime = 'EXIF DateTimeOriginal' tagBias = 'EXIF ExposureBiasValue' tagFocal = 'EXIF FocalLengthIn35mmFilm' tagFile = 'Image ImageDescription'<|fim▁hole|>tagGimbalPitch = '{http://www.dji.com/drone-dji/1.0/}GimbalPitchDegree' tagGimbalRoll = '{http://www.dji.com/drone-dji/1.0/}GimbalRollDegree' tagGimbalYaw = '{http://www.dji.com/drone-dji/1.0/}GimbalYawDegree' #variables shots = [] brackets = {} startTime = None endTime = None focal = None #process files for a in args.file: for f in glob(a): #extract metadata handle = open(f, 'rb') exif = exifread.process_file(handle, debug=True) exif.update(ET.fromstring(exif[tagNotes].printable)[0][0].attrib) handle.close() #check times time = datetime.strptime(exif[tagTime].printable, djiDateFormat) if not startTime or startTime > time: startTime = time if not endTime or endTime < time: endTime = time #check bracket bias = str(exif[tagBias]) if bias not in brackets.keys(): brackets[bias] = len(brackets) + 1 #check focal if not focal: focal = exif[tagFocal] #add shot to list shots.append({ "time": time, "bracket": bias, "file": str(exif[tagFile]), "roll": str(exif[tagGimbalRoll]), "yaw": str(exif[tagGimbalYaw]), "pitch": str(exif[tagGimbalPitch]), }) #quit if nothing to output if len(shots) == 0: print "no shots found" quit() #sort the output by order taken shots = sorted(shots, key=lambda k: k['file']) #create panohead document root = ET.Element("papywizard", version="c") header = ET.SubElement(root, "header") general = ET.SubElement(header, "general") ET.SubElement(general, "title").text = args.title ET.SubElement(general, "gps").text = '' ET.SubElement(general, "comment").text = args.comment shooting = ET.SubElement(header, "shooting", mode="mosaic") ET.SubElement(shooting, "headOrientation").text = "up" ET.SubElement(shooting, "cameraOrientation").text = "landscape" ET.SubElement(shooting, "stabilizationDelay").text = "1" 
ET.SubElement(shooting, "counter").text = "1" ET.SubElement(shooting, "startTime").text = startTime.strftime(panDateFormat) ET.SubElement(shooting, "endTime").text = endTime.strftime(panDateFormat) camera = ET.SubElement(header, "camera") ET.SubElement(camera, "timeValue").text = "1" ET.SubElement(camera, "bracketing", nbPicts=str(len(brackets)) if not args.no_bracket else '1') ET.SubElement(camera, "sensor", coef="1.0", ratio="4:3") lens = ET.SubElement(header, "lens", type='rectilinear') ET.SubElement(lens, "focal").text = str(focal) mosaic = ET.SubElement(header, "mosaic") ET.SubElement(mosaic, "nbPicts", pitch="1", yaw="1") ET.SubElement(mosaic, "overlap", minimum="0.5", pitch="1.0", yaw="0.5") shoot = ET.SubElement(root, "shoot") #add each shot to document count = 0 for shot in shots: count += 1 bracket = str(brackets[shot['bracket']]) if not args.no_bracket else '1' pict = ET.SubElement(shoot, "pict", id=str(count), bracket=bracket) ET.SubElement(pict, "time").text = shot['time'].strftime(panDateFormat) ET.SubElement(pict, "position", pitch=shot['pitch'], roll=shot['roll'], yaw=shot['yaw']) #write xml to file ET.ElementTree(root).write(args.output, encoding='utf8', method='xml') #success print count, 'shots processed' """ 'EXIF DateTimeOriginal' 'EXIF ExposureMode' 'EXIF ExposureIndex' 'EXIF ExposureProgram' 'EXIF ExposureTime' 'EXIF MeteringMode' 'EXIF ShutterSpeedValue' 'GPS GPSAltitude' 'GPS GPSLatitude' 'GPS GPSLongitude' '{http://ns.adobe.com/tiff/1.0/}Make' '{http://ns.adobe.com/tiff/1.0/}Model' '{http://www.dji.com/drone-dji/1.0/}GimbalRollDegree' '{http://www.dji.com/drone-dji/1.0/}FlightRollDegree' '{http://www.dji.com/drone-dji/1.0/}GimbalPitchDegree' '{http://www.dji.com/drone-dji/1.0/}FlightPitchDegree' '{http://www.dji.com/drone-dji/1.0/}GimbalYawDegree' '{http://www.dji.com/drone-dji/1.0/}FlightYawDegree' """<|fim▁end|>
tagNotes = 'Image ApplicationNotes'
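For the dji2papywizard.py record, the masked line is the tagNotes constant; without it the script cannot pull the XMP attributes out of the 'Image ApplicationNotes' EXIF tag, which is where the DJI gimbal angles live. The script's HDR handling is worth isolating: brackets are numbered by first-seen exposure bias, so every shot sharing a bias value maps to the same bracket slot. The same logic on its own, with hypothetical EV strings:

# Bracket numbering as in the script: the first distinct exposure bias
# becomes bracket 1, the next new bias bracket 2, and so on.
biases = ["0/1", "-2/3", "2/3", "0/1", "-2/3", "2/3"]  # made-up sample values
brackets = {}
for bias in biases:
    if bias not in brackets:
        brackets[bias] = len(brackets) + 1
assert brackets == {"0/1": 1, "-2/3": 2, "2/3": 3}

A typical invocation, matching the argparse setup in the prompt, would be: ./dji2papywizard.py -t "Rooftop pano" -o pano.xml DJI_0*.JPG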