fast_forward_button.js

/*! @license
 * Shaka Player
 * Copyright 2016 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

goog.provide('shaka.ui.FastForwardButton');

goog.require('shaka.ui.Controls');
goog.require('shaka.ui.Element');
goog.require('shaka.ui.Enums');
goog.require('shaka.ui.Locales');
goog.require('shaka.ui.Localization');
goog.require('shaka.util.Dom');

/**
 * @extends {shaka.ui.Element}
 * @final
 * @export
 */
shaka.ui.FastForwardButton = class extends shaka.ui.Element {
  /**
   * @param {!HTMLElement} parent
   * @param {!shaka.ui.Controls} controls
   */
  constructor(parent, controls) {
    super(parent, controls);

    /** @private {!HTMLButtonElement} */
    this.button_ = shaka.util.Dom.createButton();
    this.button_.classList.add('material-icons-round');
    this.button_.classList.add('shaka-fast-forward-button');
    this.button_.classList.add('shaka-tooltip-status');
    this.button_.setAttribute('shaka-status', '1x');
    this.button_.textContent = shaka.ui.Enums.MaterialDesignIcons.FAST_FORWARD;
    this.parent.appendChild(this.button_);
    this.updateAriaLabel_();

    /** @private {!Array.<number>} */
    this.fastForwardRates_ = this.controls.getConfig().fastForwardRates;

    this.eventManager.listen(
        this.localization, shaka.ui.Localization.LOCALE_UPDATED, () => {
          this.updateAriaLabel_();
        });

    this.eventManager.listen(
        this.localization, shaka.ui.Localization.LOCALE_CHANGED, () => {
          this.updateAriaLabel_();
        });

    this.eventManager.listen(this.button_, 'click', () => {
      this.fastForward_();
    });
  }

  /**
   * @private
   */
  updateAriaLabel_() {
    this.button_.ariaLabel =
        this.localization.resolve(shaka.ui.Locales.Ids.FAST_FORWARD);
  }

  /**
   * Cycles trick play rate between the selected fast forward rates.
   * @private
   */
  fastForward_() {
    if (!this.video.duration) {
      return;
    }

    const trickPlayRate = this.player.getPlaybackRate();
    const newRateIndex = this.fastForwardRates_.indexOf(trickPlayRate) + 1;

    // When the button is clicked, the next rate in this.fastForwardRates_ is
    // selected. If no more rates are available, the first one is set.
    const newRate = (newRateIndex != this.fastForwardRates_.length) ?
        this.fastForwardRates_[newRateIndex] : this.fastForwardRates_[0];

    this.player.trickPlay(newRate);
    this.button_.setAttribute('shaka-status', newRate + 'x');
  }
};

/**
 * @implements {shaka.extern.IUIElement.Factory}
 * @final
 */
shaka.ui.FastForwardButton.Factory = class {
  /** @override */
  create(rootElement, controls) {
    return new shaka.ui.FastForwardButton(rootElement, controls);
  }
};

shaka.ui.Controls.registerElement(
    'fast_forward', new shaka.ui.FastForwardButton.Factory());

postsynaptictypes.py

#!/usr/bin/python
# -*- coding: utf-8 -*-

# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  - Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  - Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------

from morphforgecontrib.simulation.synapse_templates.exponential_form.expsyn.core import PostSynapticMech_ExpSyn_Base as ExpSynTemplateType
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2syn.core import PostSynapticMech_Exp2Syn_Base as Exp2SynTemplateType
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2synnmda.core import PostSynapticMech_Exp2SynNMDA_Base as Exp2NMDASynTemplateType

index.d.ts

import {
  Bank,
  Pattern,
  Transaction,
  Parser,
} from './src/types';

export const getPatternFromMessage:
  (message: string, bankId?: string) => Pattern | void;

export const getTransaction:
  (message: string, pattern: Pattern, timezone: string) => Transaction | void;

declare const main: (message: string, props?: {
  bankId?: string,
  timezone?: number | string | null
}) => Transaction | void;

export default main;

login.js

(function(){
    function isValid($, eventName){
        var valid = true;
        $.each(function(i, el){
            valid = valid && el.dispatchEvent(new Event(eventName));
        });
        return valid;
    }

    function setHandler(els){
        els.each(function(i, el){
            if(!(el instanceof HTMLInputElement || el instanceof HTMLFormElement)) return;
            var $Input, $Form;

            function formHnd(e){
                var valid = $Form[0].checkValidity() &&
                    $Form[0].querySelectorAll("input[w-equal-to]").length ==
                    $Form[0].querySelectorAll("input[w-equal-to][w-is-equal]").length;
                valid = isValid($Form, valid ? "validdata" : "invaliddata") && valid;
                if(!valid && e.type == "submit") e.preventDefault();
            }

            function inputHnd(e){
                if(e.type == "invalid") return e.target.dispatchEvent(new Event("invaliddata"));
                try {
                    var curretPos = e.target.selectionStart;
                    e.target.value = e.target.value;
                    curretPos = curretPos > e.target.value.length ? e.target.value.length : curretPos;
                    e.target.setSelectionRange(curretPos, curretPos);
                } catch(e) {}
                var isEqual = true;
                if(e.target.getAttribute("w-equal-to") != null)
                    if(e.target.value == document.querySelector(e.target.getAttribute("w-equal-to")).value)
                        // Fixed: the original assigned to the return value of
                        // getAttribute(), which is a ReferenceError; setting the
                        // attribute is what the formHnd() selector above expects.
                        e.target.setAttribute("w-is-equal", "");
                    else {
                        isEqual = false;
                        e.target.removeAttribute("w-is-equal");
                    }
                e.target.dispatchEvent(new Event(e.target.checkValidity() && isEqual ? "validdata" : "invaliddata"));
            };

            if(el instanceof HTMLFormElement){
                $Form = $(el).on("submit keyup", formHnd);
                $Input = $(el).find("input, textarea").on("input invalid", inputHnd);
            } else if(el instanceof HTMLInputElement || el instanceof HTMLTextAreaElement){
                $Input = $(el).on("input invalid", inputHnd);
                $Form = $(el.form).on("submit keyup", formHnd);
            }
        });
    };

    $.fn.onvalid = function(handler){
        setHandler(this);
        this.on("validdata", handler);
        return this;
    };

    $.fn.oninvalid = function(handler){
        setHandler(this);
        this.on("invaliddata", handler);
        return this;
    };
})();

function setThankCount() {
    $.post("/api", { action: "thanksCount"}, function(data) {
        $("[gt-count]").html(data.count);
    });
}

$(function(){
    localStorage.clear();
    $("form input").oninvalid(function(e){
        $(e.target).parent().addClass("invalid");
    }).onvalid(function(e){
        $(e.target).parent().removeClass("invalid");
    });
    $("form").submit(function(){
        if ($(this).find(".invalid").length) return false;
    });
    setThankCount();
    setInterval(setThankCount, 5000);
});

history.rs

use std::io::prelude::*;
use std::path::Path;

use curl::http;
use yaml_rust::Yaml;

use super::file;
use super::yaml_util;
use super::request::SpagRequest;
use super::remember;

const HISTORY_DIR: &'static str = ".spag";
const HISTORY_FILE: &'static str = ".spag/history.yml";
const HISTORY_LIMIT: usize = 100;

pub fn ensure_history_exists() {
    if !Path::new(HISTORY_FILE).exists() {
        file::ensure_dir_exists(HISTORY_DIR);
        file::write_file(HISTORY_FILE, "[]");
    }
}

pub fn append(req: &SpagRequest, resp: &http::Response) -> Result<(), String> {
    ensure_history_exists();
    let mut y = &mut try!(yaml_util::load_yaml_file(&HISTORY_FILE));
    if let Yaml::Array(ref mut arr) = *y {
        // Trim the history, -1 so that our request fits under the limit
        if arr.len() > HISTORY_LIMIT - 1 {
            while arr.len() > HISTORY_LIMIT - 1 {
                arr.remove(0);
            }
        }
        let new_entry = remember::serialize(req, resp);
        arr.insert(0, new_entry);
    }
    Ok(try!(yaml_util::dump_yaml_file(HISTORY_FILE, &y)))
}

pub fn list() -> Result<Vec<String>, String> {
    ensure_history_exists();
    let mut result = Vec::new();
    let mut y = &mut try!(yaml_util::load_yaml_file(&HISTORY_FILE));
    if let Yaml::Array(ref mut arr) = *y {
        for y in arr.iter() {
            let method = try!(yaml_util::get_value_as_string(&y, &["request", "method"]));
            let endpoint = try!(yaml_util::get_value_as_string(&y, &["request", "endpoint"]));
            let uri = try!(yaml_util::get_value_as_string(&y, &["request", "uri"]));
            let s = format!("{} {}{}", method, endpoint, uri);
            result.push(s);
        }
    }
    Ok(result)
}

pub fn get(raw_index: &String) -> Result<String, String> {
    ensure_history_exists();
    let index = raw_index.parse().unwrap();
    let mut y = &mut try!(yaml_util::load_yaml_file(&HISTORY_FILE));
    if let Yaml::Array(ref mut arr) = *y {
        let target = match arr.get(index) {
            Some(yaml) => yaml,
            None => return Err(format!("No request at #{}", index)),
        };

        // Request data
        let mut output = "-------------------- Request ---------------------\n".to_string();
        let method = try!(yaml_util::get_value_as_string(&target, &["request", "method"]));
        let endpoint = try!(yaml_util::get_value_as_string(&target, &["request", "endpoint"]));
        let uri = try!(yaml_util::get_value_as_string(&target, &["request", "uri"]));
        let body = try!(yaml_util::get_value_as_string(&target, &["request", "body"]));
        output.push_str(format!("{} {}{}\n", method, endpoint, uri).as_str());
        match yaml_util::get_nested_value(&target, &["request", "headers"]) {
            Some(&Yaml::Hash(ref headers)) => {
                for (key, value) in headers.iter() {
                    output.push_str(format!("{}: {}\n",
                        key.as_str().unwrap(), value.as_str().unwrap()).as_str());
                }
            },
            None => {},
            _ => { return Err(format!("Invalid headers in request history #{}.", index)) },
        };
        output.push_str(format!("Body:\n{}\n", body).as_str());

        // Response Data
        output.push_str("-------------------- Response ---------------------\n");
        let body = try!(yaml_util::get_value_as_string(&target, &["response", "body"]));
        let status = try!(yaml_util::get_value_as_string(&target, &["response", "status"]));
        output.push_str(format!("Status code {}\n", status).as_str());
        match yaml_util::get_nested_value(&target, &["response", "headers"]) {
            Some(&Yaml::Hash(ref headers)) => {
                for (key, value) in headers.iter() {
                    output.push_str(format!("{}: {}\n",
                        key.as_str().unwrap(), value.as_str().unwrap()).as_str());
                }
            },
            None => {},
            _ => { return Err(format!("Invalid headers in request history #{}.", index)) },
        };
        output.push_str(format!("Body:\n{}\n", body).as_str());

        Ok(output.to_string())
    } else {
        Err(format!("Failed to load history file {}", HISTORY_FILE))
    }
}

plugin_repo_test.go

package pluginrepo_test

import (
    "fmt"
    "net/http"
    "net/http/httptest"

    . "github.com/cloudfoundry/cli/cf/actors/pluginrepo"
    "github.com/cloudfoundry/cli/cf/models"

    . "github.com/cloudfoundry/cli/testhelpers/matchers"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

var _ = Describe("PluginRepo", func() {
    var (
        repoActor            PluginRepo
        testServer1CallCount int
        testServer2CallCount int
        testServer1          *httptest.Server
        testServer2          *httptest.Server
    )

    BeforeEach(func() {
        repoActor = NewPluginRepo()
    })

    Context("request data from all repos", func() {
        BeforeEach(func() {
            testServer1CallCount = 0
            h1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                testServer1CallCount++
                fmt.Fprintln(w, `{"plugins":[]}`)
            })
            testServer1 = httptest.NewServer(h1)

            testServer2CallCount = 0
            h2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                testServer2CallCount++
                fmt.Fprintln(w, `{"plugins":[]}`)
            })
            testServer2 = httptest.NewServer(h2)
        })

        AfterEach(func() {
            testServer1.Close()
            testServer2.Close()
        })

        It("make query to all repos listed in config.json", func() {
            repoActor.GetPlugins([]models.PluginRepo{
                models.PluginRepo{
                    Name: "repo1",
                    URL:  testServer1.URL,
                },
                models.PluginRepo{
                    Name: "repo2",
                    URL:  testServer2.URL,
                },
            })

            Expect(testServer1CallCount).To(Equal(1))
            Expect(testServer2CallCount).To(Equal(1))
        })

        It("lists each of the repos in config.json", func() {
            list, _ := repoActor.GetPlugins([]models.PluginRepo{
                models.PluginRepo{
                    Name: "repo1",
                    URL:  testServer1.URL,
                },
                models.PluginRepo{
                    Name: "repo2",
                    URL:  testServer2.URL,
                },
            })

            Expect(list["repo1"]).NotTo(BeNil())
            Expect(list["repo2"]).NotTo(BeNil())
        })
    })

    Context("Getting data from repos", func() {
        Context("When data is valid", func() {
            BeforeEach(func() {
                h1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                    fmt.Fprintln(w, `{"plugins":[
    {
        "name":"plugin1",
        "description":"none",
        "version":"1.3.4",
        "binaries":[
            {
                "platform":"osx",
                "url":"https://github.com/simonleung8/cli-plugin-echo/raw/master/bin/osx/echo",
                "checksum":"2a087d5cddcfb057fbda91e611c33f46"
            }
        ]
    },
    {
        "name":"plugin2",
        "binaries":[
            {
                "platform":"windows",
                "url":"http://going.no.where",
                "checksum":"abcdefg"
            }
        ]
    }]
}`)
                })
                testServer1 = httptest.NewServer(h1)
            })

            AfterEach(func() {
                testServer1.Close()
            })

            It("lists the info for each plugin", func() {
                list, _ := repoActor.GetPlugins([]models.PluginRepo{
                    models.PluginRepo{
                        Name: "repo1",
                        URL:  testServer1.URL,
                    },
                })

                Expect(list["repo1"]).NotTo(BeNil())
                Expect(len(list["repo1"])).To(Equal(2))

                Expect(list["repo1"][0].Name).To(Equal("plugin1"))
                Expect(list["repo1"][0].Description).To(Equal("none"))
                Expect(list["repo1"][0].Version).To(Equal("1.3.4"))
                Expect(list["repo1"][0].Binaries[0].Platform).To(Equal("osx"))
                Expect(list["repo1"][1].Name).To(Equal("plugin2"))
                Expect(list["repo1"][1].Binaries[0].Platform).To(Equal("windows"))
            })
        })
    })

    Context("When data is invalid", func() {
        Context("json is invalid", func() {
            BeforeEach(func() {
                h1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                    fmt.Fprintln(w, `"plugins":[]}`)
                })
                testServer1 = httptest.NewServer(h1)
            })

            AfterEach(func() {
                testServer1.Close()
            })

            It("informs user of invalid json", func() {
                _, err := repoActor.GetPlugins([]models.PluginRepo{
                    models.PluginRepo{
                        Name: "repo1",
                        URL:  testServer1.URL,
                    },
                })

                Expect(err).To(ContainSubstrings(
                    []string{"Invalid json data"},
                ))
            })
        })

        Context("when data is valid json, but not valid plugin repo data", func() {
            BeforeEach(func() {
                h1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                    fmt.Fprintln(w, `{"bad_plugin_tag":[]}`)
                })
                testServer1 = httptest.NewServer(h1)
            })

            AfterEach(func() {
                testServer1.Close()
            })

            It("informs user of invalid repo data", func() {
                _, err := repoActor.GetPlugins([]models.PluginRepo{
                    models.PluginRepo{
                        Name: "repo1",
                        URL:  testServer1.URL,
                    },
                })

                Expect(err).To(ContainSubstrings(
                    []string{"Invalid data", "plugin data does not exist"},
                ))
            })
        })
    })
})

__init__.py

# Copyright (c) 2013 - The pycangjie authors
#
# This file is part of pycangjie, the Python bindings to libcangjie.
#
# pycangjie is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pycangjie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pycangjie. If not, see <http://www.gnu.org/licenses/>.

import itertools
import operator
import string
import subprocess
import unittest

import cangjie


class MetaTest(type):
    """Metaclass for our test cases

    The goal is to provide every TestCase class with methods like test_a(),
    test_b(), etc..., in other words, one method per potential Cangjie input
    code.

    Well, not quite, because that would be 12356630 methods (the number of
    strings composed of 1 to 5 lowercase ascii letters), and even though my
    laptop has 8Go of RAM, the test process gets killed by the OOM killer. :)

    So we cheat, and use libcangjie's wildcard support, so that we only
    generate 26 + 26^2 = 702 methods.
    """
    def __init__(cls, name, bases, dct):
        super(MetaTest, cls).__init__(name, bases, dct)

        def gen_codes():
            """Generate the 702 possible input codes"""
            # First, the 1-character codes
            for c in string.ascii_lowercase:
                yield c

            # Next, the 2-characters-with-wildcard codes
            for t in itertools.product(string.ascii_lowercase, repeat=2):
                yield '*'.join(t)

        def tester(code):
            def func(cls):
                return cls.run_test(code)
            return func

        # Generate the test_* methods
        for code in gen_codes():
            setattr(cls, "test_%s" % code.replace("*", ""), tester(code))


class BaseTestCase(unittest.TestCase):
    """Base test class, grouping the common stuff for all our unit tests"""
    def __init__(self, name):
        super().__init__(name)

        self.cli_cmd = ["/usr/bin/libcangjie_cli"] + self.cli_options

        self.language = (cangjie.filters.BIG5 | cangjie.filters.HKSCS |
                         cangjie.filters.PUNCTUATION |
                         cangjie.filters.CHINESE | cangjie.filters.ZHUYIN |
                         cangjie.filters.KANJI | cangjie.filters.KATAKANA |
                         cangjie.filters.HIRAGANA | cangjie.filters.SYMBOLS)

    def setUp(self):
        self.cj = cangjie.Cangjie(self.version, self.language)

    def tearDown(self):
        del self.cj

    def run_command(self, cmd):
        """Run a command, deal with errors, and return its stdout"""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()

        try:
            cangjie.errors.handle_error_code(proc.returncode,
                                             msg="Unknown error while running"
                                                 " libcangjie_cli (%d)"
                                                 % proc.returncode)
        except cangjie.errors.CangjieNoCharsError:
            return ""

        try:
            return out.decode("utf-8")
        except UnicodeDecodeError:
            # Python's 'utf-8' codec trips over b"\xed\xa1\x9d\xed\xbc\xb2",
            # but according to [1] and [2], it is a valid sequence of 2 chars:
            #     U+D85D \xed\xa1\x9d
            #     U+DF32 \xed\xbc\xb2
            # [1] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=55389&utf8=string-literal
            # [2] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=57138&utf8=string-literal
            # TODO: Investigate this further, and eventually open a bug report
            out2 = []
            for line in out.split("\n".encode("utf-8")):
                try:
                    out2.append(line.decode("utf-8"))
                except UnicodeDecodeError:
                    pass
            return "\n".join(out2)

    def run_test(self, input_code):
        """Run the actual test

        This compares the output of the libcangjie_cli tool with the output
        from pycangjie.

        The idea is that if pycangjie produces the same results as a C++
        tool compiled against libcangjie, then pycangjie properly wraps
        libcangjie.

        We do not try to verify that pycangjie produces valid results here,
        validity is to be checked in libcangjie.

        Note that this whole test is based on scraping the output of
        libcangjie_cli, which is quite fragile.
        """
        # Get a list of CangjieChar from libcangjie_cli as a reference
        tmp_expected = self.run_command(self.cli_cmd + [input_code]).split("\n")
        tmp_expected = map(lambda x: x.strip(" \n"), tmp_expected)
        tmp_expected = filter(lambda x: len(x) > 0, tmp_expected)

        expected = []
        for item in tmp_expected:
            chchar, simpchar, code, frequency = item.split(", ")
            chchar = chchar.split(": ")[-1].strip("'")
            simpchar = simpchar.split(": ")[-1].strip("'")
            code = code.split(": ")[-1].strip("'")
            frequency = int(frequency.split(" ")[-1])
            expected.append(cangjie._core.CangjieChar(chchar.encode("utf-8"),
                                                      simpchar.encode("utf-8"),
                                                      code.encode("utf-8"),
                                                      frequency))
        expected = sorted(expected, key=operator.attrgetter('chchar', 'code'))

        try:
            # And compare with what pycangjie produces
            results = sorted(self.cj.get_characters(input_code),
                             key=operator.attrgetter('chchar', 'code'))
            self.assertEqual(results, expected)
        except cangjie.errors.CangjieNoCharsError:
            self.assertEqual(len(expected), 0)

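For reference, the 702 figure in the metaclass docstring above is easy to verify in isolation. The following standalone sketch is an illustration added here, not part of pycangjie; it uses only the standard library to reproduce gen_codes() and check the count of generated test names.

import itertools
import string

def gen_codes():
    # 26 one-letter codes...
    for c in string.ascii_lowercase:
        yield c
    # ...plus 26**2 two-letter codes joined by the wildcard.
    for t in itertools.product(string.ascii_lowercase, repeat=2):
        yield '*'.join(t)

codes = list(gen_codes())
assert len(codes) == 26 + 26 ** 2  # 702 generated test methods
print(codes[0], codes[26])         # -> a a*a
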
metadata_definitions.py

# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Raster Impact on
Population.

Contact : [email protected]

.. note:: This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.

"""

__author__ = 'Rizky Maulana Nugraha'

from safe.common.utilities import OrderedDict
from safe.defaults import (
    default_minimum_needs,
    default_gender_postprocessor,
    age_postprocessor,
    minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
    ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
    layer_mode_continuous,
    layer_geometry_raster,
    hazard_flood,
    hazard_category_single_event,
    unit_metres,
    unit_feet,
    count_exposure_unit,
    exposure_population
)


class FloodEvacuationRasterHazardMetadata(ImpactFunctionMetadata):
    """Metadata for FloodEvacuationFunction.

    .. versionadded:: 2.1

    We only need to re-implement as_dict(), all other behaviours
    are inherited from the abstract base class.
    """

    @staticmethod
    def as_dict():
        """Return metadata as a dictionary.

        This is a static method. You can use it to get the metadata in
        dictionary format for an impact function.

        :returns: A dictionary representing all the metadata for the
            concrete impact function.
        :rtype: dict
        """
        dict_meta = {
            'id': 'FloodEvacuationRasterHazardFunction',
            'name': tr('Raster flood on population'),
            'impact': tr('Need evacuation'),
            'title': tr('Need evacuation'),
            'function_type': 'old-style',
            'author': 'AIFDR',
            'date_implemented': 'N/A',
            'overview': tr(
                'To assess the impacts of flood inundation in raster '
                'format on population.'),
            'detailed_description': tr(
                'The population subject to inundation exceeding a '
                'threshold (default 1m) is calculated and returned as a '
                'raster layer. In addition the total number of affected '
                'people and the required needs based on the user '
                'defined minimum needs are reported. The threshold can be '
                'changed and even contain multiple numbers in which case '
                'evacuation and needs are calculated using the largest number '
                'with population breakdowns provided for the smaller numbers. '
                'The population raster is resampled to the resolution of the '
                'hazard raster and is rescaled so that the resampled '
                'population counts reflect estimates of population count '
                'per resampled cell. The resulting impact layer has the '
                'same resolution and reflects population count per cell '
                'which are affected by inundation.'),
            'hazard_input': tr(
                'A hazard raster layer where each cell represents flood '
                'depth (in meters).'),
            'exposure_input': tr(
                'An exposure raster layer where each cell represent '
                'population count.'),
            'output': tr(
                'Raster layer contains people affected and the minimum '
                'needs based on the people affected.'),
            'actions': tr(
                'Provide details about how many people would likely need '
                'to be evacuated, where they are located and what '
                'resources would be required to support them.'),
            'limitations': [
                tr('The default threshold of 1 meter was selected based '
                   'on consensus, not hard evidence.')
            ],
            'citations': [],
            'layer_requirements': {
                'hazard': {
                    'layer_mode': layer_mode_continuous,
                    'layer_geometries': [layer_geometry_raster],
                    'hazard_categories': [hazard_category_single_event],
                    'hazard_types': [hazard_flood],
                    'continuous_hazard_units': [unit_feet, unit_metres],
                    'vector_hazard_classifications': [],
                    'raster_hazard_classifications': [],
                    'additional_keywords': []
                },
                'exposure': {
                    'layer_mode': layer_mode_continuous,
                    'layer_geometries': [layer_geometry_raster],
                    'exposure_types': [exposure_population],
                    'exposure_units': [count_exposure_unit],
                    'exposure_class_fields': [],
                    'additional_keywords': []
                }
            },
            'parameters': OrderedDict([
                ('thresholds [m]', [1.0]),
                ('postprocessors', OrderedDict([
                    ('Gender', default_gender_postprocessor()),
                    ('Age', age_postprocessor()),
                    ('MinimumNeeds', minimum_needs_selector()),
                ])),
                ('minimum needs', default_minimum_needs())
            ])
        }
        return dict_meta

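A quick way to see what the metadata above exposes is to call the static method directly. This snippet is illustrative only (not part of InaSAFE) and touches only keys that are defined in as_dict() above.

meta = FloodEvacuationRasterHazardMetadata.as_dict()
print(meta['id'])                            # FloodEvacuationRasterHazardFunction
print(meta['parameters']['thresholds [m]'])  # [1.0]
print(sorted(meta['layer_requirements']))    # ['exposure', 'hazard']
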
horizontal-footer-details.ts

import {Component, View} from "angular2/angular2";

@Component({
    selector: "horizontal-footer-details"
})
@View({
    templateUrl: "./app/components/horizontal-footer-details/horizontal-footer-details.html"
})
export class GdgHorizontalFooterDetails {

}

sword_verses.py

#!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# A sword KJV indexed search module.
# Copyright (C) 2012-2013 Josiah Gordon <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from collections import defaultdict
from xml.dom.minidom import parseString
from textwrap import fill
from os.path import dirname as os_dirname
from os.path import join as os_join
import dbm
import sys
import re

import Sword

from .utils import *

data_path = os_join(os_dirname(__file__), 'data')


def book_gen():
    """ A Generator function that yields book names in order. """

    # Yield a list of all the book names in the bible.
    verse_key = Sword.VerseKey('Genesis 1:1')
    for testament in [1, 2]:
        for book in range(1, verse_key.bookCount(testament) + 1):
            yield(verse_key.bookName(testament, book))

# book_list = list(book_gen())
try:
    book_list = []
    for book in book_gen():
        book_list.append(book)
except:
    pass


# Key function used to sort a list of verse references.
def sort_key(ref):
    """ Sort verses by book. """

    try:
        book, chap_verse = ref.rsplit(' ', 1)
        chap, verse = chap_verse.split(':')
        val = '%02d%03d%03d' % (int(book_list.index(book)), int(chap),
                                int(verse))
        return val
    except Exception as err:
        print('Error sorting "%s": %s' % (ref, err), file=sys.stderr)
        sys.exit()


def parse_verse_range(verse_ref_list):
    """ Uses VerseKey ParseVerseList to parse the reference list. """

    # Make the argument a parseable string.
    if isinstance(verse_ref_list, str):
        verse_ref_str = verse_ref_list
    else:
        verse_ref_str = ' '.join(verse_ref_list)
    verse_key = Sword.VerseKey()

    # Parse the list.
    # args: verse_list, default_key, expand_range, chapter_as_verse?
    verse_list = verse_key.parseVerseList(verse_ref_str, 'Genesis 1:1', True,
                                          False)

    verse_set = set()
    for i in range(verse_list.getCount()):
        key = Sword.VerseKey(verse_list.getElement(i))
        if key:
            upper = key.getUpperBound().getText()
            lower = key.getLowerBound().getText()
            if upper != lower:
                verse_set.update(VerseIter(lower, upper))
            else:
                verse_set.add(key.getText())

    return verse_set


def add_context(ref_set, count=0):
    """ Add count number of verses before and after each reference. """

    if count == 0:
        return ref_set

    # Make a copy to work on.
    clone_set = set(ref_set)
    for ref in ref_set:
        start = Sword.VerseKey(ref)
        end = Sword.VerseKey(ref)
        # Pass the beginning of the book.
        start.decrement()
        start.decrement(count - 1)
        # Pass the end of the book.
        end.increment()
        end.increment(count - 1)
        clone_set.update(VerseIter(start.getText(), end.getText()))

    return clone_set


def mod_to_dbm(module: str, key_iter: iter, path: str) -> str:
    """ Reads all the elements of key_iter from the module and saves them to
    a dbm file.

    """

    lookup = Lookup(module_name=module)
    dbm_name = '%s/%s.dbm' % (path, module)

    with IndexDbm(dbm_name, 'nf') as dbm_file:
        for key in key_iter:
            dbm_file[key] = lookup.get_raw_text(key)

    return dbm_name


def make_daily_dbm(path: str=INDEX_PATH) -> str:
    """ Saves the daily devotional to a dbm file. """

    from datetime import date, timedelta

    # Use a leap year to get all the days in February.
    # (2012 is a leap year and has 366 days; the original range(365) would
    # have stopped one day short of December 31st.)
    start = date(2012, 1, 1)
    date_iter = ((start + timedelta(i)).strftime('%m.%d') for i in range(366))

    return mod_to_dbm('Daily', date_iter, path)


def make_strongs_dbm(path: str=INDEX_PATH) -> str:
    """ Saves the StrongsReal modules as dbms. """

    keys = IndexDict('KJV')['_strongs_']
    greek_keys = (i[1:] for i in keys if i.startswith('G'))
    hebrew_keys = (i[1:] for i in keys if i.startswith('H'))

    greek_file = mod_to_dbm('StrongsRealGreek', greek_keys, path)
    hebrew_file = mod_to_dbm('StrongsRealHebrew', hebrew_keys, path)

    return '\n'.join((greek_file, hebrew_file))


def make_robinson_dbm(path: str=INDEX_PATH) -> str:
    """ Save robinson morph definitions in a dbm. """

    keys = IndexDict('KJV')['_morph_']
    robinson_keys = (i for i in keys if not i.startswith('TH'))

    return mod_to_dbm('Robinson', robinson_keys, path)


def make_raw_kjv_dbm(path: str=INDEX_PATH) -> str:
    """ Saves the KJV modules raw text as a dbm. """

    verse_iter = VerseIter('Genesis 1:1')

    return mod_to_dbm('KJV', verse_iter, path)


class Lookup(object):
    """ A generic object to look up references in different sword modules. """

    def __init__(self, module_name='KJV', markup=Sword.FMT_PLAIN):
        """ Setup the module to look up information in. """

        markup = Sword.MarkupFilterMgr(markup)

        # We don't own this or it will segfault.
        markup.thisown = False
        self._library = Sword.SWMgr(markup)
        self._module = self._library.getModule(module_name)

        self._bold_regx = re.compile(r'<b>(\w+)</b>', re.I)
        self._italic_regx = re.compile(r'''
                            (?:<i>|<hi\s*type="italic">)
                            ([\w\s]+)(?:</i>|</hi>)
                            ''', re.I | re.X)
        self._br_regx = re.compile(r'(<br[\s]*/>|<lb/>)[\s]?', re.I)
        self._cleanup_regx = re.compile(r'<[^>]*>')
        self._brace_regx = re.compile(r'\{([\W]*)([\w]*)([\W]*)\}')
        self._parenthesis_regx = re.compile(r'\(([\W]*)([\w]*)([\W]*)\)')
        self._bracket_regx = re.compile(r'\[([\W]*)([\w ]*)([\W]*)\]')
        self._verse_ref_regx = re.compile(r'''
                            <scripRef[^>]*>
                            ([^<]*)
                            </scripRef>
                            ''', re.I)

    def get_text(self, key):
        """ Get the text at the given key in the module.
        i.e. get_text('3778') returns the greek strongs.

        """

        encoding = get_encoding()
        self._module.setKey(Sword.SWKey(key))
        item_text = self._module.renderText()
        # Make the text printable.
        item_text = item_text.encode(encoding, 'replace')
        item_text = item_text.decode(encoding, 'replace')
        return fill(item_text, screen_size()[1])

    def get_raw_text(self, key):
        """ Get the text at the given key in the module.
        i.e. get_text('3778') returns the greek strongs.

        """

        encoding = get_encoding()
        self._module.setKey(Sword.SWKey(key))
        item_text = self._module.getRawEntry()

        # Make the text printable.
        item_text = item_text.encode(encoding, 'replace')
        item_text = item_text.decode(encoding, 'replace')
        return item_text

    def get_formatted_text(self, key):
        """ Returns the formatted raw text of the specified key. """

        text = self.get_raw_text(key)

        # Format and highlight the text.
        text = self._bold_regx.sub('\033[1m\\1\033[m', text)
        text = self._italic_regx.sub('\033[36m\\1\033[m', text)
        text = self._br_regx.sub('\n', text)
        text = self._bracket_regx.sub('[\\1\033[33m\\2\033[m\\3]', text)
        text = self._brace_regx.sub('{\\1\033[35m\\2\033[m\\3}', text)
        text = self._parenthesis_regx.sub('(\\1\033[34m\\2\033[m\\3)', text)
        text = self._verse_ref_regx.sub('\033[32m\\1\033[m', text)
        text = self._cleanup_regx.sub('', text)

        return text


class VerseTextIter(object):
    """ An iterable object for accessing verses in the Bible.  Maybe it will
    be easier maybe not.

    """

    def __init__(self, reference_iter, strongs=False, morph=False,
                 module='KJV', markup=Sword.FMT_PLAIN, render=''):
        """ Initialize. """

        markup = Sword.MarkupFilterMgr(markup)

        # We don't own this or it will segfault.
        markup.thisown = False
        self._library = Sword.SWMgr(markup)
        self._library.setGlobalOption("Headings", "On")
        self._library.setGlobalOption("Cross-references", "Off")

        if strongs:
            self._library.setGlobalOption("Strong's Numbers", "On")
        else:
            self._library.setGlobalOption("Strong's Numbers", "Off")

        if morph:
            self._library.setGlobalOption("Morphological Tags", "On")
        else:
            self._library.setGlobalOption("Morphological Tags", "Off")

        # Strings for finding the heading.
        self._head_str = Sword.SWBuf('Heading')
        self._preverse_str = Sword.SWBuf('Preverse')
        self._canon_str = Sword.SWBuf('canonical')

        self._module = self._library.getModule(module)
        self._key = self._module.getKey()

        if render.lower() == 'raw':
            self._render_func = self._module.getRawEntry
        elif render.lower() == 'render_raw':
            self._fix_space_regx = re.compile(r'([^\.:\?!])\s+')
            self._fix_end_regx = re.compile(r'\s+([\.:\?!,;])')
            self._fix_start_tag_regx = re.compile(r'(<[npi]>)\s*')
            self._fix_end_tag_regx = re.compile(r'\s*(</[npi]>)')
            self._upper_divname_regx = re.compile(r'(\w+)([\'s]*)')
            self._render_func = \
                lambda: self._parse_raw(self._module.getRawEntry(),
                                        strongs, morph)
        else:
            self._render_func = self._module.renderText

        self._ref_iter = reference_iter

    def next(self):
        """ Returns the next verse reference and text. """

        return self.__next__()

    def __next__(self):
        """ Returns a tuple of the next verse reference and text. """

        # Retrieve the next reference.
        verse_ref = next(self._ref_iter)
        self._key.setText(verse_ref)

        # Set the verse and render the text.
        verse_text = self._get_text(verse_ref)

        return (verse_ref, verse_text)

    def __iter__(self):
        """ Returns an iterator of self. """

        return self

    def _get_text(self, verse_ref):
        """ Returns the verse text.  Override this to produce formatted verse
        text.

        """

        verse_text = self._render_func()
        if self._render_func == self._module.renderText:
            verse_text = '%s %s' % (self._get_heading(), verse_text)

        return verse_text

    def _get_heading(self):
        """ Returns the verse heading if there is one. """

        attr_map = self._module.getEntryAttributesMap()
        heading_list = []
        head_str = self._head_str
        preverse_str = self._preverse_str
        canon_str = self._canon_str
        if head_str in attr_map:
            heading_attrs = attr_map[head_str]
            if self._preverse_str in heading_attrs:
                preverse_attrs = heading_attrs[preverse_str]
                for k, val in preverse_attrs.items():
                    if canon_str in heading_attrs[k]:
                        if heading_attrs[k][canon_str].c_str() == 'true':
                            heading_list.append(val.c_str())

        if heading_list:
            return self._module.renderText(''.join(heading_list))
        else:
            return ''

    def _parse_xml(self, xml_dom, strongs=False, morph=False):
        """ Recursively parse all the childNodes in a xml minidom, and build
        the verse text.

        """

        # The string that will hold the verse.
        verse_text = ''
        # The name of the current tag.
        name = xml_dom.localName if xml_dom.localName else ''
        strongs_str = morph_str = ''
        if xml_dom.attributes:
            attr_dict = dict(xml_dom.attributes.items())
            info_print(attr_dict, tag=4)
            # Get any paragraph marker.
            if 'marker' in attr_dict:
                verse_text = '<p>%s</p> ' % attr_dict['marker']
            else:
                verse_text = ''
            italic_str = '%s'
            note_str = '%s'
            for key, value in attr_dict.items():
                # Italicize any added text.
                if 'added' in value.lower():
                    italic_str = '<i>%s</i> '
                # Label study notes.
                elif 'study' in value.lower() or 'note' in name.lower():
                    note_str = '<n>%s</n>'
                # Check for strongs.
                elif 'lemma' in key.lower() and strongs:
                    for num in value.split():
                        strongs_str += ' <%s>' % num.split(':')[1]
                # Check for morphology.
                elif 'morph' in key.lower() and morph:
                    for tag in value.split():
                        morph_str += ' {%s}' % tag.split(':')[1]
        # Recursively build the text from all the child nodes.
        for node in xml_dom.childNodes:
            child_s = self._parse_xml(node, strongs, morph)
            if 'divine' in name.lower():
                verse_text += \
                    ' %s' % self._upper_divname_regx.sub(
                        lambda m: m.group(1).upper() + m.group(2),
                        child_s)
            else:
                verse_text += '%s' % child_s

        if xml_dom.attributes:
            return italic_str % note_str % '%s%s%s' % (verse_text,
                                                       strongs_str,
                                                       morph_str)
        if hasattr(xml_dom, 'data'):
            info_print(xml_dom.data, tag=4)
            return xml_dom.data

        return verse_text.strip()

    def _parse_raw(self, raw_text, strongs=False, morph=False):
        """ Parse raw verse text and return a formatted version. """

        # A hack to make the raw text parse as xml.
        xml_text = '''<?xml version="1.0"?>
        <root xmlns="%s">
        %s
        </root>'''

        # It works now we can parse the xml dom.
        try:
            parsed_xml = parseString(xml_text % ('verse', raw_text))
            parsed_str = self._parse_xml(parsed_xml, strongs, morph)
        except Exception as err:
            print('Error %s while processing %s.\n' % (err, raw_text),
                  file=sys.stderr)
            parsed_str = raw_text

        # Make all the spacing correct.
        fixed_str = self._fix_end_regx.sub('\\1', parsed_str)
        fixed_str = self._fix_space_regx.sub('\\1 ', fixed_str)
        fixed_str = self._fix_start_tag_regx.sub('\\1', fixed_str)
        fixed_str = self._fix_end_tag_regx.sub('\\1', fixed_str)

        return fixed_str.replace('\n', '')


class RawDict(object):
    """ Parse raw verse text into a dictionary so it can easily be found out
    how words are translated and how Strong's numbers are used.

    """

    def __init__(self, reference_iter, module='KJV'):
        """ Initialize the sword module. """

        # This doesn't matter.
        markup = Sword.MarkupFilterMgr(Sword.FMT_PLAIN)

        # We don't own this or it will segfault.
        markup.thisown = False
        self._library = Sword.SWMgr(markup)

        self._module = self._library.getModule(module)
        self._key = self._module.getKey()

        self._ref_iter = reference_iter

        self._fix_space_regx = re.compile(r'([^\.:\?!])\s+')
        self._fix_end_regx = re.compile(r'\s+([\.:\?!,;])')
        self._remove_tag_regx = re.compile(r'(<i>\s?|\s?</i>)')
        self._fix_start_tag_regx = re.compile(r'(<i>)\s*')
        self._fix_end_tag_regx = re.compile(r'\s*(</i>)')

    def next(self):
        """ Returns the next verse reference and text. """

        return self.__next__()

    def __next__(self):
        """ Returns a tuple of the next verse reference and text. """

        # Retrieve the next reference.
        verse_ref = next(self._ref_iter)
        self._key.setText(verse_ref)

        # Set the verse and render the text.
        verse_dict = self.get_dict(verse_ref)

        return (verse_ref, verse_dict)

    def __iter__(self):
        """ Returns an iterator of self. """

        return self

    def get_dict(self, verse_reference):
        """ Lookup the verse reference in the sword module specified and
        return a dictionary from it.

        """

        self._key.setText(verse_reference)
        raw_text = self._module.getRawEntry()

        return self._get_parsed_dict(raw_text, True, True)

    def _raw_to_dict(self, xml_dom, strongs=False, morph=False):
        """ Recursively parse all the childNodes in a xml minidom, and build
        a dictionary to use for telling what strongs numbers go to what words
        and vice versa.

        """

        # The dictionary that will hold the verse.
        verse_dict = defaultdict(list)
        verse_dict['_words'].append(defaultdict(list))
        # Recursively build the text from all the child nodes.
        child_s = ''
        # The string that will hold the verse.
        verse_text = ''
        # The name of the current tag.
        name = xml_dom.localName if xml_dom.localName else ''
        # Build up the dictionary and verse text from the child nodes.
        for node in xml_dom.childNodes:
            child_s, child_d = self._raw_to_dict(node, strongs, morph)
            if 'divine' in name.lower():
                # Uppercase 'LORD's in the text.
                verse_text += ' %s' % child_s.upper()
            else:
                verse_text += ' %s' % child_s
            for key, value in child_d.items():
                # Cleanup the items in the dictionary.
                if value and not isinstance(value[0], dict):
                    new_list = set(value).union(verse_dict[key])
                else:
                    new_list = value
                if key == '_words':
                    # Update the words dictionary.
                    for words, lst in value[0].items():
                        new_list = filter(any, lst)
                        verse_dict['_words'][0][words].extend(new_list)
                else:
                    # Make sure all items in the list are not None.
                    verse_dict[key].extend(filter(any, new_list))

        if xml_dom.attributes:
            attr_dict = dict(xml_dom.attributes.items())
            # Cleanup and format the verse text.
            verse_text = self._fix_end_regx.sub('\\1', verse_text)
            verse_text = self._fix_space_regx.sub('\\1 ', verse_text)
            verse_text = self._fix_start_tag_regx.sub('\\1', verse_text)
            verse_text = self._fix_end_tag_regx.sub('\\1', verse_text)
            verse_text = verse_text.replace('\n', '')
            # Text clean of all italic tags.
            clean_text = self._remove_tag_regx.sub('', verse_text)
            italic_str = '%s'
            # Dictionary to hold Strong's and Morphological attributes.
            attrib_dict = defaultdict(list)
            strongs_str = morph_str = ''
            for key, value in attr_dict.items():
                # Check for strongs.
                if 'lemma' in key.lower():
                    for num in value.split():
                        # Get the number.
                        num = num.split(':')[1]
                        attrib_dict['strongs'].append(num)
                        # Associate the text with the number.
                        verse_dict[num].append(clean_text.strip())
                        if strongs:
                            strongs_str += ' <%s> ' % num
                    # Cleanup the attribute dictionary.
                    attrib_dict['strongs'] = list(set(attrib_dict['strongs']))
                # Check for morphology.
                elif 'morph' in key.lower():
                    for tag in value.split():
                        # Get the tag.
                        tag = tag.split(':')[1]
                        attrib_dict['morph'].append(tag)
                        # Associate the text with the tag.
                        verse_dict[tag].append(clean_text.strip())
                        if morph:
                            morph_str += ' {%s} ' % tag
                    # Cleanup the attribute dictionary.
                    attrib_dict['morph'] = list(set(attrib_dict['morph']))
            if attrib_dict:
                # Associate the numbers and tags with the text.
                verse_dict['_words'][0][clean_text.strip()].append(attrib_dict)
            elif 'type' in attr_dict or 'subType' in attr_dict:
                _sub_type = attr_dict.get('subType', '')
                _type = attr_dict.get('type', _sub_type)
                if _type.lower() == 'x-p' or 'marker' in attr_dict:
                    # Get any paragraph marker.
                    verse_dict['_x-p'].append(attr_dict['marker'].strip())
                elif 'study' in _type.lower() or 'note' in name.lower():
                    verse_dict['_notes'].append(verse_text.strip())
                if 'added' in _type.lower() or 'added' in _sub_type.lower():
                    if 'marker' not in attr_dict:
                        # Italicize any added text.
                        italic_str = '<i>%s</i>'
                        verse_dict['_added'].append(verse_text.strip())
                elif 'section' in _type.lower() or \
                        'preverse' in _sub_type.lower():
                    # Add the preverse heading.
                    verse_dict['_preverse'].append(verse_text.strip())
                else:
                    # Don't include unwanted tags (e.g. strongs markup and
                    # notes) in the text.
                    verse_text = ''
            elif 'xmlns' in attr_dict:
                verse_text = verse_text.strip()
                # Include the entire verse text in the dictionary.
                verse_dict['_%s' % attr_dict['xmlns']].append(verse_text)
            # Build up the verse string.
            temp_str = '%s%s%s' % (verse_text, strongs_str, morph_str)
            verse_text = italic_str % temp_str
        if hasattr(xml_dom, 'data'):
            return xml_dom.data, verse_dict

        return verse_text, verse_dict

    def _get_parsed_dict(self, raw_text, strongs=False, morph=False):
        """ Parse raw verse text and return a formatted version. """

        info_print(raw_text, tag=31)

        # A hack to make the raw text parse as xml.
        xml_text = '''<?xml version="1.0"?>
        <root xmlns="%s">
        %s
        </root>''' % ('verse_text', raw_text)

        # It works now we can parse the xml dom.
        try:
            parsed_xml = parseString(xml_text)
            return self._raw_to_dict(parsed_xml, strongs, morph)
        except Exception as err:
            info_print('Error %s while processing %s.\n' % (err, raw_text),
                       tag=31)
            return raw_text, {'_verse_text': [raw_text],
                              '_words': [defaultdict(list)]}


class VerseIter(object):
    """ Iterator of verse references. """

    def __init__(self, start, end='Revelation of John 22:21'):
        """ Setup the start and end references of the range. """

        # Make sure the range is in order.
        start, end = sorted([start, end], key=sort_key)
        self._verse = Sword.VerseKey(start, end)
        self._end_ref = self._verse.getUpperBound().getText()

        self._verse_ref = ''

    def __next__(self):
        """ Returns the next verse reference. """

        # End the iteration when we reach the end of the range.
        if self._verse_ref == self._end_ref:
            raise StopIteration()

        # Get the current verse reference.
        self._verse_ref = self._verse.getText()

        # Load the next verse in the range.
        self._verse.increment()

        # Return only the reference.
        return self._verse_ref

    def __iter__(self):
        """ Returns an iterator of self. """

        return self

    def next(self):
        """ Returns the next verse reference. """

        return self.__next__()


class ChapterIter(VerseIter):
    """ Iterates over just one chapter. """

    def __init__(self, book='Genesis', chapter=1):
        """ Setup iterator. """

        start = Sword.VerseKey('%s %s:1' % (book, chapter))
        end = Sword.VerseKey(start.clone())
        end.setVerse(end.getVerseMax())

        super(ChapterIter, self).__init__(start.getText(), end.getText())


class BookIter(VerseIter):
    """ Iterates over just one book. """

    def __init__(self, book='Genesis'):
        """ Setup iterator. """

        start = Sword.VerseKey('%s 1:1' % book)
        end = Sword.VerseKey(start.clone())
        end.setChapter(end.getChapterMax())
        end.setVerse(end.getVerseMax())

        super(BookIter, self).__init__(start.getText(), end.getText())


class IndexBible(object):
    """ Index the bible by Strong's Numbers, Morphological Tags, and words.
    """

    def __init__(self, module='KJV', path=''):
        """ Initialize the index dicts. """

        self._module_name = module
        self._path = path if path else INDEX_PATH

        # Remove morphological and strongs information.
        self._cleanup_regx = re.compile(r'\s*(<([GH]\d*)>|\{([A-Z\d-]*)\})')
        # Note removal regular expression.
        self._remove_notes_regex = re.compile(r'\s?<n>\s?(.*?)\s?</n>', re.S)
        self._remove_tags_regex = re.compile(r'<[/]?[pin]>')

        self._non_alnum_regx = re.compile(r'\W')
        self._fix_regx = re.compile(r'\s+')
        self._strongs_regx = re.compile(r'\s<([GH]\d+)>', re.I)
        self._morph_regx = re.compile(r'\s\{([\w-]+)\}', re.I)

        self._module_dict = defaultdict(list)
        # lower_case is used to store lower_case words case sensitive
        # counterpart.  _Words_ is for easy key lookup for partial words.
        self._words_set = set()
        self._strongs_set = set()
        self._morph_set = set()
        self._module_dict.update({'lower_case': defaultdict(list)})

        self._index_dict = {
            '%s_index_i' % self._module_name: self._module_dict
        }

        self._index_built = False

    def _book_gen(self):
        """ A Generator function that yields book names in order. """

        # Yield a list of all the book names in the bible.
        verse_key = Sword.VerseKey('Genesis 1:1')
        for testament in [1, 2]:
            for book in range(1, verse_key.bookCount(testament) + 1):
                yield(verse_key.bookName(testament, book))

    def _index_strongs(self, verse_ref, verse_text):
        """ Update the modules strongs dictionary from the verse text. """

        strongs_list = set(self._strongs_regx.findall(verse_text))
        for strongs_num in strongs_list:
            self._strongs_set.add(strongs_num)
            self._module_dict[strongs_num].append(verse_ref)

    def _index_morph(self, verse_ref, verse_text):
        """ Update the modules morphological dictionary from the verse text.
        """

        morph_list = set(self._morph_regx.findall(verse_text))
        for morph_num in morph_list:
            self._morph_set.add(morph_num)
            self._module_dict[morph_num].append(verse_ref)

    def _index_words(self, verse_ref, verse_text):
        """ Update the modules word dictionary from the verse text. """

        # Remove all the morphological and strongs stuff.
        clean_text = self._cleanup_regx.sub('', verse_text)
        # Remove any non-alpha-numeric stuff.
        clean_text = self._non_alnum_regx.sub(' ', clean_text)
        # Replace runs of one or more spaces with just a single space.
        clean_text = self._fix_regx.sub(' ', clean_text).strip()

        # Remove the strongs and morphological stuff in such a way that
        # split words are still split (i.e. where in, instead of wherein).
        # So there are split versions and non-split versions just to be sure
        # that the correct one is in there.
        verse_text = self._strongs_regx.sub('', verse_text)
        verse_text = self._morph_regx.sub('', verse_text)

        # Strip out all unicode so we can search correctly.
        verse_text = verse_text.encode('ascii', 'ignore')
        verse_text = verse_text.decode('ascii', 'ignore')
        verse_text = self._non_alnum_regx.sub(' ', verse_text)
        verse_text = self._fix_regx.sub(' ', verse_text).strip()

        # Include the capitalized words for case sensitive search.
        word_set = set(verse_text.split())
        word_set.update(set(clean_text.split()))

        for word in word_set:
            if word:
                self._words_set.add(word)
                self._module_dict[word].append(verse_ref)
                l_word = word.lower()
                if l_word != word:
                    # Map the lowercase word to the regular word for case
                    # insensitive searches.
                    if word not in self._module_dict['lower_case'][l_word]:
                        self._module_dict['lower_case'][l_word].append(word)

    def _index_book(self, book_name="Genesis"):
        """ Creates indexes for strongs, morphology and words. """

        book_iter = BookIter(book_name)
        verse_iter = VerseTextIter(book_iter, True, True, self._module_name,
                                   render='render_raw')

        for verse_ref, verse_text in verse_iter:
            info_print('\033[%dD\033[KIndexing...%s' %
                       (len(verse_ref) + 20, verse_ref), end='')

            # Put the entire Bible in the index, so we can pull it out
            # faster.
            self._module_dict[verse_ref] = verse_text
            # Remove the notes so we don't search them.
            verse_text = self._remove_notes_regex.sub('', verse_text)
            # Remove tags so they don't mess anything up.
            verse_text = self._remove_tags_regex.sub('', verse_text)

            # Index everything else.
            self._index_strongs(verse_ref, verse_text)
            self._index_morph(verse_ref, verse_text)
            self._index_words(verse_ref, verse_text)

    def build_index(self):
        """ Create index files of the bible for strongs numbers,
        morphological tags, and case (in)sensitive words.

        """

        info_print("Indexing %s could take a while..." % self._module_name)
        try:
            for book in self._book_gen():
                self._index_book(book)
        except:
            pass
        self._module_dict['_words_'].extend(self._words_set)
        self._module_dict['_strongs_'].extend(self._strongs_set)
        self._module_dict['_morph_'].extend(self._morph_set)

        info_print('\nDone.')

        self._index_built = True

    def write_index(self):
        """ Write all the index dictionaries to their respective files.  If
        Any of the dictionaries is empty, then build the index.

        The indexes are just json-ed dictionaries.  The keys are the indexed
        items and the values are the verse references that contain the key.

        """

        if not self._index_built:
            self.build_index()
        # Build the index if it's not already built.
        for name, dic in self._index_dict.items():
            info_print("Writing %s.dbm..." % name)
            # Save as just a plain text file.  Has to be loaded all at once,
            # so it is really slow.
            #with open(name, 'w') as index_file:
                #json.dump(dic, index_file, indent=4)
        #return
            # Save a dbm database that we can access without loading it all
            # into memory so it is fast.
            dbm_name = '%s/%s.dbm' % (self._path, name)
            with IndexDbm(dbm_name, 'nf') as index_file:
                #with open(name, 'r') as i_file:
                    #dic = json.load(i_file)
                index_file.update(dic)

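As a usage sketch for the classes above (hypothetical, not part of the module: it assumes a working Sword installation with the KJV module available, and 'John 3:16-17' is an arbitrary example reference), the range helpers and Lookup can be combined like this:

if __name__ == '__main__':
    # Expand a reference range, add one verse of context on each side,
    # and print each verse with the first 60 characters of its text.
    refs = add_context(parse_verse_range(['John 3:16-17']), count=1)
    lookup = Lookup(module_name='KJV')
    for ref in sorted(refs, key=sort_key):
        print(ref, lookup.get_text(ref)[:60])
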
buildSoftBenchmark.js

/**
 * node buildHardBenchmark.js ./wiki_benchmark ./soft_benchmark
 *
 * nohup node buildSoftBenchmark.js ./soft_benchmark > buildSoftBenchmark.out 2>&1&
 */

var fs = require('fs');
var csv = require('csv');

//var outputDir = process.argv[2];
var targetBenchmarkDir = process.argv[2];

var surfaceFormsScores = [];
var alreadyBenchmarked = {};

var repeatingParagraphs = 0;
var nonRepeatingParagraphs = 0;
var writtenBenchParagraph = 0;
var alreadyBenchmarkedParagraph = 0;

function produceSoftBenchmark(paragraphs) {
    console.log("===============================");
    console.log("Writing benchmark files");
    console.log(paragraphs);
    console.log("===============================");
    console.log("===============================");

    csv().from(fs.createReadStream(__dirname + '/../../csv/encsv/paragraph.csv'), {
        delimiter: ',',
        escape: '"',
        relax: true
    })
    .on('record', function(csvRow, index){
        var paragraphId = csvRow[0];
        var paragraphText = csvRow[1];

        if (paragraphs[paragraphId] && !alreadyBenchmarked[paragraphs[paragraphId].surfaceForm + "##$##" + paragraphs[paragraphId].entity]) {
//            if (paragraphId == "344210#32")
//                console.log("PARAGRAPH!! 344210#32 %j", paragraphs[paragraphId]);
            fs.writeFileSync(targetBenchmarkDir + "/paragraph__" + paragraphId + "__" + paragraphs[paragraphId].score, paragraphText);
            fs.writeFileSync(targetBenchmarkDir + "/golden_standard__" + paragraphId, paragraphs[paragraphId].surfaceForm + "\n" + paragraphs[paragraphId].entity);
            alreadyBenchmarked[paragraphs[paragraphId].surfaceForm + "##$##" + paragraphs[paragraphId].entity] = 1;
            writtenBenchParagraph++;
        } else if (paragraphs[paragraphId]) {
            alreadyBenchmarkedParagraph++;
        }
    })
    .on('parse_error', function(row){
        console.log("Parsing error %j", row);
        return row.split(',');
    })
    .on('end', function(count) {
        console.log('Read all lines: ' + count);
        console.log("Repeating paragraphs " + repeatingParagraphs);
        console.log("Non repeating paragraphs " + nonRepeatingParagraphs);
        console.log("Written paragraphs " + writtenBenchParagraph);
        console.log("Already benchmarked paragraphs " + alreadyBenchmarkedParagraph);
    }).on('error', function(error) {
        console.log("ERROR: " + error.message);
        process.exit(1);
    });
}

var processedParagrahs = 0;

function collectSoftParagraphs(surfaceFormMap) {
    console.log("Collecting benchmark paragraphs");

    var paragraphs = {};
    csv().from(fs.createReadStream(__dirname + '/../../csv/encsv/cleanedSorted/sortBySurfaceForms.csv'), {
        delimiter: ',',
        escape: '"',
        relax: true
    })
    .on('record', function(csvRow, index){
        var surfaceForm = csvRow[1];
        var entity = csvRow[0].replace("http://en.wikipedia.org/wiki/", "");
        var paragraph = csvRow[2];

        if ((paragraph == "344210#32")){ //|| (entity == "Great_North_Road_(Great_Britain)")) {
            console.log("PARAGRAPH 344210#32!!");
            console.log("csvRow %j", csvRow);
        }

        if (surfaceFormMap[surfaceForm + "###" + entity]) {
            if (paragraphs[paragraph]) {
                console.log("Repeating paragraph " + paragraph);
                repeatingParagraphs++;
            } else {
                nonRepeatingParagraphs++;
            }

            paragraphs[paragraph] = JSON.parse(JSON.stringify(surfaceFormMap[surfaceForm + "###" + entity]));
            paragraphs[paragraph].entity = entity;

            if ((paragraph == "344210#32")){ //|| (entity == "Great_North_Road_(Great_Britain)")) {
                console.log("PARAGRAPH 344210#32!! Storing...");
                console.log(paragraphs[paragraph]);
//                console.log(paragraphs);
//                console.log("===$$===");
            }
        }

        if (((++processedParagrahs) % 50000) == 0) {
            console.log("Processed mentions: " + processedParagrahs);
        }
    })
    .on('parse_error', function(row){
        console.log("Parsing error %j", row);
        return row.split(',');
    })
    .on('end', function(count) {
        console.log("Paragraphs collected.");
        console.log(paragraphs);
        produceSoftBenchmark(paragraphs);
    }).on('error', function(error) {
        console.log("ERROR: " + error.message);
        process.exit(1);
    });
}

csv().from(fs.createReadStream(__dirname + '/../../csv/encsv/hardSurfaceForms.csv'), {
//csv().from(fs.createReadStream(__dirname + '/test/hardSurfaceForm.csv'), {
    delimiter: ',',
    escape: '"',
    relax: true
})
.on('record', function(csvRow, index){
    if (index > 0) {
        var surfaceForm = csvRow[0];
        var candidates = csvRow[1];
        var candidatesArr = candidates.split(";");
        var candidatesCounts = [];
        for (var i in candidatesArr) {
            if (candidatesArr[i]) {
                var candidate = candidatesArr[i].split(" ");
                if (candidate[1]) {
                    candidatesCounts.push({candidate: candidate[0], occurrences: candidate[1].match(/\[(.*)\]/)[1]});
                }
            }
        }

        candidatesCounts = candidatesCounts.sort(function(candidateA, candidateB) {
            var a = parseInt(candidateA.occurrences);
            var b = parseInt(candidateB.occurrences);
            if (a < b)
                return 1;
            else if (a == b)
                return 0;
            else
                return -1;
        });

//        console.log(candidatesCounts);

        surfaceFormsScores.push({
            surfaceForm: surfaceForm,
            score: parseFloat(candidatesCounts[0].occurrences) / parseFloat(candidatesCounts[1].occurrences),
            candidateA: candidatesCounts[0].candidate,
            candidateB: candidatesCounts[1].candidate
        });
    }
})
.on('parse_error', function(row){
    console.log("Parsing error %j", row);
    return row.split(',');
})
.on('end', function(count) {
    console.log('Read all lines: ' + count);

    surfaceFormsScores.sort(function(sfA, sfB) {
        if (sfA.score > sfB.score)
            return 1;
        else if (sfA.score == sfB.score)
            return 0;
        else
            return -1;
    });

    surfaceFormsScores = surfaceFormsScores.slice(0, 199);

    console.log("Prepared surface forms");
    console.log(surfaceFormsScores);

    var surfaceFormMap = {};
    for (var i in surfaceFormsScores) {
        surfaceFormMap[surfaceFormsScores[i].surfaceForm + "###" + surfaceFormsScores[i].candidateA] = surfaceFormsScores[i];
        surfaceFormMap[surfaceFormsScores[i].surfaceForm + "###" + surfaceFormsScores[i].candidateB] = surfaceFormsScores[i];
        surfaceFormMap[surfaceFormsScores[i].surfaceForm + "#A#" + surfaceFormsScores[i].candidateA] = surfaceFormsScores[i];
        surfaceFormMap[surfaceFormsScores[i].surfaceForm + "#B#" + surfaceFormsScores[i].candidateB] = surfaceFormsScores[i];
    }

//    console.log(surfaceFormMap);

    collectSoftParagraphs(surfaceFormMap);
}).on('error', function(error) {
    console.log("ERROR: " + error.message);
    process.exit(1);
});

<|file_name|>is_vertex_manifold.cpp<|end_file_name|><|fim▁begin|>// This file is part of libigl, a simple c++ geometry processing library. // // Copyright (C) 2015 Alec Jacobson <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla Public License // v. 2.0. If a copy of the MPL was not distributed with this file, You can // obtain one at http://mozilla.org/MPL/2.0/. #include "is_vertex_manifold.h" #include "triangle_triangle_adjacency.h" #include "vertex_triangle_adjacency.h" #include "unique.h" #include <vector> #include <cassert> #include <map> #include <queue> #include <iostream> template <typename DerivedF,typename DerivedB> IGL_INLINE bool igl::is_vertex_manifold( const Eigen::PlainObjectBase<DerivedF>& F, Eigen::PlainObjectBase<DerivedB>& B) { using namespace std; using namespace Eigen; assert(F.cols() == 3 && "F must contain triangles"); typedef typename DerivedF::Scalar Index; typedef typename DerivedF::Index FIndex; const FIndex m = F.rows(); const Index n = F.maxCoeff()+1; vector<vector<vector<FIndex > > > TT; vector<vector<vector<FIndex > > > TTi; triangle_triangle_adjacency(F,TT,TTi); vector<vector<FIndex > > V2F,_1; vertex_triangle_adjacency(n,F,V2F,_1); const auto & check_vertex = [&](const Index v)->bool { vector<FIndex> uV2Fv; { vector<size_t> _1,_2; unique(V2F[v],uV2Fv,_1,_2); } const FIndex one_ring_size = uV2Fv.size(); if(one_ring_size == 0) { return false; } const FIndex g = uV2Fv[0]; queue<Index> Q; Q.push(g); map<FIndex,bool> seen; while(!Q.empty()) { const FIndex f = Q.front(); Q.pop(); if(seen.count(f)==1) { continue; } seen[f] = true; // Face f's neighbor lists opposite opposite each corner for(const auto & c : TT[f])<|fim▁hole|> { bool contains_v = false; for(Index nc = 0;nc<F.cols();nc++) { if(F(n,nc) == v) { contains_v = true; break; } } if(seen.count(n)==0 && contains_v) { Q.push(n); } } } } return one_ring_size == (FIndex) seen.size(); }; // Unreferenced vertices are considered non-manifold B.setConstant(n,1,false); // Loop over all vertices touched by F bool all = true; for(Index v = 0;v<n;v++) { all &= B(v) = check_vertex(v); } return all; } #ifdef IGL_STATIC_LIBRARY template bool igl::is_vertex_manifold<Eigen::Matrix<int, -1, -1, 0, -1, -1>, Eigen::Matrix<int, -1, 1, 0, -1, 1> >(Eigen::PlainObjectBase<Eigen::Matrix<int, -1, -1, 0, -1, -1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, 1, 0, -1, 1> >&); #endif<|fim▁end|>
{ // Each neighbor for(const auto & n : c)
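For reference, a minimal Python sketch of the one-ring test performed by the C++ above. This is not the libigl API, just the underlying idea: a vertex is manifold when its incident triangles form a single edge-connected fan.

from collections import deque

def is_vertex_manifold(faces, v):
    incident = [f for f in faces if v in f]
    if not incident:
        return False  # unreferenced vertices count as non-manifold
    def shares_v_edge(f, g):
        # Two triangles containing v are edge-adjacent around v iff they
        # share one more vertex w, i.e. both contain the edge (v, w).
        return len((set(f) & set(g)) - {v}) >= 1
    seen = {0}
    queue = deque([0])
    while queue:
        i = queue.popleft()
        for j in range(len(incident)):
            if j not in seen and shares_v_edge(incident[i], incident[j]):
                seen.add(j)
                queue.append(j)
    return len(seen) == len(incident)

F = [(0, 1, 2), (0, 2, 3), (0, 3, 1)]
print(is_vertex_manifold(F, 0))                   # True: a single closed fan
print(is_vertex_manifold([(0, 1, 2), (0, 3, 4)], 0))  # False: two fans meet only at v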
<|file_name|>LogSettings.java<|end_file_name|><|fim▁begin|>/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. */ package com.microsoft.azure.management.monitor; import com.fasterxml.jackson.annotation.JsonProperty; /** * Part of MultiTenantDiagnosticSettings. Specifies the settings for a * particular log. */ public class LogSettings { /** * Name of a Diagnostic Log category for a resource type this setting is * applied to. To obtain the list of Diagnostic Log categories for a * resource, first perform a GET diagnostic settings operation. */ @JsonProperty(value = "category") private String category; /** * a value indicating whether this log is enabled. */ @JsonProperty(value = "enabled", required = true) private boolean enabled; /** * the retention policy for this log. */ @JsonProperty(value = "retentionPolicy")<|fim▁hole|> * * @return the category value */ public String category() { return this.category; } /** * Set the category value. * * @param category the category value to set * @return the LogSettings object itself. */ public LogSettings withCategory(String category) { this.category = category; return this; } /** * Get the enabled value. * * @return the enabled value */ public boolean enabled() { return this.enabled; } /** * Set the enabled value. * * @param enabled the enabled value to set * @return the LogSettings object itself. */ public LogSettings withEnabled(boolean enabled) { this.enabled = enabled; return this; } /** * Get the retentionPolicy value. * * @return the retentionPolicy value */ public RetentionPolicy retentionPolicy() { return this.retentionPolicy; } /** * Set the retentionPolicy value. * * @param retentionPolicy the retentionPolicy value to set * @return the LogSettings object itself. */ public LogSettings withRetentionPolicy(RetentionPolicy retentionPolicy) { this.retentionPolicy = retentionPolicy; return this; } }<|fim▁end|>
private RetentionPolicy retentionPolicy; /** * Get the category value.
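For orientation, the JSON shape this class maps to, following the @JsonProperty annotations above. The nested retention-policy field names are assumptions, shown here as a Python literal:

import json

log_setting = {
    "category": "AuditLogs",     # a Diagnostic Log category for the resource
    "enabled": True,             # required: whether this log is collected
    "retentionPolicy": {         # serialized RetentionPolicy object
        "enabled": True,
        "days": 30,              # assumed field name on RetentionPolicy
    },
}
print(json.dumps(log_setting, indent=2))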
<|file_name|>pivot_switcher.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ oyPivotSwitcher.py by Erkan Ozgur Yilmaz (c) 2009 v10.5.17 Description : ------------- A tool for easy animating of switching of pivots Version History : ----------------- v10.5.17 - modifications for Maya 2011 and PyMel 1.0.2 v9.12.25 - removed oyAxialCorrectionGroup script import - moved to new versioning scheme v1.0.1 - setup check: now the objects pivot attributes are checked for safe setup v1.0.0 - initial working version v1.0.0.preAlpha - development version TODO List : ----------- ---------------------------------------------------------------------------- """ __version__ = "10.5.17" import pymel.core as pm from anima.dcc.mayaEnv import auxiliary class PivotSwitcher(object): """A utility class to help dynamically switch pivot positions in maya""" def __init__(self, _object): # the object self._object = auxiliary.get_valid_dag_node(_object) assert isinstance(self._object, pm.nodetypes.Transform) # the data self._futurePivot = pm.nodetypes.Transform self._isSetup = False # read the settings self._read_settings() def _read_settings(self): """reads the settings from the objects pivotData attribute""" # check if the object has pivotData attribute if self._object.hasAttr("pivotData"): # get the future pivot object self._futurePivot = auxiliary.get_valid_dag_node( pm.listConnections(self._object.attr("pivotData.futurePivot"))[0] ) # set isSetup flag to True self._isSetup = True return True return False def _save_settings(self): """save settings inside objects pivotData attribute""" # data to be save : # ----------------- # futurePivot node # create attributes self._create_data_attribute() # connect futurePivot node pm.connectAttr( "%s%s" % (self._futurePivot.name(), ".message"), self._object.attr("pivotData.futurePivot"), f=True, ) def _create_data_attribute(self): """creates attribute in self._object to hold the data""" if not self._object.hasAttr("pivotData"): pm.addAttr(self._object, ln="pivotData", at="compound", nc=1) if not self._object.hasAttr("futurePivot"): pm.addAttr(self._object, ln="futurePivot", at="message", p="pivotData") def _create_future_pivot(self): """creates the futurePivot locator""" if self._isSetup: return # create a locator and move it to the current pivot # parent the locator under the object locator_name = self._object.name() + "_futurePivotLocator#" self._futurePivot = auxiliary.get_valid_dag_node( pm.spaceLocator(n=locator_name) ) pm.parent(self._futurePivot, self._object) current_pivot_pos = pm.xform(self._object, q=True, ws=True, piv=True) pm.xform(self._futurePivot, ws=True, t=current_pivot_pos[0:3]) # change the color self._futurePivot.setAttr("overrideEnabled", 1) self._futurePivot.setAttr("overrideColor", 13) # set translate and visibility to non-keyable self._futurePivot.setAttr("tx", k=False, channelBox=True) self._futurePivot.setAttr("ty", k=False, channelBox=True) self._futurePivot.setAttr("tz", k=False, channelBox=True) self._futurePivot.setAttr("v", k=False, channelBox=True) # lock scale and rotate self._futurePivot.setAttr("rx", lock=True, k=False, channelBox=False) self._futurePivot.setAttr("ry", lock=True, k=False, channelBox=False) self._futurePivot.setAttr("rz", lock=True, k=False, channelBox=False) self._futurePivot.setAttr("sx", lock=True, k=False, channelBox=False) self._futurePivot.setAttr("sy", lock=True, k=False, channelBox=False) self._futurePivot.setAttr("sz", lock=True, k=False, channelBox=False) # hide it self._futurePivot.setAttr("v", 0) def 
setup(self): """setups specified object for pivot switching""" # if it is setup before, don't do anything if self._isSetup: return if not self.is_good_for_setup(): pm.PopupError( "the objects pivots are connected to something\n" "THE OBJECT CANNOT BE SETUP!!!" ) return # create the parent constraint self._create_future_pivot() # create attributes for data holding self._create_data_attribute() # save the settings self._save_settings() self._isSetup = True def toggle(self): """toggles pivot visibility""" if not self._isSetup: return # toggle the pivot visibility current_vis = self._futurePivot.getAttr("v") current_vis = (current_vis + 1) % 2 self._futurePivot.setAttr("v", current_vis) def switch(self): """switches the pivot to the futurePivot""" if not self._isSetup: return # get the current frame frame = pm.currentTime(q=True) # get the current position of the object current_object_pos = pm.xform(self._object, q=True, ws=True, t=True) current_pivot_pos = pm.xform(self._object, q=True, ws=True, piv=True)<|fim▁hole|> displacement = ( future_pivot_pos[0] - current_pivot_pos[0], future_pivot_pos[1] - current_pivot_pos[1], future_pivot_pos[2] - current_pivot_pos[2], ) # move the pivot to the future_pivot pm.xform(self._object, ws=True, piv=future_pivot_pos[0:3]) # set keyframes pm.setKeyframe(self._object, at="rotatePivotX", t=frame, ott="step") pm.setKeyframe(self._object, at="rotatePivotY", t=frame, ott="step") pm.setKeyframe(self._object, at="rotatePivotZ", t=frame, ott="step") pm.setKeyframe(self._object, at="scalePivotX", t=frame, ott="step") pm.setKeyframe(self._object, at="scalePivotY", t=frame, ott="step") pm.setKeyframe(self._object, at="scalePivotZ", t=frame, ott="step") # set pivot translations self._object.setAttr("rotatePivotTranslate", -1 * displacement) self._object.setAttr("scalePivotTranslate", -1 * displacement) # set keyframes pm.setKeyframe(self._object, at="rotatePivotTranslateX", t=frame, ott="step") pm.setKeyframe(self._object, at="rotatePivotTranslateY", t=frame, ott="step") pm.setKeyframe(self._object, at="rotatePivotTranslateZ", t=frame, ott="step") pm.setKeyframe(self._object, at="scalePivotTranslateX", t=frame, ott="step") pm.setKeyframe(self._object, at="scalePivotTranslateY", t=frame, ott="step") pm.setKeyframe(self._object, at="scalePivotTranslateZ", t=frame, ott="step") def _set_dg_dirty(self): """sets the DG to dirty for _object, currentPivot and futurePivot""" pm.dgdirty(self._object, self._futurePivot) def fix_jump(self): """fixes the jumps after editing the keyframes""" pass def is_good_for_setup(self): """checks if the objects rotatePivot, scalePivot, rotatePivotTranslate and scalePivotTranslate is not connected to anything """ attributes = [ "rotatePivot", "scalePivot", "rotatePivotTranslate", "scalePivotTranslate", ] for attrStr in attributes: connections = self._object.attr(attrStr).connections() if len(connections) > 0: return False return True def get_one_switcher(): """returns a generator that generates a PivotSwitcher object for every transform node in the selection """ for node in pm.ls(sl=True): try: node = auxiliary.get_valid_dag_node(node) if node.type() == "transform": my_pivot_switcher = PivotSwitcher(node) yield my_pivot_switcher except TypeError: pass def setup_pivot(): """setups pivot switching for selected objects""" for piv_switcher in get_one_switcher(): piv_switcher.setup() def switch_pivot(): """switches pivot for selected objects""" for piv_switcher in get_one_switcher(): piv_switcher.switch() def toggle_pivot(): """toggles pivot 
visibility for selected objects"""
    for piv_switcher in get_one_switcher():
        piv_switcher.toggle()<|fim▁end|>
future_pivot_pos = pm.xform(self._futurePivot, q=True, ws=True, t=True)
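A hedged usage sketch of the module-level helpers defined above, as they might be driven inside a Maya session. The import path and the object name pCube1 are assumptions:

import pymel.core as pm
from anima.dcc.mayaEnv import pivot_switcher  # assumed import path

pm.select("pCube1")            # pick the animated transform
pivot_switcher.setup_pivot()   # adds the futurePivot locator + pivotData attrs
pivot_switcher.toggle_pivot()  # unhide the locator and position it as needed
pm.currentTime(42)             # jump to the frame where the pivot should change
pivot_switcher.switch_pivot()  # keys the pivots plus the translate compensation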
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from google.oauth2 import service_account from ..config import config cred_file = config.get("GoogleCloud", "CredentialPath") credentials = service_account.Credentials.from_service_account_file(cred_file) scoped_credentials = credentials.with_scopes([ 'https://www.googleapis.com/auth/devstorage.read_write'<|fim▁hole|><|fim▁end|>
])
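A hedged sketch of one way the scoped credentials above might be consumed; it assumes the google-cloud-storage package, and the project and bucket names are placeholders:

from google.cloud import storage

client = storage.Client(project="my-project", credentials=scoped_credentials)
bucket = client.bucket("my-bucket")
blob = bucket.blob("results/output.txt")
blob.upload_from_string("hello")  # the read_write scope permits uploads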
<|file_name|>bigip_device_dns.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: bigip_device_dns short_description: Manage BIG-IP device DNS settings description: - Manage BIG-IP device DNS settings version_added: "2.2" options: cache: description: - Specifies whether the system caches DNS lookups or performs the operation each time a lookup is needed. Please note that this applies only to Access Policy Manager features, such as ACLs, web application rewrites, and authentication. default: disable choices: - enabled - disabled name_servers: description: - A list of name servers that the system uses to validate DNS lookups forwarders: deprecated: Deprecated in 2.4. Use the GUI or edit named.conf. description: - A list of BIND servers that the system can use to perform DNS lookups search: description: - A list of domains that the system searches for local domain lookups, to resolve local host names. ip_version: description: - Specifies whether the DNS specifies IP addresses using IPv4 or IPv6. choices: - 4 - 6 state: description: - The state of the variable on the system. When C(present), guarantees that an existing variable is set to C(value). default: present choices: - absent - present notes: - Requires the f5-sdk Python package on the host. This is as easy as pip install f5-sdk. extends_documentation_fragment: f5 requirements: - f5-sdk author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Set the DNS settings on the BIG-IP bigip_device_dns: name_servers: - 208.67.222.222 - 208.67.220.220 search: - localdomain - lab.local password: secret server: lb.mydomain.com user: admin validate_certs: no delegate_to: localhost ''' RETURN = r''' cache: description: The new value of the DNS caching returned: changed type: string sample: enabled name_servers: description: List of name servers that were set returned: changed type: list sample: ['192.0.2.10', '172.17.12.10'] search: description: List of search domains that were set returned: changed type: list sample: ['192.0.2.10', '172.17.12.10'] ip_version: description: IP version that was set that DNS will specify IP addresses in returned: changed type: int sample: 4 warnings: description: The list of warnings (if any) generated by module based on arguments returned: always type: list sample: ['...', '...'] ''' from ansible.module_utils.f5_utils import AnsibleF5Client from ansible.module_utils.f5_utils import AnsibleF5Parameters from ansible.module_utils.f5_utils import HAS_F5SDK from ansible.module_utils.f5_utils import F5ModuleError try: from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False class Parameters(AnsibleF5Parameters): api_map = { 'dhclient.mgmt': 'dhcp', 'dns.cache': 'cache', 'nameServers': 'name_servers', 'include': 'ip_version' } api_attributes = [ 'nameServers', 'search', 'include' ] updatables = [ 'cache', 'name_servers', 'search', 'ip_version' ] returnables = [ 'cache', 'name_servers', 'search', 'ip_version' ] absentables = [ 'name_servers', 'search' ] def to_return(self): result = {} for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) 
return result def api_params(self): result = {} for api_attribute in self.api_attributes: if self.api_map is not None and api_attribute in self.api_map: result[api_attribute] = getattr(self, self.api_map[api_attribute]) else: result[api_attribute] = getattr(self, api_attribute) result = self._filter_params(result) return result @property def search(self): result = [] if self._values['search'] is None: return None for server in self._values['search']: result.append(str(server)) return result @property def name_servers(self): result = [] if self._values['name_servers'] is None: return None for server in self._values['name_servers']: result.append(str(server)) return result @property def cache(self): if str(self._values['cache']) in ['enabled', 'enable']: return 'enable' else: return 'disable' @property def dhcp(self): valid = ['enable', 'enabled'] return True if self._values['dhcp'] in valid else False @property def forwarders(self): if self._values['forwarders'] is None: return None else: raise F5ModuleError( "The modifying of forwarders is not supported." ) @property def ip_version(self): if self._values['ip_version'] in [6, '6', 'options inet6']: return "options inet6" elif self._values['ip_version'] in [4, '4', '']: return "" else: return None class ModuleManager(object): def __init__(self, client): self.client = client self.have = None self.want = Parameters(self.client.module.params) self.changes = Parameters() def _update_changed_options(self): changed = {} for key in Parameters.updatables: if getattr(self.want, key) is not None: attr1 = getattr(self.want, key) attr2 = getattr(self.have, key) if attr1 != attr2: changed[key] = attr1 if changed: self.changes = Parameters(changed) return True return False def exec_module(self): changed = False result = dict() state = self.want.state try: if state == "present": changed = self.update() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def read_current_from_device(self): want_keys = ['dns.cache'] result = dict() dbs = self.client.api.tm.sys.dbs.get_collection() for db in dbs: if db.name in want_keys: result[db.name] = db.value dns = self.client.api.tm.sys.dns.load() attrs = dns.attrs if 'include' not in attrs: attrs['include'] = 4 result.update(attrs) return Parameters(result) def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.client.check_mode: return True self.update_on_device() return True def should_update(self): result = self._update_changed_options() if result: return True return False def update_on_device(self): params = self.want.api_params() tx = self.client.api.tm.transactions.transaction with BigIpTxContext(tx) as api: cache = api.tm.sys.dbs.db.load(name='dns.cache') dns = api.tm.sys.dns.load() # Empty values can be supplied, but you cannot supply the # None value, so we check for that specifically if self.want.cache is not None: cache.update(value=self.want.cache) if params: dns.update(**params) def _absent_changed_options(self): changed = {} for key in Parameters.absentables: if getattr(self.want, key) is not None: set_want = set(getattr(self.want, key)) set_have = set(getattr(self.have, key)) set_new = set_have - set_want if set_new != set_have: changed[key] = list(set_new) if changed: self.changes = Parameters(changed) return True return False def should_absent(self): result = 
self._absent_changed_options() if result: return True return False def absent(self): self.have = self.read_current_from_device() if not self.should_absent(): return False if self.client.check_mode: return True self.absent_on_device() return True def absent_on_device(self): params = self.changes.api_params() tx = self.client.api.tm.transactions.transaction with BigIpTxContext(tx) as api: dns = api.tm.sys.dns.load() dns.update(**params) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True self.argument_spec = dict( cache=dict( required=False, choices=['disabled', 'enabled', 'disable', 'enable'], default=None ), name_servers=dict( required=False, default=None, type='list' ), forwarders=dict( required=False,<|fim▁hole|> default=None, type='list' ), search=dict( required=False, default=None, type='list' ), ip_version=dict( required=False, default=None, choices=[4, 6], type='int' ), state=dict( required=False, default='present', choices=['absent', 'present'] ) ) self.required_one_of = [ ['name_servers', 'search', 'forwarders', 'ip_version', 'cache'] ] self.f5_product_name = 'bigip' def main(): if not HAS_F5SDK: raise F5ModuleError("The python f5-sdk module is required") spec = ArgumentSpec() client = AnsibleF5Client( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, f5_product_name=spec.f5_product_name, required_one_of=spec.required_one_of ) try: mm = ModuleManager(client) results = mm.exec_module() client.module.exit_json(**results) except F5ModuleError as e: client.module.fail_json(msg=str(e)) if __name__ == '__main__': main()<|fim▁end|>
<|file_name|>retinanet_model.py<|end_file_name|><|fim▁begin|># Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model defination for the RetinaNet Model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from absl import logging import tensorflow.compat.v2 as tf from tensorflow.python.keras import backend from official.vision.detection.dataloader import mode_keys from official.vision.detection.evaluation import factory as eval_factory from official.vision.detection.modeling import base_model from official.vision.detection.modeling import losses from official.vision.detection.modeling.architecture import factory from official.vision.detection.ops import postprocess_ops class RetinanetModel(base_model.Model): """RetinaNet model function.""" def __init__(self, params): super(RetinanetModel, self).__init__(params) # For eval metrics. self._params = params # Architecture generators. self._backbone_fn = factory.backbone_generator(params) self._fpn_fn = factory.multilevel_features_generator(params) self._head_fn = factory.retinanet_head_generator(params.retinanet_head) # Loss function. self._cls_loss_fn = losses.RetinanetClassLoss(params.retinanet_loss) self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss) self._box_loss_weight = params.retinanet_loss.box_loss_weight self._keras_model = None # Predict function. self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator( params.postprocess) self._transpose_input = params.train.transpose_input assert not self._transpose_input, 'Transpose input is not supportted.' # Input layer. input_shape = ( params.retinanet_parser.output_size + [params.retinanet_parser.num_channels]) self._input_layer = tf.keras.layers.Input( shape=input_shape, name='', dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32) def build_outputs(self, inputs, mode): # If the input image is transposed (from NHWC to HWCN), we need to revert it # back to the original shape before it's used in the computation. 
if self._transpose_input: inputs = tf.transpose(inputs, [3, 0, 1, 2]) backbone_features = self._backbone_fn( inputs, is_training=(mode == mode_keys.TRAIN)) fpn_features = self._fpn_fn( backbone_features, is_training=(mode == mode_keys.TRAIN)) cls_outputs, box_outputs = self._head_fn( fpn_features, is_training=(mode == mode_keys.TRAIN)) if self._use_bfloat16: levels = cls_outputs.keys() for level in levels: cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32) box_outputs[level] = tf.cast(box_outputs[level], tf.float32) model_outputs = { 'cls_outputs': cls_outputs, 'box_outputs': box_outputs, } return model_outputs def build_loss_fn(self): if self._keras_model is None: raise ValueError('build_loss_fn() must be called after build_model().') filter_fn = self.make_filter_trainable_variables_fn() trainable_variables = filter_fn(self._keras_model.trainable_variables) def _total_loss_fn(labels, outputs): cls_loss = self._cls_loss_fn(outputs['cls_outputs'],<|fim▁hole|> labels['box_targets'], labels['num_positives']) model_loss = cls_loss + self._box_loss_weight * box_loss l2_regularization_loss = self.weight_decay_loss(self._l2_weight_decay, trainable_variables) total_loss = model_loss + l2_regularization_loss return { 'total_loss': total_loss, 'cls_loss': cls_loss, 'box_loss': box_loss, 'model_loss': model_loss, 'l2_regularization_loss': l2_regularization_loss, } return _total_loss_fn def build_model(self, params, mode=None): if self._keras_model is None: with backend.get_graph().as_default(): outputs = self.model_outputs(self._input_layer, mode) model = tf.keras.models.Model( inputs=self._input_layer, outputs=outputs, name='retinanet') assert model is not None, 'Fail to build tf.keras.Model.' model.optimizer = self.build_optimizer() self._keras_model = model return self._keras_model def post_processing(self, labels, outputs): # TODO(yeqing): Moves the output related part into build_outputs. required_output_fields = ['cls_outputs', 'box_outputs'] for field in required_output_fields: if field not in outputs: raise ValueError('"%s" is missing in outputs, requried %s found %s', field, required_output_fields, outputs.keys()) required_label_fields = ['image_info', 'groundtruths'] for field in required_label_fields: if field not in labels: raise ValueError('"%s" is missing in outputs, requried %s found %s', field, required_label_fields, labels.keys()) boxes, scores, classes, valid_detections = self._generate_detections_fn( outputs['box_outputs'], outputs['cls_outputs'], labels['anchor_boxes'], labels['image_info'][:, 1:2, :]) # Discards the old output tensors to save memory. The `cls_outputs` and # `box_outputs` are pretty big and could potentiall lead to memory issue. outputs = { 'source_id': labels['groundtruths']['source_id'], 'image_info': labels['image_info'], 'num_detections': valid_detections, 'detection_boxes': boxes, 'detection_classes': classes, 'detection_scores': scores, } if 'groundtruths' in labels: labels['source_id'] = labels['groundtruths']['source_id'] labels['boxes'] = labels['groundtruths']['boxes'] labels['classes'] = labels['groundtruths']['classes'] labels['areas'] = labels['groundtruths']['areas'] labels['is_crowds'] = labels['groundtruths']['is_crowds'] return labels, outputs def eval_metrics(self): return eval_factory.evaluator_generator(self._params.eval)<|fim▁end|>
labels['cls_targets'], labels['num_positives']) box_loss = self._box_loss_fn(outputs['box_outputs'],
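The arithmetic performed by _total_loss_fn above, restated with made-up numbers so the weighting is easy to follow:

cls_loss = 0.85                # from RetinanetClassLoss
box_loss = 0.12                # from RetinanetBoxLoss
box_loss_weight = 50.0         # params.retinanet_loss.box_loss_weight
l2_regularization_loss = 0.03  # weight decay over trainable variables

model_loss = cls_loss + box_loss_weight * box_loss
total_loss = model_loss + l2_regularization_loss
print(round(model_loss, 4), round(total_loss, 4))  # 6.85 6.88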
<|file_name|>event_confirm_registration.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from osv import fields, osv from tools.translate import _ class event_confirm_registration(osv.osv_memory): """ Confirm Event Registration """ _name = "event.confirm.registration" _description = "Confirmation for Event Registration" _columns = { 'msg': fields.text('Message', readonly=True), } _defaults = { 'msg': 'The event limit is reached. What do you want to do?' } <|fim▁hole|> def default_get(self, cr, uid, fields, context=None): """ This function gets default values """ if context is None: context = {} registration_pool = self.pool.get('event.registration') registration_ids = context.get('registration_ids', []) res = super(event_confirm_registration, self).default_get(cr, uid, fields, context=context) msg = "" overlimit_event_ids = [] for registration in registration_pool.browse(cr, uid, registration_ids, context=context): register_max = registration.event_id.register_max if registration.event_id.id not in overlimit_event_ids: overlimit_event_ids.append(registration.event_id.id) msg += _("Warning: The Event '%s' has reached its Maximum Limit (%s).") \ %(registration.event_id.name, register_max) if 'msg' in fields: res.update({'msg': msg}) return res def confirm(self, cr, uid, ids, context=None): if context is None: context = {} registration_pool = self.pool.get('event.registration') registration_ids = context.get('registration_ids', []) registration_pool.do_open(cr, uid, registration_ids, context=context) return {'type': 'ir.actions.act_window_close'} event_confirm_registration() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:<|fim▁end|>
<|file_name|>syslog_plugin.go<|end_file_name|><|fim▁begin|>package command import ( "os" "strings" "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/client/driver" ) <|fim▁hole|>type SyslogPluginCommand struct { Meta } func (e *SyslogPluginCommand) Help() string { helpText := ` This is a command used by Nomad internally to launch a syslog collector" ` return strings.TrimSpace(helpText) } func (s *SyslogPluginCommand) Synopsis() string { return "internal - lanch a syslog collector plugin" } func (s *SyslogPluginCommand) Run(args []string) int { if len(args) == 0 { s.Ui.Error("log output file isn't provided") } logFileName := args[0] stdo, err := os.OpenFile(logFileName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666) if err != nil { s.Ui.Error(err.Error()) return 1 } plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: driver.HandshakeConfig, Plugins: driver.GetPluginMap(stdo), }) return 0 }<|fim▁end|>
<|file_name|>UserDirBlock.py<|end_file_name|><|fim▁begin|>import time from Block import Block from ..ProtectFlags import ProtectFlags class UserDirBlock(Block): def __init__(self, blkdev, blk_num): Block.__init__(self, blkdev, blk_num, is_type=Block.T_SHORT, is_sub_type=Block.ST_USERDIR) def set(self, data): self._set_data(data)<|fim▁hole|> def read(self): self._read_data() self._read() def _read(self): Block.read(self) if not self.valid: return False # UserDir fields self.own_key = self._get_long(1) self.protect = self._get_long(-48) self.comment = self._get_bstr(-46, 79) self.mod_ts = self._get_timestamp(-23) self.name = self._get_bstr(-20, 30) self.hash_chain = self._get_long(-4) self.parent = self._get_long(-3) self.extension = self._get_long(-2) # hash table of entries self.hash_table = [] self.hash_size = self.blkdev.block_longs - 56 for i in xrange(self.hash_size): self.hash_table.append(self._get_long(6+i)) self.valid = (self.own_key == self.blk_num) return self.valid def create(self, parent, name, protect=0, comment=None, mod_ts=None, hash_chain=0, extension=0): Block.create(self) self.own_key = self.blk_num self.protect = protect if comment == None: self.comment = '' else: self.comment = comment # timestamps self.mod_ts = mod_ts self.name = name self.hash_chain = hash_chain self.parent = parent self.extension = extension # empty hash table self.hash_table = [] self.hash_size = self.blkdev.block_longs - 56 for i in xrange(self.hash_size): self.hash_table.append(0) self.valid = True return True def write(self): Block._create_data(self) self._put_long(1, self.own_key) self._put_long(-48, self.protect) self._put_bstr(-46, 79, self.comment) self._put_timestamp(-23, self.mod_ts) self._put_bstr(-20, 30, self.name) self._put_long(-4, self.hash_chain) self._put_long(-3, self.parent) self._put_long(-2, self.extension) # hash table for i in xrange(self.hash_size): self._put_long(6+i, self.hash_table[i]) Block.write(self) def dump(self): Block.dump(self,"UserDir") print " own_key: %d" % (self.own_key) pf = ProtectFlags(self.protect) print " protect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf) print " comment: '%s'" % self.comment print " mod_ts: %s" % self.mod_ts print " name: '%s'" % self.name print " hash_chain: %d" % self.hash_chain print " parent: %d" % self.parent print " extension: %s" % self.extension<|fim▁end|>
self._read()
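A hedged round-trip sketch using the class above. Here blkdev stands in for a block-device object from the surrounding library and is not defined in this snippet; the block numbers are arbitrary:

dir_blk = UserDirBlock(blkdev, blk_num=882)
dir_blk.create(parent=880, name="Work", protect=0, comment="user dir")
dir_blk.write()            # serialize the fields and hash table into the block

check = UserDirBlock(blkdev, blk_num=882)
check.read()               # valid only if the stored own_key equals blk_num
if check.valid:
    check.dump()           # prints name, protect bits, parent and hash chain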
<|file_name|>CountsTest.java<|end_file_name|><|fim▁begin|>package com.jgrillo.wordcount.api; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.google.common.collect.ImmutableMap; import org.junit.Test; import java.io.IOException; import java.util.List; import java.util.stream.Collectors; import static org.quicktheories.QuickTheory.qt; import static org.quicktheories.generators.SourceDSL.*; import static io.dropwizard.testing.FixtureHelpers.*; import static org.assertj.core.api.Assertions.assertThat; public final class CountsTest { private static final ObjectMapper mapper = new ObjectMapper() .disable(SerializationFeature.CLOSE_CLOSEABLE) .disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); private static final ObjectWriter writer = mapper.writerFor(Counts.class); private static final ObjectReader reader = mapper.readerFor(Counts.class); private static final JsonFactory factory = mapper.getFactory(); /** * Test the encode-decode invariant for the Counts model. */ @Test public void testCountsEncodeDecode() throws Exception { qt().forAll( lists().of(strings().allPossible().ofLengthBetween(0, 100)).ofSize(1000).describedAs( Object::toString ), lists().of(longs().all()).ofSize(1000).describedAs(Object::toString) ).as((words, counts) -> { final ImmutableMap.Builder<String, Long> mapBuilder = ImmutableMap.builder(); final List<String> distinctWords = words.stream().distinct().collect(Collectors.toList()); for (int i = 0; i < distinctWords.size(); i++) {<|fim▁hole|> return mapBuilder.build(); }).checkAssert((wordCounts) -> { try { final byte[] bytes = writer.writeValueAsBytes(new Counts(wordCounts)); final JsonParser parser = factory.createParser(bytes); final Counts countsModel = reader.readValue(parser); assertThat(countsModel.getCounts()).isEqualTo(wordCounts); } catch (IOException e) { throw new RuntimeException("Caught IOE while checking counts", e); } }); } @Test public void testCountsSerializesToJSON() throws Exception { final Counts counts = new Counts( ImmutableMap.<String, Long>builder() .put("word", 3L) .put("wat", 1L) .build() ); final String expected = writer.writeValueAsString(reader.readValue(fixture("fixtures/counts.json"))); assertThat(writer.writeValueAsString(counts)).isEqualTo(expected); } @Test public void testCountsDeserializesFromJSON() throws Exception { final Counts counts = new Counts( ImmutableMap.<String, Long>builder() .put("word", 3L) .put("wat", 1L) .build() ); final Counts deserializedCounts = reader.readValue(fixture("fixtures/counts.json")); assertThat(deserializedCounts.getCounts()).isEqualTo(counts.getCounts()); } }<|fim▁end|>
mapBuilder.put(distinctWords.get(i), counts.get(i)); // counts.size() >= distinctWords.size() }
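The same encode-decode invariant the test above exercises, sketched in plain Python with json standing in for Jackson and random data standing in for QuickTheories:

import json
import random
import string

def random_word(n):
    return "".join(random.choice(string.ascii_letters) for _ in range(n))

counts = {random_word(8): random.randint(0, 10 ** 12) for _ in range(1000)}
round_tripped = json.loads(json.dumps({"counts": counts}))["counts"]
assert round_tripped == counts  # serialization must preserve every entry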
<|file_name|>CoreClientOverOneWaySSLKerb5Test.java<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.artemis.tests.integration.ssl; import org.apache.activemq.artemis.api.core.ActiveMQSecurityException; import org.apache.activemq.artemis.api.core.QueueConfiguration; import org.apache.activemq.artemis.api.core.RoutingType; import org.apache.activemq.artemis.api.core.SimpleString; import org.apache.activemq.artemis.api.core.TransportConfiguration; import org.apache.activemq.artemis.api.core.client.ActiveMQClient; import org.apache.activemq.artemis.api.core.client.ClientConsumer; import org.apache.activemq.artemis.api.core.client.ClientMessage; import org.apache.activemq.artemis.api.core.client.ClientProducer; import org.apache.activemq.artemis.api.core.client.ClientSession; import org.apache.activemq.artemis.api.core.client.ClientSessionFactory; import org.apache.activemq.artemis.api.core.client.ServerLocator; import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl; import org.apache.activemq.artemis.core.remoting.impl.netty.TransportConstants; import org.apache.activemq.artemis.core.security.Role; import org.apache.activemq.artemis.core.server.ActiveMQServer; import org.apache.activemq.artemis.core.settings.HierarchicalRepository; import org.apache.activemq.artemis.spi.core.security.ActiveMQJAASSecurityManager; import org.apache.activemq.artemis.core.server.ActiveMQServers; import org.apache.activemq.artemis.spi.core.security.ActiveMQSecurityManager; import org.apache.activemq.artemis.tests.util.ActiveMQTestBase; import org.apache.activemq.artemis.utils.RandomUtil; import org.apache.activemq.artemis.utils.RetryRule; import org.apache.hadoop.minikdc.MiniKdc; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import java.io.File; import java.lang.management.ManagementFactory; import java.net.URL; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; public class CoreClientOverOneWaySSLKerb5Test extends ActiveMQTestBase { @Rule public RetryRule retryRule = new RetryRule(2); public static final SimpleString QUEUE = new SimpleString("QueueOverKrb5SSL"); public static final String CLIENT_PRINCIPAL = "client"; public static final String SNI_HOST = "sni.host"; public static final String SERVICE_PRINCIPAL = "host/" + SNI_HOST; static { String path = System.getProperty("java.security.auth.login.config"); if (path == null) { URL resource = CoreClientOverOneWaySSLKerb5Test.class.getClassLoader().getResource("login.config"); if (resource != null) { path = resource.getFile(); System.setProperty("java.security.auth.login.config", path); } } } private MiniKdc kdc; private ActiveMQServer server; private 
TransportConfiguration tc; private TransportConfiguration inVMTc; private String userPrincipal; @Test public void testOneWaySSLWithGoodClientCipherSuite() throws Exception { // hard coded match, default_keytab_name in minikdc-krb5.conf template File userKeyTab = new File("target/test.krb5.keytab"); kdc.createPrincipal(userKeyTab, CLIENT_PRINCIPAL, SERVICE_PRINCIPAL); createCustomSslServer(); tc.getParams().put(TransportConstants.SSL_ENABLED_PROP_NAME, true); tc.getParams().put(TransportConstants.ENABLED_CIPHER_SUITES_PROP_NAME, getSuitableCipherSuite()); tc.getParams().put(TransportConstants.SNIHOST_PROP_NAME, SNI_HOST); // static service name rather than dynamic machine name tc.getParams().put(TransportConstants.SSL_KRB5_CONFIG_PROP_NAME, "core-tls-krb5-client"); final ServerLocator locator = addServerLocator(ActiveMQClient.createServerLocatorWithoutHA(tc)); ClientSessionFactory sf = null; try { sf = createSessionFactory(locator); ClientSession session = sf.createSession(false, true, true); session.createQueue(new QueueConfiguration(CoreClientOverOneWaySSLKerb5Test.QUEUE).setRoutingType(RoutingType.ANYCAST)); ClientProducer producer = session.createProducer(CoreClientOverOneWaySSLKerb5Test.QUEUE); final String text = RandomUtil.randomString(); ClientMessage message = createTextMessage(session, text); producer.send(message); ClientConsumer consumer = session.createConsumer(CoreClientOverOneWaySSLKerb5Test.QUEUE); session.start(); ClientMessage m = consumer.receive(1000); Assert.assertNotNull(m); Assert.assertEquals(text, m.getReadOnlyBodyBuffer().readString()); System.err.println("m:" + m + ", user:" + m.getValidatedUserID()); Assert.assertNotNull("got validated user", m.getValidatedUserID()); Assert.assertTrue("krb id in validated user", m.getValidatedUserID().contains(CLIENT_PRINCIPAL)); } finally { if (sf != null) { sf.close(); } locator.close(); } // validate only ssl creds work, try and fake the principal w/o ssl final ServerLocator inVmLocator = addServerLocator(ActiveMQClient.createServerLocatorWithoutHA(inVMTc)); ClientSessionFactory inVmSf = null; try { inVmSf = createSessionFactory(inVmLocator); inVmSf.createSession(userPrincipal, "", false, false, false, false, 10); fail("supposed to throw exception"); } catch (ActiveMQSecurityException e) { // expected } finally { if (inVmSf != null) { inVmSf.close(); } inVmLocator.close(); } } <|fim▁hole|> // Package protected --------------------------------------------- @Override @Before public void setUp() throws Exception { super.setUp(); kdc = new MiniKdc(MiniKdc.createConf(), temporaryFolder.newFolder("kdc")); kdc.start(); } @Override @After public void tearDown() throws Exception { try { kdc.stop(); } finally { super.tearDown(); } } private void createCustomSslServer() throws Exception { Map<String, Object> params = new HashMap<>(); params.put(TransportConstants.SSL_ENABLED_PROP_NAME, true); params.put(TransportConstants.ENABLED_CIPHER_SUITES_PROP_NAME, getSuitableCipherSuite()); params.put(TransportConstants.SSL_KRB5_CONFIG_PROP_NAME, "core-tls-krb5-server"); ConfigurationImpl config = createBasicConfig().addAcceptorConfiguration(new TransportConfiguration(NETTY_ACCEPTOR_FACTORY, params, "nettySSL")); config.setPopulateValidatedUser(true); // so we can verify the kerb5 id is present config.setSecurityEnabled(true); config.addAcceptorConfiguration(new TransportConfiguration(INVM_ACCEPTOR_FACTORY)); ActiveMQSecurityManager securityManager = new ActiveMQJAASSecurityManager("Krb5Plus"); server = 
addServer(ActiveMQServers.newActiveMQServer(config, ManagementFactory.getPlatformMBeanServer(), securityManager, false)); HierarchicalRepository<Set<Role>> securityRepository = server.getSecurityRepository(); final String roleName = "ALLOW_ALL"; Role role = new Role(roleName, true, true, true, true, true, true, true, true, true, true); Set<Role> roles = new HashSet<>(); roles.add(role); securityRepository.addMatch(QUEUE.toString(), roles); server.start(); waitForServerToStart(server); // note kerberos user does not exist on the broker save as a role member in dual-authentication-roles.properties userPrincipal = CLIENT_PRINCIPAL + "@" + kdc.getRealm(); tc = new TransportConfiguration(NETTY_CONNECTOR_FACTORY); inVMTc = new TransportConfiguration(INVM_CONNECTOR_FACTORY); } }<|fim▁end|>
public String getSuitableCipherSuite() throws Exception { return "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"; }
<|file_name|>SwatchesColor.js<|end_file_name|><|fim▁begin|>import React from 'react'; import reactCSS from 'reactcss'; import { Swatch } from 'react-color/lib/components/common'; export const SwatchesColor = ({ color, onClick = () => {}, onSwatchHover, first, last, active, }) => { const styles = reactCSS( { default: { color: { width: '40px', height: '24px', cursor: 'pointer', background: color,<|fim▁hole|> fill: '#fff', marginLeft: '8px', display: 'none', }, }, first: { color: { overflow: 'hidden', borderRadius: '2px 2px 0 0', }, }, last: { color: { overflow: 'hidden', borderRadius: '0 0 2px 2px', }, }, active: { check: { display: 'block', }, }, 'color-#FFFFFF': { color: { boxShadow: 'inset 0 0 0 1px #ddd', }, check: { fill: '#333', }, }, transparent: { check: { fill: '#333', }, }, }, { first, last, active, 'color-#FFFFFF': color === '#FFFFFF', transparent: color === 'transparent', }, ); return ( <Swatch color={color} style={styles.color} onClick={onClick} onHover={onSwatchHover} focusStyle={{ boxShadow: `0 0 4px ${color}` }} > <div style={styles.check}> <svg style={{ width: '24px', height: '24px' }} viewBox="0 0 24 24"> <path d="M21,7L9,19L3.5,13.5L4.91,12.09L9,16.17L19.59,5.59L21,7Z" /> </svg> </div> </Swatch> ); }; export default SwatchesColor;<|fim▁end|>
marginBottom: '1px', }, check: {
<|file_name|>properties_data_metaball.py<|end_file_name|><|fim▁begin|># ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8 compliant> import bpy from rna_prop_ui import PropertyPanel class DataButtonsPanel(): bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = "data" @classmethod def poll(cls, context): return context.meta_ball class DATA_PT_context_metaball(DataButtonsPanel, bpy.types.Panel): bl_label = "" bl_options = {'HIDE_HEADER'} def draw(self, context): layout = self.layout ob = context.object mball = context.meta_ball space = context.space_data if ob: layout.template_ID(ob, "data", unlink="None") elif mball: layout.template_ID(space, "pin_id", unlink="None") class DATA_PT_metaball(DataButtonsPanel, bpy.types.Panel): bl_label = "Metaball" def draw(self, context): layout = self.layout mball = context.meta_ball split = layout.split() col = split.column() col.label(text="Resolution:") sub = col.column(align=True) sub.prop(mball, "resolution", text="View") sub.prop(mball, "render_resolution", text="Render") col = split.column() col.label(text="Settings:") col.prop(mball, "threshold", text="Threshold") layout.label(text="Update:") layout.prop(mball, "update_method", expand=True) class DATA_PT_metaball_element(DataButtonsPanel, bpy.types.Panel): bl_label = "Active Element" @classmethod def poll(cls, context): return (context.meta_ball and context.meta_ball.elements.active) def draw(self, context): layout = self.layout metaelem = context.meta_ball.elements.active layout.prop(metaelem, "type") split = layout.split() col = split.column(align=True) col.label(text="Settings:") col.prop(metaelem, "stiffness", text="Stiffness") col.prop(metaelem, "use_negative", text="Negative") col.prop(metaelem, "hide", text="Hide") col = split.column(align=True) if metaelem.type in ('CUBE', 'ELLIPSOID'): col.label(text="Size:") col.prop(metaelem, "size_x", text="X") col.prop(metaelem, "size_y", text="Y") col.prop(metaelem, "size_z", text="Z") elif metaelem.type == 'TUBE': col.label(text="Size:") col.prop(metaelem, "size_x", text="X") elif metaelem.type == 'PLANE': col.label(text="Size:") col.prop(metaelem, "size_x", text="X") col.prop(metaelem, "size_y", text="Y") class DATA_PT_custom_props_metaball(DataButtonsPanel, PropertyPanel, bpy.types.Panel): COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'} _context_path = "object.data" _property_type = bpy.types.MetaBall def register(): pass <|fim▁hole|> if __name__ == "__main__": register()<|fim▁end|>
def unregister(): pass
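The register() and unregister() hooks above are deliberately empty, since UI scripts of this Blender generation were registered automatically. Purely as a sketch, explicit registration of the same classes with the standard bpy API would look like this:

import bpy

classes = (
    DATA_PT_context_metaball,
    DATA_PT_metaball,
    DATA_PT_metaball_element,
    DATA_PT_custom_props_metaball,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)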
<|file_name|>test_cache.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import import sys import types from contextlib import contextmanager from kombu.utils.encoding import str_to_bytes from celery import signature from celery import states from celery import group from celery.backends.cache import CacheBackend, DummyClient from celery.exceptions import ImproperlyConfigured from celery.five import items, string, text_t from celery.utils import uuid from celery.tests.case import ( AppCase, Mock, mask_modules, patch, reset_modules, ) PY3 = sys.version_info[0] == 3 class SomeClass(object): def __init__(self, data): self.data = data class test_CacheBackend(AppCase): def setup(self): self.tb = CacheBackend(backend='memory://', app=self.app) self.tid = uuid() def test_no_backend(self): self.app.conf.CELERY_CACHE_BACKEND = None with self.assertRaises(ImproperlyConfigured): CacheBackend(backend=None, app=self.app) def test_mark_as_done(self): self.assertEqual(self.tb.get_status(self.tid), states.PENDING) self.assertIsNone(self.tb.get_result(self.tid)) self.tb.mark_as_done(self.tid, 42) self.assertEqual(self.tb.get_status(self.tid), states.SUCCESS) self.assertEqual(self.tb.get_result(self.tid), 42) def test_is_pickled(self): result = {'foo': 'baz', 'bar': SomeClass(12345)} self.tb.mark_as_done(self.tid, result) # is serialized properly. rindb = self.tb.get_result(self.tid) self.assertEqual(rindb.get('foo'), 'baz') self.assertEqual(rindb.get('bar').data, 12345) def test_mark_as_failure(self): try: raise KeyError('foo') except KeyError as exception: self.tb.mark_as_failure(self.tid, exception) self.assertEqual(self.tb.get_status(self.tid), states.FAILURE) self.assertIsInstance(self.tb.get_result(self.tid), KeyError) def test_apply_chord(self): tb = CacheBackend(backend='memory://', app=self.app) gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] tb.apply_chord(group(app=self.app), (), gid, {}, result=res) @patch('celery.result.GroupResult.restore') def test_on_chord_part_return(self, restore): tb = CacheBackend(backend='memory://', app=self.app) deps = Mock() deps.__len__ = Mock() deps.__len__.return_value = 2 restore.return_value = deps task = Mock() task.name = 'foobarbaz' self.app.tasks['foobarbaz'] = task task.request.chord = signature(task) gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] task.request.group = gid tb.apply_chord(group(app=self.app), (), gid, {}, result=res) self.assertFalse(deps.join_native.called) tb.on_chord_part_return(task, 'SUCCESS', 10) self.assertFalse(deps.join_native.called) tb.on_chord_part_return(task, 'SUCCESS', 10) deps.join_native.assert_called_with(propagate=True, timeout=3.0) deps.delete.assert_called_with() def test_mget(self): self.tb.set('foo', 1) self.tb.set('bar', 2) self.assertDictEqual(self.tb.mget(['foo', 'bar']), {'foo': 1, 'bar': 2}) def test_forget(self): self.tb.mark_as_done(self.tid, {'foo': 'bar'}) x = self.app.AsyncResult(self.tid, backend=self.tb) x.forget() self.assertIsNone(x.result) def test_process_cleanup(self): self.tb.process_cleanup() def test_expires_as_int(self): tb = CacheBackend(backend='memory://', expires=10, app=self.app) self.assertEqual(tb.expires, 10) def test_unknown_backend_raises_ImproperlyConfigured(self): with self.assertRaises(ImproperlyConfigured): CacheBackend(backend='unknown://', app=self.app) class MyMemcachedStringEncodingError(Exception): pass class MemcachedClient(DummyClient): def set(self, key, value, *args, **kwargs): if PY3: key_t, must_be, not_be, cod = bytes, 
'string', 'bytes', 'decode' else: key_t, must_be, not_be, cod = text_t, 'bytes', 'string', 'encode' if isinstance(key, key_t): raise MyMemcachedStringEncodingError( 'Keys must be {0}, not {1}. Convert your ' 'strings using mystring.{2}(charset)!'.format( must_be, not_be, cod)) return super(MemcachedClient, self).set(key, value, *args, **kwargs) class MockCacheMixin(object): @contextmanager def mock_memcache(self): memcache = types.ModuleType('memcache') memcache.Client = MemcachedClient memcache.Client.__module__ = memcache.__name__ prev, sys.modules['memcache'] = sys.modules.get('memcache'), memcache try: yield True finally: if prev is not None: sys.modules['memcache'] = prev @contextmanager def mock_pylibmc(self): pylibmc = types.ModuleType('pylibmc') pylibmc.Client = MemcachedClient pylibmc.Client.__module__ = pylibmc.__name__ prev = sys.modules.get('pylibmc') sys.modules['pylibmc'] = pylibmc try: yield True finally: if prev is not None: sys.modules['pylibmc'] = prev class test_get_best_memcache(AppCase, MockCacheMixin): def test_pylibmc(self): with self.mock_pylibmc(): with reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] self.assertEqual(cache.get_best_memcache()[0].__module__, 'pylibmc') def test_memcache(self): with self.mock_memcache(): with reset_modules('celery.backends.cache'): with mask_modules('pylibmc'): from celery.backends import cache cache._imp = [None] self.assertEqual(cache.get_best_memcache()[0]().__module__, 'memcache') def test_no_implementations(self): with mask_modules('pylibmc', 'memcache'): with reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] with self.assertRaises(ImproperlyConfigured): cache.get_best_memcache() def test_cached(self): with self.mock_pylibmc(): with reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] cache.get_best_memcache()[0](behaviors={'foo': 'bar'}) self.assertTrue(cache._imp[0]) cache.get_best_memcache()[0]() def test_backends(self): from celery.backends.cache import backends with self.mock_memcache(): for name, fun in items(backends): self.assertTrue(fun()) class test_memcache_key(AppCase, MockCacheMixin): def test_memcache_unicode_key(self): with self.mock_memcache(): with reset_modules('celery.backends.cache'): with mask_modules('pylibmc'): from celery.backends import cache cache._imp = [None] task_id, result = string(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result) def test_memcache_bytes_key(self): with self.mock_memcache(): with reset_modules('celery.backends.cache'): with mask_modules('pylibmc'): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result)<|fim▁hole|> from celery.backends import cache cache._imp = [None] task_id, result = string(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result) def test_pylibmc_bytes_key(self): with reset_modules('celery.backends.cache'): with self.mock_pylibmc(): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) 
b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result)<|fim▁end|>
def test_pylibmc_unicode_key(self): with reset_modules('celery.backends.cache'): with self.mock_pylibmc():
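The str/bytes tests above pin down one behavior: a task id must reach memcached as a bytes key whether it arrives as text or bytes. A minimal normalization sketch, not Celery's actual implementation:

def normalize_key(key, encoding="utf-8"):
    # memcached clients reject the "wrong" key type, so coerce text to bytes
    return key.encode(encoding) if isinstance(key, str) else key

store = {}
for task_id in ("abc123", b"abc123"):
    store[normalize_key(task_id)] = 42
assert list(store) == [b"abc123"]  # both spellings land on one bytes key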
<|file_name|>IFDSSolver.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright (c) 2012 Eric Bodden. * Copyright (c) 2013 Tata Consultancy Services & Ecole Polytechnique de Montreal * All rights reserved. This program and the accompanying materials * are made available under the terms of the GNU Lesser Public License v2.1 * which accompanies this distribution, and is available at * http://www.gnu.org/licenses/old-licenses/gpl-2.0.html * * Contributors: * Eric Bodden - initial API and implementation * Marc-Andre Laverdiere-Papineau - Fixed race condition * Steven Arzt - Created FastSolver implementation ******************************************************************************/ package soot.jimple.infoflow.solver.fastSolver; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.cache.CacheBuilder; import heros.DontSynchronize; import heros.FlowFunction; import heros.FlowFunctionCache; import heros.FlowFunctions; import heros.IFDSTabulationProblem; import heros.SynchronizedBy; import heros.ZeroedFlowFunctions; import heros.solver.Pair; import heros.solver.PathEdge; import soot.SootMethod; import soot.Unit; import soot.jimple.infoflow.collect.ConcurrentHashSet; import soot.jimple.infoflow.collect.MyConcurrentHashMap; import soot.jimple.infoflow.memory.IMemoryBoundedSolver; import soot.jimple.infoflow.solver.executors.InterruptableExecutor; import soot.jimple.infoflow.solver.executors.SetPoolExecutor; import soot.jimple.infoflow.solver.memory.IMemoryManager; import soot.jimple.toolkits.ide.icfg.BiDiInterproceduralCFG; /** * A solver for an {@link IFDSTabulationProblem}. This solver is not based on the IDESolver * implementation in Heros for performance reasons. * * @param <N> The type of nodes in the interprocedural control-flow graph. Typically {@link Unit}. * @param <D> The type of data-flow facts to be computed by the tabulation problem. * @param <I> The type of inter-procedural control-flow graph being used. 
* @see IFDSTabulationProblem */ public class IFDSSolver<N,D extends FastSolverLinkedNode<D, N>,I extends BiDiInterproceduralCFG<N, SootMethod>> implements IMemoryBoundedSolver { public static CacheBuilder<Object, Object> DEFAULT_CACHE_BUILDER = CacheBuilder.newBuilder().concurrencyLevel (Runtime.getRuntime().availableProcessors()).initialCapacity(10000).softValues(); protected static final Logger logger = LoggerFactory.getLogger(IFDSSolver.class); //enable with -Dorg.slf4j.simpleLogger.defaultLogLevel=trace public static final boolean DEBUG = logger.isDebugEnabled(); protected InterruptableExecutor executor; @DontSynchronize("only used by single thread") protected int numThreads; @SynchronizedBy("thread safe data structure, consistent locking when used") protected MyConcurrentHashMap<PathEdge<N, D>,D> jumpFunctions = new MyConcurrentHashMap<PathEdge<N,D>, D>(); @SynchronizedBy("thread safe data structure, only modified internally") protected final I icfg; //stores summaries that were queried before they were computed //see CC 2010 paper by Naeem, Lhotak and Rodriguez @SynchronizedBy("consistent lock on 'incoming'") protected final MyConcurrentHashMap<Pair<SootMethod,D>,Set<Pair<N,D>>> endSummary = new MyConcurrentHashMap<Pair<SootMethod,D>, Set<Pair<N,D>>>(); //edges going along calls //see CC 2010 paper by Naeem, Lhotak and Rodriguez @SynchronizedBy("consistent lock on field") protected final MyConcurrentHashMap<Pair<SootMethod,D>,MyConcurrentHashMap<N,Map<D, D>>> incoming = new MyConcurrentHashMap<Pair<SootMethod,D>,MyConcurrentHashMap<N,Map<D, D>>>(); @DontSynchronize("stateless") protected final FlowFunctions<N, D, SootMethod> flowFunctions; @DontSynchronize("only used by single thread") protected final Map<N,Set<D>> initialSeeds; @DontSynchronize("benign races") public long propagationCount; @DontSynchronize("stateless") protected final D zeroValue; @DontSynchronize("readOnly") protected final FlowFunctionCache<N,D,SootMethod> ffCache; @DontSynchronize("readOnly") protected final boolean followReturnsPastSeeds; @DontSynchronize("readOnly") protected boolean setJumpPredecessors = false; @DontSynchronize("readOnly") private boolean enableMergePointChecking = false; @DontSynchronize("readOnly") private boolean singleJoinPointAbstraction = false; @DontSynchronize("readOnly") protected IMemoryManager<D, N> memoryManager = null; protected boolean solverId; private Set<IMemoryBoundedSolverStatusNotification> notificationListeners = new HashSet<>(); private boolean killFlag = false; /** * Creates a solver for the given problem, which caches flow functions and edge functions. * The solver must then be started by calling {@link #solve()}. */ public IFDSSolver(IFDSTabulationProblem<N,D,SootMethod,I> tabulationProblem) { this(tabulationProblem, DEFAULT_CACHE_BUILDER); } /** * Creates a solver for the given problem, constructing caches with the * given {@link CacheBuilder}. The solver must then be started by calling * {@link #solve()}. * @param tabulationProblem The tabulation problem to solve * @param flowFunctionCacheBuilder A valid {@link CacheBuilder} or * <code>null</code> if no caching is to be used for flow functions. 
*/ public IFDSSolver(IFDSTabulationProblem<N,D,SootMethod,I> tabulationProblem, @SuppressWarnings("rawtypes") CacheBuilder flowFunctionCacheBuilder) { if(logger.isDebugEnabled()) flowFunctionCacheBuilder = flowFunctionCacheBuilder.recordStats(); this.zeroValue = tabulationProblem.zeroValue(); this.icfg = tabulationProblem.interproceduralCFG(); FlowFunctions<N, D, SootMethod> flowFunctions = tabulationProblem.autoAddZero() ? new ZeroedFlowFunctions<N,D,SootMethod>(tabulationProblem.flowFunctions(), zeroValue) : tabulationProblem.flowFunctions(); if(flowFunctionCacheBuilder!=null) { ffCache = new FlowFunctionCache<N,D,SootMethod>(flowFunctions, flowFunctionCacheBuilder); flowFunctions = ffCache; } else { ffCache = null; } this.flowFunctions = flowFunctions; this.initialSeeds = tabulationProblem.initialSeeds(); this.followReturnsPastSeeds = tabulationProblem.followReturnsPastSeeds(); this.numThreads = Math.max(1,tabulationProblem.numThreads()); this.executor = getExecutor(); } public void setSolverId(boolean solverId) { this.solverId = solverId; } /** * Runs the solver on the configured problem. This can take some time. */ public void solve() { // Notify the listeners that the solver has been started for (IMemoryBoundedSolverStatusNotification listener : notificationListeners) listener.notifySolverStarted(this); submitInitialSeeds(); awaitCompletionComputeValuesAndShutdown(); // Notify the listeners that the solver has been terminated for (IMemoryBoundedSolverStatusNotification listener : notificationListeners) listener.notifySolverTerminated(this); } /** * Schedules the processing of initial seeds, initiating the analysis. * Clients should only call this methods if performing synchronization on * their own. Normally, {@link #solve()} should be called instead. */ protected void submitInitialSeeds() { for(Entry<N, Set<D>> seed: initialSeeds.entrySet()) { N startPoint = seed.getKey(); for(D val: seed.getValue()) propagate(zeroValue, startPoint, val, null, false); addFunction(new PathEdge<N, D>(zeroValue, startPoint, zeroValue)); } } /** * Awaits the completion of the exploded super graph. When complete, computes result values, * shuts down the executor and returns. */ protected void awaitCompletionComputeValuesAndShutdown() { { //run executor and await termination of tasks runExecutorAndAwaitCompletion(); } if(logger.isDebugEnabled()) printStats(); //ask executor to shut down; //this will cause new submissions to the executor to be rejected, //but at this point all tasks should have completed anyway executor.shutdown(); // Wait for the executor to be really gone while (!executor.isTerminated()) { try { Thread.sleep(100); } catch (InterruptedException e) { // silently ignore the exception, it's not an issue if the // thread gets aborted } } } /** * Runs execution, re-throwing exceptions that might be thrown during its execution. */ private void runExecutorAndAwaitCompletion() { try { executor.awaitCompletion(); } catch (InterruptedException e) { e.printStackTrace(); } Throwable exception = executor.getException(); if(exception!=null) { throw new RuntimeException("There were exceptions during IFDS analysis. Exiting.",exception); } } /** * Dispatch the processing of a given edge. It may be executed in a different thread. 
* @param edge the edge to process */ protected void scheduleEdgeProcessing(PathEdge<N,D> edge){ // If the executor has been killed, there is little point // in submitting new tasks if (killFlag || executor.isTerminating() || executor.isTerminated()) return; executor.execute(new PathEdgeProcessingTask(edge, solverId)); propagationCount++; } /** * Lines 13-20 of the algorithm; processing a call site in the caller's context. * * For each possible callee, registers incoming call edges. * Also propagates call-to-return flows and summarized callee flows within the caller. * * @param edge an edge whose target node resembles a method call */ private void processCall(PathEdge<N,D> edge) { final D d1 = edge.factAtSource(); final N n = edge.getTarget(); // a call node; line 14... final D d2 = edge.factAtTarget(); assert d2 != null; Collection<N> returnSiteNs = icfg.getReturnSitesOfCallAt(n); //for each possible callee Collection<SootMethod> callees = icfg.getCalleesOfCallAt(n); for(SootMethod sCalledProcN: callees) { //still line 14 // Early termination check if (killFlag) return; //compute the call-flow function FlowFunction<D> function = flowFunctions.getCallFlowFunction(n, sCalledProcN); Set<D> res = computeCallFlowFunction(function, d1, d2); Collection<N> startPointsOf = icfg.getStartPointsOf(sCalledProcN); //for each result node of the call-flow function for(D d3: res) { if (memoryManager != null) d3 = memoryManager.handleGeneratedMemoryObject(d2, d3); if (d3 == null) continue; //for each callee's start point(s) for(N sP: startPointsOf) { //create initial self-loop propagate(d3, sP, d3, n, false, true); //line 15 } //register the fact that <sp,d3> has an incoming edge from <n,d2> //line 15.1 of Naeem/Lhotak/Rodriguez if (!addIncoming(sCalledProcN,d3,n,d1,d2)) continue; //line 15.2 Set<Pair<N, D>> endSumm = endSummary(sCalledProcN, d3);<|fim▁hole|> //because we have observed a potentially new incoming edge into <sP,d3> if (endSumm != null && !endSumm.isEmpty()) for(Pair<N, D> entry: endSumm) { N eP = entry.getO1(); D d4 = entry.getO2(); //for each return site for(N retSiteN: returnSiteNs) { //compute return-flow function FlowFunction<D> retFunction = flowFunctions.getReturnFlowFunction(n, sCalledProcN, eP, retSiteN); //for each target value of the function for(D d5: computeReturnFlowFunction(retFunction, d3, d4, n, Collections.singleton(d1))) { if (memoryManager != null) d5 = memoryManager.handleGeneratedMemoryObject(d4, d5); // If we have not changed anything in the callee, we do not need the facts // from there. Even if we change something: If we don't need the concrete // path, we can skip the callee in the predecessor chain D d5p = d5; if (d5.equals(d2)) d5p = d2; else if (setJumpPredecessors && d5p != d2) { d5p = d5p.clone(); d5p.setPredecessor(d2); } propagate(d1, retSiteN, d5p, n, false, true); } } } } } //line 17-19 of Naeem/Lhotak/Rodriguez //process intra-procedural flows along call-to-return flow functions for (N returnSiteN : returnSiteNs) { FlowFunction<D> callToReturnFlowFunction = flowFunctions.getCallToReturnFlowFunction(n, returnSiteN); for(D d3: computeCallToReturnFlowFunction(callToReturnFlowFunction, d1, d2)) { if (memoryManager != null) d3 = memoryManager.handleGeneratedMemoryObject(d2, d3); if (d3 != null) propagate(d1, returnSiteN, d3, n, false); } } } /** * Computes the call flow function for the given call-site abstraction * @param callFlowFunction The call flow function to compute * @param d1 The abstraction at the current method's start node. 
* @param d2 The abstraction at the call site * @return The set of caller-side abstractions at the callee's start node */ protected Set<D> computeCallFlowFunction (FlowFunction<D> callFlowFunction, D d1, D d2) { return callFlowFunction.computeTargets(d2); } /** * Computes the call-to-return flow function for the given call-site * abstraction * @param callToReturnFlowFunction The call-to-return flow function to * compute * @param d1 The abstraction at the current method's start node. * @param d2 The abstraction at the call site * @return The set of caller-side abstractions at the return site */ protected Set<D> computeCallToReturnFlowFunction (FlowFunction<D> callToReturnFlowFunction, D d1, D d2) { return callToReturnFlowFunction.computeTargets(d2); } /** * Lines 21-32 of the algorithm. * * Stores callee-side summaries. * Also, at the side of the caller, propagates intra-procedural flows to return sites * using those newly computed summaries. * * @param edge an edge whose target node resembles a method exit */ protected void processExit(PathEdge<N,D> edge) { final N n = edge.getTarget(); // an exit node; line 21... SootMethod methodThatNeedsSummary = icfg.getMethodOf(n); final D d1 = edge.factAtSource(); final D d2 = edge.factAtTarget(); //for each of the method's start points, determine incoming calls //line 21.1 of Naeem/Lhotak/Rodriguez //register end-summary if (!addEndSummary(methodThatNeedsSummary, d1, n, d2)) return; Map<N,Map<D, D>> inc = incoming(d1, methodThatNeedsSummary); //for each incoming call edge already processed //(see processCall(..)) if (inc != null) for (Entry<N,Map<D, D>> entry: inc.entrySet()) { // Early termination check if (killFlag) return; //line 22 N c = entry.getKey(); Set<D> callerSideDs = entry.getValue().keySet(); //for each return site for(N retSiteC: icfg.getReturnSitesOfCallAt(c)) { //compute return-flow function FlowFunction<D> retFunction = flowFunctions.getReturnFlowFunction(c, methodThatNeedsSummary,n,retSiteC); Set<D> targets = computeReturnFlowFunction(retFunction, d1, d2, c, callerSideDs); //for each incoming-call value for(Entry<D, D> d1d2entry : entry.getValue().entrySet()) { final D d4 = d1d2entry.getKey(); final D predVal = d1d2entry.getValue(); for(D d5: targets) { if (memoryManager != null) d5 = memoryManager.handleGeneratedMemoryObject(d2, d5); if (d5 == null) continue; // If we have not changed anything in the callee, we do not need the facts // from there. 
Even if we change something: If we don't need the concrete // path, we can skip the callee in the predecessor chain D d5p = d5; if (d5.equals(predVal)) d5p = predVal; else if (setJumpPredecessors && d5p != predVal) { d5p = d5p.clone(); d5p.setPredecessor(predVal); } propagate(d4, retSiteC, d5p, c, false, true); } } } } //handling for unbalanced problems where we return out of a method with a fact for which we have no incoming flow //note: we propagate that way only values that originate from ZERO, as conditionally generated values should only //be propagated into callers that have an incoming edge for this condition if(followReturnsPastSeeds && d1 == zeroValue && (inc == null || inc.isEmpty())) { Collection<N> callers = icfg.getCallersOf(methodThatNeedsSummary); for(N c: callers) { SootMethod callerMethod = icfg.getMethodOf(c); for(N retSiteC: icfg.getReturnSitesOfCallAt(c)) { FlowFunction<D> retFunction = flowFunctions.getReturnFlowFunction(c, methodThatNeedsSummary,n,retSiteC); Set<D> targets = computeReturnFlowFunction(retFunction, d1, d2, c, Collections.singleton(zeroValue)); for(D d5: targets) { if (memoryManager != null) d5 = memoryManager.handleGeneratedMemoryObject(d2, d5); if (d5 != null) propagate(zeroValue, retSiteC, d5, c, true, callerMethod == methodThatNeedsSummary); } } } //in cases where there are no callers, the return statement would normally not be processed at all; //this might be undesirable if the flow function has a side effect such as registering a taint; //instead we thus call the return flow function with a null caller if(callers.isEmpty()) { FlowFunction<D> retFunction = flowFunctions.getReturnFlowFunction(null, methodThatNeedsSummary,n,null); retFunction.computeTargets(d2); } } } /** * Computes the return flow function for the given set of caller-side * abstractions. * @param retFunction The return flow function to compute * @param d1 The abstraction at the beginning of the callee * @param d2 The abstraction at the exit node in the callee * @param callSite The call site * @param callerSideDs The abstractions at the call site * @return The set of caller-side abstractions at the return site */ protected Set<D> computeReturnFlowFunction (FlowFunction<D> retFunction, D d1, D d2, N callSite, Collection<D> callerSideDs) { return retFunction.computeTargets(d2); } /** * Lines 33-37 of the algorithm. * Simply propagate normal, intra-procedural flows. * @param edge */ private void processNormalFlow(PathEdge<N,D> edge) { final D d1 = edge.factAtSource(); final N n = edge.getTarget(); final D d2 = edge.factAtTarget(); for (N m : icfg.getSuccsOf(n)) { // Early termination check if (killFlag) return; // Compute the flow function FlowFunction<D> flowFunction = flowFunctions.getNormalFlowFunction(n,m); Set<D> res = computeNormalFlowFunction(flowFunction, d1, d2); for (D d3 : res) { if (memoryManager != null && d2 != d3) d3 = memoryManager.handleGeneratedMemoryObject(d2, d3); if (d3 != null) propagate(d1, m, d3, null, false); } } } /** * Computes the normal flow function for the given set of start and end * abstractions. * @param flowFunction The normal flow function to compute * @param d1 The abstraction at the method's start node * @param d2 The abstraction at the current node * @return The set of abstractions at the successor node */ protected Set<D> computeNormalFlowFunction (FlowFunction<D> flowFunction, D d1, D d2) { return flowFunction.computeTargets(d2); } /** * Propagates the flow further down the exploded super graph. 
* @param sourceVal the source value of the propagated summary edge * @param target the target statement * @param targetVal the target value at the target statement * @param relatedCallSite for call and return flows the related call statement, <code>null</code> otherwise * (this value is not used within this implementation but may be useful for subclasses of {@link IFDSSolver}) * @param isUnbalancedReturn <code>true</code> if this edge is propagating an unbalanced return * (this value is not used within this implementation but may be useful for subclasses of {@link IFDSSolver}) */ protected void propagate(D sourceVal, N target, D targetVal, /* deliberately exposed to clients */ N relatedCallSite, /* deliberately exposed to clients */ boolean isUnbalancedReturn) { propagate(sourceVal, target, targetVal, relatedCallSite, isUnbalancedReturn, false); } /** * Propagates the flow further down the exploded super graph. * @param sourceVal the source value of the propagated summary edge * @param target the target statement * @param targetVal the target value at the target statement * @param relatedCallSite for call and return flows the related call statement, <code>null</code> otherwise * (this value is not used within this implementation but may be useful for subclasses of {@link IFDSSolver}) * @param isUnbalancedReturn <code>true</code> if this edge is propagating an unbalanced return * (this value is not used within this implementation but may be useful for subclasses of {@link IFDSSolver}) * @param forceRegister True if the jump function must always be registered with jumpFn . * This can happen when externally injecting edges that don't come out of this * solver. */ protected void propagate(D sourceVal, N target, D targetVal, /* deliberately exposed to clients */ N relatedCallSite, /* deliberately exposed to clients */ boolean isUnbalancedReturn, boolean forceRegister) { // Let the memory manager run if (memoryManager != null) { sourceVal = memoryManager.handleMemoryObject(sourceVal); targetVal = memoryManager.handleMemoryObject(targetVal); if (sourceVal == null || targetVal == null) return; } final PathEdge<N,D> edge = new PathEdge<N,D>(sourceVal, target, targetVal); final D existingVal = (forceRegister || !enableMergePointChecking || isMergePoint(target)) ? addFunction(edge) : null; if (existingVal != null) { if (existingVal != targetVal) { // Check whether we need to retain this abstraction boolean isEssential; if (memoryManager == null) isEssential = relatedCallSite != null && icfg.isCallStmt(relatedCallSite); else isEssential = memoryManager.isEssentialJoinPoint(targetVal, relatedCallSite); if (!singleJoinPointAbstraction || isEssential) existingVal.addNeighbor(targetVal); } } else { // If this is an inactive abstraction and we have already processed // its active counterpart, we can skip this one D activeVal = targetVal.getActiveCopy(); if (activeVal != targetVal) { PathEdge<N, D> activeEdge = new PathEdge<>(sourceVal, target, activeVal); if (jumpFunctions.containsKey(activeEdge)) return; } scheduleEdgeProcessing(edge); } } /** * Records a jump function. The source statement is implicit. 
* @see PathEdge */ public D addFunction(PathEdge<N, D> edge) { return jumpFunctions.putIfAbsent(edge, edge.factAtTarget()); } /** * Gets whether the given unit is a merge point in the ICFG * @param target The unit to check * @return True if the given unit is a merge point in the ICFG, otherwise * false */ private boolean isMergePoint(N target) { // Check whether there is more than one possibility to reach this unit List<N> preds = icfg.getPredsOf(target); int size = preds.size(); if (size > 1) if (!icfg.getEndPointsOf(icfg.getMethodOf(target)).contains(target)) return true; // Special case: If this is the first unit in the method, there is an // implicit second way (through method call) if (size == 1) { if (icfg.getStartPointsOf(icfg.getMethodOf(target)).contains(target)) if (!icfg.getEndPointsOf(icfg.getMethodOf(target)).contains(target)) return true; } return false; } protected Set<Pair<N, D>> endSummary(SootMethod m, D d3) { Set<Pair<N, D>> map = endSummary.get(new Pair<SootMethod, D>(m, d3)); return map; } private boolean addEndSummary(SootMethod m, D d1, N eP, D d2) { if (d1 == zeroValue) return true; Set<Pair<N, D>> summaries = endSummary.putIfAbsentElseGet (new Pair<SootMethod, D>(m, d1), new ConcurrentHashSet<Pair<N, D>>()); return summaries.add(new Pair<N, D>(eP, d2)); } protected Map<N, Map<D, D>> incoming(D d1, SootMethod m) { Map<N, Map<D, D>> map = incoming.get(new Pair<SootMethod, D>(m, d1)); return map; } protected boolean addIncoming(SootMethod m, D d3, N n, D d1, D d2) { MyConcurrentHashMap<N, Map<D, D>> summaries = incoming.putIfAbsentElseGet (new Pair<SootMethod, D>(m, d3), new MyConcurrentHashMap<N, Map<D, D>>()); Map<D, D> set = summaries.putIfAbsentElseGet(n, new ConcurrentHashMap<D, D>()); return set.put(d1, d2) == null; } /** * Factory method for this solver's thread-pool executor. */ protected InterruptableExecutor getExecutor() { return new SetPoolExecutor(1, this.numThreads, 30, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()); } /** * Returns a String used to identify the output of this solver in debug mode. * Subclasses can overwrite this string to distinguish the output from different solvers. */ protected String getDebugName() { return "FAST IFDS SOLVER"; } public void printStats() { if(logger.isDebugEnabled()) { if(ffCache!=null) ffCache.printStats(); } else { logger.info("No statistics were collected, as DEBUG is disabled."); } } private class PathEdgeProcessingTask implements Runnable { private final PathEdge<N,D> edge; private final boolean solverId; public PathEdgeProcessingTask(PathEdge<N,D> edge, boolean solverId) { this.edge = edge; this.solverId = solverId; } public void run() { if(icfg.isCallStmt(edge.getTarget())) { processCall(edge); } else { //note that some statements, such as "throw" may be //both an exit statement and a "normal" statement if(icfg.isExitStmt(edge.getTarget())) processExit(edge); if(!icfg.getSuccsOf(edge.getTarget()).isEmpty()) processNormalFlow(edge); } } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((edge == null) ? 0 : edge.hashCode()); result = prime * result + (solverId ? 
1231 : 1237); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; PathEdgeProcessingTask other = (PathEdgeProcessingTask) obj; if (edge == null) { if (other.edge != null) return false; } else if (!edge.equals(other.edge)) return false; if (solverId != other.solverId) return false; return true; } } /** * Sets whether abstractions on method returns shall be connected to the * respective call abstractions to shortcut paths. * @param setJumpPredecessors True if return abstractions shall be connected * to call abstractions as predecessors, otherwise false. */ public void setJumpPredecessors(boolean setJumpPredecessors) { this.setJumpPredecessors = setJumpPredecessors; } /** * Sets whether only abstractions at merge points shall be recorded to jumpFn. * @param enableMergePointChecking True if only abstractions at merge points * shall be recorded to jumpFn, otherwise false. */ public void setEnableMergePointChecking(boolean enableMergePointChecking) { this.enableMergePointChecking = enableMergePointChecking; } /** * Sets whether only a single abstraction shall be recorded per join point. * In other words, enabling this option disables the recording of neighbors. * @param singleJoinPointAbstraction True to only record a single abstraction * per join point, false to record all incoming neighbors */ public void setSingleJoinPointAbstraction(boolean singleJoinPointAbstraction) { this.singleJoinPointAbstraction = singleJoinPointAbstraction; } /** * Sets the memory manager that shall be used to manage the abstractions * @param memoryManager The memory manager that shall be used to manage the * abstractions */ public void setMemoryManager(IMemoryManager<D, N> memoryManager) { this.memoryManager = memoryManager; } /** * Gets the memory manager used by this solver to reduce memory consumption * @return The memory manager registered with this solver */ public IMemoryManager<D, N> getMemoryManager() { return this.memoryManager; } @Override public void forceTerminate() { this.killFlag = true; this.executor.interrupt(); this.executor.shutdown(); } @Override public boolean isTerminated() { return killFlag || this.executor.isFinished(); } @Override public boolean isKilled() { return killFlag; } @Override public void reset() { this.killFlag = false; } @Override public void addStatusListener(IMemoryBoundedSolverStatusNotification listener) { this.notificationListeners.add(listener); } }<|fim▁end|>
//still line 15.2 of Naeem/Lhotak/Rodriguez //for each already-queried exit value <eP,d4> reachable from <sP,d3>, //create new caller-side jump functions to the return sites
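The hole in the Java sample above sits exactly where the solver replays previously recorded end summaries over a newly observed call edge. A minimal Python sketch of that two-sided bookkeeping (names and signatures are illustrative, not taken from the sample):

```python
# Minimal sketch of the IFDS summary bookkeeping: 'incoming' remembers call
# edges into a callee start fact, 'end_summary' remembers exit facts
# reachable from that start fact; each side replays the other on update.
from collections import defaultdict

incoming = defaultdict(dict)      # (method, d3) -> {call_site: caller_fact}
end_summary = defaultdict(set)    # (method, d3) -> {(exit_node, d4), ...}

def on_call(method, d3, call_site, caller_fact, apply_return_flow):
    incoming[(method, d3)][call_site] = caller_fact
    # Re-apply summaries that were computed before this call edge arrived.
    for exit_node, d4 in end_summary[(method, d3)]:
        apply_return_flow(call_site, caller_fact, exit_node, d4)

def on_exit(method, d3, exit_node, d4, apply_return_flow):
    end_summary[(method, d3)].add((exit_node, d4))
    # Replay the new summary over every call edge seen so far.
    for call_site, caller_fact in incoming[(method, d3)].items():
        apply_return_flow(call_site, caller_fact, exit_node, d4)
```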
<|file_name|>actual.js<|end_file_name|><|fim▁begin|>let arr = [];<|fim▁hole|>}<|fim▁end|>
for(let i = 0; i < 10; i++) { for (let i = 0; i < 10; i++) { arr.push(() => i); }
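The JavaScript fixture above tests per-iteration `let` bindings: each pushed closure captures its own `i`. Python closures bind late instead, so the equivalent loop needs the default-argument trick; a small sketch:

```python
# Late binding: all ten closures share one loop variable, so each returns 9.
late = [lambda: i for i in range(10)]
assert [f() for f in late] == [9] * 10

# Default-argument trick: bind the current value at definition time,
# mirroring what `let` does per iteration in the JS sample.
fresh = [lambda i=i: i for i in range(10)]
assert [f() for f in fresh] == list(range(10))
```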
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>""" Setup for the API """ import api log = api.logger.use(__name__) def index_mongo():<|fim▁hole|> """ Ensure the mongo collections are indexed. """ db = api.common.get_conn() log.debug("Ensuring mongo is indexed.") db.users.ensure_index("uid", unique=True, name="unique uid") db.users.ensure_index("username", unique=True, name="unique username") db.groups.ensure_index("gid", unique=True, name="unique gid") db.problems.ensure_index("pid", unique=True, name="unique pid") db.submissions.ensure_index("tid", name="submission tids") db.ssh.ensure_index("tid", unique=True, name="unique ssh tid") db.teams.ensure_index("team_name", unique=True, name="unique team names") db.cache.ensure_index("expireAt", expireAfterSeconds=0) db.cache.ensure_index("kwargs", name="kwargs") db.cache.ensure_index("args", name="args")<|fim▁end|>
<|file_name|>index.tsx<|end_file_name|><|fim▁begin|>import * as Container from '../../util/container' import * as Constants from '../../constants/profile' import * as ProfileGen from '../profile-gen' import * as RPCTypes from '../../constants/types/rpc-gen' import * as RouteTreeGen from '../route-tree-gen' import * as Saga from '../../util/saga' import * as TrackerConstants from '../../constants/tracker2' import * as Tracker2Gen from '../tracker2-gen' import logger from '../../logger' import openURL from '../../util/open-url' import {RPCError} from '../../util/errors' import {pgpSaga} from './pgp' import {proofsSaga} from './proofs' const editProfile = async (state: Container.TypedState, action: ProfileGen.EditProfilePayload) => { await RPCTypes.userProfileEditRpcPromise( { bio: action.payload.bio, fullName: action.payload.fullname, location: action.payload.location, }, TrackerConstants.waitingKey ) return Tracker2Gen.createShowUser({asTracker: false, username: state.config.username}) } const uploadAvatar = async (action: ProfileGen.UploadAvatarPayload) => { try { await RPCTypes.userUploadUserAvatarRpcPromise( { crop: action.payload.crop, filename: action.payload.filename, }, Constants.uploadAvatarWaitingKey ) return RouteTreeGen.createNavigateUp() } catch (error_) { const error = error_ as RPCError // error displayed in component logger.warn(`Error uploading user avatar: ${error.message}`) return false } } const finishRevoking = (state: Container.TypedState) => [ Tracker2Gen.createShowUser({asTracker: false, username: state.config.username}), Tracker2Gen.createLoad({ assertion: state.config.username, guiID: TrackerConstants.generateGUIID(), inTracker: false, reason: '', }), ProfileGen.createRevokeFinish(), ] const showUserProfile = (action: ProfileGen.ShowUserProfilePayload) => { const {username} = action.payload return [ ...(Container.isMobile ? [RouteTreeGen.createClearModals()] : []), RouteTreeGen.createNavigateAppend({path: [{props: {username}, selected: 'profile'}]}), ] } const onClickAvatar = (action: ProfileGen.OnClickAvatarPayload) => { if (!action.payload.username) { return } if (!action.payload.openWebsite) { return ProfileGen.createShowUserProfile({username: action.payload.username}) } else {<|fim▁hole|> const submitRevokeProof = async ( state: Container.TypedState, action: ProfileGen.SubmitRevokeProofPayload ) => { const you = TrackerConstants.getDetails(state, state.config.username) if (!you || !you.assertions) return null const proof = [...you.assertions.values()].find(a => a.sigID === action.payload.proofId) if (!proof) return null if (proof.type === 'pgp') { try { await RPCTypes.revokeRevokeKeyRpcPromise({keyID: proof.kid}, Constants.waitingKey) return false } catch (e) { logger.info('error in dropping pgp key', e) return ProfileGen.createRevokeFinish({error: `Error in dropping Pgp Key: ${e}`}) } } else { try { await RPCTypes.revokeRevokeSigsRpcPromise( {sigIDQueries: [action.payload.proofId]}, Constants.waitingKey ) return ProfileGen.createFinishRevoking() } catch (error) { logger.warn(`Error when revoking proof ${action.payload.proofId}`, error) return ProfileGen.createRevokeFinish({ error: 'There was an error revoking your proof. 
You can click the button to try again.', }) } } } const submitBlockUser = async (action: ProfileGen.SubmitBlockUserPayload) => { try { await RPCTypes.userBlockUserRpcPromise({username: action.payload.username}, Constants.blockUserWaitingKey) return [ ProfileGen.createFinishBlockUser(), Tracker2Gen.createLoad({ assertion: action.payload.username, guiID: TrackerConstants.generateGUIID(), inTracker: false, reason: '', }), ] } catch (error_) { const error = error_ as RPCError logger.warn(`Error blocking user ${action.payload.username}`, error) return ProfileGen.createFinishBlockUser({ error: error.desc || `There was an error blocking ${action.payload.username}.`, }) } } const submitUnblockUser = async (action: ProfileGen.SubmitUnblockUserPayload) => { try { await RPCTypes.userUnblockUserRpcPromise( {username: action.payload.username}, Constants.blockUserWaitingKey ) return Tracker2Gen.createLoad({ assertion: action.payload.username, guiID: TrackerConstants.generateGUIID(), inTracker: false, reason: '', }) } catch (error_) { const error = error_ as RPCError logger.warn(`Error unblocking user ${action.payload.username}`, error) return Tracker2Gen.createUpdateResult({ guiID: action.payload.guiID, reason: `Failed to unblock ${action.payload.username}: ${error.desc}`, result: 'error', }) } } const hideStellar = async (_: Container.TypedState, action: ProfileGen.HideStellarPayload) => { try { await RPCTypes.apiserverPostRpcPromise( { args: [{key: 'hidden', value: action.payload.hidden ? '1' : '0'}], endpoint: 'stellar/hidden', }, TrackerConstants.waitingKey ) } catch (e) { logger.warn('Error setting Stellar hidden:', e) } } const editAvatar = () => Container.isMobile ? undefined // handled in platform specific : RouteTreeGen.createNavigateAppend({path: [{props: {image: null}, selected: 'profileEditAvatar'}]}) const backToProfile = (state: Container.TypedState) => [ RouteTreeGen.createNavigateUp(), Tracker2Gen.createShowUser({asTracker: false, username: state.config.username}), ] const wotVouch = async ( state: Container.TypedState, action: ProfileGen.WotVouchPayload, logger: Saga.SagaLogger ) => { const {guiID, otherText, proofs, statement, username, verificationType} = action.payload const details = state.tracker2.usernameToDetails.get(username) if (!details) { return ProfileGen.createWotVouchSetError({error: 'Missing user details.'}) } else if (details.state !== 'valid') { return ProfileGen.createWotVouchSetError({error: `User is not in a valid state. 
(${details.state})`}) } else if (details.resetBrokeTrack) { return ProfileGen.createWotVouchSetError({error: 'User has reset their account since following.'}) } try { await RPCTypes.wotWotVouchRpcPromise( { confidence: { other: otherText, proofs, usernameVerifiedVia: verificationType, }, guiID, username, vouchText: statement, }, Constants.wotAuthorWaitingKey ) } catch (error_) { const error = error_ as RPCError logger.warn('Error from wotVouch:', error) return ProfileGen.createWotVouchSetError({ error: error.desc || `There was an error submitting the claim.`, }) } return [ProfileGen.createWotVouchSetError({error: ''}), RouteTreeGen.createClearModals()] } function* _profileSaga() { yield* Saga.chainAction2(ProfileGen.submitRevokeProof, submitRevokeProof) yield* Saga.chainAction(ProfileGen.submitBlockUser, submitBlockUser) yield* Saga.chainAction(ProfileGen.submitUnblockUser, submitUnblockUser) yield* Saga.chainAction2(ProfileGen.backToProfile, backToProfile) yield* Saga.chainAction2(ProfileGen.editProfile, editProfile) yield* Saga.chainAction(ProfileGen.uploadAvatar, uploadAvatar) yield* Saga.chainAction2(ProfileGen.finishRevoking, finishRevoking) yield* Saga.chainAction(ProfileGen.onClickAvatar, onClickAvatar) yield* Saga.chainAction(ProfileGen.showUserProfile, showUserProfile) yield* Saga.chainAction2(ProfileGen.editAvatar, editAvatar) yield* Saga.chainAction2(ProfileGen.hideStellar, hideStellar) yield* Saga.chainAction2(ProfileGen.wotVouch, wotVouch) } function* profileSaga() { yield Saga.spawn(_profileSaga) yield Saga.spawn(pgpSaga) yield Saga.spawn(proofsSaga) } export default profileSaga<|fim▁end|>
openURL(`https://keybase.io/${action.payload.username}`) return undefined } }
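The completed TypeScript action above follows a recurring shape in this sample: await an RPC, then map both success and failure onto follow-up actions rather than letting the error escape. A rough Python rendering of that shape, with a stub RPC standing in for the real client:

```python
# Sketch of the action pattern: run an RPC, return a follow-up "action"
# describing either success or a user-facing error. Names are illustrative.
import asyncio

async def submit_block_user(rpc, username):
    try:
        await rpc("userBlockUser", {"username": username})
        return {"type": "finishBlockUser"}
    except Exception as e:  # the sample maps RPCError onto an error action
        return {"type": "finishBlockUser",
                "error": f"There was an error blocking {username}: {e}"}

async def _demo():
    async def rpc(name, args):  # stub transport that always fails
        raise RuntimeError("offline")
    print(await submit_block_user(rpc, "alice"))

asyncio.run(_demo())
```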
<|file_name|>bench.rs<|end_file_name|><|fim▁begin|>/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use lz4_pyframe::compress; use lz4_pyframe::decompress; use minibench::bench; use minibench::elapsed; use rand_core::RngCore; use rand_core::SeedableRng; fn main() { let mut rng = rand_chacha::ChaChaRng::seed_from_u64(0); let mut buf = vec![0u8; 100_000000]; rng.fill_bytes(&mut buf); let compressed = compress(&buf).unwrap(); bench("compress (100M)", || { elapsed(|| { compress(&buf).unwrap(); }) }); bench("decompress (~100M)", || { elapsed(|| { decompress(&compressed).unwrap(); })<|fim▁hole|><|fim▁end|>
}); }
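The Rust benchmark above seeds an RNG, fills a large buffer, and times compression both ways. A rough standard-library-only Python analogue (zlib stands in for LZ4, and the buffer is smaller to keep it quick):

```python
# Micro-benchmark sketch: seeded random payload, best-of-N wall-clock timing.
import random, time, zlib

random.seed(0)
buf = random.randbytes(10_000_000)  # random.randbytes needs Python 3.9+

def _timed(fn):
    start = time.perf_counter()
    fn()
    return time.perf_counter() - start

def bench(label, fn, repeat=3):
    best = min(_timed(fn) for _ in range(repeat))
    print(f"{label}: {best * 1000:.1f} ms")

compressed = zlib.compress(buf)
bench("compress (10M)", lambda: zlib.compress(buf))
bench("decompress (~10M)", lambda: zlib.decompress(compressed))
```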
<|file_name|>caching.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python ''' Import this module to have access to a global redis cache named GLOBAL_CACHE. USAGE: from caching import GLOBAL_CACHE GLOBAL_CACHE.store('foo', 'bar') GLOBAL_CACHE.get('foo') >> bar ''' from redis_cache import SimpleCache try:<|fim▁hole|> GLOBAL_CACHE except NameError: GLOBAL_CACHE = SimpleCache(limit=1000, expire=60*60*24, namespace="GLOBAL_CACHE") else: # Already defined... pass<|fim▁end|>
<|file_name|>websocketService.js<|end_file_name|><|fim▁begin|>'use strict'; angular.module('MainConsole') .factory('WebSocketService', ['$rootScope', '$q', '$filter', '$location', function ($rootScope, $q, $filter, $location) { var service = {}; <|fim▁hole|> service.wsConnect = function() { // Websocket is at wss://hostname:port/ws var host = $location.host(); var port = $location.port(); var wsUrl = $rootScope.urlscheme.websocket + '/ws'; var ws = new WebSocket(wsUrl); ws.onopen = function(){ $rootScope.connected = 1; console.log("Socket has been opened!"); }; ws.onerror = function(){ $rootScope.connected = 0; console.log("Socket received an error!"); }; ws.onclose = function(){ $rootScope.connected = 0; console.log("Socket has been closed!"); } ws.onmessage = function(message) { //listener(JSON.parse(message.data)); service.callback(JSON.parse(message.data)); }; service.ws = ws; console.log('WebSocket Initialized'); }; service.listener = function(callback) { service.callback = callback; }; service.send = function(message) { service.ws.send(message); }; service.close = function(){ service.ws.close(); $rootScope.connected = 0; console.log("Socket has been closed!"); }; return service; }]);<|fim▁end|>
<|file_name|>manually_destroyed_control.rs<|end_file_name|><|fim▁begin|>/* Copyright 2015 Jordan Miner * * Licensed under the MIT license <LICENSE or * http://opensource.org/licenses/MIT>. This file may not be copied, * modified, or distributed except according to those terms. */ extern crate clear_coat; extern crate iup_sys; use std::ptr; use clear_coat::*; use clear_coat::common_attrs_cbs::*; use iup_sys::*;<|fim▁hole|>fn test_manually_destroyed_control() { let button = Button::new(); assert!(button.handle() != ptr::null_mut()); unsafe { IupDestroy(button.handle()); } button.set_title("Hello"); // should panic since control is destroyed (pointer should be zeroed) }<|fim▁end|>
#[test] #[should_panic(expected="attempted to use destroyed control")]
<|file_name|>context_processors.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from datetime import date from django.conf import settings def settings_context(request): """ Makes available a template var for some interesting var in settings.py """ try: ITEMS_PER_PAGE = settings.ITEMS_PER_PAGE except AttributeError: print "oooo" ITEMS_PER_PAGE = 20 <|fim▁hole|> return {"ITEMS_PER_PAGE": ITEMS_PER_PAGE, "TAGS_PER_PAGE": TAGS_PER_PAGE}<|fim▁end|>
try: TAGS_PER_PAGE = settings.TAGS_PER_PAGE except AttributeError: TAGS_PER_PAGE = 200
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python ############################################################# # ubi_reader/ubi_io # (c) 2013 Jason Pruitt ([email protected]) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ############################################################# from ubi.block import sort class ubi_file(object): """UBI image file object Arguments: Str:path -- Path to file to parse Int:block_size -- Erase block size of NAND in bytes. Int:start_offset -- (optional) Where to start looking in the file for UBI data. Int:end_offset -- (optional) Where to stop looking in the file. Methods: seek -- Put file head to specified byte offset. Int:offset read -- Read specified bytes from file handle. Int:size tell -- Returns byte offset of current file location. read_block -- Returns complete PEB data of provided block description. Obj:block read_block_data -- Returns LEB data only from provided block. Obj:block reader -- Generator that returns data from file. reset -- Reset file position to start_offset Handles all the actual file interactions, read, seek, extract blocks, etc. """ def __init__(self, path, block_size, start_offset=0, end_offset=None): self._fhandle = open(path, 'rb') self._start_offset = start_offset if end_offset: self._end_offset = end_offset else: self._fhandle.seek(0, 2) self._end_offset = self.tell() self._block_size = block_size if start_offset >= self._end_offset: raise Exception('Start offset larger than file size!') self._fhandle.seek(self._start_offset) def _set_start(self, i): self._start_offset = i def _get_start(self): return self._start_offset start_offset = property(_get_start, _set_start) def _get_end(self): return self._end_offset end_offset = property(_get_end) def _get_block_size(self): return self._block_size block_size = property(_get_block_size) def seek(self, offset): self._fhandle.seek(offset) def read(self, size): return self._fhandle.read(size) def tell(self): return self._fhandle.tell() def reset(self): self._fhandle.seek(self.start_offset) def reader(self): self.reset() while True: cur_loc = self._fhandle.tell() if self.end_offset and cur_loc > self.end_offset: break elif self.end_offset and self.end_offset - cur_loc < self.block_size: chunk_size = self.end_offset - cur_loc<|fim▁hole|> if not buf: break yield buf def read_block(self, block): """Read complete PEB data from file. Argument: Obj:block -- Block data is desired for. """ self.seek(block.file_offset) return self._fhandle.read(block.size) def read_block_data(self, block): """Read LEB data from file Argument: Obj:block -- Block data is desired for. 
""" self.seek(block.file_offset + block.ec_hdr.data_offset) buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad) return buf class leb_virtual_file(): def __init__(self, ubi, volume): self._ubi = ubi self._volume = volume self._blocks = sort.by_leb(self._volume.get_blocks(self._ubi.blocks)) self._seek = 0 self.leb_data_size = len(self._blocks) * self._ubi.leb_size self._last_leb = -1 self._last_buf = '' def read(self, i): buf = '' leb = int(self.tell() / self._ubi.leb_size) offset = self.tell() % self._ubi.leb_size if leb == self._last_leb: self.seek(self.tell() + i) return self._last_buf[offset:offset + i] else: buf = self._ubi.file.read_block_data(self._ubi.blocks[self._blocks[leb]]) self._last_buf = buf self._last_leb = leb self.seek(self.tell() + i) return buf[offset:offset + i] def reset(self): self.seek(0) def seek(self, offset): self._seek = offset def tell(self): return self._seek def reader(self): last_leb = 0 for block in self._blocks: while 0 != (self._ubi.blocks[block].leb_num - last_leb): last_leb += 1 yield '\xff' * self._ubi.leb_size last_leb += 1 yield self._ubi.file.read_block_data(self._ubi.blocks[block])<|fim▁end|>
else: chunk_size = self.block_size buf = self.read(chunk_size)
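The completion above supplies the tail of `reader()`: read a full erase block unless fewer than `block_size` bytes remain before `end_offset`. The same loop, self-contained over an in-memory buffer:

```python
# Block reader sketch: fixed-size chunks with a short final chunk.
import io

def blocks(data, block_size, start=0, end=None):
    fh = io.BytesIO(data)
    end = len(data) if end is None else end
    fh.seek(start)
    while True:
        cur = fh.tell()
        if cur >= end:
            break
        chunk_size = min(block_size, end - cur)  # short final chunk
        buf = fh.read(chunk_size)
        if not buf:
            break
        yield buf

assert [len(b) for b in blocks(b"x" * 10, 4)] == [4, 4, 2]
```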
<|file_name|>test_file_browser_netcdf4.py<|end_file_name|><|fim▁begin|>from pydons import MatStruct, FileBrowser, LazyDataset import netCDF4 import numpy as np import tempfile import os DATADIR = os.path.join(os.path.dirname(__file__), 'data') def test_netcdf4(): d = MatStruct() data1 = np.random.rand(np.random.randint(1, 1000)) with tempfile.NamedTemporaryFile(suffix=".nc") as tmpf: fh = netCDF4.Dataset(tmpf.name, mode='w') grp = fh.createGroup('mygroup') dim1 = grp.createDimension('dim1') var1 = grp.createVariable('var1', data1.dtype.str, (dim1.name, )) var1[:] = data1 fh.close() <|fim▁hole|> assert np.all(dd.mygroup.var1[:] == data1)<|fim▁end|>
dd = FileBrowser(tmpf.name) assert 'mygroup' in dd assert 'var1' in dd.mygroup
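The netCDF4 test above is a write-close-reopen round trip against a named temporary file. The bare pattern with only the standard library (using `mkstemp` so the reopen also works on Windows):

```python
# Round-trip test sketch: write, close, reopen by path, compare payloads.
import os, tempfile

def test_roundtrip():
    payload = os.urandom(256)
    fd, path = tempfile.mkstemp(suffix=".bin")
    try:
        with os.fdopen(fd, "wb") as fh:
            fh.write(payload)        # writer closes before the reader opens
        with open(path, "rb") as fh:
            assert fh.read() == payload
    finally:
        os.remove(path)

test_roundtrip()
```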
<|file_name|>api.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import {GeneratedFile, ParseSourceSpan, Position} from '@angular/compiler'; import * as ts from 'typescript'; export const DEFAULT_ERROR_CODE = 100; export const UNKNOWN_ERROR_CODE = 500; export const SOURCE = 'angular' as 'angular'; export interface DiagnosticMessageChain { messageText: string; position?: Position; next?: DiagnosticMessageChain; } export interface Diagnostic { messageText: string; span?: ParseSourceSpan; position?: Position; chain?: DiagnosticMessageChain; category: ts.DiagnosticCategory; code: number; source: 'angular';<|fim▁hole|> export function isTsDiagnostic(diagnostic: any): diagnostic is ts.Diagnostic { return diagnostic != null && diagnostic.source !== 'angular'; } export function isNgDiagnostic(diagnostic: any): diagnostic is Diagnostic { return diagnostic != null && diagnostic.source === 'angular'; } export interface CompilerOptions extends ts.CompilerOptions { // NOTE: These comments and aio/content/guides/aot-compiler.md should be kept in sync. // Write statistics about compilation (e.g. total time, ...) // Note: this is the --diagnostics command line option from TS (which is @internal // on ts.CompilerOptions interface). diagnostics?: boolean; // Absolute path to a directory where generated file structure is written. // If unspecified, generated files will be written alongside sources. // @deprecated - no effect genDir?: string; // Path to the directory containing the tsconfig.json file. basePath?: string; // Don't produce .metadata.json files (they don't work for bundled emit with --out) skipMetadataEmit?: boolean; // Produce an error if the metadata written for a class would produce an error if used. strictMetadataEmit?: boolean; // Don't produce .ngfactory.js or .ngstyle.js files skipTemplateCodegen?: boolean; // Always report errors when the type of a parameter supplied whose injection type cannot // be determined. When this option is not provided or is `false`, constructor // parameters of classes marked with `@Injectable` whose type cannot be resolved will // produce a warning. With this option `true`, they produce an error. When this option is // not provided, it is treated as if it were `false`. In Angular 6.0, if this option is not // provided, it will be treated as `true`. strictInjectionParameters?: boolean; // Whether to generate a flat module index of the given name and the corresponding // flat module metadata. This option is intended to be used when creating flat // modules similar to how `@angular/core` and `@angular/common` are packaged. // When this option is used the `package.json` for the library should refer to the // generated flat module index instead of the library index file. When using this // option only one .metadata.json file is produced that contains all the metadata // necessary for symbols exported from the library index. // In the generated .ngfactory.ts files the flat module index is used to import symbols, // including both the public API from the library index as well as shrouded internal // symbols. // By default the .ts file supplied in the `files` field is assumed to be the // library index. If more than one is specified, uses `libraryIndex` to select the // file to use. If more than one .ts file is supplied and no `libraryIndex` is supplied // an error is produced. 
// A flat module index .d.ts and .js will be created with the given `flatModuleOutFile` // name in the same location as the library index .d.ts file is emitted. // For example, if a library uses `public_api.ts` file as the library index of the // module the `tsconfig.json` `files` field would be `["public_api.ts"]`. The // `flatModuleOutFile` options could then be set to, for example `"index.js"`, which // produces `index.d.ts` and `index.metadata.json` files. The library's // `package.json`'s `module` field would be `"index.js"` and the `typings` field would // be `"index.d.ts"`. flatModuleOutFile?: string; // Preferred module id to use for importing flat module. References generated by `ngc` // will use this module name when importing symbols from the flat module. This is only // meaningful when `flatModuleOutFile` is also supplied. It is otherwise ignored. flatModuleId?: string; // A prefix to insert in generated private symbols, e.g. for "my_prefix_" we // would generate private symbols named like `ɵmy_prefix_a`. flatModulePrivateSymbolPrefix?: string; // Whether to generate code for library code. // If true, produce .ngfactory.ts and .ngstyle.ts files for .d.ts inputs. // Default is true. generateCodeForLibraries?: boolean; // Whether to enable all type checks for templates. // This will be true be default in Angular 6. fullTemplateTypeCheck?: boolean; // Whether to use the CompilerHost's fileNameToModuleName utility (if available) to generate // import module specifiers. This is false by default, and exists to support running ngtsc // within Google. This option is internal and is used by the ng_module.bzl rule to switch // behavior between Bazel and Blaze. _useHostForImportGeneration?: boolean; // Insert JSDoc type annotations needed by Closure Compiler annotateForClosureCompiler?: boolean; // Modify how angular annotations are emitted to improve tree-shaking. // Default is static fields. // decorators: Leave the Decorators in-place. This makes compilation faster. // TypeScript will emit calls to the __decorate helper. // `--emitDecoratorMetadata` can be used for runtime reflection. // However, the resulting code will not properly tree-shake. // static fields: Replace decorators with a static field in the class. // Allows advanced tree-shakers like Closure Compiler to remove // unused classes. annotationsAs?: 'decorators'|'static fields'; // Print extra information while running the compiler trace?: boolean; // Whether to enable lowering expressions lambdas and expressions in a reference value // position. disableExpressionLowering?: boolean; // Disable TypeScript Version Check. disableTypeScriptVersionCheck?: boolean; // Locale of the application i18nOutLocale?: string; // Export format (xlf, xlf2 or xmb) i18nOutFormat?: string; // Path to the extracted message file i18nOutFile?: string; // Import format if different from `i18nFormat` i18nInFormat?: string; // Locale of the imported translations i18nInLocale?: string; // Path to the translation file i18nInFile?: string; // How to handle missing messages i18nInMissingTranslations?: 'error'|'warning'|'ignore'; // Whether translation variable name should contain external message id // (used by Closure Compiler's output of `goog.getMsg` for transition period) i18nUseExternalIds?: boolean; // Whether to remove blank text nodes from compiled templates. It is `false` by default starting // from Angular 6. 
preserveWhitespaces?: boolean; /** generate all possible generated files */ allowEmptyCodegenFiles?: boolean; /** * Whether to generate .ngsummary.ts files that allow using AOTed artifacts * in JIT mode. This is off by default. */ enableSummariesForJit?: boolean; /** * Whether to replace the `templateUrl` and `styleUrls` property in all * @Component decorators with inlined contents in `template` and `styles` * properties. * When enabled, the .js output of ngc will have no lazy-loaded `templateUrl` * or `styleUrl`s. Note that this requires that resources be available to * load statically at compile-time. */ enableResourceInlining?: boolean; /** * Tells the compiler to generate definitions using the Render3 style code generation. * This option defaults to `false`. * * Not all features are supported with this option enabled. It is only supported * for experimentation and testing of Render3 style code generation. * * Acceptable values are as follows: * * `false` - run ngc normally * `true` - run the ngtsc compiler instead of the normal ngc compiler * `ngtsc` - alias for `true` * `tsc` - behave like plain tsc as much as possible (used for testing JIT code) * * @publicApi */ enableIvy?: boolean|'ngtsc'|'tsc'; /** @internal */ collectAllErrors?: boolean; /** * Whether NGC should generate re-exports for external symbols which are referenced * in Angular metadata (e.g. @Component, @Inject, @ViewChild). This can be enabled in * order to avoid dynamically generated module dependencies which can break strict * dependency enforcements. This is not enabled by default. * Read more about this here: https://github.com/angular/angular/issues/25644. */ createExternalSymbolFactoryReexports?: boolean; } export interface CompilerHost extends ts.CompilerHost { /** * Converts a module name that is used in an `import` to a file path. * I.e. `path/to/containingFile.ts` containing `import {...} from 'module-name'`. */ moduleNameToFileName?(moduleName: string, containingFile: string): string|null; /** * Converts a file path to a module name that can be used as an `import ...` * I.e. `path/to/importedFile.ts` should be imported by `path/to/containingFile.ts`. */ fileNameToModuleName?(importedFilePath: string, containingFilePath: string): string; /** * Converts a file path for a resource that is used in a source file or another resource * into a filepath. */ resourceNameToFileName?(resourceName: string, containingFilePath: string): string|null; /** * Converts a file name into a representation that should be stored in a summary file. * This has to include changing the suffix as well. * E.g. * `some_file.ts` -> `some_file.d.ts` * * @param referringSrcFileName the source file that refers to fileName */ toSummaryFileName?(fileName: string, referringSrcFileName: string): string; /** * Converts a fileName that was processed by `toSummaryFileName` back into a real fileName * given the fileName of the library that is referring to it. */ fromSummaryFileName?(fileName: string, referringLibFileName: string): string; /** * Load a referenced resource either statically or asynchronously. If the host returns a * `Promise<string>` it is assumed the user of the corresponding `Program` will call * `loadNgStructureAsync()`. Returning `Promise<string>` outside `loadNgStructureAsync()` will * cause a diagnostic error or an exception to be thrown. */ readResource?(fileName: string): Promise<string>|string; /** * Produce an AMD module name for the source file. Used in Bazel. 
* * An AMD module can have an arbitrary name, so that it is require'd by name * rather than by path. See http://requirejs.org/docs/whyamd.html#namedmodules */ amdModuleName?(sf: ts.SourceFile): string|undefined; } export enum EmitFlags { DTS = 1 << 0, JS = 1 << 1, Metadata = 1 << 2, I18nBundle = 1 << 3, Codegen = 1 << 4, Default = DTS | JS | Codegen, All = DTS | JS | Metadata | I18nBundle | Codegen, } export interface CustomTransformers { beforeTs?: ts.TransformerFactory<ts.SourceFile>[]; afterTs?: ts.TransformerFactory<ts.SourceFile>[]; } export interface TsEmitArguments { program: ts.Program; host: CompilerHost; options: CompilerOptions; targetSourceFile?: ts.SourceFile; writeFile?: ts.WriteFileCallback; cancellationToken?: ts.CancellationToken; emitOnlyDtsFiles?: boolean; customTransformers?: ts.CustomTransformers; } export interface TsEmitCallback { (args: TsEmitArguments): ts.EmitResult; } export interface TsMergeEmitResultsCallback { (results: ts.EmitResult[]): ts.EmitResult; } export interface LibrarySummary { fileName: string; text: string; sourceFile?: ts.SourceFile; } export interface LazyRoute { route: string; module: {name: string, filePath: string}; referencedModule: {name: string, filePath: string}; } export interface Program { /** * Retrieve the TypeScript program used to produce semantic diagnostics and emit the sources. * * Angular structural information is required to produce the program. */ getTsProgram(): ts.Program; /** * Retrieve options diagnostics for the TypeScript options used to create the program. This is * faster than calling `getTsProgram().getOptionsDiagnostics()` since it does not need to * collect Angular structural information to produce the errors. */ getTsOptionDiagnostics(cancellationToken?: ts.CancellationToken): ReadonlyArray<ts.Diagnostic>; /** * Retrieve options diagnostics for the Angular options used to create the program. */ getNgOptionDiagnostics(cancellationToken?: ts.CancellationToken): ReadonlyArray<ts.Diagnostic|Diagnostic>; /** * Retrieve the syntax diagnostics from TypeScript. This is faster than calling * `getTsProgram().getSyntacticDiagnostics()` since it does not need to collect Angular structural * information to produce the errors. */ getTsSyntacticDiagnostics(sourceFile?: ts.SourceFile, cancellationToken?: ts.CancellationToken): ReadonlyArray<ts.Diagnostic>; /** * Retrieve the diagnostics for whether the structure of the Angular application is correctly formed. * This includes validating Angular annotations and the syntax of referenced and embedded HTML * and CSS. * * Note it is important to display TypeScript semantic diagnostics along with Angular * structural diagnostics as an error in the program structure might cause errors detected in * semantic analysis and a semantic error might cause errors in specifying the program structure. * * Angular structural information is required to produce these diagnostics. */ getNgStructuralDiagnostics(cancellationToken?: ts.CancellationToken): ReadonlyArray<Diagnostic>; /** * Retrieve the semantic diagnostics from TypeScript. This is equivalent to calling * `getTsProgram().getSemanticDiagnostics()` directly and is included for completeness. */ getTsSemanticDiagnostics(sourceFile?: ts.SourceFile, cancellationToken?: ts.CancellationToken): ReadonlyArray<ts.Diagnostic>; /** * Retrieve the Angular semantic diagnostics. * * Angular structural information is required to produce these diagnostics. 
*/ getNgSemanticDiagnostics(fileName?: string, cancellationToken?: ts.CancellationToken): ReadonlyArray<ts.Diagnostic|Diagnostic>; /** * Load Angular structural information asynchronously. If this method is not called then the * Angular structural information, including referenced HTML and CSS files, is loaded * synchronously. If the supplied Angular compiler host returns a promise from `loadResource()` * and this method is not called, a diagnostic error message will be produced, or * `getTsProgram()` or `emit` will throw. */ loadNgStructureAsync(): Promise<void>; /** * Returns the lazy routes in the program. * @param entryRoute A reference to an NgModule like `someModule#name`. If given, * will recursively analyze routes starting from this symbol only. * Otherwise will list all routes for all NgModules in the program. */ listLazyRoutes(entryRoute?: string): LazyRoute[]; /** * Emit the files requested by emitFlags implied by the program. * * Angular structural information is required to emit files. */ emit({emitFlags, cancellationToken, customTransformers, emitCallback, mergeEmitResultsCallback}?: { emitFlags?: EmitFlags, cancellationToken?: ts.CancellationToken, customTransformers?: CustomTransformers, emitCallback?: TsEmitCallback, mergeEmitResultsCallback?: TsMergeEmitResultsCallback }): ts.EmitResult; /** * Returns the .d.ts / .ngsummary.json / .ngfactory.d.ts files of libraries that have been emitted * in this program or previous programs with paths that emulate the fact that these libraries * have been compiled before with no outDir. */ getLibrarySummaries(): Map<string, LibrarySummary>; /** * @internal */ getEmittedGeneratedFiles(): Map<string, GeneratedFile>; /** * @internal */ getEmittedSourceFiles(): Map<string, ts.SourceFile>; }<|fim▁end|>
}
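The single `}` completed above closes the `Diagnostic` interface whose `source: 'angular'` field is what `isTsDiagnostic`/`isNgDiagnostic` discriminate on. The same tagged-union check in Python, with illustrative names:

```python
# Tagged-union discrimination sketch: route a diagnostic by its source tag.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Diagnostic:
    message_text: str
    code: int
    source: str  # 'angular' for NG diagnostics, anything else for TS

def is_ng_diagnostic(d: Optional[Diagnostic]) -> bool:
    return d is not None and d.source == "angular"

def is_ts_diagnostic(d: Optional[Diagnostic]) -> bool:
    return d is not None and d.source != "angular"

d = Diagnostic("template parse error", 100, "angular")
assert is_ng_diagnostic(d) and not is_ts_diagnostic(d)
```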
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>"""<|fim▁end|>
""" This module contains transformation functions (clip->clip) One file for one fx. The file's name is the fx's name
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>from urlparse import urljoin <|fim▁hole|> """ Return a slash or empty string based on tastypie setting """ if getattr(settings, 'TASTYPIE_ALLOW_MISSING_SLASH', False): return '' return '/' def urljoin_forced(base, path, **kwargs): """ urljoin base with path, except append '/' to base if it doesn't exist """ base = base.endswith('/') and base or '%s/' % base return urljoin(base, path, **kwargs)<|fim▁end|>
from django.conf import settings def trailing_slash_or_none():
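The helpers above target Python 2 (`from urlparse import urljoin`) and use the `and/or` short-circuit idiom. On Python 3 the import moves to `urllib.parse`, and the idiom reads cleaner as a conditional expression; a sketch:

```python
# Python 3 rendering of urljoin_forced: guarantee a trailing slash on base.
from urllib.parse import urljoin

def urljoin_forced(base, path):
    """urljoin base with path, appending '/' to base if it is missing."""
    base = base if base.endswith("/") else base + "/"
    return urljoin(base, path)

assert urljoin_forced("http://api.example.com/v1", "resource/") == (
    "http://api.example.com/v1/resource/")
```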
<|file_name|>AccessEvent.java<|end_file_name|><|fim▁begin|>/** * Logback: the reliable, generic, fast and flexible logging framework. * Copyright (C) 1999-2015, QOS.ch. All rights reserved. * * This program and the accompanying materials are dual-licensed under * either the terms of the Eclipse Public License v1.0 as published by * the Eclipse Foundation * * or (per the licensee's choosing) * * under the terms of the GNU Lesser General Public License version 2.1 * as published by the Free Software Foundation. */ package ch.qos.logback.access.spi; import ch.qos.logback.access.AccessConstants; import ch.qos.logback.access.pattern.AccessConverter; import ch.qos.logback.access.servlet.Util; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.Serializable; import java.util.ArrayList; import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.Vector; // Contributors: Joern Huxhorn (see also bug #110) /** * The Access module's internal representation of logging events. When the * logging component instance is called in the container to log then a * <code>AccessEvent</code> instance is created. This instance is passed * around to the different logback components. * * @author Ceki G&uuml;lc&uuml; * @author S&eacute;bastien Pennec */ public class AccessEvent implements Serializable, IAccessEvent { private static final long serialVersionUID = 866718993618836343L; private static final String EMPTY = ""; private transient final HttpServletRequest httpRequest; private transient final HttpServletResponse httpResponse; String requestURI; String requestURL; String remoteHost; String remoteUser; String remoteAddr; String protocol; String method; String serverName; String requestContent; String responseContent; long elapsedTime; Map<String, String> requestHeaderMap; Map<String, String[]> requestParameterMap; Map<String, String> responseHeaderMap; Map<String, Object> attributeMap; long contentLength = SENTINEL; int statusCode = SENTINEL; int localPort = SENTINEL; transient ServerAdapter serverAdapter; /** * The number of milliseconds elapsed from 1/1/1970 until logging event was * created. */ private long timeStamp = 0; public AccessEvent(HttpServletRequest httpRequest, HttpServletResponse httpResponse, ServerAdapter adapter) { this.httpRequest = httpRequest; this.httpResponse = httpResponse; this.timeStamp = System.currentTimeMillis(); this.serverAdapter = adapter; this.elapsedTime = calculateElapsedTime(); } /** * Returns the underlying HttpServletRequest. After serialization the returned * value will be null. * * @return */ @Override public HttpServletRequest getRequest() { return httpRequest; } /** * Returns the underlying HttpServletResponse. After serialization the returned * value will be null. * * @return */ @Override public HttpServletResponse getResponse() { return httpResponse; } @Override public long getTimeStamp() { return timeStamp; } public void setTimeStamp(long timeStamp) { if (this.timeStamp != 0) { throw new IllegalStateException( "timeStamp has been already set for this event."); } else { this.timeStamp = timeStamp; } } @Override public String getRequestURI() { if (requestURI == null) { if (httpRequest != null) { requestURI = httpRequest.getRequestURI(); } else { requestURI = NA; } } return requestURI; } /** * The first line of the request. 
*/ @Override public String getRequestURL() { if (requestURL == null) { if (httpRequest != null) { StringBuilder buf = new StringBuilder(); buf.append(httpRequest.getMethod()); buf.append(AccessConverter.SPACE_CHAR); buf.append(httpRequest.getRequestURI()); final String qStr = httpRequest.getQueryString(); if (qStr != null) { buf.append(AccessConverter.QUESTION_CHAR); buf.append(qStr); } buf.append(AccessConverter.SPACE_CHAR); buf.append(httpRequest.getProtocol()); requestURL = buf.toString(); } else { requestURL = NA; } } return requestURL; } @Override public String getRemoteHost() { if (remoteHost == null) { if (httpRequest != null) { // the underlying implementation of HttpServletRequest will // determine if remote lookup will be performed remoteHost = httpRequest.getRemoteHost(); } else { remoteHost = NA; } } return remoteHost; } @Override public String getRemoteUser() { if (remoteUser == null) { if (httpRequest != null) { remoteUser = httpRequest.getRemoteUser(); } else { remoteUser = NA; } } return remoteUser; } @Override public String getProtocol() { if (protocol == null) { if (httpRequest != null) { protocol = httpRequest.getProtocol(); } else { protocol = NA; } } return protocol; } @Override public String getMethod() { if (method == null) { if (httpRequest != null) { method = httpRequest.getMethod(); } else { method = NA; } } return method; } @Override public String getServerName() { if (serverName == null) { if (httpRequest != null) { serverName = httpRequest.getServerName(); } else { serverName = NA; } } return serverName; } @Override public String getRemoteAddr() { if (remoteAddr == null) { if (httpRequest != null) { remoteAddr = httpRequest.getRemoteAddr(); } else { remoteAddr = NA; } } return remoteAddr; } @Override public String getRequestHeader(String key) { String result = null; key = key.toLowerCase(); if (requestHeaderMap == null) { if (httpRequest != null) { buildRequestHeaderMap(); result = requestHeaderMap.get(key); } } else { result = requestHeaderMap.get(key); } if (result != null) { return result; } else { return NA; } } @Override public Enumeration getRequestHeaderNames() { // post-serialization if (httpRequest == null) { Vector<String> list = new Vector<String>(getRequestHeaderMap().keySet()); return list.elements(); } return httpRequest.getHeaderNames(); } @Override public Map<String, String> getRequestHeaderMap() { if (requestHeaderMap == null) { buildRequestHeaderMap(); } return requestHeaderMap; } public void buildRequestHeaderMap() { // according to RFC 2616 header names are case insensitive // latest versions of Tomcat return header names in lower-case requestHeaderMap = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER); Enumeration e = httpRequest.getHeaderNames(); if (e == null) { return; } while (e.hasMoreElements()) { String key = (String) e.nextElement(); requestHeaderMap.put(key, httpRequest.getHeader(key)); } } public void buildRequestParameterMap() { requestParameterMap = new HashMap<String, String[]>(); Enumeration e = httpRequest.getParameterNames(); if (e == null) { return; } while (e.hasMoreElements()) { String key = (String) e.nextElement(); requestParameterMap.put(key, httpRequest.getParameterValues(key)); } } @Override public Map<String, String[]> getRequestParameterMap() { if (requestParameterMap == null) { buildRequestParameterMap(); } return requestParameterMap; } @Override public String getAttribute(String key) { Object value = null; if (attributeMap != null) { // Event was prepared for deferred processing so we have a copy of 
attribute map and must use that copy value = attributeMap.get(key); } else if (httpRequest != null) { // We have original request so take attribute from it value = httpRequest.getAttribute(key); } return value != null ? value.toString() : NA; } private void copyAttributeMap() { if (httpRequest == null) { return; } attributeMap = new HashMap<String, Object>(); Enumeration<String> names = httpRequest.getAttributeNames(); while (names.hasMoreElements()) { String name = names.nextElement(); Object value = httpRequest.getAttribute(name); if (shouldCopyAttribute(name, value)) { attributeMap.put(name, value); } } } private boolean shouldCopyAttribute(String name, Object value) { if (AccessConstants.LB_INPUT_BUFFER.equals(name) || AccessConstants.LB_OUTPUT_BUFFER.equals(name)) { // Do not copy attributes used by logback internally - these are available via other getters anyway return false; } else if (value == null) { // No reasons to copy nulls - Map.get() will return null for missing keys and the list of attribute // names is not available through IAccessEvent return false; } else { // Only copy what is serializable return value instanceof Serializable; } } @Override public String[] getRequestParameter(String key) { if (httpRequest != null) { String[] value = httpRequest.getParameterValues(key); if (value == null) { return new String[]{ NA }; } else { return value; } } else { return new String[]{ NA }; } } @Override public String getCookie(String key) { if (httpRequest != null) { Cookie[] cookieArray = httpRequest.getCookies(); if (cookieArray == null) { return NA; } for (Cookie cookie : cookieArray) { if (key.equals(cookie.getName())) { return cookie.getValue(); } } } return NA; } @Override public long getContentLength() { if (contentLength == SENTINEL) { if (httpResponse != null) { contentLength = serverAdapter.getContentLength(); return contentLength; } } return contentLength; } public int getStatusCode() { if (statusCode == SENTINEL) { if (httpResponse != null) { statusCode = serverAdapter.getStatusCode(); } } return statusCode; } public long getElapsedTime() { return elapsedTime; } private long calculateElapsedTime() { if (serverAdapter.getRequestTimestamp() < 0) { return -1; } return getTimeStamp() - serverAdapter.getRequestTimestamp(); } public String getRequestContent() { if (requestContent != null) { return requestContent; } if (Util.isFormUrlEncoded(httpRequest)) { StringBuilder buf = new StringBuilder(); Enumeration pramEnumeration = httpRequest.getParameterNames(); // example: id=1234&user=cgu // number=1233&x=1 int count = 0; try { while (pramEnumeration.hasMoreElements()) { String key = (String) pramEnumeration.nextElement(); if (count++ != 0) { buf.append("&"); } buf.append(key); buf.append("="); String val = httpRequest.getParameter(key); if (val != null) { buf.append(val); } else { buf.append(""); } } } catch (Exception e) { // FIXME Why is try/catch required? 
e.printStackTrace(); } requestContent = buf.toString(); } else { // retrieve the byte array placed by TeeFilter byte[] inputBuffer = (byte[]) httpRequest .getAttribute(AccessConstants.LB_INPUT_BUFFER); if (inputBuffer != null) { requestContent = new String(inputBuffer); } if (requestContent == null || requestContent.length() == 0) { requestContent = EMPTY; } } return requestContent; } public String getResponseContent() { if (responseContent != null) { return responseContent; } if (Util.isImageResponse(httpResponse)) { responseContent = "[IMAGE CONTENTS SUPPRESSED]"; } else { // retreive the byte array previously placed by TeeFilter byte[] outputBuffer = (byte[]) httpRequest .getAttribute(AccessConstants.LB_OUTPUT_BUFFER); if (outputBuffer != null) { responseContent = new String(outputBuffer); } if (responseContent == null || responseContent.length() == 0) { responseContent = EMPTY; } } return responseContent; } public int getLocalPort() { if (localPort == SENTINEL) { if (httpRequest != null) { localPort = httpRequest.getLocalPort(); } } return localPort; } public ServerAdapter getServerAdapter() { return serverAdapter; } public String getResponseHeader(String key) { buildResponseHeaderMap(); return responseHeaderMap.get(key); } void buildResponseHeaderMap() { if (responseHeaderMap == null) { responseHeaderMap = serverAdapter.buildResponseHeaderMap(); } } public Map<String, String> getResponseHeaderMap() { buildResponseHeaderMap(); return responseHeaderMap; } public List<String> getResponseHeaderNameList() { buildResponseHeaderMap(); return new ArrayList<String>(responseHeaderMap.keySet()); } public void prepareForDeferredProcessing() { getRequestHeaderMap(); getRequestParameterMap(); getResponseHeaderMap(); getLocalPort(); getMethod(); <|fim▁hole|> getRequestURI(); getRequestURL(); getServerName(); getTimeStamp(); getElapsedTime(); getStatusCode(); getContentLength(); getRequestContent(); getResponseContent(); copyAttributeMap(); } }<|fim▁end|>
        getProtocol();
        getRemoteAddr();
        getRemoteHost();
        getRemoteUser();
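The same prompt/completion layout repeats for every row, whatever the language (the Java row above included). A minimal Python sketch of the reassembly, assuming nothing beyond the three marker tokens visible in these rows:

def assemble_fim(prompt, completion):
    # Marker names are copied from the rows in this dump; the helper itself is illustrative.
    BEGIN, HOLE, END = "<|fim▁begin|>", "<|fim▁hole|>", "<|fim▁end|>"
    body = prompt.split(BEGIN, 1)[1]        # drop the <|file_name|>... header
    prefix, suffix = body.split(HOLE, 1)    # code before / after the masked span
    if suffix.endswith(END):
        suffix = suffix[:-len(END)]         # strip the closing marker
    return prefix + completion + suffix

Applied to the utils.py row, this reproduces the assembled function shown earlier.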
<|file_name|>AggregateControllerTest.java<|end_file_name|><|fim▁begin|>/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.processor.aggregator; import org.apache.camel.ContextTestSupport; import org.apache.camel.Exchange; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.processor.aggregate.AggregateController; import org.apache.camel.processor.aggregate.AggregationStrategy; import org.apache.camel.processor.aggregate.DefaultAggregateController;<|fim▁hole|> /** * */ public class AggregateControllerTest extends ContextTestSupport { private AggregateController controller; public AggregateController getAggregateController() { if (controller == null) { controller = new DefaultAggregateController(); } return controller; } @Test public void testForceCompletionOfAll() throws Exception { getMockEndpoint("mock:aggregated").expectedMessageCount(0); template.sendBodyAndHeader("direct:start", "test1", "id", "1"); template.sendBodyAndHeader("direct:start", "test2", "id", "2"); template.sendBodyAndHeader("direct:start", "test3", "id", "1"); template.sendBodyAndHeader("direct:start", "test4", "id", "2"); assertMockEndpointsSatisfied(); getMockEndpoint("mock:aggregated").expectedMessageCount(2); getMockEndpoint("mock:aggregated").expectedBodiesReceivedInAnyOrder("test1test3", "test2test4"); getMockEndpoint("mock:aggregated").expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "force"); int groups = getAggregateController().forceCompletionOfAllGroups(); assertEquals(2, groups); assertMockEndpointsSatisfied(); } @Test public void testForceCompletionOfGroup() throws Exception { getMockEndpoint("mock:aggregated").expectedMessageCount(0); template.sendBodyAndHeader("direct:start", "test1", "id", "1"); template.sendBodyAndHeader("direct:start", "test2", "id", "2"); template.sendBodyAndHeader("direct:start", "test3", "id", "1"); template.sendBodyAndHeader("direct:start", "test4", "id", "2"); assertMockEndpointsSatisfied(); getMockEndpoint("mock:aggregated").expectedMessageCount(1); getMockEndpoint("mock:aggregated").expectedBodiesReceivedInAnyOrder("test1test3"); getMockEndpoint("mock:aggregated").expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "force"); int groups = getAggregateController().forceCompletionOfGroup("1"); assertEquals(1, groups); assertMockEndpointsSatisfied(); } @Override protected RouteBuilder createRouteBuilder() throws Exception { return new RouteBuilder() { @Override public void configure() throws Exception { from("direct:start") .aggregate(header("id"), new MyAggregationStrategy()).aggregateController(getAggregateController()) .completionSize(10) .to("mock:aggregated"); } }; } public static class MyAggregationStrategy implements AggregationStrategy { public Exchange aggregate(Exchange oldExchange, Exchange 
newExchange) {
            if (oldExchange == null) {
                return newExchange;
            }

            String body1 = oldExchange.getIn().getBody(String.class);
            String body2 = newExchange.getIn().getBody(String.class);
            oldExchange.getIn().setBody(body1 + body2);
            return oldExchange;
        }
    }
}<|fim▁end|>
import org.junit.Test;
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from backend import photos, boards
<|fim▁hole|>#print p.new('asdf',1,1)
print p.get(1)
b = boards()
print p.all(1)
print b.get(1)<|fim▁end|>
p = photos()
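Spliced together (a reconstruction: the completion p = photos() is assumed to fill the hole between the import and the commented-out call):

from backend import photos, boards

p = photos()
#print p.new('asdf',1,1)
print p.get(1)
b = boards()
print p.all(1)
print b.get(1)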
<|file_name|>wiggle_to_binned_array.py<|end_file_name|><|fim▁begin|>#!/afs/bx.psu.edu/project/pythons/py2.7-linux-x86_64-ucs4/bin/python2.7

"""
Convert wiggle data to a binned array. This assumes the input data is on a
single chromosome and does no sanity checks!

usage: %prog score_file out_file < wiggle_data
    -c, --comp=type: compression type (none, zlib, lzo)
"""

from __future__ import division

import sys
import psyco_full

import bx.wiggle
from bx.binned_array import BinnedArray
from bx_extras.fpconst import isNaN
from bx.cookbook import doc_optparse
from bx import misc

def main():

    # Parse command line
    options, args = doc_optparse.parse( __doc__ )
    try:
        if options.comp:
            comp_type = options.comp
        else:
            comp_type = None
        score_fname = args[0]
        out_fname = args[1]
    except:
        doc_optparse.exit()

    scores = BinnedArray()

    ## last_chrom = None

    for i, ( chrom, pos, val ) in enumerate( bx.wiggle.Reader( misc.open_compressed( score_fname ) ) ):
        #if last_chrom is None:
        #    last_chrom = chrom
        #else:
        #    assert chrom == last_chrom, "This script expects a 'wiggle' input on only one chromosome"
        scores[pos] = val
        # Status
        if i % 10000 == 0:
            print i, "scores processed"

    out = open( out_fname, "w" )
    if comp_type:
        scores.to_file( out, comp_type=comp_type )<|fim▁hole|>if __name__ == "__main__": main()<|fim▁end|>
    else:
        scores.to_file( out )
    out.close()
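The tail of main() from the pair above, assembled (a reconstruction: indentation is inferred, and the else branch plus out.close() come from the completion):

    out = open( out_fname, "w" )
    if comp_type:
        scores.to_file( out, comp_type=comp_type )
    else:
        scores.to_file( out )
    out.close()

if __name__ == "__main__": main()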
<|file_name|>custom_tests.rs<|end_file_name|><|fim▁begin|>use crate::generated::{CloudWatch, CloudWatchClient, Dimension, MetricDatum, PutMetricDataInput}; use rusoto_core::param::Params; use rusoto_core::signature::SignedRequest; use rusoto_core::signature::SignedRequestPayload; use rusoto_core::Region; use rusoto_mock::*; use serde_urlencoded; #[tokio::test] async fn should_serialize_complex_metric_data_params() { let mock = MockRequestDispatcher::with_status(200) .with_body("") .with_request_checker(|request: &SignedRequest| { assert_eq!("POST", request.method); assert_eq!("/", request.path); if let Some(SignedRequestPayload::Buffer(ref buffer)) = request.payload { let params: Params = serde_urlencoded::from_bytes(buffer).unwrap(); assert_eq!( params.get("Namespace"), Some(&Some("TestNamespace".to_owned())) ); assert_eq!( params.get("MetricData.member.1.MetricName"), Some(&Some("buffers".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Unit"), Some(&Some("Bytes".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Value"), Some(&Some("1".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Dimensions.member.1.Name"), Some(&Some("foo".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Dimensions.member.1.Value"), Some(&Some("bar".to_owned())) ); } else { panic!("Unexpected request.payload: {:?}", request.payload); } }); let metric_data = vec![MetricDatum { dimensions: Some(vec![Dimension { name: "foo".to_string(), value: "bar".to_string(), }]), metric_name: "buffers".to_string(), statistic_values: None, timestamp: None, unit: Some("Bytes".to_string()),<|fim▁hole|> namespace: "TestNamespace".to_string(), metric_data: metric_data, }; let client = CloudWatchClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); let response = client.put_metric_data(request).await.unwrap(); println!("{:#?}", response); }<|fim▁end|>
        value: Some(1.0),
        ..Default::default()
    }];

    let request = PutMetricDataInput {
<|file_name|>testing.go<|end_file_name|><|fim▁begin|>// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package testing provides support for automated testing of Go packages. // It is intended to be used in concert with the ``go test'' command, which automates // execution of any function of the form // func TestXxx(*testing.T) // where Xxx can be any alphanumeric string (but the first letter must not be in // [a-z]) and serves to identify the test routine. // // Within these functions, use the Error, Fail or related methods to signal failure. // // To write a new test suite, create a file whose name ends _test.go that // contains the TestXxx functions as described here. Put the file in the same // package as the one being tested. The file will be excluded from regular // package builds but will be included when the ``go test'' command is run. // For more detail, run ``go help test'' and ``go help testflag''. // // Tests and benchmarks may be skipped if not applicable with a call to // the Skip method of *T and *B: // func TestTimeConsuming(t *testing.T) { // if testing.Short() { // t.Skip("skipping test in short mode.") // } // ... // } // // Benchmarks // // Functions of the form // func BenchmarkXxx(*testing.B) // are considered benchmarks, and are executed by the "go test" command when // its -bench flag is provided. Benchmarks are run sequentially. // // For a description of the testing flags, see // https://golang.org/cmd/go/#hdr-Description_of_testing_flags. // // A sample benchmark function looks like this: // func BenchmarkHello(b *testing.B) { // for i := 0; i < b.N; i++ { // fmt.Sprintf("hello") // } // } // // The benchmark function must run the target code b.N times. // During benchmark execution, b.N is adjusted until the benchmark function lasts // long enough to be timed reliably. The output // BenchmarkHello 10000000 282 ns/op // means that the loop ran 10000000 times at a speed of 282 ns per loop. // // If a benchmark needs some expensive setup before running, the timer // may be reset: // // func BenchmarkBigLen(b *testing.B) { // big := NewBig() // b.ResetTimer() // for i := 0; i < b.N; i++ { // big.Len() // } // } // // If a benchmark needs to test performance in a parallel setting, it may use // the RunParallel helper function; such benchmarks are intended to be used with // the go test -cpu flag: // // func BenchmarkTemplateParallel(b *testing.B) { // templ := template.Must(template.New("test").Parse("Hello, {{.}}!")) // b.RunParallel(func(pb *testing.PB) { // var buf bytes.Buffer // for pb.Next() { // buf.Reset() // templ.Execute(&buf, "World") // } // }) // } // // Examples // // The package also runs and verifies example code. Example functions may // include a concluding line comment that begins with "Output:" and is compared with // the standard output of the function when the tests are run. (The comparison // ignores leading and trailing space.) 
These are examples of an example: // // func ExampleHello() { // fmt.Println("hello") // // Output: hello // } // // func ExampleSalutations() { // fmt.Println("hello, and") // fmt.Println("goodbye") // // Output: // // hello, and // // goodbye // } // // The comment prefix "Unordered output:" is like "Output:", but matches any // line order: // // func ExamplePerm() { // for _, value := range Perm(4) { // fmt.Println(value) // } // // Unordered output: 4 // // 2 // // 1 // // 3 // // 0 // } // // Example functions without output comments are compiled but not executed. // // The naming convention to declare examples for the package, a function F, a type T and // method M on type T are: // // func Example() { ... } // func ExampleF() { ... } // func ExampleT() { ... } // func ExampleT_M() { ... } // // Multiple example functions for a package/type/function/method may be provided by // appending a distinct suffix to the name. The suffix must start with a // lower-case letter. // // func Example_suffix() { ... } // func ExampleF_suffix() { ... } // func ExampleT_suffix() { ... } // func ExampleT_M_suffix() { ... } // // The entire test file is presented as the example when it contains a single // example function, at least one other function, type, variable, or constant // declaration, and no test or benchmark functions. // // Subtests and Sub-benchmarks // // The Run methods of T and B allow defining subtests and sub-benchmarks, // without having to define separate functions for each. This enables uses // like table-driven benchmarks and creating hierarchical tests. // It also provides a way to share common setup and tear-down code: // // func TestFoo(t *testing.T) { // // <setup code> // t.Run("A=1", func(t *testing.T) { ... }) // t.Run("A=2", func(t *testing.T) { ... }) // t.Run("B=1", func(t *testing.T) { ... }) // // <tear-down code> // } // // Each subtest and sub-benchmark has a unique name: the combination of the name // of the top-level test and the sequence of names passed to Run, separated by // slashes, with an optional trailing sequence number for disambiguation. // // The argument to the -run and -bench command-line flags is an unanchored regular // expression that matches the test's name. For tests with multiple slash-separated // elements, such as subtests, the argument is itself slash-separated, with // expressions matching each name element in turn. Because it is unanchored, an // empty expression matches any string. // For example, using "matching" to mean "whose name contains": // // go test -run '' # Run all tests. // go test -run Foo # Run top-level tests matching "Foo", such as "TestFooBar". // go test -run Foo/A= # For top-level tests matching "Foo", run subtests matching "A=". // go test -run /A=1 # For all top-level tests, run subtests matching "A=1". // // Subtests can also be used to control parallelism. A parent test will only // complete once all of its subtests complete. In this example, all tests are // run in parallel with each other, and only with each other, regardless of // other top-level tests that may be defined: // // func TestGroupedParallel(t *testing.T) { // for _, tc := range tests { // tc := tc // capture range variable // t.Run(tc.Name, func(t *testing.T) { // t.Parallel() // ... // }) // } // } // // Run does not return until parallel subtests have completed, providing a way // to clean up after a group of parallel tests: // // func TestTeardownParallel(t *testing.T) { // // This Run will not return until the parallel tests finish. 
// t.Run("group", func(t *testing.T) { // t.Run("Test1", parallelTest1) // t.Run("Test2", parallelTest2) // t.Run("Test3", parallelTest3) // }) // // <tear-down code> // } // // Main // // It is sometimes necessary for a test program to do extra setup or teardown // before or after testing. It is also sometimes necessary for a test to control // which code runs on the main thread. To support these and other cases, // if a test file contains a function: // // func TestMain(m *testing.M) // // then the generated test will call TestMain(m) instead of running the tests // directly. TestMain runs in the main goroutine and can do whatever setup // and teardown is necessary around a call to m.Run. It should then call // os.Exit with the result of m.Run. When TestMain is called, flag.Parse has // not been run. If TestMain depends on command-line flags, including those // of the testing package, it should call flag.Parse explicitly. // // A simple implementation of TestMain is: // // func TestMain(m *testing.M) { // // call flag.Parse() here if TestMain uses flags // os.Exit(m.Run()) // } // package testing import ( "bytes" "errors" "flag" "fmt" "internal/race" "io" "os" "runtime" "runtime/debug" "runtime/trace" "strconv" "strings" "sync" "sync/atomic" "time" ) var ( // The short flag requests that tests run more quickly, but its functionality // is provided by test writers themselves. The testing package is just its // home. The all.bash installation script sets it to make installation more // efficient, but by default the flag is off so a plain "go test" will do a // full test of the package. short = flag.Bool("test.short", false, "run smaller test suite to save time") // The directory in which to create profile files and the like. When run from // "go test", the binary always runs in the source directory for the package; // this flag lets "go test" tell the binary to write the files in the directory where // the "go test" command is run. outputDir = flag.String("test.outputdir", "", "write profiles to `dir`") // Report as tests are run; default is silent for success. 
chatty = flag.Bool("test.v", false, "verbose: print additional output") count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times") coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`") matchList = flag.String("test.list", "", "list tests, examples, and benchmarch maching `regexp` then exit") match = flag.String("test.run", "", "run only tests and examples matching `regexp`") memProfile = flag.String("test.memprofile", "", "write a memory profile to `file`") memProfileRate = flag.Int("test.memprofilerate", 0, "set memory profiling `rate` (see runtime.MemProfileRate)") cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to `file`") blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to `file`") blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)") mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution") mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()") traceFile = flag.String("test.trace", "", "write an execution trace to `file`") timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (0 means unlimited)") cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with") parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel") haveExamples bool // are there examples? cpuList []int ) // common holds the elements common between T and B and // captures common methods such as Errorf. type common struct { mu sync.RWMutex // guards this group of fields output []byte // Output generated by test or benchmark. w io.Writer // For flushToParent. ran bool // Test or benchmark (or one of its subtests) was executed. failed bool // Test or benchmark has failed. skipped bool // Test of benchmark has been skipped. done bool // Test is finished and all subtests have completed. helpers map[string]struct{} // functions to be skipped when writing file/line info chatty bool // A copy of the chatty flag. finished bool // Test function has completed. hasSub int32 // written atomically raceErrors int // number of races detected during test runner string // function name of tRunner running the test parent *common level int // Nesting depth of test or benchmark. name string // Name of test or benchmark. start time.Time // Time test or benchmark started duration time.Duration barrier chan bool // To signal parallel subtests they may start. signal chan bool // To signal a test is done. sub []*T // Queue of subtests to be run in parallel. } // Short reports whether the -test.short flag is set. func Short() bool { return *short } // CoverMode reports what the test coverage mode is set to. The // values are "set", "count", or "atomic". The return value will be // empty if test coverage is not enabled. func CoverMode() string { return cover.Mode } // Verbose reports whether the -test.v flag is set. func Verbose() bool { return *chatty } // frameSkip searches, starting after skip frames, for the first caller frame // in a function not marked as a helper and returns the frames to skip // to reach that site. The search stops if it finds a tRunner function that // was the entry point into the test. // This function must be called with c.mu held. 
func (c *common) frameSkip(skip int) int { if c.helpers == nil { return skip } var pc [50]uintptr // Skip two extra frames to account for this function // and runtime.Callers itself. n := runtime.Callers(skip+2, pc[:]) if n == 0 { panic("testing: zero callers found") } frames := runtime.CallersFrames(pc[:n]) var frame runtime.Frame more := true for i := 0; more; i++ { frame, more = frames.Next() if frame.Function == c.runner { // We've gone up all the way to the tRunner calling // the test function (so the user must have // called tb.Helper from inside that test function). // Only skip up to the test function itself. return skip + i - 1 } if _, ok := c.helpers[frame.Function]; !ok { // Found a frame that wasn't inside a helper function. return skip + i } } return skip } // decorate prefixes the string with the file and line of the call site // and inserts the final newline if needed and indentation tabs for formatting. // This function must be called with c.mu held. func (c *common) decorate(s string) string { skip := c.frameSkip(3) // decorate + log + public function. _, file, line, ok := runtime.Caller(skip) if ok { // Truncate file name at last file name separator. if index := strings.LastIndex(file, "/"); index >= 0 { file = file[index+1:] } else if index = strings.LastIndex(file, "\\"); index >= 0 { file = file[index+1:] } } else { file = "???" line = 1 } buf := new(bytes.Buffer) // Every line is indented at least one tab. buf.WriteByte('\t') fmt.Fprintf(buf, "%s:%d: ", file, line) lines := strings.Split(s, "\n") if l := len(lines); l > 1 && lines[l-1] == "" { lines = lines[:l-1] } for i, line := range lines { if i > 0 { // Second and subsequent lines are indented an extra tab. buf.WriteString("\n\t\t") } buf.WriteString(line) } buf.WriteByte('\n') return buf.String() } // flushToParent writes c.output to the parent after first writing the header // with the given format and arguments. func (c *common) flushToParent(format string, args ...interface{}) { p := c.parent p.mu.Lock() defer p.mu.Unlock() fmt.Fprintf(p.w, format, args...) c.mu.Lock() defer c.mu.Unlock() io.Copy(p.w, bytes.NewReader(c.output)) c.output = c.output[:0] } type indenter struct { c *common } func (w indenter) Write(b []byte) (n int, err error) { n = len(b) for len(b) > 0 { end := bytes.IndexByte(b, '\n') if end == -1 { end = len(b) } else { end++ } // An indent of 4 spaces will neatly align the dashes with the status // indicator of the parent. const indent = " " w.c.output = append(w.c.output, indent...) w.c.output = append(w.c.output, b[:end]...) b = b[end:] } return } // fmtDuration returns a string representing d in the form "87.00s". func fmtDuration(d time.Duration) string { return fmt.Sprintf("%.2fs", d.Seconds()) } // TB is the interface common to T and B. type TB interface { Error(args ...interface{}) Errorf(format string, args ...interface{}) Fail() FailNow() Failed() bool Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) Skipped() bool Helper() // A private method to prevent users implementing the // interface and so future additions to it will not // violate Go 1 compatibility. private() } var _ TB = (*T)(nil) var _ TB = (*B)(nil) // T is a type passed to Test functions to manage test state and support formatted test logs. // Logs are accumulated during execution and dumped to standard output when done. 
// // A test ends when its Test function returns or calls any of the methods // FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods, as well as // the Parallel method, must be called only from the goroutine running the // Test function. // // The other reporting methods, such as the variations of Log and Error, // may be called simultaneously from multiple goroutines. type T struct { common isParallel bool context *testContext // For running tests and subtests. } func (c *common) private() {} // Name returns the name of the running test or benchmark. func (c *common) Name() string { return c.name } func (c *common) setRan() { if c.parent != nil { c.parent.setRan() } c.mu.Lock() defer c.mu.Unlock() c.ran = true } // Fail marks the function as having failed but continues execution. func (c *common) Fail() { if c.parent != nil { c.parent.Fail() } c.mu.Lock() defer c.mu.Unlock() // c.done needs to be locked to synchronize checks to c.done in parent tests. if c.done { panic("Fail in goroutine after " + c.name + " has completed") } c.failed = true } // Failed reports whether the function has failed. func (c *common) Failed() bool { c.mu.RLock() failed := c.failed c.mu.RUnlock() return failed || c.raceErrors+race.Errors() > 0 } // FailNow marks the function as having failed and stops its execution. // Execution will continue at the next test or benchmark. // FailNow must be called from the goroutine running the // test or benchmark function, not from other goroutines // created during the test. Calling FailNow does not stop // those other goroutines. func (c *common) FailNow() { c.Fail() // Calling runtime.Goexit will exit the goroutine, which // will run the deferred functions in this goroutine, // which will eventually run the deferred lines in tRunner, // which will signal to the test loop that this test is done. // // A previous version of this code said: // // c.duration = ... // c.signal <- c.self // runtime.Goexit() // // This previous version duplicated code (those lines are in // tRunner no matter what), but worse the goroutine teardown // implicit in runtime.Goexit was not guaranteed to complete // before the test exited. If a test deferred an important cleanup // function (like removing temporary files), there was no guarantee // it would run on a test failure. Because we send on c.signal during // a top-of-stack deferred function now, we know that the send // only happens after any other stacked defers have completed. c.finished = true runtime.Goexit() } // log generates the output. It's always at the same stack depth. func (c *common) log(s string) { c.mu.Lock() defer c.mu.Unlock() c.output = append(c.output, c.decorate(s)...) } // Log formats its arguments using default formatting, analogous to Println, // and records the text in the error log. For tests, the text will be printed only if // the test fails or the -test.v flag is set. For benchmarks, the text is always // printed to avoid having performance depend on the value of the -test.v flag. func (c *common) Log(args ...interface{}) { c.log(fmt.Sprintln(args...)) } // Logf formats its arguments according to the format, analogous to Printf, and // records the text in the error log. A final newline is added if not provided. For // tests, the text will be printed only if the test fails or the -test.v flag is // set. For benchmarks, the text is always printed to avoid having performance // depend on the value of the -test.v flag. 
func (c *common) Logf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) } // Error is equivalent to Log followed by Fail. func (c *common) Error(args ...interface{}) { c.log(fmt.Sprintln(args...)) c.Fail() } // Errorf is equivalent to Logf followed by Fail. func (c *common) Errorf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) c.Fail() } // Fatal is equivalent to Log followed by FailNow. func (c *common) Fatal(args ...interface{}) { c.log(fmt.Sprintln(args...)) c.FailNow() } // Fatalf is equivalent to Logf followed by FailNow. func (c *common) Fatalf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) c.FailNow() } // Skip is equivalent to Log followed by SkipNow. func (c *common) Skip(args ...interface{}) { c.log(fmt.Sprintln(args...)) c.SkipNow() } // Skipf is equivalent to Logf followed by SkipNow. func (c *common) Skipf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...))<|fim▁hole|> // SkipNow marks the test as having been skipped and stops its execution. // If a test fails (see Error, Errorf, Fail) and is then skipped, // it is still considered to have failed. // Execution will continue at the next test or benchmark. See also FailNow. // SkipNow must be called from the goroutine running the test, not from // other goroutines created during the test. Calling SkipNow does not stop // those other goroutines. func (c *common) SkipNow() { c.skip() c.finished = true runtime.Goexit() } func (c *common) skip() { c.mu.Lock() defer c.mu.Unlock() c.skipped = true } // Skipped reports whether the test was skipped. func (c *common) Skipped() bool { c.mu.RLock() defer c.mu.RUnlock() return c.skipped } // Helper marks the calling function as a test helper function. // When printing file and line information, that function will be skipped. // Helper may be called simultaneously from multiple goroutines. // Helper has no effect if it is called directly from a TestXxx/BenchmarkXxx // function or a subtest/sub-benchmark function. func (c *common) Helper() { c.mu.Lock() defer c.mu.Unlock() if c.helpers == nil { c.helpers = make(map[string]struct{}) } c.helpers[callerName(1)] = struct{}{} } // callerName gives the function name (qualified with a package path) // for the caller after skip frames (where 0 means the current function). func callerName(skip int) string { // Make room for the skip PC. var pc [2]uintptr n := runtime.Callers(skip+2, pc[:]) // skip + runtime.Callers + callerName if n == 0 { panic("testing: zero callers found") } frames := runtime.CallersFrames(pc[:n]) frame, _ := frames.Next() return frame.Function } // Parallel signals that this test is to be run in parallel with (and only with) // other parallel tests. When a test is run multiple times due to use of // -test.count or -test.cpu, multiple instances of a single test never run in // parallel with each other. func (t *T) Parallel() { if t.isParallel { panic("testing: t.Parallel called multiple times") } t.isParallel = true // We don't want to include the time we spend waiting for serial tests // in the test duration. Record the elapsed time thus far and reset the // timer afterwards. t.duration += time.Since(t.start) // Add to the list of tests to be released by the parent. t.parent.sub = append(t.parent.sub, t) t.raceErrors += race.Errors() t.signal <- true // Release calling test. <-t.parent.barrier // Wait for the parent test to complete. 
t.context.waitParallel() t.start = time.Now() t.raceErrors += -race.Errors() } // An internal type but exported because it is cross-package; part of the implementation // of the "go test" command. type InternalTest struct { Name string F func(*T) } func tRunner(t *T, fn func(t *T)) { t.runner = callerName(0) // When this goroutine is done, either because fn(t) // returned normally or because a test failure triggered // a call to runtime.Goexit, record the duration and send // a signal saying that the test is done. defer func() { t.raceErrors += race.Errors() if t.raceErrors > 0 { t.Errorf("race detected during execution of test") } t.duration += time.Now().Sub(t.start) // If the test panicked, print any test output before dying. err := recover() if !t.finished && err == nil { err = fmt.Errorf("test executed panic(nil) or runtime.Goexit") } if err != nil { t.Fail() t.report() panic(err) } if len(t.sub) > 0 { // Run parallel subtests. // Decrease the running count for this test. t.context.release() // Release the parallel subtests. close(t.barrier) // Wait for subtests to complete. for _, sub := range t.sub { <-sub.signal } if !t.isParallel { // Reacquire the count for sequential tests. See comment in Run. t.context.waitParallel() } } else if t.isParallel { // Only release the count for this test if it was run as a parallel // test. See comment in Run method. t.context.release() } t.report() // Report after all subtests have finished. // Do not lock t.done to allow race detector to detect race in case // the user does not appropriately synchronizes a goroutine. t.done = true if t.parent != nil && atomic.LoadInt32(&t.hasSub) == 0 { t.setRan() } t.signal <- true }() t.start = time.Now() t.raceErrors = -race.Errors() fn(t) t.finished = true } // Run runs f as a subtest of t called name. It reports whether f succeeded. Run // runs f in a separate goroutine and will block until all its parallel subtests // have completed. // // Run may be called simultaneously from multiple goroutines, but all such calls // must return before the outer test function for t returns. func (t *T) Run(name string, f func(t *T)) bool { atomic.StoreInt32(&t.hasSub, 1) testName, ok, _ := t.context.match.fullName(&t.common, name) if !ok { return true } t = &T{ common: common{ barrier: make(chan bool), signal: make(chan bool), name: testName, parent: &t.common, level: t.level + 1, chatty: t.chatty, }, context: t.context, } t.w = indenter{&t.common} if t.chatty { // Print directly to root's io.Writer so there is no delay. root := t.parent for ; root.parent != nil; root = root.parent { } root.mu.Lock() fmt.Fprintf(root.w, "=== RUN %s\n", t.name) root.mu.Unlock() } // Instead of reducing the running count of this test before calling the // tRunner and increasing it afterwards, we rely on tRunner keeping the // count correct. This ensures that a sequence of sequential tests runs // without being preempted, even when their parent is a parallel test. This // may especially reduce surprises if *parallel == 1. go tRunner(t, f) <-t.signal return !t.failed } // testContext holds all fields that are common to all tests. This includes // synchronization primitives to run at most *parallel tests. type testContext struct { match *matcher mu sync.Mutex // Channel used to signal tests that are ready to be run in parallel. startParallel chan bool // running is the number of tests currently running in parallel. // This does not include tests that are waiting for subtests to complete. 
running int // numWaiting is the number tests waiting to be run in parallel. numWaiting int // maxParallel is a copy of the parallel flag. maxParallel int } func newTestContext(maxParallel int, m *matcher) *testContext { return &testContext{ match: m, startParallel: make(chan bool), maxParallel: maxParallel, running: 1, // Set the count to 1 for the main (sequential) test. } } func (c *testContext) waitParallel() { c.mu.Lock() if c.running < c.maxParallel { c.running++ c.mu.Unlock() return } c.numWaiting++ c.mu.Unlock() <-c.startParallel } func (c *testContext) release() { c.mu.Lock() if c.numWaiting == 0 { c.running-- c.mu.Unlock() return } c.numWaiting-- c.mu.Unlock() c.startParallel <- true // Pick a waiting test to be run. } // No one should be using func Main anymore. // See the doc comment on func Main and use MainStart instead. var errMain = errors.New("testing: unexpected use of func Main") type matchStringOnly func(pat, str string) (bool, error) func (f matchStringOnly) MatchString(pat, str string) (bool, error) { return f(pat, str) } func (f matchStringOnly) StartCPUProfile(w io.Writer) error { return errMain } func (f matchStringOnly) StopCPUProfile() {} func (f matchStringOnly) WriteHeapProfile(w io.Writer) error { return errMain } func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain } func (f matchStringOnly) ImportPath() string { return "" } // Main is an internal function, part of the implementation of the "go test" command. // It was exported because it is cross-package and predates "internal" packages. // It is no longer used by "go test" but preserved, as much as possible, for other // systems that simulate "go test" using Main, but Main sometimes cannot be updated as // new functionality is added to the testing package. // Systems simulating "go test" should be updated to use MainStart. func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) { os.Exit(MainStart(matchStringOnly(matchString), tests, benchmarks, examples).Run()) } // M is a type passed to a TestMain function to run the actual tests. type M struct { deps testDeps tests []InternalTest benchmarks []InternalBenchmark examples []InternalExample } // testDeps is an internal interface of functionality that is // passed into this package by a test's generated main package. // The canonical implementation of this interface is // testing/internal/testdeps's TestDeps. type testDeps interface { MatchString(pat, str string) (bool, error) StartCPUProfile(io.Writer) error StopCPUProfile() WriteHeapProfile(io.Writer) error WriteProfileTo(string, io.Writer, int) error ImportPath() string } // MainStart is meant for use by tests generated by 'go test'. // It is not meant to be called directly and is not subject to the Go 1 compatibility document. // It may change signature from release to release. func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M { return &M{ deps: deps, tests: tests, benchmarks: benchmarks, examples: examples, } } // Run runs the tests. It returns an exit code to pass to os.Exit. func (m *M) Run() int { // TestMain may have already called flag.Parse. 
if !flag.Parsed() { flag.Parse() } if len(*matchList) != 0 { listTests(m.deps.MatchString, m.tests, m.benchmarks, m.examples) return 0 } parseCpuList() m.before() startAlarm() haveExamples = len(m.examples) > 0 testRan, testOk := runTests(m.deps.MatchString, m.tests) exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples) stopAlarm() if !testRan && !exampleRan && *matchBenchmarks == "" { fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") } if !testOk || !exampleOk || !runBenchmarks(m.deps.ImportPath(), m.deps.MatchString, m.benchmarks) || race.Errors() > 0 { fmt.Println("FAIL") m.after() return 1 } fmt.Println("PASS") m.after() return 0 } func (t *T) report() { if t.parent == nil { return } dstr := fmtDuration(t.duration) format := "--- %s: %s (%s)\n" if t.Failed() { t.flushToParent(format, "FAIL", t.name, dstr) } else if t.chatty { if t.Skipped() { t.flushToParent(format, "SKIP", t.name, dstr) } else { t.flushToParent(format, "PASS", t.name, dstr) } } } func listTests(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) { if _, err := matchString(*matchList, "non-empty"); err != nil { fmt.Fprintf(os.Stderr, "testing: invalid regexp in -test.list (%q): %s\n", *matchList, err) os.Exit(1) } for _, test := range tests { if ok, _ := matchString(*matchList, test.Name); ok { fmt.Println(test.Name) } } for _, bench := range benchmarks { if ok, _ := matchString(*matchList, bench.Name); ok { fmt.Println(bench.Name) } } for _, example := range examples { if ok, _ := matchString(*matchList, example.Name); ok && example.Output != "" { fmt.Println(example.Name) } } } // An internal function but exported because it is cross-package; part of the implementation // of the "go test" command. func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) { ran, ok := runTests(matchString, tests) if !ran && !haveExamples { fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") } return ok } func runTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ran, ok bool) { ok = true for _, procs := range cpuList { runtime.GOMAXPROCS(procs) ctx := newTestContext(*parallel, newMatcher(matchString, *match, "-test.run")) t := &T{ common: common{ signal: make(chan bool), barrier: make(chan bool), w: os.Stdout, chatty: *chatty, }, context: ctx, } tRunner(t, func(t *T) { for _, test := range tests { t.Run(test.Name, test.F) } // Run catching the signal rather than the tRunner as a separate // goroutine to avoid adding a goroutine during the sequential // phase as this pollutes the stacktrace output when aborting. go func() { <-t.signal }() }) ok = ok && !t.Failed() ran = ran || t.ran } return ran, ok } // before runs before all testing. func (m *M) before() { if *memProfileRate > 0 { runtime.MemProfileRate = *memProfileRate } if *cpuProfile != "" { f, err := os.Create(toOutputDir(*cpuProfile)) if err != nil { fmt.Fprintf(os.Stderr, "testing: %s\n", err) return } if err := m.deps.StartCPUProfile(f); err != nil { fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err) f.Close() return } // Could save f so after can call f.Close; not worth the effort. 
} if *traceFile != "" { f, err := os.Create(toOutputDir(*traceFile)) if err != nil { fmt.Fprintf(os.Stderr, "testing: %s\n", err) return } if err := trace.Start(f); err != nil { fmt.Fprintf(os.Stderr, "testing: can't start tracing: %s\n", err) f.Close() return } // Could save f so after can call f.Close; not worth the effort. } if *blockProfile != "" && *blockProfileRate >= 0 { runtime.SetBlockProfileRate(*blockProfileRate) } if *mutexProfile != "" && *mutexProfileFraction >= 0 { runtime.SetMutexProfileFraction(*mutexProfileFraction) } if *coverProfile != "" && cover.Mode == "" { fmt.Fprintf(os.Stderr, "testing: cannot use -test.coverprofile because test binary was not built with coverage enabled\n") os.Exit(2) } } // after runs after all testing. func (m *M) after() { if *cpuProfile != "" { m.deps.StopCPUProfile() // flushes profile to disk } if *traceFile != "" { trace.Stop() // flushes trace to disk } if *memProfile != "" { f, err := os.Create(toOutputDir(*memProfile)) if err != nil { fmt.Fprintf(os.Stderr, "testing: %s\n", err) os.Exit(2) } runtime.GC() // materialize all statistics if err = m.deps.WriteHeapProfile(f); err != nil { fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err) os.Exit(2) } f.Close() } if *blockProfile != "" && *blockProfileRate >= 0 { f, err := os.Create(toOutputDir(*blockProfile)) if err != nil { fmt.Fprintf(os.Stderr, "testing: %s\n", err) os.Exit(2) } if err = m.deps.WriteProfileTo("block", f, 0); err != nil { fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err) os.Exit(2) } f.Close() } if *mutexProfile != "" && *mutexProfileFraction >= 0 { f, err := os.Create(toOutputDir(*mutexProfile)) if err != nil { fmt.Fprintf(os.Stderr, "testing: %s\n", err) os.Exit(2) } if err = m.deps.WriteProfileTo("mutex", f, 0); err != nil { fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err) os.Exit(2) } f.Close() } if cover.Mode != "" { coverReport() } } // toOutputDir returns the file name relocated, if required, to outputDir. // Simple implementation to avoid pulling in path/filepath. func toOutputDir(path string) string { if *outputDir == "" || path == "" { return path } if runtime.GOOS == "windows" { // On Windows, it's clumsy, but we can be almost always correct // by just looking for a drive letter and a colon. // Absolute paths always have a drive letter (ignoring UNC). // Problem: if path == "C:A" and outputdir == "C:\Go" it's unclear // what to do, but even then path/filepath doesn't help. // TODO: Worth doing better? Probably not, because we're here only // under the management of go test. if len(path) >= 2 { letter, colon := path[0], path[1] if ('a' <= letter && letter <= 'z' || 'A' <= letter && letter <= 'Z') && colon == ':' { // If path starts with a drive letter we're stuck with it regardless. return path } } } if os.IsPathSeparator(path[0]) { return path } return fmt.Sprintf("%s%c%s", *outputDir, os.PathSeparator, path) } var timer *time.Timer // startAlarm starts an alarm if requested. func startAlarm() { if *timeout > 0 { timer = time.AfterFunc(*timeout, func() { debug.SetTraceback("all") panic(fmt.Sprintf("test timed out after %v", *timeout)) }) } } // stopAlarm turns off the alarm. 
func stopAlarm() { if *timeout > 0 { timer.Stop() } } func parseCpuList() { for _, val := range strings.Split(*cpuListStr, ",") { val = strings.TrimSpace(val) if val == "" { continue } cpu, err := strconv.Atoi(val) if err != nil || cpu <= 0 { fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu\n", val) os.Exit(1) } for i := uint(0); i < *count; i++ { cpuList = append(cpuList, cpu) } } if cpuList == nil { for i := uint(0); i < *count; i++ { cpuList = append(cpuList, runtime.GOMAXPROCS(-1)) } } }<|fim▁end|>
	c.SkipNow()
}
<|file_name|>reach_api.py<|end_file_name|><|fim▁begin|>import os import json import tempfile import urllib, urllib2 import requests from indra.java_vm import autoclass, JavaException<|fim▁hole|>from processor import ReachProcessor def process_pmc(pmc_id): xml_str = pmc_client.get_xml(pmc_id) with tempfile.NamedTemporaryFile() as fh: fh.write(xml_str) fh.flush() rp = process_nxml(fh.name) return rp def process_text(txt, use_tempdir=False, offline=False): if offline: nxml_txt = '<article><body><sec><p>%s</p></sec></body></article>' % txt tmp_file = tempfile.NamedTemporaryFile() tmp_file.file.write(nxml_txt) tmp_file.file.flush() return process_nxml(tmp_file.name) else: url = 'http://agathon.sista.arizona.edu:8080/odinweb/api/text' req = urllib2.Request(url, data=urllib.urlencode({'text': txt})) res = urllib2.urlopen(req) json_str = res.read() json_dict = json.loads(json_str) events_dict = json_dict['events'] events_json_str = json.dumps(events_dict, indent=1) with open('reach_output.json', 'wt') as fh: fh.write(json_str) return process_json_str(events_json_str) def process_nxml(file_name, use_tempdir=False, offline=False): if offline: base = os.path.basename(file_name) file_id = os.path.splitext(base)[0] if use_tempdir: tmp_dir = tempfile.mkdtemp() else: tmp_dir = '.' try: paper_reader = autoclass('edu.arizona.sista.reach.ReadPaper') paper_reader.main([file_name, tmp_dir]) except JavaException: print 'Could not process file %s.' % file_name return None json_file_name = os.path.join(tmp_dir, file_id + '.uaz.events.json') return process_json_file(json_file_name) else: url = 'http://agathon.sista.arizona.edu:8080/odinweb/api/nxml' txt = open(file_name, 'rt').read() req = urllib2.Request(url, data=urllib.urlencode({'nxml': txt})) res = urllib2.urlopen(req) json_str = res.read() json_dict = json.loads(json_str) return process_json_str(json_str, events_only=False) def process_json_file(file_name): try: with open(file_name, 'rt') as fh: json_str = fh.read() return process_json_str(json_str) except IOError: print 'Could not read file %s.' % file_name def process_json_str(json_str, events_only=True): if not events_only: json_dict = json.loads(json_str) events_dict = json_dict['events'] events_json_str = json.dumps(events_dict, indent=1) else: events_json_str = json_str events_json_str = events_json_str.replace('frame-id','frame_id') events_json_str = events_json_str.replace('argument-label','argument_label') events_json_str = events_json_str.replace('object-meta','object_meta') events_json_str = events_json_str.replace('doc-id','doc_id') json_dict = json.loads(events_json_str) rp = ReachProcessor(json_dict) rp.get_phosphorylation() rp.get_complexes() return rp if __name__ == '__main__': rp = process_json_file('PMC0000001.uaz.events.json')<|fim▁end|>
import indra.databases.pmc_client as pmc_client
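Here the completion is a single import; spliced in, the reach_api.py header reads as below (a reconstruction: the blank-line grouping is inferred, while the module paths come straight from the row):

import os
import json
import tempfile
import urllib, urllib2

import requests

from indra.java_vm import autoclass, JavaException
import indra.databases.pmc_client as pmc_client
from processor import ReachProcessor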
<|file_name|>uint1.cpp<|end_file_name|><|fim▁begin|>/*============================================================================= Copyright (c) 2001-2011 Joel de Guzman Copyright (c) 2001-2011 Hartmut Kaiser Copyright (c) 2011 Bryce Lelbach Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) =============================================================================*/ #include "uint.hpp" int main() { using spirit_test::test; using spirit_test::test_attr; /////////////////////////////////////////////////////////////////////////// // unsigned tests /////////////////////////////////////////////////////////////////////////// { using boost::spirit::qi::uint_; unsigned u; BOOST_TEST(test("123456", uint_)); BOOST_TEST(test_attr("123456", uint_, u)); BOOST_TEST(u == 123456); BOOST_TEST(test(max_unsigned, uint_)); BOOST_TEST(test_attr(max_unsigned, uint_, u)); BOOST_TEST(u == UINT_MAX); BOOST_TEST(!test(unsigned_overflow, uint_)); BOOST_TEST(!test_attr(unsigned_overflow, uint_, u)); } /////////////////////////////////////////////////////////////////////////// // binary tests /////////////////////////////////////////////////////////////////////////// { using boost::spirit::qi::bin; unsigned u; BOOST_TEST(test("11111110", bin)); BOOST_TEST(test_attr("11111110", bin, u)); BOOST_TEST(u == 0xFE); BOOST_TEST(test(max_binary, bin)); BOOST_TEST(test_attr(max_binary, bin, u)); BOOST_TEST(u == UINT_MAX); BOOST_TEST(!test(binary_overflow, bin)); BOOST_TEST(!test_attr(binary_overflow, bin, u)); } /////////////////////////////////////////////////////////////////////////// // octal tests /////////////////////////////////////////////////////////////////////////// { using boost::spirit::qi::oct; unsigned u; BOOST_TEST(test("12545674515", oct)); BOOST_TEST(test_attr("12545674515", oct, u)); BOOST_TEST(u == 012545674515); BOOST_TEST(test(max_octal, oct)); BOOST_TEST(test_attr(max_octal, oct, u)); BOOST_TEST(u == UINT_MAX); BOOST_TEST(!test(octal_overflow, oct)); BOOST_TEST(!test_attr(octal_overflow, oct, u)); } /////////////////////////////////////////////////////////////////////////// // hex tests /////////////////////////////////////////////////////////////////////////// { using boost::spirit::qi::hex; unsigned u; BOOST_TEST(test("95BC8DF", hex)); BOOST_TEST(test_attr("95BC8DF", hex, u)); BOOST_TEST(u == 0x95BC8DF); BOOST_TEST(test("abcdef12", hex)); BOOST_TEST(test_attr("abcdef12", hex, u)); BOOST_TEST(u == 0xabcdef12); BOOST_TEST(test(max_hex, hex)); BOOST_TEST(test_attr(max_hex, hex, u)); BOOST_TEST(u == UINT_MAX); BOOST_TEST(!test(hex_overflow, hex)); BOOST_TEST(!test_attr(hex_overflow, hex, u)); } /////////////////////////////////////////////////////////////////////////// // limited fieldwidth /////////////////////////////////////////////////////////////////////////// { unsigned u; using boost::spirit::qi::uint_parser; uint_parser<unsigned, 10, 1, 3> uint3; BOOST_TEST(test("123456", uint3, false)); BOOST_TEST(test_attr("123456", uint3, u, false)); BOOST_TEST(u == 123); uint_parser<unsigned, 10, 2, 4> uint4; BOOST_TEST(test("123456", uint4, false)); BOOST_TEST(test_attr("123456", uint4, u, false)); BOOST_TEST(u == 1234); char const * first = "0000000"; char const * last = first + std::strlen(first); uint_parser<unsigned, 10, 4, 4> uint_exact4; BOOST_TEST(boost::spirit::qi::parse(first, last, uint_exact4, u) && first != last && (last-first == 3) && u == 0); first = "0001400"; last = first + std::strlen(first); 
BOOST_TEST(boost::spirit::qi::parse(first, last, uint_exact4, u) && first != last && (last-first == 3) && u == 1); BOOST_TEST(!test("1", uint4)); BOOST_TEST(!test_attr("1", uint4, u)); BOOST_TEST(test_attr("014567", uint4, u, false) && u == 145); } /////////////////////////////////////////////////////////////////////////// // action tests /////////////////////////////////////////////////////////////////////////// { using boost::phoenix::ref;<|fim▁hole|> using boost::spirit::qi::uint_; using boost::spirit::ascii::space; int n; BOOST_TEST(test("123", uint_[ref(n) = _1])); BOOST_TEST(n == 123); BOOST_TEST(test(" 456", uint_[ref(n) = _1], space)); BOOST_TEST(n == 456); } /////////////////////////////////////////////////////////////////////////// // Check overflow is parse error /////////////////////////////////////////////////////////////////////////// { boost::spirit::qi::uint_parser<boost::uint8_t> uint8_; boost::uint8_t u8; BOOST_TEST(!test_attr("999", uint8_, u8)); BOOST_TEST(!test_attr("256", uint8_, u8)); BOOST_TEST(test_attr("255", uint8_, u8)); boost::spirit::qi::uint_parser<boost::uint16_t> uint16_; boost::uint16_t u16; BOOST_TEST(!test_attr("99999", uint16_, u16)); BOOST_TEST(!test_attr("65536", uint16_, u16)); BOOST_TEST(test_attr("65535", uint16_, u16)); boost::spirit::qi::uint_parser<boost::uint32_t> uint32_; boost::uint32_t u32; BOOST_TEST(!test_attr("9999999999", uint32_, u32)); BOOST_TEST(!test_attr("4294967296", uint32_, u32)); BOOST_TEST(test_attr("4294967295", uint32_, u32)); } /////////////////////////////////////////////////////////////////////////// // custom uint tests /////////////////////////////////////////////////////////////////////////// { using boost::spirit::qi::uint_; using boost::spirit::qi::uint_parser; custom_uint u; BOOST_TEST(test_attr("123456", uint_, u)); uint_parser<custom_uint, 10, 1, 2> uint2; BOOST_TEST(test_attr("12", uint2, u)); } return boost::report_errors(); }<|fim▁end|>
using boost::spirit::qi::_1;
<|file_name|>gcp_compute_operator.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from airflow import AirflowException from airflow.contrib.hooks.gcp_compute_hook import GceHook from airflow.contrib.utils.gcp_field_validator import GcpBodyFieldValidator from airflow.models import BaseOperator from airflow.utils.decorators import apply_defaults class GceBaseOperator(BaseOperator): """ Abstract base operator for Google Compute Engine operators to inherit from. """ @apply_defaults def __init__(self, project_id, zone, resource_id, gcp_conn_id='google_cloud_default', api_version='v1', *args, **kwargs): self.project_id = project_id self.zone = zone self.full_location = 'projects/{}/zones/{}'.format(self.project_id, self.zone) self.resource_id = resource_id self.gcp_conn_id = gcp_conn_id self.api_version = api_version self._validate_inputs() self._hook = GceHook(gcp_conn_id=self.gcp_conn_id, api_version=self.api_version) super(GceBaseOperator, self).__init__(*args, **kwargs) def _validate_inputs(self): if not self.project_id: raise AirflowException("The required parameter 'project_id' is missing") if not self.zone: raise AirflowException("The required parameter 'zone' is missing") if not self.resource_id: raise AirflowException("The required parameter 'resource_id' is missing") def execute(self, context): pass class GceInstanceStartOperator(GceBaseOperator): """ Start an instance in Google Compute Engine. :param project_id: Google Cloud Platform project where the Compute Engine instance exists. :type project_id: str :param zone: Google Cloud Platform zone where the instance exists. :type zone: str :param resource_id: Name of the Compute Engine instance resource. :type resource_id: str :param gcp_conn_id: The connection ID used to connect to Google Cloud Platform. :type gcp_conn_id: str :param api_version: API version used (e.g. v1). :type api_version: str """ template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version') @apply_defaults def __init__(self, project_id, zone, resource_id, gcp_conn_id='google_cloud_default', api_version='v1', *args, **kwargs): super(GceInstanceStartOperator, self).__init__( project_id=project_id, zone=zone, resource_id=resource_id, gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs) def execute(self, context): return self._hook.start_instance(self.project_id, self.zone, self.resource_id) class GceInstanceStopOperator(GceBaseOperator): """ Stop an instance in Google Compute Engine. :param project_id: Google Cloud Platform project where the Compute Engine instance exists. :type project_id: str :param zone: Google Cloud Platform zone where the instance exists. :type zone: str :param resource_id: Name of the Compute Engine instance resource. 
:type resource_id: str :param gcp_conn_id: The connection ID used to connect to Google Cloud Platform. :type gcp_conn_id: str :param api_version: API version used (e.g. v1). :type api_version: str """ template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version') @apply_defaults def __init__(self, project_id, zone, resource_id, gcp_conn_id='google_cloud_default', api_version='v1', *args, **kwargs): super(GceInstanceStopOperator, self).__init__( project_id=project_id, zone=zone, resource_id=resource_id, gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs) def execute(self, context): return self._hook.stop_instance(self.project_id, self.zone, self.resource_id) SET_MACHINE_TYPE_VALIDATION_SPECIFICATION = [ dict(name="machineType", regexp="^.+$"), ] class GceSetMachineTypeOperator(GceBaseOperator): """ Changes the machine type for a stopped instance to the machine type specified in the request. :param project_id: Google Cloud Platform project where the Compute Engine instance exists. :type project_id: str :param zone: Google Cloud Platform zone where the instance exists. :type zone: str :param resource_id: Name of the Compute Engine instance resource. :type resource_id: str :param body: Body required by the Compute Engine setMachineType API, as described in https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType#request-body :type body: dict :param gcp_conn_id: The connection ID used to connect to Google Cloud Platform. :type gcp_conn_id: str :param api_version: API version used (e.g. v1). :type api_version: str """<|fim▁hole|> def __init__(self, project_id, zone, resource_id, body, gcp_conn_id='google_cloud_default', api_version='v1', validate_body=True, *args, **kwargs): self.body = body self._field_validator = None if validate_body: self._field_validator = GcpBodyFieldValidator( SET_MACHINE_TYPE_VALIDATION_SPECIFICATION, api_version=api_version) super(GceSetMachineTypeOperator, self).__init__( project_id=project_id, zone=zone, resource_id=resource_id, gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs) def _validate_all_body_fields(self): if self._field_validator: self._field_validator.validate(self.body) def execute(self, context): self._validate_all_body_fields() return self._hook.set_machine_type(self.project_id, self.zone, self.resource_id, self.body)<|fim▁end|>
template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version') @apply_defaults
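For context, the two operators defined in the row above are normally instantiated inside a DAG. A minimal usage sketch (illustrative only, not part of the dataset row: the DAG id, schedule, project, zone, and instance name are invented, and the import path assumes the Airflow 1.10-era contrib layout this file ships in):

from datetime import datetime

from airflow import DAG
from airflow.contrib.operators.gcp_compute_operator import (
    GceInstanceStartOperator,
    GceInstanceStopOperator,
)

dag = DAG('gce_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

# Start the instance, then stop it again once the first task succeeds.
start = GceInstanceStartOperator(
    task_id='start_instance',
    project_id='my-project',    # hypothetical project
    zone='europe-west1-b',      # hypothetical zone
    resource_id='my-instance',  # hypothetical instance name
    dag=dag,
)

stop = GceInstanceStopOperator(
    task_id='stop_instance',
    project_id='my-project',
    zone='europe-west1-b',
    resource_id='my-instance',
    dag=dag,
)

start >> stop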
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- class ImproperlyConfigured(Exception):<|fim▁hole|> class TaskHandlingError(Exception): pass<|fim▁end|>
pass
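A short sketch of how exception types like the two above are typically used; the functions and the settings key are hypothetical, added only to illustrate the two roles (configuration errors versus wrapped runtime failures):

def load_settings(settings):
    # Configuration problems surface as ImproperlyConfigured.
    if 'broker_url' not in settings:
        raise ImproperlyConfigured('broker_url must be set')

def handle(task):
    try:
        task.run()
    except Exception as exc:
        # Lower-level failures are wrapped in the domain-specific type.
        raise TaskHandlingError('task failed: %s' % exc)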
<|file_name|>Tuplerator.java<|end_file_name|><|fim▁begin|>/* This file is part of VoltDB. * Copyright (C) 2008-2015 VoltDB Inc. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with VoltDB. If not, see <http://www.gnu.org/licenses/>. */ package org.voltdb.groovy; import static java.lang.Character.toLowerCase; import static java.lang.Character.toUpperCase; import groovy.lang.Closure; import groovy.lang.GString; import groovy.lang.GroovyObjectSupport; import java.util.NavigableMap; import org.voltdb.VoltTable; import org.voltdb.VoltType; import com.google_voltpatches.common.collect.ImmutableSortedMap; /** * Groovy table access expediter. It allows you to easily navigate a VoltTable, * and access its column values. * <p> * Example usage on a query that returns results for the : * <code><pre> * cr = client.callProcedure('@AdHoc','select INTEGER_COL, STRING_COL from FOO') * tuplerator(cr.results[0]).eachRow { * * integerColValueByIndex = it[0] * stringColValueByIndex = it[1] *<|fim▁hole|> * * integerColValueByField = it.integerCol * stringColValyeByField = it.stringCol * } * * </code></pre> * */ public class Tuplerator extends GroovyObjectSupport { private final VoltTable table; private final VoltType [] byIndex; private final NavigableMap<String, Integer> byName; public static Tuplerator newInstance(VoltTable table) { return new Tuplerator(table); } public Tuplerator(final VoltTable table) { this.table = table; this.byIndex = new VoltType[table.getColumnCount()]; ImmutableSortedMap.Builder<String, Integer> byNameBuilder = ImmutableSortedMap.naturalOrder(); for (int c = 0; c < byIndex.length; ++c) { VoltType cType = table.getColumnType(c); StringBuilder cName = new StringBuilder(table.getColumnName(c)); byIndex[c] = cType; boolean upperCaseIt = false; for (int i = 0; i < cName.length();) { char chr = cName.charAt(i); if (chr == '_' || chr == '.' || chr == '$') { cName.deleteCharAt(i); upperCaseIt = true; } else { chr = upperCaseIt ? 
toUpperCase(chr) : toLowerCase(chr); cName.setCharAt(i, chr); upperCaseIt = false; ++i; } } byNameBuilder.put(cName.toString(),c); } byName = byNameBuilder.build(); } /** * It calls the given closure on each row of the underlying table by passing itself * as the only closure parameter * * @param c the self instance of Tuplerator */ public void eachRow(Closure<Void> c) { while (table.advanceRow()) { c.call(this); } table.resetRowPosition(); } /** * It calls the given closure on each row of the underlying table for up to the specified limit, * by passing itself as the only closure parameter * * @param maxRows maximum rows to call the closure on * @param c closure */ public void eachRow(int maxRows, Closure<Void> c) { while (--maxRows >= 0 && table.advanceRow()) { c.call(this); } } public Object getAt(int cidx) { Object cval = table.get(cidx, byIndex[cidx]); if (table.wasNull()) cval = null; return cval; } public Object getAt(String cname) { Integer cidx = byName.get(cname); if (cidx == null) { throw new IllegalArgumentException("No Column named '" + cname + "'"); } return getAt(cidx); } public Object getAt(GString cname) { return getAt(cname.toString()); } @Override public Object getProperty(String name) { return getAt(name); } /** * Sets the table row cursor to the given position * @param num row number to set the row cursor to * @return an instance of self */ public Tuplerator atRow(int num) { table.advanceToRow(num); return this; } /** * Resets the table row cursor * @return an instance of self */ public Tuplerator reset() { table.resetRowPosition(); return this; } /** * Returns the underlying table * @return the underlying table */ public VoltTable getTable() { return table; } }<|fim▁end|>
 * integerColValueByName = it['integerCol']
 * stringColValueByName = it['stringCol']
<|file_name|>overlays.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals from functools import total_ordering from django.contrib.gis.geos import ( LinearRing, LineString, Point, Polygon, fromstr, ) from django.utils import six from django.utils.encoding import python_2_unicode_compatible from django.utils.html import html_safe @html_safe @python_2_unicode_compatible class GEvent(object): """ A Python wrapper for the Google GEvent object. Events can be attached to any object derived from GOverlayBase with the add_event() call. For more information please see the Google Maps API Reference: https://developers.google.com/maps/documentation/javascript/reference#event Example: from django.shortcuts import render_to_response from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline def sample_request(request): polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)') event = GEvent('click', 'function() { location.href = "http://www.google.com"}') polyline.add_event(event) return render_to_response('mytemplate.html', {'google' : GoogleMap(polylines=[polyline])}) """ def __init__(self, event, action): """ Initializes a GEvent object. Parameters: <|fim▁hole|> event: string for the event, such as 'click'. The event must be a valid event for the object in the Google Maps API. There is no validation of the event type within Django. action: string containing a Javascript function, such as 'function() { location.href = "newurl";}' The string must be a valid Javascript function. Again there is no validation fo the function within Django. """ self.event = event self.action = action def __str__(self): "Returns the parameter part of a GEvent." return '"%s", %s' % (self.event, self.action) @html_safe @python_2_unicode_compatible class GOverlayBase(object): def __init__(self): self.events = [] def latlng_from_coords(self, coords): "Generates a JavaScript array of GLatLng objects for the given coordinates." return '[%s]' % ','.join('new GLatLng(%s,%s)' % (y, x) for x, y in coords) def add_event(self, event): "Attaches a GEvent to the overlay object." self.events.append(event) def __str__(self): "The string representation is the JavaScript API call." return '%s(%s)' % (self.__class__.__name__, self.js_params) class GPolygon(GOverlayBase): """ A Python wrapper for the Google GPolygon object. For more information please see the Google Maps API Reference: https://developers.google.com/maps/documentation/javascript/reference#Polygon """ def __init__(self, poly, stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1, fill_color='#0000ff', fill_opacity=0.4): """ The GPolygon object initializes on a GEOS Polygon or a parameter that may be instantiated into GEOS Polygon. Please note that this will not depict a Polygon's internal rings. Keyword Options: stroke_color: The color of the polygon outline. Defaults to '#0000ff' (blue). stroke_weight: The width of the polygon outline, in pixels. Defaults to 2. stroke_opacity: The opacity of the polygon outline, between 0 and 1. Defaults to 1. fill_color: The color of the polygon fill. Defaults to '#0000ff' (blue). fill_opacity: The opacity of the polygon fill. Defaults to 0.4. """ if isinstance(poly, six.string_types): poly = fromstr(poly) if isinstance(poly, (tuple, list)): poly = Polygon(poly) if not isinstance(poly, Polygon): raise TypeError('GPolygon may only initialize on GEOS Polygons.') # Getting the envelope of the input polygon (used for automatically # determining the zoom level). 
self.envelope = poly.envelope # Translating the coordinates into a JavaScript array of # Google `GLatLng` objects. self.points = self.latlng_from_coords(poly.shell.coords) # Stroke settings. self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight # Fill settings. self.fill_color, self.fill_opacity = fill_color, fill_opacity super(GPolygon, self).__init__() @property def js_params(self): return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity, self.fill_color, self.fill_opacity) class GPolyline(GOverlayBase): """ A Python wrapper for the Google GPolyline object. For more information please see the Google Maps API Reference: https://developers.google.com/maps/documentation/javascript/reference#Polyline """ def __init__(self, geom, color='#0000ff', weight=2, opacity=1): """ The GPolyline object may be initialized on GEOS LineStirng, LinearRing, and Polygon objects (internal rings not supported) or a parameter that may instantiated into one of the above geometries. Keyword Options: color: The color to use for the polyline. Defaults to '#0000ff' (blue). weight: The width of the polyline, in pixels. Defaults to 2. opacity: The opacity of the polyline, between 0 and 1. Defaults to 1. """ # If a GEOS geometry isn't passed in, try to construct one. if isinstance(geom, six.string_types): geom = fromstr(geom) if isinstance(geom, (tuple, list)): geom = Polygon(geom) # Generating the lat/lng coordinate pairs. if isinstance(geom, (LineString, LinearRing)): self.latlngs = self.latlng_from_coords(geom.coords) elif isinstance(geom, Polygon): self.latlngs = self.latlng_from_coords(geom.shell.coords) else: raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.') # Getting the envelope for automatic zoom determination. self.envelope = geom.envelope self.color, self.weight, self.opacity = color, weight, opacity super(GPolyline, self).__init__() @property def js_params(self): return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity) @total_ordering class GIcon(object): """ Creates a GIcon object to pass into a Gmarker object. The keyword arguments map to instance attributes of the same name. These, in turn, correspond to a subset of the attributes of the official GIcon javascript object: https://developers.google.com/maps/documentation/javascript/reference#Icon Because a Google map often uses several different icons, a name field has been added to the required arguments. Required Arguments: varname: A string which will become the basis for the js variable name of the marker, for this reason, your code should assign a unique name for each GIcon you instantiate, otherwise there will be name space collisions in your javascript. Keyword Options: image: The url of the image to be used as the icon on the map defaults to 'G_DEFAULT_ICON' iconsize: a tuple representing the pixel size of the foreground (not the shadow) image of the icon, in the format: (width, height) ex.: GIcon('fast_food', image="/media/icon/star.png", iconsize=(15,10)) Would indicate your custom icon was 15px wide and 10px height. shadow: the url of the image of the icon's shadow shadowsize: a tuple representing the pixel size of the shadow image, format is the same as ``iconsize`` iconanchor: a tuple representing the pixel coordinate relative to the top left corner of the icon image at which this icon is anchored to the map. In (x, y) format. 
x increases to the right in the Google Maps coordinate system and y increases downwards in the Google Maps coordinate system.) infowindowanchor: The pixel coordinate relative to the top left corner of the icon image at which the info window is anchored to this icon. """ def __init__(self, varname, image=None, iconsize=None, shadow=None, shadowsize=None, iconanchor=None, infowindowanchor=None): self.varname = varname self.image = image self.iconsize = iconsize self.shadow = shadow self.shadowsize = shadowsize self.iconanchor = iconanchor self.infowindowanchor = infowindowanchor def __eq__(self, other): return self.varname == other.varname def __lt__(self, other): return self.varname < other.varname def __hash__(self): # XOR with hash of GIcon type so that hash('varname') won't # equal hash(GIcon('varname')). return hash(self.__class__) ^ hash(self.varname) class GMarker(GOverlayBase): """ A Python wrapper for the Google GMarker object. For more information please see the Google Maps API Reference: https://developers.google.com/maps/documentation/javascript/reference#Marker Example: from django.shortcuts import render_to_response from django.contrib.gis.maps.google.overlays import GMarker, GEvent def sample_request(request): marker = GMarker('POINT(101 26)') event = GEvent('click', 'function() { location.href = "http://www.google.com"}') marker.add_event(event) return render_to_response('mytemplate.html', {'google' : GoogleMap(markers=[marker])}) """ def __init__(self, geom, title=None, draggable=False, icon=None): """ The GMarker object may initialize on GEOS Points or a parameter that may be instantiated into a GEOS point. Keyword options map to GMarkerOptions -- so far only the title option is supported. Keyword Options: title: Title option for GMarker, will be displayed as a tooltip. draggable: Draggable option for GMarker, disabled by default. """ # If a GEOS geometry isn't passed in, try to construct one. if isinstance(geom, six.string_types): geom = fromstr(geom) if isinstance(geom, (tuple, list)): geom = Point(geom) if isinstance(geom, Point): self.latlng = self.latlng_from_coords(geom.coords) else: raise TypeError('GMarker may only initialize on GEOS Point geometry.') # Getting the envelope for automatic zoom determination. self.envelope = geom.envelope # TODO: Add support for more GMarkerOptions self.title = title self.draggable = draggable self.icon = icon super(GMarker, self).__init__() def latlng_from_coords(self, coords): return 'new GLatLng(%s,%s)' % (coords[1], coords[0]) def options(self): result = [] if self.title: result.append('title: "%s"' % self.title) if self.icon: result.append('icon: %s' % self.icon.varname) if self.draggable: result.append('draggable: true') return '{%s}' % ','.join(result) @property def js_params(self): return '%s, %s' % (self.latlng, self.options())<|fim▁end|>
<|file_name|>MockFile.java<|end_file_name|><|fim▁begin|>/** * Copyright (C) 2010-2015 Gordon Fraser, Andrea Arcuri and EvoSuite * contributors * * This file is part of EvoSuite. * * EvoSuite is free software: you can redistribute it and/or modify it * under the terms of the GNU Lesser Public License as published by the * Free Software Foundation, either version 3.0 of the License, or (at your * option) any later version. * * EvoSuite is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser Public License for more details. * * You should have received a copy of the GNU Lesser Public License along * with EvoSuite. If not, see <http://www.gnu.org/licenses/>. */ package org.evosuite.runtime.mock.java.io; import java.io.File; import java.io.FileFilter; import java.io.FilenameFilter; import java.io.IOException; import java.net.MalformedURLException; import java.net.URI; import java.net.URL; import java.util.ArrayList; import org.evosuite.runtime.RuntimeSettings; import org.evosuite.runtime.mock.MockFramework; import org.evosuite.runtime.mock.OverrideMock; import org.evosuite.runtime.mock.java.lang.MockIllegalArgumentException; import org.evosuite.runtime.mock.java.net.MockURL; import org.evosuite.runtime.vfs.FSObject; import org.evosuite.runtime.vfs.VFile; import org.evosuite.runtime.vfs.VFolder; import org.evosuite.runtime.vfs.VirtualFileSystem; /** * This class is used in the mocking framework to replace File instances. * * <p> * All files are created in memory, and no access to disk is ever done * * @author arcuri * */ public class MockFile extends File implements OverrideMock { private static final long serialVersionUID = -8217763202925800733L; /* * Constructors, with same inputs as in File. Note: it is not possible to inherit JavaDocs for constructors. */ public MockFile(String pathname) { super(pathname); } public MockFile(String parent, String child) { super(parent,child); } public MockFile(File parent, String child) { this(parent.getPath(),child); } public MockFile(URI uri) { super(uri); } /* * TODO: Java 7 * * there is only one method in File that depends on Java 7: * * public Path toPath() * * * but if we include it here, we will break compatibility with Java 6. * Once we drop such backward compatibility, we will need to override * such method */ /* * --------- static methods ------------------ * * recall: it is not possible to override static methods. * In the SUT, all calls to those static methods of File, eg File.foo(), * will need to be replaced with EvoFile.foo() */ public static File[] listRoots() { if(! MockFramework.isEnabled()){ return File.listRoots(); } File[] roots = File.listRoots(); MockFile[] mocks = new MockFile[roots.length]; for(int i=0; i<roots.length; i++){ mocks[i] = new MockFile(roots[i].getAbsolutePath()); } return mocks; } public static File createTempFile(String prefix, String suffix, File directory) throws IOException{ if(! 
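A usage sketch for the overlay classes in the row above (the WKT literals are arbitrary, and running this assumes a Django release old enough to still ship django.contrib.gis.maps.google, plus a working GEOS install):

from django.contrib.gis.maps.google.overlays import GEvent, GMarker, GPolyline

marker = GMarker('POINT(101 26)', title='A marker')
marker.add_event(GEvent('click', 'function() { alert("clicked"); }'))

line = GPolyline('LINESTRING(101 26, 112 26, 102 31)',
                 color='#ff0000', weight=3, opacity=0.8)

# Each overlay renders as the JavaScript constructor call it wraps, e.g.
# GMarker(new GLatLng(26.0,101.0), {title: "A marker"})
print(marker)
print(line)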
MockFramework.isEnabled()){ return File.createTempFile(prefix, suffix, directory); } VirtualFileSystem.getInstance().throwSimuledIOExceptionIfNeeded(""); String path = VirtualFileSystem.getInstance().createTempFile(prefix, suffix, directory); if(path==null){ throw new MockIOException(); } return new MockFile(path); } public static File createTempFile(String prefix, String suffix) throws IOException { return createTempFile(prefix, suffix, null); } // -------- modified methods ---------------- @Override public int compareTo(File pathname) { if(! MockFramework.isEnabled()){ return super.compareTo(pathname); } return new File(getAbsolutePath()).compareTo(pathname); } @Override public File getParentFile() { if(! MockFramework.isEnabled()){ return super.getParentFile(); } String p = this.getParent(); if (p == null) return null; return new MockFile(p); } @Override public File getAbsoluteFile() { if(! MockFramework.isEnabled()){ return super.getAbsoluteFile(); } String absPath = getAbsolutePath(); return new MockFile(absPath); } @Override public File getCanonicalFile() throws IOException { if(! MockFramework.isEnabled()){ return super.getCanonicalFile(); } String canonPath = getCanonicalPath(); VirtualFileSystem.getInstance().throwSimuledIOExceptionIfNeeded(getAbsolutePath()); return new MockFile(canonPath); } @Override public boolean canRead() { if(! MockFramework.isEnabled()){ return super.canRead(); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; }<|fim▁hole|> @Override public boolean setReadOnly() { if(! MockFramework.isEnabled()){ return super.setReadOnly(); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; } file.setReadPermission(true); file.setExecutePermission(false); file.setWritePermission(false); return true; } @Override public boolean setReadable(boolean readable, boolean ownerOnly) { if(! MockFramework.isEnabled()){ return super.setReadable(readable, ownerOnly); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; } file.setReadPermission(readable); return true; } @Override public boolean canWrite() { if(! MockFramework.isEnabled()){ return super.canWrite(); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; } return file.isWritePermission(); } @Override public boolean setWritable(boolean writable, boolean ownerOnly) { if(! MockFramework.isEnabled()){ return super.setWritable(writable, ownerOnly); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; } file.setWritePermission(writable); return true; } @Override public boolean setExecutable(boolean executable, boolean ownerOnly) { if(! MockFramework.isEnabled()){ return super.setExecutable(executable, ownerOnly); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; } file.setExecutePermission(executable); return true; } @Override public boolean canExecute() { if(! MockFramework.isEnabled()){ return super.canExecute(); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; } return file.isExecutePermission(); } @Override public boolean exists() { if(! MockFramework.isEnabled()){ return super.exists(); } return VirtualFileSystem.getInstance().exists(getAbsolutePath()); } @Override public boolean isDirectory() { if(! 
MockFramework.isEnabled()){ return super.isDirectory(); } FSObject file = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(file==null){ return false; } return file.isFolder(); } @Override public boolean isFile() { if(! MockFramework.isEnabled()){ return super.isFile(); } return !isDirectory(); } @Override public boolean isHidden() { if(! MockFramework.isEnabled()){ return super.isHidden(); } if(getName().startsWith(".")){ //this is not necessarily true in Windows return true; } else { return false; } } @Override public boolean setLastModified(long time) { if(! MockFramework.isEnabled()){ return super.setLastModified(time); } if (time < 0){ throw new MockIllegalArgumentException("Negative time"); } FSObject target = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(target==null){ return false; } return target.setLastModified(time); } @Override public long lastModified() { if(! MockFramework.isEnabled()){ return super.lastModified(); } FSObject target = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(target==null){ return 0; } return target.getLastModified(); } @Override public long length() { if(! MockFramework.isEnabled()){ return super.length(); } FSObject target = VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); if(target==null){ return 0; } if(target.isFolder() || target.isDeleted()){ return 0; } VFile file = (VFile) target; return file.getDataSize(); } //following 3 methods are never used in SF110 @Override public long getTotalSpace() { if(! MockFramework.isEnabled()){ return super.getTotalSpace(); } return 0; //TODO } @Override public long getFreeSpace() { if(! MockFramework.isEnabled()){ return super.getFreeSpace(); } return 0; //TODO } @Override public long getUsableSpace() { if(! MockFramework.isEnabled()){ return super.getUsableSpace(); } return 0; //TODO } @Override public boolean createNewFile() throws IOException { if(! MockFramework.isEnabled()){ return super.createNewFile(); } VirtualFileSystem.getInstance().throwSimuledIOExceptionIfNeeded(getAbsolutePath()); return VirtualFileSystem.getInstance().createFile(getAbsolutePath()); } @Override public boolean delete() { if(! MockFramework.isEnabled()){ return super.delete(); } return VirtualFileSystem.getInstance().deleteFSObject(getAbsolutePath()); } @Override public boolean renameTo(File dest) { if(! MockFramework.isEnabled()){ return super.renameTo(dest); } boolean renamed = VirtualFileSystem.getInstance().rename( this.getAbsolutePath(), dest.getAbsolutePath()); return renamed; } @Override public boolean mkdir() { if(! MockFramework.isEnabled()){ return super.mkdir(); } String parent = this.getParent(); if(parent==null || !VirtualFileSystem.getInstance().exists(parent)){ return false; } return VirtualFileSystem.getInstance().createFolder(getAbsolutePath()); } @Override public void deleteOnExit() { if(! MockFramework.isEnabled()){ super.deleteOnExit(); } /* * do nothing, as anyway no actual file is created */ } @Override public String[] list() { if(! MockFramework.isEnabled()){ return super.list(); } if(!isDirectory() || !exists()){ return null; } else { VFolder dir = (VFolder) VirtualFileSystem.getInstance().findFSObject(getAbsolutePath()); return dir.getChildrenNames(); } } @Override public File[] listFiles() { if(! 
MockFramework.isEnabled()){ return super.listFiles(); } String[] ss = list(); if (ss == null) return null; int n = ss.length; MockFile[] fs = new MockFile[n]; for (int i = 0; i < n; i++) { fs[i] = new MockFile(this,ss[i]); } return fs; } @Override public File[] listFiles(FileFilter filter) { if(! MockFramework.isEnabled()){ return super.listFiles(filter); } String ss[] = list(); if (ss == null) return null; ArrayList<File> files = new ArrayList<File>(); for (String s : ss) { File f = new MockFile(this,s); if ((filter == null) || filter.accept(f)) files.add(f); } return files.toArray(new File[files.size()]); } @Override public String getCanonicalPath() throws IOException { if(! MockFramework.isEnabled()){ return super.getCanonicalPath(); } VirtualFileSystem.getInstance().throwSimuledIOExceptionIfNeeded(getAbsolutePath()); return super.getCanonicalPath(); } @Override public URL toURL() throws MalformedURLException { if(! MockFramework.isEnabled() || !RuntimeSettings.useVNET){ return super.toURL(); } URL url = super.toURL(); return MockURL.URL(url.toString()); } // -------- unmodified methods -------------- @Override public String getName(){ return super.getName(); } @Override public String getParent() { return super.getParent(); } @Override public String getPath() { return super.getPath(); } @Override public boolean isAbsolute() { return super.isAbsolute(); } @Override public String getAbsolutePath() { return super.getAbsolutePath(); } @Override public URI toURI() { return super.toURI(); //no need of VNET here } @Override public String[] list(FilenameFilter filter) { //no need to mock it, as it uses the mocked list() return super.list(filter); } @Override public boolean mkdirs() { //no need to mock it, as all methods it calls are mocked return super.mkdirs(); } @Override public boolean setWritable(boolean writable) { return super.setWritable(writable); // it calls mocked method } @Override public boolean setReadable(boolean readable) { return super.setReadable(readable); //it calls mocked method } @Override public boolean setExecutable(boolean executable) { return super.setExecutable(executable); // it calls mocked method } // ------- Object methods ----------- @Override public boolean equals(Object obj) { return super.equals(obj); } @Override public int hashCode() { return super.hashCode(); } @Override public String toString() { return super.toString(); } }<|fim▁end|>
return file.isReadPermission(); }
<|file_name|>sco_bulletins_json.py<|end_file_name|><|fim▁begin|># -*- mode: python -*- # -*- coding: iso8859-15 -*- ############################################################################## # # Gestion scolarite IUT # # Copyright (c) 2001 - 2013 Emmanuel Viennet. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Emmanuel Viennet [email protected] # ############################################################################## """Génération du bulletin en format JSON (beta, non completement testé) """ from notes_table import * import sco_photos import ZAbsences import sco_bulletins # -------- Bulletin en JSON import mx class ScoDocJSONEncoder(json.JSONEncoder): def default(self, o): # horrible hack pour encoder les dates if str(type(o)) == "<type 'mx.DateTime.DateTime'>": return o.strftime("%Y-%m-%dT%H:%M:%S") else: log('not mx: %s' % type(o)) return json.JSONEncoder.default(self, o) def make_json_formsemestre_bulletinetud( context, formsemestre_id, etudid, REQUEST=None, xml_with_decisions=False, version='long', force_publishing=False # force publication meme si semestre non publie sur "portail" ): """Renvoie bulletin en chaine JSON""" d = formsemestre_bulletinetud_published_dict( context, formsemestre_id, etudid, force_publishing=force_publishing, REQUEST=REQUEST, xml_with_decisions=xml_with_decisions, version=version) if REQUEST: REQUEST.RESPONSE.setHeader('content-type', JSON_MIMETYPE) return json.dumps(d, cls=ScoDocJSONEncoder, encoding=SCO_ENCODING) # (fonction séparée: n'utilise pas formsemestre_bulletinetud_dict() # pour simplifier le code, mais attention a la maintenance !) # def formsemestre_bulletinetud_published_dict( context, formsemestre_id, etudid, force_publishing=False, xml_nodate=False, REQUEST=None, xml_with_decisions=False, # inclue les decisions même si non publiées version='long' ): """Dictionnaire representant les informations _publiees_ du bulletin de notes Utilisé pour JSON, devrait l'être aussi pour XML. (todo) """ d = {} sem = context.get_formsemestre(formsemestre_id) if sem['bul_hide_xml'] == '0' or force_publishing: published=1 else: published=0 if xml_nodate: docdate = '' else:<|fim▁hole|> d.update( etudid=etudid, formsemestre_id=formsemestre_id, date=docdate, publie=published, etape_apo=sem['etape_apo'] or '', etape_apo2=sem['etape_apo2'] or '', etape_apo3=sem['etape_apo3'] or '', etape_apo4=sem['etape_apo4'] or '' ) # Infos sur l'etudiant etudinfo = context.getEtudInfo(etudid=etudid,filled=1)[0] d['etudiant'] = dict( etudid=etudid, code_nip=etudinfo['code_nip'], code_ine=etudinfo['code_ine'], nom=quote_xml_attr(etudinfo['nom']), prenom=quote_xml_attr(etudinfo['prenom']), sexe=quote_xml_attr(etudinfo['sexe']), photo_url=quote_xml_attr(sco_photos.etud_photo_url(context, etudinfo)), email=quote_xml_attr(etudinfo['email'])) # Disponible pour publication ? if not published: return d # stop ! 
# Groupes: partitions = sco_groups.get_partitions_list(context, formsemestre_id, with_default=False) partitions_etud_groups = {} # { partition_id : { etudid : group } } for partition in partitions: pid=partition['partition_id'] partitions_etud_groups[pid] = sco_groups.get_etud_groups_in_partition(context, pid) nt = context._getNotesCache().get_NotesTable(context, formsemestre_id) #> toutes notes ues = nt.get_ues() modimpls = nt.get_modimpls() nbetuds = len(nt.rangs) mg = fmt_note(nt.get_etud_moy_gen(etudid)) if nt.get_moduleimpls_attente() or context.get_preference('bul_show_rangs', formsemestre_id) == 0: # n'affiche pas le rang sur le bulletin s'il y a des # notes en attente dans ce semestre rang = '' rang_gr = {} ninscrits_gr = {} else: rang = str(nt.get_etud_rang(etudid)) rang_gr, ninscrits_gr, gr_name = sco_bulletins.get_etud_rangs_groups( context, etudid, formsemestre_id, partitions, partitions_etud_groups, nt) d['note'] = dict( value=mg, min=fmt_note(nt.moy_min), max=fmt_note(nt.moy_max), moy=fmt_note(nt.moy_moy) ) d['rang'] = dict( value=rang, ninscrits=nbetuds ) d['rang_group'] = [] if rang_gr: for partition in partitions: d['rang_group'].append( dict( group_type=partition['partition_name'], group_name=gr_name[partition['partition_id']], value=rang_gr[partition['partition_id']], ninscrits=ninscrits_gr[partition['partition_id']] )) d['note_max'] = dict( value=20 ) # notes toujours sur 20 d['bonus_sport_culture'] = dict( value=nt.bonus[etudid] ) # Liste les UE / modules /evals d['ue'] = [] d['ue_capitalisee'] = [] for ue in ues: ue_status = nt.get_etud_ue_status(etudid, ue['ue_id']) u = dict( id=ue['ue_id'], numero=quote_xml_attr(ue['numero']), acronyme=quote_xml_attr(ue['acronyme']), titre=quote_xml_attr(ue['titre']), note = dict(value=fmt_note(ue_status['cur_moy_ue']), min=fmt_note(ue['min']), max=fmt_note(ue['max'])), rang = str(nt.ue_rangs[ue['ue_id']][0][etudid]), effectif = str(nt.ue_rangs[ue['ue_id']][1] - nt.nb_demissions) ) d['ue'].append(u) u['module'] = [] # Liste les modules de l'UE ue_modimpls = [ mod for mod in modimpls if mod['module']['ue_id'] == ue['ue_id'] ] for modimpl in ue_modimpls: mod_moy = fmt_note(nt.get_etud_mod_moy(modimpl['moduleimpl_id'], etudid)) if mod_moy == 'NI': # ne mentionne pas les modules ou n'est pas inscrit continue mod = modimpl['module'] #if mod['ects'] is None: # ects = '' #else: # ects = str(mod['ects']) modstat = nt.get_mod_stats(modimpl['moduleimpl_id']) m = dict( id=modimpl['moduleimpl_id'], code=mod['code'], coefficient=mod['coefficient'], numero=mod['numero'], titre=quote_xml_attr(mod['titre']), abbrev=quote_xml_attr(mod['abbrev']), # ects=ects, ects des modules maintenant inutilisés note = dict( value=mod_moy ) ) m['note'].update(modstat) for k in ('min', 'max', 'moy'): # formatte toutes les notes m['note'][k] = fmt_note(m['note'][k]) u['module'].append(m) if context.get_preference('bul_show_mod_rangs', formsemestre_id): m['rang'] = dict( value=nt.mod_rangs[modimpl['moduleimpl_id']][0][etudid] ) m['effectif'] = dict( value=nt.mod_rangs[modimpl['moduleimpl_id']][1] ) # --- notes de chaque eval: evals = nt.get_evals_in_mod(modimpl['moduleimpl_id']) m['evaluation'] = [] if version != 'short': for e in evals: if e['visibulletin'] == '1' or version == 'long': val = e['notes'].get(etudid, {'value':'NP'})['value'] # NA si etud demissionnaire val = fmt_note(val, note_max=e['note_max'] ) m['evaluation'].append( dict( jour=DateDMYtoISO(e['jour'], null_is_empty=True), heure_debut=TimetoISO8601(e['heure_debut'], null_is_empty=True), 
heure_fin=TimetoISO8601(e['heure_fin'], null_is_empty=True), coefficient=e['coefficient'], evaluation_type=e['evaluation_type'], description=quote_xml_attr(e['description']), note = val )) # Evaluations incomplètes ou futures: complete_eval_ids = Set( [ e['evaluation_id'] for e in evals ] ) if context.get_preference('bul_show_all_evals', formsemestre_id): all_evals = context.do_evaluation_list(args={ 'moduleimpl_id' : modimpl['moduleimpl_id'] }) all_evals.reverse() # plus ancienne d'abord for e in all_evals: if e['evaluation_id'] not in complete_eval_ids: m['evaluation'].append( dict( jour=DateDMYtoISO(e['jour'], null_is_empty=True), heure_debut=TimetoISO8601(e['heure_debut'], null_is_empty=True), heure_fin=TimetoISO8601(e['heure_fin'], null_is_empty=True), coefficient=e['coefficient'], description=quote_xml_attr(e['description']), incomplete='1') ) # UE capitalisee (listee seulement si meilleure que l'UE courante) if ue_status['is_capitalized']: d['ue_capitalisee'].append( dict( id=ue['ue_id'], numero=quote_xml_attr(ue['numero']), acronyme=quote_xml_attr(ue['acronyme']), titre=quote_xml_attr(ue['titre']), note = fmt_note(ue_status['moy']), coefficient_ue = fmt_note(ue_status['coef_ue']), date_capitalisation = DateDMYtoISO(ue_status['event_date']) )) # --- Absences if context.get_preference('bul_show_abs', formsemestre_id): debut_sem = DateDMYtoISO(sem['date_debut']) fin_sem = DateDMYtoISO(sem['date_fin']) AbsEtudSem = ZAbsences.getAbsSemEtud(context, formsemestre_id, etudid) nbabs = AbsEtudSem.CountAbs() nbabsjust = AbsEtudSem.CountAbsJust() d['absences'] = dict(nbabs=nbabs, nbabsjust=nbabsjust) # --- Decision Jury if context.get_preference('bul_show_decision', formsemestre_id) or xml_with_decisions: infos, dpv = sco_bulletins.etud_descr_situation_semestre( context, etudid, formsemestre_id, format='xml', show_uevalid=context.get_preference('bul_show_uevalid',formsemestre_id)) d['situation'] = quote_xml_attr(infos['situation']) if dpv: decision = dpv['decisions'][0] etat = decision['etat'] if decision['decision_sem']: code = decision['decision_sem']['code'] else: code = '' d['decision'] = dict( code=code, etat=etat) d['decision_ue'] = [] if decision['decisions_ue']: # and context.get_preference('bul_show_uevalid', formsemestre_id): always publish (car utile pour export Apogee) for ue_id in decision['decisions_ue'].keys(): ue = context.do_ue_list({ 'ue_id' : ue_id})[0] d['decision_ue'].append(dict( ue_id=ue['ue_id'], numero=quote_xml_attr(ue['numero']), acronyme=quote_xml_attr(ue['acronyme']), titre=quote_xml_attr(ue['titre']), code=decision['decisions_ue'][ue_id]['code'], ects=quote_xml_attr(ue['ects'] or '') )) d['autorisation_inscription'] = [] for aut in decision['autorisations']: d['autorisation_inscription'].append(dict( semestre_id=aut['semestre_id'] )) else: d['decision'] = dict( code='', etat='DEM' ) # --- Appreciations cnx = context.GetDBConnexion() apprecs = scolars.appreciations_list( cnx, args={'etudid':etudid, 'formsemestre_id' : formsemestre_id } ) d['appreciation'] = [] for app in apprecs: d['appreciation'].append( dict( comment=quote_xml_attr(app['comment']), date=DateDMYtoISO(app['date'])) ) # return d<|fim▁end|>
docdate = datetime.datetime.now().isoformat()
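The ScoDocJSONEncoder in the row above shows the standard pattern for serializing types the json module does not handle: subclass json.JSONEncoder and override default(). A self-contained sketch of the same pattern, substituting the stdlib datetime type for the legacy mx.DateTime one (field names are illustrative):

import datetime
import json

class DateTimeEncoder(json.JSONEncoder):
    def default(self, o):
        # Called only for objects the base encoder cannot serialize.
        if isinstance(o, datetime.datetime):
            return o.strftime('%Y-%m-%dT%H:%M:%S')
        return json.JSONEncoder.default(self, o)

doc = {'etudid': 42, 'date': datetime.datetime(2013, 1, 17, 23, 14, 36)}
print(json.dumps(doc, cls=DateTimeEncoder))
# e.g. {"etudid": 42, "date": "2013-01-17T23:14:36"}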
<|file_name|>issue_234.rs<|end_file_name|><|fim▁begin|>use zip::result::ZipError; const BUF: &[u8] = &[ 0, 80, 75, 1, 2, 127, 120, 0, 3, 3, 75, 80, 232, 3, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 7, 0, 0, 0, 0, 65, 0, 1, 0, 0, 0, 4, 0, 0, 224, 255, 0, 255, 255, 255, 255, 255, 255, 20, 39, 221, 221, 221, 221, 221, 221, 205, 221, 221, 221, 42, 221, 221, 221, 221, 221, 221, 221, 221, 38, 34, 34, 219, 80, 75, 5, 6, 0, 0, 0, 0, 5, 96, 0, 1, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 234, 236, 124,<|fim▁hole|> 234, 0, 0, 0, 3, 8, 4, 232, 3, 0, 0, 0, 255, 255, 255, 255, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 3, 0, 221, 209, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 58, 58, 42, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 234, 0, 0, 0, 3, 8, 0, 0, 0, 12, 0, 0, 0, 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 0, 0, 58, 58, 58, 42, 175, 221, 253, 221, 221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 221, 221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, ]; #[test] fn invalid_header() { let reader = std::io::Cursor::new(&BUF); let archive = zip::ZipArchive::new(reader); match archive { Err(ZipError::InvalidArchive(_)) => {} value => panic!("Unexpected value: {:?}", value), } }<|fim▁end|>
221, 221, 37, 221, 221, 221, 221, 221, 129, 4, 0, 0, 221, 221, 80, 75, 1, 2, 127, 120, 0, 4, 0, 0, 2, 127, 120, 0, 79, 75, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0,
<|file_name|>tree_0a5cf070976fd6198437c00ce2cc4fcb.js<|end_file_name|><|fim▁begin|>/** * @version: $Id: tree.js 904 2011-03-02 20:15:56Z Radek Suski $<|fim▁hole|> * Email: sobi[at]sigsiu.net * Url: http://www.Sigsiu.NET * =================================================== * @copyright Copyright (C) 2006 - 2011 Sigsiu.NET GmbH (http://www.sigsiu.net). All rights reserved. * @license see http://www.gnu.org/licenses/lgpl.html GNU/LGPL Version 3. * You can use, redistribute this file and/or modify it under the terms of the GNU Lesser General Public License version 3 * =================================================== * $Date: 2011-03-02 21:15:56 +0100 (Wed, 02 Mar 2011) $ * $Revision: 904 $ * $Author: Radek Suski $ * File location: components/com_sobipro/lib/js/tree.js $ */ // Created at Thu Jan 17 23:14:36 ART 2013 by Sobi Pro Component var sobiCats_stmcid = 0; var sobiCats_stmLastNode = 147; var sobiCats_stmImgs = new Array(); var sobiCats_stmImgMatrix = new Array(); var sobiCats_stmParents = new Array(); var sobiCats_stmSemaphor = 0; var sobiCats_stmPid = 0; var sobiCats_stmWait = 'http://localhost/SocialGarbage/media/sobipro/styles/spinner.gif'; sobiCats_stmImgs[ 'root' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/base.gif'; sobiCats_stmImgs[ 'join' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/join.gif'; sobiCats_stmImgs[ 'joinBottom' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/joinbottom.gif'; sobiCats_stmImgs[ 'plus' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/plus.gif'; sobiCats_stmImgs[ 'plusBottom' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/plusbottom.gif'; sobiCats_stmImgs[ 'minus' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/minus.gif'; sobiCats_stmImgs[ 'minusBottom' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/minusbottom.gif'; sobiCats_stmImgs[ 'folder' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/folder.gif'; sobiCats_stmImgs[ 'disabled' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/disabled.gif'; sobiCats_stmImgs[ 'folderOpen' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/folderopen.gif'; sobiCats_stmImgs[ 'line' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/line.gif'; sobiCats_stmImgs[ 'empty' ] = 'http://localhost/SocialGarbage/media/sobipro/tree/empty.gif';; sobiCats_stmImgMatrix[ 149 ] = new Array( 'joinBottom' ); sobiCats_stmImgMatrix[ 150 ] = new Array( 'joinBottom' ); sobiCats_stmImgMatrix[ 148 ] = new Array( 'joinBottom' ); sobiCats_stmImgMatrix[ 147 ] = new Array( 'join' );; //__PARENT_ARR__ function sobiCats_stmExpand( catid, deep, pid ) { try { SP_id( "sobiCats_imgFolder" + catid ).src = sobiCats_stmWait; } catch( e ) {} sobiCats_stmcid = catid; sobiCats_stmPid = pid; url = "index.php?option=com_sobipro&task=category.chooser&sid=146&out=xml&expand=" + sobiCats_stmcid + "&pid=" + sobiCats_stmPid + "&tmpl=component&format=raw"; sobiCats_stmMakeRequest( url, deep, catid ); } function sobiCats_stmCatData( node, val ) { return node.getElementsByTagName( val ).item( 0 ).firstChild.data; } function sobiCats_stmAddSubcats( XMLDoc, deep, ccatid ) { var categories = XMLDoc.getElementsByTagName( 'category' ); var subcats = ""; deep++; for( i = 0; i < categories.length; i++ ) { var category = categories[ i ]; var catid = sobiCats_stmCatData( category, 'catid' ); var name = sobiCats_stmCatData( category, 'name' ); var introtext = sobiCats_stmCatData( category, 'introtext' ); var parentid = sobiCats_stmCatData( category, 'parentid' ); var url = sobiCats_stmCatData( 
category, 'url' ); var childs = sobiCats_stmCatData( category, 'childs' ); var join = "<img src='" + sobiCats_stmImgs['join'] + "' alt=''/>"; var margin = ""; var childContainer = ""; name = name.replace( "\\", "" ); introtext = introtext.replace( "\\", "" ); url = url.replace( "\\\\", "" ); for( j = 0; j < deep; j++ ) { if( sobiCats_stmImgMatrix[ parentid ][ j ] ) { switch( sobiCats_stmImgMatrix[ parentid ][ j ] ) { case 'plus': case 'minus': case 'line': image = 'line'; break; default: image = 'empty'; break; } } else { image = 'empty'; } if( !sobiCats_stmImgMatrix[ catid ] ) { catArray = new Array(); catArray[ j ] = image; sobiCats_stmImgMatrix[ catid ] = catArray; } else { sobiCats_stmImgMatrix[ catid ][ j ] = image; } margin = margin + "<img src='"+ sobiCats_stmImgs[ image ] +"' style='border-style:none;' alt=''/>"; } if( childs > 0 ) { join = "<a href='javascript:sobiCats_stmExpand( " + catid + ", " + deep + ", " + sobiCats_stmPid + " );' id='sobiCats_imgUrlExpand" + catid + "'><img src='"+ sobiCats_stmImgs['plus'] + "' id='sobiCats_imgExpand" + catid + "' style='border-style:none;' alt='expand'/></a>"; sobiCats_stmImgMatrix[catid][j] = 'plus'; } if( sobiCats_stmcid == sobiCats_stmLastNode ) { line = "<img src='"+sobiCats_stmImgs['empty']+"' alt=''>"; } if( i == categories.length - 1 ) { if( childs > 0 ) { join = "<a href='javascript:sobiCats_stmExpand( " + catid + ", " + deep + ", " + sobiCats_stmPid + " );' id='sobiCats_imgUrlExpand" + catid + "'><img src='"+ sobiCats_stmImgs[ 'plusBottom' ] + "' id='sobiCats_imgExpand" + catid + "' style='border-style:none;' alt='expand'/></a>"; sobiCats_stmImgMatrix[ catid ][ j ] = 'plusBottom'; } else { join = "<img src='" + sobiCats_stmImgs[ 'joinBottom' ] + "' style='border-style:none;' alt=''/>"; sobiCats_stmImgMatrix[ catid ][ j ] = 'joinBottom'; } } subcats = subcats + "<div class='sigsiuTreeNode' id='sobiCatsstNode" + catid + "'>" + margin + join + "<a id='sobiCats" + catid + "' href=\"" + url + "\"><img src='" + sobiCats_stmImgs[ 'folder' ] + "' id='sobiCats_imgFolder" + catid + "' alt=''></a><a class = 'treeNode' id='sobiCats_CatUrl" + catid + "' href=\"" + url + "\">" + name + "</a></div>"; if( childs > 0 ) { subcats = subcats + "<div class='clip' id='sobiCats_childsContainer" + catid + "' style='display: block; display:none;'></div>" } } var childsCont = "sobiCats_childsContainer" + ccatid; SP_id( childsCont ).innerHTML = subcats; } function sobiCats_stmMakeRequest( url, deep, catid ) { var sobiCats_stmHttpRequest; if ( window.XMLHttpRequest ) { sobiCats_stmHttpRequest = new XMLHttpRequest(); if ( sobiCats_stmHttpRequest.overrideMimeType ) { sobiCats_stmHttpRequest.overrideMimeType( 'text/xml' ); } } else if ( window.ActiveXObject ) { try { sobiCats_stmHttpRequest = new ActiveXObject( "Msxml2.XMLHTTP" ); } catch ( e ) { try { sobiCats_stmHttpRequest = new ActiveXObject("Microsoft.XMLHTTP"); } catch (e) {} } } if ( !sobiCats_stmHttpRequest ) { // alert( 'AJAX_FAIL' ); return false; } sobiCats_stmHttpRequest.onreadystatechange = function() { sobiCats_stmGetSubcats( sobiCats_stmHttpRequest,deep,catid ); }; sobiCats_stmHttpRequest.open( 'GET', url, true ); sobiCats_stmHttpRequest.send( null ); } function sobiCats_stmGetSubcats( sobiCats_stmHttpRequest, deep, catid ) { if ( sobiCats_stmHttpRequest.readyState == 4 ) { if ( sobiCats_stmHttpRequest.status == 200 ) { if( SP_id( "sobiCats_imgFolder" + catid ) == undefined ) { window.setTimeout( function() { sobiCats_stmGetSubcats( sobiCats_stmHttpRequest, deep, catid ); } , 200 ); } else { 
SP_id( "sobiCats_imgFolder" + catid ).src = sobiCats_stmImgs[ 'folderOpen' ]; if ( sobiCats_stmcid == sobiCats_stmLastNode ) { SP_id( "sobiCats_imgExpand" + catid ).src = sobiCats_stmImgs[ 'minusBottom' ]; } else { if( SP_id( "sobiCats_imgExpand" + catid ).src == sobiCats_stmImgs[ 'plusBottom' ] ) { SP_id( "sobiCats_imgExpand" + catid ).src = sobiCats_stmImgs[ 'minusBottom' ]; } else { SP_id( "sobiCats_imgExpand" + catid ).src = sobiCats_stmImgs[ 'minus' ]; } } SP_id( "sobiCats_imgUrlExpand" + catid ).href = "javascript:sobiCats_stmColapse( " + catid + ", " + deep + " );"; SP_id( "sobiCats_childsContainer" + catid ).style.display = ""; sobiCats_stmAddSubcats( sobiCats_stmHttpRequest.responseXML, deep, catid ); } } else { // SobiPro.Alert( 'AJAX_FAIL' ); } } } function sobiCats_stmColapse( id, deep ) { SP_id( "sobiCats_childsContainer" + id ).style.display = "none"; SP_id( "sobiCats_imgFolder" + id ).src = sobiCats_stmImgs[ 'folder' ]; if( id == sobiCats_stmLastNode ) { SP_id( "sobiCats_imgExpand" + id ).src = sobiCats_stmImgs[ 'plusBottom' ]; } else if(SP_id( "sobiCats_imgExpand" + sobiCats_stmcid ).src == sobiCats_stmImgs[ 'minusBottom' ] ){ SP_id( "sobiCats_imgExpand" + sobiCats_stmcid ).src = sobiCats_stmImgs[ 'plusBottom' ]; } else { SP_id( "sobiCats_imgExpand" + id ).src = sobiCats_stmImgs[ 'plus' ]; } SP_id( "sobiCats_imgUrlExpand" + id ).href = "javascript:sobiCats_stmExpand( " + id + ", " + deep + ", " + sobiCats_stmPid + " );"; }<|fim▁end|>
* @package: SobiPro Library * =================================================== * @author * Name: Sigrid Suski & Radek Suski, Sigsiu.NET GmbH
<|file_name|>test_vec_val_vect.py<|end_file_name|><|fim▁begin|>import numpy as np from numpy.random import randn from numpy.testing import assert_almost_equal, dec from dipy.reconst.vec_val_sum import vec_val_vect def make_vecs_vals(shape): return randn(*(shape)), randn(*(shape[:-2] + shape[-1:])) try: np.einsum except AttributeError: with_einsum = dec.skipif(True, "Need einsum for benchmark") else: def with_einsum(f): return f @with_einsum def test_vec_val_vect(): for shape0 in ((10,), (100,), (10, 12), (12, 10, 5)): for shape1 in ((3, 3), (4, 3), (3, 4)): shape = shape0 + shape1 evecs, evals = make_vecs_vals(shape) res1 = np.einsum('...ij,...j,...kj->...ik', evecs, evals, evecs) assert_almost_equal(res1, vec_val_vect(evecs, evals)) def dumb_sum(vecs, vals): N, rows, cols = vecs.shape res2 = np.zeros((N, rows, rows)) for i in range(N): Q = vecs[i] L = vals[i] res2[i] = np.dot(Q, np.dot(np.diag(L), Q.T)) return res2 def test_vec_val_vect_dumber(): for shape0 in ((10,), (100,)): for shape1 in ((3, 3), (4, 3), (3, 4)):<|fim▁hole|> assert_almost_equal(res1, vec_val_vect(evecs, evals))<|fim▁end|>
shape = shape0 + shape1 evecs, evals = make_vecs_vals(shape) res1 = dumb_sum(evecs, evals)
<|file_name|>tictactoe.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go. // source: tictactoe.proto // DO NOT EDIT! /* Package tictactoe is a generated protocol buffer package. It is generated from these files: tictactoe.proto It has these top-level messages: CreateRequest CreateReply TurnRequest Winner TurnReply MoveRange Event */ package tictactoe import proto "github.com/protogalaxy/service-tictactoe-game/Godeps/_workspace/src/github.com/golang/protobuf/proto" import ( context "github.com/protogalaxy/service-tictactoe-game/Godeps/_workspace/src/golang.org/x/net/context" grpc "github.com/protogalaxy/service-tictactoe-game/Godeps/_workspace/src/google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal type Mark int32 const ( Mark_EMPTY Mark = 0 Mark_X Mark = 1 Mark_Y Mark = 2 ) var Mark_name = map[int32]string{ 0: "EMPTY", 1: "X", 2: "Y", } var Mark_value = map[string]int32{ "EMPTY": 0, "X": 1, "Y": 2, } func (x Mark) String() string { return proto.EnumName(Mark_name, int32(x)) } type CreateReply_ResponseStatus int32 const ( CreateReply_SUCCESS CreateReply_ResponseStatus = 0 ) var CreateReply_ResponseStatus_name = map[int32]string{ 0: "SUCCESS", } var CreateReply_ResponseStatus_value = map[string]int32{ "SUCCESS": 0, } func (x CreateReply_ResponseStatus) String() string { return proto.EnumName(CreateReply_ResponseStatus_name, int32(x)) } type Winner_Location_Direction int32 const ( Winner_Location_HORIZONTAL Winner_Location_Direction = 0 Winner_Location_VERTICAL Winner_Location_Direction = 1 Winner_Location_DIAGONAL_DOWN Winner_Location_Direction = 2 Winner_Location_DIAGONAL_UP Winner_Location_Direction = 3 ) var Winner_Location_Direction_name = map[int32]string{ 0: "HORIZONTAL", 1: "VERTICAL", 2: "DIAGONAL_DOWN", 3: "DIAGONAL_UP", } var Winner_Location_Direction_value = map[string]int32{ "HORIZONTAL": 0, "VERTICAL": 1, "DIAGONAL_DOWN": 2, "DIAGONAL_UP": 3, } func (x Winner_Location_Direction) String() string { return proto.EnumName(Winner_Location_Direction_name, int32(x)) } type TurnReply_ResponseStatus int32 const ( TurnReply_SUCCESS TurnReply_ResponseStatus = 0 TurnReply_INVALID_MOVE TurnReply_ResponseStatus = 1 TurnReply_NOT_ACTIVE_PLAYER TurnReply_ResponseStatus = 2 TurnReply_FINISHED TurnReply_ResponseStatus = 3 TurnReply_INVALID_MOVE_ID TurnReply_ResponseStatus = 4 ) var TurnReply_ResponseStatus_name = map[int32]string{ 0: "SUCCESS", 1: "INVALID_MOVE", 2: "NOT_ACTIVE_PLAYER", 3: "FINISHED", 4: "INVALID_MOVE_ID", } var TurnReply_ResponseStatus_value = map[string]int32{ "SUCCESS": 0, "INVALID_MOVE": 1, "NOT_ACTIVE_PLAYER": 2, "FINISHED": 3, "INVALID_MOVE_ID": 4, } func (x TurnReply_ResponseStatus) String() string { return proto.EnumName(TurnReply_ResponseStatus_name, int32(x)) } type Event_Type int32 const ( Event_GAME_CREATED Event_Type = 0 Event_TURN_PLAYED Event_Type = 1 ) var Event_Type_name = map[int32]string{ 0: "GAME_CREATED", 1: "TURN_PLAYED", } var Event_Type_value = map[string]int32{ "GAME_CREATED": 0, "TURN_PLAYED": 1, } func (x Event_Type) String() string { return proto.EnumName(Event_Type_name, int32(x)) } type CreateRequest struct { UserIds []string `protobuf:"bytes,1,rep,name=user_ids" json:"user_ids,omitempty"` } func (m *CreateRequest) Reset() { *m = CreateRequest{} } func (m *CreateRequest) String() string { return proto.CompactTextString(m) } func (*CreateRequest) 
ProtoMessage() {} type CreateReply struct { Status CreateReply_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=tictactoe.CreateReply_ResponseStatus" json:"status,omitempty"` GameId string `protobuf:"bytes,2,opt,name=game_id" json:"game_id,omitempty"` } func (m *CreateReply) Reset() { *m = CreateReply{} } func (m *CreateReply) String() string { return proto.CompactTextString(m) } func (*CreateReply) ProtoMessage() {} type TurnRequest struct { GameId string `protobuf:"bytes,1,opt,name=game_id" json:"game_id,omitempty"` UserId string `protobuf:"bytes,2,opt,name=user_id" json:"user_id,omitempty"` MoveId int64 `protobuf:"varint,3,opt,name=move_id" json:"move_id,omitempty"` Move *TurnRequest_Square `protobuf:"bytes,4,opt,name=move" json:"move,omitempty"` } func (m *TurnRequest) Reset() { *m = TurnRequest{} } func (m *TurnRequest) String() string { return proto.CompactTextString(m) } func (*TurnRequest) ProtoMessage() {} func (m *TurnRequest) GetMove() *TurnRequest_Square { if m != nil { return m.Move } return nil } type TurnRequest_Square struct { X int32 `protobuf:"varint,1,opt,name=x" json:"x,omitempty"` Y int32 `protobuf:"varint,2,opt,name=y" json:"y,omitempty"` } func (m *TurnRequest_Square) Reset() { *m = TurnRequest_Square{} } func (m *TurnRequest_Square) String() string { return proto.CompactTextString(m) } func (*TurnRequest_Square) ProtoMessage() {} type Winner struct { Draw bool `protobuf:"varint,1,opt,name=draw" json:"draw,omitempty"` UserId string `protobuf:"bytes,2,opt,name=user_id" json:"user_id,omitempty"` Locations []*Winner_Location `protobuf:"bytes,3,rep,name=locations" json:"locations,omitempty"` } func (m *Winner) Reset() { *m = Winner{} } func (m *Winner) String() string { return proto.CompactTextString(m) } func (*Winner) ProtoMessage() {} func (m *Winner) GetLocations() []*Winner_Location { if m != nil { return m.Locations } return nil } type Winner_Location struct { Direction Winner_Location_Direction `protobuf:"varint,1,opt,name=direction,enum=tictactoe.Winner_Location_Direction" json:"direction,omitempty"` Position int32 `protobuf:"varint,2,opt,name=position" json:"position,omitempty"` } func (m *Winner_Location) Reset() { *m = Winner_Location{} } func (m *Winner_Location) String() string { return proto.CompactTextString(m) } func (*Winner_Location) ProtoMessage() {} type TurnReply struct { Status TurnReply_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=tictactoe.TurnReply_ResponseStatus" json:"status,omitempty"` MoveId int64 `protobuf:"varint,2,opt,name=move_id" json:"move_id,omitempty"` } func (m *TurnReply) Reset() { *m = TurnReply{} } func (m *TurnReply) String() string { return proto.CompactTextString(m) } func (*TurnReply) ProtoMessage() {} type MoveRange struct { FromX int32 `protobuf:"varint,1,opt,name=from_x" json:"from_x,omitempty"` FromY int32 `protobuf:"varint,2,opt,name=from_y" json:"from_y,omitempty"` ToX int32 `protobuf:"varint,3,opt,name=to_x" json:"to_x,omitempty"` ToY int32 `protobuf:"varint,4,opt,name=to_y" json:"to_y,omitempty"` } func (m *MoveRange) Reset() { *m = MoveRange{} } func (m *MoveRange) String() string { return proto.CompactTextString(m) } func (*MoveRange) ProtoMessage() {} type Event struct { Type Event_Type `protobuf:"varint,1,opt,name=type,enum=tictactoe.Event_Type" json:"type,omitempty"` Timestamp int64 `protobuf:"varint,2,opt,name=timestamp" json:"timestamp,omitempty"` GameId string `protobuf:"bytes,3,opt,name=game_id" json:"game_id,omitempty"` UserId string `protobuf:"bytes,4,opt,name=user_id" 
json:"user_id,omitempty"` UserList []string `protobuf:"bytes,5,rep,name=user_list" json:"user_list,omitempty"` Move *TurnRequest_Square `protobuf:"bytes,6,opt,name=move" json:"move,omitempty"` TurnStatus TurnReply_ResponseStatus `protobuf:"varint,7,opt,name=turn_status,enum=tictactoe.TurnReply_ResponseStatus" json:"turn_status,omitempty"` Winner *Winner `protobuf:"bytes,8,opt,name=winner" json:"winner,omitempty"` MoveId int64 `protobuf:"varint,9,opt,name=move_id" json:"move_id,omitempty"` NextPlayer string `protobuf:"bytes,10,opt,name=next_player" json:"next_player,omitempty"` ValidMoves []*MoveRange `protobuf:"bytes,11,rep,name=valid_moves" json:"valid_moves,omitempty"`<|fim▁hole|>func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (m *Event) GetMove() *TurnRequest_Square { if m != nil { return m.Move } return nil } func (m *Event) GetWinner() *Winner { if m != nil { return m.Winner } return nil } func (m *Event) GetValidMoves() []*MoveRange { if m != nil { return m.ValidMoves } return nil } func init() { proto.RegisterEnum("tictactoe.Mark", Mark_name, Mark_value) proto.RegisterEnum("tictactoe.CreateReply_ResponseStatus", CreateReply_ResponseStatus_name, CreateReply_ResponseStatus_value) proto.RegisterEnum("tictactoe.Winner_Location_Direction", Winner_Location_Direction_name, Winner_Location_Direction_value) proto.RegisterEnum("tictactoe.TurnReply_ResponseStatus", TurnReply_ResponseStatus_name, TurnReply_ResponseStatus_value) proto.RegisterEnum("tictactoe.Event_Type", Event_Type_name, Event_Type_value) } // Client API for GameManager service type GameManagerClient interface { CreateGame(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateReply, error) PlayTurn(ctx context.Context, in *TurnRequest, opts ...grpc.CallOption) (*TurnReply, error) } type gameManagerClient struct { cc *grpc.ClientConn } func NewGameManagerClient(cc *grpc.ClientConn) GameManagerClient { return &gameManagerClient{cc} } func (c *gameManagerClient) CreateGame(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateReply, error) { out := new(CreateReply) err := grpc.Invoke(ctx, "/tictactoe.GameManager/CreateGame", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *gameManagerClient) PlayTurn(ctx context.Context, in *TurnRequest, opts ...grpc.CallOption) (*TurnReply, error) { out := new(TurnReply) err := grpc.Invoke(ctx, "/tictactoe.GameManager/PlayTurn", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } // Server API for GameManager service type GameManagerServer interface { CreateGame(context.Context, *CreateRequest) (*CreateReply, error) PlayTurn(context.Context, *TurnRequest) (*TurnReply, error) } func RegisterGameManagerServer(s *grpc.Server, srv GameManagerServer) { s.RegisterService(&_GameManager_serviceDesc, srv) } func _GameManager_CreateGame_Handler(srv interface{}, ctx context.Context, buf []byte) (proto.Message, error) { in := new(CreateRequest) if err := proto.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(GameManagerServer).CreateGame(ctx, in) if err != nil { return nil, err } return out, nil } func _GameManager_PlayTurn_Handler(srv interface{}, ctx context.Context, buf []byte) (proto.Message, error) { in := new(TurnRequest) if err := proto.Unmarshal(buf, in); err != nil { return nil, err } out, err := srv.(GameManagerServer).PlayTurn(ctx, in) if err != nil { return nil, err } return out, nil } var _GameManager_serviceDesc = grpc.ServiceDesc{ ServiceName: "tictactoe.GameManager", HandlerType: (*GameManagerServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CreateGame", Handler: _GameManager_CreateGame_Handler, }, { MethodName: "PlayTurn", Handler: _GameManager_PlayTurn_Handler, }, }, Streams: []grpc.StreamDesc{}, }<|fim▁end|>
} func (m *Event) Reset() { *m = Event{} }
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># This file is part of Boomer Core. # # Boomer Core is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Boomer Core is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Boomer Core. If not, see <http://www.gnu.org/licenses/>. # # Forked from Mycroft Core on 2017-07-29 import os from os.path import join, expanduser, isdir __author__ = 'jdorleans' class FileSystemAccess(object): """ A class for providing access to the boomer FS sandbox. Intended to be attached to skills at initialization time to provide a skill-specific namespace. """ def __init__(self, path): self.path = self.__init_path(path) @staticmethod<|fim▁hole|> if not isinstance(path, str) or len(path) == 0: raise ValueError("path must be initialized as a non empty string") path = join(expanduser('~'), '.boomer', path) if not isdir(path): os.makedirs(path) return path def open(self, filename, mode): """ Get a handle to a file (with the provided mode) within the skill-specific namespace. :param filename: a str representing a path relative to the namespace. subdirs not currently supported. :param mode: a file handle mode :return: an open file handle. """ file_path = join(self.path, filename) return open(file_path, mode) def exists(self, filename): return os.path.exists(join(self.path, filename))<|fim▁end|>
def __init_path(path):
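A short usage sketch of the class above; the skill name and file name are illustrative assumptions, not from the source. Because __init_path anchors every path under ~/.boomer, each skill gets its own sandbox directory:

# hypothetical usage; 'my_skill' and 'counter.txt' are example names
fs = FileSystemAccess('my_skill')        # creates ~/.boomer/my_skill if missing
with fs.open('counter.txt', 'w') as f:   # plain file handle, so 'with' works
    f.write('1')
assert fs.exists('counter.txt')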
<|file_name|>component.rs<|end_file_name|><|fim▁begin|>use std::comm::{TryRecvError,Empty,Disconnected}; use std::fmt; use message::{Message,MessageData}; use message::MessageData::{MsgStart}; #[deriving(PartialEq,Clone)] pub enum ComponentType { ManagerComponent, ExtractorComponent, AudioDecoderComponent, VideoDecoderComponent, ClockComponent, AudioRendererComponent, VideoRendererComponent, UiComponent, } impl fmt::Show for ComponentType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ComponentType::ManagerComponent => write!(f, "ComponentManager"), ComponentType::ExtractorComponent => write!(f, "Extractor"), ComponentType::AudioDecoderComponent => write!(f, "AudioDecoder"), ComponentType::VideoDecoderComponent => write!(f, "VideoDecoder"), ComponentType::ClockComponent => write!(f, "Clock"), ComponentType::AudioRendererComponent => write!(f, "AudioRenderer"), ComponentType::VideoRendererComponent => write!(f, "VideoRenderer"), ComponentType::UiComponent => write!(f, "UI"), } } } pub struct ComponentStruct { pub component_type: ComponentType, pub mgr_sender: Option<Sender<Message>>, pub receiver: Receiver<Message>, pub sender: Option<Sender<Message>>,<|fim▁hole|> let (sender, receiver) = channel::<Message>(); ComponentStruct { component_type: component_type, mgr_sender: None, receiver: receiver, sender: Some(sender), } } pub fn set_mgr_sender(&mut self, sender: Sender<Message>) { self.mgr_sender= Some(sender); } pub fn take_sender(&mut self) -> Sender<Message> { self.sender.take().unwrap() } pub fn send(&self, to: ComponentType, msg:MessageData) -> bool { match self.mgr_sender.as_ref().unwrap().send_opt(Message { from: self.component_type.clone(), to: to, msg: msg }) { Ok(_) => true, Err(_) => false } } pub fn recv(&self) -> Message { self.receiver.recv() } pub fn try_recv(&self) -> Result<Message, TryRecvError> { self.receiver.try_recv() } pub fn flush(&self) { loop { match self.receiver.try_recv() { Ok(_msg) => { debug!("{} flush", self.component_type); } Err(Empty) => { break } Err(Disconnected) => { break; } } } } pub fn wait_for_start(&self) { match self.recv() { Message { from: ComponentType::ManagerComponent, msg: MsgStart, .. } => { info!("start {}", self.component_type); } _ => { panic!("unexpected message received"); } } } } pub trait Component { fn get<'a>(&'a mut self) -> &'a mut ComponentStruct; }<|fim▁end|>
} impl ComponentStruct { pub fn new(component_type: ComponentType) -> ComponentStruct {
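The record above is pre-1.0 Rust: std::comm, deriving(...), and send_opt were all removed before Rust 1.0. The same component-mailbox pattern in stable Rust lives in std::sync::mpsc; a rough modern sketch under that assumption, in the record's own language (struct and message types are illustrative, not from the source):

use std::sync::mpsc::{channel, Receiver, Sender};

struct Mailbox {
    receiver: Receiver<String>,
    sender: Option<Sender<String>>,
}

impl Mailbox {
    fn new() -> Mailbox {
        let (sender, receiver) = channel();
        Mailbox { receiver, sender: Some(sender) }
    }

    // hand the sending half to whoever needs to talk to this component
    fn take_sender(&mut self) -> Sender<String> {
        self.sender.take().unwrap()
    }

    // drain pending messages without blocking, mirroring flush() above
    fn flush(&self) {
        while let Ok(_msg) = self.receiver.try_recv() {}
    }
}

fn main() {
    let mut mailbox = Mailbox::new();
    let tx = mailbox.take_sender();
    tx.send(String::from("hello")).unwrap();
    mailbox.flush();
}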
<|file_name|>main-menu.controller.spec.js<|end_file_name|><|fim▁begin|>/* jshint -W030 */<|fim▁hole|> // load the controller's module beforeEach(module('ftiApp.mainMenu')); var controller; var menuEntry = {name: 'test', state: 'test.main'}; var mainMenuMock = { getMenu: function () { return [menuEntry] } }; var $mdSidenavMock = function () { return { open: function () {}, close: function () {} } } // Initialize the controller and a mock scope beforeEach(inject(function ($controller, $rootScope) { controller = $controller('MainMenuController', { mainMenu: mainMenuMock, $mdSidenav: $mdSidenavMock }); })); it('object should exist', function () { Should.exist(controller); controller.should.be.an.Object; }); it('should have an items property', function () { Should.exist(controller.items); controller.items.should.be.an.Array; controller.items.should.eql([menuEntry]); }); });<|fim▁end|>
'use strict'; describe('Controller: MainMenuController', function () {
<|file_name|>iter2.rs<|end_file_name|><|fim▁begin|>// iter2.rs fn main() { let arr = [10, 20, 30]; for i in arr { println!("{}", i); }<|fim▁hole|><|fim▁end|>
} // this doesn't work, try to compile it!
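The promised failure: on compilers older than Rust 1.53, [T; N] had no IntoIterator implementation, so `for i in arr` was rejected; newer toolchains accept it on every edition. Iterating by reference is the traditional fix — a sketch of the corrected file, in the record's own language:

// iter2.rs — fixed
fn main() {
    let arr = [10, 20, 30];
    for i in &arr {          // borrow the array; compiles on old toolchains too
        println!("{}", i);
    }
}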
<|file_name|>xbee.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """XBeeModem.py bypasses the XBee's 802.15.4 capabilities and simply uses its modem for communications You don't have to master 802.15.4 and a large set of XBee commands to make a very simple but potentially useful network. At its core, the XBee radio is a modem and you can use it directly for simple serial communications. Reference Materials: Non-blocking read from stdin in python - http://repolinux.wordpress.com/2012/10/09/non-blocking-read-from-stdin-in-python/ Non-blocking read on a subprocess.PIPE in python - http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python Originally Created By:<|fim▁hole|>
client = PiHttpClient("127.0.0.1") macro = Macro(client, "getPitInfo") data = macro.call() if data: debug("Sending: %s" % data) serial.writelines(data + "\n") except KeyboardInterrupt: raise except Exception as e: exception(e) time.sleep(1.) except KeyboardInterrupt: info("*** Ctrl-C keyboard interrupt ***") if __name__ == "__main__": try: main(sys.argv) except Exception as e: exception(e) stop() info("RPi #1 is going down")<|fim▁end|>
Jeff Irland ([email protected]) in March 2013 """ # imported modules
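The heart of the script above is a poll-the-UART loop: call readline() only while inWaiting() reports pending bytes, then sleep. A stripped-down sketch of just that loop with pyserial, leaving out the WebIOPi bridging (the port and the reply text are assumptions):

from time import sleep
from serial import Serial

ser = Serial()
ser.port = '/dev/ttyAMA0'   # same port the script above assumes
ser.baudrate = 9600
ser.timeout = 1
ser.open()
ser.flushInput()            # discard anything buffered before startup

while True:
    while ser.inWaiting() > 0:   # read only when bytes are already pending
        line = ser.readline().decode('utf-8').strip()
        if line:
            ser.write(('ack: ' + line + '\n').encode('utf-8'))
    sleep(1.0)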
<|file_name|>specular_reflection.rs<|end_file_name|><|fim▁begin|>//! Defines a BRDF that describes specular reflection use std::f32; use enum_set::EnumSet; use linalg::Vector; use film::Colorf; use bxdf::{self, BxDF, BxDFType}; use bxdf::fresnel::Fresnel; /// Specular reflection BRDF that implements a specularly reflective material model pub struct SpecularReflection { /// Color of the reflective material reflectance: Colorf, /// Fresnel term for the reflection model fresnel: Box<Fresnel + Send + Sync> } impl SpecularReflection { /// Create a specularly reflective BRDF with the reflective color and Fresnel term pub fn new(c: &Colorf, fresnel: Box<Fresnel + Send + Sync>) -> SpecularReflection {<|fim▁hole|> } } impl BxDF for SpecularReflection { fn bxdf_type(&self) -> EnumSet<BxDFType> { let mut e = EnumSet::new(); e.insert(BxDFType::Specular); e.insert(BxDFType::Reflection); e } /// We'll never exactly hit the specular reflection direction with some pair /// so this just returns black. Use `sample` instead fn eval(&self, _: &Vector, _: &Vector) -> Colorf { Colorf::broadcast(0.0) } /// Sampling the specular BRDF just returns the specular reflection direction /// for the light leaving along `w_o` fn sample(&self, w_o: &Vector, _: &(f32, f32)) -> (Colorf, Vector, f32) { if w_o.z != 0.0 { let w_i = Vector::new(-w_o.x, -w_o.y, w_o.z); let c = self.fresnel.fresnel(-bxdf::cos_theta(w_o)) * self.reflectance / f32::abs(bxdf::cos_theta(&w_i)); (c, w_i, 1.0) } else { (Colorf::black(), Vector::broadcast(0.0), 0.0) } } }<|fim▁end|>
SpecularReflection { reflectance: *c, fresnel: fresnel }
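The geometry in sample() above is compact enough to restate: with the shading normal on +z, the perfect mirror of w_o is (-x, -y, z), the pdf of the delta lobe is taken as 1, and the weight is fresnel(-cos θ_o) · reflectance / |cos θ_i|. A plain-Python transcription of that rule (function and argument names are illustrative, not from the source):

def sample_specular(w_o, reflectance, fresnel):
    """Mirror w_o about the +z shading normal; returns (weight, w_i, pdf)."""
    x, y, z = w_o
    if z == 0.0:
        return 0.0, (0.0, 0.0, 0.0), 0.0   # grazing direction: no reflection
    w_i = (-x, -y, z)                      # perfect mirror direction
    weight = fresnel(-z) * reflectance / abs(z)
    return weight, w_i, 1.0                # delta distribution: pdf = 1

# ideal mirror (fresnel is 1 everywhere), arbitrary outgoing direction
print(sample_specular((0.5, 0.0, 0.7), 1.0, lambda cos_t: 1.0))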
<|file_name|>plugin.py<|end_file_name|><|fim▁begin|>from Plugins.Plugin import PluginDescriptor from Screens.Console import Console from Screens.ChoiceBox import ChoiceBox from Screens.MessageBox import MessageBox from Screens.Screen import Screen from Screens.Standby import TryQuitMainloop from Screens.Ipkg import Ipkg from Screens.SoftwareUpdate import UpdatePlugin from Components.ActionMap import ActionMap, NumberActionMap from Components.Input import Input from Components.Ipkg import IpkgComponent from Components.Sources.StaticText import StaticText from Components.ScrollLabel import ScrollLabel from Components.Pixmap import Pixmap from Components.MenuList import MenuList from Components.Sources.List import List from Components.Slider import Slider from Components.Harddisk import harddiskmanager from Components.config import config,getConfigListEntry, ConfigSubsection, ConfigText, ConfigLocations, ConfigYesNo, ConfigSelection from Components.ConfigList import ConfigListScreen from Components.Console import Console from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest from Components.SelectionList import SelectionList from Components.PluginComponent import plugins from Components.About import about from Components.PackageInfo import PackageInfoHandler from Components.Language import language from Components.AVSwitch import AVSwitch from Components.Task import job_manager from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_PLUGIN, SCOPE_ACTIVE_SKIN, SCOPE_METADIR from Tools.LoadPixmap import LoadPixmap from Tools.NumericalTextInput import NumericalTextInput from enigma import eTimer, RT_HALIGN_LEFT, RT_VALIGN_CENTER, eListboxPythonMultiContent, eListbox, gFont, getDesktop, ePicLoad, eRCInput, getPrevAsciiCode, eEnv, iRecordableService from cPickle import dump, load from os import path as os_path, system as os_system, unlink, stat, mkdir, popen, makedirs, listdir, access, rename, remove, W_OK, R_OK, F_OK from time import time, gmtime, strftime, localtime from stat import ST_MTIME from datetime import date from twisted.web import client from twisted.internet import reactor from ImageWizard import ImageWizard from BackupRestore import BackupSelection, RestoreMenu, BackupScreen, RestoreScreen, getBackupPath, getBackupFilename from SoftwareTools import iSoftwareTools config.plugins.configurationbackup = ConfigSubsection() config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False) config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname']) config.plugins.softwaremanager = ConfigSubsection() config.plugins.softwaremanager.overwriteConfigFiles = ConfigSelection( [ ("Y", _("Yes, always")), ("N", _("No, never")), ("ask", _("Always ask")) ], "Y") config.plugins.softwaremanager.onSetupMenu = ConfigYesNo(default=False) config.plugins.softwaremanager.onBlueButton = ConfigYesNo(default=False) def write_cache(cache_file, cache_data): #Does a cPickle dump if not os_path.isdir( os_path.dirname(cache_file) ): try: mkdir( os_path.dirname(cache_file) ) except OSError: print os_path.dirname(cache_file), 'is a file' fd = open(cache_file, 'w') dump(cache_data, fd, -1) fd.close() def valid_cache(cache_file, cache_ttl): #See if the 
cache file exists and is still living try: mtime = stat(cache_file)[ST_MTIME] except: return 0 curr_time = time() if (curr_time - mtime) > cache_ttl: return 0 else: return 1 def load_cache(cache_file): #Does a cPickle load fd = open(cache_file) cache_data = load(fd) fd.close() return cache_data class UpdatePluginMenu(Screen): skin = """ <screen name="UpdatePluginMenu" position="center,center" size="610,410" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <ePixmap pixmap="border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" /> <widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText, ], "fonts": [gFont("Regular", 22)], "itemHeight": 25 } </convert> </widget> <widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description, ], "fonts": [gFont("Regular", 22)], "itemHeight": 300 } </convert> </widget> <widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" /> </screen>""" def __init__(self, session, args = 0): Screen.__init__(self, session) Screen.setTitle(self, _("Software management")) self.skin_path = plugin_path self.menu = args self.list = [] self.oktext = _("\nPress OK on your remote control to continue.") self.menutext = _("Press MENU on your remote control for additional options.") self.infotext = _("Press INFO on your remote control for additional information.") self.text = "" self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.getValue() ) if self.menu == 0: print "building menu entries" self.list.append(("install-extensions", _("Manage extensions"), _("\nManage extensions or plugins for your STB_BOX" ) + self.oktext, None)) self.list.append(("software-update", _("Software update"), _("\nOnline update of your STB_BOX software." ) + self.oktext, None)) self.list.append(("software-restore", _("Software restore"), _("\nRestore your STB_BOX with a new firmware." ) + self.oktext, None)) self.list.append(("system-backup", _("Backup system settings"), _("\nBackup your STB_BOX settings." ) + self.oktext + "\n\n" + self.infotext, None)) self.list.append(("system-restore",_("Restore system settings"), _("\nRestore your STB_BOX settings." ) + self.oktext, None)) self.list.append(("ipkg-install", _("Install local extension"), _("\nScan for local extensions and install them." 
) + self.oktext, None)) for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER): if p.__call__.has_key("SoftwareSupported"): callFnc = p.__call__["SoftwareSupported"](None) if callFnc is not None: if p.__call__.has_key("menuEntryName"): menuEntryName = p.__call__["menuEntryName"](None) else: menuEntryName = _('Extended Software') if p.__call__.has_key("menuEntryDescription"): menuEntryDescription = p.__call__["menuEntryDescription"](None) else: menuEntryDescription = _('Extended Software Plugin') self.list.append(('default-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc)) if config.usage.setup_level.index >= 2: # expert+ self.list.append(("advanced", _("Advanced options"), _("\nAdvanced options and settings." ) + self.oktext, None)) elif self.menu == 1: self.list.append(("advancedrestore", _("Advanced restore"), _("\nRestore your backups by date." ) + self.oktext, None)) self.list.append(("backuplocation", _("Select backup location"), _("\nSelect your backup device.\nCurrent device: " ) + config.plugins.configurationbackup.backuplocation.getValue() + self.oktext, None)) self.list.append(("backupfiles", _("Select backup files"), _("Select files for backup.") + self.oktext + "\n\n" + self.infotext, None)) if config.usage.setup_level.index >= 2: # expert+ self.list.append(("ipkg-manager", _("Packet management"), _("\nView, install and remove available or installed packages." ) + self.oktext, None)) self.list.append(("ipkg-source",_("Select upgrade source"), _("\nEdit the upgrade source address." ) + self.oktext, None)) for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER): if p.__call__.has_key("AdvancedSoftwareSupported"): callFnc = p.__call__["AdvancedSoftwareSupported"](None) if callFnc is not None: if p.__call__.has_key("menuEntryName"): menuEntryName = p.__call__["menuEntryName"](None) else: menuEntryName = _('Advanced software') if p.__call__.has_key("menuEntryDescription"): menuEntryDescription = p.__call__["menuEntryDescription"](None) else: menuEntryDescription = _('Advanced software plugin') self.list.append(('advanced-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc)) self["menu"] = List(self.list) self["key_red"] = StaticText(_("Close")) self["status"] = StaticText(self.menutext) self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "MenuActions", "NumberActions"], { "ok": self.go, "back": self.close, "red": self.close, "menu": self.handleMenu, "showEventInfo": self.handleInfo, "1": self.go, "2": self.go, "3": self.go, "4": self.go, "5": self.go, "6": self.go, "7": self.go, "8": self.go, "9": self.go, }, -1) self.onLayoutFinish.append(self.layoutFinished) self.backuppath = getBackupPath() self.backupfile = getBackupFilename() self.fullbackupfilename = self.backuppath + "/" + self.backupfile self.onShown.append(self.setWindowTitle) self.onChangedEntry = [] self["menu"].onSelectionChanged.append(self.selectionChanged) def createSummary(self): from Screens.PluginBrowser import PluginBrowserSummary return PluginBrowserSummary def selectionChanged(self): item = self["menu"].getCurrent() if item: name = item[1] desc = item[2] else: name = "-" desc = "" for cb in self.onChangedEntry: cb(name, desc) def layoutFinished(self): idx = 0 self["menu"].index = idx def setWindowTitle(self): self.setTitle(_("Software management")) def cleanup(self): iSoftwareTools.cleanupSoftwareTools() def getUpdateInfos(self): if iSoftwareTools.NetworkConnectionAvailable is True: if 
iSoftwareTools.available_updates is not 0: self.text = _("There are at least %s updates available.") % (str(iSoftwareTools.available_updates)) else: self.text = "" #_("There are no updates available.") if iSoftwareTools.list_updating is True: self.text += "\n" + _("A search for available updates is currently in progress.") else: self.text = _("No network connection available.") self["status"].setText(self.text) def handleMenu(self): self.session.open(SoftwareManagerSetup) def handleInfo(self): current = self["menu"].getCurrent() if current: currentEntry = current[0] if currentEntry in ("system-backup","backupfiles"): self.session.open(SoftwareManagerInfo, mode = "backupinfo") def go(self, num = None): if num is not None: num -= 1 if not num < self["menu"].count(): return self["menu"].setIndex(num) current = self["menu"].getCurrent() if current: currentEntry = current[0] if self.menu == 0: if (currentEntry == "software-update"): self.session.open(UpdatePlugin) elif (currentEntry == "software-restore"): self.session.open(ImageWizard) elif (currentEntry == "install-extensions"): self.session.open(PluginManager, self.skin_path) elif (currentEntry == "system-backup"): self.session.openWithCallback(self.backupDone,BackupScreen, runBackup = True) elif (currentEntry == "system-restore"): if os_path.exists(self.fullbackupfilename): self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore the backup?\nYour receiver will restart after the backup has been restored!")) else: self.session.open(MessageBox, _("Sorry, no backups found!"), MessageBox.TYPE_INFO, timeout = 10) elif (currentEntry == "ipkg-install"): try: from Plugins.Extensions.MediaScanner.plugin import main main(self.session) except: self.session.open(MessageBox, _("Sorry, %s has not been installed!") % ("MediaScanner"), MessageBox.TYPE_INFO, timeout = 10) elif (currentEntry == "default-plugin"): self.extended = current[3] self.extended(self.session, None) elif (currentEntry == "advanced"): self.session.open(UpdatePluginMenu, 1) elif self.menu == 1: if (currentEntry == "ipkg-manager"): self.session.open(PacketManager, self.skin_path) elif (currentEntry == "backuplocation"): parts = [ (r.description, r.mountpoint, self.session) for r in harddiskmanager.getMountedPartitions(onlyhotplug = False)] for x in parts: if not access(x[1], F_OK|R_OK|W_OK) or x[1] == '/': parts.remove(x) if len(parts): self.session.openWithCallback(self.backuplocation_choosen, ChoiceBox, title = _("Please select medium to use as backup location"), list = parts) elif (currentEntry == "backupfiles"): self.session.openWithCallback(self.backupfiles_choosen,BackupSelection) elif (currentEntry == "advancedrestore"): self.session.open(RestoreMenu, self.skin_path) elif (currentEntry == "ipkg-source"): self.session.open(IPKGMenu, self.skin_path) elif (currentEntry == "advanced-plugin"): self.extended = current[3] self.extended(self.session, None) def backupfiles_choosen(self, ret): self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.getValue() ) config.plugins.configurationbackup.backupdirs.save() config.plugins.configurationbackup.save() config.save() def backuplocation_choosen(self, option): oldpath = config.plugins.configurationbackup.backuplocation.getValue() if option is not None: config.plugins.configurationbackup.backuplocation.value = str(option[1]) config.plugins.configurationbackup.backuplocation.save() config.plugins.configurationbackup.save() config.save() newpath = 
config.plugins.configurationbackup.backuplocation.getValue() if newpath != oldpath: self.createBackupfolders() def createBackupfolders(self): print "Creating backup folder if not already there..." self.backuppath = getBackupPath() try: if (os_path.exists(self.backuppath) == False): makedirs(self.backuppath) except OSError: self.session.open(MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10) def backupDone(self,retval = None): if retval is True: self.session.open(MessageBox, _("Backup completed."), MessageBox.TYPE_INFO, timeout = 10) else: self.session.open(MessageBox, _("Backup failed."), MessageBox.TYPE_INFO, timeout = 10) def startRestore(self, ret = False): if (ret == True): self.exe = True self.session.open(RestoreScreen, runRestore = True) class SoftwareManagerSetup(Screen, ConfigListScreen): skin = """ <screen name="SoftwareManagerSetup" position="center,center" size="560,440" title="SoftwareManager setup"> <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" /> <widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" /> <widget name="config" position="5,50" size="550,350" scrollbarMode="showOnDemand" /> <ePixmap pixmap="div-h.png" position="0,400" zPosition="1" size="560,2" /> <widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" /> </screen>""" def __init__(self, session, skin_path = None): Screen.__init__(self, session) self.session = session self.skin_path = skin_path if self.skin_path == None: self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager") self.onChangedEntry = [ ] self.setup_title = _("Software manager setup") self.overwriteConfigfilesEntry = None self.list = [ ] ConfigListScreen.__init__(self, self.list, session = session, on_change = self.changedEntry) self["actions"] = ActionMap(["SetupActions", "MenuActions"], { "cancel": self.keyCancel, "save": self.apply, "menu": self.closeRecursive, }, -2) self["key_red"] = StaticText(_("Cancel")) self["key_green"] = StaticText(_("OK")) self["key_yellow"] = StaticText() self["key_blue"] = StaticText() self["introduction"] = StaticText() self.createSetup() self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): self.setTitle(self.setup_title) def createSetup(self): self.list = [ ] self.overwriteConfigfilesEntry = getConfigListEntry(_("Overwrite configuration files?"), config.plugins.softwaremanager.overwriteConfigFiles) 
self.list.append(self.overwriteConfigfilesEntry) self.list.append(getConfigListEntry(_("show softwaremanager in plugin menu"), config.plugins.softwaremanager.onSetupMenu)) self.list.append(getConfigListEntry(_("show softwaremanager on blue button"), config.plugins.softwaremanager.onBlueButton)) self["config"].list = self.list self["config"].l.setSeperation(400) self["config"].l.setList(self.list) if not self.selectionChanged in self["config"].onSelectionChanged: self["config"].onSelectionChanged.append(self.selectionChanged) self.selectionChanged() def selectionChanged(self): if self["config"].getCurrent() == self.overwriteConfigfilesEntry: self["introduction"].setText(_("Overwrite configuration files during software upgrade?")) else: self["introduction"].setText("") def newConfig(self): pass def keyLeft(self): ConfigListScreen.keyLeft(self) def keyRight(self): ConfigListScreen.keyRight(self) def confirm(self, confirmed): if not confirmed: print "not confirmed" return else: self.keySave() plugins.clearPluginList() plugins.readPluginList(resolveFilename(SCOPE_PLUGINS)) def apply(self): self.session.openWithCallback(self.confirm, MessageBox, _("Use these settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = True) def cancelConfirm(self, result): if not result: return for x in self["config"].list: x[1].cancel() self.close() def keyCancel(self): if self["config"].isChanged(): self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = False) else: self.close() # for summary: def changedEntry(self): for x in self.onChangedEntry: x() self.selectionChanged() def getCurrentEntry(self): return self["config"].getCurrent()[0] def getCurrentValue(self): return str(self["config"].getCurrent()[1].getValue()) def createSummary(self): from Screens.Setup import SetupSummary return SetupSummary class SoftwareManagerInfo(Screen): skin = """ <screen name="SoftwareManagerInfo" position="center,center" size="560,440" title="SoftwareManager information"> <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" /> <widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" /> <widget source="list" render="Listbox" position="5,50" size="550,340" scrollbarMode="showOnDemand" selectionDisabled="0"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (5, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT | RT_HALIGN_CENTER, text = 0), # index 0 is the name ], "fonts": [gFont("Regular", 24),gFont("Regular", 22)], "itemHeight": 26 } </convert> </widget> <ePixmap 
pixmap="div-h.png" position="0,400" zPosition="1" size="560,2" /> <widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" /> </screen>""" def __init__(self, session, skin_path = None, mode = None): Screen.__init__(self, session) self.session = session self.mode = mode self.skin_path = skin_path if self.skin_path == None: self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager") self["actions"] = ActionMap(["ShortcutActions", "WizardActions"], { "back": self.close, "red": self.close, }, -2) self.list = [] self["list"] = List(self.list) self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText() self["key_yellow"] = StaticText() self["key_blue"] = StaticText() self["introduction"] = StaticText() self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): self.setTitle(_("Softwaremanager information")) if self.mode is not None: self.showInfos() def showInfos(self): if self.mode == "backupinfo": self.list = [] backupfiles = config.plugins.configurationbackup.backupdirs.getValue() for entry in backupfiles: self.list.append((entry,)) self['list'].setList(self.list) class PluginManager(Screen, PackageInfoHandler): skin = """ <screen name="PluginManager" position="center,center" size="560,440" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" /> <widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" /> <widget source="list" render="Listbox" position="5,50" size="550,360" scrollbarMode="showOnDemand"> <convert type="TemplatedMultiContent"> {"templates": {"default": (51,[ MultiContentEntryText(pos = (0, 1), size = (470, 24), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name MultiContentEntryText(pos = (0, 25), size = (470, 24), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description MultiContentEntryPixmapAlphaTest(pos = (475, 0), size = (48, 48), png = 5), # index 5 is the status pixmap MultiContentEntryPixmapAlphaTest(pos = (0, 49), size = (550, 2), png = 6), # index 6 is the div pixmap ]), "category": (40,[ MultiContentEntryText(pos = (30, 0), size = (500, 22), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name MultiContentEntryText(pos = (30, 22), size = (500, 16), font=2, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the description MultiContentEntryPixmapAlphaTest(pos = (0, 38), size = (550, 2), png = 3), # index 3 is the div pixmap ]) }, "fonts": [gFont("Regular", 22),gFont("Regular", 20),gFont("Regular", 16)], "itemHeight": 52 } 
</convert> </widget> <widget source="status" render="Label" position="5,410" zPosition="10" size="540,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" /> </screen>""" def __init__(self, session, plugin_path = None, args = None): Screen.__init__(self, session) Screen.setTitle(self, _("Extensions management")) self.session = session self.skin_path = plugin_path if self.skin_path == None: self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager") self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "HelpActions" ], { "ok": self.handleCurrent, "back": self.exit, "red": self.exit, "green": self.handleCurrent, "yellow": self.handleSelected, "showEventInfo": self.handleSelected, "displayHelp": self.handleHelp, }, -1) self.list = [] self.statuslist = [] self.selectedFiles = [] self.categoryList = [] self.packetlist = [] self["list"] = List(self.list) self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText("") self["key_yellow"] = StaticText("") self["key_blue"] = StaticText("") self["status"] = StaticText("") self.cmdList = [] self.oktext = _("\nAfter pressing OK, please wait!") if not self.selectionChanged in self["list"].onSelectionChanged: self["list"].onSelectionChanged.append(self.selectionChanged) self.currList = "" self.currentSelectedTag = None self.currentSelectedIndex = None self.currentSelectedPackage = None self.saved_currentSelectedPackage = None self.restartRequired = False self.onShown.append(self.setWindowTitle) self.onLayoutFinish.append(self.getUpdateInfos) def setWindowTitle(self): self.setTitle(_("Extensions management")) def exit(self): if self.currList == "packages": self.currList = "category" self.currentSelectedTag = None self["list"].style = "category" self['list'].setList(self.categoryList) self["list"].setIndex(self.currentSelectedIndex) self["list"].updateList(self.categoryList) self.selectionChanged() else: iSoftwareTools.cleanupSoftwareTools() self.prepareInstall() if len(self.cmdList): self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList) else: self.close() def handleHelp(self): if self.currList != "status": self.session.open(PluginManagerHelp, self.skin_path) def setState(self,status = None): if status: self.currList = "status" self.statuslist = [] self["key_green"].setText("") self["key_blue"].setText("") self["key_yellow"].setText("") divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png")) if status == 'update': statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png")) self.statuslist.append(( _("Updating software catalog"), '', _("Searching for available updates. Please wait..." ),'', '', statuspng, divpng, None, '' )) elif status == 'sync': statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png")) self.statuslist.append(( _("Package list update"), '', _("Searching for new installed or removed packages. Please wait..." ),'', '', statuspng, divpng, None, '' )) elif status == 'error': self["key_green"].setText(_("Continue")) statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png")) self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." 
),'', '', statuspng, divpng, None, '' )) self["list"].style = "default" self['list'].setList(self.statuslist) def getUpdateInfos(self): if (iSoftwareTools.lastDownloadDate is not None and iSoftwareTools.NetworkConnectionAvailable is False): self.rebuildList() else: self.setState('update') iSoftwareTools.startSoftwareTools(self.getUpdateInfosCB) def getUpdateInfosCB(self, retval = None): if retval is not None: if retval is True: if iSoftwareTools.available_updates is not 0: self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + ' ' + _("updates available.")) else: self["status"].setText(_("There are no updates available.")) self.rebuildList() elif retval is False: if iSoftwareTools.lastDownloadDate is None: self.setState('error') if iSoftwareTools.NetworkConnectionAvailable: self["status"].setText(_("Updatefeed not available.")) else: self["status"].setText(_("No network connection available.")) else: iSoftwareTools.lastDownloadDate = time() iSoftwareTools.list_updating = True self.setState('update') iSoftwareTools.getUpdates(self.getUpdateInfosCB) def rebuildList(self, retval = None): if self.currentSelectedTag is None: self.buildCategoryList() else: self.buildPacketList(self.currentSelectedTag) def selectionChanged(self): current = self["list"].getCurrent() self["status"].setText("") if current: if self.currList == "packages": self["key_red"].setText(_("Back")) if current[4] == 'installed': self["key_green"].setText(_("Uninstall")) elif current[4] == 'installable': self["key_green"].setText(_("Install")) if iSoftwareTools.NetworkConnectionAvailable is False: self["key_green"].setText("") elif current[4] == 'remove': self["key_green"].setText(_("Undo uninstall")) elif current[4] == 'install': self["key_green"].setText(_("Undo install")) if iSoftwareTools.NetworkConnectionAvailable is False: self["key_green"].setText("") self["key_yellow"].setText(_("View details")) self["key_blue"].setText("") if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates is not 0: self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + ' ' + _("updates available.")) elif len(self.selectedFiles) is not 0: self["status"].setText(str(len(self.selectedFiles)) + ' ' + _("packages selected.")) else: self["status"].setText(_("There are currently no outstanding actions.")) elif self.currList == "category": self["key_red"].setText(_("Close")) self["key_green"].setText("") self["key_yellow"].setText("") self["key_blue"].setText("") if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates is not 0: self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + ' ' + _("updates available.")) self["key_yellow"].setText(_("Update")) elif len(self.selectedFiles) is not 0: self["status"].setText(str(len(self.selectedFiles)) + ' ' + _("packages selected.")) self["key_yellow"].setText(_("Process")) else: self["status"].setText(_("There are currently no outstanding actions.")) def getSelectionState(self, detailsFile): for entry in self.selectedFiles: if entry[0] == detailsFile: return True return False def handleCurrent(self): current = self["list"].getCurrent() if current: if self.currList == "category": self.currentSelectedIndex = self["list"].index selectedTag = current[2] self.buildPacketList(selectedTag) elif self.currList == "packages": if current[7] is not '': idx = self["list"].getIndex() detailsFile = self.list[idx][1] if self.list[idx][7] == True: for entry in self.selectedFiles: if entry[0] == 
detailsFile: self.selectedFiles.remove(entry) else: alreadyinList = False for entry in self.selectedFiles: if entry[0] == detailsFile: alreadyinList = True if not alreadyinList: if (iSoftwareTools.NetworkConnectionAvailable is False and current[4] in ('installable','install')): pass else: self.selectedFiles.append((detailsFile,current[4],current[3])) self.currentSelectedPackage = ((detailsFile,current[4],current[3])) if current[4] == 'installed': self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'remove', True) elif current[4] == 'installable': if iSoftwareTools.NetworkConnectionAvailable: self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'install', True) elif current[4] == 'remove': self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installed', False) elif current[4] == 'install': if iSoftwareTools.NetworkConnectionAvailable: self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installable',False) self["list"].setList(self.list) self["list"].setIndex(idx) self["list"].updateList(self.list) self.selectionChanged() elif self.currList == "status": iSoftwareTools.lastDownloadDate = time() iSoftwareTools.list_updating = True self.setState('update') iSoftwareTools.getUpdates(self.getUpdateInfosCB) def handleSelected(self): current = self["list"].getCurrent() if current: if self.currList == "packages": if current[7] is not '': detailsfile = iSoftwareTools.directory[0] + "/" + current[1] if (os_path.exists(detailsfile) == True): self.saved_currentSelectedPackage = self.currentSelectedPackage self.session.openWithCallback(self.detailsClosed, PluginDetails, self.skin_path, current) else: self.session.open(MessageBox, _("Sorry, no details available!"), MessageBox.TYPE_INFO, timeout = 10) elif self.currList == "category": self.prepareInstall() if len(self.cmdList): self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList) def detailsClosed(self, result = None): if result is not None: if result is not False: self.setState('sync') iSoftwareTools.lastDownloadDate = time() for entry in self.selectedFiles: if entry == self.saved_currentSelectedPackage: self.selectedFiles.remove(entry) iSoftwareTools.startIpkgListInstalled(self.rebuildList) def buildEntryComponent(self, name, details, description, packagename, state, selected = False): divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png")) installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png")) installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png")) removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png")) installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png")) if state == 'installed': return((name, details, description, packagename, state, installedpng, divpng, selected)) elif state == 'installable': return((name, details, description, packagename, state, installablepng, divpng, selected)) elif state == 'remove': return((name, details, description, packagename, state, removepng, divpng, selected)) elif state == 'install': return((name, details, description, packagename, state, installpng, divpng, selected)) def buildPacketList(self, categorytag = 
None): if categorytag is not None: self.currList = "packages" self.currentSelectedTag = categorytag self.packetlist = [] for package in iSoftwareTools.packagesIndexlist[:]: prerequisites = package[0]["prerequisites"] if prerequisites.has_key("tag"): for foundtag in prerequisites["tag"]: if categorytag == foundtag: attributes = package[0]["attributes"] if attributes.has_key("packagetype"): if attributes["packagetype"] == "internal": continue self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]]) else: self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]]) self.list = [] for x in self.packetlist: status = "" name = x[0].strip() details = x[1].strip() description = x[2].strip() if not description: description = "No description available." packagename = x[3].strip() selectState = self.getSelectionState(details) if iSoftwareTools.installed_packetlist.has_key(packagename): if selectState == True: status = "remove" else: status = "installed" self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState)) else: if selectState == True: status = "install" else: status = "installable" self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState)) if len(self.list): self.list.sort(key=lambda x: x[0]) self["list"].style = "default" self['list'].setList(self.list) self["list"].updateList(self.list) self.selectionChanged() def buildCategoryList(self): self.currList = "category" self.categories = [] self.categoryList = [] for package in iSoftwareTools.packagesIndexlist[:]: prerequisites = package[0]["prerequisites"] if prerequisites.has_key("tag"): for foundtag in prerequisites["tag"]: attributes = package[0]["attributes"] if foundtag not in self.categories: self.categories.append(foundtag) self.categoryList.append(self.buildCategoryComponent(foundtag)) self.categoryList.sort(key=lambda x: x[0]) self["list"].style = "category" self['list'].setList(self.categoryList) self["list"].updateList(self.categoryList) self.selectionChanged() def buildCategoryComponent(self, tag = None): divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png")) if tag is not None: if tag == 'System': return(( _("System"), _("View list of available system extensions" ), tag, divpng )) elif tag == 'Skin': return(( _("Skins"), _("View list of available skins" ), tag, divpng )) elif tag == 'Recording': return(( _("Recordings"), _("View list of available recording extensions" ), tag, divpng )) elif tag == 'Network': return(( _("Network"), _("View list of available networking extensions" ), tag, divpng )) elif tag == 'CI': return(( _("Common Interface"), _("View list of available CommonInterface extensions" ), tag, divpng )) elif tag == 'Default': return(( _("Default settings"), _("View list of available default settings" ), tag, divpng )) elif tag == 'SAT': return(( _("Satellite equipment"), _("View list of available Satellite equipment extensions." ), tag, divpng )) elif tag == 'Software': return(( _("Software"), _("View list of available software extensions" ), tag, divpng )) elif tag == 'Multimedia': return(( _("Multimedia"), _("View list of available multimedia extensions." ), tag, divpng )) elif tag == 'Display': return(( _("Display and userinterface"), _("View list of available display and userinterface extensions." 
), tag, divpng )) elif tag == 'EPG': return(( _("Electronic Program Guide"), _("View list of available EPG extensions." ), tag, divpng )) elif tag == 'Communication': return(( _("Communication"), _("View list of available communication extensions." ), tag, divpng )) else: # dynamically generate non existent tags return(( str(tag), _("View list of available ") + str(tag) + ' ' + _("extensions." ), tag, divpng )) def prepareInstall(self): self.cmdList = [] if iSoftwareTools.available_updates > 0: self.cmdList.append((IpkgComponent.CMD_UPGRADE, { "test_only": False })) if self.selectedFiles and len(self.selectedFiles): for plugin in self.selectedFiles: detailsfile = iSoftwareTools.directory[0] + "/" + plugin[0] if (os_path.exists(detailsfile) == True): iSoftwareTools.fillPackageDetails(plugin[0]) self.package = iSoftwareTools.packageDetails[0] if self.package[0].has_key("attributes"): self.attributes = self.package[0]["attributes"] if self.attributes.has_key("needsRestart"): self.restartRequired = True if self.attributes.has_key("package"): self.packagefiles = self.attributes["package"] if plugin[1] == 'installed': if self.packagefiles: for package in self.packagefiles[:]: self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] })) else: self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] })) else: if self.packagefiles: for package in self.packagefiles[:]: self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] })) else: self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] })) else: if plugin[1] == 'installed': self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] })) else: self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] })) def runExecute(self, result = None): if result is not None: if result[0] is True: self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList) elif result[0] is False: self.cmdList = result[1] self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList) else: self.close() def runExecuteFinished(self): self.reloadPluginlist() if plugins.restartRequired or self.restartRequired: self.session.openWithCallback(self.ExecuteReboot, MessageBox, _("Install or remove finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO) else: self.selectedFiles = [] self.restartRequired = False self.detailsClosed(True) def ExecuteReboot(self, result): if result: self.session.open(TryQuitMainloop,retvalue=3) else: self.selectedFiles = [] self.restartRequired = False self.detailsClosed(True) def reloadPluginlist(self): plugins.readPluginList(resolveFilename(SCOPE_PLUGINS)) class PluginManagerInfo(Screen): skin = """ <screen name="PluginManagerInfo" position="center,center" size="560,450" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="list" render="Listbox" position="5,50" size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (50, 
0), size = (150, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap ], "fonts": [gFont("Regular", 24),gFont("Regular", 22)], "itemHeight": 50 } </convert> </widget> <ePixmap pixmap="div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" /> <widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" /> </screen>""" def __init__(self, session, plugin_path, cmdlist = None): Screen.__init__(self, session) Screen.setTitle(self, _("Plugin manager activity information")) self.session = session self.skin_path = plugin_path self.cmdlist = cmdlist self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"], { "ok": self.process_all, "back": self.exit, "red": self.exit, "green": self.process_extensions, }, -1) self.list = [] self["list"] = List(self.list) self["key_red"] = StaticText(_("Cancel")) self["key_green"] = StaticText(_("Only extensions.")) self["status"] = StaticText(_("Following tasks will be done after you press OK!")) self.onShown.append(self.setWindowTitle) self.onLayoutFinish.append(self.rebuildList) def setWindowTitle(self): self.setTitle(_("Plugin manager activity information")) def rebuildList(self): self.list = [] if self.cmdlist is not None: for entry in self.cmdlist: action = "" info = "" cmd = entry[0] if cmd == 0: action = 'install' elif cmd == 2: action = 'remove' else: action = 'upgrade' args = entry[1] if cmd == 0: info = args['package'] elif cmd == 2: info = args['package'] else: info = _("STB_BOX software because updates are available.") self.list.append(self.buildEntryComponent(action,info)) self['list'].setList(self.list) self['list'].updateList(self.list) def buildEntryComponent(self, action,info): divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png")) upgradepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png")) installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png")) removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png")) if action == 'install': return(( _('Installing'), info, installpng, divpng)) elif action == 'remove': return(( _('Removing'), info, removepng, divpng)) else: return(( _('Upgrading'), info, upgradepng, divpng)) def exit(self): self.close() def process_all(self): self.close((True,None)) def process_extensions(self): self.list = [] if self.cmdlist is not None: for entry in self.cmdlist: cmd = entry[0] if entry[0] in (0,2): self.list.append((entry)) self.close((False,self.list)) class PluginManagerHelp(Screen): skin = """ <screen name="PluginManagerHelp" position="center,center" size="560,450" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="list" render="Listbox" position="5,50" 
size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (50, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap ], "fonts": [gFont("Regular", 24),gFont("Regular", 22)], "itemHeight": 50 } </convert> </widget> <ePixmap pixmap="div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" /> <widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" /> </screen>""" def __init__(self, session, plugin_path): Screen.__init__(self, session) Screen.setTitle(self, _("Plugin manager help")) self.session = session self.skin_path = plugin_path self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"], { "back": self.exit, "red": self.exit, }, -1) self.list = [] self["list"] = List(self.list) self["key_red"] = StaticText(_("Close")) self["status"] = StaticText(_("A small overview of the available icon states and actions.")) self.onShown.append(self.setWindowTitle) self.onLayoutFinish.append(self.rebuildList) def setWindowTitle(self): self.setTitle(_("Plugin manager help")) def rebuildList(self): self.list = [] self.list.append(self.buildEntryComponent('install')) self.list.append(self.buildEntryComponent('installable')) self.list.append(self.buildEntryComponent('installed')) self.list.append(self.buildEntryComponent('remove')) self['list'].setList(self.list) self['list'].updateList(self.list) def buildEntryComponent(self, state): divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png")) installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png")) installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png")) removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png")) installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png")) if state == 'installed': return(( _('This plugin is installed.'), _('You can remove this plugin.'), installedpng, divpng)) elif state == 'installable': return(( _('This plugin is not installed.'), _('You can install this plugin.'), installablepng, divpng)) elif state == 'install': return(( _('This plugin will be installed.'), _('You can cancel the installation.'), installpng, divpng)) elif state == 'remove': return(( _('This plugin will be removed.'), _('You can cancel the removal.'), removepng, divpng)) def exit(self): self.close() class PluginDetails(Screen, PackageInfoHandler): skin = """ <screen name="PluginDetails" position="center,center" size="600,440" title="Plugin details" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" 
valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="author" render="Label" position="10,50" size="500,25" zPosition="10" font="Regular;21" transparent="1" /> <widget name="statuspic" position="550,40" size="48,48" alphatest="on"/> <widget name="divpic" position="0,80" size="600,2" alphatest="on"/> <widget name="detailtext" position="10,90" size="270,330" zPosition="10" font="Regular;21" transparent="1" halign="left" valign="top"/> <widget name="screenshot" position="290,90" size="300,330" alphatest="on"/> </screen>""" def __init__(self, session, plugin_path, packagedata = None): Screen.__init__(self, session) Screen.setTitle(self, _("Plugin details")) self.skin_path = plugin_path self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country" self.attributes = None PackageInfoHandler.__init__(self, self.statusCallback, blocking = False) self.directory = resolveFilename(SCOPE_METADIR) if packagedata: self.pluginname = packagedata[0] self.details = packagedata[1] self.pluginstate = packagedata[4] self.statuspicinstance = packagedata[5] self.divpicinstance = packagedata[6] self.fillPackageDetails(self.details) self.thumbnail = "" self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"], { "back": self.exit, "red": self.exit, "green": self.go, "up": self.pageUp, "down": self.pageDown, "left": self.pageUp, "right": self.pageDown, }, -1) self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText("") self["author"] = StaticText() self["statuspic"] = Pixmap() self["divpic"] = Pixmap() self["screenshot"] = Pixmap() self["detailtext"] = ScrollLabel() self["statuspic"].hide() self["screenshot"].hide() self["divpic"].hide() self.package = self.packageDetails[0] if self.package[0].has_key("attributes"): self.attributes = self.package[0]["attributes"] self.restartRequired = False self.cmdList = [] self.oktext = _("\nAfter pressing OK, please wait!") self.picload = ePicLoad() self.picload.PictureData.get().append(self.paintScreenshotPixmapCB) self.onShown.append(self.setWindowTitle) self.onLayoutFinish.append(self.setInfos) def setWindowTitle(self): self.setTitle(_("Details for plugin: ") + self.pluginname ) def exit(self): self.close(False) def pageUp(self): self["detailtext"].pageUp() def pageDown(self): self["detailtext"].pageDown() def statusCallback(self, status, progress): pass def setInfos(self): if self.attributes.has_key("screenshot"): self.loadThumbnail(self.attributes) if self.attributes.has_key("name"): self.pluginname = self.attributes["name"] else: self.pluginname = _("unknown") if self.attributes.has_key("author"): self.author = self.attributes["author"] else: self.author = _("unknown") if self.attributes.has_key("description"): self.description = _(self.attributes["description"].replace("\\n", "\n")) else: self.description = _("No description available.") self["author"].setText(_("Author: ") + self.author) self["detailtext"].setText(_(self.description)) if self.pluginstate in ('installable', 'install'): if iSoftwareTools.NetworkConnectionAvailable: self["key_green"].setText(_("Install")) else: self["key_green"].setText("") else: self["key_green"].setText(_("Remove")) def loadThumbnail(self, entry): thumbnailUrl = None if entry.has_key("screenshot"): thumbnailUrl = entry["screenshot"] if self.language == "de": if 
thumbnailUrl[-7:] == "_en.jpg": thumbnailUrl = thumbnailUrl[:-7] + "_de.jpg" if thumbnailUrl is not None: self.thumbnail = "/tmp/" + thumbnailUrl.split('/')[-1] print "[PluginDetails] downloading screenshot " + thumbnailUrl + " to " + self.thumbnail if iSoftwareTools.NetworkConnectionAvailable: client.downloadPage(thumbnailUrl,self.thumbnail).addCallback(self.setThumbnail).addErrback(self.fetchFailed) else: self.setThumbnail(noScreenshot = True) else: self.setThumbnail(noScreenshot = True) def setThumbnail(self, noScreenshot = False): if not noScreenshot: filename = self.thumbnail else: filename = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/noprev.png") sc = AVSwitch().getFramebufferScale() self.picload.setPara((self["screenshot"].instance.size().width(), self["screenshot"].instance.size().height(), sc[0], sc[1], False, 1, "#00000000")) self.picload.startDecode(filename) if self.statuspicinstance != None: self["statuspic"].instance.setPixmap(self.statuspicinstance.__deref__()) self["statuspic"].show() if self.divpicinstance != None: self["divpic"].instance.setPixmap(self.divpicinstance.__deref__()) self["divpic"].show() def paintScreenshotPixmapCB(self, picInfo=None): ptr = self.picload.getData() if ptr != None: self["screenshot"].instance.setPixmap(ptr.__deref__()) self["screenshot"].show() else: self.setThumbnail(noScreenshot = True) def go(self): if self.attributes.has_key("package"): self.packagefiles = self.attributes["package"] if self.attributes.has_key("needsRestart"): self.restartRequired = True self.cmdList = [] if self.pluginstate in ('installed', 'remove'): if self.packagefiles: for package in self.packagefiles[:]: self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] })) if len(self.cmdList): self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + self.pluginname + "\n" + self.oktext) else: if iSoftwareTools.NetworkConnectionAvailable: if self.packagefiles: for package in self.packagefiles[:]: self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] })) if len(self.cmdList): self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + self.pluginname + "\n" + self.oktext) def runUpgrade(self, result): if result: self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList) def runUpgradeFinished(self): self.reloadPluginlist() if plugins.restartRequired or self.restartRequired: self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Installation finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO) else: self.close(True) def UpgradeReboot(self, result): if result: self.session.open(TryQuitMainloop,retvalue=3) self.close(True) def runRemove(self, result): if result: self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList) def runRemoveFinished(self): self.close(True) def reloadPluginlist(self): plugins.readPluginList(resolveFilename(SCOPE_PLUGINS)) def fetchFailed(self,string): self.setThumbnail(noScreenshot = True) print "[PluginDetails] fetch failed " + string.getErrorMessage() class IPKGMenu(Screen): skin = """ <screen name="IPKGMenu" position="center,center" size="560,400" title="Select upgrade source to edit." 
> <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget name="filelist" position="5,50" size="550,340" scrollbarMode="showOnDemand" /> </screen>""" def __init__(self, session, plugin_path): Screen.__init__(self, session) Screen.setTitle(self, _("Select upgrade source to edit.")) self.skin_path = plugin_path self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText(_("Edit")) self.sel = [] self.val = [] self.entry = False self.exe = False self.path = "" self["actions"] = NumberActionMap(["SetupActions"], { "ok": self.KeyOk, "cancel": self.keyCancel }, -1) self["shortcuts"] = ActionMap(["ShortcutActions"], { "red": self.keyCancel, "green": self.KeyOk, }) self["filelist"] = MenuList([]) self.fill_list() self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): self.setWindowTitle() def setWindowTitle(self): self.setTitle(_("Select upgrade source to edit.")) def fill_list(self): flist = [] self.path = '/etc/opkg/' if (os_path.exists(self.path) == False): self.entry = False return for file in listdir(self.path): if file.endswith(".conf"): if file not in ('arch.conf', 'opkg.conf'): flist.append((file)) self.entry = True self["filelist"].l.setList(flist) def KeyOk(self): if (self.exe == False) and (self.entry == True): self.sel = self["filelist"].getCurrent() self.val = self.path + self.sel self.session.open(IPKGSource, self.val) def keyCancel(self): self.close() def Exit(self): self.close() class IPKGSource(Screen): skin = """ <screen name="IPKGSource" position="center,center" size="560,80" title="Edit upgrade source url." 
> <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget name="text" position="5,50" size="550,25" font="Regular;20" backgroundColor="background" foregroundColor="#cccccc" /> </screen>""" def __init__(self, session, configfile = None): Screen.__init__(self, session) self.session = session self.configfile = configfile text = "" if self.configfile: try: fp = file(configfile, 'r') sources = fp.readlines() if sources: text = sources[0] fp.close() except IOError: pass desk = getDesktop(0) x= int(desk.size().width()) y= int(desk.size().height()) self["key_red"] = StaticText(_("Cancel")) self["key_green"] = StaticText(_("Save")) if (y>=720): self["text"] = Input(text, maxSize=False, type=Input.TEXT) else: self["text"] = Input(text, maxSize=False, visible_width = 55, type=Input.TEXT) self["actions"] = NumberActionMap(["WizardActions", "InputActions", "TextEntryActions", "KeyboardInputActions","ShortcutActions"], { "ok": self.go, "back": self.close, "red": self.close, "green": self.go, "left": self.keyLeft, "right": self.keyRight, "home": self.keyHome, "end": self.keyEnd, "deleteForward": self.keyDeleteForward, "deleteBackward": self.keyDeleteBackward, "1": self.keyNumberGlobal, "2": self.keyNumberGlobal, "3": self.keyNumberGlobal, "4": self.keyNumberGlobal, "5": self.keyNumberGlobal, "6": self.keyNumberGlobal, "7": self.keyNumberGlobal, "8": self.keyNumberGlobal, "9": self.keyNumberGlobal, "0": self.keyNumberGlobal }, -1) self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): self.setWindowTitle() self["text"].right() def setWindowTitle(self): self.setTitle(_("Edit upgrade source url.")) def go(self): text = self["text"].getText() if text: fp = file(self.configfile, 'w') fp.write(text) fp.write("\n") fp.close() self.close() def keyLeft(self): self["text"].left() def keyRight(self): self["text"].right() def keyHome(self): self["text"].home() def keyEnd(self): self["text"].end() def keyDeleteForward(self): self["text"].delete() def keyDeleteBackward(self): self["text"].deleteBackward() def keyNumberGlobal(self, number): self["text"].number(number) class PacketManager(Screen, NumericalTextInput): skin = """ <screen name="PacketManager" position="center,center" size="530,420" title="Packet manager" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="list" render="Listbox" position="5,50" size="520,365" scrollbarMode="showOnDemand"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryText(pos = (5, 1), size = (440, 28), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name MultiContentEntryText(pos = 
(5, 26), size = (440, 20), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description MultiContentEntryPixmapAlphaTest(pos = (445, 2), size = (48, 48), png = 4), # index 4 is the status pixmap MultiContentEntryPixmapAlphaTest(pos = (5, 50), size = (510, 2), png = 5), # index 4 is the div pixmap ], "fonts": [gFont("Regular", 22),gFont("Regular", 14)], "itemHeight": 52 } </convert> </widget> </screen>""" def __init__(self, session, plugin_path, args = None): Screen.__init__(self, session) NumericalTextInput.__init__(self) self.session = session self.skin_path = plugin_path if config.usage.show_channel_jump_in_servicelist.getValue() == "alpha": self.setUseableChars(u'abcdefghijklmnopqrstuvwxyz1234567890') else: self.setUseableChars(u'1234567890abcdefghijklmnopqrstuvwxyz') self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "NumberActions", "InputActions", "InputAsciiActions", "KeyboardInputActions" ], { "ok": self.go, "back": self.exit, "red": self.exit, "green": self.reload, "gotAsciiCode": self.keyGotAscii, "1": self.keyNumberGlobal, "2": self.keyNumberGlobal, "3": self.keyNumberGlobal, "4": self.keyNumberGlobal, "5": self.keyNumberGlobal, "6": self.keyNumberGlobal, "7": self.keyNumberGlobal, "8": self.keyNumberGlobal, "9": self.keyNumberGlobal, "0": self.keyNumberGlobal }, -1) self.list = [] self.statuslist = [] self["list"] = List(self.list) self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText(_("Reload")) self.list_updating = True self.packetlist = [] self.installed_packetlist = {} self.upgradeable_packages = {} self.Console = Console() self.cmdList = [] self.cachelist = [] self.cache_ttl = 86400 #600 is default, 0 disables, Seconds cache is considered valid (24h should be ok for caching ipkgs) self.cache_file = eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/SoftwareManager/packetmanager.cache') #Path to cache directory self.oktext = _("\nAfter pressing OK, please wait!") self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', 'busybox') self.ipkg = IpkgComponent()<|fim▁hole|> self.onLayoutFinish.append(self.rebuildList) rcinput = eRCInput.getInstance() if config.misc.remotecontrol_text_support.getValue(): rcinput.setKeyboardMode(rcinput.kmNone) else: rcinput.setKeyboardMode(rcinput.kmAscii) def keyNumberGlobal(self, val): key = self.getKey(val) if key is not None: keyvalue = key.encode("utf-8") if len(keyvalue) == 1: self.setNextIdx(keyvalue[0]) def keyGotAscii(self): keyvalue = unichr(getPrevAsciiCode()).encode("utf-8") if len(keyvalue) == 1: self.setNextIdx(keyvalue[0]) def setNextIdx(self,char): if char in ("0", "1", "a"): self["list"].setIndex(0) else: idx = self.getNextIdx(char) if idx and idx <= self["list"].count: self["list"].setIndex(idx) def getNextIdx(self,char): for idx, i in enumerate(self["list"].list): if i[0] and (i[0][0] == char): return idx def exit(self): self.ipkg.stop() if self.Console is not None: if len(self.Console.appContainers): for name in self.Console.appContainers.keys(): self.Console.kill(name) rcinput = eRCInput.getInstance() rcinput.setKeyboardMode(rcinput.kmNone) self.close() def reload(self): if (os_path.exists(self.cache_file) == True): remove(self.cache_file) self.list_updating = True self.rebuildList() def setWindowTitle(self): self.setTitle(_("Packet manager")) def setStatus(self,status = None): if status: self.statuslist = [] divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png")) if status == 'update': statuspng = LoadPixmap(cached=True, 
path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png")) self.statuslist.append(( _("Package list update"), '', _("Trying to download a new packetlist. Please wait..." ),'',statuspng, divpng )) self['list'].setList(self.statuslist) elif status == 'error': statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png")) self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." ),'',statuspng, divpng )) self['list'].setList(self.statuslist) def rebuildList(self): self.setStatus('update') self.inv_cache = 0 self.vc = valid_cache(self.cache_file, self.cache_ttl) if self.cache_ttl > 0 and self.vc != 0: try: self.buildPacketList() except: self.inv_cache = 1 if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0: self.run = 0 self.ipkg.startCmd(IpkgComponent.CMD_UPDATE) def go(self, returnValue = None): cur = self["list"].getCurrent() if cur: status = cur[3] package = cur[0] self.cmdList = [] if status == 'installed': self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package })) if len(self.cmdList): self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + package + "\n" + self.oktext) elif status == 'upgradeable': self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package })) if len(self.cmdList): self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to upgrade the package:\n") + package + "\n" + self.oktext) elif status == "installable": self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package })) if len(self.cmdList): self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + package + "\n" + self.oktext) def runRemove(self, result): if result: self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList) def runRemoveFinished(self): self.session.openWithCallback(self.RemoveReboot, MessageBox, _("Remove finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO) def RemoveReboot(self, result): if result is None: return if result is False: cur = self["list"].getCurrent() if cur: item = self['list'].getIndex() self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installable') self.cachelist[item] = [cur[0], cur[1], cur[2], 'installable'] self['list'].setList(self.list) write_cache(self.cache_file, self.cachelist) self.reloadPluginlist() if result: self.session.open(TryQuitMainloop,retvalue=3) def runUpgrade(self, result): if result: self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList) def runUpgradeFinished(self): self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Upgrade finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO) def UpgradeReboot(self, result): if result is None: return if result is False: cur = self["list"].getCurrent() if cur: item = self['list'].getIndex() self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installed') self.cachelist[item] = [cur[0], cur[1], cur[2], 'installed'] self['list'].setList(self.list) write_cache(self.cache_file, self.cachelist) self.reloadPluginlist() if result: self.session.open(TryQuitMainloop,retvalue=3) def ipkgCallback(self, event, param): if event == IpkgComponent.EVENT_ERROR: self.list_updating = False self.setStatus('error') elif event == IpkgComponent.EVENT_DONE: if self.list_updating: self.list_updating 
= False if not self.Console: self.Console = Console() cmd = self.ipkg.ipkg + " list" self.Console.ePopen(cmd, self.IpkgList_Finished) pass def IpkgList_Finished(self, result, retval, extra_args = None): result = result.replace('\n ',' - ') if result: self.packetlist = [] last_name = "" for x in result.splitlines(): tokens = x.split(' - ') name = tokens[0].strip() if not any((name.endswith(x) or name.find('locale') != -1) for x in self.unwanted_extensions): l = len(tokens) version = l > 1 and tokens[1].strip() or "" descr = l > 3 and tokens[3].strip() or l > 2 and tokens[2].strip() or "" if name == last_name: continue last_name = name self.packetlist.append([name, version, descr]) if not self.Console: self.Console = Console() cmd = self.ipkg.ipkg + " list_installed" self.Console.ePopen(cmd, self.IpkgListInstalled_Finished) def IpkgListInstalled_Finished(self, result, retval, extra_args = None): if result: self.installed_packetlist = {} for x in result.splitlines(): tokens = x.split(' - ') name = tokens[0].strip() if not any(name.endswith(x) for x in self.unwanted_extensions): l = len(tokens) version = l > 1 and tokens[1].strip() or "" self.installed_packetlist[name] = version if not self.Console: self.Console = Console() cmd = "opkg list-upgradable" self.Console.ePopen(cmd, self.OpkgListUpgradeable_Finished) def OpkgListUpgradeable_Finished(self, result, retval, extra_args = None): if result: self.upgradeable_packages = {} for x in result.splitlines(): tokens = x.split(' - ') name = tokens[0].strip() if not any(name.endswith(x) for x in self.unwanted_extensions): l = len(tokens) version = l > 2 and tokens[2].strip() or "" self.upgradeable_packages[name] = version self.buildPacketList() def buildEntryComponent(self, name, version, description, state): divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png")) if not description: description = "No description available." 
if state == 'installed': installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png")) return((name, version, _(description), state, installedpng, divpng)) elif state == 'upgradeable': upgradeablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgradeable.png")) return((name, version, _(description), state, upgradeablepng, divpng)) else: installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png")) return((name, version, _(description), state, installablepng, divpng)) def buildPacketList(self): self.list = [] self.cachelist = [] if self.cache_ttl > 0 and self.vc != 0: print 'Loading packagelist cache from ',self.cache_file try: self.cachelist = load_cache(self.cache_file) if len(self.cachelist) > 0: for x in self.cachelist: self.list.append(self.buildEntryComponent(x[0], x[1], x[2], x[3])) self['list'].setList(self.list) except: self.inv_cache = 1 if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0: print 'rebuilding fresh package list' for x in self.packetlist: status = "" if self.installed_packetlist.has_key(x[0]): if self.upgradeable_packages.has_key(x[0]): status = "upgradeable" else: status = "installed" else: status = "installable" self.list.append(self.buildEntryComponent(x[0], x[1], x[2], status)) self.cachelist.append([x[0], x[1], x[2], status]) write_cache(self.cache_file, self.cachelist) self['list'].setList(self.list) def reloadPluginlist(self): plugins.readPluginList(resolveFilename(SCOPE_PLUGINS)) class IpkgInstaller(Screen): skin = """ <screen name="IpkgInstaller" position="center,center" size="550,450" title="Install extensions" > <ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" /> <ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" /> <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" /> <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" /> <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" /> <widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" /> <widget name="list" position="5,50" size="540,360" /> <ePixmap pixmap="div-h.png" position="0,410" zPosition="10" size="560,2" transparent="1" alphatest="on" /> <widget source="introduction" render="Label" position="5,420" zPosition="10" size="550,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" /> </screen>""" def __init__(self, session, list): Screen.__init__(self, session) self.list = SelectionList() self["list"] = self.list for listindex in range(len(list)): self.list.addSelection(list[listindex], list[listindex], listindex, False) self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText(_("Install")) self["key_yellow"] = 
StaticText() self["key_blue"] = StaticText(_("Invert")) self["introduction"] = StaticText(_("Press OK to toggle the selection.")) self["actions"] = ActionMap(["OkCancelActions", "ColorActions"], { "ok": self.list.toggleSelection, "cancel": self.close, "red": self.close, "green": self.install, "blue": self.list.toggleAllSelection }, -1) def install(self): list = self.list.getSelectionsList() cmdList = [] for item in list: cmdList.append((IpkgComponent.CMD_INSTALL, { "package": item[1] })) self.session.open(Ipkg, cmdList = cmdList) def filescan_open(list, session, **kwargs): filelist = [x.path for x in list] session.open(IpkgInstaller, filelist) # list def filescan(**kwargs): from Components.Scanner import Scanner, ScanPath return \ Scanner(mimetypes = ["application/x-debian-package"], paths_to_scan = [ ScanPath(path = "ipk", with_subdirs = True), ScanPath(path = "", with_subdirs = False), ], name = "Ipkg", description = _("Install extensions."), openfnc = filescan_open, ) def UpgradeMain(session, **kwargs): session.open(UpdatePluginMenu) def startSetup(menuid): if menuid == "setup" and config.plugins.softwaremanager.onSetupMenu.getValue(): return [(_("Software management"), UpgradeMain, "software_manager", 50)] return [ ] def Plugins(path, **kwargs): global plugin_path plugin_path = path list = [ PluginDescriptor(name=_("Software management"), description=_("Manage your STB_BOX's software"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=startSetup), PluginDescriptor(name=_("Ipkg"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan) ] if not config.plugins.softwaremanager.onSetupMenu.getValue() and not config.plugins.softwaremanager.onBlueButton.getValue(): list.append(PluginDescriptor(name=_("Software management"), description=_("Manage your STB_BOX's software"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=UpgradeMain)) if config.plugins.softwaremanager.onBlueButton.getValue(): list.append(PluginDescriptor(name=_("Software management"), description=_("Manage your STB_BOX's software"), where = PluginDescriptor.WHERE_EXTENSIONSMENU, needsRestart = False, fnc=UpgradeMain)) return list<|fim▁end|>
self.ipkg.addCallback(self.ipkgCallback)
		self.onShown.append(self.setWindowTitle)
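# A minimal, self-contained sketch of the callback pattern the PacketManager
# record above relies on: create the component, register a listener with
# addCallback() (exactly what the completion above does), then react to
# EVENT_ERROR/EVENT_DONE and chain the next command. The Ipkg class below is a
# hypothetical stand-in for enigma2's IpkgComponent, reduced to what the flow
# needs; it is not the real API.
class Ipkg(object):
    EVENT_ERROR, EVENT_DONE = range(2)
    CMD_UPDATE = 0

    def __init__(self):
        self.callbacks = []

    def addCallback(self, callback):
        self.callbacks.append(callback)

    def startCmd(self, cmd):
        # The real component shells out to opkg asynchronously; this stub
        # just reports completion so the callback chain can be followed.
        for callback in self.callbacks:
            callback(self.EVENT_DONE, None)


class PacketListSketch(object):
    def __init__(self):
        self.list_updating = True
        self.ipkg = Ipkg()
        self.ipkg.addCallback(self.ipkgCallback)
        self.ipkg.startCmd(Ipkg.CMD_UPDATE)

    def ipkgCallback(self, event, param):
        if event == Ipkg.EVENT_ERROR:
            self.list_updating = False  # surface the failure in the UI
        elif event == Ipkg.EVENT_DONE and self.list_updating:
            self.list_updating = False  # chain "ipkg list" etc. here


PacketListSketch()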
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! This module provides rust bindings for the XPCOM string types. //! //! # TL;DR (what types should I use) //! //! Use `&{mut,} nsA[C]String` for functions in rust which wish to take or //! mutate XPCOM strings. The other string types `Deref` to this type. //! //! Use `ns[C]String<'a>` (`ns[C]String` in C++) for string struct members, and //! as an intermediate between rust string data structures (such as `String`, //! `Vec<u16>`, `&str`, and `&[u16]`) and `&{mut,} nsA[C]String` (using //! `ns[C]String::from(value)`). These conversions, when possible, will not //! perform any allocations. When using this type in structs shared with C++, //! the correct lifetime argument is usually `'static`. //! //! Use `nsFixed[C]String` or `ns_auto_[c]string!` for dynamic stack allocated //! strings which are expected to hold short string values. //! //! Use `*{const,mut} nsA[C]String` (`{const,} nsA[C]String*` in C++) for //! function arguments passed across the rust/C++ language boundary. //! //! # String Types //! //! ## `nsA[C]String` //! //! The core types in this module are `nsAString` and `nsACString`. These types //! are zero-sized as far as rust is concerned, and are safe to pass around //! behind both references (in rust code), and pointers (in C++ code). They //! represent a handle to a XPCOM string which holds either `u16` or `u8` //! characters respectively. The backing character buffer is guaranteed to live //! as long as the reference to the `nsAString` or `nsACString`. //! //! These types in rust are simply used as dummy types. References to them //! represent a pointer to the beginning of a variable-sized `#[repr(C)]` struct //! which is common between both C++ and Rust implementations. In C++, their //! corresponding types are also named `nsAString` or `nsACString`, and they are //! defined within the `nsTSubstring.{cpp,h}` file. //! //! ### Valid Operations //! //! An `&nsA[C]String` acts like rust's `&str`, in that it is a borrowed //! reference to the backing data. When used as an argument to other functions //! on `&mut nsA[C]String`, optimizations can be performed to avoid copying //! buffers, as information about the backing storage is preserved. //! //! An `&mut nsA[C]String` acts like rust's `&mut Cow<str>`, in that it is a //! mutable reference to a potentially borrowed string, which when modified will //! ensure that it owns its own backing storage. This type can be appended to //! with the methods `.append`, `.append_utf{8,16}`, and with the `write!` //! macro, and can be assigned to with `.assign`. //! //! ## `ns[C]String<'a>` //! //! This type is an maybe-owned string type. It acts similarially to a //! `Cow<[{u8,u16}]>`. This type provides `Deref` and `DerefMut` implementations //! to `nsA[C]String`, which provides the methods for manipulating this type. //! This type's lifetime parameter, `'a`, represents the lifetime of the backing //! storage. When modified this type may re-allocate in order to ensure that it //! does not mutate its backing storage. //! //! `ns[C]String`s can be constructed either with `ns[C]String::new()`, which //! creates an empty `ns[C]String<'static>`, or through one of the provided //! `From` implementations. Both string types may be constructed `From<&'a //! 
str>`, with `nsCString` having a `'a` lifetime, as the storage is shared //! with the `str`, while `nsString` has a `'static` lifetime, as its storage //! has to be transcoded. //! //! When passing this type by reference, prefer passing a `&nsA[C]String` or //! `&mut nsA[C]String`. to passing this type. //! //! When passing this type across the language boundary, pass it as `*const //! nsA[C]String` for an immutable reference, or `*mut nsA[C]String` for a //! mutable reference. This struct may also be included in `#[repr(C)]` //! structs shared with C++. //! //! ## `nsFixed[C]String<'a>` //! //! This type is a string type with fixed backing storage. It is created with //! `nsFixed[C]String::new(buffer)`, passing a mutable reference to a buffer as //! the argument. This buffer will be used as backing storage whenever the //! resulting string will fit within it, falling back to heap allocations only //! when the string size exceeds that of the backing buffer. //! //! Like `ns[C]String`, this type dereferences to `nsA[C]String` which provides //! the methods for manipulating the type, and is not `#[repr(C)]`. //! //! When passing this type by reference, prefer passing a `&nsA[C]String` or //! `&mut nsA[C]String`. to passing this type. //! //! When passing this type across the language boundary, pass it as `*const //! nsA[C]String` for an immutable reference, or `*mut nsA[C]String` for a //! mutable reference. This struct may also be included in `#[repr(C)]` //! structs shared with C++, although `nsFixed[C]String` objects are uncommon //! as struct members. //! //! ## `ns_auto_[c]string!($name)` //! //! This is a helper macro which defines a fixed size, (currently 64 character), //! backing array on the stack, and defines a local variable with name `$name` //! which is a `nsFixed[C]String` using this buffer as its backing storage. //! //! Usage of this macro is similar to the C++ type `nsAuto[C]String`, but could //! not be implemented as a basic type due to the differences between rust and //! C++'s move semantics. //! //! ## `ns[C]StringRepr` //! //! This crate also provides the type `ns[C]StringRepr` which acts conceptually //! similar to an `ns[C]String<'static>`, however, it does not have a `Drop` //! implementation. //! //! If this type is dropped in rust, it will not free its backing storage. This //! can be useful when implementing FFI types which contain `ns[C]String` members //! which invoke their member's destructors through C++ code. #![allow(non_camel_case_types)] #![deny(warnings)] use std::ops::{Deref, DerefMut}; use std::marker::PhantomData; use std::slice; use std::ptr; use std::mem; use std::fmt; use std::cmp; use std::str; use std::u32; use std::os::raw::c_void; ////////////////////////////////// // Internal Implemenation Flags // ////////////////////////////////// const F_NONE: u32 = 0; // no flags // data flags are in the lower 16-bits const F_OWNED: u32 = 1 << 3; // mData points to a heap-allocated, raw buffer // class flags are in the upper 16-bits const F_CLASS_FIXED: u32 = 1 << 16; // indicates that |this| is of type nsTFixedString //////////////////////////////////// // Generic String Bindings Macros // //////////////////////////////////// macro_rules! define_string_types { { char_t = $char_t: ty; AString = $AString: ident; String = $String: ident; FixedString = $FixedString: ident; StringRepr = $StringRepr: ident; } => { /// The representation of a ns[C]String type in C++. 
This type is /// used internally by our definition of ns[C]String to ensure layout /// compatibility with the C++ ns[C]String type. /// /// This type may also be used in place of a C++ ns[C]String inside of /// struct definitions which are shared with C++, as it has identical /// layout to our ns[C]String type. /// /// This struct will leak its data if dropped from rust. See the module /// documentation for more information on this type. #[repr(C)] #[derive(Debug)] pub struct $StringRepr { data: *const $char_t, length: u32, flags: u32, } impl Deref for $StringRepr { type Target = $AString; fn deref(&self) -> &$AString { unsafe { mem::transmute(self) } } } impl DerefMut for $StringRepr { fn deref_mut(&mut self) -> &mut $AString { unsafe { mem::transmute(self) } } } /// This type is the abstract type which is used for interacting with /// strings in rust. Each string type can derefence to an instance of /// this type, which provides the useful operations on strings. /// /// NOTE: Rust thinks this type has a size of 0, because the data /// associated with it is not necessarially safe to move. It is not safe /// to construct a nsAString yourself, unless it is received by /// dereferencing one of these types. /// /// NOTE: The `[u8; 0]` member is zero sized, and only exists to prevent /// the construction by code outside of this module. It is used instead /// of a private `()` member because the `improper_ctypes` lint complains /// about some ZST members in `extern "C"` function declarations. #[repr(C)] pub struct $AString { _prohibit_constructor: [u8; 0], } impl Deref for $AString { type Target = [$char_t]; fn deref(&self) -> &[$char_t] { unsafe { // All $AString values point to a struct prefix which is // identical to $StringRepr, this we can transmute `self` // into $StringRepr to get the reference to the underlying // data. let this: &$StringRepr = mem::transmute(self); if this.data.is_null() { debug_assert!(this.length == 0); // Use an arbitrary non-null value as the pointer slice::from_raw_parts(0x1 as *const $char_t, 0) } else { slice::from_raw_parts(this.data, this.length as usize) } } } } impl AsRef<[$char_t]> for $AString { fn as_ref(&self) -> &[$char_t] { self } } impl cmp::PartialEq for $AString { fn eq(&self, other: &$AString) -> bool { &self[..] == &other[..] } } impl cmp::PartialEq<[$char_t]> for $AString { fn eq(&self, other: &[$char_t]) -> bool { &self[..] 
== other } } impl<'a> cmp::PartialEq<$String<'a>> for $AString { fn eq(&self, other: &$String<'a>) -> bool { self.eq(&**other) } } impl<'a> cmp::PartialEq<$FixedString<'a>> for $AString { fn eq(&self, other: &$FixedString<'a>) -> bool { self.eq(&**other) } } #[repr(C)] pub struct $String<'a> { hdr: $StringRepr, _marker: PhantomData<&'a [$char_t]>, } impl $String<'static> { pub fn new() -> $String<'static> { $String { hdr: $StringRepr { data: ptr::null(), length: 0, flags: F_NONE, }, _marker: PhantomData, } } } impl<'a> Deref for $String<'a> { type Target = $AString; fn deref(&self) -> &$AString { &self.hdr } } impl<'a> DerefMut for $String<'a> { fn deref_mut(&mut self) -> &mut $AString { &mut self.hdr } } impl<'a> AsRef<[$char_t]> for $String<'a> { fn as_ref(&self) -> &[$char_t] { &self } } impl<'a> From<&'a String> for $String<'a> { fn from(s: &'a String) -> $String<'a> { $String::from(&s[..]) } } impl<'a> From<&'a Vec<$char_t>> for $String<'a> { fn from(s: &'a Vec<$char_t>) -> $String<'a> { $String::from(&s[..]) } } impl<'a> From<&'a [$char_t]> for $String<'a> { fn from(s: &'a [$char_t]) -> $String<'a> { assert!(s.len() < (u32::MAX as usize)); $String { hdr: $StringRepr { data: if s.is_empty() { ptr::null() } else { s.as_ptr()}, length: s.len() as u32, flags: F_NONE, }, _marker: PhantomData, } } } impl From<Box<[$char_t]>> for $String<'static> { fn from(s: Box<[$char_t]>) -> $String<'static> { assert!(s.len() < (u32::MAX as usize)); if s.is_empty() { return $String::new(); } // SAFETY NOTE: This method produces an F_OWNED ns[C]String from // a Box<[$char_t]>. this is only safe because in the Gecko // tree, we use the same allocator for Rust code as for C++ // code, meaning that our box can be legally freed with // libc::free(). let length = s.len() as u32; let ptr = s.as_ptr(); mem::forget(s); unsafe { Gecko_IncrementStringAdoptCount(ptr as *mut _); } $String { hdr: $StringRepr { data: ptr, length: length, flags: F_OWNED, }, _marker: PhantomData, } } } impl From<Vec<$char_t>> for $String<'static> { fn from(s: Vec<$char_t>) -> $String<'static> { s.into_boxed_slice().into() } } impl<'a> From<&'a $AString> for $String<'static> { fn from(s: &'a $AString) -> $String<'static> { let mut string = $String::new(); string.assign(s); string } } impl<'a> fmt::Write for $String<'a> { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { $AString::write_str(self, s) } } impl<'a> fmt::Display for $String<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { <$AString as fmt::Display>::fmt(self, f) } } impl<'a> fmt::Debug for $String<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { <$AString as fmt::Debug>::fmt(self, f) } } impl<'a> cmp::PartialEq for $String<'a> { fn eq(&self, other: &$String<'a>) -> bool { $AString::eq(self, other) } } impl<'a> cmp::PartialEq<[$char_t]> for $String<'a> { fn eq(&self, other: &[$char_t]) -> bool { $AString::eq(self, other) } } impl<'a, 'b> cmp::PartialEq<&'b [$char_t]> for $String<'a> { fn eq(&self, other: &&'b [$char_t]) -> bool { $AString::eq(self, *other) }<|fim▁hole|> fn eq(&self, other: &str) -> bool { $AString::eq(self, other) } } impl<'a, 'b> cmp::PartialEq<&'b str> for $String<'a> { fn eq(&self, other: &&'b str) -> bool { $AString::eq(self, *other) } } /// A nsFixed[C]String is a string which uses a fixed size mutable /// backing buffer for storing strings which will fit within that /// buffer, rather than using heap allocations. 
#[repr(C)] pub struct $FixedString<'a> { base: $String<'a>, capacity: u32, buffer: *mut $char_t, _marker: PhantomData<&'a mut [$char_t]>, } impl<'a> $FixedString<'a> { pub fn new(buf: &'a mut [$char_t]) -> $FixedString<'a> { let len = buf.len(); assert!(len < (u32::MAX as usize)); let buf_ptr = buf.as_mut_ptr(); $FixedString { base: $String { hdr: $StringRepr { data: ptr::null(), length: 0, flags: F_CLASS_FIXED, }, _marker: PhantomData, }, capacity: len as u32, buffer: buf_ptr, _marker: PhantomData, } } } impl<'a> Deref for $FixedString<'a> { type Target = $AString; fn deref(&self) -> &$AString { &self.base } } impl<'a> DerefMut for $FixedString<'a> { fn deref_mut(&mut self) -> &mut $AString { &mut self.base } } impl<'a> AsRef<[$char_t]> for $FixedString<'a> { fn as_ref(&self) -> &[$char_t] { &self } } impl<'a> fmt::Write for $FixedString<'a> { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { $AString::write_str(self, s) } } impl<'a> fmt::Display for $FixedString<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { <$AString as fmt::Display>::fmt(self, f) } } impl<'a> fmt::Debug for $FixedString<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { <$AString as fmt::Debug>::fmt(self, f) } } impl<'a> cmp::PartialEq for $FixedString<'a> { fn eq(&self, other: &$FixedString<'a>) -> bool { $AString::eq(self, other) } } impl<'a> cmp::PartialEq<[$char_t]> for $FixedString<'a> { fn eq(&self, other: &[$char_t]) -> bool { $AString::eq(self, other) } } impl<'a, 'b> cmp::PartialEq<&'b [$char_t]> for $FixedString<'a> { fn eq(&self, other: &&'b [$char_t]) -> bool { $AString::eq(self, *other) } } impl<'a> cmp::PartialEq<str> for $FixedString<'a> { fn eq(&self, other: &str) -> bool { $AString::eq(self, other) } } impl<'a, 'b> cmp::PartialEq<&'b str> for $FixedString<'a> { fn eq(&self, other: &&'b str) -> bool { $AString::eq(self, *other) } } } } /////////////////////////////////////////// // Bindings for nsCString (u8 char type) // /////////////////////////////////////////// define_string_types! 
{ char_t = u8; AString = nsACString; String = nsCString; FixedString = nsFixedCString; StringRepr = nsCStringRepr; } impl nsACString { pub fn assign<T: AsRef<[u8]> + ?Sized>(&mut self, other: &T) { let s = nsCString::from(other.as_ref()); unsafe { Gecko_AssignCString(self, &*s); } } pub fn assign_utf16<T: AsRef<[u16]> + ?Sized>(&mut self, other: &T) { self.assign(&[]); self.append_utf16(other); } pub fn append<T: AsRef<[u8]> + ?Sized>(&mut self, other: &T) { let s = nsCString::from(other.as_ref()); unsafe { Gecko_AppendCString(self, &*s); } } pub fn append_utf16<T: AsRef<[u16]> + ?Sized>(&mut self, other: &T) { let s = nsString::from(other.as_ref()); unsafe { Gecko_AppendUTF16toCString(self, &*s); } } pub unsafe fn as_str_unchecked(&self) -> &str { str::from_utf8_unchecked(self) } pub fn truncate(&mut self) { unsafe { Gecko_TruncateCString(self); } } } impl<'a> Drop for nsCString<'a> { fn drop(&mut self) { unsafe { Gecko_FinalizeCString(&mut **self); } } } impl<'a> From<&'a str> for nsCString<'a> { fn from(s: &'a str) -> nsCString<'a> { s.as_bytes().into() } } impl From<Box<str>> for nsCString<'static> { fn from(s: Box<str>) -> nsCString<'static> { s.into_string().into() } } impl From<String> for nsCString<'static> { fn from(s: String) -> nsCString<'static> { s.into_bytes().into() } } // Support for the write!() macro for appending to nsACStrings impl fmt::Write for nsACString { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.append(&nsCString::from(s)); Ok(()) } } impl fmt::Display for nsACString { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fmt::Display::fmt(&String::from_utf8_lossy(&self[..]), f) } } impl fmt::Debug for nsACString { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fmt::Debug::fmt(&String::from_utf8_lossy(&self[..]), f) } } impl cmp::PartialEq<str> for nsACString { fn eq(&self, other: &str) -> bool { &self[..] == other.as_bytes() } } #[macro_export] macro_rules! ns_auto_cstring { ($name:ident) => { let mut buf: [u8; 64] = [0; 64]; let mut $name = $crate::nsFixedCString::new(&mut buf); } } /////////////////////////////////////////// // Bindings for nsString (u16 char type) // /////////////////////////////////////////// define_string_types! { char_t = u16; AString = nsAString; String = nsString; FixedString = nsFixedString; StringRepr = nsStringRepr; } impl nsAString { pub fn assign<T: AsRef<[u16]> + ?Sized>(&mut self, other: &T) { let s = nsString::from(other.as_ref()); unsafe { Gecko_AssignString(self, &*s); } } pub fn assign_utf8<T: AsRef<[u8]> + ?Sized>(&mut self, other: &T) { self.assign(&[]); self.append_utf8(other); } pub fn append<T: AsRef<[u16]> + ?Sized>(&mut self, other: &T) { let s = nsString::from(other.as_ref()); unsafe { Gecko_AppendString(self, &*s); } } pub fn append_utf8<T: AsRef<[u8]> + ?Sized>(&mut self, other: &T) { let s = nsCString::from(other.as_ref()); unsafe { Gecko_AppendUTF8toString(self, &*s); } } pub fn truncate(&mut self) { unsafe { Gecko_TruncateString(self); } } } impl<'a> Drop for nsString<'a> { fn drop(&mut self) { unsafe { Gecko_FinalizeString(&mut **self); } } } // NOTE: The From impl for a string slice for nsString produces a <'static> // lifetime, as it allocates. 
impl<'a> From<&'a str> for nsString<'static> { fn from(s: &'a str) -> nsString<'static> { s.encode_utf16().collect::<Vec<u16>>().into() } } // Support for the write!() macro for writing to nsStrings impl fmt::Write for nsAString { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { // Directly invoke gecko's routines for appending utf8 strings to // nsAString values, to avoid as much overhead as possible self.append_utf8(&nsCString::from(s)); Ok(()) } } impl fmt::Display for nsAString { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fmt::Display::fmt(&String::from_utf16_lossy(&self[..]), f) } } impl fmt::Debug for nsAString { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fmt::Debug::fmt(&String::from_utf16_lossy(&self[..]), f) } } impl cmp::PartialEq<str> for nsAString { fn eq(&self, other: &str) -> bool { other.encode_utf16().eq(self.iter().cloned()) } } #[macro_export] macro_rules! ns_auto_string { ($name:ident) => { let mut buf: [u16; 64] = [0; 64]; let mut $name = $crate::nsFixedString::new(&mut buf); } } #[cfg(not(debug_assertions))] #[allow(non_snake_case)] unsafe fn Gecko_IncrementStringAdoptCount(_: *mut c_void) {} // NOTE: These bindings currently only expose infallible operations. Perhaps // consider allowing for fallible methods? extern "C" { #[cfg(debug_assertions)] fn Gecko_IncrementStringAdoptCount(data: *mut c_void); // Gecko implementation in nsSubstring.cpp fn Gecko_FinalizeCString(this: *mut nsACString); fn Gecko_AssignCString(this: *mut nsACString, other: *const nsACString); fn Gecko_AppendCString(this: *mut nsACString, other: *const nsACString); fn Gecko_TruncateCString(this: *mut nsACString); fn Gecko_FinalizeString(this: *mut nsAString); fn Gecko_AssignString(this: *mut nsAString, other: *const nsAString); fn Gecko_AppendString(this: *mut nsAString, other: *const nsAString); fn Gecko_TruncateString(this: *mut nsAString); // Gecko implementation in nsReadableUtils.cpp fn Gecko_AppendUTF16toCString(this: *mut nsACString, other: *const nsAString); fn Gecko_AppendUTF8toString(this: *mut nsAString, other: *const nsACString); }<|fim▁end|>
} impl<'a> cmp::PartialEq<str> for $String<'a> {
<|file_name|>ex1.py<|end_file_name|><|fim▁begin|>import pandas as pd <|fim▁hole|>print(tv_budget_x)<|fim▁end|>
adv = pd.read_csv('Advertising.csv') tv_budget_x = adv.TV.tolist()
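Each record in this dump reassembles into its original source file by splicing the completion row into the position marked by the hole token inside the prompt row, as the ex1.py record above shows. A minimal Python sketch of that splice; the constant and function names are illustrative, and the marker strings are built with a \u2581 escape (U+2581, the block character used in the markers) so this note is not itself mistaken for a record:

FIM_BEGIN = "<|fim\u2581begin|>"
FIM_HOLE = "<|fim\u2581hole|>"
FIM_END = "<|fim\u2581end|>"

def reassemble(prompt, completion):
    # Keep only the text between the begin and end markers, then split it
    # at the hole marker and splice the completion in between.
    body = prompt.split(FIM_BEGIN, 1)[1].rsplit(FIM_END, 1)[0]
    prefix, suffix = body.split(FIM_HOLE, 1)
    return prefix + completion + suffix

For the ex1.py record this yields the four-line script that imports pandas, reads Advertising.csv, converts the TV column to a list, and prints it.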
<|file_name|>cone.rs<|end_file_name|><|fim▁begin|>extern crate ncollide; use ncollide::shape::Cone; fn main() { let cone = Cone::new(0.5f32, 0.75); assert!(cone.half_height() == 0.5); assert!(cone.radius() == 0.75);<|fim▁hole|><|fim▁end|>
}
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from .split import Split from .tree import Tree from .node import Node from .column import NominalColumn, OrdinalColumn, ContinuousColumn from .stats import Stats from .invalid_split_reason import InvalidSplitReason <|fim▁hole|><|fim▁end|>
__version__ = "5.3.0"
<|file_name|>func_use_for_or_listcomp_var.py<|end_file_name|><|fim▁begin|>"""test that a warning is triggered when a for loop reuses a list comprehension variable"""

__revision__ = 'yo'

TEST_LC = [C for C in __revision__ if C.isalpha()]
print C # WARN
C = 4
print C # this one shouldn't trigger any warning
B = [B for B in __revision__ if B.isalpha()]
print B # nor this one

<|fim▁hole|>
    var1 = var2 + 4
print var1 # WARN

for note in __revision__:
    note.something()
for line in __revision__:
    for note in line:
        A = note.anotherthing()

for x in []:
    pass
for x in range(3):
    print (lambda : x)() # OK<|fim▁end|>
for var1, var2 in TEST_LC:
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>########
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import wraps
from os.path import basename

import httplib2
from Crypto.Random import atfork
from httplib2 import ServerNotFoundError
from googleapiclient.errors import HttpError
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials

from cloudify.exceptions import OperationRetry

from . import constants


def check_response(func):
    """
    Decorator that checks a REST response for errors.
    :return:
    """
    def _decorator(self, *args, **kwargs):
        try:
            response = func(self, *args, **kwargs)
        except ServerNotFoundError as e:
            raise OperationRetry(
                'Warning: {0}. '
                'If problem persists, error may be fatal.'.format(
                    e.message))
        if 'error' in response:
            self.logger.error('Response with error {0}'
                              .format(response['error']))
            raise GCPError(response['error'])
        return response
    return wraps(func)(_decorator)


class GoogleCloudApi(object):
    """
    Base class for executing calls through the Google API.
    """

    def __init__(self,
                 config,
                 logger,
                 scope=constants.COMPUTE_SCOPE,
                 discovery=constants.COMPUTE_DISCOVERY,
                 api_version=constants.API_V1):
        """
        GoogleCloudApi class constructor.
        Create API discovery object that will be making GCP REST API calls.

        :param config: dictionary with object properties
        :param logger: logger object that the class methods will be logging to
        :return:
        """
        self.auth = config['auth']
        self.config = config
        self.logger = logger.getChild('GCP')
        self.scope = scope
        self.__discovery = discovery
        self.api_version = api_version<|fim▁hole|>
        Lazily load the discovery so we don't make API calls during __init__
        """
        if hasattr(self, '_discovery'):
            return self._discovery
        self._discovery = self.create_discovery(self.__discovery,
                                                self.scope,
                                                self.api_version)
        return self._discovery

    def get_credentials(self, scope):
        raise GCPError(
            "Please implement {}: {}".format(__name__, repr(scope))
        )

    def create_discovery(self, discovery, scope, api_version):
        """
        Create Google Cloud API discovery object and perform authentication.

        :param discovery: name of the API discovery to be created
        :param scope: scope the API discovery will have
        :param api_version: version of the API
        :return: discovery object
        :raise: GCPError if there is a problem with service account JSON file:
        e.g. the file is not under the given path or it has wrong
        permissions
        """
        # Crypto.Random.atfork() must be called here because celery doesn't do
        # it
        atfork()
        try:
            credentials = self.get_credentials(scope)
            http = httplib2.Http()
            credentials.authorize(http)
            return build(discovery, api_version, http=http)
        except IOError as e:
            self.logger.error(str(e))
            raise GCPError(str(e))


class GoogleCloudPlatform(GoogleCloudApi):
    """
    Class using google-python-api-client library to connect to Google Cloud
    Platform.
""" def __init__(self, config, logger, name, additional_settings=None, scope=constants.COMPUTE_SCOPE, discovery=constants.COMPUTE_DISCOVERY, api_version=constants.API_V1): """ GoogleCloudPlatform class constructor. Create API discovery object that will be making GCP REST API calls. :param config: dictionary with project properties: path to auth file, project and zone :param logger: logger object that the class methods will be logging to :param name: name of GCP resource represented by this object :param scope: scope string of GCP connection :param discovery: name of Google service :param api_version: version of used API to communicate with GCP :return: """ super(GoogleCloudPlatform, self).__init__(config, logger, scope, discovery, api_version) self.auth = config['auth'] self.project = config['project'] self.zone = config['zone'] self.name = name self.body = additional_settings if additional_settings else {} def get_credentials(self, scope): # Crypto.Random.atfork() must be called here because celery doesn't do # it atfork() if hasattr(self.auth, 'get'): creds_func = ServiceAccountCredentials.from_json_keyfile_dict else: creds_func = ServiceAccountCredentials.from_json_keyfile_name return creds_func(self.auth, scopes=scope) def get_common_instance_metadata(self): """ Get project's common instance metadata. :return: CommonInstanceMetadata list extracted from REST response get project metadata. """ self.logger.info( 'Get commonInstanceMetadata for project {0}'.format(self.project)) metadata = self.discovery.projects().get( project=self.project).execute() return metadata['commonInstanceMetadata'] @property def ZONES(self): if not hasattr(self, '_ZONES'): zones = {} request = self.discovery.zones().list(project=self.project) while request is not None: response = request.execute() for zone in response['items']: zones[zone['name']] = zone zone['region_name'] = basename(zone['region']) request = self.discovery.zones().list_next( previous_request=request, previous_response=response) self._ZONES = zones return self._ZONES class GCPError(Exception): """ Exception raised from GoogleCloudPlatform class. """ def __init__(self, message): super(GCPError, self).__init__(message) def is_missing_resource_error(error): return isinstance(error, HttpError) and error.resp.status == 404 def is_resource_used_error(error): return isinstance(error, HttpError) and error.resp.status == 400<|fim▁end|>
@property def discovery(self): """
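The completion just above opens a memoized property: combined with the suffix already present in the prompt, discovery builds the API client on first access and caches it on the instance. A toy sketch of the same caching shape, with illustrative names rather than the record's own:

class LazyThing(object):
    def _build(self):
        # stand-in for the record's expensive create_discovery() call
        return object()

    @property
    def value(self):
        # Build once, cache on the instance, reuse afterwards; this is the
        # same hasattr-based pattern the gcp.py record completes.
        if hasattr(self, '_value'):
            return self._value
        self._value = self._build()
        return self._value

thing = LazyThing()
assert thing.value is thing.value  # constructed only once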
<|file_name|>init.js<|end_file_name|><|fim▁begin|>/* **************************************************************
#Initializes the functions and plugins
* ***************************************************************/
(function() {

	// Activate the mobile menu
	app.menu();

	// Activate the slideshow, if present on the page.
	var slideshow = document.getElementById("slider");

	if(slideshow) {
		window.mySwipe = new Swipe(slideshow, {
			speed: 400,
			auto: 4000,
			continuous: false,
			callback: function(){

				// Activate the slideshow navigation bullets
				var i = mySwipe.getPos(),
					el = document.querySelectorAll("#slider > ul > li");

				// Remove the ".is-active" class from all bullets
				for(var x = 0; x < el.length; x++) {<|fim▁hole|>				};

				// Activate the correct bullet
				el[i].classList.add("is-active");
			}
		});
	};

})();<|fim▁end|>
if(el[x].classList.contains("is-active")) { el[x].classList.remove("is-active"); }
<|file_name|>sequences.py<|end_file_name|><|fim▁begin|># sequences.py # strings >>> # 4 ways to make a string >>> str1 = 'This is a string. We built it with single quotes.' >>> str2 = "This is also a string, but built with double quotes." >>> str3 = '''This is built using triple quotes, ... so it can span multiple lines.''' >>> str4 = """This too ... is a multiline one ... built with triple double-quotes.""" >>> str4 #A 'This too\nis a multiline one\nbuilt with triple double-quotes.' >>> print(str4) #B This too is a multiline one built with triple double-quotes. >>> # encode / decode >>> s = "This is üŋíc0de" # unicode string: code points >>> type(s) <class 'str'><|fim▁hole|><class 'bytes'> >>> encoded_s.decode('utf-8') # let's revert to the original 'This is üŋíc0de' >>> bytes_obj = b"A bytes object" # a bytes object >>> type(bytes_obj) <class 'bytes'> # length >>> len(str1) 49 # indexing and slicing >>> s = "The trouble is you think you have time." >>> s[0] # indexing at position 0, which is the first char 'T' >>> s[5] # indexing at position 5, which is the sixth char 'r' >>> s[:4] # slicing, we specify only the stop position 'The ' >>> s[4:] # slicing, we specify only the start position 'trouble is you think you have time.' >>> s[2:14] # slicing, both start and stop positions 'e trouble is' >>> s[2:14:3] # slicing, start, stop and step (every 3 chars) 'erb ' >>> s[:] # quick way of making a copy 'The trouble is you think you have time.'<|fim▁end|>
>>> encoded_s = s.encode('utf-8') # utf-8 encoded version of s >>> encoded_s b'This is \xc3\xbc\xc5\x8b\xc3\xadc0de' # result: bytes object >>> type(encoded_s) # another way to verify it
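The completion above supplies the encode step of that transcript. As a quick sanity check, the round trip it demonstrates can be run directly in Python 3, using the same string as the sample:

s = "This is üŋíc0de"
encoded = s.encode('utf-8')          # a bytes object, as in the completion
assert isinstance(encoded, bytes)
assert encoded.decode('utf-8') == s  # the decode step shown in the suffix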
<|file_name|>KeyEdit.cpp<|end_file_name|><|fim▁begin|>/************************************************************************** Copyright [2009] [CrypTool Team] This file is part of CrypTool. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. **************************************************************************/<|fim▁hole|> // KeyEdit.cpp: Implementierungsdatei // #include "stdafx.h" #include "CrypToolApp.h" #include "KeyEdit.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CKeyEdit CKeyEdit::CKeyEdit() { } CKeyEdit::~CKeyEdit() { } BEGIN_MESSAGE_MAP(CKeyEdit, CEdit) //{{AFX_MSG_MAP(CKeyEdit) ON_WM_KEYDOWN() ON_WM_SYSKEYDOWN() //}}AFX_MSG_MAP END_MESSAGE_MAP() ///////////////////////////////////////////////////////////////////////////// // Behandlungsroutinen für Nachrichten CKeyEdit void CKeyEdit::OnKeyDown(UINT nChar, UINT nRepCnt, UINT nFlags) { // TODO: Code für die Behandlungsroutine für Nachrichten hier einfügen und/oder Standard aufrufen CEdit::OnKeyDown(nChar, nRepCnt, nFlags); } void CKeyEdit::OnSysKeyDown(UINT nChar, UINT nRepCnt, UINT nFlags) { /* if(nFlags & (1<<13)) { switch(nChar) { case('p'): case('P'): m_Param_NCtrl.SetFocus(); return; case('a'): case('A'): m_Param_bCtrl.SetFocus(); return; } } // TODO: Code für die Behandlungsroutine für Nachrichten hier einfügen und/oder Standard aufrufen */ CEdit::OnSysKeyDown(nChar, nRepCnt, nFlags); }<|fim▁end|>
<|file_name|>viz.js<|end_file_name|><|fim▁begin|>'use strict'; var _ = require('lodash'); var utils = require('../utils'); var d3 = require('d3'); var sunCalc = require('suncalc'); var geocoder = require('geocoder'); var Path = require('svg-path-generator'); var margin = { top: 20, right: 0, bottom: 20, left: 0 }; var dayOfYear = function(d) { var j1 = new Date(d); j1.setMonth(0, 0); return Math.round((d - j1) / 8.64e7) - 1; }; /* * View controller */ function Viz($el) { if (!(this instanceof Viz)) { return new Viz($el); } this.$el = $el; var $tooltip = $('#tooltip'); // do some cool vizualization here var width = $el.width() - margin.left - margin.right; var height = (Math.min(width * 0.6, $(document).height() - $el.offset().top - 180)) - margin.top - margin.bottom; var today = new Date(); var start = new Date(today.getFullYear(), 0, 1, 12, 0, 0, 0, 0); var end = new Date(today.getFullYear(), 11, 31, 12, 0, 0, 0, 0); var dateX = d3.time.scale().domain([start, end]).range([0, width]); this.x = d3.scale.linear() .domain([0, 365]) .range([0, width]); this.y = d3.scale.linear() .domain([0, 24]) .range([0, height]); var inverseX = d3.scale.linear() .range([0, 365]) .domain([0, width]); var xAxis = d3.svg.axis() .scale(dateX); var svg = d3.select($el[0]) .append('svg') .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) .append('g') .classed('container', true) .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); var self = this; var hideTimeout; svg.on('mousemove', function() { <|fim▁hole|> if(!self.times.length) { return; } var coordinates = d3.mouse(this); var x = coordinates[0]; var i = inverseX(x); i = Math.floor(i); self.svg.selectAll('g.day').classed('hover', function(d, idx) { return idx === i; }); var format = d3.time.format('%B %e'); $tooltip.find('.date').text(format(self.dates[i])); var sunset = new Date(self.times[i].sunset); var sunrise = new Date(self.times[i].sunrise); format = d3.time.format('%I:%M %p'); console.log(format(sunrise)); console.log(format(sunset)); $tooltip.find('.sunrise').text(format(sunrise)); $tooltip.find('.sunset').text(format(sunset)); var offset = self.$el.offset(); var top = offset.top; top += self.y(sunrise.getHours() + sunrise.getMinutes() / 60); var left = self.x(i) + offset.left; left -= $tooltip.width() / 2; top -= $tooltip.height() - 15; $tooltip.css('top', top).css('left', left).show(); clearTimeout(hideTimeout); }).on('mouseout', function(){ hideTimeout = setTimeout(function() { $tooltip.fadeOut(); self.svg.selectAll('g.day').classed('hover', false); }, 750); }); d3.select($tooltip[0]).on('mouseenter', function() { clearTimeout(hideTimeout); }); this.svg = svg; svg.append('rect') .attr('x', 0) .attr('y', 0) .attr('width', width) .attr('height', height); svg.append('g') .attr('class', 'x axis') .attr('transform', 'translate(0,' + height + ')') .call(xAxis); var max = 0; for (var d = start, i=0; d < end; d.setDate(d.getDate() + 1), i++) { this._drawDay(i); if(i > max) { max = i; } } var avgGroup = this.svg.append('g').classed('average', true); avgGroup .append('path') .attr('d', function() { return new Path() .moveTo(self.x(0), self.y(12)) .horizontalLineTo(self.x(max)) .end(); }) .classed('sunrise', true); avgGroup .append('path') .attr('d', function() { return new Path() .moveTo(self.x(0), self.y(12)) .horizontalLineTo(self.x(max)) .end(); }) .classed('sunset', true); avgGroup .append('text') .attr('x', self.x(50)) .attr('y', self.y(12)) .style('opacity', 0) .classed('sunrise', 
true); avgGroup .append('text') .attr('x', self.x(250)) .attr('y', self.y(12)) .style('opacity', 0) .classed('sunset', true); this.svg .append('path') .attr('d', function() { return new Path() .moveTo(self.x(0), self.y(today.getHours() + today.getMinutes() / 60)) .horizontalLineTo(self.x(max)) .end(); }) .classed('now', true); } Viz.prototype.updatePlace = function(placeName) { var self = this; if(placeName.trim() === '') { return; } var times = []; var dates = []; geocoder.geocode(placeName, function(err, res) { if(err) { return console.log(err); } if(!res.results.length) { return $('.place-name-container').text('Could not find ' + placeName + '!'); } $('.place-name-container').text(res.results[0].formatted_address); var location = res.results[0].geometry.location; var today = new Date(); var start = new Date(today.getFullYear(), 0, 1, 12, 0, 0, 0, 0); var end = new Date(today.getFullYear()+1, 0, 1, 12, 0, 0, 0, 0); for (var d = start, i=0; d < end; d.setDate(d.getDate() + 1), i++) { var time = sunCalc.getTimes(d, location.lat, location.lng); var isToday = false; if(d.getDate() === today.getDate() && d.getMonth() === today.getMonth()) { console.log('Today!'); console.log(d); isToday = true; } self._updateToday(time); self._updateLine(i, time, isToday); times.push(time); dates.push(new Date(d)); } self._updateAverages(times); }); this.times = times; this.dates = dates; }; Viz.prototype._updateToday = function(times) { }; Viz.prototype._updateAverages = function(times) { var avgSunrise = 0, avgSunset = 0; _.each(times, function(time, i) { var sunrise = new Date(time.sunrise); var sunset = new Date(time.sunset); if(sunset.getDate() !== sunrise.getDate()) { if(dayOfYear(sunrise) !== i) { avgSunrise -= 24; } else { avgSunset += 24; } } avgSunset += sunset.getHours() + sunset.getMinutes() / 60; avgSunrise += sunrise.getHours() + sunrise.getMinutes() / 60; }); avgSunset /= times.length; avgSunrise /= times.length; avgSunrise = (avgSunrise + 24) % 24; avgSunset = (avgSunset + 24) % 24; var avg = this.svg.select('g.average'); var self = this; avg.select('path.sunrise') .transition() .delay(150) .duration(1500) .attr('d', function() { return new Path() .moveTo(self.x(0), self.y(avgSunrise)) .horizontalLineTo(self.x(times.length)) .end(); }); avg.select('path.sunset') .transition() .delay(150) .duration(1500) .attr('d', function() { return new Path() .moveTo(self.x(0), self.y(avgSunset)) .horizontalLineTo(self.x(times.length)) .end(); }); var format = d3.time.format('%I:%M %p'); var getTimeZone = function() { return /\((.*)\)/.exec(new Date().toString())[1]; }; var formatHour = function(n) { var d = new Date(); var hour = Math.floor(n); var minutes = n - Math.floor(n); minutes = Math.round(minutes * 60); d.setHours(hour); d.setMinutes(minutes); return format(d) + ' (' + getTimeZone() + ')'; }; avg.select('text.sunrise') .transition() .delay(150) .duration(1500) .style('opacity', 1) .attr('y', function() { if(avgSunrise < 4) { return self.y(avgSunrise) + 20; } return self.y(avgSunrise) - 7; }) .text(function() { return 'Average Sunrise: ' + formatHour(avgSunrise); }); avg.select('text.sunset') .transition() .delay(150) .duration(1500) .style('opacity', 1).attr('y', function() { if(avgSunset < 4) { return self.y(avgSunset) + 20; } return self.y(avgSunset) - 7; }) .text(function() { return 'Average Sunset: ' + formatHour(avgSunset); }); }; Viz.prototype._updateLine = function(i, times, today) { var sunrise = new Date(times.sunrise); var sunset = new Date(times.sunset); today = today || false; var self 
= this; var group = this.svg.selectAll('g.day').filter(function(d, idx) { return i === idx; }); var start = self.y(sunrise.getHours() + sunrise.getMinutes() / 60); var end = self.y(sunset.getHours() + sunset.getMinutes() / 60); if(start < end) { group .select('path.day') .transition() .duration(1500) .attr('d', function() { return new Path() .moveTo(self.x(i), start) .verticalLineTo(end) .end(); }); group .select('path.day-wrap') .transition() .duration(1500) .attr('d', function() { return new Path() .moveTo(self.x(i), self.y(24)) .verticalLineTo(self.y(24)) .end(); }) .style('stroke-width', 0); } else { group .select('path.day') .transition() .duration(1500) .attr('d', function() { return new Path() .moveTo(self.x(i), 0) .verticalLineTo(end) .end(); }); group .select('path.day-wrap') .transition() .duration(1500) .attr('d', function() { return new Path() .moveTo(self.x(i), start) .verticalLineTo(self.y(24)) .end(); }) .style('stroke-width', (today) ? 2 : 0.5); } } Viz.prototype._drawDay = function(i) { var today = dayOfYear(new Date()) === i; var self = this; var group = this.svg.append('g').classed('day', true); group .append('path') .attr('d', function() { return new Path() .moveTo(self.x(i + 0.5), self.y(11.9)) .verticalLineTo(self.y(12.1)) .end(); }) // .style('stroke-width', self.x(i+1) - self.x(i) - .5) .style('stroke-width', function() { if(today) { return 2; } return 0.5; }) .classed('day', true) .classed('today', today); group .append('path') .attr('d', function() { return new Path() .moveTo(self.x(i + 0.5), self.y(24)) .verticalLineTo(self.y(24)) .end(); }) .classed('day-wrap', true) .classed('today', today); }; Viz.prototype.destroy = function() { // destroy d3 object }; module.exports = Viz;<|fim▁end|>
<|file_name|>parser.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![macro_escape] use abi; use ast::{BareFnTy, ClosureTy}; use ast::{StaticRegionTyParamBound, OtherRegionTyParamBound, TraitTyParamBound}; use ast::{Provided, Public, FnStyle}; use ast::{Mod, BiAdd, Arg, Arm, Attribute, BindByRef, BindByValue}; use ast::{BiBitAnd, BiBitOr, BiBitXor, Block}; use ast::{BlockCheckMode, UnBox}; use ast::{Crate, CrateConfig, Decl, DeclItem}; use ast::{DeclLocal, DefaultBlock, UnDeref, BiDiv, EMPTY_CTXT, EnumDef, ExplicitSelf}; use ast::{Expr, Expr_, ExprAddrOf, ExprMatch, ExprAgain}; use ast::{ExprAssign, ExprAssignOp, ExprBinary, ExprBlock, ExprBox}; use ast::{ExprBreak, ExprCall, ExprCast}; use ast::{ExprField, ExprFnBlock, ExprIf, ExprIndex}; use ast::{ExprLit, ExprLoop, ExprMac}; use ast::{ExprMethodCall, ExprParen, ExprPath, ExprProc}; use ast::{ExprRepeat, ExprRet, ExprStruct, ExprTup, ExprUnary, ExprUnboxedFn}; use ast::{ExprVec, ExprVstore, ExprVstoreSlice}; use ast::{ExprVstoreMutSlice, ExprWhile, ExprForLoop, Field, FnDecl}; use ast::{ExprVstoreUniq, Once, Many}; use ast::{ForeignItem, ForeignItemStatic, ForeignItemFn, ForeignMod}; use ast::{Ident, NormalFn, Inherited, Item, Item_, ItemStatic}; use ast::{ItemEnum, ItemFn, ItemForeignMod, ItemImpl}; use ast::{ItemMac, ItemMod, ItemStruct, ItemTrait, ItemTy, Lit, Lit_}; use ast::{LitBool, LitChar, LitByte, LitBinary}; use ast::{LitNil, LitStr, LitUint, Local, LocalLet}; use ast::{MutImmutable, MutMutable, Mac_, MacInvocTT, Matcher, MatchNonterminal}; use ast::{MatchSeq, MatchTok, Method, MutTy, BiMul, Mutability}; use ast::{NamedField, UnNeg, NoReturn, UnNot, P, Pat, PatEnum}; use ast::{PatIdent, PatLit, PatRange, PatRegion, PatStruct}; use ast::{PatTup, PatBox, PatWild, PatWildMulti}; use ast::{BiRem, Required}; use ast::{RetStyle, Return, BiShl, BiShr, Stmt, StmtDecl}; use ast::{StmtExpr, StmtSemi, StmtMac, StructDef, StructField}; use ast::{StructVariantKind, BiSub}; use ast::StrStyle; use ast::{SelfExplicit, SelfRegion, SelfStatic, SelfValue}; use ast::{TokenTree, TraitMethod, TraitRef, TTDelim, TTSeq, TTTok}; use ast::{TTNonterminal, TupleVariantKind, Ty, Ty_, TyBot, TyBox}; use ast::{TypeField, TyFixedLengthVec, TyClosure, TyProc, TyBareFn}; use ast::{TyTypeof, TyInfer, TypeMethod}; use ast::{TyNil, TyParam, TyParamBound, TyParen, TyPath, TyPtr, TyRptr}; use ast::{TyTup, TyU32, TyUnboxedFn, TyUniq, TyVec, UnUniq}; use ast::{UnboxedFnTy, UnboxedFnTyParamBound, UnnamedField, UnsafeBlock}; use ast::{UnsafeFn, ViewItem, ViewItem_, ViewItemExternCrate, ViewItemUse}; use ast::{ViewPath, ViewPathGlob, ViewPathList, ViewPathSimple}; use ast::Visibility; use ast; use ast_util::{as_prec, ident_to_path, lit_is_str, operator_prec}; use ast_util; use attr; use codemap::{Span, BytePos, Spanned, spanned, mk_sp}; use codemap; use parse; use parse::attr::ParserAttr; use parse::classify; use parse::common::{SeqSep, seq_sep_none}; use parse::common::{seq_sep_trailing_disallowed, seq_sep_trailing_allowed}; use parse::lexer::Reader; use parse::lexer::TokenAndSpan; use parse::obsolete::*; use 
parse::token::{INTERPOLATED, InternedString, can_begin_expr}; use parse::token::{is_ident, is_ident_or_path, is_plain_ident}; use parse::token::{keywords, special_idents, token_to_binop}; use parse::token; use parse::{new_sub_parser_from_file, ParseSess}; use owned_slice::OwnedSlice; use std::collections::HashSet; use std::mem::replace; use std::rc::Rc; use std::gc::{Gc, GC}; #[allow(non_camel_case_types)] #[deriving(PartialEq)] pub enum restriction { UNRESTRICTED, RESTRICT_STMT_EXPR, RESTRICT_NO_BAR_OP, RESTRICT_NO_BAR_OR_DOUBLEBAR_OP, RESTRICT_NO_STRUCT_LITERAL, } type ItemInfo = (Ident, Item_, Option<Vec<Attribute> >); /// How to parse a path. There are four different kinds of paths, all of which /// are parsed somewhat differently. #[deriving(PartialEq)] pub enum PathParsingMode { /// A path with no type parameters; e.g. `foo::bar::Baz` NoTypesAllowed, /// A path with a lifetime and type parameters, with no double colons /// before the type parameters; e.g. `foo::bar<'a>::Baz<T>` LifetimeAndTypesWithoutColons, /// A path with a lifetime and type parameters with double colons before /// the type parameters; e.g. `foo::bar::<'a>::Baz::<T>` LifetimeAndTypesWithColons, /// A path with a lifetime and type parameters with bounds before the last /// set of type parameters only; e.g. `foo::bar<'a>::Baz+X+Y<T>` This /// form does not use extra double colons. LifetimeAndTypesAndBounds, } /// A path paired with optional type bounds. pub struct PathAndBounds { pub path: ast::Path, pub bounds: Option<OwnedSlice<TyParamBound>>, } enum ItemOrViewItem { /// Indicates a failure to parse any kind of item. The attributes are /// returned. IoviNone(Vec<Attribute>), IoviItem(Gc<Item>), IoviForeignItem(Gc<ForeignItem>), IoviViewItem(ViewItem) } /// Possibly accept an `INTERPOLATED` expression (a pre-parsed expression /// dropped into the token stream, which happens while parsing the /// result of macro expansion) /// Placement of these is not as complex as I feared it would be. /// The important thing is to make sure that lookahead doesn't balk /// at INTERPOLATED tokens macro_rules! maybe_whole_expr ( ($p:expr) => ( { let found = match $p.token { INTERPOLATED(token::NtExpr(e)) => { Some(e) } INTERPOLATED(token::NtPath(_)) => { // FIXME: The following avoids an issue with lexical borrowck scopes, // but the clone is unfortunate. let pt = match $p.token { INTERPOLATED(token::NtPath(ref pt)) => (**pt).clone(), _ => unreachable!() }; let span = $p.span; Some($p.mk_expr(span.lo, span.hi, ExprPath(pt))) } INTERPOLATED(token::NtBlock(b)) => { let span = $p.span; Some($p.mk_expr(span.lo, span.hi, ExprBlock(b))) } _ => None }; match found { Some(e) => { $p.bump(); return e; } None => () } } ) ) /// As maybe_whole_expr, but for things other than expressions macro_rules! 
maybe_whole ( ($p:expr, $constructor:ident) => ( { let found = match ($p).token { INTERPOLATED(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { Some(INTERPOLATED(token::$constructor(x))) => { return x.clone() } _ => {} } } ); (no_clone $p:expr, $constructor:ident) => ( { let found = match ($p).token { INTERPOLATED(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { Some(INTERPOLATED(token::$constructor(x))) => { return x } _ => {} } } ); (deref $p:expr, $constructor:ident) => ( { let found = match ($p).token { INTERPOLATED(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { Some(INTERPOLATED(token::$constructor(x))) => { return (*x).clone() } _ => {} } } ); (Some $p:expr, $constructor:ident) => ( { let found = match ($p).token { INTERPOLATED(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { Some(INTERPOLATED(token::$constructor(x))) => { return Some(x.clone()), } _ => {} } } ); (iovi $p:expr, $constructor:ident) => ( { let found = match ($p).token { INTERPOLATED(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { Some(INTERPOLATED(token::$constructor(x))) => { return IoviItem(x.clone()) } _ => {} } } ); (pair_empty $p:expr, $constructor:ident) => ( { let found = match ($p).token { INTERPOLATED(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { Some(INTERPOLATED(token::$constructor(x))) => { return (Vec::new(), x) } _ => {} } } ) ) fn maybe_append(lhs: Vec<Attribute> , rhs: Option<Vec<Attribute> >) -> Vec<Attribute> { match rhs { None => lhs, Some(ref attrs) => lhs.append(attrs.as_slice()) } } struct ParsedItemsAndViewItems { attrs_remaining: Vec<Attribute>, view_items: Vec<ViewItem>, items: Vec<Gc<Item>>, foreign_items: Vec<Gc<ForeignItem>> } /* ident is handled by common.rs */ pub struct Parser<'a> { pub sess: &'a ParseSess, /// the current token: pub token: token::Token, /// the span of the current token: pub span: Span, /// the span of the prior token: pub last_span: Span, pub cfg: CrateConfig, /// the previous token or None (only stashed sometimes). pub last_token: Option<Box<token::Token>>, pub buffer: [TokenAndSpan, ..4], pub buffer_start: int, pub buffer_end: int, pub tokens_consumed: uint, pub restriction: restriction, pub quote_depth: uint, // not (yet) related to the quasiquoter pub reader: Box<Reader>, pub interner: Rc<token::IdentInterner>, /// The set of seen errors about obsolete syntax. Used to suppress /// extra detail when the same error is seen twice pub obsolete_set: HashSet<ObsoleteSyntax>, /// Used to determine the path to externally loaded source files pub mod_path_stack: Vec<InternedString>, /// Stack of spans of open delimiters. Used for error message. pub open_braces: Vec<Span>, /// Flag if this parser "owns" the directory that it is currently parsing /// in. This will affect how nested files are looked up. pub owns_directory: bool, /// Name of the root module this parser originated from. If `None`, then the /// name is not known. This does not change while the parser is descending /// into modules, and sub-parsers have new values for this name. 
pub root_module_name: Option<String>, } fn is_plain_ident_or_underscore(t: &token::Token) -> bool { is_plain_ident(t) || *t == token::UNDERSCORE } /// Get a token the parser cares about fn real_token(rdr: &mut Reader) -> TokenAndSpan { let mut t = rdr.next_token(); loop { match t.tok { token::WS | token::COMMENT | token::SHEBANG(_) => { t = rdr.next_token(); }, _ => break } } t } impl<'a> Parser<'a> { pub fn new(sess: &'a ParseSess, cfg: ast::CrateConfig, mut rdr: Box<Reader>) -> Parser<'a> { let tok0 = real_token(rdr); let span = tok0.sp; let placeholder = TokenAndSpan { tok: token::UNDERSCORE, sp: span, }; Parser { reader: rdr, interner: token::get_ident_interner(), sess: sess, cfg: cfg, token: tok0.tok, span: span, last_span: span, last_token: None, buffer: [ placeholder.clone(), placeholder.clone(), placeholder.clone(), placeholder.clone(), ], buffer_start: 0, buffer_end: 0, tokens_consumed: 0, restriction: UNRESTRICTED, quote_depth: 0, obsolete_set: HashSet::new(), mod_path_stack: Vec::new(), open_braces: Vec::new(), owns_directory: true, root_module_name: None, } } /// Convert a token to a string using self's reader pub fn token_to_string(token: &token::Token) -> String { token::to_string(token) } /// Convert the current token to a string using self's reader pub fn this_token_to_string(&mut self) -> String { Parser::token_to_string(&self.token) } pub fn unexpected_last(&mut self, t: &token::Token) -> ! { let token_str = Parser::token_to_string(t); let last_span = self.last_span; self.span_fatal(last_span, format!("unexpected token: `{}`", token_str).as_slice()); } pub fn unexpected(&mut self) -> ! { let this_token = self.this_token_to_string(); self.fatal(format!("unexpected token: `{}`", this_token).as_slice()); } /// Expect and consume the token t. Signal an error if /// the next token is not t. pub fn expect(&mut self, t: &token::Token) { if self.token == *t { self.bump(); } else { let token_str = Parser::token_to_string(t); let this_token_str = self.this_token_to_string(); self.fatal(format!("expected `{}` but found `{}`", token_str, this_token_str).as_slice()) } } /// Expect next token to be edible or inedible token. If edible, /// then consume it; if inedible, then return without consuming /// anything. Signal a fatal error if next token is unexpected. pub fn expect_one_of(&mut self, edible: &[token::Token], inedible: &[token::Token]) { fn tokens_to_string(tokens: &[token::Token]) -> String { let mut i = tokens.iter(); // This might be a sign we need a connect method on Iterator. let b = i.next() .map_or("".to_string(), |t| Parser::token_to_string(t)); i.fold(b, |b,a| { let mut b = b; b.push_str("`, `"); b.push_str(Parser::token_to_string(a).as_slice()); b }) } if edible.contains(&self.token) { self.bump(); } else if inedible.contains(&self.token) { // leave it in the input } else { let expected = edible.iter().map(|x| (*x).clone()).collect::<Vec<_>>().append(inedible); let expect = tokens_to_string(expected.as_slice()); let actual = self.this_token_to_string(); self.fatal( (if expected.len() != 1 { (format!("expected one of `{}` but found `{}`", expect, actual)) } else { (format!("expected `{}` but found `{}`", expect, actual)) }).as_slice() ) } } /// Check for erroneous `ident { }`; if matches, signal error and /// recover (without consuming any expected input token). Returns /// true if and only if input was consumed for recovery. 
pub fn check_for_erroneous_unit_struct_expecting(&mut self,
                                                     expected: &[token::Token])
                                                     -> bool {
        if self.token == token::LBRACE
            && expected.iter().all(|t| *t != token::LBRACE)
            && self.look_ahead(1, |t| *t == token::RBRACE) {
            // matched; signal non-fatal error and recover.
            let span = self.span;
            self.span_err(span,
                          "unit-like struct construction is written with no trailing `{ }`");
            self.eat(&token::LBRACE);
            self.eat(&token::RBRACE);
            true
        } else {
            false
        }
    }

    /// Commit to parsing a complete expression `e` expected to be
    /// followed by some token from the set edible + inedible. Recover
    /// from anticipated input errors, discarding erroneous characters.
    pub fn commit_expr(&mut self, e: Gc<Expr>, edible: &[token::Token],
                       inedible: &[token::Token]) {
        debug!("commit_expr {:?}", e);
        match e.node {
            ExprPath(..) => {
                // might be unit-struct construction; check for recoverable input error.
                let expected = edible.iter().map(|x| (*x).clone()).collect::<Vec<_>>()
                              .append(inedible);
                self.check_for_erroneous_unit_struct_expecting(
                    expected.as_slice());
            }
            _ => {}
        }
        self.expect_one_of(edible, inedible)
    }

    pub fn commit_expr_expecting(&mut self, e: Gc<Expr>, edible: token::Token) {
        self.commit_expr(e, &[edible], &[])
    }

    /// Commit to parsing a complete statement `s`, which expects to be
    /// followed by some token from the set edible + inedible. Check
    /// for recoverable input errors, discarding erroneous characters.
    pub fn commit_stmt(&mut self, s: Gc<Stmt>, edible: &[token::Token],
                       inedible: &[token::Token]) {
        debug!("commit_stmt {:?}", s);
        let _s = s; // unused, but future checks might want to inspect `s`.
        if self.last_token
               .as_ref()
               .map_or(false, |t| is_ident_or_path(&**t)) {
            let expected = edible.iter().map(|x| (*x).clone()).collect::<Vec<_>>()
                           .append(inedible.as_slice());
            self.check_for_erroneous_unit_struct_expecting(
                expected.as_slice());
        }
        self.expect_one_of(edible, inedible)
    }

    pub fn commit_stmt_expecting(&mut self, s: Gc<Stmt>, edible: token::Token) {
        self.commit_stmt(s, &[edible], &[])
    }

    pub fn parse_ident(&mut self) -> ast::Ident {
        self.check_strict_keywords();
        self.check_reserved_keywords();
        match self.token {
            token::IDENT(i, _) => {
                self.bump();
                i
            }
            token::INTERPOLATED(token::NtIdent(..)) => {
                self.bug("ident interpolation not converted to real token");
            }
            _ => {
                let token_str = self.this_token_to_string();
                self.fatal((format!("expected ident, found `{}`",
                                    token_str)).as_slice())
            }
        }
    }

    pub fn parse_path_list_item(&mut self) -> ast::PathListItem {
        let lo = self.span.lo;
        let node = if self.eat_keyword(keywords::Mod) {
            ast::PathListMod { id: ast::DUMMY_NODE_ID }
        } else {
            let ident = self.parse_ident();
            ast::PathListIdent { name: ident, id: ast::DUMMY_NODE_ID }
        };
        let hi = self.last_span.hi;
        spanned(lo, hi, node)
    }

    /// Consume token 'tok' if it exists. Returns true if the given
    /// token was present, false otherwise.
    pub fn eat(&mut self, tok: &token::Token) -> bool {
        let is_present = self.token == *tok;
        if is_present { self.bump() }
        is_present
    }

    pub fn is_keyword(&mut self, kw: keywords::Keyword) -> bool {
        token::is_keyword(kw, &self.token)
    }

    /// If the next token is the given keyword, eat it and return
    /// true. Otherwise, return false.
    pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> bool {
        match self.token {
            token::IDENT(sid, false) if kw.to_name() == sid.name => {
                self.bump();
                true
            }
            _ => false
        }
    }

    /// If the given word is not a keyword, signal an error.
    /// If the next token is not the given word, signal an error.
    /// Otherwise, eat it.
pub fn expect_keyword(&mut self, kw: keywords::Keyword) { if !self.eat_keyword(kw) { let id_interned_str = token::get_name(kw.to_name()); let token_str = self.this_token_to_string(); self.fatal(format!("expected `{}`, found `{}`", id_interned_str, token_str).as_slice()) } } /// Signal an error if the given string is a strict keyword pub fn check_strict_keywords(&mut self) { if token::is_strict_keyword(&self.token) { let token_str = self.this_token_to_string(); let span = self.span; self.span_err(span, format!("found `{}` in ident position", token_str).as_slice()); } } /// Signal an error if the current token is a reserved keyword pub fn check_reserved_keywords(&mut self) { if token::is_reserved_keyword(&self.token) { let token_str = self.this_token_to_string(); self.fatal(format!("`{}` is a reserved keyword", token_str).as_slice()) } } /// Expect and consume an `&`. If `&&` is seen, replace it with a single /// `&` and continue. If an `&` is not seen, signal an error. fn expect_and(&mut self) { match self.token { token::BINOP(token::AND) => self.bump(), token::ANDAND => { let span = self.span; let lo = span.lo + BytePos(1); self.replace_token(token::BINOP(token::AND), lo, span.hi) } _ => { let token_str = self.this_token_to_string(); let found_token = Parser::token_to_string(&token::BINOP(token::AND)); self.fatal(format!("expected `{}`, found `{}`", found_token, token_str).as_slice()) } } } /// Expect and consume a `|`. If `||` is seen, replace it with a single /// `|` and continue. If a `|` is not seen, signal an error. fn expect_or(&mut self) { match self.token { token::BINOP(token::OR) => self.bump(), token::OROR => { let span = self.span; let lo = span.lo + BytePos(1); self.replace_token(token::BINOP(token::OR), lo, span.hi) } _ => { let found_token = self.this_token_to_string(); let token_str = Parser::token_to_string(&token::BINOP(token::OR)); self.fatal(format!("expected `{}`, found `{}`", token_str, found_token).as_slice()) } } } /// Attempt to consume a `<`. If `<<` is seen, replace it with a single /// `<` and continue. If a `<` is not seen, return false. /// /// This is meant to be used when parsing generics on a path to get the /// starting token. The `force` parameter is used to forcefully break up a /// `<<` token. If `force` is false, then `<<` is only broken when a lifetime /// shows up next. For example, consider the expression: /// /// foo as bar << test /// /// The parser needs to know if `bar <<` is the start of a generic path or if /// it's a left-shift token. If `test` were a lifetime, then it's impossible /// for the token to be a left-shift, but if it's not a lifetime, then it's /// considered a left-shift. /// /// The reason for this is that the only current ambiguity with `<<` is when /// parsing closure types: /// /// foo::<<'a> ||>(); /// impl Foo<<'a> ||>() { ... } fn eat_lt(&mut self, force: bool) -> bool { match self.token { token::LT => { self.bump(); true } token::BINOP(token::SHL) => { let next_lifetime = self.look_ahead(1, |t| match *t { token::LIFETIME(..) => true, _ => false, }); if force || next_lifetime { let span = self.span; let lo = span.lo + BytePos(1); self.replace_token(token::LT, lo, span.hi); true } else { false } } _ => false, } } fn expect_lt(&mut self) { if !self.eat_lt(true) { let found_token = self.this_token_to_string(); let token_str = Parser::token_to_string(&token::LT); self.fatal(format!("expected `{}`, found `{}`", token_str, found_token).as_slice()) } } /// Parse a sequence bracketed by `|` and `|`, stopping before the `|`. 
fn parse_seq_to_before_or<T>( &mut self, sep: &token::Token, f: |&mut Parser| -> T) -> Vec<T> { let mut first = true; let mut vector = Vec::new(); while self.token != token::BINOP(token::OR) && self.token != token::OROR { if first { first = false } else { self.expect(sep) } vector.push(f(self)) } vector } /// Expect and consume a GT. if a >> is seen, replace it /// with a single > and continue. If a GT is not seen, /// signal an error. pub fn expect_gt(&mut self) { match self.token { token::GT => self.bump(), token::BINOP(token::SHR) => { let span = self.span; let lo = span.lo + BytePos(1); self.replace_token(token::GT, lo, span.hi) } token::BINOPEQ(token::SHR) => { let span = self.span; let lo = span.lo + BytePos(1); self.replace_token(token::GE, lo, span.hi) } token::GE => { let span = self.span; let lo = span.lo + BytePos(1); self.replace_token(token::EQ, lo, span.hi) } _ => { let gt_str = Parser::token_to_string(&token::GT); let this_token_str = self.this_token_to_string(); self.fatal(format!("expected `{}`, found `{}`", gt_str, this_token_str).as_slice()) } } } /// Parse a sequence bracketed by '<' and '>', stopping /// before the '>'. pub fn parse_seq_to_before_gt<T>( &mut self, sep: Option<token::Token>, f: |&mut Parser| -> T) -> OwnedSlice<T> { let mut first = true; let mut v = Vec::new(); while self.token != token::GT && self.token != token::BINOP(token::SHR) && self.token != token::GE && self.token != token::BINOPEQ(token::SHR) { match sep { Some(ref t) => { if first { first = false; } else { self.expect(t); } } _ => () } v.push(f(self)); } return OwnedSlice::from_vec(v); } pub fn parse_seq_to_gt<T>( &mut self, sep: Option<token::Token>, f: |&mut Parser| -> T) -> OwnedSlice<T> { let v = self.parse_seq_to_before_gt(sep, f); self.expect_gt(); return v; } /// Parse a sequence, including the closing delimiter. The function /// f must consume tokens until reaching the next separator or /// closing bracket. pub fn parse_seq_to_end<T>( &mut self, ket: &token::Token, sep: SeqSep, f: |&mut Parser| -> T) -> Vec<T> { let val = self.parse_seq_to_before_end(ket, sep, f); self.bump(); val } /// Parse a sequence, not including the closing delimiter. The function /// f must consume tokens until reaching the next separator or /// closing bracket. pub fn parse_seq_to_before_end<T>( &mut self, ket: &token::Token, sep: SeqSep, f: |&mut Parser| -> T) -> Vec<T> { let mut first: bool = true; let mut v = vec!(); while self.token != *ket { match sep.sep { Some(ref t) => { if first { first = false; } else { self.expect(t); } } _ => () } if sep.trailing_sep_allowed && self.token == *ket { break; } v.push(f(self)); } return v; } /// Parse a sequence, including the closing delimiter. The function /// f must consume tokens until reaching the next separator or /// closing bracket. pub fn parse_unspanned_seq<T>( &mut self, bra: &token::Token, ket: &token::Token, sep: SeqSep, f: |&mut Parser| -> T) -> Vec<T> { self.expect(bra); let result = self.parse_seq_to_before_end(ket, sep, f); self.bump(); result } /// Parse a sequence parameter of enum variant. For consistency purposes, /// these should not be empty. 
pub fn parse_enum_variant_seq<T>( &mut self, bra: &token::Token, ket: &token::Token, sep: SeqSep, f: |&mut Parser| -> T) -> Vec<T> { let result = self.parse_unspanned_seq(bra, ket, sep, f); if result.is_empty() { let last_span = self.last_span; self.span_err(last_span, "nullary enum variants are written with no trailing `( )`"); } result } // NB: Do not use this function unless you actually plan to place the // spanned list in the AST. pub fn parse_seq<T>( &mut self, bra: &token::Token, ket: &token::Token, sep: SeqSep, f: |&mut Parser| -> T) -> Spanned<Vec<T> > { let lo = self.span.lo; self.expect(bra); let result = self.parse_seq_to_before_end(ket, sep, f); let hi = self.span.hi; self.bump(); spanned(lo, hi, result) } /// Advance the parser by one token pub fn bump(&mut self) { self.last_span = self.span; // Stash token for error recovery (sometimes; clone is not necessarily cheap). self.last_token = if is_ident_or_path(&self.token) { Some(box self.token.clone()) } else { None }; let next = if self.buffer_start == self.buffer_end { real_token(self.reader) } else { // Avoid token copies with `replace`. let buffer_start = self.buffer_start as uint; let next_index = (buffer_start + 1) & 3 as uint; self.buffer_start = next_index as int; let placeholder = TokenAndSpan { tok: token::UNDERSCORE, sp: self.span, }; replace(&mut self.buffer[buffer_start], placeholder) }; self.span = next.sp; self.token = next.tok; self.tokens_consumed += 1u; } /// Advance the parser by one token and return the bumped token. pub fn bump_and_get(&mut self) -> token::Token { let old_token = replace(&mut self.token, token::UNDERSCORE); self.bump(); old_token } /// EFFECT: replace the current token and span with the given one pub fn replace_token(&mut self, next: token::Token, lo: BytePos, hi: BytePos) { self.last_span = mk_sp(self.span.lo, lo); self.token = next; self.span = mk_sp(lo, hi); } pub fn buffer_length(&mut self) -> int { if self.buffer_start <= self.buffer_end { return self.buffer_end - self.buffer_start; } return (4 - self.buffer_start) + self.buffer_end; } pub fn look_ahead<R>(&mut self, distance: uint, f: |&token::Token| -> R) -> R { let dist = distance as int; while self.buffer_length() < dist { self.buffer[self.buffer_end as uint] = real_token(self.reader); self.buffer_end = (self.buffer_end + 1) & 3; } f(&self.buffer[((self.buffer_start + dist - 1) & 3) as uint].tok) } pub fn fatal(&mut self, m: &str) -> ! { self.sess.span_diagnostic.span_fatal(self.span, m) } pub fn span_fatal(&mut self, sp: Span, m: &str) -> ! { self.sess.span_diagnostic.span_fatal(sp, m) } pub fn span_note(&mut self, sp: Span, m: &str) { self.sess.span_diagnostic.span_note(sp, m) } pub fn bug(&mut self, m: &str) -> ! { self.sess.span_diagnostic.span_bug(self.span, m) } pub fn warn(&mut self, m: &str) { self.sess.span_diagnostic.span_warn(self.span, m) } pub fn span_warn(&mut self, sp: Span, m: &str) { self.sess.span_diagnostic.span_warn(sp, m) } pub fn span_err(&mut self, sp: Span, m: &str) { self.sess.span_diagnostic.span_err(sp, m) } pub fn abort_if_errors(&mut self) { self.sess.span_diagnostic.handler().abort_if_errors(); } pub fn id_to_interned_str(&mut self, id: Ident) -> InternedString { token::get_ident(id) } /// Is the current token one of the keywords that signals a bare function /// type? 
pub fn token_is_bare_fn_keyword(&mut self) -> bool { if token::is_keyword(keywords::Fn, &self.token) { return true } if token::is_keyword(keywords::Unsafe, &self.token) || token::is_keyword(keywords::Once, &self.token) { return self.look_ahead(1, |t| token::is_keyword(keywords::Fn, t)) } false } /// Is the current token one of the keywords that signals a closure type? pub fn token_is_closure_keyword(&mut self) -> bool { token::is_keyword(keywords::Unsafe, &self.token) || token::is_keyword(keywords::Once, &self.token) } /// Is the current token one of the keywords that signals an old-style /// closure type (with explicit sigil)? pub fn token_is_old_style_closure_keyword(&mut self) -> bool { token::is_keyword(keywords::Unsafe, &self.token) || token::is_keyword(keywords::Once, &self.token) || token::is_keyword(keywords::Fn, &self.token) } pub fn token_is_lifetime(tok: &token::Token) -> bool { match *tok { token::LIFETIME(..) => true, _ => false, } } pub fn get_lifetime(&mut self) -> ast::Ident { match self.token { token::LIFETIME(ref ident) => *ident, _ => self.bug("not a lifetime"), } } /// parse a TyBareFn type: pub fn parse_ty_bare_fn(&mut self) -> Ty_ { /* [unsafe] [extern "ABI"] fn <'lt> (S) -> T ^~~~^ ^~~~^ ^~~~^ ^~^ ^ | | | | | | | | | Return type | | | Argument types | | Lifetimes | ABI Function Style */ let fn_style = self.parse_unsafety(); let abi = if self.eat_keyword(keywords::Extern) { self.parse_opt_abi().unwrap_or(abi::C) } else { abi::Rust }; self.expect_keyword(keywords::Fn); let (decl, lifetimes) = self.parse_ty_fn_decl(true); return TyBareFn(box(GC) BareFnTy { abi: abi, fn_style: fn_style, lifetimes: lifetimes, decl: decl }); } /// Parses a procedure type (`proc`). The initial `proc` keyword must /// already have been parsed. pub fn parse_proc_type(&mut self) -> Ty_ { /* proc <'lt> (S) [:Bounds] -> T ^~~^ ^~~~^ ^ ^~~~~~~~^ ^ | | | | | | | | | Return type | | | Bounds | | Argument types | Lifetimes the `proc` keyword */ let lifetimes = if self.eat(&token::LT) { let lifetimes = self.parse_lifetimes(); self.expect_gt(); lifetimes } else { Vec::new() }; let (inputs, variadic) = self.parse_fn_args(false, false); let bounds = { if self.eat(&token::COLON) { let (_, bounds) = self.parse_ty_param_bounds(false); Some(bounds) } else { None } }; let (ret_style, ret_ty) = self.parse_ret_ty(); let decl = P(FnDecl { inputs: inputs, output: ret_ty, cf: ret_style, variadic: variadic }); TyProc(box(GC) ClosureTy { fn_style: NormalFn, onceness: Once, bounds: bounds, decl: decl, lifetimes: lifetimes, }) } /// Parse a TyClosure type pub fn parse_ty_closure(&mut self) -> Ty_ { /* [unsafe] [once] <'lt> |S| [:Bounds] -> T ^~~~~~~^ ^~~~~^ ^~~~^ ^ ^~~~~~~~^ ^ | | | | | | | | | | | Return type | | | | Closure bounds | | | Argument types | | Lifetimes | Once-ness (a.k.a., affine) Function Style */ let fn_style = self.parse_unsafety(); let onceness = if self.eat_keyword(keywords::Once) {Once} else {Many}; let lifetimes = if self.eat(&token::LT) { let lifetimes = self.parse_lifetimes(); self.expect_gt(); lifetimes } else { Vec::new() }; let (is_unboxed, inputs) = if self.eat(&token::OROR) { (false, Vec::new()) } else { self.expect_or(); let is_unboxed = self.token == token::BINOP(token::AND) && self.look_ahead(1, |t| { token::is_keyword(keywords::Mut, t) }) && self.look_ahead(2, |t| *t == token::COLON); if is_unboxed { self.bump(); self.bump(); self.bump(); } let inputs = self.parse_seq_to_before_or( &token::COMMA, |p| p.parse_arg_general(false)); self.expect_or(); (is_unboxed, inputs) }; let (region, 
bounds) = { if self.eat(&token::COLON) { let (region, bounds) = self.parse_ty_param_bounds(true); (region, Some(bounds)) } else { (None, None) } }; let (return_style, output) = self.parse_ret_ty(); let decl = P(FnDecl { inputs: inputs, output: output, cf: return_style, variadic: false }); if is_unboxed { TyUnboxedFn(box(GC) UnboxedFnTy { decl: decl, }) } else { TyClosure(box(GC) ClosureTy { fn_style: fn_style, onceness: onceness, bounds: bounds, decl: decl, lifetimes: lifetimes, }, region) } } pub fn parse_unsafety(&mut self) -> FnStyle { if self.eat_keyword(keywords::Unsafe) { return UnsafeFn; } else { return NormalFn; } } /// Parse a function type (following the 'fn') pub fn parse_ty_fn_decl(&mut self, allow_variadic: bool) -> (P<FnDecl>, Vec<ast::Lifetime>) { /* (fn) <'lt> (S) -> T ^~~~^ ^~^ ^ | | | | | Return type | Argument types Lifetimes */ let lifetimes = if self.eat(&token::LT) { let lifetimes = self.parse_lifetimes(); self.expect_gt(); lifetimes } else { Vec::new() }; let (inputs, variadic) = self.parse_fn_args(false, allow_variadic); let (ret_style, ret_ty) = self.parse_ret_ty(); let decl = P(FnDecl { inputs: inputs, output: ret_ty, cf: ret_style, variadic: variadic }); (decl, lifetimes) } /// Parse the methods in a trait declaration pub fn parse_trait_methods(&mut self) -> Vec<TraitMethod> { self.parse_unspanned_seq( &token::LBRACE, &token::RBRACE, seq_sep_none(), |p| { let attrs = p.parse_outer_attributes(); let lo = p.span.lo; // NB: at the moment, trait methods are public by default; this // could change. let vis = p.parse_visibility(); let abi = if p.eat_keyword(keywords::Extern) { p.parse_opt_abi().unwrap_or(abi::C) } else if attr::contains_name(attrs.as_slice(), "rust_call_abi_hack") { // FIXME(stage0, pcwalton): Remove this awful hack after a // snapshot, and change to `extern "rust-call" fn`. abi::RustCall } else { abi::Rust }; let style = p.parse_fn_style(); let ident = p.parse_ident(); let generics = p.parse_generics(); let (explicit_self, d) = p.parse_fn_decl_with_self(|p| { // This is somewhat dubious; We don't want to allow argument // names to be left off if there is a definition... p.parse_arg_general(false) }); let hi = p.last_span.hi; match p.token { token::SEMI => { p.bump(); debug!("parse_trait_methods(): parsing required method"); Required(TypeMethod { ident: ident, attrs: attrs, fn_style: style, decl: d, generics: generics, abi: abi, explicit_self: explicit_self, id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), vis: vis, }) } token::LBRACE => { debug!("parse_trait_methods(): parsing provided method"); let (inner_attrs, body) = p.parse_inner_attrs_and_block(); let attrs = attrs.append(inner_attrs.as_slice()); Provided(box(GC) ast::Method { attrs: attrs, id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), node: ast::MethDecl(ident, generics, abi, explicit_self, style, d, body, vis) }) } _ => { let token_str = p.this_token_to_string(); p.fatal((format!("expected `;` or `{{` but found `{}`", token_str)).as_slice()) } } }) } /// Parse a possibly mutable type pub fn parse_mt(&mut self) -> MutTy { let mutbl = self.parse_mutability(); let t = self.parse_ty(true); MutTy { ty: t, mutbl: mutbl } } /// Parse [mut/const/imm] ID : TY /// now used only by obsolete record syntax parser... 
pub fn parse_ty_field(&mut self) -> TypeField { let lo = self.span.lo; let mutbl = self.parse_mutability(); let id = self.parse_ident(); self.expect(&token::COLON); let ty = self.parse_ty(true); let hi = ty.span.hi; ast::TypeField { ident: id, mt: MutTy { ty: ty, mutbl: mutbl }, span: mk_sp(lo, hi), } } /// Parse optional return type [ -> TY ] in function decl pub fn parse_ret_ty(&mut self) -> (RetStyle, P<Ty>) { return if self.eat(&token::RARROW) { let lo = self.span.lo; if self.eat(&token::NOT) { ( NoReturn, P(Ty { id: ast::DUMMY_NODE_ID, node: TyBot, span: mk_sp(lo, self.last_span.hi) }) ) } else { (Return, self.parse_ty(true)) } } else { let pos = self.span.lo; ( Return, P(Ty { id: ast::DUMMY_NODE_ID, node: TyNil, span: mk_sp(pos, pos), }) ) } } /// Parse a type. /// /// The second parameter specifies whether the `+` binary operator is /// allowed in the type grammar. pub fn parse_ty(&mut self, plus_allowed: bool) -> P<Ty> { maybe_whole!(no_clone self, NtTy); let lo = self.span.lo; let t = if self.token == token::LPAREN { self.bump(); if self.token == token::RPAREN { self.bump(); TyNil } else { // (t) is a parenthesized ty // (t,) is the type of a tuple with only one field, // of type t let mut ts = vec!(self.parse_ty(true)); let mut one_tuple = false; while self.token == token::COMMA { self.bump(); if self.token != token::RPAREN { ts.push(self.parse_ty(true)); } else { one_tuple = true; } } if ts.len() == 1 && !one_tuple { self.expect(&token::RPAREN); TyParen(*ts.get(0)) } else { let t = TyTup(ts); self.expect(&token::RPAREN); t } } } else if self.token == token::AT { // MANAGED POINTER self.bump(); let span = self.last_span; self.obsolete(span, ObsoleteManagedType); TyBox(self.parse_ty(plus_allowed)) } else if self.token == token::TILDE { // OWNED POINTER self.bump(); let last_span = self.last_span; match self.token { token::LBRACKET => self.obsolete(last_span, ObsoleteOwnedVector), _ => self.obsolete(last_span, ObsoleteOwnedType), }; TyUniq(self.parse_ty(true)) } else if self.token == token::BINOP(token::STAR) { // STAR POINTER (bare pointer?) self.bump(); TyPtr(self.parse_ptr()) } else if self.token == token::LBRACKET { // VECTOR self.expect(&token::LBRACKET); let t = self.parse_ty(true); // Parse the `, ..e` in `[ int, ..e ]` // where `e` is a const expression let t = match self.maybe_parse_fixed_vstore() { None => TyVec(t), Some(suffix) => TyFixedLengthVec(t, suffix) }; self.expect(&token::RBRACKET); t } else if self.token == token::BINOP(token::AND) || self.token == token::ANDAND { // BORROWED POINTER self.expect_and(); self.parse_borrowed_pointee() } else if self.is_keyword(keywords::Extern) || self.is_keyword(keywords::Unsafe) || self.token_is_bare_fn_keyword() { // BARE FUNCTION self.parse_ty_bare_fn() } else if self.token_is_closure_keyword() || self.token == token::BINOP(token::OR) || self.token == token::OROR || self.token == token::LT { // CLOSURE // // FIXME(pcwalton): Eventually `token::LT` will not unambiguously // introduce a closure, once procs can have lifetime bounds. We // will need to refactor the grammar a little bit at that point. self.parse_ty_closure() } else if self.eat_keyword(keywords::Typeof) { // TYPEOF // In order to not be ambiguous, the type must be surrounded by parens. 
self.expect(&token::LPAREN); let e = self.parse_expr(); self.expect(&token::RPAREN); TyTypeof(e) } else if self.eat_keyword(keywords::Proc) { self.parse_proc_type() } else if self.token == token::MOD_SEP || is_ident_or_path(&self.token) { // NAMED TYPE let mode = if plus_allowed { LifetimeAndTypesAndBounds } else { LifetimeAndTypesWithoutColons }; let PathAndBounds { path, bounds } = self.parse_path(mode); TyPath(path, bounds, ast::DUMMY_NODE_ID) } else if self.eat(&token::UNDERSCORE) { // TYPE TO BE INFERRED TyInfer } else { let msg = format!("expected type, found token {:?}", self.token); self.fatal(msg.as_slice()); }; let sp = mk_sp(lo, self.last_span.hi); P(Ty {id: ast::DUMMY_NODE_ID, node: t, span: sp}) } pub fn parse_borrowed_pointee(&mut self) -> Ty_ { // look for `&'lt` or `&'foo ` and interpret `foo` as the region name: let opt_lifetime = self.parse_opt_lifetime(); let mt = self.parse_mt(); return TyRptr(opt_lifetime, mt); } pub fn parse_ptr(&mut self) -> MutTy { let mutbl = if self.eat_keyword(keywords::Mut) { MutMutable } else if self.eat_keyword(keywords::Const) { MutImmutable } else { let span = self.last_span; self.span_err(span, "bare raw pointers are no longer allowed, you should \ likely use `*mut T`, but otherwise `*T` is now \ known as `*const T`"); MutImmutable }; let t = self.parse_ty(true); MutTy { ty: t, mutbl: mutbl } } pub fn is_named_argument(&mut self) -> bool { let offset = match self.token { token::BINOP(token::AND) => 1, token::ANDAND => 1, _ if token::is_keyword(keywords::Mut, &self.token) => 1, _ => 0 }; debug!("parser is_named_argument offset:{}", offset); if offset == 0 { is_plain_ident_or_underscore(&self.token) && self.look_ahead(1, |t| *t == token::COLON) } else { self.look_ahead(offset, |t| is_plain_ident_or_underscore(t)) && self.look_ahead(offset + 1, |t| *t == token::COLON) } } /// This version of parse arg doesn't necessarily require /// identifier names. pub fn parse_arg_general(&mut self, require_name: bool) -> Arg { let pat = if require_name || self.is_named_argument() { debug!("parse_arg_general parse_pat (require_name:{:?})", require_name); let pat = self.parse_pat(); self.expect(&token::COLON); pat } else { debug!("parse_arg_general ident_to_pat"); ast_util::ident_to_pat(ast::DUMMY_NODE_ID, self.last_span, special_idents::invalid) }; let t = self.parse_ty(true); Arg { ty: t, pat: pat, id: ast::DUMMY_NODE_ID, } } /// Parse a single function argument pub fn parse_arg(&mut self) -> Arg { self.parse_arg_general(true) } /// Parse an argument in a lambda header e.g. |arg, arg| pub fn parse_fn_block_arg(&mut self) -> Arg { let pat = self.parse_pat(); let t = if self.eat(&token::COLON) { self.parse_ty(true) } else { P(Ty { id: ast::DUMMY_NODE_ID, node: TyInfer, span: mk_sp(self.span.lo, self.span.hi), }) }; Arg { ty: t, pat: pat, id: ast::DUMMY_NODE_ID } } pub fn maybe_parse_fixed_vstore(&mut self) -> Option<Gc<ast::Expr>> { if self.token == token::COMMA && self.look_ahead(1, |t| *t == token::DOTDOT) { self.bump(); self.bump(); Some(self.parse_expr()) } else { None } } /// Matches token_lit = LIT_INTEGER | ... 
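/// e.g. `42`, `'a'`, `b'x'`, `3.14`, `"str"`, `r"raw"`, `b"bytes"`, and
/// `()` for the nil literal (illustrative examples, not a full grammar).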
pub fn lit_from_token(&mut self, tok: &token::Token) -> Lit_ { match *tok { token::LIT_BYTE(i) => LitByte(parse::byte_lit(i.as_str()).val0()), token::LIT_CHAR(i) => LitChar(parse::char_lit(i.as_str()).val0()), token::LIT_INTEGER(s) => parse::integer_lit(s.as_str(), &self.sess.span_diagnostic, self.span), token::LIT_FLOAT(s) => parse::float_lit(s.as_str()), token::LIT_STR(s) => { LitStr(token::intern_and_get_ident(parse::str_lit(s.as_str()).as_slice()), ast::CookedStr) } token::LIT_STR_RAW(s, n) => { LitStr(token::intern_and_get_ident(parse::raw_str_lit(s.as_str()).as_slice()), ast::RawStr(n)) } token::LIT_BINARY(i) => LitBinary(parse::binary_lit(i.as_str())), token::LIT_BINARY_RAW(i, _) => LitBinary(Rc::new(i.as_str().as_bytes().iter().map(|&x| x).collect())), token::LPAREN => { self.expect(&token::RPAREN); LitNil }, _ => { self.unexpected_last(tok); } } } /// Matches lit = true | false | token_lit pub fn parse_lit(&mut self) -> Lit { let lo = self.span.lo; let lit = if self.eat_keyword(keywords::True) { LitBool(true) } else if self.eat_keyword(keywords::False) { LitBool(false) } else { let token = self.bump_and_get(); let lit = self.lit_from_token(&token); lit }; codemap::Spanned { node: lit, span: mk_sp(lo, self.last_span.hi) } } /// matches '-' lit | lit pub fn parse_literal_maybe_minus(&mut self) -> Gc<Expr> { let minus_lo = self.span.lo; let minus_present = self.eat(&token::BINOP(token::MINUS)); let lo = self.span.lo; let literal = box(GC) self.parse_lit(); let hi = self.span.hi; let expr = self.mk_expr(lo, hi, ExprLit(literal)); if minus_present { let minus_hi = self.span.hi; let unary = self.mk_unary(UnNeg, expr); self.mk_expr(minus_lo, minus_hi, unary) } else { expr } } /// Parses a path and optional type parameter bounds, depending on the /// mode. The `mode` parameter determines whether lifetimes, types, and/or /// bounds are permitted and whether `::` must precede type parameter /// groups. pub fn parse_path(&mut self, mode: PathParsingMode) -> PathAndBounds { // Check for a whole path... let found = match self.token { INTERPOLATED(token::NtPath(_)) => Some(self.bump_and_get()), _ => None, }; match found { Some(INTERPOLATED(token::NtPath(box path))) => { return PathAndBounds { path: path, bounds: None, } } _ => {} } let lo = self.span.lo; let is_global = self.eat(&token::MOD_SEP); // Parse any number of segments and bound sets. A segment is an // identifier followed by an optional lifetime and a set of types. // A bound set is a set of type parameter bounds. let mut segments = Vec::new(); loop { // First, parse an identifier. let identifier = self.parse_ident(); // Parse the '::' before type parameters if it's required. If // it is required and wasn't present, then we're done. if mode == LifetimeAndTypesWithColons && !self.eat(&token::MOD_SEP) { segments.push(ast::PathSegment { identifier: identifier, lifetimes: Vec::new(), types: OwnedSlice::empty(), }); break } // Parse the `<` before the lifetime and types, if applicable. let (any_lifetime_or_types, lifetimes, types) = { if mode != NoTypesAllowed && self.eat_lt(false) { let (lifetimes, types) = self.parse_generic_values_after_lt(); (true, lifetimes, OwnedSlice::from_vec(types)) } else { (false, Vec::new(), OwnedSlice::empty()) } }; // Assemble and push the result. segments.push(ast::PathSegment { identifier: identifier, lifetimes: lifetimes, types: types, }); // We're done if we don't see a '::', unless the mode required // a double colon to get here in the first place. 
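// e.g. `a::b::c` keeps this loop going on each `::`; in the
// LifetimeAndTypesWithColons mode the separator has already been
// consumed at the top of the loop (illustrative note).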
if !(mode == LifetimeAndTypesWithColons && !any_lifetime_or_types) { if !self.eat(&token::MOD_SEP) { break } } } // Next, parse a plus and bounded type parameters, if applicable. let bounds = if mode == LifetimeAndTypesAndBounds { let bounds = { if self.eat(&token::BINOP(token::PLUS)) { let (_, bounds) = self.parse_ty_param_bounds(false); if bounds.len() == 0 { let last_span = self.last_span; self.span_err(last_span, "at least one type parameter bound \ must be specified after the `+`"); } Some(bounds) } else { None } }; bounds } else { None }; // Assemble the span. let span = mk_sp(lo, self.last_span.hi); // Assemble the result. PathAndBounds { path: ast::Path { span: span, global: is_global, segments: segments, }, bounds: bounds, } } /// parses 0 or 1 lifetime pub fn parse_opt_lifetime(&mut self) -> Option<ast::Lifetime> { match self.token { token::LIFETIME(..) => { Some(self.parse_lifetime()) } _ => { None } } } /// Parses a single lifetime /// Matches lifetime = LIFETIME pub fn parse_lifetime(&mut self) -> ast::Lifetime { match self.token { token::LIFETIME(i) => { let span = self.span; self.bump(); return ast::Lifetime { id: ast::DUMMY_NODE_ID, span: span, name: i.name }; } _ => { self.fatal(format!("expected a lifetime name").as_slice()); } } } // matches lifetimes = ( lifetime ) | ( lifetime , lifetimes ) // actually, it matches the empty one too, but putting that in there // messes up the grammar.... pub fn parse_lifetimes(&mut self) -> Vec<ast::Lifetime> { /*! * * Parses zero or more comma separated lifetimes. * Expects each lifetime to be followed by either * a comma or `>`. Used when parsing type parameter * lists, where we expect something like `<'a, 'b, T>`. */ let mut res = Vec::new(); loop { match self.token { token::LIFETIME(_) => { res.push(self.parse_lifetime()); } _ => { return res; } } match self.token { token::COMMA => { self.bump();} token::GT => { return res; } token::BINOP(token::SHR) => { return res; } _ => { let msg = format!("expected `,` or `>` after lifetime \ name, got: {:?}", self.token); self.fatal(msg.as_slice()); } } } } pub fn token_is_mutability(tok: &token::Token) -> bool { token::is_keyword(keywords::Mut, tok) || token::is_keyword(keywords::Const, tok) } /// Parse mutability declaration (mut/const/imm) pub fn parse_mutability(&mut self) -> Mutability { if self.eat_keyword(keywords::Mut) { MutMutable } else { MutImmutable } } /// Parse ident COLON expr pub fn parse_field(&mut self) -> Field { let lo = self.span.lo; let i = self.parse_ident(); let hi = self.last_span.hi; self.expect(&token::COLON); let e = self.parse_expr(); ast::Field { ident: spanned(lo, hi, i), expr: e, span: mk_sp(lo, e.span.hi), } } pub fn mk_expr(&mut self, lo: BytePos, hi: BytePos, node: Expr_) -> Gc<Expr> { box(GC) Expr { id: ast::DUMMY_NODE_ID, node: node, span: mk_sp(lo, hi), } } pub fn mk_unary(&mut self, unop: ast::UnOp, expr: Gc<Expr>) -> ast::Expr_ { ExprUnary(unop, expr) } pub fn mk_binary(&mut self, binop: ast::BinOp, lhs: Gc<Expr>, rhs: Gc<Expr>) -> ast::Expr_ { ExprBinary(binop, lhs, rhs) } pub fn mk_call(&mut self, f: Gc<Expr>, args: Vec<Gc<Expr>>) -> ast::Expr_ { ExprCall(f, args) } fn mk_method_call(&mut self, ident: ast::SpannedIdent, tps: Vec<P<Ty>>, args: Vec<Gc<Expr>>) -> ast::Expr_ { ExprMethodCall(ident, tps, args) } pub fn mk_index(&mut self, expr: Gc<Expr>, idx: Gc<Expr>) -> ast::Expr_ { ExprIndex(expr, idx) } pub fn mk_field(&mut self, expr: Gc<Expr>, ident: ast::SpannedIdent, tys: Vec<P<Ty>>) -> ast::Expr_ { ExprField(expr, ident, tys) } pub fn 
mk_assign_op(&mut self, binop: ast::BinOp, lhs: Gc<Expr>, rhs: Gc<Expr>) -> ast::Expr_ { ExprAssignOp(binop, lhs, rhs) } pub fn mk_mac_expr(&mut self, lo: BytePos, hi: BytePos, m: Mac_) -> Gc<Expr> { box(GC) Expr { id: ast::DUMMY_NODE_ID, node: ExprMac(codemap::Spanned {node: m, span: mk_sp(lo, hi)}), span: mk_sp(lo, hi), } } pub fn mk_lit_u32(&mut self, i: u32) -> Gc<Expr> { let span = &self.span; let lv_lit = box(GC) codemap::Spanned { node: LitUint(i as u64, TyU32), span: *span }; box(GC) Expr { id: ast::DUMMY_NODE_ID, node: ExprLit(lv_lit), span: *span, } } /// At the bottom (top?) of the precedence hierarchy, /// parse things like parenthesized exprs, /// macros, return, etc. pub fn parse_bottom_expr(&mut self) -> Gc<Expr> { maybe_whole_expr!(self); let lo = self.span.lo; let mut hi = self.span.hi; let ex: Expr_; match self.token { token::LPAREN => { self.bump(); // (e) is parenthesized e // (e,) is a tuple with only one field, e let mut trailing_comma = false; if self.token == token::RPAREN { hi = self.span.hi; self.bump(); let lit = box(GC) spanned(lo, hi, LitNil); return self.mk_expr(lo, hi, ExprLit(lit)); } let mut es = vec!(self.parse_expr()); self.commit_expr(*es.last().unwrap(), &[], &[token::COMMA, token::RPAREN]); while self.token == token::COMMA { self.bump(); if self.token != token::RPAREN { es.push(self.parse_expr()); self.commit_expr(*es.last().unwrap(), &[], &[token::COMMA, token::RPAREN]); } else { trailing_comma = true; } } hi = self.span.hi; self.commit_expr_expecting(*es.last().unwrap(), token::RPAREN); return if es.len() == 1 && !trailing_comma { self.mk_expr(lo, hi, ExprParen(*es.get(0))) } else { self.mk_expr(lo, hi, ExprTup(es)) } }, token::LBRACE => { self.bump(); let blk = self.parse_block_tail(lo, DefaultBlock); return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)); }, token::BINOP(token::OR) | token::OROR => { return self.parse_lambda_expr(); }, _ if self.eat_keyword(keywords::Proc) => { let decl = self.parse_proc_decl(); let body = self.parse_expr(); let fakeblock = P(ast::Block { view_items: Vec::new(), stmts: Vec::new(), expr: Some(body), id: ast::DUMMY_NODE_ID, rules: DefaultBlock, span: body.span, }); return self.mk_expr(lo, body.span.hi, ExprProc(decl, fakeblock)); }, // FIXME #13626: Should be able to stick in // token::SELF_KEYWORD_NAME token::IDENT(id @ ast::Ident{ name: ast::Name(token::SELF_KEYWORD_NAME_NUM), ctxt: _ } ,false) => { self.bump(); let path = ast_util::ident_to_path(mk_sp(lo, hi), id); ex = ExprPath(path); hi = self.last_span.hi; } _ if self.eat_keyword(keywords::If) => { return self.parse_if_expr(); }, _ if self.eat_keyword(keywords::For) => { return self.parse_for_expr(None); }, _ if self.eat_keyword(keywords::While) => { return self.parse_while_expr(); }, _ if Parser::token_is_lifetime(&self.token) => { let lifetime = self.get_lifetime(); self.bump(); self.expect(&token::COLON); if self.eat_keyword(keywords::For) { return self.parse_for_expr(Some(lifetime)) } else if self.eat_keyword(keywords::Loop) { return self.parse_loop_expr(Some(lifetime)) } else { self.fatal("expected `for` or `loop` after a label") } }, _ if self.eat_keyword(keywords::Loop) => { return self.parse_loop_expr(None); }, _ if self.eat_keyword(keywords::Continue) => { let lo = self.span.lo; let ex = if Parser::token_is_lifetime(&self.token) { let lifetime = self.get_lifetime(); self.bump(); ExprAgain(Some(lifetime)) } else { ExprAgain(None) }; let hi = self.span.hi; return self.mk_expr(lo, hi, ex); }, _ if self.eat_keyword(keywords::Match) => { return 
self.parse_match_expr(); }, _ if self.eat_keyword(keywords::Unsafe) => { return self.parse_block_expr(lo, UnsafeBlock(ast::UserProvided)); }, token::LBRACKET => { self.bump(); if self.token == token::RBRACKET { // Empty vector. self.bump(); ex = ExprVec(Vec::new()); } else { // Nonempty vector. let first_expr = self.parse_expr(); if self.token == token::COMMA && self.look_ahead(1, |t| *t == token::DOTDOT) { // Repeating vector syntax: [ 0, ..512 ] self.bump(); self.bump(); let count = self.parse_expr(); self.expect(&token::RBRACKET); ex = ExprRepeat(first_expr, count); } else if self.token == token::COMMA { // Vector with two or more elements. self.bump(); let remaining_exprs = self.parse_seq_to_end( &token::RBRACKET, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_expr() ); let mut exprs = vec!(first_expr); exprs.push_all_move(remaining_exprs); ex = ExprVec(exprs); } else { // Vector with one element. self.expect(&token::RBRACKET); ex = ExprVec(vec!(first_expr)); } } hi = self.last_span.hi; }, _ if self.eat_keyword(keywords::Return) => { // RETURN expression if can_begin_expr(&self.token) { let e = self.parse_expr(); hi = e.span.hi; ex = ExprRet(Some(e)); } else { ex = ExprRet(None); } }, _ if self.eat_keyword(keywords::Break) => { // BREAK expression if Parser::token_is_lifetime(&self.token) { let lifetime = self.get_lifetime(); self.bump(); ex = ExprBreak(Some(lifetime)); } else { ex = ExprBreak(None); } hi = self.span.hi; }, _ if self.token == token::MOD_SEP || is_ident(&self.token) && !self.is_keyword(keywords::True) && !self.is_keyword(keywords::False) => { let pth = self.parse_path(LifetimeAndTypesWithColons).path; // `!`, as an operator, is prefix, so we know this isn't that if self.token == token::NOT { // MACRO INVOCATION expression self.bump(); let ket = token::close_delimiter_for(&self.token) .unwrap_or_else(|| self.fatal("expected open delimiter")); self.bump(); let tts = self.parse_seq_to_end(&ket, seq_sep_none(), |p| p.parse_token_tree()); let hi = self.span.hi; return self.mk_mac_expr(lo, hi, MacInvocTT(pth, tts, EMPTY_CTXT)); } else if self.token == token::LBRACE { // This is a struct literal, unless we're prohibited from // parsing struct literals here. if self.restriction != RESTRICT_NO_STRUCT_LITERAL { // It's a struct literal. 
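// e.g. `Foo { x: 1, ..base }` — the loop below collects the fields and
// `..base` becomes the functional-update base (illustrative example).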
self.bump(); let mut fields = Vec::new(); let mut base = None; while self.token != token::RBRACE { if self.eat(&token::DOTDOT) { base = Some(self.parse_expr()); break; } fields.push(self.parse_field()); self.commit_expr(fields.last().unwrap().expr, &[token::COMMA], &[token::RBRACE]); } if fields.len() == 0 && base.is_none() { let last_span = self.last_span; self.span_err(last_span, "structure literal must either have at \ least one field or use functional \ structure update syntax"); } hi = self.span.hi; self.expect(&token::RBRACE); ex = ExprStruct(pth, fields, base); return self.mk_expr(lo, hi, ex); } } hi = pth.span.hi; ex = ExprPath(pth); }, _ => { // other literal expression let lit = self.parse_lit(); hi = lit.span.hi; ex = ExprLit(box(GC) lit); } } return self.mk_expr(lo, hi, ex); } /// Parse a block or unsafe block pub fn parse_block_expr(&mut self, lo: BytePos, blk_mode: BlockCheckMode) -> Gc<Expr> { self.expect(&token::LBRACE); let blk = self.parse_block_tail(lo, blk_mode); return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)); } /// parse a.b or a(13) or a[4] or just a pub fn parse_dot_or_call_expr(&mut self) -> Gc<Expr> { let b = self.parse_bottom_expr(); self.parse_dot_or_call_expr_with(b) } pub fn parse_dot_or_call_expr_with(&mut self, e0: Gc<Expr>) -> Gc<Expr> { let mut e = e0; let lo = e.span.lo; let mut hi; loop { // expr.f if self.eat(&token::DOT) { match self.token { token::IDENT(i, _) => { let dot = self.last_span.hi; hi = self.span.hi; self.bump(); let (_, tys) = if self.eat(&token::MOD_SEP) { self.expect_lt(); self.parse_generic_values_after_lt() } else { (Vec::new(), Vec::new()) }; // expr.f() method call match self.token { token::LPAREN => { let mut es = self.parse_unspanned_seq( &token::LPAREN, &token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_expr() ); hi = self.last_span.hi; es.unshift(e); let id = spanned(dot, hi, i); let nd = self.mk_method_call(id, tys, es); e = self.mk_expr(lo, hi, nd); } _ => { let id = spanned(dot, hi, i); let field = self.mk_field(e, id, tys); e = self.mk_expr(lo, hi, field) } } } _ => self.unexpected() } continue; } if self.expr_is_complete(e) { break; } match self.token { // expr(...) token::LPAREN => { let es = self.parse_unspanned_seq( &token::LPAREN, &token::RPAREN, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_expr() ); hi = self.last_span.hi; let nd = self.mk_call(e, es); e = self.mk_expr(lo, hi, nd); } // expr[...] token::LBRACKET => { self.bump(); let ix = self.parse_expr(); hi = self.span.hi; self.commit_expr_expecting(ix, token::RBRACKET); let index = self.mk_index(e, ix); e = self.mk_expr(lo, hi, index) } _ => return e } } return e; } /// Parse an optional separator followed by a kleene-style /// repetition token (+ or *). pub fn parse_sep_and_zerok(&mut self) -> (Option<token::Token>, bool) { fn parse_zerok(parser: &mut Parser) -> Option<bool> { match parser.token { token::BINOP(token::STAR) | token::BINOP(token::PLUS) => { let zerok = parser.token == token::BINOP(token::STAR); parser.bump(); Some(zerok) }, _ => None } }; match parse_zerok(self) { Some(zerok) => return (None, zerok), None => {} } let separator = self.bump_and_get(); match parse_zerok(self) { Some(zerok) => (Some(separator), zerok), None => self.fatal("expected `*` or `+`") } } /// parse a single token tree from the input. pub fn parse_token_tree(&mut self) -> TokenTree { // FIXME #6994: currently, this is too eager. 
It // parses token trees but also identifies TTSeq's // and TTNonterminal's; it's too early to know yet // whether something will be a nonterminal or a seq // yet. maybe_whole!(deref self, NtTT); // this is the fall-through for the 'match' below. // invariants: the current token is not a left-delimiter, // not an EOF, and not the desired right-delimiter (if // it were, parse_seq_to_before_end would have prevented // reaching this point. fn parse_non_delim_tt_tok(p: &mut Parser) -> TokenTree { maybe_whole!(deref p, NtTT); match p.token { token::RPAREN | token::RBRACE | token::RBRACKET => { // This is a conservative error: only report the last unclosed delimiter. The // previous unclosed delimiters could actually be closed! The parser just hasn't // gotten to them yet. match p.open_braces.last() { None => {} Some(&sp) => p.span_note(sp, "unclosed delimiter"), }; let token_str = p.this_token_to_string(); p.fatal(format!("incorrect close delimiter: `{}`", token_str).as_slice()) }, /* we ought to allow different depths of unquotation */ token::DOLLAR if p.quote_depth > 0u => { p.bump(); let sp = p.span; if p.token == token::LPAREN { let seq = p.parse_seq( &token::LPAREN, &token::RPAREN, seq_sep_none(), |p| p.parse_token_tree() ); let (s, z) = p.parse_sep_and_zerok(); let seq = match seq { Spanned { node, .. } => node, }; TTSeq(mk_sp(sp.lo, p.span.hi), Rc::new(seq), s, z) } else { TTNonterminal(sp, p.parse_ident()) } } _ => { parse_any_tt_tok(p) } } } // turn the next token into a TTTok: fn parse_any_tt_tok(p: &mut Parser) -> TokenTree { TTTok(p.span, p.bump_and_get()) } match (&self.token, token::close_delimiter_for(&self.token)) { (&token::EOF, _) => { let open_braces = self.open_braces.clone(); for sp in open_braces.iter() { self.span_note(*sp, "Did you mean to close this delimiter?"); } // There shouldn't really be a span, but it's easier for the test runner // if we give it one self.fatal("this file contains an un-closed delimiter "); } (_, Some(close_delim)) => { // Parse the open delimiter. self.open_braces.push(self.span); let mut result = vec!(parse_any_tt_tok(self)); let trees = self.parse_seq_to_before_end(&close_delim, seq_sep_none(), |p| p.parse_token_tree()); result.push_all_move(trees); // Parse the close delimiter. result.push(parse_any_tt_tok(self)); self.open_braces.pop().unwrap(); TTDelim(Rc::new(result)) } _ => parse_non_delim_tt_tok(self) } } // parse a stream of tokens into a list of TokenTree's, // up to EOF. pub fn parse_all_token_trees(&mut self) -> Vec<TokenTree> { let mut tts = Vec::new(); while self.token != token::EOF { tts.push(self.parse_token_tree()); } tts } pub fn parse_matchers(&mut self) -> Vec<Matcher> { // unification of Matcher's and TokenTree's would vastly improve // the interpolation of Matcher's maybe_whole!(self, NtMatchers); let mut name_idx = 0u; match token::close_delimiter_for(&self.token) { Some(other_delimiter) => { self.bump(); self.parse_matcher_subseq_upto(&mut name_idx, &other_delimiter) } None => self.fatal("expected open delimiter") } } /// This goofy function is necessary to correctly match parens in Matcher's. /// Otherwise, `$( ( )` would be a valid Matcher, and `$( () )` would be /// invalid. It's similar to common::parse_seq. 
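/// The `lparens` counter below tracks paren depth, so `ket` only
/// terminates the sequence at depth zero (descriptive note).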
pub fn parse_matcher_subseq_upto(&mut self, name_idx: &mut uint, ket: &token::Token) -> Vec<Matcher> { let mut ret_val = Vec::new(); let mut lparens = 0u; while self.token != *ket || lparens > 0u { if self.token == token::LPAREN { lparens += 1u; } if self.token == token::RPAREN { lparens -= 1u; } ret_val.push(self.parse_matcher(name_idx)); } self.bump(); return ret_val; } pub fn parse_matcher(&mut self, name_idx: &mut uint) -> Matcher { let lo = self.span.lo; let m = if self.token == token::DOLLAR { self.bump(); if self.token == token::LPAREN { let name_idx_lo = *name_idx; self.bump(); let ms = self.parse_matcher_subseq_upto(name_idx, &token::RPAREN); if ms.len() == 0u { self.fatal("repetition body must be nonempty"); } let (sep, zerok) = self.parse_sep_and_zerok(); MatchSeq(ms, sep, zerok, name_idx_lo, *name_idx) } else { let bound_to = self.parse_ident(); self.expect(&token::COLON); let nt_name = self.parse_ident(); let m = MatchNonterminal(bound_to, nt_name, *name_idx); *name_idx += 1; m } } else { MatchTok(self.bump_and_get()) }; return spanned(lo, self.span.hi, m); } /// Parse a prefix-operator expr pub fn parse_prefix_expr(&mut self) -> Gc<Expr> { let lo = self.span.lo; let hi; let ex; match self.token { token::NOT => { self.bump(); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = self.mk_unary(UnNot, e); } token::BINOP(token::MINUS) => { self.bump(); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = self.mk_unary(UnNeg, e); } token::BINOP(token::STAR) => { self.bump(); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = self.mk_unary(UnDeref, e); } token::BINOP(token::AND) | token::ANDAND => { self.expect_and(); let m = self.parse_mutability(); let e = self.parse_prefix_expr(); hi = e.span.hi; // HACK: turn &[...] into a &-vec ex = match e.node { ExprVec(..) if m == MutImmutable => { ExprVstore(e, ExprVstoreSlice) } ExprVec(..) if m == MutMutable => { ExprVstore(e, ExprVstoreMutSlice) } _ => ExprAddrOf(m, e) }; } token::AT => { self.bump(); let span = self.last_span; self.obsolete(span, ObsoleteManagedExpr); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = self.mk_unary(UnBox, e); } token::TILDE => { self.bump(); let e = self.parse_prefix_expr(); hi = e.span.hi; // HACK: turn ~[...] into a ~-vec let last_span = self.last_span; ex = match e.node { ExprVec(..) | ExprRepeat(..) => { self.obsolete(last_span, ObsoleteOwnedVector); ExprVstore(e, ExprVstoreUniq) } ExprLit(lit) if lit_is_str(lit) => { self.obsolete(last_span, ObsoleteOwnedExpr); ExprVstore(e, ExprVstoreUniq) } _ => { self.obsolete(last_span, ObsoleteOwnedExpr); self.mk_unary(UnUniq, e) } }; } token::IDENT(_, _) if self.is_keyword(keywords::Box) => { self.bump(); // Check for a place: `box(PLACE) EXPR`. if self.eat(&token::LPAREN) { // Support `box() EXPR` as the default. if !self.eat(&token::RPAREN) { let place = self.parse_expr(); self.expect(&token::RPAREN); let subexpression = self.parse_prefix_expr(); hi = subexpression.span.hi; ex = ExprBox(place, subexpression); return self.mk_expr(lo, hi, ex); } } // Otherwise, we use the unique pointer default. let subexpression = self.parse_prefix_expr(); hi = subexpression.span.hi; // HACK: turn `box [...]` into a boxed-vec ex = match subexpression.node { ExprVec(..) | ExprRepeat(..) 
=> { let last_span = self.last_span; self.obsolete(last_span, ObsoleteOwnedVector); ExprVstore(subexpression, ExprVstoreUniq) } ExprLit(lit) if lit_is_str(lit) => { ExprVstore(subexpression, ExprVstoreUniq) } _ => self.mk_unary(UnUniq, subexpression) }; } _ => return self.parse_dot_or_call_expr() } return self.mk_expr(lo, hi, ex); } /// Parse an expression of binops pub fn parse_binops(&mut self) -> Gc<Expr> { let prefix_expr = self.parse_prefix_expr(); self.parse_more_binops(prefix_expr, 0) } /// Parse an expression of binops of at least min_prec precedence pub fn parse_more_binops(&mut self, lhs: Gc<Expr>, min_prec: uint) -> Gc<Expr> { if self.expr_is_complete(lhs) { return lhs; } // Prevent dynamic borrow errors later on by limiting the // scope of the borrows. { let token: &token::Token = &self.token; let restriction: &restriction = &self.restriction; match (token, restriction) { (&token::BINOP(token::OR), &RESTRICT_NO_BAR_OP) => return lhs, (&token::BINOP(token::OR), &RESTRICT_NO_BAR_OR_DOUBLEBAR_OP) => return lhs, (&token::OROR, &RESTRICT_NO_BAR_OR_DOUBLEBAR_OP) => return lhs, _ => { } } } let cur_opt = token_to_binop(&self.token); match cur_opt { Some(cur_op) => { let cur_prec = operator_prec(cur_op); if cur_prec > min_prec { self.bump(); let expr = self.parse_prefix_expr(); let rhs = self.parse_more_binops(expr, cur_prec); let binary = self.mk_binary(cur_op, lhs, rhs); let bin = self.mk_expr(lhs.span.lo, rhs.span.hi, binary); self.parse_more_binops(bin, min_prec) } else { lhs } } None => { if as_prec > min_prec && self.eat_keyword(keywords::As) { let rhs = self.parse_ty(false); let _as = self.mk_expr(lhs.span.lo, rhs.span.hi, ExprCast(lhs, rhs)); self.parse_more_binops(_as, min_prec) } else { lhs } } } } /// Parse an assignment expression.... /// actually, this seems to be the main entry point for /// parsing an arbitrary expression. 
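/// e.g. `x = f(y)` yields ExprAssign and `x += 1` yields ExprAssignOp;
/// anything without `=`/`op=` falls through as a plain binop expression
/// (illustrative examples).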
pub fn parse_assign_expr(&mut self) -> Gc<Expr> { let lo = self.span.lo; let lhs = self.parse_binops(); match self.token { token::EQ => { self.bump(); let rhs = self.parse_expr(); self.mk_expr(lo, rhs.span.hi, ExprAssign(lhs, rhs)) } token::BINOPEQ(op) => { self.bump(); let rhs = self.parse_expr(); let aop = match op { token::PLUS => BiAdd, token::MINUS => BiSub, token::STAR => BiMul, token::SLASH => BiDiv, token::PERCENT => BiRem, token::CARET => BiBitXor, token::AND => BiBitAnd, token::OR => BiBitOr, token::SHL => BiShl, token::SHR => BiShr }; let assign_op = self.mk_assign_op(aop, lhs, rhs); self.mk_expr(lo, rhs.span.hi, assign_op) } _ => { lhs } } } /// Parse an 'if' expression ('if' token already eaten) pub fn parse_if_expr(&mut self) -> Gc<Expr> { let lo = self.last_span.lo; let cond = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL); let thn = self.parse_block(); let mut els: Option<Gc<Expr>> = None; let mut hi = thn.span.hi; if self.eat_keyword(keywords::Else) { let elexpr = self.parse_else_expr(); els = Some(elexpr); hi = elexpr.span.hi; } self.mk_expr(lo, hi, ExprIf(cond, thn, els)) } // `|args| expr` pub fn parse_lambda_expr(&mut self) -> Gc<Expr> { let lo = self.span.lo; let (decl, is_unboxed) = self.parse_fn_block_decl(); let body = self.parse_expr(); let fakeblock = P(ast::Block { view_items: Vec::new(), stmts: Vec::new(), expr: Some(body), id: ast::DUMMY_NODE_ID, rules: DefaultBlock, span: body.span, }); if is_unboxed { self.mk_expr(lo, body.span.hi, ExprUnboxedFn(decl, fakeblock)) } else { self.mk_expr(lo, body.span.hi, ExprFnBlock(decl, fakeblock)) } } pub fn parse_else_expr(&mut self) -> Gc<Expr> { if self.eat_keyword(keywords::If) { return self.parse_if_expr(); } else { let blk = self.parse_block(); return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)); } } /// Parse a 'for' .. 
'in' expression ('for' token already eaten) pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>) -> Gc<Expr> { // Parse: `for <src_pat> in <src_expr> <src_loop_block>` let lo = self.last_span.lo; let pat = self.parse_pat(); self.expect_keyword(keywords::In); let expr = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL); let loop_block = self.parse_block(); let hi = self.span.hi; self.mk_expr(lo, hi, ExprForLoop(pat, expr, loop_block, opt_ident)) } pub fn parse_while_expr(&mut self) -> Gc<Expr> { let lo = self.last_span.lo; let cond = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL); let body = self.parse_block(); let hi = body.span.hi; return self.mk_expr(lo, hi, ExprWhile(cond, body)); } pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>) -> Gc<Expr> { let lo = self.last_span.lo; let body = self.parse_block(); let hi = body.span.hi; self.mk_expr(lo, hi, ExprLoop(body, opt_ident)) } fn parse_match_expr(&mut self) -> Gc<Expr> { let lo = self.last_span.lo; let discriminant = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL); self.commit_expr_expecting(discriminant, token::LBRACE); let mut arms: Vec<Arm> = Vec::new(); while self.token != token::RBRACE { let attrs = self.parse_outer_attributes(); let pats = self.parse_pats(); let mut guard = None; if self.eat_keyword(keywords::If) { guard = Some(self.parse_expr()); } self.expect(&token::FAT_ARROW); let expr = self.parse_expr_res(RESTRICT_STMT_EXPR); let require_comma = !classify::expr_is_simple_block(expr) && self.token != token::RBRACE; if require_comma { self.commit_expr(expr, &[token::COMMA], &[token::RBRACE]); } else { self.eat(&token::COMMA); } arms.push(ast::Arm { attrs: attrs, pats: pats, guard: guard, body: expr }); } let hi = self.span.hi; self.bump(); return self.mk_expr(lo, hi, ExprMatch(discriminant, arms)); } /// Parse an expression pub fn parse_expr(&mut self) -> Gc<Expr> { return self.parse_expr_res(UNRESTRICTED); } /// Parse an expression, subject to the given restriction pub fn parse_expr_res(&mut self, r: restriction) -> Gc<Expr> { let old = self.restriction; self.restriction = r; let e = self.parse_assign_expr(); self.restriction = old; return e; } /// Parse the RHS of a local variable declaration (e.g. '= 14;') fn parse_initializer(&mut self) -> Option<Gc<Expr>> { if self.token == token::EQ { self.bump(); Some(self.parse_expr()) } else { None } } /// Parse patterns, separated by '|' s fn parse_pats(&mut self) -> Vec<Gc<Pat>> { let mut pats = Vec::new(); loop { pats.push(self.parse_pat()); if self.token == token::BINOP(token::OR) { self.bump(); } else { return pats; } }; } fn parse_pat_vec_elements( &mut self, ) -> (Vec<Gc<Pat>> , Option<Gc<Pat>>, Vec<Gc<Pat>> ) { let mut before = Vec::new(); let mut slice = None; let mut after = Vec::new(); let mut first = true; let mut before_slice = true; while self.token != token::RBRACKET { if first { first = false; } else { self.expect(&token::COMMA); } let mut is_slice = false; if before_slice { if self.token == token::DOTDOT { self.bump(); is_slice = true; before_slice = false; } } if is_slice { if self.token == token::COMMA || self.token == token::RBRACKET { slice = Some(box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: PatWildMulti, span: self.span, }) } else { let subpat = self.parse_pat(); match *subpat { ast::Pat { node: PatIdent(_, _, _), .. } => { slice = Some(subpat); } ast::Pat { span, .. 
} => self.span_fatal( span, "expected an identifier or nothing" ) } } } else { let subpat = self.parse_pat(); if before_slice { before.push(subpat);<|fim▁hole|> } } } (before, slice, after) } /// Parse the fields of a struct-like pattern fn parse_pat_fields(&mut self) -> (Vec<ast::FieldPat> , bool) { let mut fields = Vec::new(); let mut etc = false; let mut first = true; while self.token != token::RBRACE { if first { first = false; } else { self.expect(&token::COMMA); // accept trailing commas if self.token == token::RBRACE { break } } if self.token == token::DOTDOT { self.bump(); if self.token != token::RBRACE { let token_str = self.this_token_to_string(); self.fatal(format!("expected `{}`, found `{}`", "}", token_str).as_slice()) } etc = true; break; } let bind_type = if self.eat_keyword(keywords::Mut) { BindByValue(MutMutable) } else if self.eat_keyword(keywords::Ref) { BindByRef(self.parse_mutability()) } else { BindByValue(MutImmutable) }; let fieldname = self.parse_ident(); let subpat = if self.token == token::COLON { match bind_type { BindByRef(..) | BindByValue(MutMutable) => { let token_str = self.this_token_to_string(); self.fatal(format!("unexpected `{}`", token_str).as_slice()) } _ => {} } self.bump(); self.parse_pat() } else { let fieldpath = codemap::Spanned{span:self.last_span, node: fieldname}; box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: PatIdent(bind_type, fieldpath, None), span: self.last_span } }; fields.push(ast::FieldPat { ident: fieldname, pat: subpat }); } return (fields, etc); } /// Parse a pattern. pub fn parse_pat(&mut self) -> Gc<Pat> { maybe_whole!(self, NtPat); let lo = self.span.lo; let mut hi; let pat; match self.token { // parse _ token::UNDERSCORE => { self.bump(); pat = PatWild; hi = self.last_span.hi; return box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: mk_sp(lo, hi) } } token::TILDE => { // parse ~pat self.bump(); let sub = self.parse_pat(); pat = PatBox(sub); let last_span = self.last_span; hi = last_span.hi; self.obsolete(last_span, ObsoleteOwnedPattern); return box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: mk_sp(lo, hi) } } token::BINOP(token::AND) | token::ANDAND => { // parse &pat let lo = self.span.lo; self.expect_and(); let sub = self.parse_pat(); pat = PatRegion(sub); hi = self.last_span.hi; return box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: mk_sp(lo, hi) } } token::LPAREN => { // parse (pat,pat,pat,...) as tuple self.bump(); if self.token == token::RPAREN { hi = self.span.hi; self.bump(); let lit = box(GC) codemap::Spanned { node: LitNil, span: mk_sp(lo, hi)}; let expr = self.mk_expr(lo, hi, ExprLit(lit)); pat = PatLit(expr); } else { let mut fields = vec!(self.parse_pat()); if self.look_ahead(1, |t| *t != token::RPAREN) { while self.token == token::COMMA { self.bump(); if self.token == token::RPAREN { break; } fields.push(self.parse_pat()); } } if fields.len() == 1 { self.expect(&token::COMMA); } self.expect(&token::RPAREN); pat = PatTup(fields); } hi = self.last_span.hi; return box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: mk_sp(lo, hi) } } token::LBRACKET => { // parse [pat,pat,...] 
as vector pattern self.bump(); let (before, slice, after) = self.parse_pat_vec_elements(); self.expect(&token::RBRACKET); pat = ast::PatVec(before, slice, after); hi = self.last_span.hi; return box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: mk_sp(lo, hi) } } _ => {} } // at this point, token != _, ~, &, &&, (, [ if (!is_ident_or_path(&self.token) && self.token != token::MOD_SEP) || self.is_keyword(keywords::True) || self.is_keyword(keywords::False) { // Parse an expression pattern or exp .. exp. // // These expressions are limited to literals (possibly // preceded by unary-minus) or identifiers. let val = self.parse_literal_maybe_minus(); if self.eat(&token::DOTDOT) { let end = if is_ident_or_path(&self.token) { let path = self.parse_path(LifetimeAndTypesWithColons) .path; let hi = self.span.hi; self.mk_expr(lo, hi, ExprPath(path)) } else { self.parse_literal_maybe_minus() }; pat = PatRange(val, end); } else { pat = PatLit(val); } } else if self.eat_keyword(keywords::Mut) { pat = self.parse_pat_ident(BindByValue(MutMutable)); } else if self.eat_keyword(keywords::Ref) { // parse ref pat let mutbl = self.parse_mutability(); pat = self.parse_pat_ident(BindByRef(mutbl)); } else if self.eat_keyword(keywords::Box) { // `box PAT` // // FIXME(#13910): Rename to `PatBox` and extend to full DST // support. let sub = self.parse_pat(); pat = PatBox(sub); hi = self.last_span.hi; return box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: mk_sp(lo, hi) } } else { let can_be_enum_or_struct = self.look_ahead(1, |t| { match *t { token::LPAREN | token::LBRACKET | token::LT | token::LBRACE | token::MOD_SEP => true, _ => false, } }); if self.look_ahead(1, |t| *t == token::DOTDOT) { let start = self.parse_expr_res(RESTRICT_NO_BAR_OP); self.eat(&token::DOTDOT); let end = self.parse_expr_res(RESTRICT_NO_BAR_OP); pat = PatRange(start, end); } else if is_plain_ident(&self.token) && !can_be_enum_or_struct { let id = self.parse_ident(); let id_span = self.last_span; let pth1 = codemap::Spanned{span:id_span, node: id}; if self.eat(&token::NOT) { // macro invocation let ket = token::close_delimiter_for(&self.token) .unwrap_or_else(|| self.fatal("expected open delimiter")); self.bump(); let tts = self.parse_seq_to_end(&ket, seq_sep_none(), |p| p.parse_token_tree()); let mac = MacInvocTT(ident_to_path(id_span,id), tts, EMPTY_CTXT); pat = ast::PatMac(codemap::Spanned {node: mac, span: self.span}); } else { let sub = if self.eat(&token::AT) { // parse foo @ pat Some(self.parse_pat()) } else { // or just foo None }; pat = PatIdent(BindByValue(MutImmutable), pth1, sub); } } else { // parse an enum pat let enum_path = self.parse_path(LifetimeAndTypesWithColons) .path; match self.token { token::LBRACE => { self.bump(); let (fields, etc) = self.parse_pat_fields(); self.bump(); pat = PatStruct(enum_path, fields, etc); } _ => { let mut args: Vec<Gc<Pat>> = Vec::new(); match self.token { token::LPAREN => { let is_dotdot = self.look_ahead(1, |t| { match *t { token::DOTDOT => true, _ => false, } }); if is_dotdot { // This is a "top constructor only" pat self.bump(); self.bump(); self.expect(&token::RPAREN); pat = PatEnum(enum_path, None); } else { args = self.parse_enum_variant_seq( &token::LPAREN, &token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_pat() ); pat = PatEnum(enum_path, Some(args)); } }, _ => { if !enum_path.global && enum_path.segments.len() == 1 { // it could still be either an enum // or an identifier pattern, resolve // will sort it out: pat = 
PatIdent(BindByValue(MutImmutable), codemap::Spanned{ span: enum_path.span, node: enum_path.segments.get(0) .identifier}, None); } else { pat = PatEnum(enum_path, Some(args)); } } } } } } } hi = self.last_span.hi; box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: mk_sp(lo, hi), } } /// Parse ident or ident @ pat /// used by the copy foo and ref foo patterns to give a good /// error message when parsing mistakes like ref foo(a,b) fn parse_pat_ident(&mut self, binding_mode: ast::BindingMode) -> ast::Pat_ { if !is_plain_ident(&self.token) { let last_span = self.last_span; self.span_fatal(last_span, "expected identifier, found path"); } let ident = self.parse_ident(); let last_span = self.last_span; let name = codemap::Spanned{span: last_span, node: ident}; let sub = if self.eat(&token::AT) { Some(self.parse_pat()) } else { None }; // just to be friendly, if they write something like // ref Some(i) // we end up here with ( as the current token. This shortly // leads to a parse error. Note that if there is no explicit // binding mode then we do not end up here, because the lookahead // will direct us over to parse_enum_variant() if self.token == token::LPAREN { let last_span = self.last_span; self.span_fatal( last_span, "expected identifier, found enum pattern"); } PatIdent(binding_mode, name, sub) } /// Parse a local variable declaration fn parse_local(&mut self) -> Gc<Local> { let lo = self.span.lo; let pat = self.parse_pat(); let mut ty = P(Ty { id: ast::DUMMY_NODE_ID, node: TyInfer, span: mk_sp(lo, lo), }); if self.eat(&token::COLON) { ty = self.parse_ty(true); } let init = self.parse_initializer(); box(GC) ast::Local { ty: ty, pat: pat, init: init, id: ast::DUMMY_NODE_ID, span: mk_sp(lo, self.last_span.hi), source: LocalLet, } } /// Parse a "let" stmt fn parse_let(&mut self) -> Gc<Decl> { let lo = self.span.lo; let local = self.parse_local(); box(GC) spanned(lo, self.last_span.hi, DeclLocal(local)) } /// Parse a structure field fn parse_name_and_ty(&mut self, pr: Visibility, attrs: Vec<Attribute> ) -> StructField { let lo = self.span.lo; if !is_plain_ident(&self.token) { self.fatal("expected ident"); } let name = self.parse_ident(); self.expect(&token::COLON); let ty = self.parse_ty(true); spanned(lo, self.last_span.hi, ast::StructField_ { kind: NamedField(name, pr), id: ast::DUMMY_NODE_ID, ty: ty, attrs: attrs, }) } /// Parse a statement. may include decl. /// Precondition: any attributes are parsed already pub fn parse_stmt(&mut self, item_attrs: Vec<Attribute>) -> Gc<Stmt> { maybe_whole!(self, NtStmt); fn check_expected_item(p: &mut Parser, found_attrs: bool) { // If we have attributes then we should have an item if found_attrs { let last_span = p.last_span; p.span_err(last_span, "expected item after attributes"); } } let lo = self.span.lo; if self.is_keyword(keywords::Let) { check_expected_item(self, !item_attrs.is_empty()); self.expect_keyword(keywords::Let); let decl = self.parse_let(); return box(GC) spanned(lo, decl.span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID)); } else if is_ident(&self.token) && !token::is_any_keyword(&self.token) && self.look_ahead(1, |t| *t == token::NOT) { // it's a macro invocation: check_expected_item(self, !item_attrs.is_empty()); // Potential trouble: if we allow macros with paths instead of // idents, we'd need to look ahead past the whole path here... 
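// e.g. `foo!(...)` as a bare statement macro, or `foo! bar { ... }`
// where `bar` is the optional extra ident parsed just below
// (illustrative names).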
let pth = self.parse_path(NoTypesAllowed).path; self.bump(); let id = if token::close_delimiter_for(&self.token).is_some() { token::special_idents::invalid // no special identifier } else { self.parse_ident() }; // check that we're pointing at delimiters (need to check // again after the `if`, because of `parse_ident` // consuming more tokens). let (bra, ket) = match token::close_delimiter_for(&self.token) { Some(ket) => (self.token.clone(), ket), None => { // we only expect an ident if we didn't parse one // above. let ident_str = if id.name == token::special_idents::invalid.name { "identifier, " } else { "" }; let tok_str = self.this_token_to_string(); self.fatal(format!("expected {}`(` or `{{`, but found `{}`", ident_str, tok_str).as_slice()) } }; let tts = self.parse_unspanned_seq( &bra, &ket, seq_sep_none(), |p| p.parse_token_tree() ); let hi = self.span.hi; if id.name == token::special_idents::invalid.name { return box(GC) spanned(lo, hi, StmtMac( spanned(lo, hi, MacInvocTT(pth, tts, EMPTY_CTXT)), false)); } else { // if it has a special ident, it's definitely an item return box(GC) spanned(lo, hi, StmtDecl( box(GC) spanned(lo, hi, DeclItem( self.mk_item( lo, hi, id /*id is good here*/, ItemMac(spanned(lo, hi, MacInvocTT(pth, tts, EMPTY_CTXT))), Inherited, Vec::new(/*no attrs*/)))), ast::DUMMY_NODE_ID)); } } else { let found_attrs = !item_attrs.is_empty(); match self.parse_item_or_view_item(item_attrs, false) { IoviItem(i) => { let hi = i.span.hi; let decl = box(GC) spanned(lo, hi, DeclItem(i)); return box(GC) spanned(lo, hi, StmtDecl(decl, ast::DUMMY_NODE_ID)); } IoviViewItem(vi) => { self.span_fatal(vi.span, "view items must be declared at the top of the block"); } IoviForeignItem(_) => { self.fatal("foreign items are not allowed here"); } IoviNone(_) => { /* fallthrough */ } } check_expected_item(self, found_attrs); // Remainder are line-expr stmts. let e = self.parse_expr_res(RESTRICT_STMT_EXPR); return box(GC) spanned(lo, e.span.hi, StmtExpr(e, ast::DUMMY_NODE_ID)); } } /// Is this expression a successfully-parsed statement? fn expr_is_complete(&mut self, e: Gc<Expr>) -> bool { return self.restriction == RESTRICT_STMT_EXPR && !classify::expr_requires_semi_to_be_stmt(e); } /// Parse a block. No inner attrs are allowed. pub fn parse_block(&mut self) -> P<Block> { maybe_whole!(no_clone self, NtBlock); let lo = self.span.lo; self.expect(&token::LBRACE); return self.parse_block_tail_(lo, DefaultBlock, Vec::new()); } /// Parse a block. Inner attrs are allowed. fn parse_inner_attrs_and_block(&mut self) -> (Vec<Attribute> , P<Block>) { maybe_whole!(pair_empty self, NtBlock); let lo = self.span.lo; self.expect(&token::LBRACE); let (inner, next) = self.parse_inner_attrs_and_next(); (inner, self.parse_block_tail_(lo, DefaultBlock, next)) } /// Precondition: already parsed the '{' or '#{' /// I guess that also means "already parsed the 'impure'" if /// necessary, and this should take a qualifier. /// Some blocks start with "#{"... fn parse_block_tail(&mut self, lo: BytePos, s: BlockCheckMode) -> P<Block> { self.parse_block_tail_(lo, s, Vec::new()) } /// Parse the rest of a block expression or function body fn parse_block_tail_(&mut self, lo: BytePos, s: BlockCheckMode, first_item_attrs: Vec<Attribute> ) -> P<Block> { let mut stmts = Vec::new(); let mut expr = None; // wouldn't it be more uniform to parse view items only, here? let ParsedItemsAndViewItems { attrs_remaining: attrs_remaining, view_items: view_items, items: items, .. 
} = self.parse_items_and_view_items(first_item_attrs, false, false); for item in items.iter() { let decl = box(GC) spanned(item.span.lo, item.span.hi, DeclItem(*item)); stmts.push(box(GC) spanned(item.span.lo, item.span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID))); } let mut attributes_box = attrs_remaining; while self.token != token::RBRACE { // parsing items even when they're not allowed lets us give // better error messages and recover more gracefully. attributes_box.push_all(self.parse_outer_attributes().as_slice()); match self.token { token::SEMI => { if !attributes_box.is_empty() { let last_span = self.last_span; self.span_err(last_span, "expected item after attributes"); attributes_box = Vec::new(); } self.bump(); // empty } token::RBRACE => { // fall through and out. } _ => { let stmt = self.parse_stmt(attributes_box); attributes_box = Vec::new(); match stmt.node { StmtExpr(e, stmt_id) => { // expression without semicolon if classify::stmt_ends_with_semi(&*stmt) { // Just check for errors and recover; do not eat semicolon yet. self.commit_stmt(stmt, &[], &[token::SEMI, token::RBRACE]); } match self.token { token::SEMI => { self.bump(); let span_with_semi = Span { lo: stmt.span.lo, hi: self.last_span.hi, expn_info: stmt.span.expn_info, }; stmts.push(box(GC) codemap::Spanned { node: StmtSemi(e, stmt_id), span: span_with_semi, }); } token::RBRACE => { expr = Some(e); } _ => { stmts.push(stmt); } } } StmtMac(ref m, _) => { // statement macro; might be an expr match self.token { token::SEMI => { self.bump(); stmts.push(box(GC) codemap::Spanned { node: StmtMac((*m).clone(), true), span: stmt.span, }); } token::RBRACE => { // if a block ends in `m!(arg)` without // a `;`, it must be an expr expr = Some( self.mk_mac_expr(stmt.span.lo, stmt.span.hi, m.node.clone())); } _ => { stmts.push(stmt); } } } _ => { // all other kinds of statements: stmts.push(stmt.clone()); if classify::stmt_ends_with_semi(&*stmt) { self.commit_stmt_expecting(stmt, token::SEMI); } } } } } } if !attributes_box.is_empty() { let last_span = self.last_span; self.span_err(last_span, "expected item after attributes"); } let hi = self.span.hi; self.bump(); P(ast::Block { view_items: view_items, stmts: stmts, expr: expr, id: ast::DUMMY_NODE_ID, rules: s, span: mk_sp(lo, hi), }) } fn parse_unboxed_function_type(&mut self) -> UnboxedFnTy { let inputs = if self.eat(&token::OROR) { Vec::new() } else { self.expect_or(); if self.token == token::BINOP(token::AND) && self.look_ahead(1, |t| { token::is_keyword(keywords::Mut, t) }) && self.look_ahead(2, |t| *t == token::COLON) { self.bump(); self.bump(); self.bump(); } let inputs = self.parse_seq_to_before_or(&token::COMMA, |p| { p.parse_arg_general(false) }); self.expect_or(); inputs }; let (return_style, output) = self.parse_ret_ty(); UnboxedFnTy { decl: P(FnDecl { inputs: inputs, output: output, cf: return_style, variadic: false, }) } } /// matches optbounds = ( ( : ( boundseq )? )? ) /// where boundseq = ( bound + boundseq ) | bound /// and bound = 'static | ty /// Returns "None" if there's no colon (e.g. "T"); /// Returns "Some(Empty)" if there's a colon but nothing after (e.g. "T:") /// Returns "Some(stuff)" otherwise (e.g. "T:stuff"). /// NB: The None/Some distinction is important for issue #7264. /// /// Note that the `allow_any_lifetime` argument is a hack for now while the /// AST doesn't support arbitrary lifetimes in bounds on type parameters. 
In /// the future, this flag should be removed, and the return value of this /// function should be Option<~[TyParamBound]> fn parse_ty_param_bounds(&mut self, allow_any_lifetime: bool) -> (Option<ast::Lifetime>, OwnedSlice<TyParamBound>) { let mut ret_lifetime = None; let mut result = vec!(); loop { match self.token { token::LIFETIME(lifetime) => { let lifetime_interned_string = token::get_ident(lifetime); if lifetime_interned_string.equiv(&("'static")) { result.push(StaticRegionTyParamBound); if allow_any_lifetime && ret_lifetime.is_none() { ret_lifetime = Some(ast::Lifetime { id: ast::DUMMY_NODE_ID, span: self.span, name: lifetime.name }); } } else if allow_any_lifetime && ret_lifetime.is_none() { ret_lifetime = Some(ast::Lifetime { id: ast::DUMMY_NODE_ID, span: self.span, name: lifetime.name }); } else { result.push(OtherRegionTyParamBound(self.span)); } self.bump(); } token::MOD_SEP | token::IDENT(..) => { let tref = self.parse_trait_ref(); result.push(TraitTyParamBound(tref)); } token::BINOP(token::OR) | token::OROR => { let unboxed_function_type = self.parse_unboxed_function_type(); result.push(UnboxedFnTyParamBound(unboxed_function_type)); } _ => break, } if !self.eat(&token::BINOP(token::PLUS)) { break; } } return (ret_lifetime, OwnedSlice::from_vec(result)); } fn trait_ref_from_ident(ident: Ident, span: Span) -> ast::TraitRef { let segment = ast::PathSegment { identifier: ident, lifetimes: Vec::new(), types: OwnedSlice::empty(), }; let path = ast::Path { span: span, global: false, segments: vec![segment], }; ast::TraitRef { path: path, ref_id: ast::DUMMY_NODE_ID, } } /// Matches typaram = (unbound`?`)? IDENT optbounds ( EQ ty )? fn parse_ty_param(&mut self) -> TyParam { // This is a bit hacky. Currently we are only interested in a single // unbound, and it may only be `Sized`. To avoid backtracking and other // complications, we parse an ident, then check for `?`. If we find it, // we use the ident as the unbound, otherwise, we use it as the name of // type param. let mut span = self.span; let mut ident = self.parse_ident(); let mut unbound = None; if self.eat(&token::QUESTION) { let tref = Parser::trait_ref_from_ident(ident, span); unbound = Some(TraitTyParamBound(tref)); span = self.span; ident = self.parse_ident(); } let opt_bounds = { if self.eat(&token::COLON) { let (_, bounds) = self.parse_ty_param_bounds(false); Some(bounds) } else { None } }; // For typarams we don't care about the difference b/w "<T>" and "<T:>". let bounds = opt_bounds.unwrap_or_default(); let default = if self.token == token::EQ { self.bump(); Some(self.parse_ty(true)) } else { None }; TyParam { ident: ident, id: ast::DUMMY_NODE_ID, bounds: bounds, unbound: unbound, default: default, span: span, } } /// Parse a set of optional generic type parameter declarations /// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > ) /// | ( < lifetimes , typaramseq ( , )? 
> ) /// where typaramseq = ( typaram ) | ( typaram , typaramseq )
pub fn parse_generics(&mut self) -> ast::Generics {
    if self.eat(&token::LT) {
        let lifetimes = self.parse_lifetimes();
        let mut seen_default = false;
        let ty_params = self.parse_seq_to_gt(Some(token::COMMA), |p| {
            p.forbid_lifetime();
            let ty_param = p.parse_ty_param();
            if ty_param.default.is_some() {
                seen_default = true;
            } else if seen_default {
                let last_span = p.last_span;
                p.span_err(last_span,
                           "type parameters with a default must be trailing");
            }
            ty_param
        });
        ast::Generics { lifetimes: lifetimes, ty_params: ty_params }
    } else {
        ast_util::empty_generics()
    }
}

fn parse_generic_values_after_lt(&mut self)
                                 -> (Vec<ast::Lifetime>, Vec<P<Ty>>) {
    let lifetimes = self.parse_lifetimes();
    let result = self.parse_seq_to_gt(
        Some(token::COMMA),
        |p| {
            p.forbid_lifetime();
            p.parse_ty(true)
        }
    );
    (lifetimes, result.into_vec())
}

fn forbid_lifetime(&mut self) {
    if Parser::token_is_lifetime(&self.token) {
        let span = self.span;
        self.span_fatal(span, "lifetime parameters must be declared \
                               prior to type parameters");
    }
}

fn parse_fn_args(&mut self, named_args: bool, allow_variadic: bool)
                 -> (Vec<Arg>, bool) {
    let sp = self.span;
    let mut args: Vec<Option<Arg>> = self.parse_unspanned_seq(
        &token::LPAREN,
        &token::RPAREN,
        seq_sep_trailing_allowed(token::COMMA),
        |p| {
            if p.token == token::DOTDOTDOT {
                p.bump();
                if allow_variadic {
                    if p.token != token::RPAREN {
                        let span = p.span;
                        p.span_fatal(span,
                            "`...` must be last in argument list for variadic function");
                    }
                } else {
                    let span = p.span;
                    p.span_fatal(span,
                        "only foreign functions are allowed to be variadic");
                }
                None
            } else {
                Some(p.parse_arg_general(named_args))
            }
        }
    );
    let variadic = match args.pop() {
        Some(None) => true,
        Some(x) => {
            // Need to put back that last arg
            args.push(x);
            false
        }
        None => false
    };
    if variadic && args.is_empty() {
        self.span_err(sp,
                      "variadic function must be declared with at least one named argument");
    }
    let args = args.move_iter().map(|x| x.unwrap()).collect();
    (args, variadic)
}

/// Parse the argument list and result type of a function declaration
pub fn parse_fn_decl(&mut self, allow_variadic: bool) -> P<FnDecl> {
    let (args, variadic) = self.parse_fn_args(true, allow_variadic);
    let (ret_style, ret_ty) = self.parse_ret_ty();
    P(FnDecl {
        inputs: args,
        output: ret_ty,
        cf: ret_style,
        variadic: variadic
    })
}

fn is_self_ident(&mut self) -> bool {
    match self.token {
        token::IDENT(id, false) => id.name == special_idents::self_.name,
        _ => false
    }
}

fn expect_self_ident(&mut self) -> ast::Ident {
    match self.token {
        token::IDENT(id, false) if id.name == special_idents::self_.name => {
            self.bump();
            id
        },
        _ => {
            let token_str = self.this_token_to_string();
            self.fatal(format!("expected `self` but found `{}`",
                               token_str).as_slice())
        }
    }
}

/// Parse the argument list and result type of a function
/// that may have a self type.
fn parse_fn_decl_with_self(&mut self, parse_arg_fn: |&mut Parser| -> Arg)
                           -> (ExplicitSelf, P<FnDecl>) {
    fn maybe_parse_borrowed_explicit_self(this: &mut Parser)
                                          -> ast::ExplicitSelf_ {
        // The following things are possible to see here:
        //
        //     fn(&self)
        //     fn(&mut self)
        //     fn(&'lt self)
        //     fn(&'lt mut self)
        //
        // We already know that the current token is `&`.
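        // (descriptive note) each arm below peeks one to three tokens
        // ahead before committing, so a plain `&Type` argument is never
        // mistaken for an explicit self.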
if this.look_ahead(1, |t| token::is_keyword(keywords::Self, t)) { this.bump(); SelfRegion(None, MutImmutable, this.expect_self_ident()) } else if this.look_ahead(1, |t| Parser::token_is_mutability(t)) && this.look_ahead(2, |t| token::is_keyword(keywords::Self, t)) { this.bump(); let mutability = this.parse_mutability(); SelfRegion(None, mutability, this.expect_self_ident()) } else if this.look_ahead(1, |t| Parser::token_is_lifetime(t)) && this.look_ahead(2, |t| token::is_keyword(keywords::Self, t)) { this.bump(); let lifetime = this.parse_lifetime(); SelfRegion(Some(lifetime), MutImmutable, this.expect_self_ident()) } else if this.look_ahead(1, |t| Parser::token_is_lifetime(t)) && this.look_ahead(2, |t| { Parser::token_is_mutability(t) }) && this.look_ahead(3, |t| token::is_keyword(keywords::Self, t)) { this.bump(); let lifetime = this.parse_lifetime(); let mutability = this.parse_mutability(); SelfRegion(Some(lifetime), mutability, this.expect_self_ident()) } else { SelfStatic } } self.expect(&token::LPAREN); // A bit of complexity and lookahead is needed here in order to be // backwards compatible. let lo = self.span.lo; let mut mutbl_self = MutImmutable; let explicit_self = match self.token { token::BINOP(token::AND) => { maybe_parse_borrowed_explicit_self(self) } token::TILDE => { // We need to make sure it isn't a type if self.look_ahead(1, |t| token::is_keyword(keywords::Self, t)) { self.bump(); drop(self.expect_self_ident()); let last_span = self.last_span; self.obsolete(last_span, ObsoleteOwnedSelf) } SelfStatic } token::IDENT(..) if self.is_self_ident() => { let self_ident = self.expect_self_ident(); // Determine whether this is the fully explicit form, `self: // TYPE`. if self.eat(&token::COLON) { SelfExplicit(self.parse_ty(false), self_ident) } else { SelfValue(self_ident) } } token::BINOP(token::STAR) => { // Possibly "*self" or "*mut self" -- not supported. Try to avoid // emitting cryptic "unexpected token" errors. self.bump(); let _mutability = if Parser::token_is_mutability(&self.token) { self.parse_mutability() } else { MutImmutable }; if self.is_self_ident() { let span = self.span; self.span_err(span, "cannot pass self by unsafe pointer"); self.bump(); } // error case, making bogus self ident: SelfValue(special_idents::self_) } _ if Parser::token_is_mutability(&self.token) && self.look_ahead(1, |t| token::is_keyword(keywords::Self, t)) => { mutbl_self = self.parse_mutability(); let self_ident = self.expect_self_ident(); // Determine whether this is the fully explicit form, `self: // TYPE`. if self.eat(&token::COLON) { SelfExplicit(self.parse_ty(false), self_ident) } else { SelfValue(self_ident) } } _ if Parser::token_is_mutability(&self.token) && self.look_ahead(1, |t| *t == token::TILDE) && self.look_ahead(2, |t| token::is_keyword(keywords::Self, t)) => { mutbl_self = self.parse_mutability(); self.bump(); drop(self.expect_self_ident()); let last_span = self.last_span; self.obsolete(last_span, ObsoleteOwnedSelf); SelfStatic } _ => SelfStatic }; let explicit_self_sp = mk_sp(lo, self.span.hi); // shared fall-through for the three cases below. borrowing prevents simply // writing this as a closure macro_rules! parse_remaining_arguments { ($self_id:ident) => { // If we parsed a self type, expect a comma before the argument list. 
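                // e.g. `fn f(&self, x: uint)` takes the COMMA arm below,
                // while `fn f(&self)` hits the RPAREN arm directly
                // (illustrative examples).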
match self.token { token::COMMA => { self.bump(); let sep = seq_sep_trailing_disallowed(token::COMMA); let mut fn_inputs = self.parse_seq_to_before_end( &token::RPAREN, sep, parse_arg_fn ); fn_inputs.unshift(Arg::new_self(explicit_self_sp, mutbl_self, $self_id)); fn_inputs } token::RPAREN => { vec!(Arg::new_self(explicit_self_sp, mutbl_self, $self_id)) } _ => { let token_str = self.this_token_to_string(); self.fatal(format!("expected `,` or `)`, found `{}`", token_str).as_slice()) } } } } let fn_inputs = match explicit_self { SelfStatic => { let sep = seq_sep_trailing_disallowed(token::COMMA); self.parse_seq_to_before_end(&token::RPAREN, sep, parse_arg_fn) } SelfValue(id) => parse_remaining_arguments!(id), SelfRegion(_,_,id) => parse_remaining_arguments!(id), SelfExplicit(_,id) => parse_remaining_arguments!(id), }; self.expect(&token::RPAREN); let hi = self.span.hi; let (ret_style, ret_ty) = self.parse_ret_ty(); let fn_decl = P(FnDecl { inputs: fn_inputs, output: ret_ty, cf: ret_style, variadic: false }); (spanned(lo, hi, explicit_self), fn_decl) } // parse the |arg, arg| header on a lambda fn parse_fn_block_decl(&mut self) -> (P<FnDecl>, bool) { let (is_unboxed, inputs_captures) = { if self.eat(&token::OROR) { (false, Vec::new()) } else { self.expect(&token::BINOP(token::OR)); let is_unboxed = self.token == token::BINOP(token::AND) && self.look_ahead(1, |t| { token::is_keyword(keywords::Mut, t) }) && self.look_ahead(2, |t| *t == token::COLON); if is_unboxed { self.bump(); self.bump(); self.bump(); } let args = self.parse_seq_to_before_end( &token::BINOP(token::OR), seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_fn_block_arg() ); self.bump(); (is_unboxed, args) } }; let output = if self.eat(&token::RARROW) { self.parse_ty(true) } else { P(Ty { id: ast::DUMMY_NODE_ID, node: TyInfer, span: self.span, }) }; (P(FnDecl { inputs: inputs_captures, output: output, cf: Return, variadic: false }), is_unboxed) } /// Parses the `(arg, arg) -> return_type` header on a procedure. fn parse_proc_decl(&mut self) -> P<FnDecl> { let inputs = self.parse_unspanned_seq(&token::LPAREN, &token::RPAREN, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_fn_block_arg()); let output = if self.eat(&token::RARROW) { self.parse_ty(true) } else { P(Ty { id: ast::DUMMY_NODE_ID, node: TyInfer, span: self.span, }) }; P(FnDecl { inputs: inputs, output: output, cf: Return, variadic: false }) } /// Parse the name and optional generic types of a function header. fn parse_fn_header(&mut self) -> (Ident, ast::Generics) { let id = self.parse_ident(); let generics = self.parse_generics(); (id, generics) } fn mk_item(&mut self, lo: BytePos, hi: BytePos, ident: Ident, node: Item_, vis: Visibility, attrs: Vec<Attribute>) -> Gc<Item> { box(GC) Item { ident: ident, attrs: attrs, id: ast::DUMMY_NODE_ID, node: node, vis: vis, span: mk_sp(lo, hi) } } /// Parse an item-position function declaration. fn parse_item_fn(&mut self, fn_style: FnStyle, abi: abi::Abi) -> ItemInfo { let (ident, generics) = self.parse_fn_header(); let decl = self.parse_fn_decl(false); let (inner_attrs, body) = self.parse_inner_attrs_and_block(); (ident, ItemFn(decl, fn_style, abi, generics, body), Some(inner_attrs)) } /// Parse a method in a trait impl, starting with `attrs` attributes. 
pub fn parse_method(&mut self, already_parsed_attrs: Option<Vec<Attribute>>) -> Gc<Method> { let next_attrs = self.parse_outer_attributes(); let attrs = match already_parsed_attrs { Some(mut a) => { a.push_all_move(next_attrs); a } None => next_attrs }; let lo = self.span.lo; // code copied from parse_macro_use_or_failure... abstraction! let (method_, hi, new_attrs) = { if !token::is_any_keyword(&self.token) && self.look_ahead(1, |t| *t == token::NOT) && (self.look_ahead(2, |t| *t == token::LPAREN) || self.look_ahead(2, |t| *t == token::LBRACE)) { // method macro. let pth = self.parse_path(NoTypesAllowed).path; self.expect(&token::NOT); // eat a matched-delimiter token tree: let tts = match token::close_delimiter_for(&self.token) { Some(ket) => { self.bump(); self.parse_seq_to_end(&ket, seq_sep_none(), |p| p.parse_token_tree()) } None => self.fatal("expected open delimiter") }; let m_ = ast::MacInvocTT(pth, tts, EMPTY_CTXT); let m: ast::Mac = codemap::Spanned { node: m_, span: mk_sp(self.span.lo, self.span.hi) }; (ast::MethMac(m), self.span.hi, attrs) } else { let visa = self.parse_visibility(); let abi = if self.eat_keyword(keywords::Extern) { self.parse_opt_abi().unwrap_or(abi::C) } else if attr::contains_name(attrs.as_slice(), "rust_call_abi_hack") { // FIXME(stage0, pcwalton): Remove this awful hack after a // snapshot, and change to `extern "rust-call" fn`. abi::RustCall } else { abi::Rust }; let fn_style = self.parse_fn_style(); let ident = self.parse_ident(); let generics = self.parse_generics(); let (explicit_self, decl) = self.parse_fn_decl_with_self(|p| { p.parse_arg() }); let (inner_attrs, body) = self.parse_inner_attrs_and_block(); let new_attrs = attrs.append(inner_attrs.as_slice()); (ast::MethDecl(ident, generics, abi, explicit_self, fn_style, decl, body, visa), body.span.hi, new_attrs) } }; box(GC) ast::Method { attrs: new_attrs, id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), node: method_, } } /// Parse trait Foo { ... } fn parse_item_trait(&mut self) -> ItemInfo { let ident = self.parse_ident(); let tps = self.parse_generics(); let sized = self.parse_for_sized(); // Parse traits, if necessary. let traits; if self.token == token::COLON { self.bump(); traits = self.parse_trait_ref_list(&token::LBRACE); } else { traits = Vec::new(); } let meths = self.parse_trait_methods(); (ident, ItemTrait(tps, sized, traits, meths), None) } /// Parses two variants (with the region/type params always optional): /// impl<T> Foo { ... } /// impl<T> ToString for ~[T] { ... } fn parse_item_impl(&mut self) -> ItemInfo { // First, parse type parameters if necessary. let generics = self.parse_generics(); // Special case: if the next identifier that follows is '(', don't // allow this to be parsed as a trait. let could_be_trait = self.token != token::LPAREN; // Parse the trait. let mut ty = self.parse_ty(true); // Parse traits, if necessary. let opt_trait = if could_be_trait && self.eat_keyword(keywords::For) { // New-style trait. Reinterpret the type as a trait. let opt_trait_ref = match ty.node { TyPath(ref path, None, node_id) => { Some(TraitRef { path: /* bad */ (*path).clone(), ref_id: node_id }) } TyPath(..) 
=> { self.span_err(ty.span, "bounded traits are only valid in type position"); None } _ => { self.span_err(ty.span, "not a trait"); None } }; ty = self.parse_ty(true); opt_trait_ref } else { None }; let mut meths = Vec::new(); self.expect(&token::LBRACE); let (inner_attrs, next) = self.parse_inner_attrs_and_next(); let mut method_attrs = Some(next); while !self.eat(&token::RBRACE) { meths.push(self.parse_method(method_attrs)); method_attrs = None; } let ident = ast_util::impl_pretty_name(&opt_trait, &*ty); (ident, ItemImpl(generics, opt_trait, ty, meths), Some(inner_attrs)) } /// Parse a::B<String,int> fn parse_trait_ref(&mut self) -> TraitRef { ast::TraitRef { path: self.parse_path(LifetimeAndTypesWithoutColons).path, ref_id: ast::DUMMY_NODE_ID, } } /// Parse B + C<String,int> + D fn parse_trait_ref_list(&mut self, ket: &token::Token) -> Vec<TraitRef> { self.parse_seq_to_before_end( ket, seq_sep_trailing_disallowed(token::BINOP(token::PLUS)), |p| p.parse_trait_ref() ) } /// Parse struct Foo { ... } fn parse_item_struct(&mut self, is_virtual: bool) -> ItemInfo { let class_name = self.parse_ident(); let generics = self.parse_generics(); let super_struct = if self.eat(&token::COLON) { let ty = self.parse_ty(true); match ty.node { TyPath(_, None, _) => { Some(ty) } _ => { self.span_err(ty.span, "not a struct"); None } } } else { None }; let mut fields: Vec<StructField>; let is_tuple_like; if self.eat(&token::LBRACE) { // It's a record-like struct. is_tuple_like = false; fields = Vec::new(); while self.token != token::RBRACE { fields.push(self.parse_struct_decl_field()); } if fields.len() == 0 { self.fatal(format!("unit-like struct definition should be \ written as `struct {};`", token::get_ident(class_name)).as_slice()); } self.bump(); } else if self.token == token::LPAREN { // It's a tuple-like struct. is_tuple_like = true; fields = self.parse_unspanned_seq( &token::LPAREN, &token::RPAREN, seq_sep_trailing_allowed(token::COMMA), |p| { let attrs = p.parse_outer_attributes(); let lo = p.span.lo; let struct_field_ = ast::StructField_ { kind: UnnamedField(p.parse_visibility()), id: ast::DUMMY_NODE_ID, ty: p.parse_ty(true), attrs: attrs, }; spanned(lo, p.span.hi, struct_field_) }); if fields.len() == 0 { self.fatal(format!("unit-like struct definition should be \ written as `struct {};`", token::get_ident(class_name)).as_slice()); } self.expect(&token::SEMI); } else if self.eat(&token::SEMI) { // It's a unit-like struct. is_tuple_like = true; fields = Vec::new(); } else { let token_str = self.this_token_to_string(); self.fatal(format!("expected `{}`, `(`, or `;` after struct \ name but found `{}`", "{", token_str).as_slice()) } let _ = ast::DUMMY_NODE_ID; // FIXME: Workaround for crazy bug. 
let new_id = ast::DUMMY_NODE_ID; (class_name, ItemStruct(box(GC) ast::StructDef { fields: fields, ctor_id: if is_tuple_like { Some(new_id) } else { None }, super_struct: super_struct, is_virtual: is_virtual, }, generics), None) } /// Parse a structure field declaration pub fn parse_single_struct_field(&mut self, vis: Visibility, attrs: Vec<Attribute> ) -> StructField { let a_var = self.parse_name_and_ty(vis, attrs); match self.token { token::COMMA => { self.bump(); } token::RBRACE => {} _ => { let span = self.span; let token_str = self.this_token_to_string(); self.span_fatal(span, format!("expected `,`, or `}}` but found `{}`", token_str).as_slice()) } } a_var } /// Parse an element of a struct definition fn parse_struct_decl_field(&mut self) -> StructField { let attrs = self.parse_outer_attributes(); if self.eat_keyword(keywords::Pub) { return self.parse_single_struct_field(Public, attrs); } return self.parse_single_struct_field(Inherited, attrs); } /// Parse visibility: PUB, PRIV, or nothing fn parse_visibility(&mut self) -> Visibility { if self.eat_keyword(keywords::Pub) { Public } else { Inherited } } fn parse_for_sized(&mut self) -> Option<ast::TyParamBound> { if self.eat_keyword(keywords::For) { let span = self.span; let ident = self.parse_ident(); if !self.eat(&token::QUESTION) { self.span_err(span, "expected 'Sized?' after `for` in trait item"); return None; } let tref = Parser::trait_ref_from_ident(ident, span); Some(TraitTyParamBound(tref)) } else { None } } /// Given a termination token and a vector of already-parsed /// attributes (of length 0 or 1), parse all of the items in a module fn parse_mod_items(&mut self, term: token::Token, first_item_attrs: Vec<Attribute>, inner_lo: BytePos) -> Mod { // parse all of the items up to closing or an attribute. // view items are legal here. let ParsedItemsAndViewItems { attrs_remaining: attrs_remaining, view_items: view_items, items: starting_items, .. } = self.parse_items_and_view_items(first_item_attrs, true, true); let mut items: Vec<Gc<Item>> = starting_items; let attrs_remaining_len = attrs_remaining.len(); // don't think this other loop is even necessary.... let mut first = true; while self.token != term { let mut attrs = self.parse_outer_attributes(); if first { attrs = attrs_remaining.clone().append(attrs.as_slice()); first = false; } debug!("parse_mod_items: parse_item_or_view_item(attrs={:?})", attrs); match self.parse_item_or_view_item(attrs, true /* macros allowed */) { IoviItem(item) => items.push(item), IoviViewItem(view_item) => { self.span_fatal(view_item.span, "view items must be declared at the top of \ the module"); } _ => { let token_str = self.this_token_to_string(); self.fatal(format!("expected item but found `{}`", token_str).as_slice()) } } } if first && attrs_remaining_len > 0u { // We parsed attributes for the first item but didn't find it let last_span = self.last_span; self.span_err(last_span, "expected item after attributes"); } ast::Mod { inner: mk_sp(inner_lo, self.span.lo), view_items: view_items, items: items } } fn parse_item_const(&mut self) -> ItemInfo { let m = if self.eat_keyword(keywords::Mut) {MutMutable} else {MutImmutable}; let id = self.parse_ident(); self.expect(&token::COLON); let ty = self.parse_ty(true); self.expect(&token::EQ); let e = self.parse_expr(); self.commit_expr_expecting(e, token::SEMI); (id, ItemStatic(ty, m, e), None) } /// Parse a `mod <foo> { ...
}` or `mod <foo>;` item fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> ItemInfo { let id_span = self.span; let id = self.parse_ident(); if self.token == token::SEMI { self.bump(); // This mod is in an external file. Let's go get it! let (m, attrs) = self.eval_src_mod(id, outer_attrs, id_span); (id, m, Some(attrs)) } else { self.push_mod_path(id, outer_attrs); self.expect(&token::LBRACE); let mod_inner_lo = self.span.lo; let old_owns_directory = self.owns_directory; self.owns_directory = true; let (inner, next) = self.parse_inner_attrs_and_next(); let m = self.parse_mod_items(token::RBRACE, next, mod_inner_lo); self.expect(&token::RBRACE); self.owns_directory = old_owns_directory; self.pop_mod_path(); (id, ItemMod(m), Some(inner)) } } fn push_mod_path(&mut self, id: Ident, attrs: &[Attribute]) { let default_path = self.id_to_interned_str(id); let file_path = match ::attr::first_attr_value_str_by_name(attrs, "path") { Some(d) => d, None => default_path, }; self.mod_path_stack.push(file_path) } fn pop_mod_path(&mut self) { self.mod_path_stack.pop().unwrap(); } /// Read a module from a source file. fn eval_src_mod(&mut self, id: ast::Ident, outer_attrs: &[ast::Attribute], id_sp: Span) -> (ast::Item_, Vec<ast::Attribute> ) { let mut prefix = Path::new(self.sess.span_diagnostic.cm.span_to_filename(self.span)); prefix.pop(); let mod_path = Path::new(".").join_many(self.mod_path_stack.as_slice()); let dir_path = prefix.join(&mod_path); let mod_string = token::get_ident(id); let (file_path, owns_directory) = match ::attr::first_attr_value_str_by_name( outer_attrs, "path") { Some(d) => (dir_path.join(d), true), None => { let mod_name = mod_string.get().to_string(); let default_path_str = format!("{}.rs", mod_name); let secondary_path_str = format!("{}/mod.rs", mod_name); let default_path = dir_path.join(default_path_str.as_slice()); let secondary_path = dir_path.join(secondary_path_str.as_slice()); let default_exists = default_path.exists(); let secondary_exists = secondary_path.exists(); if !self.owns_directory { self.span_err(id_sp, "cannot declare a new module at this location"); let this_module = match self.mod_path_stack.last() { Some(name) => name.get().to_string(), None => self.root_module_name.get_ref().clone(), }; self.span_note(id_sp, format!("maybe move this module `{0}` \ to its own directory via \ `{0}/mod.rs`", this_module).as_slice()); if default_exists || secondary_exists { self.span_note(id_sp, format!("... 
or maybe `use` the module \ `{}` instead of possibly \ redeclaring it", mod_name).as_slice()); } self.abort_if_errors(); } match (default_exists, secondary_exists) { (true, false) => (default_path, false), (false, true) => (secondary_path, true), (false, false) => { self.span_fatal(id_sp, format!("file not found for module \ `{}`", mod_name).as_slice()); } (true, true) => { self.span_fatal( id_sp, format!("file for module `{}` found at both {} \ and {}", mod_name, default_path_str, secondary_path_str).as_slice()); } } } }; self.eval_src_mod_from_path(file_path, owns_directory, mod_string.get().to_string(), id_sp) } fn eval_src_mod_from_path(&mut self, path: Path, owns_directory: bool, name: String, id_sp: Span) -> (ast::Item_, Vec<ast::Attribute> ) { let mut included_mod_stack = self.sess.included_mod_stack.borrow_mut(); match included_mod_stack.iter().position(|p| *p == path) { Some(i) => { let mut err = String::from_str("circular modules: "); let len = included_mod_stack.len(); for p in included_mod_stack.slice(i, len).iter() { err.push_str(p.display().as_maybe_owned().as_slice()); err.push_str(" -> "); } err.push_str(path.display().as_maybe_owned().as_slice()); self.span_fatal(id_sp, err.as_slice()); } None => () } included_mod_stack.push(path.clone()); drop(included_mod_stack); let mut p0 = new_sub_parser_from_file(self.sess, self.cfg.clone(), &path, owns_directory, Some(name), id_sp); let mod_inner_lo = p0.span.lo; let (mod_attrs, next) = p0.parse_inner_attrs_and_next(); let first_item_outer_attrs = next; let m0 = p0.parse_mod_items(token::EOF, first_item_outer_attrs, mod_inner_lo); self.sess.included_mod_stack.borrow_mut().pop(); return (ast::ItemMod(m0), mod_attrs); } /// Parse a function declaration from a foreign module fn parse_item_foreign_fn(&mut self, vis: ast::Visibility, attrs: Vec<Attribute>) -> Gc<ForeignItem> { let lo = self.span.lo; self.expect_keyword(keywords::Fn); let (ident, generics) = self.parse_fn_header(); let decl = self.parse_fn_decl(true); let hi = self.span.hi; self.expect(&token::SEMI); box(GC) ast::ForeignItem { ident: ident, attrs: attrs, node: ForeignItemFn(decl, generics), id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), vis: vis } } /// Parse a static item from a foreign module fn parse_item_foreign_static(&mut self, vis: ast::Visibility, attrs: Vec<Attribute> ) -> Gc<ForeignItem> { let lo = self.span.lo; self.expect_keyword(keywords::Static); let mutbl = self.eat_keyword(keywords::Mut); let ident = self.parse_ident(); self.expect(&token::COLON); let ty = self.parse_ty(true); let hi = self.span.hi; self.expect(&token::SEMI); box(GC) ast::ForeignItem { ident: ident, attrs: attrs, node: ForeignItemStatic(ty, mutbl), id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), vis: vis, } } /// Parse safe/unsafe and fn fn parse_fn_style(&mut self) -> FnStyle { if self.eat_keyword(keywords::Fn) { NormalFn } else if self.eat_keyword(keywords::Unsafe) { self.expect_keyword(keywords::Fn); UnsafeFn } else { self.unexpected(); } } /// At this point, this is essentially a wrapper for /// parse_foreign_items. fn parse_foreign_mod_items(&mut self, abi: abi::Abi, first_item_attrs: Vec<Attribute> ) -> ForeignMod { let ParsedItemsAndViewItems { attrs_remaining: attrs_remaining, view_items: view_items, items: _, foreign_items: foreign_items } = self.parse_foreign_items(first_item_attrs, true); if ! 
attrs_remaining.is_empty() { let last_span = self.last_span; self.span_err(last_span, "expected item after attributes"); } assert!(self.token == token::RBRACE); ast::ForeignMod { abi: abi, view_items: view_items, items: foreign_items } } /// Parse extern crate links /// /// # Example /// /// extern crate url; /// extern crate foo = "bar"; fn parse_item_extern_crate(&mut self, lo: BytePos, visibility: Visibility, attrs: Vec<Attribute> ) -> ItemOrViewItem { let (maybe_path, ident) = match self.token { token::IDENT(..) => { let the_ident = self.parse_ident(); self.expect_one_of(&[], &[token::EQ, token::SEMI]); let path = if self.token == token::EQ { self.bump(); Some(self.parse_str()) } else {None}; self.expect(&token::SEMI); (path, the_ident) } _ => { let span = self.span; let token_str = self.this_token_to_string(); self.span_fatal(span, format!("expected extern crate name but \ found `{}`", token_str).as_slice()); } }; IoviViewItem(ast::ViewItem { node: ViewItemExternCrate(ident, maybe_path, ast::DUMMY_NODE_ID), attrs: attrs, vis: visibility, span: mk_sp(lo, self.last_span.hi) }) } /// Parse `extern` for foreign ABIs /// modules. /// /// `extern` is expected to have been /// consumed before calling this method /// /// # Examples: /// /// extern "C" {} /// extern {} fn parse_item_foreign_mod(&mut self, lo: BytePos, opt_abi: Option<abi::Abi>, visibility: Visibility, attrs: Vec<Attribute> ) -> ItemOrViewItem { self.expect(&token::LBRACE); let abi = opt_abi.unwrap_or(abi::C); let (inner, next) = self.parse_inner_attrs_and_next(); let m = self.parse_foreign_mod_items(abi, next); self.expect(&token::RBRACE); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, special_idents::invalid, ItemForeignMod(m), visibility, maybe_append(attrs, Some(inner))); return IoviItem(item); } /// Parse type Foo = Bar; fn parse_item_type(&mut self) -> ItemInfo { let ident = self.parse_ident(); let tps = self.parse_generics(); self.expect(&token::EQ); let ty = self.parse_ty(true); self.expect(&token::SEMI); (ident, ItemTy(ty, tps), None) } /// Parse a structure-like enum variant definition /// this should probably be renamed or refactored... fn parse_struct_def(&mut self) -> Gc<StructDef> { let mut fields: Vec<StructField> = Vec::new(); while self.token != token::RBRACE { fields.push(self.parse_struct_decl_field()); } self.bump(); return box(GC) ast::StructDef { fields: fields, ctor_id: None, super_struct: None, is_virtual: false, }; } /// Parse the part of an "enum" decl following the '{' fn parse_enum_def(&mut self, _generics: &ast::Generics) -> EnumDef { let mut variants = Vec::new(); let mut all_nullary = true; let mut have_disr = false; while self.token != token::RBRACE { let variant_attrs = self.parse_outer_attributes(); let vlo = self.span.lo; let vis = self.parse_visibility(); let ident; let kind; let mut args = Vec::new(); let mut disr_expr = None; ident = self.parse_ident(); if self.eat(&token::LBRACE) { // Parse a struct variant. 
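// (This branch handles struct-like variants, e.g. `Foo { x: int }`;
// the tuple-like `Foo(..)` and `Foo = expr` discriminant forms are
// handled in the branches that follow.)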
all_nullary = false; kind = StructVariantKind(self.parse_struct_def()); } else if self.token == token::LPAREN { all_nullary = false; let arg_tys = self.parse_enum_variant_seq( &token::LPAREN, &token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_ty(true) ); for ty in arg_tys.move_iter() { args.push(ast::VariantArg { ty: ty, id: ast::DUMMY_NODE_ID, }); } kind = TupleVariantKind(args); } else if self.eat(&token::EQ) { have_disr = true; disr_expr = Some(self.parse_expr()); kind = TupleVariantKind(args); } else { kind = TupleVariantKind(Vec::new()); } let vr = ast::Variant_ { name: ident, attrs: variant_attrs, kind: kind, id: ast::DUMMY_NODE_ID, disr_expr: disr_expr, vis: vis, }; variants.push(P(spanned(vlo, self.last_span.hi, vr))); if !self.eat(&token::COMMA) { break; } } self.expect(&token::RBRACE); if have_disr && !all_nullary { self.fatal("discriminator values can only be used with a c-like \ enum"); } ast::EnumDef { variants: variants } } /// Parse an "enum" declaration fn parse_item_enum(&mut self) -> ItemInfo { let id = self.parse_ident(); let generics = self.parse_generics(); self.expect(&token::LBRACE); let enum_definition = self.parse_enum_def(&generics); (id, ItemEnum(enum_definition, generics), None) } fn fn_expr_lookahead(tok: &token::Token) -> bool { match *tok { token::LPAREN | token::AT | token::TILDE | token::BINOP(_) => true, _ => false } } /// Parses a string as an ABI spec on an extern type or module. Consumes /// the `extern` keyword, if one is found. fn parse_opt_abi(&mut self) -> Option<abi::Abi> { match self.token { token::LIT_STR(s) | token::LIT_STR_RAW(s, _) => { self.bump(); let the_string = s.as_str(); match abi::lookup(the_string) { Some(abi) => Some(abi), None => { let last_span = self.last_span; self.span_err( last_span, format!("illegal ABI: expected one of [{}], \ found `{}`", abi::all_names().connect(", "), the_string).as_slice()); None } } } _ => None, } } /// Parse one of the items or view items allowed by the /// flags; on failure, return IoviNone. /// NB: this function no longer parses the items inside an /// extern crate. 
fn parse_item_or_view_item(&mut self, attrs: Vec<Attribute> , macros_allowed: bool) -> ItemOrViewItem { match self.token { INTERPOLATED(token::NtItem(item)) => { self.bump(); let new_attrs = attrs.append(item.attrs.as_slice()); return IoviItem(box(GC) Item { attrs: new_attrs, ..(*item).clone() }); } _ => {} } let lo = self.span.lo; let visibility = self.parse_visibility(); // must be a view item: if self.eat_keyword(keywords::Use) { // USE ITEM (IoviViewItem) let view_item = self.parse_use(); self.expect(&token::SEMI); return IoviViewItem(ast::ViewItem { node: view_item, attrs: attrs, vis: visibility, span: mk_sp(lo, self.last_span.hi) }); } // either a view item or an item: if self.eat_keyword(keywords::Extern) { let next_is_mod = self.eat_keyword(keywords::Mod); if next_is_mod || self.eat_keyword(keywords::Crate) { if next_is_mod { let last_span = self.last_span; self.span_err(mk_sp(lo, last_span.hi), format!("`extern mod` is obsolete, use \ `extern crate` instead \ to refer to external \ crates.").as_slice()) } return self.parse_item_extern_crate(lo, visibility, attrs); } let opt_abi = self.parse_opt_abi(); if self.eat_keyword(keywords::Fn) { // EXTERN FUNCTION ITEM let abi = opt_abi.unwrap_or(abi::C); let (ident, item_, extra_attrs) = self.parse_item_fn(NormalFn, abi); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } else if self.token == token::LBRACE { return self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs); } let span = self.span; let token_str = self.this_token_to_string(); self.span_fatal(span, format!("expected `{}` or `fn` but found `{}`", "{", token_str).as_slice()); } let is_virtual = self.eat_keyword(keywords::Virtual); if is_virtual && !self.is_keyword(keywords::Struct) { let span = self.span; self.span_err(span, "`virtual` keyword may only be used with `struct`"); } // the rest are all guaranteed to be items: if self.is_keyword(keywords::Static) { // STATIC ITEM self.bump(); let (ident, item_, extra_attrs) = self.parse_item_const(); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.is_keyword(keywords::Fn) && self.look_ahead(1, |f| !Parser::fn_expr_lookahead(f)) { // FUNCTION ITEM self.bump(); let (ident, item_, extra_attrs) = self.parse_item_fn(NormalFn, abi::Rust); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.is_keyword(keywords::Unsafe) && self.look_ahead(1u, |t| *t != token::LBRACE) { // UNSAFE FUNCTION ITEM self.bump(); let abi = if self.eat_keyword(keywords::Extern) { self.parse_opt_abi().unwrap_or(abi::C) } else { abi::Rust }; self.expect_keyword(keywords::Fn); let (ident, item_, extra_attrs) = self.parse_item_fn(UnsafeFn, abi); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.eat_keyword(keywords::Mod) { // MODULE ITEM let (ident, item_, extra_attrs) = self.parse_item_mod(attrs.as_slice()); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.eat_keyword(keywords::Type) { // TYPE ITEM let (ident, item_, extra_attrs) = self.parse_item_type(); let last_span = self.last_span; let 
item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.eat_keyword(keywords::Enum) { // ENUM ITEM let (ident, item_, extra_attrs) = self.parse_item_enum(); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.eat_keyword(keywords::Trait) { // TRAIT ITEM let (ident, item_, extra_attrs) = self.parse_item_trait(); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.eat_keyword(keywords::Impl) { // IMPL ITEM let (ident, item_, extra_attrs) = self.parse_item_impl(); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } if self.eat_keyword(keywords::Struct) { // STRUCT ITEM let (ident, item_, extra_attrs) = self.parse_item_struct(is_virtual); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, ident, item_, visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); } self.parse_macro_use_or_failure(attrs,macros_allowed,lo,visibility) } /// Parse a foreign item; on failure, return IoviNone. fn parse_foreign_item(&mut self, attrs: Vec<Attribute> , macros_allowed: bool) -> ItemOrViewItem { maybe_whole!(iovi self, NtItem); let lo = self.span.lo; let visibility = self.parse_visibility(); if self.is_keyword(keywords::Static) { // FOREIGN STATIC ITEM let item = self.parse_item_foreign_static(visibility, attrs); return IoviForeignItem(item); } if self.is_keyword(keywords::Fn) || self.is_keyword(keywords::Unsafe) { // FOREIGN FUNCTION ITEM let item = self.parse_item_foreign_fn(visibility, attrs); return IoviForeignItem(item); } self.parse_macro_use_or_failure(attrs,macros_allowed,lo,visibility) } /// This is the fall-through for parsing items. fn parse_macro_use_or_failure( &mut self, attrs: Vec<Attribute> , macros_allowed: bool, lo: BytePos, visibility: Visibility ) -> ItemOrViewItem { if macros_allowed && !token::is_any_keyword(&self.token) && self.look_ahead(1, |t| *t == token::NOT) && (self.look_ahead(2, |t| is_plain_ident(t)) || self.look_ahead(2, |t| *t == token::LPAREN) || self.look_ahead(2, |t| *t == token::LBRACE)) { // MACRO INVOCATION ITEM // item macro. let pth = self.parse_path(NoTypesAllowed).path; self.expect(&token::NOT); // a 'special' identifier (like what `macro_rules!` uses) // is optional. We should eventually unify invoc syntax // and remove this. let id = if is_plain_ident(&self.token) { self.parse_ident() } else { token::special_idents::invalid // no special identifier }; // eat a matched-delimiter token tree: let tts = match token::close_delimiter_for(&self.token) { Some(ket) => { self.bump(); self.parse_seq_to_end(&ket, seq_sep_none(), |p| p.parse_token_tree()) } None => self.fatal("expected open delimiter") }; // single-variant-enum... 
: let m = ast::MacInvocTT(pth, tts, EMPTY_CTXT); let m: ast::Mac = codemap::Spanned { node: m, span: mk_sp(self.span.lo, self.span.hi) }; let item_ = ItemMac(m); let last_span = self.last_span; let item = self.mk_item(lo, last_span.hi, id, item_, visibility, attrs); return IoviItem(item); } // FAILURE TO PARSE ITEM if visibility != Inherited { let mut s = String::from_str("unmatched visibility `"); if visibility == Public { s.push_str("pub") } else { s.push_str("priv") } s.push_char('`'); let last_span = self.last_span; self.span_fatal(last_span, s.as_slice()); } return IoviNone(attrs); } pub fn parse_item_with_outer_attributes(&mut self) -> Option<Gc<Item>> { let attrs = self.parse_outer_attributes(); self.parse_item(attrs) } pub fn parse_item(&mut self, attrs: Vec<Attribute> ) -> Option<Gc<Item>> { match self.parse_item_or_view_item(attrs, true) { IoviNone(_) => None, IoviViewItem(_) => self.fatal("view items are not allowed here"), IoviForeignItem(_) => self.fatal("foreign items are not allowed here"), IoviItem(item) => Some(item) } } /// Parse, e.g., "use a::b::{z,y}" fn parse_use(&mut self) -> ViewItem_ { return ViewItemUse(self.parse_view_path()); } /// Matches view_path : MOD? IDENT EQ non_global_path /// | MOD? non_global_path MOD_SEP LBRACE RBRACE /// | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE /// | MOD? non_global_path MOD_SEP STAR /// | MOD? non_global_path fn parse_view_path(&mut self) -> Gc<ViewPath> { let lo = self.span.lo; if self.token == token::LBRACE { // use {foo,bar} let idents = self.parse_unspanned_seq( &token::LBRACE, &token::RBRACE, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_path_list_item()); let path = ast::Path { span: mk_sp(lo, self.span.hi), global: false, segments: Vec::new() }; return box(GC) spanned(lo, self.span.hi, ViewPathList(path, idents, ast::DUMMY_NODE_ID)); } let first_ident = self.parse_ident(); let mut path = vec!(first_ident); match self.token { token::EQ => { // x = foo::bar self.bump(); let path_lo = self.span.lo; path = vec!(self.parse_ident()); while self.token == token::MOD_SEP { self.bump(); let id = self.parse_ident(); path.push(id); } let path = ast::Path { span: mk_sp(path_lo, self.span.hi), global: false, segments: path.move_iter().map(|identifier| { ast::PathSegment { identifier: identifier, lifetimes: Vec::new(), types: OwnedSlice::empty(), } }).collect() }; return box(GC) spanned(lo, self.span.hi, ViewPathSimple(first_ident, path, ast::DUMMY_NODE_ID)); } token::MOD_SEP => { // foo::bar or foo::{a,b,c} or foo::* while self.token == token::MOD_SEP { self.bump(); match self.token { token::IDENT(i, _) => { self.bump(); path.push(i); } // foo::bar::{a,b,c} token::LBRACE => { let idents = self.parse_unspanned_seq( &token::LBRACE, &token::RBRACE, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_path_list_item() ); let path = ast::Path { span: mk_sp(lo, self.span.hi), global: false, segments: path.move_iter().map(|identifier| { ast::PathSegment { identifier: identifier, lifetimes: Vec::new(), types: OwnedSlice::empty(), } }).collect() }; return box(GC) spanned(lo, self.span.hi, ViewPathList(path, idents, ast::DUMMY_NODE_ID)); } // foo::bar::* token::BINOP(token::STAR) => { self.bump(); let path = ast::Path { span: mk_sp(lo, self.span.hi), global: false, segments: path.move_iter().map(|identifier| { ast::PathSegment { identifier: identifier, lifetimes: Vec::new(), types: OwnedSlice::empty(), } }).collect() }; return box(GC) spanned(lo, self.span.hi, ViewPathGlob(path, ast::DUMMY_NODE_ID)); } _ => break } } } _ => () } 
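// Fall through: a plain path such as `use foo::bar;`; the last path
// segment becomes the binding name for the ViewPathSimple built below.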
let last = *path.get(path.len() - 1u); let path = ast::Path { span: mk_sp(lo, self.span.hi), global: false, segments: path.move_iter().map(|identifier| { ast::PathSegment { identifier: identifier, lifetimes: Vec::new(), types: OwnedSlice::empty(), } }).collect() }; return box(GC) spanned(lo, self.last_span.hi, ViewPathSimple(last, path, ast::DUMMY_NODE_ID)); } /// Parses a sequence of items. Stops when it finds program /// text that can't be parsed as an item /// - mod_items uses extern_mod_allowed = true /// - block_tail_ uses extern_mod_allowed = false fn parse_items_and_view_items(&mut self, first_item_attrs: Vec<Attribute> , mut extern_mod_allowed: bool, macros_allowed: bool) -> ParsedItemsAndViewItems { let mut attrs = first_item_attrs.append(self.parse_outer_attributes().as_slice()); // First, parse view items. let mut view_items : Vec<ast::ViewItem> = Vec::new(); let mut items = Vec::new(); // I think this code would probably read better as a single // loop with a mutable three-state-variable (for extern crates, // view items, and regular items) ... except that because // of macros, I'd like to delay that entire check until later. loop { match self.parse_item_or_view_item(attrs, macros_allowed) { IoviNone(attrs) => { return ParsedItemsAndViewItems { attrs_remaining: attrs, view_items: view_items, items: items, foreign_items: Vec::new() } } IoviViewItem(view_item) => { match view_item.node { ViewItemUse(..) => { // `extern crate` must precede `use`. extern_mod_allowed = false; } ViewItemExternCrate(..) if !extern_mod_allowed => { self.span_err(view_item.span, "\"extern crate\" declarations are \ not allowed here"); } ViewItemExternCrate(..) => {} } view_items.push(view_item); } IoviItem(item) => { items.push(item); attrs = self.parse_outer_attributes(); break; } IoviForeignItem(_) => { fail!(); } } attrs = self.parse_outer_attributes(); } // Next, parse items. loop { match self.parse_item_or_view_item(attrs, macros_allowed) { IoviNone(returned_attrs) => { attrs = returned_attrs; break } IoviViewItem(view_item) => { attrs = self.parse_outer_attributes(); self.span_err(view_item.span, "`use` and `extern crate` declarations must precede items"); } IoviItem(item) => { attrs = self.parse_outer_attributes(); items.push(item) } IoviForeignItem(_) => { fail!(); } } } ParsedItemsAndViewItems { attrs_remaining: attrs, view_items: view_items, items: items, foreign_items: Vec::new() } } /// Parses a sequence of foreign items. Stops when it finds program /// text that can't be parsed as an item fn parse_foreign_items(&mut self, first_item_attrs: Vec<Attribute> , macros_allowed: bool) -> ParsedItemsAndViewItems { let mut attrs = first_item_attrs.append(self.parse_outer_attributes().as_slice()); let mut foreign_items = Vec::new(); loop { match self.parse_foreign_item(attrs, macros_allowed) { IoviNone(returned_attrs) => { if self.token == token::RBRACE { attrs = returned_attrs; break } self.unexpected(); }, IoviViewItem(view_item) => { // I think this can't occur: self.span_err(view_item.span, "`use` and `extern crate` declarations must precede items"); } IoviItem(item) => { // FIXME #5668: this will occur for a macro invocation: self.span_fatal(item.span, "macros cannot expand to foreign items"); } IoviForeignItem(foreign_item) => { foreign_items.push(foreign_item); } } attrs = self.parse_outer_attributes(); } ParsedItemsAndViewItems { attrs_remaining: attrs, view_items: Vec::new(), items: Vec::new(), foreign_items: foreign_items } } /// Parses a source module as a crate. 
This is the main /// entry point for the parser. pub fn parse_crate_mod(&mut self) -> Crate { let lo = self.span.lo; // parse the crate's inner attrs, maybe (oops) one // of the attrs of an item: let (inner, next) = self.parse_inner_attrs_and_next(); let first_item_outer_attrs = next; // parse the items inside the crate: let m = self.parse_mod_items(token::EOF, first_item_outer_attrs, lo); ast::Crate { module: m, attrs: inner, config: self.cfg.clone(), span: mk_sp(lo, self.span.lo), exported_macros: Vec::new(), } } pub fn parse_optional_str(&mut self) -> Option<(InternedString, ast::StrStyle)> { let (s, style) = match self.token { token::LIT_STR(s) => (self.id_to_interned_str(s.ident()), ast::CookedStr), token::LIT_STR_RAW(s, n) => { (self.id_to_interned_str(s.ident()), ast::RawStr(n)) } _ => return None }; self.bump(); Some((s, style)) } pub fn parse_str(&mut self) -> (InternedString, StrStyle) { match self.parse_optional_str() { Some(s) => { s } _ => self.fatal("expected string literal") } } }<|fim▁end|>
} else { after.push(subpat);
<|file_name|>parse.test.js<|end_file_name|><|fim▁begin|>var fs = require('co-fs') , test = require('bandage') , main = require('..') , parse = main.parse , write = main.write , parseFmtpConfig = main.parseFmtpConfig , parseParams = main.parseParams , parseImageAttributes = main.parseImageAttributes , parseSimulcastStreamList = main.parseSimulcastStreamList ; <|fim▁hole|>test('normalSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/normal.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); t.equal(session.origin.username, '-', 'origin username'); t.equal(session.origin.sessionId, 20518, 'origin sessionId'); t.equal(session.origin.sessionVersion, 0, 'origin sessionVersion'); t.equal(session.origin.netType, 'IN', 'origin netType'); t.equal(session.origin.ipVer, 4, 'origin ipVer'); t.equal(session.origin.address, '203.0.113.1', 'origin address'); t.equal(session.connection.ip, '203.0.113.1', 'session connect ip'); t.equal(session.connection.version, 4, 'session connect ip ver'); // global ICE and fingerprint t.equal(session.iceUfrag, 'F7gI', 'global ufrag'); t.equal(session.icePwd, 'x9cml/YzichV2+XlhiMu8g', 'global pwd'); var audio = media[0]; t.equal(audio.type, 'audio', 'audio type'); t.equal(audio.port, 54400, 'audio port'); t.equal(audio.protocol, 'RTP/SAVPF', 'audio protocol'); t.equal(audio.direction, 'sendrecv', 'audio direction'); t.equal(audio.rtp[0].payload, 0, 'audio rtp 0 payload'); t.equal(audio.rtp[0].codec, 'PCMU', 'audio rtp 0 codec'); t.equal(audio.rtp[0].rate, 8000, 'audio rtp 0 rate'); t.equal(audio.rtp[1].payload, 96, 'audio rtp 1 payload'); t.equal(audio.rtp[1].codec, 'opus', 'audio rtp 1 codec'); t.equal(audio.rtp[1].rate, 48000, 'audio rtp 1 rate'); t.deepEqual(audio.ext[0], { value: 1, uri: 'URI-toffset' }, 'audio extension 0'); t.deepEqual(audio.ext[1], { value: 2, direction: 'recvonly', uri: 'URI-gps-string' }, 'audio extension 1'); t.equal(audio.extmapAllowMixed, 'extmap-allow-mixed', 'extmap-allow-mixed present'); var video = media[1]; t.equal(video.type, 'video', 'video type'); t.equal(video.port, 55400, 'video port'); t.equal(video.protocol, 'RTP/SAVPF', 'video protocol'); t.equal(video.direction, 'sendrecv', 'video direction'); t.equal(video.rtp[0].payload, 97, 'video rtp 0 payload'); t.equal(video.rtp[0].codec, 'H264', 'video rtp 0 codec'); t.equal(video.rtp[0].rate, 90000, 'video rtp 0 rate'); t.equal(video.fmtp[0].payload, 97, 'video fmtp 0 payload'); var vidFmtp = parseFmtpConfig(video.fmtp[0].config); t.equal(vidFmtp['profile-level-id'], '4d0028', 'video fmtp 0 profile-level-id'); t.equal(vidFmtp['packetization-mode'], 1, 'video fmtp 0 packetization-mode'); t.equal(vidFmtp['sprop-parameter-sets'], 'Z0IAH5WoFAFuQA==,aM48gA==', 'video fmtp 0 sprop-parameter-sets'); t.equal(video.fmtp[1].payload, 98, 'video fmtp 1 payload'); var vidFmtp2 = parseFmtpConfig(video.fmtp[1].config); t.equal(vidFmtp2.minptime, 10, 'video fmtp 1 minptime'); t.equal(vidFmtp2.useinbandfec, 1, 'video fmtp 1 useinbandfec'); t.equal(video.rtp[1].payload, 98, 'video rtp 1 payload'); t.equal(video.rtp[1].codec, 'VP8', 'video rtp 1 codec'); t.equal(video.rtp[1].rate, 90000, 'video rtp 1 rate'); t.equal(video.rtcpFb[0].payload, '*', 'video rtcp-fb 0 payload'); t.equal(video.rtcpFb[0].type, 'nack', 'video rtcp-fb 0 type'); t.equal(video.rtcpFb[1].payload, 98, 'video rtcp-fb 0 payload'); t.equal(video.rtcpFb[1].type, 'nack', 'video rtcp-fb 0 type'); t.equal(video.rtcpFb[1].subtype, 
'rpsi', 'video rtcp-fb 0 subtype'); t.equal(video.rtcpFbTrrInt[0].payload, 98, 'video rtcp-fb trr-int 0 payload'); t.equal(video.rtcpFbTrrInt[0].value, 100, 'video rtcp-fb trr-int 0 value'); t.equal(video.crypto[0].id, 1, 'video crypto 0 id'); t.equal(video.crypto[0].suite, 'AES_CM_128_HMAC_SHA1_32', 'video crypto 0 suite'); t.equal(video.crypto[0].config, 'inline:keNcG3HezSNID7LmfDa9J4lfdUL8W1F7TNJKcbuy|2^20|1:32', 'video crypto 0 config'); t.equal(video.ssrcs.length, 3, 'video got 3 ssrc lines'); // test ssrc with attr:value t.deepEqual(video.ssrcs[0], { id: 1399694169, attribute: 'foo', value: 'bar' }, 'video 1st ssrc line attr:value'); // test ssrc with attr only t.deepEqual(video.ssrcs[1], { id: 1399694169, attribute: 'baz', }, 'video 2nd ssrc line attr only'); // test ssrc with at-tr:value t.deepEqual(video.ssrcs[2], { id: 1399694169, attribute: 'foo-bar', value: 'baz' }, 'video 3rd ssrc line attr with dash'); // ICE candidates (same for both audio and video in this case) [audio.candidates, video.candidates].forEach(function (cs, i) { var str = (i === 0) ? 'audio ' : 'video '; var port = (i === 0) ? 54400 : 55400; t.equal(cs.length, 4, str + 'got 4 candidates'); t.equal(cs[0].foundation, 0, str + 'ice candidate 0 foundation'); t.equal(cs[0].component, 1, str + 'ice candidate 0 component'); t.equal(cs[0].transport, 'UDP', str + 'ice candidate 0 transport'); t.equal(cs[0].priority, 2113667327, str + 'ice candidate 0 priority'); t.equal(cs[0].ip, '203.0.113.1', str + 'ice candidate 0 ip'); t.equal(cs[0].port, port, str + 'ice candidate 0 port'); t.equal(cs[0].type, 'host', str + 'ice candidate 0 type'); t.equal(cs[1].foundation, 1, str + 'ice candidate 1 foundation'); t.equal(cs[1].component, 2, str + 'ice candidate 1 component'); t.equal(cs[1].transport, 'UDP', str + 'ice candidate 1 transport'); t.equal(cs[1].priority, 2113667326, str + 'ice candidate 1 priority'); t.equal(cs[1].ip, '203.0.113.1', str + 'ice candidate 1 ip'); t.equal(cs[1].port, port+1, str + 'ice candidate 1 port'); t.equal(cs[1].type, 'host', str + 'ice candidate 1 type'); t.equal(cs[2].foundation, 2, str + 'ice candidate 2 foundation'); t.equal(cs[2].component, 1, str + 'ice candidate 2 component'); t.equal(cs[2].transport, 'UDP', str + 'ice candidate 2 transport'); t.equal(cs[2].priority, 1686052607, str + 'ice candidate 2 priority'); t.equal(cs[2].ip, '203.0.113.1', str + 'ice candidate 2 ip'); t.equal(cs[2].port, port+2, str + 'ice candidate 2 port'); t.equal(cs[2].type, 'srflx', str + 'ice candidate 2 type'); t.equal(cs[2].raddr, '192.168.1.145', str + 'ice candidate 2 raddr'); t.equal(cs[2].rport, port+2, str + 'ice candidate 2 rport'); t.equal(cs[2].generation, 0, str + 'ice candidate 2 generation'); t.equal(cs[2]['network-id'], 3, str + 'ice candidate 2 network-id'); t.equal(cs[2]['network-cost'], (i === 0 ? 
10 : undefined), str + 'ice candidate 2 network-cost'); t.equal(cs[3].foundation, 3, str + 'ice candidate 3 foundation'); t.equal(cs[3].component, 2, str + 'ice candidate 3 component'); t.equal(cs[3].transport, 'UDP', str + 'ice candidate 3 transport'); t.equal(cs[3].priority, 1686052606, str + 'ice candidate 3 priority'); t.equal(cs[3].ip, '203.0.113.1', str + 'ice candidate 3 ip'); t.equal(cs[3].port, port+3, str + 'ice candidate 3 port'); t.equal(cs[3].type, 'srflx', str + 'ice candidate 3 type'); t.equal(cs[3].raddr, '192.168.1.145', str + 'ice candidate 3 raddr'); t.equal(cs[3].rport, port+3, str + 'ice candidate 3 rport'); t.equal(cs[3].generation, 0, str + 'ice candidate 3 generation'); t.equal(cs[3]['network-id'], 3, str + 'ice candidate 3 network-id'); t.equal(cs[3]['network-cost'], (i === 0 ? 10 : undefined), str + 'ice candidate 3 network-cost'); }); t.equal(media.length, 2, 'got 2 m-lines'); }); /* * Test for an sdp that started out as something from chrome * it's since been hacked to include tests for other stuff * ignore the name */ test('hackySdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/hacky.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); t.equal(session.origin.sessionId, '3710604898417546434', 'origin sessionId'); t.ok(session.groups, 'parsing session groups'); t.equal(session.groups.length, 1, 'one grouping'); t.equal(session.groups[0].type, 'BUNDLE', 'grouping is BUNDLE'); t.equal(session.groups[0].mids, 'audio video', 'bundling audio video'); t.ok(session.msidSemantic, 'have an msid semantic'); t.equal(session.msidSemantic.semantic, 'WMS', 'webrtc semantic'); t.equal(session.msidSemantic.token, 'Jvlam5X3SX1OP6pn20zWogvaKJz5Hjf9OnlV', 'semantic token'); // verify a=rtcp:65179 IN IP4 193.84.77.194 t.equal(media[0].rtcp.port, 1, 'rtcp port'); t.equal(media[0].rtcp.netType, 'IN', 'rtcp netType'); t.equal(media[0].rtcp.ipVer, 4, 'rtcp ipVer'); t.equal(media[0].rtcp.address, '0.0.0.0', 'rtcp address'); // verify ice tcp types t.equal(media[0].candidates[0].tcptype, undefined, 'no tcptype'); t.equal(media[0].candidates[1].tcptype, 'active', 'active tcptype'); t.equal(media[0].candidates[1].transport, 'tcp', 'tcp transport'); t.equal(media[0].candidates[1].generation, 0, 'generation 0'); t.equal(media[0].candidates[1].type, 'host', 'tcp host'); t.equal(media[0].candidates[2].generation, undefined, 'no generation'); t.equal(media[0].candidates[2].type, 'host', 'tcp host'); t.equal(media[0].candidates[2].tcptype, 'active', 'active tcptype'); t.equal(media[0].candidates[3].tcptype, 'passive', 'passive tcptype'); t.equal(media[0].candidates[4].tcptype, 'so', 'so tcptype'); // raddr + rport + tcptype + generation t.equal(media[0].candidates[5].type, 'srflx', 'tcp srflx'); t.equal(media[0].candidates[5].rport, 9, 'tcp rport'); t.equal(media[0].candidates[5].raddr, '10.0.1.1', 'tcp raddr'); t.equal(media[0].candidates[5].tcptype, 'active', 'active tcptype'); t.equal(media[0].candidates[6].tcptype, 'passive', 'passive tcptype'); t.equal(media[0].candidates[6].rport, 8998, 'tcp rport'); t.equal(media[0].candidates[6].raddr, '10.0.1.1', 'tcp raddr'); t.equal(media[0].candidates[6].generation, 5, 'tcp generation'); // and verify it works without specifying the ip t.equal(media[1].rtcp.port, 12312, 'rtcp port'); t.equal(media[1].rtcp.netType, undefined, 'rtcp netType'); t.equal(media[1].rtcp.ipVer, undefined, 'rtcp ipVer'); t.equal(media[1].rtcp.address, undefined, 
'rtcp address'); // verify a=rtpmap:126 telephone-event/8000 var lastRtp = media[0].rtp.length-1; t.equal(media[0].rtp[lastRtp].codec, 'telephone-event', 'dtmf codec'); t.equal(media[0].rtp[lastRtp].rate, 8000, 'dtmf rate'); t.equal(media[0].iceOptions, 'google-ice', 'ice options parsed'); t.equal(media[0].ptime, 0.125, 'audio packet duration'); t.equal(media[0].maxptime, 60, 'maxptime parsed'); t.equal(media[0].rtcpMux, 'rtcp-mux', 'rtcp-mux present'); t.equal(media[0].rtp[0].codec, 'opus', 'audio rtp 0 codec'); t.equal(media[0].rtp[0].encoding, 2, 'audio rtp 0 encoding'); t.ok(media[0].ssrcs, 'have ssrc lines'); t.equal(media[0].ssrcs.length, 4, 'got 4 ssrc lines'); var ssrcs = media[0].ssrcs; t.deepEqual(ssrcs[0], { id: 2754920552, attribute: 'cname', value: 't9YU8M1UxTF8Y1A1' }, '1st ssrc line'); t.deepEqual(ssrcs[1], { id: 2754920552, attribute: 'msid', value: 'Jvlam5X3SX1OP6pn20zWogvaKJz5Hjf9OnlV Jvlam5X3SX1OP6pn20zWogvaKJz5Hjf9OnlVa0' }, '2nd ssrc line'); t.deepEqual(ssrcs[2], { id: 2754920552, attribute: 'mslabel', value: 'Jvlam5X3SX1OP6pn20zWogvaKJz5Hjf9OnlV' }, '3rd ssrc line'); t.deepEqual(ssrcs[3], { id: 2754920552, attribute: 'label', value: 'Jvlam5X3SX1OP6pn20zWogvaKJz5Hjf9OnlVa0' }, '4th ssrc line'); // verify a=sctpmap:5000 webrtc-datachannel 1024 t.ok(media[2].sctpmap, 'we have sctpmap'); t.equal(media[2].sctpmap.sctpmapNumber, 5000, 'sctpmap number is 5000'); t.equal(media[2].sctpmap.app, 'webrtc-datachannel', 'sctpmap app is webrtc-datachannel'); t.equal(media[2].sctpmap.maxMessageSize, 1024, 'sctpmap maxMessageSize is 1024'); // verify a=framerate:29.97 t.ok(media[2].framerate, 'we have framerate'); t.equal(media[2].framerate, 29.97, 'framerate is 29.97'); // verify a=label:1 t.ok(media[0].label, 'we have label'); t.equal(media[0].label, 1, 'label is 1'); }); test('iceliteSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/icelite.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); t.equal(session.icelite, 'ice-lite', 'icelite parsed'); var rew = write(session); t.ok(rew.indexOf('a=ice-lite\r\n') >= 0, 'got ice-lite'); t.ok(rew.indexOf('m=') > rew.indexOf('a=ice-lite'), 'session level icelite'); }); test('invalidSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/invalid.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); // verify a=rtcp:65179 IN IP4 193.84.77.194 t.equal(media[0].rtcp.port, 1, 'rtcp port'); t.equal(media[0].rtcp.netType, 'IN', 'rtcp netType'); t.equal(media[0].rtcp.ipVer, 7, 'rtcp ipVer'); t.equal(media[0].rtcp.address, 'X', 'rtcp address'); t.equal(media[0].invalid.length, 1, 'found exactly 1 invalid line'); // f= lost t.equal(media[0].invalid[0].value, 'goo:hithere', 'copied verbatim'); }); test('jssipSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/jssip.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); var audio = media[0]; var audCands = audio.candidates; t.equal(audCands.length, 6, '6 candidates'); // testing ice optionals: t.deepEqual(audCands[0], { foundation: 1162875081, component: 1, transport: 'udp', priority: 2113937151, ip: '192.168.34.75', port: 60017, type: 'host', generation: 0, }, 'audio candidate 0' ); t.deepEqual(audCands[2], { foundation: 3289912957, component: 1, transport: 'udp', priority: 1845501695, ip: '193.84.77.194', port: 60017, type: 'srflx', raddr: 
'192.168.34.75', rport: 60017, generation: 0, }, 'audio candidate 2 (raddr rport)' ); t.deepEqual(audCands[4], { foundation: 198437945, component: 1, transport: 'tcp', priority: 1509957375, ip: '192.168.34.75', port: 0, type: 'host', generation: 0 }, 'audio candidate 4 (tcp)' ); }); test('jsepSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/jsep.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length === 2, 'got media'); var video = media[1]; t.equal(video.ssrcGroups.length, 1, '1 ssrc grouping'); t.deepEqual(video.ssrcGroups[0], { semantics: 'FID', ssrcs: '1366781083 1366781084' }, 'ssrc-group' ); t.equal(video.msid, '61317484-2ed4-49d7-9eb7-1414322a7aae f30bdb4a-5db8-49b5-bcdc-e0c9a23172e0' , 'msid' ); t.ok(video.rtcpRsize, 'rtcp-rsize present'); t.ok(video.bundleOnly, 'bundle-only present'); // video contains 'a=end-of-candidates' // we want to ensure this comes after the candidate lines // so this is the only place we actually test the writer in here t.ok(video.endOfCandidates, 'have end of candidates marker'); var rewritten = write(session).split('\r\n'); var idx = rewritten.indexOf('a=end-of-candidates'); t.equal(rewritten[idx-1].slice(0, 11), 'a=candidate', 'marker after candidate'); }); test('alacSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/alac.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); var audio = media[0]; t.equal(audio.type, 'audio', 'audio type'); t.equal(audio.protocol, 'RTP/AVP', 'audio protocol'); t.equal(audio.fmtp[0].payload, 96, 'audio fmtp 0 payload'); t.equal(audio.fmtp[0].config, '352 0 16 40 10 14 2 255 0 0 44100', 'audio fmtp 0 config'); t.equal(audio.rtp[0].payload, 96, 'audio rtp 0 payload'); t.equal(audio.rtp[0].codec, 'AppleLossless', 'audio rtp 0 codec'); t.equal(audio.rtp[0].rate, undefined, 'audio rtp 0 rate'); t.equal(audio.rtp[0].encoding, undefined, 'audio rtp 0 encoding'); }); test('onvifSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/onvif.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); var audio = media[0]; t.equal(audio.type, 'audio', 'audio type'); t.equal(audio.port, 0, 'audio port'); t.equal(audio.protocol, 'RTP/AVP', 'audio protocol'); t.equal(audio.control, 'rtsp://example.com/onvif_camera/audio', 'audio control'); t.equal(audio.payloads, 0, 'audio payloads'); var video = media[1]; t.equal(video.type, 'video', 'video type'); t.equal(video.port, 0, 'video port'); t.equal(video.protocol, 'RTP/AVP', 'video protocol'); t.equal(video.control, 'rtsp://example.com/onvif_camera/video', 'video control'); t.equal(video.payloads, 26, 'video payloads'); var application = media[2]; t.equal(application.type, 'application', 'application type'); t.equal(application.port, 0, 'application port'); t.equal(application.protocol, 'RTP/AVP', 'application protocol'); t.equal(application.control, 'rtsp://example.com/onvif_camera/metadata', 'application control'); t.equal(application.payloads, 107, 'application payloads'); t.equal(application.direction, 'recvonly', 'application direction'); t.equal(application.rtp[0].payload, 107, 'application rtp 0 payload'); t.equal(application.rtp[0].codec, 'vnd.onvif.metadata', 'application rtp 0 codec'); t.equal(application.rtp[0].rate, 90000, 'application rtp 0 rate'); 
t.equal(application.rtp[0].encoding, undefined, 'application rtp 0 encoding'); }); test('ssrcSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/ssrc.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); var video = media[1]; t.equal(video.ssrcGroups.length, 2, 'video got 2 ssrc-group lines'); var expectedSsrc = [ { semantics: 'FID', ssrcs: '3004364195 1126032854' }, { semantics: 'FEC-FR', ssrcs: '3004364195 1080772241' } ]; t.deepEqual(video.ssrcGroups, expectedSsrc, 'video ssrc-group obj'); }); test('simulcastSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/simulcast.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); var video = media[1]; t.equal(video.type, 'video', 'video type'); // test rid lines t.equal(video.rids.length, 5, 'video got 5 rid lines'); // test rid 1 t.deepEqual(video.rids[0], { id: 1, direction: 'send', params: 'pt=97;max-width=1280;max-height=720;max-fps=30' }, 'video 1st rid line'); // test rid 2 t.deepEqual(video.rids[1], { id: 2, direction: 'send', params: 'pt=98' }, 'video 2nd rid line'); // test rid 3 t.deepEqual(video.rids[2], { id: 3, direction: 'send', params: 'pt=99' }, 'video 3rd rid line'); // test rid 4 t.deepEqual(video.rids[3], { id: 4, direction: 'send', params: 'pt=100' }, 'video 4th rid line'); // test rid 5 t.deepEqual(video.rids[4], { id: 'c', direction: 'recv', params: 'pt=97' }, 'video 5th rid line'); // test rid 1 params var rid1Params = parseParams(video.rids[0].params); t.deepEqual(rid1Params, { 'pt': 97, 'max-width': 1280, 'max-height': 720, 'max-fps': 30 }, 'video 1st rid params'); // test rid 2 params var rid2Params = parseParams(video.rids[1].params); t.deepEqual(rid2Params, { 'pt': 98 }, 'video 2nd rid params'); // test rid 3 params var rid3Params = parseParams(video.rids[2].params); t.deepEqual(rid3Params, { 'pt': 99 }, 'video 3rd rid params'); // test rid 4 params var rid4Params = parseParams(video.rids[3].params); t.deepEqual(rid4Params, { 'pt': 100 }, 'video 4th rid params'); // test rid 5 params var rid5Params = parseParams(video.rids[4].params); t.deepEqual(rid5Params, { 'pt': 97 }, 'video 5th rid params'); // test imageattr lines t.equal(video.imageattrs.length, 5, 'video got 5 imageattr lines'); // test imageattr 1 t.deepEqual(video.imageattrs[0], { pt: 97, dir1: 'send', attrs1: '[x=1280,y=720]', dir2: 'recv', attrs2: '[x=1280,y=720] [x=320,y=180] [x=160,y=90]' }, 'video 1st imageattr line'); // test imageattr 2 t.deepEqual(video.imageattrs[1], { pt: 98, dir1: 'send', attrs1: '[x=320,y=180]' }, 'video 2nd imageattr line'); // test imageattr 3 t.deepEqual(video.imageattrs[2], { pt: 99, dir1: 'send', attrs1: '[x=160,y=90]' }, 'video 3rd imageattr line'); // test imageattr 4 t.deepEqual(video.imageattrs[3], { pt: 100, dir1: 'recv', attrs1: '[x=1280,y=720] [x=320,y=180]', dir2: 'send', attrs2: '[x=1280,y=720]' }, 'video 4th imageattr line'); // test imageattr 5 t.deepEqual(video.imageattrs[4], { pt: '*', dir1: 'recv', attrs1: '*' }, 'video 5th imageattr line'); // test imageattr 1 send params var imageattr1SendParams = parseImageAttributes(video.imageattrs[0].attrs1); t.deepEqual(imageattr1SendParams, [ {'x': 1280, 'y': 720} ], 'video 1st imageattr send params'); // test imageattr 1 recv params var imageattr1RecvParams = parseImageAttributes(video.imageattrs[0].attrs2); t.deepEqual(imageattr1RecvParams, [ {'x': 
1280, 'y': 720}, {'x': 320, 'y': 180}, {'x': 160, 'y': 90}, ], 'video 1st imageattr recv params'); // test imageattr 2 send params var imageattr2SendParams = parseImageAttributes(video.imageattrs[1].attrs1); t.deepEqual(imageattr2SendParams, [ {'x': 320, 'y': 180} ], 'video 2nd imageattr send params'); // test imageattr 3 send params var imageattr3SendParams = parseImageAttributes(video.imageattrs[2].attrs1); t.deepEqual(imageattr3SendParams, [ {'x': 160, 'y': 90} ], 'video 3rd imageattr send params'); // test imageattr 4 recv params var imageattr4RecvParams = parseImageAttributes(video.imageattrs[3].attrs1); t.deepEqual(imageattr4RecvParams, [ {'x': 1280, 'y': 720}, {'x': 320, 'y': 180}, ], 'video 4th imageattr recv params'); // test imageattr 4 send params var imageattr4SendParams = parseImageAttributes(video.imageattrs[3].attrs2); t.deepEqual(imageattr4SendParams, [ {'x': 1280, 'y': 720} ], 'video 4th imageattr send params'); // test imageattr 5 recv params t.equal(video.imageattrs[4].attrs1, '*', 'video 5th imageattr recv params'); // test simulcast line t.deepEqual(video.simulcast, { dir1: 'send', list1: '1,~4;2;3', dir2: 'recv', list2: 'c' }, 'video simulcast line'); // test simulcast send streams var simulcastSendStreams = parseSimulcastStreamList(video.simulcast.list1); t.deepEqual(simulcastSendStreams, [ [ {scid: 1, paused: false}, {scid: 4, paused: true} ], [ {scid: 2, paused: false} ], [ {scid: 3, paused: false} ] ], 'video simulcast send streams'); // test simulcast recv streams var simulcastRecvStreams = parseSimulcastStreamList(video.simulcast.list2); t.deepEqual(simulcastRecvStreams, [ [ {scid: 'c', paused: false} ] ], 'video simulcast recv streams'); // test simulcast version 03 line t.deepEqual(video.simulcast_03, { value: 'send rid=1,4;2;3 paused=4 recv rid=c' }, 'video simulcast draft 03 line'); }); test('ST2022-6', function *(t) { var sdp = yield fs.readFile(__dirname + '/st2022-6.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); var video = media[0]; var sourceFilter = video.sourceFilter; t.equal(sourceFilter.filterMode, 'incl', 'filter-mode is "incl"'); t.equal(sourceFilter.netType, 'IN', 'nettype is "IN"'); t.equal(sourceFilter.addressTypes, 'IP4', 'address-type is "IP4"'); t.equal(sourceFilter.destAddress, '239.0.0.1', 'dest-address is "239.0.0.1"'); t.equal(sourceFilter.srcList, '192.168.20.20', 'src-list is "192.168.20.20"'); }); test('ST2110-20', function* (t) { var sdp = yield fs.readFile(__dirname + '/st2110-20.sdp', 'utf8'); var session = parse(sdp + ''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); var video = media[0]; var sourceFilter = video.sourceFilter; t.equal(sourceFilter.filterMode, 'incl', 'filter-mode is "incl"'); t.equal(sourceFilter.netType, 'IN', 'nettype is "IN"'); t.equal(sourceFilter.addressTypes, 'IP4', 'address-type is "IP4"'); t.equal(sourceFilter.destAddress, '239.100.9.10', 'dest-address is "239.100.9.10"'); t.equal(sourceFilter.srcList, '192.168.100.2', 'src-list is "192.168.100.2"'); t.equal(video.type, 'video', 'video type'); var fmtp0Params = parseParams(video.fmtp[0].config); t.deepEqual(fmtp0Params, { sampling: 'YCbCr-4:2:2', width: 1280, height: 720, interlace: undefined, exactframerate: '60000/1001', depth: 10, TCS: 'SDR', colorimetry: 'BT709', PM: '2110GPM', SSN: 'ST2110-20:2017' }, 'video fmtp 0 params'); }); test('SCTP-DTLS-26', function* (t)
{ var sdp = yield fs.readFile(__dirname + '/sctp-dtls-26.sdp', 'utf8'); var session = parse(sdp + ''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); t.equal(session.origin.sessionId, '5636137646675714991', 'origin sessionId'); t.ok(session.groups, 'parsing session groups'); t.equal(session.groups.length, 1, 'one grouping'); t.equal(session.groups[0].type, 'BUNDLE', 'grouping is BUNDLE'); t.equal(session.groups[0].mids, 'data', 'bundling data'); t.ok(session.msidSemantic, 'have an msid semantic'); t.equal(session.msidSemantic.semantic, 'WMS', 'webrtc semantic'); // verify media is data application t.equal(media[0].type, 'application', 'media type application'); t.equal(media[0].mid, 'data', 'media id application'); // verify protocol and ports t.equal(media[0].protocol, 'UDP/DTLS/SCTP', 'protocol is UDP/DTLS/SCTP'); t.equal(media[0].port, 9, 'the UDP port value is 9'); t.equal(media[0].sctpPort, 5000, 'the offerer/answer SCTP port value is 5000'); // verify maxMessageSize t.equal(media[0].maxMessageSize, 10000, 'maximum message size is 10000'); }); test('extmapEncryptSdp', function *(t) { var sdp = yield fs.readFile(__dirname + '/extmap-encrypt.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length > 0, 'got media'); t.equal(session.origin.username, '-', 'origin username'); t.equal(session.origin.sessionId, 20518, 'origin sessionId'); t.equal(session.origin.sessionVersion, 0, 'origin sessionVersion'); t.equal(session.origin.netType, 'IN', 'origin netType'); t.equal(session.origin.ipVer, 4, 'origin ipVer'); t.equal(session.origin.address, '203.0.113.1', 'origin address'); t.equal(session.connection.ip, '203.0.113.1', 'session connect ip'); t.equal(session.connection.version, 4, 'session connect ip ver'); var audio = media[0]; t.equal(audio.type, 'audio', 'audio type'); t.equal(audio.port, 54400, 'audio port'); t.equal(audio.protocol, 'RTP/SAVPF', 'audio protocol'); t.equal(audio.rtp[0].payload, 96, 'audio rtp 0 payload'); t.equal(audio.rtp[0].codec, 'opus', 'audio rtp 0 codec'); t.equal(audio.rtp[0].rate, 48000, 'audio rtp 0 rate'); // extmap and encrypted extmap t.deepEqual(audio.ext[0], { value: 1, direction: 'sendonly', uri: 'URI-toffset' }, 'audio extension 0'); t.deepEqual(audio.ext[1], { value: 2, uri: 'urn:ietf:params:rtp-hdrext:toffset' }, 'audio extension 1'); t.deepEqual(audio.ext[2], { value: 3, 'encrypt-uri': 'urn:ietf:params:rtp-hdrext:encrypt', uri: 'urn:ietf:params:rtp-hdrext:smpte-tc', config: '25@600/24' }, 'audio extension 2'); t.deepEqual(audio.ext[3], { value: 4, direction: 'recvonly', 'encrypt-uri': 'urn:ietf:params:rtp-hdrext:encrypt', uri: 'URI-gps-string' }, 'audio extension 3'); t.equal(media.length, 1, 'got 1 m-lines'); }); test('dante-aes67', function *(t) { var sdp = yield fs.readFile(__dirname + '/dante-aes67.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 1, 'got single media'); t.equal(session.origin.username, '-', 'origin username'); t.equal(session.origin.sessionId, 1423986, 'origin sessionId'); t.equal(session.origin.sessionVersion, 1423994, 'origin sessionVersion'); t.equal(session.origin.netType, 'IN', 'origin netType'); t.equal(session.origin.ipVer, 4, 'origin ipVer'); t.equal(session.origin.address, '169.254.98.63', 'origin address'); t.equal(session.name, 'AOIP44-serial-1614 : 2', 'Session Name'); t.equal(session.keywords,
'Dante', 'Keywords'); t.equal(session.connection.ip, '239.65.125.63/32', 'session connect ip'); t.equal(session.connection.version, 4, 'session connect ip ver'); var audio = media[0]; t.equal(audio.type, 'audio', 'audio type'); t.equal(audio.port, 5004, 'audio port'); t.equal(audio.protocol, 'RTP/AVP', 'audio protocol'); t.equal(audio.direction, 'recvonly', 'audio direction'); t.equal(audio.description, '2 channels: TxChan 0, TxChan 1', 'audio description'); t.equal(audio.ptime, 1, 'audio packet duration'); t.equal(audio.rtp[0].payload, 97, 'audio rtp payload type'); t.equal(audio.rtp[0].codec, 'L24', 'audio rtp codec'); t.equal(audio.rtp[0].rate, 48000, 'audio sample rate'); t.equal(audio.rtp[0].encoding, 2, 'audio channels'); }); test('bfcp', function *(t) { var sdp = yield fs.readFile(__dirname + '/bfcp.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 4, 'got 4 media'); t.equal(session.origin.username, '-', 'origin username'); var audio = media[0]; t.equal(audio.type, 'audio', 'audio type'); var video = media[1]; t.equal(video.type, 'video', 'main video type'); t.equal(video.direction, 'sendrecv', 'main video direction'); t.equal(video.content, 'main', 'main video content'); t.equal(video.label, 1, 'main video label'); var app = media[2]; t.equal(app.type, 'application', 'application type'); t.equal(app.port, 3238, 'application port'); t.equal(app.protocol, 'UDP/BFCP', 'bfcp protocol'); t.equal(app.payloads, '*', 'bfcp payloads'); t.equal(app.connectionType, 'new', 'connection type'); t.equal(app.bfcpFloorCtrl, 's-only', 'bfcp Floor Control'); t.equal(app.bfcpConfId, 1, 'bfcp ConfId'); t.equal(app.bfcpUserId, 1, 'bfcp UserId'); t.equal(app.bfcpFloorId.id, 1, 'bfcp FloorId'); t.equal(app.bfcpFloorId.mStream, 3, 'bfcp Floor Stream'); var video2 = media[3]; t.equal(video2.type, 'video', '2nd video type'); t.equal(video2.direction, 'sendrecv', '2nd video direction'); t.equal(video2.content, 'slides', '2nd video content'); t.equal(video2.label, 3, '2nd video label'); }); test('tcp-active', function *(t) { var sdp = yield fs.readFile(__dirname + '/tcp-active.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 1, 'got single media'); t.equal(session.origin.username, '-', 'origin username'); t.equal(session.origin.sessionId, 1562876543, 'origin sessionId'); t.equal(session.origin.sessionVersion, 11, 'origin sessionVersion'); t.equal(session.origin.netType, 'IN', 'origin netType'); t.equal(session.origin.ipVer, 4, 'origin ipVer'); t.equal(session.origin.address, '192.0.2.3', 'origin address'); var image = media[0]; t.equal(image.type, 'image', 'image type'); t.equal(image.port, 9, 'port'); t.equal(image.connection.version, 4, 'Connection is IPv4'); t.equal(image.connection.ip, '192.0.2.3', 'Connection address'); t.equal(image.protocol, 'TCP', 'TCP protocol'); t.equal(image.payloads, 't38', 'TCP payload'); t.equal(image.setup, 'active', 'setup active'); t.equal(image.connectionType, 'new', 'new connection'); }); test('tcp-passive', function *(t) { var sdp = yield fs.readFile(__dirname + '/tcp-passive.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 1, 'got single media'); t.equal(session.origin.username, '-', 'origin username'); t.equal(session.origin.sessionId, 1562876543, 'origin sessionId'); t.equal(session.origin.sessionVersion, 11, 
'origin sessionVersion'); t.equal(session.origin.netType, 'IN', 'origin netType'); t.equal(session.origin.ipVer, 4, 'origin ipVer'); t.equal(session.origin.address, '192.0.2.2', 'origin address'); var image = media[0]; t.equal(image.type, 'image', 'image type'); t.equal(image.port, 54111, 'port'); t.equal(image.connection.version, 4, 'Connection is IPv4'); t.equal(image.connection.ip, '192.0.2.2', 'Connection address'); t.equal(image.protocol, 'TCP', 'TCP protocol'); t.equal(image.payloads, 't38', 'TCP payload'); t.equal(image.setup, 'passive', 'setup passive'); t.equal(image.connectionType, 'existing', 'existing connection'); }); test('mediaclk-avbtp', function *(t) { var sdp = yield fs.readFile(__dirname + '/mediaclk-avbtp.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 1, 'got single media'); var audio = media[0]; t.equal(audio.mediaClk.mediaClockName, 'IEEE1722', 'IEEE1722 Media Clock'); t.equal(audio.mediaClk.mediaClockValue, '38-D6-6D-8E-D2-78-13-2F', 'AVB stream ID'); }); test('mediaclk-ptp-v2-w-rate', function *(t) { var sdp = yield fs.readFile(__dirname + '/mediaclk-ptp-v2-w-rate.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 1, 'got single media'); var audio = media[0]; t.equal(audio.mediaClk.mediaClockName, 'direct', 'Direct Media Clock'); t.equal(audio.mediaClk.mediaClockValue, 963214424, 'offset'); t.equal(audio.mediaClk.rateNumerator, 1000, 'rate numerator'); t.equal(audio.mediaClk.rateDenominator, 1001, 'rate denominator'); }); test('mediaclk-ptp-v2', function *(t) { var sdp = yield fs.readFile(__dirname + '/mediaclk-ptp-v2.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 1, 'got single media'); var audio = media[0]; t.equal(audio.mediaClk.mediaClockName, 'direct', 'Direct Media Clock'); t.equal(audio.mediaClk.mediaClockValue, 963214424, 'offset'); }); test('mediaclk-rtp', function *(t) { var sdp = yield fs.readFile(__dirname + '/mediaclk-rtp.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var media = session.media; t.ok(media && media.length == 1, 'got single media'); var audio = media[0]; t.equal(audio.mediaClk.id, 'MDA6NjA6MmI6MjA6MTI6MWY=', 'Media Clock ID'); t.equal(audio.mediaClk.mediaClockName, 'sender', 'sender type'); }); test('ts-refclk-media', function *(t) { var sdp = yield fs.readFile(__dirname + '/ts-refclk-media.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var sessTsRefClocks = session.tsRefClocks; t.ok(sessTsRefClocks && sessTsRefClocks.length == 1, 'got one TS Ref Clock'); t.equal(sessTsRefClocks[0].clksrc, 'local', 'local Clock Source at Session Level'); var media = session.media; t.ok(media && media.length == 2, 'got two media'); var audio = media[0]; var audTsRefClocks = audio.tsRefClocks; t.ok(audTsRefClocks && audTsRefClocks.length == 2, 'got two audio TS Ref Clocks'); var audTsRefClock1 = audTsRefClocks[0]; t.equal(audTsRefClock1.clksrc, 'ntp', 'NTP Clock Source'); t.equal(audTsRefClock1.clksrcExt, '203.0.113.10', 'IPv4 address'); var audTsRefClock2 = audTsRefClocks[1]; t.equal(audTsRefClock2.clksrc, 'ntp', 'NTP Clock Source'); t.equal(audTsRefClock2.clksrcExt, '198.51.100.22', 'IPv4 address'); var video = media[1]; var vidTsRefClocks = video.tsRefClocks; t.ok(vidTsRefClocks && vidTsRefClocks.length == 1, 'got one video TS Ref 
Clock'); t.equal(vidTsRefClocks[0].clksrc, 'ptp', 'PTP Clock Source'); t.equal(vidTsRefClocks[0].clksrcExt, 'IEEE802.1AS-2011:39-A7-94-FF-FE-07-CB-D0', 'PTP config'); }); test('ts-refclk-sess', function *(t) { var sdp = yield fs.readFile(__dirname + '/ts-refclk-sess.sdp', 'utf8'); var session = parse(sdp+''); t.ok(session, 'got session info'); var sessTsRefClocks = session.tsRefClocks; t.ok(sessTsRefClocks && sessTsRefClocks.length == 1, 'got one TS Ref Clock at Session Level'); t.equal(sessTsRefClocks[0].clksrc, 'ntp', 'NTP Clock Source'); t.equal(sessTsRefClocks[0].clksrcExt, '/traceable/', 'traceable Clock Source'); });
<|file_name|>dataset.py<|end_file_name|><|fim▁begin|>import copy def joinHeaders(first, second, joined, on): joined.headers = first.headers[:] mappedHeaders = {} for header in second.headers: if header == on: continue i = 0 newHeader = header while newHeader in first.headers: newHeader = '{0}_{1}'.format(newHeader, i) i += 1 if i > 0: mappedHeaders[header] = newHeader joined.headers.append(newHeader) return mappedHeaders def mergeRow(row, toMerge, mappedHeaders): for header in toMerge: if header in mappedHeaders: row[mappedHeaders[header]] = toMerge[header] else: row[header] = toMerge[header] def mergeRows(first, second, joined, on, mappedHeaders): joined.rows = copy.deepcopy(first.rows) secondRows = copy.deepcopy(second.rows) for secondRow in secondRows: pivot = secondRow[on] for row in joined.rows: if row[on] == pivot: mergeRow(row, secondRow, mappedHeaders) break else: newRow = {} mergeRow(newRow, secondRow, mappedHeaders) joined.rows.append(newRow) class Dataset: def __init__(self, filename = '', separator=',', header=True): self.headers = [] self.rows = [] try: infile = file(filename, 'r') if header: self.headers = infile.readline().strip().split(separator) for line in infile: row = line.strip().split(separator) if not header and not self.headers: self.headers = ["V{0}".format(i) for i in range(len(row))] self.rows.append({self.headers[i]:row[i] for i in range(len(row))}) infile.close() except IOError: pass def export(self, filename): outfile = file(filename, 'w') outfile.write(','.join(self.headers)) for row in self.rows: outfile.write('\n') outfile.write(','.join([row[x] for x in self.headers])) outfile.close() def join(self, other, on): """Join this dataset with another dataset, creating a new dataset. The original datasets remain unchanged. The third argument is the header on which to join.""" # the join header must be present in both datasets if not (on in self.headers and on in other.headers): print "Error: header '{0}' not found in both collections".format(on)
return None # create new dataset joined = Dataset()
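A minimal usage sketch for the Dataset class above; the file names and the 'id' join column are illustrative assumptions, and since the class is written for Python 2 (print statement, file() builtin) the sketch assumes a Python 2 interpreter:

# Hypothetical usage; a.csv and b.csv are assumed to share an 'id' column.
from dataset import Dataset

left = Dataset('a.csv')            # headers are read from the first row
right = Dataset('b.csv')

joined = left.join(right, 'id')    # new Dataset; left and right stay unchanged
if joined is not None:             # join() returns None if 'id' is missing
    joined.export('joined.csv')
    pivoted = joined.pivot()       # first column becomes the new header row

left.append(right)                 # in-place: extends left's rows and headers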
<|file_name|>order-page.component.spec.ts<|end_file_name|><|fim▁begin|>import { async, ComponentFixture, TestBed } from '@angular/core/testing'; import { DebugElement, NO_ERRORS_SCHEMA } from '@angular/core'; import { NgbModule, NgbModal, ModalDismissReasons } from '@ng-bootstrap/ng-bootstrap'; import { OrderPageComponent } from './order-page.component'; <|fim▁hole|> beforeEach(async(() => { TestBed.configureTestingModule({ imports: [ NgbModule.forRoot() ], declarations: [ OrderPageComponent ], schemas: [ NO_ERRORS_SCHEMA ], providers: [ NgbModal ] }) .compileComponents(); })); beforeEach(() => { fixture = TestBed.createComponent(OrderPageComponent); component = fixture.componentInstance; fixture.detectChanges(); }); it('should create', () => { expect(component).toBeTruthy(); }); });<|fim▁end|>
describe('OrderPageComponent', () => { let component: OrderPageComponent; let fixture: ComponentFixture<OrderPageComponent>;
<|file_name|>create.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import sys from kmip.core import enums from kmip.demos import utils from kmip.pie import client if __name__ == '__main__': logger = utils.build_console_logger(logging.INFO) # Build and parse arguments parser = utils.build_cli_parser(enums.Operation.CREATE) opts, args = parser.parse_args(sys.argv[1:]) config = opts.config algorithm = opts.algorithm length = opts.length # Exit early if the arguments are not specified if algorithm is None: logger.error('No algorithm provided, exiting early from demo') sys.exit() if length is None: logger.error("No key length provided, exiting early from demo") sys.exit()<|fim▁hole|> algorithm = getattr(enums.CryptographicAlgorithm, algorithm, None) # Build the client and connect to the server with client.ProxyKmipClient(config=config) as client: try: uid = client.create(algorithm, length) logger.info("Successfully created symmetric key with ID: " "{0}".format(uid)) except Exception as e: logger.error(e)<|fim▁end|>
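The heart of the demo above is a single ProxyKmipClient call, which can also be driven programmatically; a minimal sketch, where the AES/256 choice and the 'client' config section name are assumptions rather than anything the demo fixes:

# Programmatic sketch of the demo's core call; the algorithm, length and
# config section name below are illustrative assumptions.
import logging
from kmip.core import enums
from kmip.pie import client

logging.basicConfig(level=logging.INFO)
with client.ProxyKmipClient(config='client') as c:
    uid = c.create(enums.CryptographicAlgorithm.AES, 256)
    logging.info('Created symmetric key with ID: %s', uid)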
<|file_name|>configparser.py<|end_file_name|><|fim▁begin|>"""Configuration file parser. A configuration file consists of sections, led by a "[section]" header, and followed by "name: value" entries, with continuations and such in the style of RFC 822. Intrinsic defaults can be specified by passing them into the ConfigParser constructor as a dictionary. class: ConfigParser -- responsible for parsing a list of configuration files, and managing the parsed database. methods: __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True): Create the parser. When `defaults' is given, it is initialized into the dictionary or intrinsic defaults. The keys must be strings, the values must be appropriate for %()s string interpolation. When `dict_type' is given, it will be used to create the dictionary objects for the list of sections, for the options within a section, and for the default values. When `delimiters' is given, it will be used as the set of substrings that divide keys from values. When `comment_prefixes' is given, it will be used as the set of substrings that prefix comments in empty lines. Comments can be indented. When `inline_comment_prefixes' is given, it will be used as the set of substrings that prefix comments in non-empty lines. When `strict` is True, the parser won't allow for any section or option duplicates while reading from a single source (file, string or dictionary). Default is True. When `empty_lines_in_values' is False (default: True), each empty line marks the end of an option. Otherwise, internal empty lines of a multiline option are kept as part of the value. When `allow_no_value' is True (default: False), options without values are accepted; the value presented for these is None. sections() Return all the configuration section names, sans DEFAULT. has_section(section) Return whether the given section exists. has_option(section, option) Return whether the given option exists in the given section. options(section) Return list of configuration options for the named section. read(filenames, encoding=None) Read and parse the list of named configuration files, given by name. A single filename is also allowed. Non-existing files are ignored. Return list of successfully read files. read_file(f, filename=None) Read and parse one configuration file, given as a file object. The filename defaults to f.name; it is only used in error messages (if f has no `name' attribute, the string `<???>' is used). read_string(string) Read configuration from a given string. read_dict(dictionary) Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. Values are automatically converted to strings. get(section, option, raw=False, vars=None, fallback=_UNSET) Return a string value for the named option. All % interpolations are expanded in the return values, based on the defaults passed into the constructor and the DEFAULT section. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents override any pre-existing defaults. If `option' is a key in `vars', the value from `vars' is used. getint(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to a float. getboolean(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to a boolean (currently case insensitively defined as 0, false, no, off for False, and 1, true, yes, on for True). Returns False or True. items(section=_UNSET, raw=False, vars=None) If section is given, return a list of tuples with (name, value) for each option in the section. Otherwise, return a list of tuples with (section_name, section_proxy) for each section, including DEFAULTSECT. remove_section(section) Remove the given file section and all its options. remove_option(section, option) Remove the given option from the given section. set(section, option, value) Set the given option. write(fp, space_around_delimiters=True) Write the configuration state in .ini format. If `space_around_delimiters' is True (the default), delimiters between keys and values are surrounded by spaces. """ from collections.abc import MutableMapping from collections import OrderedDict as _default_dict, ChainMap as _ChainMap import functools import io import itertools import re import sys import warnings __all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", "NoOptionError", "InterpolationError", "InterpolationDepthError", "InterpolationSyntaxError", "ParsingError", "MissingSectionHeaderError", "ConfigParser", "SafeConfigParser", "RawConfigParser", "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] DEFAULTSECT = "DEFAULT" MAX_INTERPOLATION_DEPTH = 10 # exception classes class Error(Exception): """Base class for ConfigParser exceptions.""" def __init__(self, msg=''): self.message = msg Exception.__init__(self, msg) def __repr__(self): return self.message __str__ = __repr__ class NoSectionError(Error): """Raised when no section matches a requested option.""" def __init__(self, section): Error.__init__(self, 'No section: %r' % (section,)) self.section = section self.args = (section, ) class DuplicateSectionError(Error): """Raised when a section is repeated in an input source. Possible repetitions that raise this exception are: multiple creation using the API or in strict parsers when a section is found more than once in a single input file, string or dictionary. """ def __init__(self, section, source=None, lineno=None): msg = [repr(section), " already exists"] if source is not None: message = ["While reading from ", repr(source)] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": section ") message.extend(msg) msg = message else: msg.insert(0, "Section ") Error.__init__(self, "".join(msg)) self.section = section self.source = source self.lineno = lineno self.args = (section, source, lineno) class DuplicateOptionError(Error): """Raised by strict parsers when an option is repeated in an input source. Current implementation raises this exception only when an option is found more than once in a single file, string or dictionary. 
""" def __init__(self, section, option, source=None, lineno=None): msg = [repr(option), " in section ", repr(section), " already exists"] if source is not None: message = ["While reading from ", repr(source)] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": option ") message.extend(msg) msg = message else: msg.insert(0, "Option ") Error.__init__(self, "".join(msg)) self.section = section self.option = option self.source = source self.lineno = lineno self.args = (section, option, source, lineno) class NoOptionError(Error): """A requested option was not found.""" def __init__(self, option, section): Error.__init__(self, "No option %r in section: %r" % (option, section)) self.option = option self.section = section self.args = (option, section) class InterpolationError(Error): """Base class for interpolation-related exceptions.""" def __init__(self, option, section, msg): Error.__init__(self, msg) self.option = option self.section = section self.args = (option, section, msg) class InterpolationMissingOptionError(InterpolationError): """A string substitution required a setting which was not available.""" def __init__(self, option, section, rawval, reference): msg = ("Bad value substitution:\n" "\tsection: [%s]\n" "\toption : %s\n" "\tkey : %s\n" "\trawval : %s\n" % (section, option, reference, rawval)) InterpolationError.__init__(self, option, section, msg) self.reference = reference self.args = (option, section, rawval, reference) class InterpolationSyntaxError(InterpolationError): """Raised when the source text contains invalid syntax. Current implementation raises this exception when the source text into which substitutions are made does not conform to the required syntax. """ class InterpolationDepthError(InterpolationError): """Raised when substitutions are nested too deeply.""" def __init__(self, option, section, rawval): msg = ("Value interpolation too deeply recursive:\n" "\tsection: [%s]\n" "\toption : %s\n" "\trawval : %s\n" % (section, option, rawval)) InterpolationError.__init__(self, option, section, msg) self.args = (option, section, rawval) class ParsingError(Error): """Raised when a configuration file does not follow legal syntax.""" def __init__(self, source=None, filename=None): # Exactly one of `source'/`filename' arguments has to be given. # `filename' kept for compatibility. if filename and source: raise ValueError("Cannot specify both `filename' and `source'. " "Use `source'.") elif not filename and not source: raise ValueError("Required argument `source' not given.") elif filename: source = filename Error.__init__(self, 'Source contains parsing errors: %r' % source) self.source = source self.errors = [] self.args = (source, ) @property def filename(self): """Deprecated, use `source'.""" warnings.warn( "The 'filename' attribute will be removed in future versions. " "Use 'source' instead.", DeprecationWarning, stacklevel=2 ) return self.source @filename.setter def filename(self, value): """Deprecated, user `source'.""" warnings.warn( "The 'filename' attribute will be removed in future versions. 
" "Use 'source' instead.", DeprecationWarning, stacklevel=2 ) self.source = value def append(self, lineno, line): self.errors.append((lineno, line)) self.message += '\n\t[line %2d]: %s' % (lineno, line) class MissingSectionHeaderError(ParsingError): """Raised when a key-value pair is found before any section header.""" def __init__(self, filename, lineno, line): Error.__init__( self, 'File contains no section headers.\nfile: %r, line: %d\n%r' % (filename, lineno, line)) self.source = filename self.lineno = lineno self.line = line self.args = (filename, lineno, line) # Used in parser getters to indicate the default behaviour when a specific # option is not found it to raise an exception. Created to enable `None' as # a valid fallback value. _UNSET = object() class Interpolation: """Dummy interpolation that passes the value through with no changes.""" def before_get(self, parser, section, option, value, defaults): return value def before_set(self, parser, section, option, value): return value def before_read(self, parser, section, option, value): return value def before_write(self, parser, section, option, value): return value class BasicInterpolation(Interpolation): """Interpolation as implemented in the classic ConfigParser. The option values can contain format strings which refer to other values in the same section, or values in the special default section. For example: something: %(dir)s/whatever would resolve the "%(dir)s" to the value of dir. All reference expansions are done late, on demand. If a user needs to use a bare % in a configuration file, she can escape it by writing %%. Other % usage is considered a user error and raises `InterpolationSyntaxError'.""" _KEYCRE = re.compile(r"%\(([^)]+)\)s") def before_get(self, parser, section, option, value, defaults): L = [] self._interpolate_some(parser, option, L, value, section, defaults, 1) return ''.join(L) def before_set(self, parser, section, option, value): tmp_value = value.replace('%%', '') # escaped percent signs tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '%' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " "position %d" % (value, tmp_value.find('%'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, depth): if depth > MAX_INTERPOLATION_DEPTH: raise InterpolationDepthError(option, section, rest) while rest: p = rest.find("%") if p < 0: accum.append(rest) return if p > 0: accum.append(rest[:p]) rest = rest[p:] # p is no longer used c = rest[1:2] if c == "%": accum.append("%") rest = rest[2:] elif c == "(": m = self._KEYCRE.match(rest) if m is None: raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest) var = parser.optionxform(m.group(1)) rest = rest[m.end():] try: v = map[var] except KeyError: raise InterpolationMissingOptionError( option, section, rest, var) if "%" in v: self._interpolate_some(parser, option, accum, v, section, map, depth + 1) else: accum.append(v) else: raise InterpolationSyntaxError( option, section, "'%%' must be followed by '%%' or '(', " "found: %r" % (rest,)) class ExtendedInterpolation(Interpolation): """Advanced variant of interpolation, supports the syntax used by `zc.buildout'. 
Enables interpolation between sections.""" _KEYCRE = re.compile(r"\$\{([^}]+)\}") def before_get(self, parser, section, option, value, defaults): L = [] self._interpolate_some(parser, option, L, value, section, defaults, 1) return ''.join(L) def before_set(self, parser, section, option, value): tmp_value = value.replace('$$', '') # escaped dollar signs tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '$' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " "position %d" % (value, tmp_value.find('$'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, depth): if depth > MAX_INTERPOLATION_DEPTH: raise InterpolationDepthError(option, section, rest) while rest: p = rest.find("$") if p < 0: accum.append(rest) return if p > 0: accum.append(rest[:p]) rest = rest[p:] # p is no longer used c = rest[1:2] if c == "$": accum.append("$") rest = rest[2:] elif c == "{": m = self._KEYCRE.match(rest) if m is None: raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest) path = m.group(1).split(':') rest = rest[m.end():] sect = section opt = option try: if len(path) == 1: opt = parser.optionxform(path[0]) v = map[opt] elif len(path) == 2: sect = path[0] opt = parser.optionxform(path[1]) v = parser.get(sect, opt, raw=True) else: raise InterpolationSyntaxError( option, section, "More than one ':' found: %r" % (rest,)) except (KeyError, NoSectionError, NoOptionError): raise InterpolationMissingOptionError( option, section, rest, ":".join(path)) if "$" in v: self._interpolate_some(parser, opt, accum, v, sect, dict(parser.items(sect, raw=True)), depth + 1) else: accum.append(v) else: raise InterpolationSyntaxError( option, section, "'$' must be followed by '$' or '{', " "found: %r" % (rest,)) class LegacyInterpolation(Interpolation): """Deprecated interpolation used in old versions of ConfigParser. Use BasicInterpolation or ExtendedInterpolation instead.""" _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") def before_get(self, parser, section, option, value, vars): rawval = value depth = MAX_INTERPOLATION_DEPTH while depth: # Loop through this until it's done depth -= 1 if value and "%(" in value: replace = functools.partial(self._interpolation_replace, parser=parser) value = self._KEYCRE.sub(replace, value) try: value = value % vars except KeyError as e: raise InterpolationMissingOptionError( option, section, rawval, e.args[0]) else: break if value and "%(" in value: raise InterpolationDepthError(option, section, rawval) return value def before_set(self, parser, section, option, value): return value @staticmethod def _interpolation_replace(match, parser): s = match.group(1) if s is None: return match.group() else: return "%%(%s)s" % parser.optionxform(s) class RawConfigParser(MutableMapping): """ConfigParser that does not do interpolation.""" # Regular expressions for parsing section headers and options _SECT_TMPL = r""" \[ # [ (?P<header>[^]]+) # very permissive! \] # ] """ _OPT_TMPL = r""" (?P<option>.*?) # very permissive! \s*(?P<vi>{delim})\s* # any number of space/tab, # followed by any of the # allowed delimiters, # followed by any space/tab (?P<value>.*)$ # everything up to eol """ _OPT_NV_TMPL = r""" (?P<option>.*?) # very permissive! 
\s*(?: # any number of space/tab, (?P<vi>{delim})\s* # optionally followed by # any of the allowed # delimiters, followed by any # space/tab (?P<value>.*))?$ # everything up to eol """ # Interpolation algorithm to be used if the user does not specify another _DEFAULT_INTERPOLATION = Interpolation() # Compiled regular expression for matching sections SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE) # Compiled regular expression for matching options with typical separators OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE) # Compiled regular expression for matching options with optional values # delimited using typical separators OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE) # Compiled regular expression for matching leading whitespace in a line NONSPACECRE = re.compile(r"\S") # Possible boolean values in the configuration. BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True, '0': False, 'no': False, 'false': False, 'off': False} def __init__(self, defaults=None, dict_type=_default_dict, allow_no_value=False, *, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=DEFAULTSECT, interpolation=_UNSET): self._dict = dict_type self._sections = self._dict() self._defaults = self._dict() self._proxies = self._dict() self._proxies[default_section] = SectionProxy(self, default_section) if defaults: for key, value in defaults.items(): self._defaults[self.optionxform(key)] = value self._delimiters = tuple(delimiters) if delimiters == ('=', ':'): self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE else: d = "|".join(re.escape(d) for d in delimiters) if allow_no_value: self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d), re.VERBOSE) else: self._optcre = re.compile(self._OPT_TMPL.format(delim=d), re.VERBOSE) self._comment_prefixes = tuple(comment_prefixes or ()) self._inline_comment_prefixes = tuple(inline_comment_prefixes or ()) self._strict = strict self._allow_no_value = allow_no_value self._empty_lines_in_values = empty_lines_in_values self.default_section=default_section self._interpolation = interpolation if self._interpolation is _UNSET: self._interpolation = self._DEFAULT_INTERPOLATION if self._interpolation is None: self._interpolation = Interpolation() def defaults(self): return self._defaults def sections(self): """Return a list of section names, excluding [DEFAULT]""" # self._sections will never have [DEFAULT] in it return list(self._sections.keys()) def add_section(self, section): """Create a new section in the configuration. Raise DuplicateSectionError if a section by the specified name already exists. Raise ValueError if name is DEFAULT. """ if section == self.default_section: raise ValueError('Invalid section name: %r' % section) if section in self._sections: raise DuplicateSectionError(section) self._sections[section] = self._dict() self._proxies[section] = SectionProxy(self, section) def has_section(self, section): """Indicate whether the named section is present in the configuration. The DEFAULT section is not acknowledged. """ return section in self._sections def options(self, section): """Return a list of option names for the given section name.""" try: opts = self._sections[section].copy() except KeyError: raise NoSectionError(section) opts.update(self._defaults) return list(opts.keys()) def read(self, filenames, encoding=None): """Read and parse a filename or a list of filenames. 
Files that cannot be opened are silently ignored; this is designed so that you can specify a list of potential configuration file locations (e.g. current directory, user's home directory, systemwide directory), and all existing configuration files in the list will be read. A single filename may also be given. Return list of successfully read files. """ if isinstance(filenames, str): filenames = [filenames] read_ok = [] for filename in filenames: try: with open(filename, encoding=encoding) as fp: self._read(fp, filename) except OSError: continue read_ok.append(filename) return read_ok def read_file(self, f, source=None): """Like read() but the argument must be a file-like object. The `f' argument must be iterable, returning one line at a time. Optional second argument is the `source' specifying the name of the file being read. If not given, it is taken from f.name. If `f' has no `name' attribute, `<???>' is used. """ if source is None: try: source = f.name except AttributeError: source = '<???>' self._read(f, source) def read_string(self, string, source='<string>'): """Read configuration from a given string.""" sfile = io.StringIO(string) self.read_file(sfile, source) def read_dict(self, dictionary, source='<dict>'): """Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. All types held in the dictionary are converted to strings during reading, including section names, option names and keys. Optional second argument is the `source' specifying the name of the dictionary being read. """ elements_added = set() for section, keys in dictionary.items(): section = str(section) try: self.add_section(section) except (DuplicateSectionError, ValueError): if self._strict and section in elements_added: raise elements_added.add(section) for key, value in keys.items(): key = self.optionxform(str(key)) if value is not None: value = str(value) if self._strict and (section, key) in elements_added: raise DuplicateOptionError(section, key, source) elements_added.add((section, key)) self.set(section, key, value) def readfp(self, fp, filename=None): """Deprecated, use read_file instead.""" warnings.warn( "This method will be removed in future versions. " "Use 'parser.read_file()' instead.", DeprecationWarning, stacklevel=2 ) self.read_file(fp, source=filename) def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET): """Get an option value for a given section. If `vars' is provided, it must be a dictionary. The option is looked up in `vars' (if provided), `section', and in `DEFAULTSECT' in that order. If the key is not found and `fallback' is provided, it is used as a fallback value. `None' can be provided as a `fallback' value. If interpolation is enabled and the optional argument `raw' is False, all interpolations are expanded in the return values. Arguments `raw', `vars', and `fallback' are keyword only. The section DEFAULT is special. 
""" try: d = self._unify_values(section, vars) except NoSectionError: if fallback is _UNSET: raise else: return fallback option = self.optionxform(option) try: value = d[option] except KeyError: if fallback is _UNSET: raise NoOptionError(option, section) else: return fallback if raw or value is None: return value else: return self._interpolation.before_get(self, section, option, value, d) def _get(self, section, conv, option, **kwargs): return conv(self.get(section, option, **kwargs)) def getint(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, int, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def getfloat(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, float, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def getboolean(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, self._convert_to_boolean, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def items(self, section=_UNSET, raw=False, vars=None): """Return a list of (name, value) tuples for each option in a section. All % interpolations are expanded in the return values, based on the defaults passed into the constructor, unless the optional argument `raw' is true. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents overrides any pre-existing defaults. The section DEFAULT is special. """ if section is _UNSET: return super().items() d = self._defaults.copy() try: d.update(self._sections[section]) except KeyError: if section != self.default_section: raise NoSectionError(section) # Update with the entry specific variables if vars: for key, value in vars.items(): d[self.optionxform(key)] = value value_getter = lambda option: self._interpolation.before_get(self, section, option, d[option], d) if raw: value_getter = lambda option: d[option] return [(option, value_getter(option)) for option in d.keys()] def popitem(self): """Remove a section from the parser and return it as a (section_name, section_proxy) tuple. If no section is present, raise KeyError. The section DEFAULT is never returned because it cannot be removed. """ for key in self.sections(): value = self[key] del self[key] return key, value raise KeyError def optionxform(self, optionstr): return optionstr.lower() def has_option(self, section, option): """Check for the existence of a given option in a given section. If the specified `section' is None or an empty string, DEFAULT is assumed. 
If the specified `section' does not exist, returns False.""" if not section or section == self.default_section: option = self.optionxform(option) return option in self._defaults elif section not in self._sections: return False else: option = self.optionxform(option) return (option in self._sections[section] or option in self._defaults) def set(self, section, option, value=None): """Set an option.""" if value: value = self._interpolation.before_set(self, section, option, value) if not section or section == self.default_section: sectdict = self._defaults else: try: sectdict = self._sections[section] except KeyError: raise NoSectionError(section) sectdict[self.optionxform(option)] = value def write(self, fp, space_around_delimiters=True): """Write an .ini-format representation of the configuration state. If `space_around_delimiters' is True (the default), delimiters between keys and values are surrounded by spaces. """ if space_around_delimiters: d = " {} ".format(self._delimiters[0]) else: d = self._delimiters[0] if self._defaults: self._write_section(fp, self.default_section, self._defaults.items(), d) for section in self._sections: self._write_section(fp, section, self._sections[section].items(), d) def _write_section(self, fp, section_name, section_items, delimiter): """Write a single section to the specified `fp'.""" fp.write("[{}]\n".format(section_name)) for key, value in section_items: value = self._interpolation.before_write(self, section_name, key, value) if value is not None or not self._allow_no_value: value = delimiter + str(value).replace('\n', '\n\t') else: value = "" fp.write("{}{}\n".format(key, value)) fp.write("\n") def remove_option(self, section, option): """Remove an option.""" if not section or section == self.default_section: sectdict = self._defaults else: try: sectdict = self._sections[section] except KeyError: raise NoSectionError(section) option = self.optionxform(option) existed = option in sectdict if existed: del sectdict[option] return existed def remove_section(self, section): """Remove a file section.""" existed = section in self._sections if existed: del self._sections[section] del self._proxies[section] return existed def __getitem__(self, key): if key != self.default_section and not self.has_section(key): raise KeyError(key) return self._proxies[key] def __setitem__(self, key, value): # To conform with the mapping protocol, overwrites existing values in # the section. # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. if key == self.default_section: self._defaults.clear() elif key in self._sections: self._sections[key].clear() self.read_dict({key: value}) def __delitem__(self, key): if key == self.default_section: raise ValueError("Cannot remove the default section.") if not self.has_section(key): raise KeyError(key) self.remove_section(key) def __contains__(self, key): return key == self.default_section or self.has_section(key) def __len__(self): return len(self._sections) + 1 # the default section def __iter__(self): # XXX does it break when underlying container state changed? return itertools.chain((self.default_section,), self._sections.keys()) def _read(self, fp, fpname): """Parse a sectioned configuration file. Each section in a configuration file contains a header, indicated by a name in square brackets (`[]'), plus key/value options, indicated by `name' and `value' delimited with a specific substring (`=' or `:' by default). 
Values can span multiple lines, as long as they are indented deeper than the first line of the value. Depending on the parser's mode, blank lines may be treated as parts of multiline values or ignored. Configuration files may include comments, prefixed by specific characters (`#' and `;' by default). Comments may appear on their own in an otherwise empty line or may be entered in lines holding values or section names. """ elements_added = set() cursect = None # None, or a dictionary sectname = None optname = None lineno = 0 indent_level = 0 e = None # None, or an exception for lineno, line in enumerate(fp, start=1): comment_start = sys.maxsize # strip inline comments inline_prefixes = {p: -1 for p in self._inline_comment_prefixes} while comment_start == sys.maxsize and inline_prefixes: next_prefixes = {} for prefix, index in inline_prefixes.items(): index = line.find(prefix, index+1) if index == -1: continue next_prefixes[prefix] = index if index == 0 or (index > 0 and line[index-1].isspace()): comment_start = min(comment_start, index) inline_prefixes = next_prefixes # strip full line comments for prefix in self._comment_prefixes: if line.strip().startswith(prefix): comment_start = 0 break if comment_start == sys.maxsize: comment_start = None value = line[:comment_start].strip() if not value: if self._empty_lines_in_values: # add empty line to the value, but only if there was no # comment on the line if (comment_start is None and cursect is not None and optname and cursect[optname] is not None): cursect[optname].append('') # newlines added at join else: # empty line marks end of value indent_level = sys.maxsize continue # continuation line? first_nonspace = self.NONSPACECRE.search(line) cur_indent_level = first_nonspace.start() if first_nonspace else 0 if (cursect is not None and optname and cur_indent_level > indent_level): cursect[optname].append(value) # a section header or option header? else: indent_level = cur_indent_level # is it a section header? mo = self.SECTCRE.match(value) if mo: sectname = mo.group('header') if sectname in self._sections: if self._strict and sectname in elements_added: raise DuplicateSectionError(sectname, fpname, lineno) cursect = self._sections[sectname] elements_added.add(sectname) elif sectname == self.default_section: cursect = self._defaults else: cursect = self._dict() self._sections[sectname] = cursect self._proxies[sectname] = SectionProxy(self, sectname) elements_added.add(sectname) # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: raise MissingSectionHeaderError(fpname, lineno, line) # an option line? else: mo = self._optcre.match(value) if mo: optname, vi, optval = mo.group('option', 'vi', 'value') if not optname: e = self._handle_error(e, fpname, lineno, line) optname = self.optionxform(optname.rstrip()) if (self._strict and (sectname, optname) in elements_added): raise DuplicateOptionError(sectname, optname, fpname, lineno) elements_added.add((sectname, optname)) # This check is fine because the OPTCRE cannot # match if it would set optval to None if optval is not None: optval = optval.strip() cursect[optname] = [optval] else: # valueless option handling cursect[optname] = None else: # a non-fatal parsing error occurred. set up the # exception but keep going. 
the exception will be # raised at the end of the file and will contain a # list of all bogus lines e = self._handle_error(e, fpname, lineno, line) # if any parsing errors occurred, raise an exception if e: raise e self._join_multiline_values() def _join_multiline_values(self): defaults = self.default_section, self._defaults all_sections = itertools.chain((defaults,), self._sections.items()) for section, options in all_sections: for name, val in options.items(): if isinstance(val, list): val = '\n'.join(val).rstrip() options[name] = self._interpolation.before_read(self, section, name, val) def _handle_error(self, exc, fpname, lineno, line): if not exc: exc = ParsingError(fpname) exc.append(lineno, repr(line)) return exc def _unify_values(self, section, vars): """Create a sequence of lookups with 'vars' taking priority over the 'section' which takes priority over the DEFAULTSECT. """ sectiondict = {} try: sectiondict = self._sections[section] except KeyError: if section != self.default_section: raise NoSectionError(section) # Update with the entry specific variables vardict = {} if vars: for key, value in vars.items(): if value is not None: value = str(value) vardict[self.optionxform(key)] = value return _ChainMap(vardict, sectiondict, self._defaults) def _convert_to_boolean(self, value): """Return a boolean value translating from other types if necessary. """ if value.lower() not in self.BOOLEAN_STATES: raise ValueError('Not a boolean: %s' % value) return self.BOOLEAN_STATES[value.lower()] def _validate_value_types(self, *, section="", option="", value=""): """Raises a TypeError for non-string values. The only legal non-string value if we allow valueless options is None, so we need to check if the value is a string if: - we do not allow valueless options, or - we allow valueless options but the value is not None For compatibility reasons this method is not used in classic set() for RawConfigParsers. It is invoked in every case for mapping protocol access and in ConfigParser.set(). """ if not isinstance(section, str): raise TypeError("section names must be strings") if not isinstance(option, str): raise TypeError("option keys must be strings") if not self._allow_no_value or value: if not isinstance(value, str): raise TypeError("option values must be strings") class ConfigParser(RawConfigParser): """ConfigParser implementing interpolation.""" _DEFAULT_INTERPOLATION = BasicInterpolation() def set(self, section, option, value=None): """Set an option. Extends RawConfigParser.set by validating type and interpolation syntax on the value.""" self._validate_value_types(option=option, value=value) super().set(section, option, value) def add_section(self, section): """Create a new section in the configuration. Extends RawConfigParser.add_section by validating if the section name is a string.""" self._validate_value_types(section=section) super().add_section(section) class SafeConfigParser(ConfigParser): """ConfigParser alias for backwards compatibility purposes.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn( "The SafeConfigParser class has been renamed to ConfigParser " "in Python 3.2. This alias will be removed in future versions." 
" Use ConfigParser directly instead.", DeprecationWarning, stacklevel=2 ) class SectionProxy(MutableMapping): """A proxy for a single section from a parser.""" def __init__(self, parser, name): """Creates a view on a section of the specified `name` in `parser`.""" self._parser = parser self._name = name def __repr__(self): return '<Section: {}>'.format(self._name) def __getitem__(self, key): if not self._parser.has_option(self._name, key): raise KeyError(key) return self._parser.get(self._name, key) def __setitem__(self, key, value): self._parser._validate_value_types(option=key, value=value) return self._parser.set(self._name, key, value) def __delitem__(self, key): if not (self._parser.has_option(self._name, key) and self._parser.remove_option(self._name, key)): raise KeyError(key) def __contains__(self, key): return self._parser.has_option(self._name, key) def __len__(self): return len(self._options()) def __iter__(self): return self._options().__iter__() def _options(self): if self._name != self._parser.default_section: return self._parser.options(self._name) else: return self._parser.defaults() def get(self, option, fallback=None, *, raw=False, vars=None): return self._parser.get(self._name, option, raw=raw, vars=vars, fallback=fallback) def getint(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getint(self._name, option, raw=raw, vars=vars, fallback=fallback) def getfloat(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getfloat(self._name, option, raw=raw, vars=vars, fallback=fallback) def getboolean(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getboolean(self._name, option, raw=raw, vars=vars, fallback=fallback) @property def parser(self): # The parser object of the proxy is read-only. return self._parser @property def name(self): # The name of the section on a proxy is read-only. return self._name ======= """Configuration file parser. A configuration file consists of sections, lead by a "[section]" header, and followed by "name: value" entries, with continuations and such in the style of RFC 822. Intrinsic defaults can be specified by passing them into the ConfigParser constructor as a dictionary. class: ConfigParser -- responsible for parsing a list of configuration files, and managing the parsed database. methods: __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True): Create the parser. When `defaults' is given, it is initialized into the dictionary or intrinsic defaults. The keys must be strings, the values must be appropriate for %()s string interpolation. When `dict_type' is given, it will be used to create the dictionary objects for the list of sections, for the options within a section, and for the default values. When `delimiters' is given, it will be used as the set of substrings that divide keys from values. When `comment_prefixes' is given, it will be used as the set of substrings that prefix comments in empty lines. Comments can be indented. When `inline_comment_prefixes' is given, it will be used as the set of substrings that prefix comments in non-empty lines. When `strict` is True, the parser won't allow for any section or option duplicates while reading from a single source (file, string or dictionary). Default is True. When `empty_lines_in_values' is False (default: True), each empty line marks the end of an option. 
""" elements_added = set() cursect = None # None, or a dictionary sectname = None optname = None lineno = 0 indent_level = 0 e = None # None, or an exception for lineno, line in enumerate(fp, start=1): comment_start = sys.maxsize # strip inline comments inline_prefixes = {p: -1 for p in self._inline_comment_prefixes} while comment_start == sys.maxsize and inline_prefixes: next_prefixes = {} for prefix, index in inline_prefixes.items(): index = line.find(prefix, index+1) if index == -1: continue next_prefixes[prefix] = index if index == 0 or (index > 0 and line[index-1].isspace()): comment_start = min(comment_start, index) inline_prefixes = next_prefixes # strip full line comments for prefix in self._comment_prefixes: if line.strip().startswith(prefix): comment_start = 0 break if comment_start == sys.maxsize: comment_start = None value = line[:comment_start].strip() if not value: if self._empty_lines_in_values: # add empty line to the value, but only if there was no # comment on the line if (comment_start is None and cursect is not None and optname and cursect[optname] is not None): cursect[optname].append('') # newlines added at join else: # empty line marks end of value indent_level = sys.maxsize continue # continuation line? first_nonspace = self.NONSPACECRE.search(line) cur_indent_level = first_nonspace.start() if first_nonspace else 0 if (cursect is not None and optname and cur_indent_level > indent_level): cursect[optname].append(value) # a section header or option header? else: indent_level = cur_indent_level # is it a section header? mo = self.SECTCRE.match(value) if mo: sectname = mo.group('header') if sectname in self._sections: if self._strict and sectname in elements_added: raise DuplicateSectionError(sectname, fpname, lineno) cursect = self._sections[sectname] elements_added.add(sectname) elif sectname == self.default_section: cursect = self._defaults else: cursect = self._dict() self._sections[sectname] = cursect self._proxies[sectname] = SectionProxy(self, sectname) elements_added.add(sectname) # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: raise MissingSectionHeaderError(fpname, lineno, line) # an option line? else: mo = self._optcre.match(value) if mo: optname, vi, optval = mo.group('option', 'vi', 'value') if not optname: e = self._handle_error(e, fpname, lineno, line) optname = self.optionxform(optname.rstrip()) if (self._strict and (sectname, optname) in elements_added): raise DuplicateOptionError(sectname, optname, fpname, lineno) elements_added.add((sectname, optname)) # This check is fine because the OPTCRE cannot # match if it would set optval to None if optval is not None: optval = optval.strip() cursect[optname] = [optval] else: # valueless option handling cursect[optname] = None else: # a non-fatal parsing error occurred. set up the # exception but keep going. 
the exception will be # raised at the end of the file and will contain a # list of all bogus lines e = self._handle_error(e, fpname, lineno, line) # if any parsing errors occurred, raise an exception if e: raise e self._join_multiline_values() def _join_multiline_values(self): defaults = self.default_section, self._defaults all_sections = itertools.chain((defaults,), self._sections.items()) for section, options in all_sections: for name, val in options.items(): if isinstance(val, list): val = '\n'.join(val).rstrip() options[name] = self._interpolation.before_read(self, section, name, val) def _handle_error(self, exc, fpname, lineno, line): if not exc: exc = ParsingError(fpname) exc.append(lineno, repr(line)) return exc def _unify_values(self, section, vars): """Create a sequence of lookups with 'vars' taking priority over the 'section' which takes priority over the DEFAULTSECT. """ sectiondict = {} try: sectiondict = self._sections[section] except KeyError: if section != self.default_section: raise NoSectionError(section) # Update with the entry specific variables vardict = {} if vars: for key, value in vars.items(): if value is not None: value = str(value) vardict[self.optionxform(key)] = value return _ChainMap(vardict, sectiondict, self._defaults) def _convert_to_boolean(self, value): """Return a boolean value translating from other types if necessary. """ if value.lower() not in self.BOOLEAN_STATES: raise ValueError('Not a boolean: %s' % value) return self.BOOLEAN_STATES[value.lower()] def _validate_value_types(self, *, section="", option="", value=""): """Raises a TypeError for non-string values. The only legal non-string value if we allow valueless options is None, so we need to check if the value is a string if: - we do not allow valueless options, or - we allow valueless options but the value is not None For compatibility reasons this method is not used in classic set() for RawConfigParsers. It is invoked in every case for mapping protocol access and in ConfigParser.set(). """ if not isinstance(section, str): raise TypeError("section names must be strings") if not isinstance(option, str): raise TypeError("option keys must be strings") if not self._allow_no_value or value: if not isinstance(value, str): raise TypeError("option values must be strings") class ConfigParser(RawConfigParser): """ConfigParser implementing interpolation.""" _DEFAULT_INTERPOLATION = BasicInterpolation() def set(self, section, option, value=None): """Set an option. Extends RawConfigParser.set by validating type and interpolation syntax on the value.""" self._validate_value_types(option=option, value=value) super().set(section, option, value) def add_section(self, section): """Create a new section in the configuration. Extends RawConfigParser.add_section by validating if the section name is a string.""" self._validate_value_types(section=section) super().add_section(section) class SafeConfigParser(ConfigParser): """ConfigParser alias for backwards compatibility purposes.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn( "The SafeConfigParser class has been renamed to ConfigParser " "in Python 3.2. This alias will be removed in future versions." 
" Use ConfigParser directly instead.", DeprecationWarning, stacklevel=2 ) class SectionProxy(MutableMapping): """A proxy for a single section from a parser.""" def __init__(self, parser, name): """Creates a view on a section of the specified `name` in `parser`.""" self._parser = parser self._name = name def __repr__(self): return '<Section: {}>'.format(self._name) def __getitem__(self, key): if not self._parser.has_option(self._name, key): raise KeyError(key) return self._parser.get(self._name, key) def __setitem__(self, key, value): self._parser._validate_value_types(option=key, value=value) return self._parser.set(self._name, key, value) def __delitem__(self, key): if not (self._parser.has_option(self._name, key) and self._parser.remove_option(self._name, key)): raise KeyError(key) def __contains__(self, key): return self._parser.has_option(self._name, key) def __len__(self): return len(self._options()) def __iter__(self): return self._options().__iter__() def _options(self): if self._name != self._parser.default_section: return self._parser.options(self._name) else: return self._parser.defaults() def get(self, option, fallback=None, *, raw=False, vars=None): return self._parser.get(self._name, option, raw=raw, vars=vars, fallback=fallback) def getint(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getint(self._name, option, raw=raw, vars=vars, fallback=fallback) def getfloat(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getfloat(self._name, option, raw=raw, vars=vars, fallback=fallback) def getboolean(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getboolean(self._name, option, raw=raw, vars=vars, fallback=fallback) @property def parser(self): # The parser object of the proxy is read-only. return self._parser @property def name(self): # The name of the section on a proxy is read-only. return self._name >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453<|fim▁end|>
def optionxform(self, optionstr): return optionstr.lower()
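For context on the `RawConfigParser`/`ConfigParser` sample above, a minimal standard-library usage sketch; the INI content here is invented purely for illustration:

import configparser

parser = configparser.ConfigParser()
# Values stay strings until a typed getter (getint/getfloat/getboolean) converts them.
parser.read_string("""
[server]
host = example.org
port = 8080
debug = yes
""")

assert parser.get("server", "host") == "example.org"
assert parser.getint("server", "port") == 8080
assert parser.getboolean("server", "debug") is True   # 'yes' is in BOOLEAN_STATES
# A missing option falls back instead of raising when `fallback` is given.
assert parser.get("server", "timeout", fallback="30") == "30"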
<|file_name|>iam-object-store.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2015-2021 MinIO, Inc. // // This file is part of MinIO Object Storage stack // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. package cmd import ( "context" "path" "strings" "sync" "unicode/utf8" jsoniter "github.com/json-iterator/go" "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" ) // IAMObjectStore implements IAMStorageAPI type IAMObjectStore struct { // Protect access to storage within the current server. sync.RWMutex *iamCache usersSysType UsersSysType objAPI ObjectLayer } func newIAMObjectStore(objAPI ObjectLayer, usersSysType UsersSysType) *IAMObjectStore { return &IAMObjectStore{ iamCache: newIamCache(), objAPI: objAPI, usersSysType: usersSysType, } } func (iamOS *IAMObjectStore) rlock() *iamCache { iamOS.RLock() return iamOS.iamCache } func (iamOS *IAMObjectStore) runlock() { iamOS.RUnlock() } func (iamOS *IAMObjectStore) lock() *iamCache { iamOS.Lock() return iamOS.iamCache } func (iamOS *IAMObjectStore) unlock() { iamOS.Unlock() } func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType { return iamOS.usersSysType } // Migrate users directory in a single scan. // // 1. Migrate user policy from: // // `iamConfigUsersPrefix + "<username>/policy.json"` // // to: // // `iamConfigPolicyDBUsersPrefix + "<username>.json"`. // // 2. Add versioning to the policy json file in the new // location. // // 3. Migrate user identity json file to include version info. func (iamOS *IAMObjectStore) migrateUsersConfigToV1(ctx context.Context) error { basePrefix := iamConfigUsersPrefix for item := range listIAMConfigItems(ctx, iamOS.objAPI, basePrefix) { if item.Err != nil { return item.Err } user := path.Dir(item.Item) { // 1. check if there is policy file in old location. oldPolicyPath := pathJoin(basePrefix, user, iamPolicyFile) var policyName string if err := iamOS.loadIAMConfig(ctx, &policyName, oldPolicyPath); err != nil { switch err { case errConfigNotFound: // This case means it is already // migrated or there is no policy on // user. default: // File may be corrupt or network error } // Nothing to do on the policy file, // so move on to check the id file. goto next } // 2. copy policy file to new location. mp := newMappedPolicy(policyName) userType := regUser if err := iamOS.saveMappedPolicy(ctx, user, userType, false, mp); err != nil { return err } // 3. delete policy file from old // location. Ignore error. iamOS.deleteIAMConfig(ctx, oldPolicyPath) } next: // 4. check if user identity has old format. identityPath := pathJoin(basePrefix, user, iamIdentityFile) cred := auth.Credentials{ AccessKey: user, } if err := iamOS.loadIAMConfig(ctx, &cred, identityPath); err != nil { switch err { case errConfigNotFound: // This should not happen. 
default: // File may be corrupt or network error } continue } // If the file is already in the new format, // then the parsed auth.Credentials will have // the zero value for the struct. if !cred.IsValid() { // nothing to do continue } u := newUserIdentity(cred) if err := iamOS.saveIAMConfig(ctx, u, identityPath); err != nil { logger.LogIf(ctx, err) return err } // Nothing to delete as identity file location // has not changed. } return nil } func (iamOS *IAMObjectStore) migrateToV1(ctx context.Context) error { var iamFmt iamFormat path := getIAMFormatFilePath() if err := iamOS.loadIAMConfig(ctx, &iamFmt, path); err != nil { switch err { case errConfigNotFound: // Need to migrate to V1. default: // if IAM format return err } } if iamFmt.Version >= iamFormatVersion1 { // Nothing to do. return nil } if err := iamOS.migrateUsersConfigToV1(ctx); err != nil { logger.LogIf(ctx, err) return err } // Save iam format to version 1. if err := iamOS.saveIAMConfig(ctx, newIAMFormatVersion1(), path); err != nil { logger.LogIf(ctx, err) return err } return nil } // Should be called under config migration lock func (iamOS *IAMObjectStore) migrateBackendFormat(ctx context.Context) error { iamOS.Lock() defer iamOS.Unlock() return iamOS.migrateToV1(ctx) } func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error { json := jsoniter.ConfigCompatibleWithStandardLibrary data, err := json.Marshal(item) if err != nil { return err } if GlobalKMS != nil { data, err = config.EncryptBytes(GlobalKMS, data, kms.Context{ minioMetaBucket: path.Join(minioMetaBucket, objPath), }) if err != nil { return err } } return saveConfig(ctx, iamOS.objAPI, objPath, data) } func (iamOS *IAMObjectStore) loadIAMConfigBytesWithMetadata(ctx context.Context, objPath string) ([]byte, ObjectInfo, error) { data, meta, err := readConfigWithMetadata(ctx, iamOS.objAPI, objPath) if err != nil { return nil, meta, err } if !utf8.Valid(data) && GlobalKMS != nil { data, err = config.DecryptBytes(GlobalKMS, data, kms.Context{ minioMetaBucket: path.Join(minioMetaBucket, objPath), }) if err != nil { return nil, meta, err } } return data, meta, nil } func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}, objPath string) error { data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath) if err != nil { return err } json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Unmarshal(data, item) } func (iamOS *IAMObjectStore) deleteIAMConfig(ctx context.Context, path string) error { return deleteConfig(ctx, iamOS.objAPI, path) } func (iamOS *IAMObjectStore) loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error { data, objInfo, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, getPolicyDocPath(policy)) if err != nil { if err == errConfigNotFound { return errNoSuchPolicy } return err } var p PolicyDoc err = p.parseJSON(data) if err != nil { return err } if p.Version == 0 { // This means that policy was in the old version (without any // timestamp info). We fetch the mod time of the file and save // that as create and update date. 
p.CreateDate = objInfo.ModTime p.UpdateDate = objInfo.ModTime } m[policy] = p return nil } func (iamOS *IAMObjectStore) loadPolicyDocs(ctx context.Context, m map[string]PolicyDoc) error { for item := range listIAMConfigItems(ctx, iamOS.objAPI, iamConfigPoliciesPrefix) { if item.Err != nil { return item.Err } policyName := path.Dir(item.Item) if err := iamOS.loadPolicyDoc(ctx, policyName, m); err != nil && err != errNoSuchPolicy { return err } } return nil } func (iamOS *IAMObjectStore) loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]auth.Credentials) error { var u UserIdentity err := iamOS.loadIAMConfig(ctx, &u, getUserIdentityPath(user, userType)) if err != nil { if err == errConfigNotFound { return errNoSuchUser } return err } if u.Credentials.IsExpired() { // Delete expired identity - ignoring errors here. iamOS.deleteIAMConfig(ctx, getUserIdentityPath(user, userType)) iamOS.deleteIAMConfig(ctx, getMappedPolicyPath(user, userType, false)) return nil } if u.Credentials.AccessKey == "" { u.Credentials.AccessKey = user } m[user] = u.Credentials return nil } func (iamOS *IAMObjectStore) loadUsers(ctx context.Context, userType IAMUserType, m map[string]auth.Credentials) error { var basePrefix string switch userType { case svcUser: basePrefix = iamConfigServiceAccountsPrefix case stsUser: basePrefix = iamConfigSTSPrefix default: basePrefix = iamConfigUsersPrefix } for item := range listIAMConfigItems(ctx, iamOS.objAPI, basePrefix) { if item.Err != nil { return item.Err } userName := path.Dir(item.Item) if err := iamOS.loadUser(ctx, userName, userType, m); err != nil && err != errNoSuchUser { return err } } return nil } func (iamOS *IAMObjectStore) loadGroup(ctx context.Context, group string, m map[string]GroupInfo) error { var g GroupInfo err := iamOS.loadIAMConfig(ctx, &g, getGroupInfoPath(group)) if err != nil { if err == errConfigNotFound { return errNoSuchGroup } return err } m[group] = g return nil } func (iamOS *IAMObjectStore) loadGroups(ctx context.Context, m map[string]GroupInfo) error { for item := range listIAMConfigItems(ctx, iamOS.objAPI, iamConfigGroupsPrefix) { if item.Err != nil { return item.Err } group := path.Dir(item.Item) if err := iamOS.loadGroup(ctx, group, m); err != nil && err != errNoSuchGroup { return err } } return nil } func (iamOS *IAMObjectStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy, ) error { var p MappedPolicy err := iamOS.loadIAMConfig(ctx, &p, getMappedPolicyPath(name, userType, isGroup)) if err != nil { if err == errConfigNotFound { return errNoSuchPolicy } return err } m[name] = p return nil } func (iamOS *IAMObjectStore) loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error { var basePath string if isGroup { basePath = iamConfigPolicyDBGroupsPrefix } else { switch userType { case svcUser: basePath = iamConfigPolicyDBServiceAccountsPrefix case stsUser: basePath = iamConfigPolicyDBSTSUsersPrefix default: basePath = iamConfigPolicyDBUsersPrefix } } for item := range listIAMConfigItems(ctx, iamOS.objAPI, basePath) { if item.Err != nil { return item.Err } policyFile := item.Item userOrGroupName := strings.TrimSuffix(policyFile, ".json") if err := iamOS.loadMappedPolicy(ctx, userOrGroupName, userType, isGroup, m); err != nil && err != errNoSuchPolicy { return err } } return nil } func (iamOS *IAMObjectStore) savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error { return 
iamOS.saveIAMConfig(ctx, &p, getPolicyDocPath(policyName)) } func (iamOS *IAMObjectStore) saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error { return iamOS.saveIAMConfig(ctx, mp, getMappedPolicyPath(name, userType, isGroup), opts...) } func (iamOS *IAMObjectStore) saveUserIdentity(ctx context.Context, name string, userType IAMUserType, u UserIdentity, opts ...options) error { return iamOS.saveIAMConfig(ctx, u, getUserIdentityPath(name, userType), opts...) } func (iamOS *IAMObjectStore) saveGroupInfo(ctx context.Context, name string, gi GroupInfo) error { return iamOS.saveIAMConfig(ctx, gi, getGroupInfoPath(name)) } func (iamOS *IAMObjectStore) deletePolicyDoc(ctx context.Context, name string) error { err := iamOS.deleteIAMConfig(ctx, getPolicyDocPath(name)) if err == errConfigNotFound { err = errNoSuchPolicy } return err } func (iamOS *IAMObjectStore) deleteMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool) error { err := iamOS.deleteIAMConfig(ctx, getMappedPolicyPath(name, userType, isGroup)) if err == errConfigNotFound { err = errNoSuchPolicy } return err } func (iamOS *IAMObjectStore) deleteUserIdentity(ctx context.Context, name string, userType IAMUserType) error { err := iamOS.deleteIAMConfig(ctx, getUserIdentityPath(name, userType)) if err == errConfigNotFound { err = errNoSuchUser } return err }<|fim▁hole|> err := iamOS.deleteIAMConfig(ctx, getGroupInfoPath(name)) if err == errConfigNotFound { err = errNoSuchGroup } return err } // helper type for listIAMConfigItems type itemOrErr struct { Item string Err error } // Lists files or dirs in the minioMetaBucket at the given path // prefix. If dirs is true, only directories are listed, otherwise // only objects are listed. All returned items have the pathPrefix // removed from their names. func listIAMConfigItems(ctx context.Context, objAPI ObjectLayer, pathPrefix string) <-chan itemOrErr { ch := make(chan itemOrErr) go func() { defer close(ch) // Allocate new results channel to receive ObjectInfo. objInfoCh := make(chan ObjectInfo) if err := objAPI.Walk(ctx, minioMetaBucket, pathPrefix, objInfoCh, ObjectOptions{}); err != nil { select { case ch <- itemOrErr{Err: err}: case <-ctx.Done(): } return } for obj := range objInfoCh { item := strings.TrimPrefix(obj.Name, pathPrefix) item = strings.TrimSuffix(item, SlashSeparator) select { case ch <- itemOrErr{Item: item}: case <-ctx.Done(): return } } }() return ch }<|fim▁end|>
func (iamOS *IAMObjectStore) deleteGroupInfo(ctx context.Context, name string) error {
<|file_name|>analysis.py<|end_file_name|><|fim▁begin|># -*- mode: python; coding: utf-8 -*- # Copyright 2016-2017 Peter Williams <[email protected]> and collaborators # Licensed under the MIT License """Various helpers for X-ray analysis that rely on CIAO tools. """ from __future__ import absolute_import, division, print_function, unicode_literals __all__ = str(''' get_region_area count_events compute_bgband simple_srcflux ''').split () def get_region_area (env, evtpath, region): with env.slurp (argv=['dmlist', '%s[sky=%s]' % (evtpath, region), 'subspace'], linebreak=True) as s: for etype, payload in s: if etype != 'stdout': continue if b'Region area' not in payload: continue return float (payload.split ()[-1]) raise Exception ('parsing of dmlist output failed') def count_events (env, evtpath, filter): """TODO: this can probably be replaced with simply reading the file ourselves!<|fim▁hole|> """ with env.slurp (argv=['dmstat', '%s%s[cols energy]' % (evtpath, filter)], linebreak=True) as s: for etype, payload in s: if etype != 'stdout': continue if b'good:' not in payload: continue return int (payload.split ()[-1]) raise Exception ('parsing of dmlist output failed') def compute_bgband (evtpath, srcreg, bkgreg, ebins, env=None): """Compute background information for a source in one or more energy bands. evtpath Path to a CIAO events file srcreg String specifying the source region to consider; use 'region(path.reg)' if you have the region saved in a file. bkgreg String specifying the background region to consider; same format as srcreg ebins Iterable of 2-tuples giving low and high bounds of the energy bins to consider, measured in eV. env An optional CiaoEnvironment instance; default settings are used if unspecified. Returns a DataFrame containing at least the following columns: elo The low bound of this energy bin, in eV. ehi The high bound of this energy bin, in eV. ewidth The width of the bin in eV; simply `abs(ehi - elo)`. nsrc The number of events within the specified source region and energy range. nbkg The number of events within the specified background region and energy range. nbkg_scaled The number of background events scaled to the source area; not an integer. nsrc_subbed The estimated number of non-background events in the source region; simply `nsrc - nbkg_scaled`. log_prob_bkg The logarithm of the probability that all counts in the source region are due to background events. src_sigma The confidence of source detection in sigma inferred from log_prob_bkg. The probability of backgrounditude is computed as: b^s * exp (-b) / s! where `b` is `nbkg_scaled` and `s` is `nsrc`. The confidence of source detection is computed as: sqrt(2) * erfcinv (prob_bkg) where `erfcinv` is the inverse complementary error function. """ import numpy as np import pandas as pd from scipy.special import erfcinv, gammaln if env is None: from . 
import CiaoEnvironment env = CiaoEnvironment () srcarea = get_region_area (env, evtpath, srcreg) bkgarea = get_region_area (env, evtpath, bkgreg) srccounts = [count_events (env, evtpath, '[sky=%s][energy=%d:%d]' % (srcreg, elo, ehi)) for elo, ehi in ebins] bkgcounts = [count_events (env, evtpath, '[sky=%s][energy=%d:%d]' % (bkgreg, elo, ehi)) for elo, ehi in ebins] df = pd.DataFrame ({ 'elo': [t[0] for t in ebins], 'ehi': [t[1] for t in ebins], 'nsrc': srccounts, 'nbkg': bkgcounts }) df['ewidth'] = np.abs (df['ehi'] - df['elo']) df['nbkg_scaled'] = df['nbkg'] * srcarea / bkgarea df['log_prob_bkg'] = df['nsrc'] * np.log (df['nbkg_scaled']) - df['nbkg_scaled'] - gammaln (df['nsrc'] + 1) df['src_sigma'] = np.sqrt (2) * erfcinv (np.exp (df['log_prob_bkg'])) df['nsrc_subbed'] = df['nsrc'] - df['nbkg_scaled'] return df def _rmtree_error (func, path, excinfo): from ...cli import warn warn ('couldn\'t delete temporary file %s: %s (%s)', path, excinfo[0], func) def simple_srcflux(env, infile=None, psfmethod='arfcorr', conf=0.68, verbose=0, **kwargs): """Run the CIAO "srcflux" script and retrieve its results. *infile* The input events file; must be specified. The computation is done in a temporary directory, so this path — and all others passed in as arguments — **must be made absolute**. *psfmethod* = "arfcorr" The PSF modeling method to be used; see the "srcflux" documentation. *conf* = 0.68 The confidence limit to detect. We default to 1 sigma, instead of the 90% mark, which is the srcflux default. *verbose* = 0 The level of verbosity to be used by the tool. *kwargs* Remaining keyword arguments are passed to the tool as command-line keyword arguments, with values stringified. Returns: A :class:`pandas.DataFrame` extracted from the results table generated by the tool. There is one row for each source analyzed; in common usage, this means that there will be one row. """ from ...io import Path import shutil, signal, tempfile if infile is None: raise ValueError('must specify infile') kwargs.update(dict( infile = infile, psfmethod = psfmethod, conf = conf, verbose = verbose, clobber = 'yes', outroot = 'sf', )) argv = ['srcflux'] + ['%s=%s' % t for t in kwargs.items()] argstr = ' '.join(argv) tempdir = None try: tempdir = tempfile.mkdtemp(prefix='srcflux') proc = env.launch(argv, cwd=tempdir, shell=False) retcode = proc.wait() if retcode > 0: raise RuntimeError('command "%s" failed with exit code %d' % (argstr, retcode)) elif retcode == -signal.SIGINT: raise KeyboardInterrupt() elif retcode < 0: raise RuntimeError('command "%s" killed by signal %d' % (argstr, -retcode)) tables = list(Path(tempdir).glob('*.flux')) if len(tables) != 1: raise RuntimeError('expected exactly one flux table from srcflux; got %d' % len(tables)) return tables[0].read_fits_bintable(hdu=1) finally: if tempdir is not None: shutil.rmtree(tempdir, onerror=_rmtree_error)<|fim▁end|>
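The Poisson significance math in the `compute_bgband` docstring above can be exercised standalone. This sketch uses made-up counts (12 source events against 3.5 area-scaled background events) rather than real Chandra data:

import numpy as np
from scipy.special import erfcinv, gammaln

nsrc = 12          # counts in the source region (made-up)
nbkg_scaled = 3.5  # background counts scaled to the source area (made-up)

# log P(all source counts are background) = s*ln(b) - b - ln(s!)
log_prob_bkg = nsrc * np.log(nbkg_scaled) - nbkg_scaled - gammaln(nsrc + 1)
# Detection confidence in sigma, as in the docstring: sqrt(2) * erfcinv(prob_bkg)
src_sigma = np.sqrt(2) * erfcinv(np.exp(log_prob_bkg))
print(log_prob_bkg, src_sigma)  # roughly -8.5 and ~3.7 sigma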
<|file_name|>connections.js<|end_file_name|><|fim▁begin|>'use strict'; /**<|fim▁hole|> * @name networkAnalyzerApp.controller:ConnectionsCtrl * @description * # ConnectionsCtrl * Controller of the networkAnalyzerApp */ angular.module('networkAnalyzerApp') .controller('ConnectionsCtrl', function ($scope) { $scope.awesomeThings = [ 'HTML5 Boilerplate', 'AngularJS', 'Karma' ]; });<|fim▁end|>
* @ngdoc function
<|file_name|>WalletName.py<|end_file_name|><|fim▁begin|>__author__ = 'frank' import json from BaseObject import BaseObject from Requestor import process_request class WalletName(BaseObject): """ Wallet Name object :param domain_name: Domain name that Wallet Name should be associated with. :param name: Unique name for this Wallet Name prefixed to your domain_name. e.g. name.domain_name :param external_id: Unique identifier of your choice to identify your user's Wallet Name. :param id: Unique Netki identifier for this Wallet Name. """ def __init__(self, domain_name, name, external_id, id=None): super(WalletName, self).__init__() self.domain_name = domain_name self.name = name self.external_id = external_id self.id = id self.wallets = {} def get_used_currencies(self): """ Returns wallets dictionary containing currencies and wallet addresses. ``wallets['currency']: 'wallet_address'`` """ return self.wallets def get_wallet_address(self, currency): """ Returns the wallet address for a provided currency. """ return self.wallets[currency] def set_currency_address(self, currency, wallet_address): """ Create or update a currency and wallet address. :param currency: Three or Four letter currency identifier per Netki API documentation. ``btc, ltc, oap`` :param wallet_address: wallet address for provided currency """ self.wallets[currency] = wallet_address def remove_currency_address(self, currency): """ Remove a currency including the associated wallet address. """ if self.wallets[currency]: del self.wallets[currency] def save(self): """ Commit changes to a WalletName object by submitting them to the API. For new Wallet Names, an id will automatically be generated by the server. Run Netki.create_wallet_name() to create a new WalletName object, then run save() on your WalletName object to submit it to the API. To update a Wallet Name, run Netki.get_wallet_names() to retrieve the Wallet Name object, make your updates, then run save() on the<|fim▁hole|> wallet_data = [] for k in self.wallets.keys(): wallet_data.append({ 'currency': k, 'wallet_address': self.wallets[k] }) wallet_name_data = { 'domain_name': self.domain_name, 'name': self.name, 'wallets': wallet_data, 'external_id': self.external_id } wn_api_data = {'wallet_names': [wallet_name_data]} # If an ID is present it exists in Netki's systems, therefore submit an update if self.id: wallet_name_data['id'] = self.id response = process_request( self.netki_client, '/v1/partner/walletname', 'PUT', wn_api_data ) else: response = process_request( self.netki_client, '/v1/partner/walletname', 'POST', wn_api_data ) for wn in response.wallet_names: if wn.domain_name == self.domain_name and wn.name == self.name: self.id = wn.id def delete(self): """ To delete a WalletName object, first run Netki.get_wallet_names() to retrieve the Wallet Name from the API, then run delete() on the WalletName object to delete it from Netki systems. """ if not self.id: raise Exception('Unable to Delete Object that Does Not Exist Remotely') wn_api_data = { 'wallet_names': [ { 'domain_name': self.domain_name, 'id': self.id } ] } process_request( self.netki_client, '/v1/partner/walletname', 'DELETE', wn_api_data )<|fim▁end|>
WalletName object to commit changes to the API. """
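Given the class above, a plausible Wallet Name lifecycle looks like the sketch below. The sample never shows how `netki_client` gets attached (that presumably happens in `BaseObject` or the `Netki` client), so that line is an assumption:

wn = WalletName(domain_name="example.com", name="alice", external_id="user-1234")
wn.netki_client = netki_client  # assumed wiring; normally done by the Netki client

wn.set_currency_address("btc", "1BoatSLRHtKNngkdXEeobR76b53LETtpyT")
wn.save()    # no id yet, so this POSTs and the server assigns one
wn.set_currency_address("ltc", "LdP8Qox1VAhCzLJNqrr74YovaWYyNBUWvL")
wn.save()    # id is now set, so this PUTs an update
wn.delete()  # raises if the object was never saved (no id)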
<|file_name|>ProfileSkillsModule.js<|end_file_name|><|fim▁begin|>import React, { Component, PropTypes } from 'react' import { updateCurrentUser, notify } from '../actions' import ListItemTagInput from './ListItemTagInput' export default class ProfileSkillsModule extends Component { static propTypes = { person: PropTypes.object } static contextTypes = { dispatch: PropTypes.func } constructor (props) { super(props) this.state = { tags: [], valid: false } }<|fim▁hole|> const valid = tags.length > 0 return this.setState({ tags, valid }) } save = () => { const { tags } = this.state const { dispatch } = this.context return Promise.all([ dispatch(updateCurrentUser({tags})), dispatch(notify('Skills added successfully.', {type: 'info'})) ]) } render () { const { update, save } = this const { person } = this.props const { valid } = this.state const firstName = person.name.split(' ')[0] return <div className='feed-module profile-skills'> <h2> Welcome {firstName}! Are there any skills, passions or interests you’d like to be known for in the community? </h2> <p> Pick “tags” to describe yourself and to find people and opportunities that match your interests. </p> <ListItemTagInput type='tags' className='modal-input' person={person} update={update} context='feed-module' /> <div className='meta'> Press Enter (Return) after each tag. Use a dash (-) between words in a tag. </div> <button type='button' className='btn-primary' disabled={!valid} onClick={save}> Save </button> </div> } }<|fim▁end|>
update = (type, tags) => {
<|file_name|>test_subscriptions.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # Copyright (c) 2015 Digi International Inc. All Rights Reserved. import json from wva.test.test_utilities import WVATestBase class TestWVASubscriptions(WVATestBase): def test_get_subscriptions(self): self.prepare_json_response("GET", "/ws/subscriptions", { "subscriptions": [ "subscriptions/a", "subscriptions/b", ] }) subs = self.wva.get_subscriptions() self.assertEqual(len(subs), 2) self.assertEqual(subs[0].short_name, "a") self.assertEqual(subs[1].short_name, "b") def test_get_metadata(self): self.prepare_json_response("GET", "/ws/subscriptions/speedy", { 'subscription': {'buffer': 'queue', 'interval': 1, 'uri': 'vehicle/data/VehicleSpeed'} }) sub = self.wva.get_subscription("speedy") self.assertEqual(sub.get_metadata(), { 'buffer': 'queue', 'interval': 1,<|fim▁hole|> self.prepare_response("DELETE", "/ws/subscriptions/short-name", "") sub = self.wva.get_subscription("short-name") sub.delete() self.assertEqual(self._get_last_request().method, "DELETE") self.assertEqual(self._get_last_request().path, "/ws/subscriptions/short-name") def test_create(self): self.prepare_response("PUT", "/ws/subscriptions/new-short-name", "") sub = self.wva.get_subscription("new-short-name") sub.create("vehicle/data/EngineSpeed", buffer="drop", interval=5) req = self._get_last_request() self.assertDictEqual(json.loads(req.body.decode('utf-8')), { 'subscription': {'buffer': 'drop', 'interval': 5, 'uri': 'vehicle/data/EngineSpeed'}, })<|fim▁end|>
'uri': 'vehicle/data/VehicleSpeed', }) def test_delete(self):
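The tests above pin down the subscription endpoints (GET/PUT/DELETE on /ws/subscriptions/...). Outside the test harness, the same client calls would look roughly like this; the construction of `wva` itself is hidden by `WVATestBase`, so treat it as assumed:

sub = wva.get_subscription("speedy")
sub.create("vehicle/data/VehicleSpeed", buffer="queue", interval=1)
print(sub.get_metadata())  # {'buffer': 'queue', 'interval': 1, 'uri': 'vehicle/data/VehicleSpeed'}
sub.delete()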
<|file_name|>index.js<|end_file_name|><|fim▁begin|>export { AbilityScoresViewModel } from './ability_scores'; export { ActionsToolbarViewModel } from './actions_toolbar'; export { ArmorViewModel } from './armor'; export { CharacterNameViewModel } from './character_name'; export { CharacterPortraitModel } from './character_portrait'; export { CharacterStatusLineViewModel } from './character_status_line'; export { BackgroundViewModel } from './background'; export { FeatsViewModel } from './feats'; export { FeaturesViewModel } from './features'; export { ItemsViewModel } from './items';<|fim▁hole|>export { ProfileViewModel } from './profile'; export { CharacterRootViewModel } from './root'; export { SkillsViewModel } from './skills'; export { SpellSlotsViewModel } from './spell_slots'; export { SpellStatsViewModel } from './spell_stats'; export { SpellbookViewModel } from './spells'; export { StatsViewModel } from './stats'; export { TrackerViewModel } from './tracker'; export { TraitsViewModel } from './traits'; export { WealthViewModel } from './wealth'; export { WeaponsViewModel } from './weapons'; export { OtherStatsViewModel } from './other_stats';<|fim▁end|>
export { MagicItemsViewModel } from './magic_items'; export { ProficienciesViewModel } from './proficiencies';
<|file_name|>TransportStopAction.java<|end_file_name|><|fim▁begin|>/* * Created on 17-dic-2005 * * TODO To change the template for this generated file go to Window - * Preferences - Java - Code Style - Code Templates */ package org.herac.tuxguitar.gui.actions.transport; <|fim▁hole|>import org.eclipse.swt.events.TypedEvent; import org.herac.tuxguitar.gui.TuxGuitar; import org.herac.tuxguitar.gui.actions.Action; /** * @author julian * * TODO To change the template for this generated type comment go to * Window - Preferences - Java - Code Style - Code Templates */ public class TransportStopAction extends Action { public static final String NAME = "action.transport.stop"; public TransportStopAction() { super(NAME, AUTO_LOCK | AUTO_UNLOCK | AUTO_UPDATE | KEY_BINDING_AVAILABLE); } protected int execute(TypedEvent e) { TuxGuitar.instance().getTransport().stop(); return 0; } }<|fim▁end|>
<|file_name|>memes_zaebali.py<|end_file_name|><|fim▁begin|># coding: utf-8 __author__ = "@strizhechenko" import sys from morpher import Morpher from twitterbot_utils import Twibot from apscheduler.schedulers.blocking import BlockingScheduler sched = BlockingScheduler() bot = Twibot() morphy = Morpher() def tweets2words(tweets): string = " ".join([tweet.text for tweet in tweets]) return morphy.process_to_words(string) @sched.scheduled_job('interval', minutes=15) def do_tweets(): print 'New tick' words = tweets2words(bot.fetch_list(list_id=217926157)) for word in words: tweet = morphy.word2phrase(word) bot.tweet(tweet) print 'post', tweet.encode('utf-8') @sched.scheduled_job('interval', hours=24) def do_wipe(): print 'Wipe time'<|fim▁hole|> bot.wipe() if __name__ == '__main__': do_tweets() if '--test' in sys.argv: exit(0) sched.start()<|fim▁end|>
<|file_name|>mm.lang.js<|end_file_name|><|fim▁begin|>// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview Moodle mobile lang lib. * @author <a href="mailto:[email protected]">Juan Leyva</a> * @version 1.2 */ /** * @namespace Holds all the MoodleMobile language functionality. */ MM.lang = { strings: [], current: '', locale: '', /** * Determine the language for the app. * We check first if the user has selected by configuration a language.. * then the Mobile device locale (but we have to check that we have a language file for that locale) * then the remote site lang (if the remote site is loaded, we can be at the login screen) * finally, the language in the config.json file (default language) * * @return {string} Language id (en, es, etc...) */ determine: function() { // User preferences. var lang = MM.getConfig('lang'); if (typeof(lang) != 'undefined') { return lang; } // Locale // MM.lang.locale is loaded by Phonegap. if (MM.lang.locale) { lang = MM.lang.locale.toLowerCase().replace("-", "_"); if (typeof(MM.config.languages[lang]) != "undefined") { return lang; } else if(lang.length > 2) { // Try without the region/country. lang = lang.substr(0, 2); if (typeof(MM.config.languages[lang]) != "undefined") { return lang; } } } // Browser language. RFC 4646. var browserLang = window.navigator.userLanguage || window.navigator.language; // Normalize i.e: pt-BR to pt_br. browserLang = browserLang.toLowerCase().replace("-", "_"); if (typeof(MM.config.languages[browserLang]) != "undefined") { return browserLang; } else if(browserLang.length > 2) { // Try without the region/country. browserLang = browserLang.substr(0, 2); if (typeof(MM.config.languages[browserLang]) != "undefined") { return browserLang; } } // Default site lang. if (typeof(MM.config.current_site) != 'undefined' && MM.config.current_site && typeof(MM.config.current_site.lang) != 'undefined' && typeof(MM.config.languages[MM.config.current_site.lang]) != "undefined") { return MM.config.current_site.lang; } // Default language. return MM.config.default_lang; }, setup: function(component) { MM.log('Strings: Lang setup for ' + component); var cacheEl = ""; if (typeof(component) == 'undefined') { component = 'core'; cacheEl = 'core'; } if (component != 'core') { cacheEl = MM.plugins[component].settings.lang.component; } var lang = MM.lang.determine(); // Try to find in cache the language strings. // Languages are automatically sync and stored in cache, forcing to not expire. // Check if we are inside a site first, because languages can be set up in the login screen. 
if (typeof(MM.config.current_site) != "undefined" && MM.config.current_site) { var langStrings = MM.cache.getElement('lang-' + cacheEl + '-' + lang, true); if (langStrings) { MM.lang.loadLang(component, lang, langStrings); MM.log('Strings loaded from cache (remote syte)', 'Strings'); } } }, loadLang: function(component, lang, strings) { MM.log('Strings: Loading lang ' + lang + ' for component ' + component); MM.lang.current = lang; if (typeof(MM.lang.strings[lang]) == 'undefined') { MM.lang.strings[lang] = []; } if (strings && Object.keys(strings).length > 0) { MM.lang.strings[lang][component] = strings; } }, loadPluginLang: function(component, strings) { MM.log('Strings: Loading plugin lang ' + component); if (!MM.lang.current) { MM.lang.current = 'en'; MM.lang.strings['en'] = []; } // Try to find in cache the language strings. // Languages are automatically sync and stored in cache, forcing to not expire. var cacheStrings = MM.cache.getElement('lang-' + component + '-' + MM.lang.current, true); if (cacheStrings) { strings = cacheStrings; MM.log('Strings: Plugin '+component+' Strings loaded from cache (remote syte)'); } MM.lang.strings[MM.lang.current][component] = strings; if (MM.lang.current != 'en') { MM.lang.strings['en'][component] = strings; } }, pluginName: function(plugin) { <|fim▁hole|> return MM.lang.s(plugin); }, /** * Main function for translating strings * * @this {MM.lang} * @param {string} id The unique id of the string to be translated. * @param {string} component Core for regular strings or pluginname for plugins. */ s: function(id, component) { if (typeof(component) == 'undefined') { component = 'core'; } var translated = ''; // First we check if we find the string in the current language. if (typeof(MM.lang.strings[MM.lang.current][component]) != 'undefined' && typeof(MM.lang.strings[MM.lang.current][component][id]) !== 'undefined' ) { translated = MM.lang.strings[MM.lang.current][component][id]; } // If not, we look for the string in the default language "english" else if (typeof(MM.lang.strings['en']) != 'undefined' && typeof(MM.lang.strings['en'][component]) !== 'undefined' && typeof(MM.lang.strings['en'][component][id]) !== 'undefined') { translated = MM.lang.strings['en'][component][id]; } // If not found yet, we look for the string in the base language file (lang/en.json) if (!translated && component == 'core' && MM.lang.base[id]) { translated = MM.lang.base[id]; } // If not found yet (for plugins only) we look for the string in the base lang also (plugin/lang/en.json). if (!translated && component != "core" && MM.plugins[component].settings.lang.strings && MM.plugins[component].settings.lang.strings[id] !== 'undefined') { translated = MM.plugins[component].settings.lang.strings[id]; } // For missing strings, we use the [string] notation. 
if (!translated) { translated = '[[' + id + ']]'; } return translated; }, sync: function(forced) { MM.log('Executing lang sync function', 'Sync'); if (forced) { MM.Router.navigate(""); } var lang = MM.lang.determine(); if (MM.deviceConnected() && MM.getConfig('sync_lang_on')) { var data = { 'component': 'mobile', 'lang': lang }; MM.log('Loading lang file from remote site for core', 'Sync'); MM.moodleWSCall('core_get_component_strings', data, function(strings) { var stringsFormatted = {}; if (strings.length > 0) { $.each(strings, function(index, string) { stringsFormatted[string.stringid] = string.string; }); } MM.cache.addElement('lang-core-' + lang, stringsFormatted, 'lang'); if (forced) { MM.popMessage(MM.lang.s("langsynced") + " (" + strings.length + ") " + MM.lang.s("strings")); } }, {silently: true, getFromCache: false}); for (var el in MM.plugins) { var plugin = MM.plugins[el]; var component = plugin.settings.lang.component; if (component != 'core') { var data = { 'component': component, 'lang': lang }; MM.log('Sync: Loading lang from remtote site for component: ' + component); MM.moodleWSCall('core_get_component_strings', data, function(strings) { var stringsFormatted = {}; if (strings.length > 0) { $.each(strings, function(index, string) { stringsFormatted[string.stringid] = string.string; }); } MM.cache.addElement( 'lang-' + data.component + '-' + lang, stringsFormatted, 'lang' ); }, {silently: true} ); } } } } };<|fim▁end|>
if (MM.plugins[plugin].settings.lang.component != 'core') { return MM.lang.s('plugin' + plugin + 'name', plugin); }
<|file_name|>panel.py<|end_file_name|><|fim▁begin|>from django.utils.translation import ugettext_lazy as _ import horizon from horizon.test.test_dashboards.dogs import dashboard class Puppies(horizon.Panel): name = _("Puppies") slug = "puppies"<|fim▁hole|><|fim▁end|>
dashboard.Dogs.register(Puppies)
<|file_name|>spoof.py<|end_file_name|><|fim▁begin|>import sqlite3 import logging from time import sleep logging.getLogger("scapy.runtime").setLevel(logging.ERROR) from scapy.all import * import sweetSecurityDB dbPath="/opt/sweetsecurity/client/SweetSecurity.db" def convertMAC(mac): newMac="%s%s:%s%s:%s%s:%s%s:%s%s:%s%s" % (mac[0],mac[1],mac[2],mac[3],mac[4],mac[5],mac[6],mac[7],mac[8],mac[9],mac[10],mac[11]) return newMac def getMac(): myMac = [get_if_hwaddr(i) for i in get_if_list()] for mac in myMac: if(mac != "00:00:00:00:00:00"): return mac def start(): logger = logging.getLogger('SweetSecurityLogger') while 1: try: dfgwInfo=sweetSecurityDB.getDfgw() dfgw=dfgwInfo['dfgw'] dfgwMAC=dfgwInfo['dfgwMAC'] dfgwMAC=convertMAC(dfgwMAC)<|fim▁hole|> c = conn.cursor() for row in c.execute('SELECT * FROM hosts where active = 1 and ignore = 0'): logger.info("Spoofing Device: ip=%s, mac=%s",row[2],row[3]) #Spoof the things... victimMac=convertMAC(row[3]) packet = Ether()/ARP(op="who-has",hwdst=dfgwMAC,pdst=dfgw,psrc=row[2]) sendp(packet) packet = Ether()/ARP(op="who-has",hwdst=victimMac,pdst=row[2],psrc=dfgw) sendp(packet) conn.close() sleep(1) except Exception,e: logger.info("Error spoofing device: %s" % str(e))<|fim▁end|>
conn = sqlite3.connect(dbPath)
<|file_name|>issue-10291.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn test<'x>(x: &'x int) { //~ NOTE the lifetime 'x as defined drop::< <'z>|&'z int| -> &'z int>(|z| { //~^ ERROR mismatched types //~^^ ERROR cannot infer an appropriate lifetime x }); }<|fim▁hole|><|fim▁end|>
fn main() {}
<|file_name|>webrender_helpers.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ // TODO(gw): This contains helper traits and implementations for converting Servo display lists // into WebRender display lists. In the future, this step should be completely removed. // This might be achieved by sharing types between WR and Servo display lists, or // completely converting layout to directly generate WebRender display lists, for example. use app_units::Au; use euclid::{Point2D, Vector2D, Rect, SideOffsets2D, Size2D}; use gfx::display_list::{BorderDetails, BorderRadii, BoxShadowClipMode, ClipScrollNode}; use gfx::display_list::{ClipScrollNodeIndex, ClipScrollNodeType, ClippingRegion, DisplayItem}; use gfx::display_list::{DisplayList, StackingContextType}; use msg::constellation_msg::PipelineId; use style::computed_values::{image_rendering, mix_blend_mode, transform_style}; use style::values::computed::{BorderStyle, Filter}; use style::values::generics::effects::Filter as GenericFilter; use webrender_api::{self, ClipAndScrollInfo, ClipId, ClipMode, ComplexClipRegion}; use webrender_api::{DisplayListBuilder, ExtendMode, LayoutTransform}; pub trait WebRenderDisplayListConverter { fn convert_to_webrender(&self, pipeline_id: PipelineId) -> DisplayListBuilder; } trait WebRenderDisplayItemConverter { fn prim_info(&self) -> webrender_api::LayoutPrimitiveInfo; fn convert_to_webrender( &self, builder: &mut DisplayListBuilder, clip_scroll_nodes: &[ClipScrollNode], clip_ids: &mut Vec<Option<ClipId>>, current_clip_and_scroll_info: &mut ClipAndScrollInfo ); } trait ToBorderStyle { fn to_border_style(&self) -> webrender_api::BorderStyle; } impl ToBorderStyle for BorderStyle { fn to_border_style(&self) -> webrender_api::BorderStyle { match *self { BorderStyle::none => webrender_api::BorderStyle::None, BorderStyle::solid => webrender_api::BorderStyle::Solid, BorderStyle::double => webrender_api::BorderStyle::Double, BorderStyle::dotted => webrender_api::BorderStyle::Dotted, BorderStyle::dashed => webrender_api::BorderStyle::Dashed, BorderStyle::hidden => webrender_api::BorderStyle::Hidden, BorderStyle::groove => webrender_api::BorderStyle::Groove, BorderStyle::ridge => webrender_api::BorderStyle::Ridge, BorderStyle::inset => webrender_api::BorderStyle::Inset, BorderStyle::outset => webrender_api::BorderStyle::Outset, } } } trait ToBorderWidths { fn to_border_widths(&self) -> webrender_api::BorderWidths; } impl ToBorderWidths for SideOffsets2D<Au> { fn to_border_widths(&self) -> webrender_api::BorderWidths { webrender_api::BorderWidths { left: self.left.to_f32_px(), top: self.top.to_f32_px(), right: self.right.to_f32_px(), bottom: self.bottom.to_f32_px(), } } } trait ToBoxShadowClipMode { fn to_clip_mode(&self) -> webrender_api::BoxShadowClipMode; } impl ToBoxShadowClipMode for BoxShadowClipMode { fn to_clip_mode(&self) -> webrender_api::BoxShadowClipMode { match *self { BoxShadowClipMode::Inset => webrender_api::BoxShadowClipMode::Inset, BoxShadowClipMode::Outset => webrender_api::BoxShadowClipMode::Outset, } } } trait ToSizeF { fn to_sizef(&self) -> webrender_api::LayoutSize; } trait ToPointF { fn to_pointf(&self) -> webrender_api::LayoutPoint; } trait ToVectorF { fn to_vectorf(&self) -> webrender_api::LayoutVector2D; } impl ToPointF for Point2D<Au> { fn to_pointf(&self) -> webrender_api::LayoutPoint { 
webrender_api::LayoutPoint::new(self.x.to_f32_px(), self.y.to_f32_px()) } } impl ToVectorF for Vector2D<Au> { fn to_vectorf(&self) -> webrender_api::LayoutVector2D { webrender_api::LayoutVector2D::new(self.x.to_f32_px(), self.y.to_f32_px()) } } impl ToSizeF for Size2D<Au> { fn to_sizef(&self) -> webrender_api::LayoutSize { webrender_api::LayoutSize::new(self.width.to_f32_px(), self.height.to_f32_px()) } } pub trait ToRectF { fn to_rectf(&self) -> webrender_api::LayoutRect; } impl ToRectF for Rect<Au> { fn to_rectf(&self) -> webrender_api::LayoutRect { let x = self.origin.x.to_f32_px(); let y = self.origin.y.to_f32_px(); let w = self.size.width.to_f32_px(); let h = self.size.height.to_f32_px(); let point = webrender_api::LayoutPoint::new(x, y); let size = webrender_api::LayoutSize::new(w, h); webrender_api::LayoutRect::new(point, size) } } pub trait ToBorderRadius { fn to_border_radius(&self) -> webrender_api::BorderRadius; } impl ToBorderRadius for BorderRadii<Au> { fn to_border_radius(&self) -> webrender_api::BorderRadius { webrender_api::BorderRadius { top_left: self.top_left.to_sizef(), top_right: self.top_right.to_sizef(), bottom_left: self.bottom_left.to_sizef(), bottom_right: self.bottom_right.to_sizef(), } } } pub trait ToMixBlendMode { fn to_mix_blend_mode(&self) -> webrender_api::MixBlendMode; } impl ToMixBlendMode for mix_blend_mode::T { fn to_mix_blend_mode(&self) -> webrender_api::MixBlendMode { match *self { mix_blend_mode::T::normal => webrender_api::MixBlendMode::Normal, mix_blend_mode::T::multiply => webrender_api::MixBlendMode::Multiply, mix_blend_mode::T::screen => webrender_api::MixBlendMode::Screen, mix_blend_mode::T::overlay => webrender_api::MixBlendMode::Overlay, mix_blend_mode::T::darken => webrender_api::MixBlendMode::Darken, mix_blend_mode::T::lighten => webrender_api::MixBlendMode::Lighten, mix_blend_mode::T::color_dodge => webrender_api::MixBlendMode::ColorDodge, mix_blend_mode::T::color_burn => webrender_api::MixBlendMode::ColorBurn, mix_blend_mode::T::hard_light => webrender_api::MixBlendMode::HardLight, mix_blend_mode::T::soft_light => webrender_api::MixBlendMode::SoftLight, mix_blend_mode::T::difference => webrender_api::MixBlendMode::Difference, mix_blend_mode::T::exclusion => webrender_api::MixBlendMode::Exclusion, mix_blend_mode::T::hue => webrender_api::MixBlendMode::Hue, mix_blend_mode::T::saturation => webrender_api::MixBlendMode::Saturation, mix_blend_mode::T::color => webrender_api::MixBlendMode::Color, mix_blend_mode::T::luminosity => webrender_api::MixBlendMode::Luminosity, } } } trait ToImageRendering { fn to_image_rendering(&self) -> webrender_api::ImageRendering; } impl ToImageRendering for image_rendering::T { fn to_image_rendering(&self) -> webrender_api::ImageRendering { match *self { image_rendering::T::crisp_edges => webrender_api::ImageRendering::CrispEdges, image_rendering::T::auto => webrender_api::ImageRendering::Auto, image_rendering::T::pixelated => webrender_api::ImageRendering::Pixelated, } } } trait ToFilterOps { fn to_filter_ops(&self) -> Vec<webrender_api::FilterOp>; } impl ToFilterOps for Vec<Filter> { fn to_filter_ops(&self) -> Vec<webrender_api::FilterOp> { let mut result = Vec::with_capacity(self.len()); for filter in self.iter() { match *filter { GenericFilter::Blur(radius) => result.push(webrender_api::FilterOp::Blur(radius.px())), GenericFilter::Brightness(amount) => result.push(webrender_api::FilterOp::Brightness(amount.0)), GenericFilter::Contrast(amount) => result.push(webrender_api::FilterOp::Contrast(amount.0)), 
GenericFilter::Grayscale(amount) => result.push(webrender_api::FilterOp::Grayscale(amount.0)), GenericFilter::HueRotate(angle) => result.push(webrender_api::FilterOp::HueRotate(angle.radians())), GenericFilter::Invert(amount) => result.push(webrender_api::FilterOp::Invert(amount.0)), GenericFilter::Opacity(amount) => result.push(webrender_api::FilterOp::Opacity(amount.0.into())), GenericFilter::Saturate(amount) => result.push(webrender_api::FilterOp::Saturate(amount.0)), GenericFilter::Sepia(amount) => result.push(webrender_api::FilterOp::Sepia(amount.0)), GenericFilter::DropShadow(ref shadow) => match *shadow {}, } } result } } pub trait ToTransformStyle { fn to_transform_style(&self) -> webrender_api::TransformStyle; } impl ToTransformStyle for transform_style::T { fn to_transform_style(&self) -> webrender_api::TransformStyle { match *self { transform_style::T::auto | transform_style::T::flat => webrender_api::TransformStyle::Flat, transform_style::T::preserve_3d => webrender_api::TransformStyle::Preserve3D, } } } impl WebRenderDisplayListConverter for DisplayList { fn convert_to_webrender(&self, pipeline_id: PipelineId) -> DisplayListBuilder { let mut builder = DisplayListBuilder::with_capacity(pipeline_id.to_webrender(), self.bounds().size.to_sizef(), 1024 * 1024); // 1 MB of space let mut current_clip_and_scroll_info = pipeline_id.root_clip_and_scroll_info(); builder.push_clip_and_scroll_info(current_clip_and_scroll_info); let mut clip_ids = Vec::with_capacity(self.clip_scroll_nodes.len()); clip_ids.resize(self.clip_scroll_nodes.len(), None); clip_ids[0] = Some(ClipId::root_scroll_node(pipeline_id.to_webrender())); for item in &self.list { item.convert_to_webrender( &mut builder, &self.clip_scroll_nodes, &mut clip_ids, &mut current_clip_and_scroll_info ); } builder } } impl WebRenderDisplayItemConverter for DisplayItem { fn prim_info(&self) -> webrender_api::LayoutPrimitiveInfo { let tag = match self.base().metadata.pointing { Some(cursor) => Some((self.base().metadata.node.0 as u64, cursor as u8)), None => None, }; webrender_api::LayoutPrimitiveInfo { rect: self.base().bounds.to_rectf(), local_clip: self.base().local_clip, // TODO(gw): Make use of the WR backface visibility functionality. 
is_backface_visible: true, tag: tag, edge_aa_segment_mask: webrender_api::EdgeAaSegmentMask::empty(), } } fn convert_to_webrender( &self, builder: &mut DisplayListBuilder, clip_scroll_nodes: &[ClipScrollNode], clip_ids: &mut Vec<Option<ClipId>>, current_clip_and_scroll_info: &mut ClipAndScrollInfo ) { let get_id = |clip_ids: &[Option<ClipId>], index: ClipScrollNodeIndex| -> ClipId { match clip_ids[index.0] { Some(id) => id, None => unreachable!("Tried to use WebRender ClipId before it was defined."), } }; let clip_and_scroll_indices = self.base().clipping_and_scrolling; let scrolling_id = get_id(clip_ids, clip_and_scroll_indices.scrolling); let clip_and_scroll_info = match clip_and_scroll_indices.clipping { None => ClipAndScrollInfo::simple(scrolling_id), Some(index) => ClipAndScrollInfo::new(scrolling_id, get_id(clip_ids, index)), }; if clip_and_scroll_info != *current_clip_and_scroll_info { builder.pop_clip_id(); builder.push_clip_and_scroll_info(clip_and_scroll_info); *current_clip_and_scroll_info = clip_and_scroll_info; } match *self { DisplayItem::SolidColor(ref item) => { builder.push_rect(&self.prim_info(), item.color); } DisplayItem::Text(ref item) => { let mut origin = item.baseline_origin.clone(); let mut glyphs = vec!(); for slice in item.text_run.natural_word_slices_in_visual_order(&item.range) { for glyph in slice.glyphs.iter_glyphs_for_byte_range(&slice.range) { let glyph_advance = if glyph.char_is_space() { glyph.advance() + item.text_run.extra_word_spacing } else { glyph.advance() }; if !slice.glyphs.is_whitespace() { let glyph_offset = glyph.offset().unwrap_or(Point2D::zero()); let x = (origin.x + glyph_offset.x).to_f32_px(); let y = (origin.y + glyph_offset.y).to_f32_px(); let point = webrender_api::LayoutPoint::new(x, y); let glyph = webrender_api::GlyphInstance { index: glyph.id(), point: point, }; glyphs.push(glyph); } origin.x = origin.x + glyph_advance; }; } if glyphs.len() > 0 { builder.push_text(&self.prim_info(), &glyphs, item.text_run.font_key, item.text_color, None); } } DisplayItem::Image(ref item) => { if let Some(id) = item.webrender_image.key { if item.stretch_size.width > Au(0) && item.stretch_size.height > Au(0) { builder.push_image(&self.prim_info(), item.stretch_size.to_sizef(), item.tile_spacing.to_sizef(), item.image_rendering.to_image_rendering(), id); } } } DisplayItem::Border(ref item) => { let widths = item.border_widths.to_border_widths(); let details = match item.details { BorderDetails::Normal(ref border) => { let left = webrender_api::BorderSide { color: border.color.left, style: border.style.left.to_border_style(), }; let top = webrender_api::BorderSide { color: border.color.top, style: border.style.top.to_border_style(), }; let right = webrender_api::BorderSide { color: border.color.right, style: border.style.right.to_border_style(), }; let bottom = webrender_api::BorderSide { color: border.color.bottom, style: border.style.bottom.to_border_style(), }; let radius = border.radius.to_border_radius(); webrender_api::BorderDetails::Normal(webrender_api::NormalBorder { left: left, top: top, right: right, bottom: bottom, radius: radius, }) } BorderDetails::Image(ref image) => { match image.image.key { None => return, Some(key) => { webrender_api::BorderDetails::Image(webrender_api::ImageBorder { image_key: key, patch: webrender_api::NinePatchDescriptor { width: image.image.width, height: image.image.height, slice: image.slice, }, fill: image.fill, outset: image.outset, repeat_horizontal: image.repeat_horizontal, repeat_vertical: 
image.repeat_vertical, }) } } } BorderDetails::Gradient(ref gradient) => { let extend_mode = if gradient.gradient.repeating { ExtendMode::Repeat } else { ExtendMode::Clamp }; webrender_api::BorderDetails::Gradient(webrender_api::GradientBorder { gradient: builder.create_gradient( gradient.gradient.start_point.to_pointf(), gradient.gradient.end_point.to_pointf(), gradient.gradient.stops.clone(), extend_mode), outset: gradient.outset, }) } BorderDetails::RadialGradient(ref gradient) => { let extend_mode = if gradient.gradient.repeating { ExtendMode::Repeat } else { ExtendMode::Clamp }; webrender_api::BorderDetails::RadialGradient(webrender_api::RadialGradientBorder { gradient: builder.create_radial_gradient( gradient.gradient.center.to_pointf(), gradient.gradient.radius.to_sizef(), gradient.gradient.stops.clone(), extend_mode), outset: gradient.outset, }) } }; builder.push_border(&self.prim_info(), widths, details); } DisplayItem::Gradient(ref item) => { let rect = item.base.bounds; let start_point = item.gradient.start_point.to_pointf(); let end_point = item.gradient.end_point.to_pointf(); let extend_mode = if item.gradient.repeating { ExtendMode::Repeat } else { ExtendMode::Clamp }; let gradient = builder.create_gradient(start_point, end_point, item.gradient.stops.clone(), extend_mode); builder.push_gradient(&self.prim_info(), gradient, rect.size.to_sizef(), webrender_api::LayoutSize::zero()); } DisplayItem::RadialGradient(ref item) => { let rect = item.base.bounds; let center = item.gradient.center.to_pointf(); let radius = item.gradient.radius.to_sizef(); let extend_mode = if item.gradient.repeating { ExtendMode::Repeat } else { ExtendMode::Clamp }; let gradient = builder.create_radial_gradient(center, radius, item.gradient.stops.clone(), extend_mode); builder.push_radial_gradient(&self.prim_info(), gradient, rect.size.to_sizef(), webrender_api::LayoutSize::zero()); } DisplayItem::Line(ref item) => { builder.push_line(&self.prim_info(), // TODO(gw): Use a better estimate for wavy line thickness. 
(0.33 * item.base.bounds.size.height.to_f32_px()).ceil(), webrender_api::LineOrientation::Horizontal, &item.color, item.style); } DisplayItem::BoxShadow(ref item) => { let box_bounds = item.box_bounds.to_rectf(); builder.push_box_shadow(&self.prim_info(), box_bounds, item.offset.to_vectorf(), item.color, item.blur_radius.to_f32_px(), item.spread_radius.to_f32_px(), item.border_radius.to_border_radius(), item.clip_mode.to_clip_mode()); } DisplayItem::PushTextShadow(ref item) => { builder.push_shadow(&self.prim_info(), webrender_api::Shadow { blur_radius: item.blur_radius.to_f32_px(), offset: item.offset.to_vectorf(), color: item.color, }); } DisplayItem::PopAllTextShadows(_) => { builder.pop_all_shadows(); } DisplayItem::Iframe(ref item) => { builder.push_iframe(&self.prim_info(), item.iframe.to_webrender()); } DisplayItem::PushStackingContext(ref item) => { let stacking_context = &item.stacking_context; debug_assert!(stacking_context.context_type == StackingContextType::Real); let transform = stacking_context.transform.map(|transform| { LayoutTransform::from_untyped(&transform).into() }); let perspective = stacking_context.perspective.map(|perspective| { LayoutTransform::from_untyped(&perspective) }); builder.push_stacking_context( &webrender_api::LayoutPrimitiveInfo::new(stacking_context.bounds.to_rectf()), stacking_context.scroll_policy, transform, stacking_context.transform_style, perspective, stacking_context.mix_blend_mode, stacking_context.filters.to_filter_ops() ); } DisplayItem::PopStackingContext(_) => builder.pop_stacking_context(), DisplayItem::DefineClipScrollNode(ref item) => { let node = &clip_scroll_nodes[item.node_index.0]; let parent_id = get_id(clip_ids, node.parent_index); let item_rect = node.clip.main.to_rectf(); let webrender_id = match node.node_type { ClipScrollNodeType::Clip => { builder.define_clip_with_parent( node.id, parent_id, item_rect, node.clip.get_complex_clips(), None ) } ClipScrollNodeType::ScrollFrame(scroll_sensitivity) => {<|fim▁hole|> node.clip.main.to_rectf(), node.clip.get_complex_clips(), None, scroll_sensitivity ) } ClipScrollNodeType::StickyFrame(ref sticky_data) => { // TODO: Add define_sticky_frame_with_parent to WebRender. builder.push_clip_id(parent_id); let id = builder.define_sticky_frame( node.id, item_rect, sticky_data.margins, sticky_data.vertical_offset_bounds, sticky_data.horizontal_offset_bounds, webrender_api::LayoutVector2D::zero(), ); builder.pop_clip_id(); id } }; debug_assert!(node.id.is_none() || node.id == Some(webrender_id)); clip_ids[item.node_index.0] = Some(webrender_id); } } } } trait ToWebRenderClip { fn get_complex_clips(&self) -> Vec<ComplexClipRegion>; } impl ToWebRenderClip for ClippingRegion { fn get_complex_clips(&self) -> Vec<ComplexClipRegion> { self.complex.iter().map(|complex_clipping_region| { ComplexClipRegion::new( complex_clipping_region.rect.to_rectf(), complex_clipping_region.radii.to_border_radius(), ClipMode::Clip, ) }).collect() } }<|fim▁end|>
builder.define_scroll_frame_with_parent( node.id, parent_id, node.content_rect.to_rectf(),
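The webrender_helpers entry above is built almost entirely from small `To*` conversion traits that map Servo geometry types (measured in Au app units) onto WebRender layout types. Below is a minimal self-contained sketch of that pattern; the `Au` and `LayoutSize` stand-ins are simplified assumptions, not the real `app_units`/`webrender_api` types (Servo's Au is 1/60 of a CSS pixel).

// Minimal sketch of the To*-trait conversion pattern, with simplified stand-ins.
#[derive(Clone, Copy)]
struct Au(i32); // fixed-point app unit; stand-in for app_units::Au (60 Au = 1 px)

impl Au {
    // Convert app units to f32 CSS pixels, mirroring Au::to_f32_px.
    fn to_f32_px(self) -> f32 {
        self.0 as f32 / 60.0
    }
}

// Target layout type; stand-in for webrender_api::LayoutSize.
#[derive(Debug)]
struct LayoutSize {
    width: f32,
    height: f32,
}

struct Size2D<T> {
    width: T,
    height: T,
}

// The conversion trait, mirroring ToSizeF above.
trait ToSizeF {
    fn to_sizef(&self) -> LayoutSize;
}

impl ToSizeF for Size2D<Au> {
    fn to_sizef(&self) -> LayoutSize {
        LayoutSize {
            width: self.width.to_f32_px(),
            height: self.height.to_f32_px(),
        }
    }
}

fn main() {
    let size = Size2D { width: Au(600), height: Au(1200) };
    println!("{:?}", size.to_sizef()); // LayoutSize { width: 10.0, height: 20.0 }
}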
<|file_name|>websocket.py<|end_file_name|><|fim▁begin|>import os import socket import struct from OpenSSL import SSL from mitmproxy import exceptions from mitmproxy import flow from mitmproxy.proxy.protocol import base from mitmproxy.net import tcp from mitmproxy.net import websockets from mitmproxy.websocket import WebSocketFlow, WebSocketBinaryMessage, WebSocketTextMessage class WebSocketLayer(base.Layer): """ WebSocket layer to intercept, modify, and forward WebSocket messages. Only version 13 is supported (as specified in RFC6455). Only HTTP/1.1-initiated connections are supported. The client starts by sending an Upgrade-request. In order to determine the handshake and negotiate the correct protocol and extensions, the Upgrade-request is forwarded to the server. The response from the server is then parsed and negotiated settings are extracted. Finally the handshake is completed by forwarding the server-response to the client. After that, only WebSocket frames are exchanged. PING/PONG frames pass through and must be answered by the other endpoint. CLOSE frames are forwarded before this WebSocketLayer terminates. This layer is transparent to any negotiated extensions. This layer is transparent to any negotiated subprotocols. Only raw frames are forwarded to the other endpoint. WebSocket messages are stored in a WebSocketFlow. """ def __init__(self, ctx, handshake_flow): super().__init__(ctx) self.handshake_flow = handshake_flow self.flow = None # type: WebSocketFlow self.client_frame_buffer = [] self.server_frame_buffer = [] def _handle_frame(self, frame, source_conn, other_conn, is_server): if frame.header.opcode & 0x8 == 0: return self._handle_data_frame(frame, source_conn, other_conn, is_server) elif frame.header.opcode in (websockets.OPCODE.PING, websockets.OPCODE.PONG): return self._handle_ping_pong(frame, source_conn, other_conn, is_server) elif frame.header.opcode == websockets.OPCODE.CLOSE: return self._handle_close(frame, source_conn, other_conn, is_server) else: return self._handle_unknown_frame(frame, source_conn, other_conn, is_server) def _handle_data_frame(self, frame, source_conn, other_conn, is_server): fb = self.server_frame_buffer if is_server else self.client_frame_buffer fb.append(frame) if frame.header.fin: payload = b''.join(f.payload for f in fb) original_chunk_sizes = [len(f.payload) for f in fb] message_type = fb[0].header.opcode compressed_message = fb[0].header.rsv1 fb.clear() if message_type == websockets.OPCODE.TEXT: t = WebSocketTextMessage else: t = WebSocketBinaryMessage websocket_message = t(self.flow, not is_server, payload) length = len(websocket_message.content) self.flow.messages.append(websocket_message) self.channel.ask("websocket_message", self.flow) def get_chunk(payload): if len(payload) == length: # message has the same length, we can reuse the same sizes pos = 0 for s in original_chunk_sizes: yield payload[pos:pos + s] pos += s else: # just re-chunk everything into 10kB frames chunk_size = 10240 chunks = range(0, len(payload), chunk_size) for i in chunks: yield payload[i:i + chunk_size] frms = [ websockets.Frame( payload=chunk, opcode=frame.header.opcode, mask=(False if is_server else 1), masking_key=(b'' if is_server else os.urandom(4))) for chunk in get_chunk(websocket_message.content) ] if len(frms) > 0: frms[-1].header.fin = True else: frms.append(websockets.Frame( fin=True, opcode=websockets.OPCODE.CONTINUE, mask=(False if is_server else 1), masking_key=(b'' if is_server else os.urandom(4)))) frms[0].header.opcode = message_type 
frms[0].header.rsv1 = compressed_message for frm in frms: other_conn.send(bytes(frm)) return True<|fim▁hole|> return True def _handle_close(self, frame, source_conn, other_conn, is_server): self.flow.close_sender = "server" if is_server else "client" if len(frame.payload) >= 2: code, = struct.unpack('!H', frame.payload[:2]) self.flow.close_code = code self.flow.close_message = websockets.CLOSE_REASON.get_name(code, default='unknown status code') if len(frame.payload) > 2: self.flow.close_reason = frame.payload[2:] other_conn.send(bytes(frame)) # initiate close handshake return False def _handle_unknown_frame(self, frame, source_conn, other_conn, is_server): # unknown frame - just forward it other_conn.send(bytes(frame)) sender = "server" if is_server else "client" self.log("Unknown WebSocket frame received from {}".format(sender), "info", [repr(frame)]) return True def __call__(self): self.flow = WebSocketFlow(self.client_conn, self.server_conn, self.handshake_flow, self) self.flow.metadata['websocket_handshake'] = self.handshake_flow self.handshake_flow.metadata['websocket_flow'] = self.flow self.channel.ask("websocket_start", self.flow) client = self.client_conn.connection server = self.server_conn.connection conns = [client, server] close_received = False try: while not self.channel.should_exit.is_set(): r = tcp.ssl_read_select(conns, 0.1) for conn in r: source_conn = self.client_conn if conn == client else self.server_conn other_conn = self.server_conn if conn == client else self.client_conn is_server = (conn == self.server_conn.connection) frame = websockets.Frame.from_file(source_conn.rfile) cont = self._handle_frame(frame, source_conn, other_conn, is_server) if not cont: if close_received: return else: close_received = True except (socket.error, exceptions.TcpException, SSL.Error) as e: s = 'server' if is_server else 'client' self.flow.error = flow.Error("WebSocket connection closed unexpectedly by {}: {}".format(s, repr(e))) self.channel.tell("websocket_error", self.flow) finally: self.channel.tell("websocket_end", self.flow)<|fim▁end|>
def _handle_ping_pong(self, frame, source_conn, other_conn, is_server): # just forward the ping/pong to the other side other_conn.send(bytes(frame))
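The `get_chunk` helper in the websocket.py entry decides between replaying the original frame boundaries (when a message's length is unchanged after interception) and re-cutting the payload into fixed 10 KiB frames. A hedged Rust rendition of just that decision, for illustration only (names are mine, not mitmproxy's):

// Sketch of the re-chunking decision from _handle_data_frame above. If the
// (possibly modified) payload kept its original length, the original frame
// sizes are replayed so framing is preserved; otherwise it is re-cut into
// uniform 10 KiB chunks.
fn rechunk<'a>(payload: &'a [u8], original_sizes: &[usize]) -> Vec<&'a [u8]> {
    let original_len: usize = original_sizes.iter().sum();
    if payload.len() == original_len {
        // Same length: replay the original chunk boundaries.
        let mut chunks = Vec::with_capacity(original_sizes.len());
        let mut pos = 0;
        for &s in original_sizes {
            chunks.push(&payload[pos..pos + s]);
            pos += s;
        }
        chunks
    } else {
        // Length changed: fall back to fixed 10 KiB frames.
        payload.chunks(10_240).collect()
    }
}

fn main() {
    let payload = vec![0u8; 25_000];
    // Length changed (was 30_000), so the payload is re-cut into 10 KiB frames.
    let chunks = rechunk(&payload, &[30_000]);
    assert_eq!(chunks.iter().map(|c| c.len()).collect::<Vec<_>>(),
               vec![10_240, 10_240, 4_520]);
}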
<|file_name|>concurrent_hash_map.rs<|end_file_name|><|fim▁begin|>use std::ptr;
use std::marker::Copy;
use std::clone::Clone;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::{RwLock, RwLockWriteGuard};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::fmt::{Debug, Formatter, Result};

use super::super::round_up_to_next_highest_power_of_two;

struct Bucket {
    key: Option<i32>,
    value: Option<i32>,
    next: Option<Link>
}

impl Bucket {

    fn empty() -> Bucket {
        Bucket {
            key: None,
            value: None,
            next: None
        }
    }

    fn new(key: i32, value: i32) -> Bucket {
        Bucket {
            key: Some(key),
            value: Some(value),
            next: None
        }
    }
}

impl Debug for Bucket {

    fn fmt(&self, fmt: &mut Formatter) -> Result {
        write!(fmt, "[ Key = {:?} Value = {:?} ]", self.key, self.value)
    }
}

struct Link {
    ptr: *mut Bucket
}

impl Link {

    fn new(bucket: Bucket) -> Link {
        Link {
            ptr: Box::into_raw(Box::new(bucket))
        }
    }
}

impl Deref for Link {
    type Target = Bucket;

    fn deref(&self) -> &Bucket {
        unsafe {
            &*self.ptr
        }
    }
}

impl DerefMut for Link {

    fn deref_mut(&mut self) -> &mut Bucket {
        unsafe {
            &mut *self.ptr
        }
    }
}

impl Clone for Link {

    fn clone(&self) -> Link {
        Link {
            ptr: self.ptr
        }
    }
}

impl Copy for Link { }

unsafe impl Send for Link { }

/// A hash table supporting concurrency for insertions and deletions
///
/// The current implementation is a non-resizable vector of read-write-locked buckets,
/// which resolves hash collisions with a link to the next key-value pair
pub struct ConcurrentHashMap {
    table: Vec<RwLock<Link>>,
    size: AtomicUsize
}

impl Default for ConcurrentHashMap {

    fn default() -> ConcurrentHashMap {
        ConcurrentHashMap::new()
    }
}

impl ConcurrentHashMap {

    /// Create a hash table with a vector of lock-buckets of the default size, which is 16
    pub fn new() -> ConcurrentHashMap {
        ConcurrentHashMap::with_capacity(16)
    }

    /// Create a hash table with a vector of lock-buckets of the specified capacity,
    /// which will be increased if needed to the next highest power of two
    pub fn with_capacity(capacity: usize) -> ConcurrentHashMap {
        let capacity = round_up_to_next_highest_power_of_two(capacity);
        let mut table = Vec::with_capacity(capacity);
        for _ in 0..capacity {
            table.push(RwLock::new(Link::new(Bucket::empty())));
        }
        ConcurrentHashMap {
            table: table,
            size: AtomicUsize::new(0)
        }
    }

    /// Check if the table is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the number of key-value pairs in the table
    pub fn len(&self) -> usize {
        self.size.load(Ordering::Relaxed)
    }

    /// Return the capacity of the lock-buckets vector
    pub fn capacity(&self) -> usize {
        self.table.capacity()
    }

    /// Insert a key-value pair into the table,
    /// or update the value if the specified key is already in the table
    pub fn insert(&mut self, key: i32, val: i32) {
        // capacity is always a power of two, so `capacity - 1` is an all-ones
        // bit mask that maps any key onto a valid bucket index
        let index = (self.capacity() - 1) & key as usize;
        let mut guard = self.table[index].write().unwrap();
        if put(key, val, &mut guard) {
            self.size.fetch_add(1, Ordering::Relaxed);
        }
    }

    /// Remove the specified key from the table and return its value,
    /// or None if the key wasn't in the table
    pub fn remove(&mut self, key: i32) -> Option<i32> {
        let index = (self.capacity() - 1) & key as usize;
        let mut guard = self.table[index].write().unwrap();
        let result = take(key, &mut guard);
        if result.is_some() {
            self.size.fetch_sub(1, Ordering::Relaxed);
        }
        result
    }
}

fn put(key: i32, val: i32, guard: &mut RwLockWriteGuard<Link>) -> bool {
    let contains = contains(key, guard);
    if contains {
        let mut link = iterate(key, guard);
        link.value = Some(val);
    }<|fim▁hole|>
        let link = **guard;
        new_bucket.next = Some(link);
        **guard = new_bucket;
    }
    !contains
}

fn contains(key: i32, guard: &RwLockWriteGuard<Link>) -> 
bool { (*iterate(key, guard)).key == Some(key) } fn take(key: i32, guard: &mut RwLockWriteGuard<Link>) -> Option<i32> { let contains = contains(key, guard); if contains { let mut link = iterate(key, guard); match (*link).next { Some(next) => link.next = next.next, None => link.ptr = ptr::null_mut(), } (*link).value } else { None } } fn iterate(key: i32, guard: &RwLockWriteGuard<Link>) -> Link { let mut link = **guard; while (*link).key != Some(key) && (*link).next.is_some() { link = (*link).next.unwrap(); } link }<|fim▁end|>
else { let mut new_bucket = Link::new(Bucket::new(key, val));
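The concurrent_hash_map entry rounds its capacity up to a power of two precisely so that bucket selection can be a single bit mask. A standalone sketch of that index computation (illustrative, not part of the crate):

// With a power-of-two capacity, `capacity - 1` is an all-ones bit mask,
// so `key & (capacity - 1)` is a cheap `key % capacity`.
fn bucket_index(key: i32, capacity: usize) -> usize {
    debug_assert!(capacity.is_power_of_two());
    (key as usize) & (capacity - 1)
}

fn main() {
    let capacity = 16;
    for &key in &[0i32, 5, 16, 21, -1] {
        // -1 as usize wraps to usize::MAX, whose low 4 bits are all ones -> bucket 15.
        println!("key {:>2} -> bucket {}", key, bucket_index(key, capacity));
    }
    // Masking with `capacity` itself instead of `capacity - 1` would yield only
    // 0 or 16 for capacity 16 -- and index 16 is out of bounds.
}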
<|file_name|>Add Binary-optimized.cpp<|end_file_name|><|fim▁begin|>class Solution {
public:
    string addBinary(string a, string b) {
        string sum = "";
        int carry = 0;
        for (int i = a.size() - 1, j = b.size() - 1; i >= 0 || j >= 0; i--, j--) {
            int m = (i >= 0 && a[i] == '1');
            int n = (j >= 0 && b[j] == '1');
            sum += to_string((m + n + carry) & 0x1); // & 0x1 keeps only the lowest binary digit
            // Avoid the `string = char + string` pattern inside a loop (it is quadratic);
            // append to the output string and reverse it once at the end instead.
            carry = (m + n + carry) >> 1; // >> 1 divides by 2, i.e. extracts the carry bit
        }
        reverse(sum.begin(), sum.end());

        if (carry) sum = '1' + sum;
        else sum = '0' + sum; // prepend '0' so the result is never empty, e.g. for two empty input strings;
                              // any extra leading zeros are stripped below

        size_t i = 0;
        while (sum[i] == '0' && i < sum.length() - 1) i++;
        sum = sum.substr(i);

        return sum;<|fim▁hole|><|fim▁end|>
} };
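The same `& 0x1` / `>> 1` carry trick, transcribed to Rust as an illustrative sketch (not part of the original solution). Folding the carry into the loop condition removes the prepend-and-strip step the C++ version needs:

// Illustrative port: sum digit = (m + n + carry) & 1, next carry = (m + n + carry) >> 1,
// building the answer in reverse. Assumes non-empty inputs, as LeetCode guarantees.
fn add_binary(a: &str, b: &str) -> String {
    let (a, b) = (a.as_bytes(), b.as_bytes());
    let (mut i, mut j) = (a.len() as isize - 1, b.len() as isize - 1);
    let mut carry = 0u32;
    let mut sum = Vec::new();
    while i >= 0 || j >= 0 || carry > 0 {
        let m = (i >= 0 && a[i as usize] == b'1') as u32;
        let n = (j >= 0 && b[j as usize] == b'1') as u32;
        sum.push(b'0' + ((m + n + carry) & 1) as u8); // low bit is the digit
        carry = (m + n + carry) >> 1;                 // high bit carries over
        i -= 1;
        j -= 1;
    }
    sum.reverse();
    String::from_utf8(sum).unwrap()
}

fn main() {
    assert_eq!(add_binary("11", "1"), "100");
    assert_eq!(add_binary("1010", "1011"), "10101");
    println!("ok");
}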
<|file_name|>bitcoin_ja.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="ja" version="2.0"> <defaultcodec>UTF-8</defaultcodec> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About BulbaCoin</source> <translation>BulbaCoinについて</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;BulbaCoin&lt;/b&gt; version</source> <translation>&lt;b&gt;BulbaCoin&lt;/b&gt; バージョン</translation> </message> <message> <location line="+57"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation type="unfinished"/> </message> <message> <location filename="../aboutdialog.cpp" line="+14"/> <source>Copyright</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>The BulbaCoin developers</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>アドレス帳</translation> </message> <message> <location line="+19"/> <source>Double-click to edit address or label</source> <translation>アドレスまたはラベルを編集するにはダブルクリック</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>新規アドレスの作成</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>現在選択されているアドレスをシステムのクリップボードにコピーする</translation> </message><|fim▁hole|> <translation type="unfinished"/> </message> <message> <location filename="../addressbookpage.cpp" line="+63"/> <source>These are your BulbaCoin addresses for receiving payments. 
You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"/> </message> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>&amp;Copy Address</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a BulbaCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Export</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Verify a message to ensure it was signed with a specified BulbaCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>削除(&amp;D)</translation> </message> <message> <location filename="../addressbookpage.cpp" line="-5"/> <source>These are your BulbaCoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Copy &amp;Label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send &amp;Coins</source> <translation type="unfinished"/> </message> <message> <location line="+260"/> <source>Export Address Book Data</source> <translation>アドレス帳データをエクスポートする</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>CSVファイル (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation>エクスポートエラー</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>%1のファイルに書き込めませんでした。</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>ラベル</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>アドレス</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(ラベル無し)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>パスフレーズを入力</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>新しいパスフレーズ</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>新しいパスフレーズをもう一度</translation> </message> <message> <location 
filename="../askpassphrasedialog.cpp" line="+33"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>ウォレットの新しいパスフレーズを入力してください。&lt;br/&gt;&lt;b&gt;8個以上の単語か10個以上のランダムな文字&lt;/b&gt;を使ってください。</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>ウォレットを暗号化する</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>この操作はウォレットをアンロックするためにパスフレーズが必要です。</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>ウォレットをアンロックする</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>この操作はウォレットの暗号化解除のためにパスフレーズが必要です。</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>ウォレットの暗号化を解除する</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>パスフレーズの変更</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>新旧両方のパスフレーズを入力してください。</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>ウォレットの暗号化を確認する</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR BULBACOINS&lt;/b&gt;!</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation type="unfinished"/> </message> <message> <location line="-130"/> <location line="+58"/> <source>Wallet encrypted</source> <translation>ウォレットは暗号化されました</translation> </message> <message> <location line="-56"/> <source>BulbaCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bulbacoins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+42"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>ウォレットの暗号化に失敗しました</translation> </message> <message> <location line="-54"/> <source>Wallet encryption failed due to an internal error. 
Your wallet was not encrypted.</source> <translation>内部エラーによりウォレットの暗号化が失敗しました。ウォレットは暗号化されませんでした。</translation> </message> <message> <location line="+7"/> <location line="+48"/> <source>The supplied passphrases do not match.</source> <translation>パスフレーズが同じではありません。</translation> </message> <message> <location line="-37"/> <source>Wallet unlock failed</source> <translation>ウォレットのアンロックに失敗しました</translation> </message> <message> <location line="+1"/> <location line="+11"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>ウォレットの暗号化解除のパスフレーズが正しくありません。</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>ウォレットの暗号化解除に失敗しました</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation type="unfinished"/> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+233"/> <source>Sign &amp;message...</source> <translation type="unfinished"/> </message> <message> <location line="+280"/> <source>Synchronizing with network...</source> <translation>ネットワークに同期中……</translation> </message> <message> <location line="-349"/> <source>&amp;Overview</source> <translation>概要(&amp;O)</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>ウォレットの概要を見る</translation> </message> <message> <location line="+20"/> <source>&amp;Transactions</source> <translation>取引(&amp;T)</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>取引履歴を閲覧</translation> </message> <message> <location line="+7"/> <source>Edit the list of stored addresses and labels</source> <translation>保存されたアドレスとラベルのリストを編集</translation> </message> <message> <location line="-14"/> <source>Show the list of addresses for receiving payments</source> <translation>支払い受け取り用アドレスのリストを見る</translation> </message> <message> <location line="+31"/> <source>E&amp;xit</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>アプリケーションを終了</translation> </message> <message> <location line="+4"/> <source>Show information about BulbaCoin</source> <translation>BulbaCoinに関する情報を見る</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>オプション(&amp;O)</translation> </message> <message> <location line="+6"/> <source>&amp;Encrypt Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation type="unfinished"/> </message> <message> <location line="+285"/> <source>Importing blocks from disk...</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Reindexing blocks on disk...</source> <translation type="unfinished"/> </message> <message> <location line="-347"/> <source>Send coins to a BulbaCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>Modify configuration options for BulbaCoin</source> 
<translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Backup wallet to another location</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>ウォレット暗号化用パスフレーズの変更</translation> </message> <message> <location line="+6"/> <source>&amp;Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation type="unfinished"/> </message> <message> <location line="-4"/> <source>&amp;Verify message...</source> <translation type="unfinished"/> </message> <message> <location line="-165"/> <location line="+530"/> <source>BulbaCoin</source> <translation type="unfinished"/> </message> <message> <location line="-530"/> <source>Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+101"/> <source>&amp;Send</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Receive</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>&amp;Addresses</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>&amp;About BulbaCoin</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show or hide the main Window</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Encrypt the private keys that belong to your wallet</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Sign messages with your BulbaCoin addresses to prove you own them</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Verify messages to ensure they were signed with specified BulbaCoin addresses</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>&amp;File</source> <translation>ファイル(&amp;F)</translation> </message> <message> <location line="+7"/> <source>&amp;Settings</source> <translation>設定(&amp;S)</translation> </message> <message> <location line="+6"/> <source>&amp;Help</source> <translation>ヘルプ(&amp;H)</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>タブツールバー</translation> </message> <message> <location line="+17"/> <location line="+10"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+47"/> <source>BulbaCoin client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+141"/> <source>%n active connection(s) to BulbaCoin network</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+22"/> <source>No block source available...</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Processed %1 of %2 (estimated) blocks of transaction history.</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Processed %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+20"/> <source>%n hour(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> 
<source>%n day(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n week(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+4"/> <source>%1 behind</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Last received block was generated %1 ago.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Error</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="+70"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="-140"/> <source>Up to date</source> <translation>バージョンは最新です</translation> </message> <message> <location line="+31"/> <source>Catching up...</source> <translation type="unfinished"/> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Sent transaction</source> <translation>送金取引</translation> </message> <message> <location line="+0"/> <source>Incoming transaction</source> <translation>着金取引</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation type="unfinished"/> </message> <message> <location line="+33"/> <location line="+23"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-23"/> <location line="+23"/> <source>URI can not be parsed! This can be caused by an invalid BulbaCoin address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>ウォレットは&lt;b&gt;暗号化され、アンロックされています&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>ウォレットは&lt;b&gt;暗号化され、ロックされています&lt;/b&gt;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+111"/> <source>A fatal error occurred. 
BulbaCoin can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+104"/> <source>Network Alert</source> <translation type="unfinished"/> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>アドレスの編集</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>ラベル(&amp;L)</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation>このアドレス帳の入った事と関係のレーベル</translation> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;アドレス</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation>アドレス帳の入った事の関係のアドレスです。これは遅れるのアドレスのためだけに編集出来ます。</translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation>新しいの受け入れのアドレス</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>新しいの送るのアドレス</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>受け入れのアドレスを編集する</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>送るのアドレスを編集する</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>入ったのアドレス「%1」はもうアドレス帳にあります。</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid BulbaCoin address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>財布をアンロックするのは出来ませんでした。</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>新しいのキーの生成は失敗しました。</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+424"/> <location line="+12"/> <source>BulbaCoin-Qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>オプションズ</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation 
type="unfinished"/> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start BulbaCoin after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start BulbaCoin on system login</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Reset all client options to default.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Reset Options</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>&amp;Network</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Automatically open the BulbaCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Connect to the BulbaCoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation type="unfinished"/> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>The user interface language can be set here. 
This setting will take effect after restarting BulbaCoin.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Whether to show BulbaCoin addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+53"/> <source>default</source> <translation type="unfinished"/> </message> <message> <location line="+130"/> <source>Confirm options reset</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Some settings may require a client restart to take effect.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Do you want to proceed?</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting BulbaCoin.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation type="unfinished"/> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>フォーム</translation> </message> <message> <location line="+50"/> <location line="+166"/> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the BulbaCoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-124"/> <source>Balance:</source> <translation>残高:</translation> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation>未確認:</translation> </message> <message> <location line="-78"/> <source>Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+107"/> <source>Immature:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation type="unfinished"/> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;最近の取引&lt;/b&gt;</translation> </message> <message> <location line="-101"/> <source>Your current balance</source> <translation>今の残高</translation> </message> <message> <location line="+29"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation type="unfinished"/> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start bulbacoin: click-to-pay handler</source> <translation type="unfinished"/> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+339"/> <source>N/A</source> <translation type="unfinished"/> </message> <message> <location line="-217"/> <source>Client 
version</source> <translation type="unfinished"/> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation type="unfinished"/> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>Startup time</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Network</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Last block time</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the BulbaCoin-Qt help message to get a list with possible BulbaCoin command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation type="unfinished"/> </message> <message> <location line="-260"/> <source>Build date</source> <translation type="unfinished"/> </message> <message> <location line="-104"/> <source>BulbaCoin - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>BulbaCoin Core</source> <translation type="unfinished"/> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Open the BulbaCoin debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation type="unfinished"/> </message> <message> <location filename="../rpcconsole.cpp" line="-30"/> <source>Welcome to the BulbaCoin RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+124"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>コインを送る</translation> </message> <message> <location line="+50"/> <source>Send to multiple recipients at once</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Balance:</source> <translation>残高:</translation> </message> <message> <location line="+10"/> <source>123.456 BTC</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-59"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>The recipient address is not valid, please recheck.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation>フォーム</translation> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation type="unfinished"/> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a BulbaCoin address (they start with an 'P')</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>&amp;Sign Message</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. 
Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+213"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-203"/> <location line="+213"/> <source>Alt+A</source> <translation type="unfinished"/> </message> <message> <location line="-203"/> <source>Paste address from clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Signature</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Copy the current signature to the system clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this BulbaCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Reset all sign message fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. 
Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified BulbaCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Verify &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Reset all verify message fields</source> <translation type="unfinished"/> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a BulbaCoin address (they start with an 'P')</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Enter BulbaCoin signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation type="unfinished"/> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation type="unfinished"/> </message> </context> <context> <name>SplashScreen</name> <message> <location filename="../splashscreen.cpp" line="+22"/> <source>The BulbaCoin developers</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+20"/> <source>Open until %1</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>%1/offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Status</source> <translation type="unfinished"/> 
</message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Source</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Generated</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation type="unfinished"/> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>label</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation type="unfinished"/> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Net amount</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Message</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Comment</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Transaction</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Inputs</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>true</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>false</source> <translation type="unfinished"/> </message> <message> <location line="-209"/> <source>, has not been successfully broadcast yet</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-35"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+70"/> <source>unknown</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+225"/> <source>Date</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Type</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Address</source> <translation>Helbidea</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+57"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+3"/> <source>Open until %1</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Offline (%1 confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed (%1 of %2 confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirmed (%1 confirmations)</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+8"/> <source>Mined balance will be available when it matures in %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+5"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+43"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Received from</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sent to</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> 
<source>Payment to yourself</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation type="unfinished"/> </message> <message> <location line="+199"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+52"/> <location line="+16"/> <source>All</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <source>Today</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This week</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Last month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This year</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Range...</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Sent to</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>To yourself</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Other</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Min amount</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>Copy address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+139"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>テキスト CSV (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation type="unfinished"/> </message> <message> 
<location line="+1"/> <source>Date</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Type</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Label</source> <translation>レーベル</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Helbidea</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>ID</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation>エラー輸出</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>%1のファイルに書き込めませんでした。</translation> </message> <message> <location line="+100"/> <source>Range:</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>to</source> <translation type="unfinished"/> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+193"/> <source>Send Coins</source> <translation type="unfinished"/> </message> </context> <context> <name>WalletView</name> <message> <location filename="../walletview.cpp" line="+42"/> <source>&amp;Export</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="+193"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Backup Successful</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>The wallet data was successfully saved to the new location.</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+94"/> <source>BulbaCoin version</source> <translation>BulbaCoin Bertsio</translation> </message> <message> <location line="+102"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Send command to -server or bulbacoind</source> <translation type="unfinished"/> </message> <message> <location line="-23"/> <source>List commands</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>Get help for a command</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Options:</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Specify configuration file (default: bulbacoin.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Specify pid file (default: bulbacoind.pid)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <source>Set database cache size in megabytes (default: 25)</source> 
<translation type="unfinished"/> </message> <message> <location line="-28"/> <source>Listen for connections on &lt;port&gt; (default: 33994 or testnet: 13994)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation type="unfinished"/> </message> <message> <location line="-48"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <source>Specify your own public address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="-134"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 33993 or testnet: 13993)</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <source>Accept command line and JSON-RPC commands</source> <translation type="unfinished"/> </message> <message> <location line="+76"/> <source>Run in the background as a daemon and accept commands</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <source>Use the test network</source> <translation type="unfinished"/> </message> <message> <location line="-112"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation type="unfinished"/> </message> <message> <location line="-80"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=bulbacoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;BulbaCoin Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. BulbaCoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Error: The transaction was rejected! 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong BulbaCoin will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning: wallet.dat corrupt, data salvaged! 
Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Block creation options:</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Connect only to the specified node(s)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Corrupted block database detected</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Do you want to rebuild the block database now?</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error initializing block database</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error initializing wallet database environment %s!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading block database</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error opening block database</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error: system error: </source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to listen on any port. 
Use -listen=0 if you want this.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to read block info</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to read block</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to sync block index</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to write block index</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to write block info</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to write block</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to write file info</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to write to coin database</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to write transaction index</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Failed to write undo data</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Find peers using DNS lookup (default: 1 unless -connect)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Generate coins (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 288, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-4, default: 3)</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Not enough file descriptors available.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Rebuild block chain index from current blk000??.dat files</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation type="unfinished"/> </message> <message> <location line="+26"/> <source>Verifying blocks...</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Verifying wallet...</source> <translation type="unfinished"/> </message> <message> <location line="-69"/> <source>Imports blocks from external blk000??.dat file</source> <translation type="unfinished"/> </message> <message> <location line="-76"/> <source>Set the number of script verification threads (up to 16, 0 = auto, &lt;0 = leave that many cores free, default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Invalid amount for -mintxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Maintain a full transaction index (default: 0)</source> <translation type="unfinished"/> 
</message> <message> <location line="+2"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Only accept block chain matching built-in checkpoints (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Output extra debugging information. Implies all other -debug* options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>SSL options: (see the BulbaCoin Wiki for SSL setup instructions)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Signing transaction failed</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>System error: </source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Transaction amount too small</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transaction amounts must be positive</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transaction too large</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Username for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Warning</source> <translation type="unfinished"/> </message> 
<message> <location line="+1"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>You need to rebuild the databases using -reindex to change -txindex</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>wallet.dat corrupt, salvage failed</source> <translation type="unfinished"/> </message> <message> <location line="-50"/> <source>Password for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="-67"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation type="unfinished"/> </message> <message> <location line="+76"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="-120"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation type="unfinished"/> </message> <message> <location line="+147"/> <source>Upgrade wallet to latest format</source> <translation type="unfinished"/> </message> <message> <location line="-21"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>Rescan the block chain for missing wallet transactions</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="-26"/> <source>Server certificate file (default: server.cert)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation type="unfinished"/> </message> <message> <location line="-151"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+165"/> <source>This help message</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation type="unfinished"/> </message> <message> <location line="-91"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="-10"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation type="unfinished"/> </message> <message> <location line="+55"/> <source>Loading addresses...</source> <translation type="unfinished"/> </message> <message> <location line="-35"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat: Wallet requires newer version of BulbaCoin</source> <translation type="unfinished"/> </message> <message> <location line="+93"/> <source>Wallet needed to be rewritten: restart BulbaCoin to complete</source> <translation type="unfinished"/> </message> <message> <location line="-95"/> <source>Error loading wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation type="unfinished"/> 
</message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation type="unfinished"/> </message> <message> <location line="-96"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Invalid amount</source> <translation type="unfinished"/> </message> <message> <location line="-6"/> <source>Insufficient funds</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Loading block index...</source> <translation type="unfinished"/> </message> <message> <location line="-57"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>Unable to bind to %s on this computer. BulbaCoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="+64"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Loading wallet...</source> <translation type="unfinished"/> </message> <message> <location line="-52"/> <source>Cannot downgrade wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Cannot write default address</source> <translation type="unfinished"/> </message> <message> <location line="+64"/> <source>Rescanning...</source> <translation type="unfinished"/> </message> <message> <location line="-57"/> <source>Done loading</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <source>To use the %s option</source> <translation type="unfinished"/> </message> <message> <location line="-74"/> <source>Error</source> <translation type="unfinished"/> </message> <message> <location line="-31"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation type="unfinished"/> </message> </context> </TS><|fim▁end|>
<message> <location line="-11"/> <source>&amp;New Address</source>
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var config = require('../lib/config')();
var Changeset = require('./Changeset');
var queries = require('./queries');
var helpers = require('../helpers');
require('../validators');
var validate = require('validate.js');
var errors = require('../errors');

var pgPromise = helpers.pgPromise;
var promisifyQuery = helpers.promisifyQuery;

var changesets = {};
module.exports = changesets;

var pgURL = config.PostgresURL;

// Search for changesets matching the given params; resolves to a GeoJSON
// FeatureCollection carrying a `total` count.
changesets.search = function(params) {
    var parseError = validateParams(params);
    if (parseError) {
        return Promise.reject(new errors.ParseError(parseError));
    }
    var searchQuery = queries.getSearchQuery(params);
    var countQuery = queries.getCountQuery(params);
    console.log('searchQ', searchQuery);
    console.log('countQ', countQuery);

    return pgPromise(pgURL)
        .then(function (pg) {
            var query = promisifyQuery(pg.client);
            var searchProm = query(searchQuery.text, searchQuery.values);
            var countProm = query(countQuery.text, countQuery.values);
            // Run both queries in parallel and release the client either way.
            return Promise.all([searchProm, countProm])
                .then(function (r) {
                    pg.done();
                    return r;
                })
                .catch(function (e) {
                    pg.done();
                    return Promise.reject(e);
                });
        })
        .then(processSearchResults);
};

// Fetch a single changeset by numeric id, together with its comments.
changesets.get = function(id) {
    if (!validate.isNumber(parseInt(id, 10))) {
        return Promise.reject(new errors.ParseError('Changeset id must be a number'));
    }
    var changesetQuery = queries.getChangesetQuery(id);
    var changesetCommentsQuery = queries.getChangesetCommentsQuery(id);

    return pgPromise(pgURL)
        .then(function (pg) {
            var query = promisifyQuery(pg.client);
            var changesetProm = query(changesetQuery.text, changesetQuery.values);
            var changesetCommentsProm = query(changesetCommentsQuery.text, changesetCommentsQuery.values);
            return Promise.all([changesetProm, changesetCommentsProm])
                .then(function (results) {
                    pg.done();
                    return results;
                })
                .catch(function (e) {
                    pg.done();
                    return Promise.reject(e);
                });
        })
        .then(function (results) {
            var changesetResult = results[0];
            if (changesetResult.rows.length === 0) {
                return Promise.reject(new errors.NotFoundError('Changeset not found'));
            }
            var changeset = new Changeset(results[0].rows[0], results[1].rows);
            return changeset.getGeoJSON();
        });
};

function processSearchResults(results) {
    var searchResult = results[0];
    var countResult = results[1];<|fim▁hole|>
        var changeset = new Changeset(row);
        return changeset.getGeoJSON();
    });

    var count;
    if (countResult.rows.length > 0) {
        count = countResult.rows[0].count;
    } else {
        count = 0;
    }

    var featureCollection = {
        'type': 'FeatureCollection',
        'features': changesetsArray,
        'total': count
    };

    return featureCollection;
}

// Validate search params; returns a joined error-message string or null.
function validateParams(params) {
    var constraints = {
        'from': {
            'presence': false,
            'datetime': true
        },
        'to': {
            'presence': false,
            'datetime': true
        },
        'bbox': {
            'presence': false,
            'bbox': true
        }
    };

    var errs = validate(params, constraints);
    if (errs) {
        var errMsg = Object.keys(errs).map(function(key) {
            return errs[key][0];
        }).join(', ');
        return errMsg;
    }
    return null;
}<|fim▁end|>
var changesetsArray = searchResult.rows.map(function (row) {
<|file_name|>Twitter.js<|end_file_name|><|fim▁begin|><|fim▁hole|>// components
import Loading from '../../components/loading/Loading';

// styles
var style = require('./_index.scss');

// data
var Timeline = require('react-twitter-widgets').Timeline;

var Twitter = React.createClass({
    getInitialState: function(){
        return {
            isLoading: true
        }
    },
    componentWillMount: function(){
        this.setState({
            isLoading: true
        })
    },
    componentDidMount: function(){
        this.setState({
            isLoading: false
        })
    },
    render: function(){
        if(this.state.isLoading === true){
            return (
                <Loading />
            )
        } else {
            return (
                <div className='twitter-container'>
                    <Timeline
                        className='twitter-component'
                        dataSource={{
                            sourceType: 'widget',
                            widgetId: '844245316706566144'
                        }}
                    />
                </div>
            )
        }
    }
});

export default Twitter;<|fim▁end|>
// core
import React from 'react';
import { PropTypes } from 'react';