| prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
|---|---|
<|file_name|>player.js<|end_file_name|><|fim▁begin|>let html_audio = document.getElementById("audio-source");
let html_open_button = document.getElementById("open-button");
//Setup the audio graph and context
let audioContext = new window.AudioContext();
let audioSource = audioContext.createMediaElementSource(html_audio);
let audioAnalyser = audioContext.createAnalyser();
audioAnalyser.fftSize = 2048;
let audioVolume = audioContext.createGain();
audioVolume.gain.value = 1;
audioSource.connect(audioAnalyser);
audioSource.connect(audioVolume);
audioVolume.connect(audioContext.destination);
//File input
html_open_button.addEventListener("click", () => {
remote.dialog.showOpenDialog({filters: [{name: "Music files" ,extensions: ["mp3"]}], properties: ['openFile']}, (file) => {
if(file) {
readMusicFile(file);
}
});
});
function readMusicFile(file) {
html_audio.pause();
html_audio.src = file[0];
html_audio.load();
MusicMetadata.parseFile(file[0], {duration: true})
.then((metadata) => {
if(metadata.common.artist) {
document.getElementById("audio-artist").innerHTML = metadata.common.artist;
}
else {
document.getElementById("audio-artist").innerHTML = null;
}
if(metadata.common.title) {
document.getElementById("audio-title").innerHTML = metadata.common.title;
}
else {
document.getElementById("audio-title").innerHTML = file[0].slice(file[0].lastIndexOf("\\")+1, file[0].lastIndexOf("."));
}
readNewDirectory(file[0]);
if(metadata.common.picture !== null && typeof metadata.common.picture == "object") {
document.getElementById("album-image").width = 125;
document.getElementById("album-image").height = 125;
let pictureData = new Uint8Array(metadata.common.picture[0].data);
let len = metadata.common.picture[0].data.byteLength;
let pictureDataString = "";
for (let i = 0; i < len; i++) {
pictureDataString += String.fromCharCode(pictureData[i]);
}
let base64String = btoa(pictureDataString);
document.getElementById("album-image").src = "data:image/jpg;base64,"+base64String;
document.getElementById("audio-title").style.marginLeft = "20px";
}
else {
document.getElementById("album-image").width = 0;
document.getElementById("album-image").height = 0;
document.getElementById("album-image").src = "";
document.getElementById("audio-title").style.marginLeft = "0px";
}
document.getElementById("slider").max = Math.floor(metadata.format.duration);
let minutes = Math.floor(metadata.format.duration / 60).toString();
if(minutes < 10) {
minutes = "0" + minutes;
}
let seconds = Math.round(metadata.format.duration % 60).toString();
if(seconds < 10) {
seconds = "0" + seconds;
}
document.getElementById("total-time").innerHTML = minutes + ":" + seconds;
})
.catch((err) => console.log(err));
startSliderPositionUpdate();
html_audio.play();
document.getElementById("play-button").innerHTML = "❘❘";
};
//Slider and timestamp
let sliderPositionInterval = 0;
let mousedown = false;
let startSliderPositionUpdate = function() {
sliderPositionInterval = setInterval(() => {
if(!mousedown) {
let minutes = Math.floor(html_audio.currentTime / 60);
if(minutes < 10) {
minutes = "0" + minutes.toString();
}
let seconds = Math.round(html_audio.currentTime % 60);
if(seconds < 10) {
seconds = "0" + seconds.toString();
}
document.getElementById("current-time").innerHTML = minutes + ":" + seconds;
document.getElementById("slider").value = html_audio.currentTime;
}
}, 500);
};
document.getElementById("slider").addEventListener("mousedown", () => {
mousedown = true;
clearInterval(sliderPositionInterval);
sliderPositionInterval = 0;
});
document.getElementById("slider").addEventListener("mouseup", () => {
mousedown = false;
html_audio.currentTime = $("#slider").val();
startSliderPositionUpdate();
});
//Play-pause button
document.getElementById("play-button").addEventListener("click", () => {
if(html_audio.paused) {
html_audio.play();
document.getElementById("play-button").innerHTML = "❘❘";
}
else {
html_audio.pause();
document.getElementById("play-button").innerHTML = "▷";
}
});
//Next song button
document.getElementById("next-song-button").addEventListener("click", () => {
if(currentSong == songsList.length - 1) {
currentSong = 0;
readMusicFile(new Array(currentDir + "\\" + songsList[currentSong]));
}
else {
currentSong += 1;
readMusicFile(new Array(currentDir + "\\" + songsList[currentSong]));
}
});
//Previous song button
document.getElementById("previous-song-button").addEventListener("click", () => {
if(html_audio.currentTime >= 15) {
readMusicFile(new Array(currentDir + "\\" + songsList[currentSong]));
}
else if(currentSong == 0) {
currentSong = songsList.length - 1;
readMusicFile(new Array(currentDir + "\\" + songsList[currentSong]));
}
else {
currentSong -= 1;
readMusicFile(new Array(currentDir + "\\" + songsList[currentSong]));
}
});
//Automatically load next song
document.getElementById("audio-source").addEventListener("ended", () => {
if(currentSong == songsList.length - 1) {
currentSong = 0;
readMusicFile(new Array(currentDir + "\\" + songsList[currentSong]));
}
else {
currentSong += 1;
readMusicFile(new Array(currentDir + "\\" + songsList[currentSong]));
}
});
//Mouse hover delay in player info
let playerInfoHoverTimer = 0;
$("#audio-info").hover(() => {
if(playerInfoHoverTimer == 0) {<|fim▁hole|> else {
clearTimeout(playerInfoHoverTimer);
}
},
() => {
playerInfoHoverTimer = setTimeout(() => {
$("#audio-info").css("opacity", 0.0);
$("#audio-info").css("top", "0px");
playerInfoHoverTimer = 0;
}, 2500);
});<|fim▁end|>
|
$("#audio-info").css("opacity", 1.0);
$("#audio-info").css("top", "30px");
}
|
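The album-art code above builds a base64 data URI by converting the picture bytes to characters one at a time before calling `btoa`. A minimal Python sketch of the same technique (the `picture_data` payload and JPEG MIME type are assumptions for illustration):

```python
import base64

def picture_to_data_uri(picture_data: bytes, mime: str = "image/jpeg") -> str:
    # Encode raw image bytes and wrap them in a data URI, as the player
    # does for album art.
    b64 = base64.b64encode(picture_data).decode("ascii")
    return "data:%s;base64,%s" % (mime, b64)

# Hypothetical payload: just the JPEG start/end markers.
print(picture_to_data_uri(b"\xff\xd8\xff\xd9"))
```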
<|file_name|>AntRenameHandler.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2010 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.lang.ant.refactoring;
import com.intellij.codeInsight.TargetElementUtilBase;
import com.intellij.lang.ant.dom.AntDomFileDescription;
import com.intellij.openapi.actionSystem.CommonDataKeys;
import com.intellij.openapi.actionSystem.DataContext;
import com.intellij.openapi.actionSystem.LangDataKeys;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.project.IndexNotReadyException;
import com.intellij.openapi.project.Project;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiReference;
import com.intellij.psi.xml.XmlFile;
import com.intellij.refactoring.rename.PsiElementRenameHandler;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.Collection;
/**
* @author Eugene Zhuravlev
* Date: Mar 19, 2007
*/
public final class AntRenameHandler extends PsiElementRenameHandler {
public boolean isAvailableOnDataContext(final DataContext dataContext) {
final PsiElement[] elements = getElements(dataContext);
return elements != null && elements.length > 1;
}
public void invoke(@NotNull final Project project, final Editor editor, final PsiFile file, final DataContext dataContext) {
final PsiElement[] elements = getElements(dataContext);
if (elements != null && elements.length > 0) {
invoke(project, new PsiElement[]{elements[0]}, dataContext);
}
}
public void invoke(@NotNull final Project project, @NotNull final PsiElement[] elements, final DataContext dataContext) {
super.invoke(project, elements, dataContext);
}
@Nullable <|fim▁hole|> private static PsiElement[] getElements(DataContext dataContext) {
final PsiFile psiFile = CommonDataKeys.PSI_FILE.getData(dataContext);
if (!(psiFile instanceof XmlFile && AntDomFileDescription.isAntFile((XmlFile)psiFile))) {
return null;
}
final Editor editor = LangDataKeys.EDITOR.getData(dataContext);
if (editor == null) {
return null;
}
return getPsiElementsIn(editor, psiFile);
}
@Nullable
private static PsiElement[] getPsiElementsIn(final Editor editor, final PsiFile psiFile) {
try {
final PsiReference reference = TargetElementUtilBase.findReference(editor, editor.getCaretModel().getOffset());
if (reference == null) {
return null;
}
final Collection<PsiElement> candidates = TargetElementUtilBase.getInstance().getTargetCandidates(reference);
return ContainerUtil.toArray(candidates, new PsiElement[candidates.size()]);
}
catch (IndexNotReadyException e) {
return null;
}
}
}<|fim▁end|>
| |
<|file_name|>settings_devel_fast.py<|end_file_name|><|fim▁begin|># Settings for running unittests. These are optimized for speed.
from .settings_devel import * # noqa
<|fim▁hole|> "weight": "healthmonitor.migrations_not_used_in_tests"
}<|fim▁end|>
|
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = []
MIGRATION_MODULES = {
|
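The settings above point an app's entry in `MIGRATION_MODULES` at a module that does not exist, so Django skips its migrations during tests. A common generalization of the same trick (a sketch, not part of this repo; on older Django versions the value had to be a nonexistent module name rather than `None`) disables migrations for every app:

```python
# settings_test.py (sketch): treat every app as having no migrations.
class DisableMigrations(object):
    def __contains__(self, item):
        return True  # claim to have an entry for every app label

    def __getitem__(self, item):
        return None  # "no migrations module" for any app

MIGRATION_MODULES = DisableMigrations()
```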
<|file_name|>i18n.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Various i18n functions.
Helper functions for both the internal translation system
and for TranslateWiki-based translations.
By default messages are assumed to reside in a package called
'scripts.i18n'. In pywikibot 2.0, that package is not packaged
with pywikibot, and pywikibot 2.0 does not have a hard dependency
on any i18n messages. However, there are three user input questions
in pagegenerators which will use i18n messages if they can be loaded.
The default message location may be changed by calling
L{set_message_package} with a package name. The package must contain
an __init__.py, and a message bundle called 'pywikibot' containing
messages. See L{twntranslate} for more information on the messages.
"""
#
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import sys
import re
import locale
import json
import os
import pkgutil
from collections import defaultdict
from pywikibot import Error
from .plural import plural_rules
import pywikibot
from . import config2 as config
if sys.version_info[0] > 2:
basestring = (str, )
PLURAL_PATTERN = r'{{PLURAL:(?:%\()?([^\)]*?)(?:\)d)?\|(.*?)}}'
# Package name for the translation messages. The messages data must be loaded
# relative to that package name. At the top of this package there should be
# directories named for each script/message bundle, and each directory
# should contain JSON files called <lang>.json
_messages_package_name = 'scripts.i18n'
# Flag to indicate whether translation messages are available
_messages_available = None
# Cache of translated messages
_cache = defaultdict(dict)
def set_messages_package(package_name):
"""Set the package name where i18n messages are located."""
global _messages_package_name
global _messages_available
_messages_package_name = package_name
_messages_available = None
def messages_available():
"""
Return False if there are no i18n messages available.
To determine if messages are available, it looks for the package name
set using L{set_messages_package} for a message bundle called 'pywikibot'
containing messages.
@rtype: bool
"""
global _messages_available
if _messages_available is not None:
return _messages_available
try:
__import__(_messages_package_name)
except ImportError:
_messages_available = False
return False
_messages_available = True
return True
def _altlang(code):
"""Define fallback languages for particular languages.
If no translation is available to a specified language, translate() will
try each of the specified fallback languages, in order, until it finds
one with a translation, with 'en' and '_default' as a last resort.
For example, if for language 'xx', you want the preference of languages
to be: xx > fr > ru > en, you let this method return ['fr', 'ru'].
This code is used by other translating methods below.
@param code: The language code
@type code: string
@return: language codes
@rtype: list of str
"""
# Akan
if code in ['ak', 'tw']:
return ['ak', 'tw']
# Amharic
if code in ['aa', 'ti']:
return ['am']
# Arab
if code in ['arc', 'arz', 'so']:
return ['ar']
if code == 'kab':
return ['ar', 'fr']
# Bulgarian
if code in ['cu', 'mk']:
return ['bg', 'sr', 'sh']
# Czech
if code in ['cs', 'sk']:
return ['cs', 'sk']
# German
if code in ['bar', 'frr', 'ksh', 'pdc', 'pfl']:
return ['de']
if code == 'lb':
return ['de', 'fr']
if code in ['als', 'gsw']:
return ['als', 'gsw', 'de']
if code == 'nds':
return ['nds-nl', 'de']
if code in ['dsb', 'hsb']:
return ['hsb', 'dsb', 'de']
if code == 'sli':
return ['de', 'pl']
if code == 'rm':
return ['de', 'it']
if code == 'stq':
return ['nds', 'de']
# Greek
if code in ['grc', 'pnt']:
return ['el']
# Esperanto<|fim▁hole|> 'yua']:
return ['es']
if code in ['gl', 'gn']:
return ['es', 'pt']
if code == 'eu':
return ['es', 'fr']
if code == 'cbk-zam':
return ['es', 'tl']
# Estonian
if code in ['fiu-vro', 'vro']:
return ['fiu-vro', 'vro', 'et']
if code == 'liv':
return ['et', 'lv']
# Persian (Farsi)
if code == 'ps':
return ['fa']
if code in ['glk', 'mzn']:
return ['glk', 'mzn', 'fa', 'ar']
# Finnish
if code == 'vep':
return ['fi', 'ru']
if code == 'fit':
return ['fi', 'sv']
# French
if code in ['bm', 'br', 'ht', 'kg', 'ln', 'mg', 'nrm', 'pcd',
'rw', 'sg', 'ty', 'wa']:
return ['fr']
if code == 'oc':
return ['fr', 'ca', 'es']
if code in ['co', 'frp']:
return ['fr', 'it']
# Hindi
if code in ['sa']:
return ['hi']
if code in ['ne', 'new']:
return ['ne', 'new', 'hi']
if code in ['bh', 'bho']:
return ['bh', 'bho']
# Indonesian and Malay
if code in ['ace', 'bug', 'bjn', 'id', 'jv', 'ms', 'su']:
return ['id', 'ms', 'jv']
if code == 'map-bms':
return ['jv', 'id', 'ms']
# Inuit languages
if code in ['ik', 'iu']:
return ['iu', 'kl']
if code == 'kl':
return ['da', 'iu', 'no', 'nb']
# Italian
if code in ['eml', 'fur', 'lij', 'lmo', 'nap', 'pms', 'roa-tara', 'sc',
'scn', 'vec']:
return ['it']
# Lithuanian
if code in ['bat-smg', 'sgs']:
return ['bat-smg', 'sgs', 'lt']
# Latvian
if code == 'ltg':
return ['lv']
# Dutch
if code in ['af', 'fy', 'li', 'pap', 'srn', 'vls', 'zea']:
return ['nl']
    if code == 'nds-nl':
return ['nds', 'nl']
# Polish
if code in ['csb', 'szl']:
return ['pl']
# Portuguese
if code in ['fab', 'mwl', 'tet']:
return ['pt']
# Romanian
if code in ['roa-rup', 'rup']:
return ['roa-rup', 'rup', 'ro']
if code == 'mo':
return ['ro']
# Russian and Belarusian
if code in ['ab', 'av', 'ba', 'bxr', 'ce', 'cv', 'inh', 'kk', 'koi', 'krc',
'kv', 'ky', 'lbe', 'lez', 'mdf', 'mhr', 'mn', 'mrj', 'myv',
'os', 'sah', 'tg', 'udm', 'uk', 'xal']:
return ['ru']
if code in ['kbd', 'ady']:
return ['kbd', 'ady', 'ru']
if code == 'tt':
return ['tt-cyrl', 'ru']
if code in ['be', 'be-x-old', 'be-tarask']:
return ['be', 'be-x-old', 'be-tarask', 'ru']
if code == 'kaa':
return ['uz', 'ru']
# Serbocroatian
if code in ['bs', 'hr', 'sh']:
return ['sh', 'hr', 'bs', 'sr', 'sr-el']
if code == 'sr':
return ['sr-el', 'sh', 'hr', 'bs']
# Tagalog
if code in ['bcl', 'ceb', 'ilo', 'pag', 'pam', 'war']:
return ['tl']
# Turkish and Kurdish
if code in ['diq', 'ku']:
return ['ku', 'ku-latn', 'tr']
if code == 'gag':
return ['tr']
if code == 'ckb':
return ['ku']
# Ukrainian
if code in ['crh', 'crh-latn']:
return ['crh', 'crh-latn', 'uk', 'ru']
if code in ['rue']:
return ['uk', 'ru']
# Chinese
if code in ['zh-classical', 'lzh', 'minnan', 'zh-min-nan', 'nan', 'zh-tw',
'zh', 'zh-hans']:
return ['zh', 'zh-hans', 'zh-tw', 'zh-cn', 'zh-classical', 'lzh']
if code in ['cdo', 'gan', 'hak', 'ii', 'wuu', 'za', 'zh-classical', 'lzh',
'zh-cn', 'zh-yue', 'yue']:
        return ['zh', 'zh-hans', 'zh-cn', 'zh-tw', 'zh-classical', 'lzh']
# Scandinavian languages
if code in ['da', 'sv']:
return ['da', 'no', 'nb', 'sv', 'nn']
if code in ['fo', 'is']:
return ['da', 'no', 'nb', 'nn', 'sv']
if code == 'nn':
return ['no', 'nb', 'sv', 'da']
if code in ['no', 'nb']:
return ['no', 'nb', 'da', 'nn', 'sv']
if code == 'se':
return ['sv', 'no', 'nb', 'nn', 'fi']
# Other languages
if code in ['bi', 'tpi']:
return ['bi', 'tpi']
if code == 'yi':
return ['he', 'de']
if code in ['ia', 'ie']:
return ['ia', 'la', 'it', 'fr', 'es']
if code == 'xmf':
return ['ka']
if code in ['nso', 'st']:
return ['st', 'nso']
if code in ['kj', 'ng']:
return ['kj', 'ng']
if code in ['meu', 'hmo']:
return ['meu', 'hmo']
    if code == 'as':
return ['bn']
# Default value
return []
class TranslationError(Error, ImportError):
"""Raised when no correct translation could be found."""
# Inherits from ImportError, as this exception is now used
# where previously an ImportError would have been raised,
# and may have been caught by scripts as such.
pass
def _get_translation(lang, twtitle):
"""
Return message of certain twtitle if exists.
For internal use, don't use it directly.
"""
if twtitle in _cache[lang]:
return _cache[lang][twtitle]
message_bundle = twtitle.split('-')[0]
trans_text = None
filename = '%s/%s.json' % (message_bundle, lang)
try:
trans_text = pkgutil.get_data(
_messages_package_name, filename).decode('utf-8')
except (OSError, IOError): # file open can cause several exceptions
_cache[lang][twtitle] = None
return
transdict = json.loads(trans_text)
_cache[lang].update(transdict)
try:
return transdict[twtitle]
except KeyError:
return
def _extract_plural(code, message, parameters):
"""Check for the plural variants in message and replace them.
@param message: the message to be replaced
@type message: unicode string
@param parameters: plural parameters passed from other methods
@type parameters: int, basestring, tuple, list, dict
"""
plural_items = re.findall(PLURAL_PATTERN, message)
if plural_items: # we found PLURAL patterns, process it
if len(plural_items) > 1 and isinstance(parameters, (tuple, list)) and \
len(plural_items) != len(parameters):
raise ValueError("Length of parameter does not match PLURAL "
"occurrences.")
i = 0
for selector, variants in plural_items:
if isinstance(parameters, dict):
num = int(parameters[selector])
elif isinstance(parameters, basestring):
num = int(parameters)
elif isinstance(parameters, (tuple, list)):
num = int(parameters[i])
i += 1
else:
num = parameters
# TODO: check against plural_rules[code]['nplurals']
try:
index = plural_rules[code]['plural'](num)
except KeyError:
index = plural_rules['_default']['plural'](num)
except TypeError:
# we got an int, not a function
index = plural_rules[code]['plural']
repl = variants.split('|')[index]
message = re.sub(PLURAL_PATTERN, repl, message, count=1)
return message
DEFAULT_FALLBACK = ('_default', )
def translate(code, xdict, parameters=None, fallback=False):
"""Return the most appropriate translation from a translation dict.
Given a language code and a dictionary, returns the dictionary's value for
key 'code' if this key exists; otherwise tries to return a value for an
alternative language that is most applicable to use on the wiki in
language 'code' except fallback is False.
The language itself is always checked first, then languages that
have been defined to be alternatives, and finally English. If none of
    the options gives a result, we just take one language from xdict, which may
    not always be the same. When fallback is iterable it'll return None if no
code applies (instead of returning one).
For PLURAL support have a look at the twntranslate method
@param code: The language code
@type code: string or Site object
@param xdict: dictionary with language codes as keys or extended dictionary
with family names as keys containing language dictionaries or
a single (unicode) string. May contain PLURAL tags as
described in twntranslate
@type xdict: dict, string, unicode
@param parameters: For passing (plural) parameters
@type parameters: dict, string, unicode, int
@param fallback: Try an alternate language code. If it's iterable it'll
also try those entries and choose the first match.
@type fallback: boolean or iterable
"""
family = pywikibot.config.family
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
family = code.family.name
code = code.code
# Check whether xdict has multiple projects
if isinstance(xdict, dict):
if family in xdict:
xdict = xdict[family]
elif 'wikipedia' in xdict:
xdict = xdict['wikipedia']
# Get the translated string
if not isinstance(xdict, dict):
trans = xdict
elif not xdict:
trans = None
else:
codes = [code]
if fallback is True:
codes += _altlang(code) + ['_default', 'en']
elif fallback is not False:
codes += list(fallback)
for code in codes:
if code in xdict:
trans = xdict[code]
break
else:
if fallback is not True:
# this shouldn't simply return "any one" code but when fallback
# was True before 65518573d2b0, it did just that. When False it
# did just return None. It's now also returning None in the new
# iterable mode.
return
code = list(xdict.keys())[0]
trans = xdict[code]
if trans is None:
return # return None if we have no translation found
if parameters is None:
return trans
# else we check for PLURAL variants
trans = _extract_plural(code, trans, parameters)
if parameters:
try:
return trans % parameters
except (KeyError, TypeError):
# parameter is for PLURAL variants only, don't change the string
pass
return trans
def twtranslate(code, twtitle, parameters=None, fallback=True):
"""
Translate a message.
The translations are retrieved from json files in messages_package_name.
fallback parameter must be True for i18n and False for L10N or testing
purposes.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing parameters.
@param fallback: Try an alternate language code
@type fallback: boolean
"""
if not messages_available():
raise TranslationError(
'Unable to load messages package %s for bundle %s'
'\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n'
% (_messages_package_name, twtitle))
code_needed = False
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
lang = code.code
# check whether we need the language code back
elif isinstance(code, list):
lang = code.pop()
code_needed = True
else:
lang = code
# There are two possible failure modes: the translation dict might not have
# the language altogether, or a specific key could be untranslated. Both
# modes are caught with the KeyError.
langs = [lang]
if fallback:
langs += _altlang(lang) + ['en']
for alt in langs:
trans = _get_translation(alt, twtitle)
if trans:
break
else:
raise TranslationError(
'No English translation has been defined for TranslateWiki key'
' %r\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n' % twtitle)
# send the language code back via the given list
if code_needed:
code.append(alt)
if parameters:
return trans % parameters
else:
return trans
# Maybe this function should be merged with twtranslate
def twntranslate(code, twtitle, parameters=None):
r"""Translate a message with plural support.
Support is implemented like in MediaWiki extension. If the TranslateWiki
message contains a plural tag inside which looks like::
{{PLURAL:<number>|<variant1>|<variant2>[|<variantn>]}}
it takes that variant calculated by the plural_rules depending on the number
value. Multiple plurals are allowed.
    As an example, if we had several json dictionaries in the test folder like:
en.json:
{
"test-plural": "Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}.",
}
fr.json:
{
"test-plural": "Robot: Changer %(descr)s {{PLURAL:num|une page|quelques pages}}.",
}
and so on.
>>> from pywikibot import i18n
>>> i18n.set_messages_package('tests.i18n')
>>> # use a number
>>> str(i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'})
'Bot: Changing no pages.'
>>> # use a string
>>> str(i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'})
'Bot: Changing one page.'
>>> # use a dictionary
>>> str(i18n.twntranslate('en', 'test-plural', {'num':2}))
'Bot: Changing 2 pages.'
>>> # use additional format strings
>>> str(i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}))
'Robot: Changer seulement une page.'
>>> # use format strings also outside
>>> str(i18n.twntranslate('fr', 'test-plural', 10) % {'descr': 'seulement'})
'Robot: Changer seulement quelques pages.'
The translations are retrieved from i18n.<package>, based on the callers
import table.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing (plural) parameters.
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
# we send the code via list and get the alternate code back
code = [code]
trans = twtranslate(code, twtitle)
# get the alternate language code modified by twtranslate
lang = code.pop()
# check for PLURAL variants
trans = _extract_plural(lang, trans, parameters)
# we always have a dict for replacement of translatewiki messages
if parameters and isinstance(parameters, dict):
try:
return trans % parameters
except KeyError:
# parameter is for PLURAL variants only, don't change the string
pass
return trans
def twhas_key(code, twtitle):
"""
Check if a message has a translation in the specified language code.
The translations are retrieved from i18n.<package>, based on the callers
import table.
No code fallback is made.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
transdict = _get_translation(code, twtitle)
if transdict is None:
return False
return True
def twget_keys(twtitle):
"""
Return all language codes for a special message.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# obtain the directory containing all the json files for this package
package = twtitle.split("-")[0]
mod = __import__(_messages_package_name, fromlist=[str('__file__')])
pathname = os.path.join(os.path.dirname(mod.__file__), package)
# build a list of languages in that directory
langs = [filename.partition('.')[0]
for filename in sorted(os.listdir(pathname))
if filename.endswith('.json')]
    # exclude languages that do not have this specific message in that package
# i.e. an incomplete set of translated messages.
return [lang for lang in langs
if lang != 'qqq' and
_get_translation(lang, twtitle)]
def input(twtitle, parameters=None, password=False, fallback_prompt=None):
"""
Ask the user a question, return the user's answer.
The prompt message is retrieved via L{twtranslate} and either uses the
config variable 'userinterface_lang' or the default locale as the language
code.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: The values which will be applied to the translated text
@param password: Hides the user's input (for password entry)
@param fallback_prompt: The English prompt if i18n is not available.
@rtype: unicode string
"""
if not messages_available():
if not fallback_prompt:
raise TranslationError(
'Unable to load messages package %s for bundle %s'
% (_messages_package_name, twtitle))
else:
prompt = fallback_prompt
else:
code = config.userinterface_lang or \
locale.getdefaultlocale()[0].split('_')[0]
prompt = twtranslate(code, twtitle, parameters)
return pywikibot.input(prompt, password)<|fim▁end|>
|
if code in ['io', 'nov']:
return ['eo']
# Spanish
if code in ['an', 'arn', 'ast', 'ay', 'ca', 'ext', 'lad', 'nah', 'nv', 'qu',
|
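`PLURAL_PATTERN` in i18n.py captures the plural selector and the variant list from a TranslateWiki message, which `_extract_plural` then resolves against `plural_rules`. A self-contained check of how the regex matches (the message text is made up):

```python
import re

PLURAL_PATTERN = r'{{PLURAL:(?:%\()?([^\)]*?)(?:\)d)?\|(.*?)}}'

msg = "Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}."
for selector, variants in re.findall(PLURAL_PATTERN, msg):
    print(selector)             # num
    print(variants.split('|'))  # ['page', 'pages']
```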
<|file_name|>expressions-bitwise-xor-assignment.js<|end_file_name|><|fim▁begin|>let a = 5; // 00000000000000000000000000000101
a ^= 3; // 00000000000000000000000000000011
console.log(a); // 00000000000000000000000000000110<|fim▁hole|><|fim▁end|>
|
// expected output: 6
|
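XOR assignment behaves the same way in Python; a quick check of the bit patterns from the comments above:

```python
a = 5             # 0b101
a ^= 3            # 0b011
print(a, bin(a))  # 6 0b110
```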
<|file_name|>0002_auto_20170413_1633.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-13 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
<|fim▁hole|> ('posts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='tags',
),
migrations.DeleteModel(
name='Tag',
),
]<|fim▁end|>
|
dependencies = [
|
<|file_name|>Impl_0.java<|end_file_name|><|fim▁begin|>package issues.issue130;
public class Impl_0 {
public int a = 0;
protected void printMe(String s) {<|fim▁hole|> System.out.println(s);
}
}<|fim▁end|>
| |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import React from 'react';
import { TypeChooser } from 'react-stockcharts/lib/helper'
import Chart from './Chart'
import { getData } from './util';
class ChartComponent extends React.Component {
componentDidMount () {
getData().then(data => {
this.setState({ data})
})
}
render () {
if (this.state == null) {
return <div>
Loading...<|fim▁hole|> <Chart type='hybrid' data={this.state.data} />
)
}
}
export default ChartComponent;<|fim▁end|>
|
</div>
}
return (
|
<|file_name|>user.ts<|end_file_name|><|fim▁begin|>import { ValidationError, ValidationCode } from './validation-error';
export function validateUsername({ id }): boolean {
if (id.length < 3 || id.length > 32) {
throw new ValidationError({
code: ValidationCode.ID,
message: 'User ID must be between 3 and 32 characters.',
});
}<|fim▁hole|>
return true;
}
export function validateNewUser({ id, password }): boolean {
if (id.length < 3 || id.length > 32) {
throw new ValidationError({
code: ValidationCode.ID,
message: 'User ID must be between 3 and 32 characters.',
});
}
if (password.length < 8 || password.length > 100) {
throw new ValidationError({
code: ValidationCode.PASSWORD,
      message: 'User password must be between 8 and 100 characters.',
});
}
return true;
}<|fim▁end|>
| |
<|file_name|>UnZip.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
import zipfile
from pyload.plugin.Extractor import Extractor, ArchiveError, CRCError, PasswordError
from pyload.utils import fs_encode
<|fim▁hole|> __version = "1.12"
__description = """Zip extractor plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "[email protected]")]
EXTENSIONS = [".zip", ".zip64"]
NAME = __name__.rsplit('.', 1)[1]
VERSION = "(python %s.%s.%s)" % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def isUsable(cls):
return sys.version_info[:2] >= (2, 6)
def list(self, password=None):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
return z.namelist()
def check(self, password):
pass
def verify(self):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
raise PasswordError
def extract(self, password=None):
try:
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
z.extractall(self.out)
except (zipfile.BadZipfile, zipfile.LargeZipFile), e:
raise ArchiveError(e)
except RuntimeError, e:
if "encrypted" in e:
raise PasswordError
else:
raise ArchiveError(e)
else:
self.files = z.namelist()<|fim▁end|>
|
class UnZip(Extractor):
__name = "UnZip"
__type = "extractor"
|
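The extractor above targets Python 2 (note the `except ..., e` syntax). A minimal Python 3 sketch of the same verify-then-extract flow using the standard-library `zipfile` (the function name and error type are illustrative, not pyload API):

```python
import zipfile

def extract_zip(path, out_dir, password=None):
    # Verify member CRCs first, then extract, mirroring UnZip.extract().
    pwd = password.encode() if password else None
    with zipfile.ZipFile(path, "r", allowZip64=True) as z:
        bad = z.testzip()  # name of the first corrupt member, or None
        if bad:
            raise ValueError("CRC check failed for %s" % bad)
        z.extractall(out_dir, pwd=pwd)
        return z.namelist()
```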
<|file_name|>volume_cubic_inches_to_metric_test.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the<|fim▁hole|># AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import unittest
from .volume_cubic_inches_to_metric import cubic_inches_to_metric
class VolumeTestCase(unittest.TestCase):
def test(self):
text = (
"Total volume is 100.50 cubic inches for this land. "
"Total volume is 15.7 cubic in for this land. "
"Total volume is 1 Cubic Inch for this land. "
"Total volume is 1-16 cu-in for this land. "
"Total volume is 1-16 cb. in for this land. "
"Total volume is 16.7-Cubic-in for this land. "
"Total volume is 16,500-cu. in. for this land. "
)
item = {"body_html": text}
res, diff = cubic_inches_to_metric(item)
self.assertEqual(diff["100.50 cubic inches"], "100.50 cubic inches (1,647 cubic centimeter)")
self.assertEqual(diff["15.7 cubic in"], "15.7 cubic in (257.3 cubic centimeter)")
self.assertEqual(diff["1 Cubic Inch"], "1 Cubic Inch (16 cubic centimeter)")
self.assertEqual(diff["1-16 cu-in"], "1-16 cu-in (16-262 cubic centimeter)")
self.assertEqual(diff["1-16 cb. in"], "1-16 cb. in (16-262 cubic centimeter)")
self.assertEqual(diff["16.7-Cubic-in"], "16.7-Cubic-in (273.7 cubic centimeter)")
self.assertEqual(diff["16,500-cu. in"], "16,500-cu. in (0.3 cubic meter)")
self.assertEqual(res["body_html"], item["body_html"])<|fim▁end|>
| |
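The expected values in the test follow from the exact definition of the cubic inch: one inch is 2.54 cm, so one cubic inch is 2.54**3 = 16.387064 cubic centimeters. A quick check against the first assertion:

```python
CUBIC_CM_PER_CUBIC_INCH = 2.54 ** 3  # 16.387064, exact by definition

def cubic_inches_to_cc(value):
    return value * CUBIC_CM_PER_CUBIC_INCH

print(round(cubic_inches_to_cc(100.50)))  # 1647, matching "1,647 cubic centimeter"
```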
<|file_name|>conference-data.ts<|end_file_name|><|fim▁begin|>import { Injectable } from '@angular/core';
import { Http } from '@angular/http';
import { UserData } from './user-data';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/operator/map';
import 'rxjs/add/observable/of';
@Injectable()
export class ConferenceData {
data: any;
constructor(public http: Http, public user: UserData) { }
load(): any {
if (this.data) {
return Observable.of(this.data);
} else {
return this.http.get('assets/data/data.json')
.map(this.processData, this);
}
}<|fim▁hole|> // build up the data by linking employees to sessions
this.data = data.json();
this.data.tracks = [];
// loop through each day in the schedule
this.data.schedule.forEach((day: any) => {
// loop through each timeline group in the day
day.groups.forEach((group: any) => {
// loop through each session in the timeline group
group.sessions.forEach((session: any) => {
session.employees = [];
if (session.employeeNames) {
session.employeeNames.forEach((employeeName: any) => {
let employee = this.data.employees.find((s: any) => s.name === employeeName);
if (employee) {
session.employees.push(employee);
employee.sessions = employee.sessions || [];
employee.sessions.push(session);
}
});
}
if (session.tracks) {
session.tracks.forEach((track: any) => {
if (this.data.tracks.indexOf(track) < 0) {
this.data.tracks.push(track);
}
});
}
});
});
});
return this.data;
}
getTimeline(dayIndex: number, queryText = '', excludeTracks: any[] = [], segment = 'all') {
return this.load().map((data: any) => {
let day = data.schedule[dayIndex];
day.shownSessions = 0;
queryText = queryText.toLowerCase().replace(/,|\.|-/g, ' ');
let queryWords = queryText.split(' ').filter(w => !!w.trim().length);
day.groups.forEach((group: any) => {
group.hide = true;
group.sessions.forEach((session: any) => {
// check if this session should show or not
this.filterSession(session, queryWords, excludeTracks, segment);
if (!session.hide) {
// if this session is not hidden then this group should show
group.hide = false;
day.shownSessions++;
}
});
});
return day;
});
}
filterSession(session: any, queryWords: string[], excludeTracks: any[], segment: string) {
let matchesQueryText = false;
if (queryWords.length) {
// of any query word is in the session name than it passes the query test
queryWords.forEach((queryWord: string) => {
if (session.name.toLowerCase().indexOf(queryWord) > -1) {
matchesQueryText = true;
}
});
} else {
// if there are no query words then this session passes the query test
matchesQueryText = true;
}
// if any of the sessions tracks are not in the
// exclude tracks then this session passes the track test
let matchesTracks = false;
session.tracks.forEach((trackName: string) => {
if (excludeTracks.indexOf(trackName) === -1) {
matchesTracks = true;
}
});
// if the segement is 'favorites', but session is not a user favorite
// then this session does not pass the segment test
let matchesSegment = false;
if (segment === 'favorites') {
if (this.user.hasFavorite(session.name)) {
matchesSegment = true;
}
} else {
matchesSegment = true;
}
// all tests must be true if it should not be hidden
session.hide = !(matchesQueryText && matchesTracks && matchesSegment);
}
getEmployees() {
return this.load().map((data: any) => {
return data.employees.sort((a: any, b: any) => {
let aName = a.name.split(' ').pop();
let bName = b.name.split(' ').pop();
return aName.localeCompare(bName);
});
});
}
getTracks() {
return this.load().map((data: any) => {
return data.tracks.sort();
});
}
getMap() {
return this.load().map((data: any) => {
return data.map;
});
}
}<|fim▁end|>
|
processData(data: any) {
// just some good 'ol JS fun with objects and arrays
|
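`processData()` above joins employees to sessions with a linear `find` per employee name. The same join, sketched in Python with a dict index so each lookup is O(1) (the data shape mirrors the TypeScript code and is otherwise assumed):

```python
def link_employees(data):
    # Attach employee records to each session, as processData() does.
    by_name = {e["name"]: e for e in data["employees"]}
    for day in data["schedule"]:
        for group in day["groups"]:
            for session in group["sessions"]:
                session["employees"] = []
                for name in session.get("employeeNames", []):
                    employee = by_name.get(name)
                    if employee:
                        session["employees"].append(employee)
                        employee.setdefault("sessions", []).append(session)
    return data
```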
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-20 19:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',<|fim▁hole|> fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('report_url', models.URLField()),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
]<|fim▁end|>
| |
<|file_name|>_dsymatrix-calc.hpp<|end_file_name|><|fim▁begin|>//=============================================================================<|fim▁hole|> WARNING_REPORT;
std::cerr << "This function call has no effect since the matrix is symmetric." << std::endl;
#endif//CPPL_DEBUG
return mat;
}
//=============================================================================
/*! return its inverse matrix */
inline _dsymatrix i(const _dsymatrix& mat)
{CPPL_VERBOSE_REPORT;
dsymatrix mat_cp(mat);
dsymatrix mat_inv(mat_cp.n);
mat_inv.identity();
char UPLO('l');
CPPL_INT NRHS(mat.n), LDA(mat.n), *IPIV(new CPPL_INT[mat.n]), LDB(mat.n), LWORK(-1), INFO(1);
double *WORK( new double[1] );
dsysv_(&UPLO, &mat_cp.n, &NRHS, mat_cp.array, &LDA, IPIV, mat_inv.array, &LDB, WORK, &LWORK, &INFO);
LWORK = CPPL_INT(WORK[0]);
delete [] WORK;
WORK = new double[LWORK];
dsysv_(&UPLO, &mat_cp.n, &NRHS, mat_cp.array, &LDA, IPIV, mat_inv.array, &LDB, WORK, &LWORK, &INFO);
delete [] WORK;
delete [] IPIV;
if(INFO!=0){
WARNING_REPORT;
std::cerr << "Serious trouble happend. INFO = " << INFO << "." << std::endl;
}
return _(mat_inv);
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//=============================================================================
/*! search the index of element having the largest absolute value
in 0-based numbering system */
inline void idamax(CPPL_INT& i, CPPL_INT& j, const _dsymatrix& mat)
{CPPL_VERBOSE_REPORT;
dsymatrix newmat =mat;
idamax(i, j, newmat);
}
//=============================================================================
/*! return its largest absolute value */
inline double damax(const _dsymatrix& mat)
{CPPL_VERBOSE_REPORT;
dsymatrix newmat =mat;
return damax(newmat);
}<|fim▁end|>
|
/*! return transposed _dsymatrix */
inline _dsymatrix t(const _dsymatrix& mat)
{CPPL_VERBOSE_REPORT;
#ifdef CPPL_DEBUG
|
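`i()` above inverts a symmetric matrix by initializing the result to the identity and letting LAPACK's `dsysv` solve A·X = I, including the LWORK = -1 workspace query. The same solve-against-identity idea in NumPy (a sketch; NumPy picks the factorization itself rather than using the symmetric dsysv path):

```python
import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])             # symmetric, as dsymatrix assumes
A_inv = np.linalg.solve(A, np.eye(2))  # solve A @ X = I, like dsysv above
print(np.allclose(A @ A_inv, np.eye(2)))  # True
```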
<|file_name|>progress-bar.component.spec.ts<|end_file_name|><|fim▁begin|>import { TestBed, ComponentFixture, waitForAsync } from '@angular/core/testing';
import { ProgressBarComponent } from './progress-bar.component';
describe('ProgressBarComponent', () => {
let fixture: ComponentFixture<ProgressBarComponent>;
let component: ProgressBarComponent;
let element;<|fim▁hole|> declarations: [ProgressBarComponent]
});
});
beforeEach(
waitForAsync(() => {
TestBed.compileComponents().then(() => {
fixture = TestBed.createComponent(ProgressBarComponent);
component = fixture.componentInstance;
element = fixture.nativeElement;
});
})
);
describe('fixture', () => {
it('should have a component instance', () => {
expect(component).toBeTruthy();
});
});
});<|fim▁end|>
|
// provide our implementations or mocks to the dependency injector
beforeEach(() => {
TestBed.configureTestingModule({
|
<|file_name|>get_intra_predictor.cpp<|end_file_name|><|fim▁begin|>/*!
* \copy
* Copyright (c) 2009-2013, Cisco Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* \file get_intra_predictor.c
*
* \brief implementation for get intra predictor about 16x16, 4x4, chroma.
*
* \date 4/2/2009 Created
* 9/14/2009 C level based optimization with high performance gained.
* [const, using ST32/ST64 to replace memset, memcpy and memmove etc.]
*
*************************************************************************************
*/
#include "ls_defines.h"
#include "cpu_core.h"
#include "intra_pred_common.h"
#include "get_intra_predictor.h"
namespace WelsEnc {
#define I4x4_COUNT 4
#define I8x8_COUNT 8
#define I16x16_COUNT 16
typedef void (*PFillingPred) (uint8_t* pPred, uint8_t* pSrc);
typedef void (*PFillingPred1to16) (uint8_t* pPred, const uint8_t kuiSrc);
static inline void WelsFillingPred8to16_c (uint8_t* pPred, uint8_t* pSrc) {
ST64 (pPred , LD64 (pSrc));
ST64 (pPred + 8, LD64 (pSrc));
}
static inline void WelsFillingPred8x2to16_c (uint8_t* pPred, uint8_t* pSrc) {
ST64 (pPred , LD64 (pSrc));
ST64 (pPred + 8, LD64 (pSrc + 8));
}
static inline void WelsFillingPred1to16_c (uint8_t* pPred, const uint8_t kuiSrc) {
const uint8_t kuiSrc8[8] = { kuiSrc, kuiSrc, kuiSrc, kuiSrc, kuiSrc, kuiSrc, kuiSrc, kuiSrc };
ST64 (pPred , LD64 (kuiSrc8));
ST64 (pPred + 8, LD64 (kuiSrc8));
}
#define WelsFillingPred8to16 WelsFillingPred8to16_c
#define WelsFillingPred8x2to16 WelsFillingPred8x2to16_c
#define WelsFillingPred1to16 WelsFillingPred1to16_c
#define I4x4_PRED_STRIDE 4
#define I4x4_PRED_STRIDE2 8
#define I4x4_PRED_STRIDE3 12
void WelsI4x4LumaPredV_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint32_t kuiSrc = LD32 (&pRef[-kiStride]);
ENFORCE_STACK_ALIGN_1D (uint32_t, uiSrcx2, 2, 16)
uiSrcx2[0] = uiSrcx2[1] = kuiSrc;
WelsFillingPred8to16 (pPred, (uint8_t*)&uiSrcx2[0]);
}
void WelsI4x4LumaPredH_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint32_t kiStridex2Left = (kiStride << 1) - 1;
const uint32_t kiStridex3Left = kiStride + kiStridex2Left;
const uint8_t kuiHor1 = pRef[-1];
const uint8_t kuiHor2 = pRef[kiStride - 1];
const uint8_t kuiHor3 = pRef[kiStridex2Left];
const uint8_t kuiHor4 = pRef[kiStridex3Left];
const uint8_t kuiVec1[4] = {kuiHor1, kuiHor1, kuiHor1, kuiHor1};
const uint8_t kuiVec2[4] = {kuiHor2, kuiHor2, kuiHor2, kuiHor2};
const uint8_t kuiVec3[4] = {kuiHor3, kuiHor3, kuiHor3, kuiHor3};
const uint8_t kuiVec4[4] = {kuiHor4, kuiHor4, kuiHor4, kuiHor4};
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
ST32 (&uiSrc[0], LD32 (kuiVec1));
ST32 (&uiSrc[4], LD32 (kuiVec2));
ST32 (&uiSrc[8], LD32 (kuiVec3));
ST32 (&uiSrc[12], LD32 (kuiVec4));
WelsFillingPred8x2to16 (pPred, uiSrc);
}
void WelsI4x4LumaPredDc_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint8_t kuiDcValue = (pRef[-1] + pRef[kiStride - 1] + pRef[ (kiStride << 1) - 1] + pRef[ (kiStride << 1) +
kiStride - 1] +
pRef[-kiStride] + pRef[1 - kiStride] + pRef[2 - kiStride] + pRef[3 - kiStride] + 4) >> 3;
WelsFillingPred1to16 (pPred, kuiDcValue);
}
void WelsI4x4LumaPredDcLeft_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint8_t kuiDcValue = (pRef[-1] + pRef[kiStride - 1] + pRef[ (kiStride << 1) - 1] + pRef[ (kiStride << 1) +
kiStride - 1] + 2) >> 2;
WelsFillingPred1to16 (pPred, kuiDcValue);
}
void WelsI4x4LumaPredDcTop_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint8_t kuiDcValue = (pRef[-kiStride] + pRef[1 - kiStride] + pRef[2 - kiStride] + pRef[3 - kiStride] + 2) >> 2;
WelsFillingPred1to16 (pPred, kuiDcValue);
}
void WelsI4x4LumaPredDcNA_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint8_t kuiDcValue = 0x80;
WelsFillingPred1to16 (pPred, kuiDcValue);
}
/*down pLeft*/
void WelsI4x4LumaPredDDL_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
/*get pTop*/
const uint8_t kuiT0 = pRef[-kiStride];
const uint8_t kuiT1 = pRef[1 - kiStride];
const uint8_t kuiT2 = pRef[2 - kiStride];
const uint8_t kuiT3 = pRef[3 - kiStride];
const uint8_t kuiT4 = pRef[4 - kiStride];
const uint8_t kuiT5 = pRef[5 - kiStride];
const uint8_t kuiT6 = pRef[6 - kiStride];
const uint8_t kuiT7 = pRef[7 - kiStride];
const uint8_t kuiDDL0 = (2 + kuiT0 + kuiT2 + (kuiT1 << 1)) >> 2; // uiDDL0
const uint8_t kuiDDL1 = (2 + kuiT1 + kuiT3 + (kuiT2 << 1)) >> 2; // uiDDL1
const uint8_t kuiDDL2 = (2 + kuiT2 + kuiT4 + (kuiT3 << 1)) >> 2; // uiDDL2
const uint8_t kuiDDL3 = (2 + kuiT3 + kuiT5 + (kuiT4 << 1)) >> 2; // uiDDL3
const uint8_t kuiDDL4 = (2 + kuiT4 + kuiT6 + (kuiT5 << 1)) >> 2; // uiDDL4
const uint8_t kuiDDL5 = (2 + kuiT5 + kuiT7 + (kuiT6 << 1)) >> 2; // uiDDL5
const uint8_t kuiDDL6 = (2 + kuiT6 + kuiT7 + (kuiT7 << 1)) >> 2; // uiDDL6
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
uiSrc[0] = kuiDDL0;
uiSrc[1] = uiSrc[4] = kuiDDL1;
uiSrc[2] = uiSrc[5] = uiSrc[8] = kuiDDL2;
uiSrc[3] = uiSrc[6] = uiSrc[9] = uiSrc[12] = kuiDDL3;
uiSrc[7] = uiSrc[10] = uiSrc[13] = kuiDDL4;
uiSrc[11] = uiSrc[14] = kuiDDL5;
uiSrc[15] = kuiDDL6;
WelsFillingPred8x2to16 (pPred, uiSrc);
}
/*down pLeft*/
void WelsI4x4LumaPredDDLTop_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
/*get pTop*/
const uint8_t kuiT0 = pRef[-kiStride];
const uint8_t kuiT1 = pRef[1 - kiStride];
const uint8_t kuiT2 = pRef[2 - kiStride];
const uint8_t kuiT3 = pRef[3 - kiStride];
const uint8_t kuiDLT0 = (2 + kuiT0 + kuiT2 + (kuiT1 << 1)) >> 2; // uiDLT0
const uint8_t kuiDLT1 = (2 + kuiT1 + kuiT3 + (kuiT2 << 1)) >> 2; // uiDLT1
const uint8_t kuiDLT2 = (2 + kuiT2 + kuiT3 + (kuiT3 << 1)) >> 2; // uiDLT2
const uint8_t kuiDLT3 = (2 + (kuiT3 << 2)) >> 2; // uiDLT3
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
memset (&uiSrc[6], kuiDLT3, 10 * sizeof (uint8_t));
uiSrc[0] = kuiDLT0;
uiSrc[1] = uiSrc[4] = kuiDLT1;
uiSrc[2] = uiSrc[5] = uiSrc[8] = kuiDLT2;
uiSrc[3] = kuiDLT3;
WelsFillingPred8x2to16 (pPred, uiSrc);
}
/*down right*/
void WelsI4x4LumaPredDDR_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const int32_t kiStridex2 = kiStride << 1;
const int32_t kiStridex3 = kiStride + kiStridex2;
const uint8_t kuiLT = pRef[-kiStride - 1]; // pTop-pLeft
/*get pLeft and pTop*/
const uint8_t kuiL0 = pRef[-1];
const uint8_t kuiL1 = pRef[kiStride - 1];
const uint8_t kuiL2 = pRef[kiStridex2 - 1];
const uint8_t kuiL3 = pRef[kiStridex3 - 1];
const uint8_t kuiT0 = pRef[-kiStride];
const uint8_t kuiT1 = pRef[1 - kiStride];
const uint8_t kuiT2 = pRef[2 - kiStride];
const uint8_t kuiT3 = pRef[3 - kiStride];
const uint16_t kuiTL0 = 1 + kuiLT + kuiL0;
const uint16_t kuiLT0 = 1 + kuiLT + kuiT0;
const uint16_t kuiT01 = 1 + kuiT0 + kuiT1;
const uint16_t kuiT12 = 1 + kuiT1 + kuiT2;
const uint16_t kuiT23 = 1 + kuiT2 + kuiT3;
const uint16_t kuiL01 = 1 + kuiL0 + kuiL1;
const uint16_t kuiL12 = 1 + kuiL1 + kuiL2;
const uint16_t kuiL23 = 1 + kuiL2 + kuiL3;
const uint8_t kuiDDR0 = (kuiTL0 + kuiLT0) >> 2;
const uint8_t kuiDDR1 = (kuiLT0 + kuiT01) >> 2;
const uint8_t kuiDDR2 = (kuiT01 + kuiT12) >> 2;
const uint8_t kuiDDR3 = (kuiT12 + kuiT23) >> 2;
const uint8_t kuiDDR4 = (kuiTL0 + kuiL01) >> 2;
const uint8_t kuiDDR5 = (kuiL01 + kuiL12) >> 2;
const uint8_t kuiDDR6 = (kuiL12 + kuiL23) >> 2;
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
uiSrc[0] = uiSrc[5] = uiSrc[10] = uiSrc[15] = kuiDDR0;
uiSrc[1] = uiSrc[6] = uiSrc[11] = kuiDDR1;
uiSrc[2] = uiSrc[7] = kuiDDR2;
uiSrc[3] = kuiDDR3;
uiSrc[4] = uiSrc[9] = uiSrc[14] = kuiDDR4;
uiSrc[8] = uiSrc[13] = kuiDDR5;
uiSrc[12] = kuiDDR6;
WelsFillingPred8x2to16 (pPred, uiSrc);
}
/*vertical pLeft*/
void WelsI4x4LumaPredVL_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
/*get pTop*/
const uint8_t kuiT0 = pRef[-kiStride];
const uint8_t kuiT1 = pRef[1 - kiStride];
const uint8_t kuiT2 = pRef[2 - kiStride];
const uint8_t kuiT3 = pRef[3 - kiStride];
const uint8_t kuiT4 = pRef[4 - kiStride];
const uint8_t kuiT5 = pRef[5 - kiStride];
const uint8_t kuiT6 = pRef[6 - kiStride];
const uint8_t kuiVL0 = (1 + kuiT0 + kuiT1) >> 1; // uiVL0
const uint8_t kuiVL1 = (1 + kuiT1 + kuiT2) >> 1; // uiVL1
const uint8_t kuiVL2 = (1 + kuiT2 + kuiT3) >> 1; // uiVL2
const uint8_t kuiVL3 = (1 + kuiT3 + kuiT4) >> 1; // uiVL3
const uint8_t kuiVL4 = (1 + kuiT4 + kuiT5) >> 1; // uiVL4
const uint8_t kuiVL5 = (2 + kuiT0 + (kuiT1 << 1) + kuiT2) >> 2; // uiVL5
const uint8_t kuiVL6 = (2 + kuiT1 + (kuiT2 << 1) + kuiT3) >> 2; // uiVL6
const uint8_t kuiVL7 = (2 + kuiT2 + (kuiT3 << 1) + kuiT4) >> 2; // uiVL7
const uint8_t kuiVL8 = (2 + kuiT3 + (kuiT4 << 1) + kuiT5) >> 2; // uiVL8
const uint8_t kuiVL9 = (2 + kuiT4 + (kuiT5 << 1) + kuiT6) >> 2; // uiVL9
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
uiSrc[0] = kuiVL0;
uiSrc[1] = uiSrc[8] = kuiVL1;
uiSrc[2] = uiSrc[9] = kuiVL2;
uiSrc[3] = uiSrc[10] = kuiVL3;
uiSrc[4] = kuiVL5;
uiSrc[5] = uiSrc[12] = kuiVL6;
uiSrc[6] = uiSrc[13] = kuiVL7;
uiSrc[7] = uiSrc[14] = kuiVL8;
uiSrc[11] = kuiVL4;
uiSrc[15] = kuiVL9;
WelsFillingPred8x2to16 (pPred, uiSrc);
}
/*vertical pLeft*/
void WelsI4x4LumaPredVLTop_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
uint8_t* pTopLeft = &pRef[-kiStride - 1]; // pTop-pLeft
/*get pTop*/
const uint8_t kuiT0 = * (pTopLeft + 1);
const uint8_t kuiT1 = * (pTopLeft + 2);
const uint8_t kuiT2 = * (pTopLeft + 3);
const uint8_t kuiT3 = * (pTopLeft + 4);
const uint8_t kuiVLT0 = (1 + kuiT0 + kuiT1) >> 1; // uiVLT0
const uint8_t kuiVLT1 = (1 + kuiT1 + kuiT2) >> 1; // uiVLT1
const uint8_t kuiVLT2 = (1 + kuiT2 + kuiT3) >> 1; // uiVLT2
const uint8_t kuiVLT3 = (1 + (kuiT3 << 1)) >> 1; // uiVLT3
const uint8_t kuiVLT4 = (2 + kuiT0 + (kuiT1 << 1) + kuiT2) >> 2; // uiVLT4
const uint8_t kuiVLT5 = (2 + kuiT1 + (kuiT2 << 1) + kuiT3) >> 2; // uiVLT5
const uint8_t kuiVLT6 = (2 + kuiT2 + (kuiT3 << 1) + kuiT3) >> 2; // uiVLT6
const uint8_t kuiVLT7 = (2 + (kuiT3 << 2)) >> 2; // uiVLT7
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
uiSrc[0] = kuiVLT0;
uiSrc[1] = uiSrc[8] = kuiVLT1;
uiSrc[2] = uiSrc[9] = kuiVLT2;
uiSrc[3] = uiSrc[10] = uiSrc[11] = kuiVLT3;
uiSrc[4] = kuiVLT4;
uiSrc[5] = uiSrc[12] = kuiVLT5;
uiSrc[6] = uiSrc[13] = kuiVLT6;
uiSrc[7] = uiSrc[14] = uiSrc[15] = kuiVLT7;
WelsFillingPred8x2to16 (pPred, uiSrc);
}
/*vertical right*/
void WelsI4x4LumaPredVR_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const int32_t kiStridex2 = kiStride << 1;
const uint8_t kuiLT = pRef[-kiStride - 1]; // pTop-pLeft
/*get pLeft and pTop*/
const uint8_t kuiL0 = pRef[-1];
const uint8_t kuiL1 = pRef[kiStride - 1];
const uint8_t kuiL2 = pRef[kiStridex2 - 1];
const uint8_t kuiT0 = pRef[-kiStride];
const uint8_t kuiT1 = pRef[1 - kiStride];
const uint8_t kuiT2 = pRef[2 - kiStride];
const uint8_t kuiT3 = pRef[3 - kiStride];
const uint8_t kuiVR0 = (1 + kuiLT + kuiT0) >> 1;
const uint8_t kuiVR1 = (1 + kuiT0 + kuiT1) >> 1;
const uint8_t kuiVR2 = (1 + kuiT1 + kuiT2) >> 1;
const uint8_t kuiVR3 = (1 + kuiT2 + kuiT3) >> 1;
const uint8_t kuiVR4 = (2 + kuiL0 + (kuiLT << 1) + kuiT0) >> 2;
const uint8_t kuiVR5 = (2 + kuiLT + (kuiT0 << 1) + kuiT1) >> 2;
const uint8_t kuiVR6 = (2 + kuiT0 + (kuiT1 << 1) + kuiT2) >> 2;
const uint8_t kuiVR7 = (2 + kuiT1 + (kuiT2 << 1) + kuiT3) >> 2;
const uint8_t kuiVR8 = (2 + kuiLT + (kuiL0 << 1) + kuiL1) >> 2;
const uint8_t kuiVR9 = (2 + kuiL0 + (kuiL1 << 1) + kuiL2) >> 2;
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
uiSrc[0] = uiSrc[9] = kuiVR0;
uiSrc[1] = uiSrc[10] = kuiVR1;
uiSrc[2] = uiSrc[11] = kuiVR2;
uiSrc[3] = kuiVR3;
uiSrc[4] = uiSrc[13] = kuiVR4;
uiSrc[5] = uiSrc[14] = kuiVR5;
uiSrc[6] = uiSrc[15] = kuiVR6;
uiSrc[7] = kuiVR7;
uiSrc[8] = kuiVR8;
uiSrc[12] = kuiVR9;
WelsFillingPred8x2to16 (pPred, uiSrc);
}
/*horizontal up*/
void WelsI4x4LumaPredHU_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const int32_t kiStridex2 = kiStride << 1;
const int32_t kiStridex3 = kiStride + kiStridex2;
/*get pLeft*/
const uint8_t kuiL0 = pRef[-1];
const uint8_t kuiL1 = pRef[kiStride - 1];
const uint8_t kuiL2 = pRef[kiStridex2 - 1];
const uint8_t kuiL3 = pRef[kiStridex3 - 1];
const uint16_t kuiL01 = (1 + kuiL0 + kuiL1);
const uint16_t kuiL12 = (1 + kuiL1 + kuiL2);
const uint16_t kuiL23 = (1 + kuiL2 + kuiL3);
const uint8_t kuiHU0 = kuiL01 >> 1;
const uint8_t kuiHU1 = (kuiL01 + kuiL12) >> 2;
const uint8_t kuiHU2 = kuiL12 >> 1;
const uint8_t kuiHU3 = (kuiL12 + kuiL23) >> 2;
const uint8_t kuiHU4 = kuiL23 >> 1;
const uint8_t kuiHU5 = (1 + kuiL23 + (kuiL3 << 1)) >> 2;
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
uiSrc[0] = kuiHU0;
uiSrc[1] = kuiHU1;
uiSrc[2] = uiSrc[4] = kuiHU2;
uiSrc[3] = uiSrc[5] = kuiHU3;
uiSrc[6] = uiSrc[8] = kuiHU4;
uiSrc[7] = uiSrc[9] = kuiHU5;
memset (&uiSrc[10], kuiL3, 6 * sizeof (uint8_t));
WelsFillingPred8x2to16 (pPred, uiSrc);
}
/*horizontal down*/
void WelsI4x4LumaPredHD_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const int32_t kiStridex2 = kiStride << 1;
const int32_t kiStridex3 = kiStride + kiStridex2;
const uint8_t kuiLT = pRef[-kiStride - 1]; // pTop-pLeft
/*get pLeft and pTop*/
const uint8_t kuiL0 = pRef[-1];
const uint8_t kuiL1 = pRef[kiStride - 1];
const uint8_t kuiL2 = pRef[kiStridex2 - 1];
const uint8_t kuiL3 = pRef[kiStridex3 - 1];
const uint8_t kuiT0 = pRef[-kiStride];
const uint8_t kuiT1 = pRef[1 - kiStride];
const uint8_t kuiT2 = pRef[2 - kiStride];
const uint8_t kuiHD0 = (1 + kuiLT + kuiL0) >> 1; // uiHD0
const uint8_t kuiHD1 = (2 + kuiL0 + (kuiLT << 1) + kuiT0) >> 2; // uiHD1
const uint8_t kuiHD2 = (2 + kuiLT + (kuiT0 << 1) + kuiT1) >> 2; // uiHD2
const uint8_t kuiHD3 = (2 + kuiT0 + (kuiT1 << 1) + kuiT2) >> 2; // uiHD3
const uint8_t kuiHD4 = (1 + kuiL0 + kuiL1) >> 1; // uiHD4
const uint8_t kuiHD5 = (2 + kuiLT + (kuiL0 << 1) + kuiL1) >> 2; // uiHD5
const uint8_t kuiHD6 = (1 + kuiL1 + kuiL2) >> 1; // uiHD6
const uint8_t kuiHD7 = (2 + kuiL0 + (kuiL1 << 1) + kuiL2) >> 2; // uiHD7
const uint8_t kuiHD8 = (1 + kuiL2 + kuiL3) >> 1; // uiHD8
const uint8_t kuiHD9 = (2 + kuiL1 + (kuiL2 << 1) + kuiL3) >> 2; // uiHD9
ENFORCE_STACK_ALIGN_1D (uint8_t, uiSrc, 16, 16) // TobeCont'd about assign opt as follows
uiSrc[0] = uiSrc[6] = kuiHD0;
uiSrc[1] = uiSrc[7] = kuiHD1;
uiSrc[2] = kuiHD2;
uiSrc[3] = kuiHD3;
uiSrc[4] = uiSrc[10] = kuiHD4;
uiSrc[5] = uiSrc[11] = kuiHD5;
uiSrc[8] = uiSrc[14] = kuiHD6;
uiSrc[9] = uiSrc[15] = kuiHD7;
uiSrc[12] = kuiHD8;
uiSrc[13] = kuiHD9;
WelsFillingPred8x2to16 (pPred, uiSrc);
}
#define I8x8_PRED_STRIDE 8
void WelsIChromaPredV_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint64_t kuiSrc64 = LD64 (&pRef[-kiStride]);
ST64 (pPred , kuiSrc64);
ST64 (pPred + 8 , kuiSrc64);
ST64 (pPred + 16, kuiSrc64);
ST64 (pPred + 24, kuiSrc64);
ST64 (pPred + 32, kuiSrc64);
ST64 (pPred + 40, kuiSrc64);
ST64 (pPred + 48, kuiSrc64);
ST64 (pPred + 56, kuiSrc64);
}
void WelsIChromaPredH_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
int32_t iStridex7 = (kiStride << 3) - kiStride;
int32_t iI8x8Stridex7 = (I8x8_PRED_STRIDE << 3) - I8x8_PRED_STRIDE;
uint8_t i = 7;
do {
const uint8_t kuiLeft = pRef[iStridex7 - 1]; // pLeft value
uint64_t kuiSrc64 = (uint64_t) (0x0101010101010101ULL * kuiLeft);
ST64 (pPred + iI8x8Stridex7, kuiSrc64);
iStridex7 -= kiStride;
iI8x8Stridex7 -= I8x8_PRED_STRIDE;
} while (i-- > 0);
}
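/*chroma 8x8 plane: fit a linear gradient to the top and left neighbors*/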
void WelsIChromaPredPlane_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
int32_t iLTshift = 0, iTopshift = 0, iLeftshift = 0, iTopSum = 0, iLeftSum = 0;
int32_t i, j;
uint8_t* pTop = &pRef[-kiStride];
uint8_t* pLeft = &pRef[-1];
for (i = 0 ; i < 4 ; i ++) {
iTopSum += (i + 1) * (pTop[4 + i] - pTop[2 - i]);
iLeftSum += (i + 1) * (pLeft[ (4 + i) * kiStride] - pLeft[ (2 - i) * kiStride]);
}
iLTshift = (pLeft[7 * kiStride] + pTop[7]) << 4;
iTopshift = (17 * iTopSum + 16) >> 5;
iLeftshift = (17 * iLeftSum + 16) >> 5;
for (i = 0 ; i < 8 ; i ++) {
for (j = 0 ; j < 8 ; j ++) {
pPred[j] = WelsClip1 ((iLTshift + iTopshift * (j - 3) + iLeftshift * (i - 3) + 16) >> 5);
}
pPred += I8x8_PRED_STRIDE;
}
}
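/*chroma 8x8 DC: average the top/left neighbors per 4x4 quadrant*/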
void WelsIChromaPredDc_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const int32_t kuiL1 = kiStride - 1;
const int32_t kuiL2 = kuiL1 + kiStride;
const int32_t kuiL3 = kuiL2 + kiStride;
const int32_t kuiL4 = kuiL3 + kiStride;
const int32_t kuiL5 = kuiL4 + kiStride;
const int32_t kuiL6 = kuiL5 + kiStride;
const int32_t kuiL7 = kuiL6 + kiStride;
/*calculate the iMean value*/
const uint8_t kuiMean1 = (pRef[-kiStride] + pRef[1 - kiStride] + pRef[2 - kiStride] + pRef[3 - kiStride] +
pRef[-1] + pRef[kuiL1] + pRef[kuiL2] + pRef[kuiL3] + 4) >> 3;
const uint32_t kuiSum2 = pRef[4 - kiStride] + pRef[5 - kiStride] + pRef[6 - kiStride] + pRef[7 - kiStride];
const uint32_t kuiSum3 = pRef[kuiL4] + pRef[kuiL5] + pRef[kuiL6] + pRef[kuiL7];
const uint8_t kuiMean2 = (kuiSum2 + 2) >> 2;
const uint8_t kuiMean3 = (kuiSum3 + 2) >> 2;
const uint8_t kuiMean4 = (kuiSum2 + kuiSum3 + 4) >> 3;
const uint8_t kuiTopMean[8] = {kuiMean1, kuiMean1, kuiMean1, kuiMean1, kuiMean2, kuiMean2, kuiMean2, kuiMean2};
const uint8_t kuiBottomMean[8] = {kuiMean3, kuiMean3, kuiMean3, kuiMean3, kuiMean4, kuiMean4, kuiMean4, kuiMean4};
const uint64_t kuiTopMean64 = LD64 (kuiTopMean);
const uint64_t kuiBottomMean64 = LD64 (kuiBottomMean);
ST64 (pPred , kuiTopMean64);
ST64 (pPred + 8 , kuiTopMean64);
ST64 (pPred + 16, kuiTopMean64);
ST64 (pPred + 24, kuiTopMean64);
ST64 (pPred + 32, kuiBottomMean64);
ST64 (pPred + 40, kuiBottomMean64);
ST64 (pPred + 48, kuiBottomMean64);
ST64 (pPred + 56, kuiBottomMean64);
}
void WelsIChromaPredDcLeft_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const int32_t kuiL1 = kiStride - 1;
const int32_t kuiL2 = kuiL1 + kiStride;
const int32_t kuiL3 = kuiL2 + kiStride;
const int32_t kuiL4 = kuiL3 + kiStride;
const int32_t kuiL5 = kuiL4 + kiStride;
const int32_t kuiL6 = kuiL5 + kiStride;
const int32_t kuiL7 = kuiL6 + kiStride;
/*calculate the iMean value*/
const uint8_t kuiTopMean = (pRef[-1] + pRef[kuiL1] + pRef[kuiL2] + pRef[kuiL3] + 2) >> 2 ;
const uint8_t kuiBottomMean = (pRef[kuiL4] + pRef[kuiL5] + pRef[kuiL6] + pRef[kuiL7] + 2) >> 2;
const uint64_t kuiTopMean64 = (uint64_t) (0x0101010101010101ULL * kuiTopMean);
const uint64_t kuiBottomMean64 = (uint64_t) (0x0101010101010101ULL * kuiBottomMean);
ST64 (pPred , kuiTopMean64);
ST64 (pPred + 8 , kuiTopMean64);
ST64 (pPred + 16, kuiTopMean64);
ST64 (pPred + 24, kuiTopMean64);
ST64 (pPred + 32, kuiBottomMean64);
ST64 (pPred + 40, kuiBottomMean64);
ST64 (pPred + 48, kuiBottomMean64);
ST64 (pPred + 56, kuiBottomMean64);
}
void WelsIChromaPredDcTop_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
/*calculate the iMean value*/
const uint8_t kuiMean1 = (pRef[-kiStride] + pRef[1 - kiStride] + pRef[2 - kiStride] + pRef[3 - kiStride] + 2) >> 2;
const uint8_t kuiMean2 = (pRef[4 - kiStride] + pRef[5 - kiStride] + pRef[6 - kiStride] + pRef[7 - kiStride] + 2) >> 2;
const uint8_t kuiMean[8] = {kuiMean1, kuiMean1, kuiMean1, kuiMean1, kuiMean2, kuiMean2, kuiMean2, kuiMean2};
const uint64_t kuiMean64 = LD64 (kuiMean);
ST64 (pPred , kuiMean64);
ST64 (pPred + 8 , kuiMean64);
ST64 (pPred + 16, kuiMean64);
ST64 (pPred + 24, kuiMean64);
ST64 (pPred + 32, kuiMean64);
ST64 (pPred + 40, kuiMean64);
ST64 (pPred + 48, kuiMean64);
ST64 (pPred + 56, kuiMean64);
}
void WelsIChromaPredDcNA_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
const uint64_t kuiDcValue64 = (uint64_t)0x8080808080808080ULL;
ST64 (pPred , kuiDcValue64);
ST64 (pPred + 8 , kuiDcValue64);
ST64 (pPred + 16, kuiDcValue64);
ST64 (pPred + 24, kuiDcValue64);
ST64 (pPred + 32, kuiDcValue64);
ST64 (pPred + 40, kuiDcValue64);
ST64 (pPred + 48, kuiDcValue64);
ST64 (pPred + 56, kuiDcValue64);
}
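/*luma 16x16 plane: fit a linear gradient to the top and left neighbors*/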
void WelsI16x16LumaPredPlane_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
int32_t iLTshift = 0, iTopshift = 0, iLeftshift = 0, iTopSum = 0, iLeftSum = 0;
int32_t i, j;
uint8_t* pTop = &pRef[-kiStride];
uint8_t* pLeft = &pRef[-1];
int32_t iPredStride = 16;
for (i = 0 ; i < 8 ; i ++) {
iTopSum += (i + 1) * (pTop[8 + i] - pTop[6 - i]);
iLeftSum += (i + 1) * (pLeft[ (8 + i) * kiStride] - pLeft[ (6 - i) * kiStride]);
}
iLTshift = (pLeft[15 * kiStride] + pTop[15]) << 4;
iTopshift = (5 * iTopSum + 32) >> 6;
iLeftshift = (5 * iLeftSum + 32) >> 6;
for (i = 0 ; i < 16 ; i ++) {
for (j = 0 ; j < 16 ; j ++) {
pPred[j] = WelsClip1 ((iLTshift + iTopshift * (j - 7) + iLeftshift * (i - 7) + 16) >> 5);
}
pPred += iPredStride;
}
}
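/*luma 16x16 DC: mean of the 16 top and 16 left neighbors*/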
void WelsI16x16LumaPredDc_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
int32_t iStridex15 = (kiStride << 4) - kiStride;
int32_t iSum = 0;
uint8_t i = 15;
uint8_t iMean = 0;
/*calculate the iMean value*/
do {
iSum += pRef[-1 + iStridex15] + pRef[-kiStride + i];
iStridex15 -= kiStride;
} while (i-- > 0);
iMean = (16 + iSum) >> 5;
memset (pPred, iMean, 256);
}
void WelsI16x16LumaPredDcTop_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
int32_t iSum = 0;
uint8_t i = 15;
uint8_t iMean = 0;
/*calculate the iMean value*/
do {
iSum += pRef[-kiStride + i];
} while (i-- > 0);
iMean = (8 + iSum) >> 4;
memset (pPred, iMean, 256);
}
void WelsI16x16LumaPredDcLeft_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
int32_t iStridex15 = (kiStride << 4) - kiStride;
int32_t iSum = 0;
uint8_t i = 15;
uint8_t iMean = 0;
/*calculate the iMean value*/
do {
iSum += pRef[-1 + iStridex15];
iStridex15 -= kiStride;
} while (i-- > 0);
iMean = (8 + iSum) >> 4;
memset (pPred, iMean, 256);
}
void WelsI16x16LumaPredDcNA_c (uint8_t* pPred, uint8_t* pRef, const int32_t kiStride) {
memset (pPred, 0x80, 256);
}
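/*populate the prediction function-pointer tables with the C implementations, then override entries with SIMD versions according to the detected CPU flags*/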
void WelsInitIntraPredFuncs (SWelsFuncPtrList* pFuncList, const uint32_t kuiCpuFlag) {
pFuncList->pfGetLumaI16x16Pred[I16_PRED_V] = WelsI16x16LumaPredV_c;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_H] = WelsI16x16LumaPredH_c;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC] = WelsI16x16LumaPredDc_c;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_P] = WelsI16x16LumaPredPlane_c;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC_L] = WelsI16x16LumaPredDcLeft_c;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC_T] = WelsI16x16LumaPredDcTop_c;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC_128] = WelsI16x16LumaPredDcNA_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_V] = WelsI4x4LumaPredV_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_H] = WelsI4x4LumaPredH_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DC] = WelsI4x4LumaPredDc_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DC_L] = WelsI4x4LumaPredDcLeft_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DC_T] = WelsI4x4LumaPredDcTop_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DC_128] = WelsI4x4LumaPredDcNA_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDL] = WelsI4x4LumaPredDDL_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDL_TOP] = WelsI4x4LumaPredDDLTop_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDR] = WelsI4x4LumaPredDDR_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VL] = WelsI4x4LumaPredVL_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VL_TOP] = WelsI4x4LumaPredVLTop_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VR] = WelsI4x4LumaPredVR_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HU] = WelsI4x4LumaPredHU_c;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HD] = WelsI4x4LumaPredHD_c;
pFuncList->pfGetChromaPred[C_PRED_DC] = WelsIChromaPredDc_c;
pFuncList->pfGetChromaPred[C_PRED_H] = WelsIChromaPredH_c;
pFuncList->pfGetChromaPred[C_PRED_V] = WelsIChromaPredV_c;
pFuncList->pfGetChromaPred[C_PRED_P] = WelsIChromaPredPlane_c;
pFuncList->pfGetChromaPred[C_PRED_DC_L] = WelsIChromaPredDcLeft_c;
pFuncList->pfGetChromaPred[C_PRED_DC_T] = WelsIChromaPredDcTop_c;
pFuncList->pfGetChromaPred[C_PRED_DC_128] = WelsIChromaPredDcNA_c;
#ifdef HAVE_NEON
if (kuiCpuFlag & WELS_CPU_NEON) {
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDR] = WelsI4x4LumaPredDDR_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HD] = WelsI4x4LumaPredHD_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HU] = WelsI4x4LumaPredHU_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VR] = WelsI4x4LumaPredVR_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDL] = WelsI4x4LumaPredDDL_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VL] = WelsI4x4LumaPredVL_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_H] = WelsI4x4LumaPredH_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_V] = WelsI4x4LumaPredV_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_V] = WelsI16x16LumaPredV_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_H] = WelsI16x16LumaPredH_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC] = WelsI16x16LumaPredDc_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_P] = WelsI16x16LumaPredPlane_neon;
pFuncList->pfGetChromaPred[C_PRED_DC] = WelsIChromaPredDc_neon;
pFuncList->pfGetChromaPred[C_PRED_V] = WelsIChromaPredV_neon;
pFuncList->pfGetChromaPred[C_PRED_P] = WelsIChromaPredPlane_neon;
pFuncList->pfGetChromaPred[C_PRED_H] = WelsIChromaPredH_neon;
}
#endif
#if defined(HAVE_NEON_AARCH64)
if (kuiCpuFlag & WELS_CPU_NEON) {
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC] = WelsI16x16LumaPredDc_AArch64_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_P] = WelsI16x16LumaPredPlane_AArch64_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_H] = WelsI16x16LumaPredH_AArch64_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_V] = WelsI16x16LumaPredV_AArch64_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC_L] = WelsI16x16LumaPredDcLeft_AArch64_neon;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC_T] = WelsI16x16LumaPredDcTop_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_H ] = WelsI4x4LumaPredH_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDL ] = WelsI4x4LumaPredDDL_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDL_TOP] = WelsI4x4LumaPredDDLTop_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VL ] = WelsI4x4LumaPredVL_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VL_TOP ] = WelsI4x4LumaPredVLTop_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VR ] = WelsI4x4LumaPredVR_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HU ] = WelsI4x4LumaPredHU_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HD ] = WelsI4x4LumaPredHD_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DC ] = WelsI4x4LumaPredDc_AArch64_neon;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DC_T ] = WelsI4x4LumaPredDcTop_AArch64_neon;
pFuncList->pfGetChromaPred[C_PRED_H] = WelsIChromaPredH_AArch64_neon;
pFuncList->pfGetChromaPred[C_PRED_V] = WelsIChromaPredV_AArch64_neon;
pFuncList->pfGetChromaPred[C_PRED_P ] = WelsIChromaPredPlane_AArch64_neon;
pFuncList->pfGetChromaPred[C_PRED_DC] = WelsIChromaPredDc_AArch64_neon;
pFuncList->pfGetChromaPred[C_PRED_DC_T] = WelsIChromaPredDcTop_AArch64_neon;
}
#endif//HAVE_NEON_AARCH64
#ifdef X86_ASM
if (kuiCpuFlag & WELS_CPU_MMXEXT) {
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDR] = WelsI4x4LumaPredDDR_mmx;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HD] = WelsI4x4LumaPredHD_mmx;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_HU] = WelsI4x4LumaPredHU_mmx;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VR] = WelsI4x4LumaPredVR_mmx;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DDL] = WelsI4x4LumaPredDDL_mmx;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_VL] = WelsI4x4LumaPredVL_mmx;<|fim▁hole|> }
if (kuiCpuFlag & WELS_CPU_SSE2) {
pFuncList->pfGetLumaI4x4Pred[I4_PRED_H] = WelsI4x4LumaPredH_sse2;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_DC] = WelsI4x4LumaPredDc_sse2;
pFuncList->pfGetLumaI4x4Pred[I4_PRED_V] = WelsI4x4LumaPredV_sse2;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_V] = WelsI16x16LumaPredV_sse2;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_H] = WelsI16x16LumaPredH_sse2;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_DC] = WelsI16x16LumaPredDc_sse2;
pFuncList->pfGetLumaI16x16Pred[I16_PRED_P] = WelsI16x16LumaPredPlane_sse2;
pFuncList->pfGetChromaPred[C_PRED_DC] = WelsIChromaPredDc_sse2;
pFuncList->pfGetChromaPred[C_PRED_V] = WelsIChromaPredV_sse2;
pFuncList->pfGetChromaPred[C_PRED_P] = WelsIChromaPredPlane_sse2;
}
#endif
}
}<|fim▁end|>
|
pFuncList->pfGetChromaPred[C_PRED_H] = WelsIChromaPredH_mmx;
|
<|file_name|>RSAKeyValueType.java<|end_file_name|><|fim▁begin|>//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2016.09.02 at 01:00:06 PM UYT
//
package dgi.classes.respuestas.reporte;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for RSAKeyValueType complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="RSAKeyValueType">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="Modulus" type="{http://www.w3.org/2000/09/xmldsig#}CryptoBinary"/>
* <element name="Exponent" type="{http://www.w3.org/2000/09/xmldsig#}CryptoBinary"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "RSAKeyValueType", propOrder = {
"modulus",
"exponent"
})
public class RSAKeyValueType {
@XmlElement(name = "Modulus", required = true)
protected byte[] modulus;
@XmlElement(name = "Exponent", required = true)
protected byte[] exponent;
/**
* Gets the value of the modulus property.
*
* @return
* possible object is
* byte[]
*/
public byte[] getModulus() {
return modulus;
}
/**
* Sets the value of the modulus property.
*
* @param value<|fim▁hole|> * byte[]
*/
public void setModulus(byte[] value) {
this.modulus = value;
}
/**
* Gets the value of the exponent property.
*
* @return
* possible object is
* byte[]
*/
public byte[] getExponent() {
return exponent;
}
/**
* Sets the value of the exponent property.
*
* @param value
* allowed object is
* byte[]
*/
public void setExponent(byte[] value) {
this.exponent = value;
}
}<|fim▁end|>
|
* allowed object is
|
<|file_name|>xian.js<|end_file_name|><|fim▁begin|>var $box = $('.box');
var F = {};
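// Returns the mouse position in page coordinates; relative to $relaveDom when that element is passed.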
F.getMousePos = function(e, $relaveDom) {
var x = 0;
var y = 0;
if (!e) {
var e = window.event;
}
if (e.pageX || e.pageY) {
x = e.pageX;
y = e.pageY;
}
else if (e.clientX || e.clientY) {
x = e.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
y = e.clientY + document.body.scrollTop + document.documentElement.scrollTop;
}
if($relaveDom) {
var offset = $relaveDom.offset();
x -= offset.left;
y -= offset.top;
}
return {x:x, y:y};
};
var batteryWater = function(opts){
var self = this;
this.opts = $.extend({
dom: '',
content: [],
color: {}
}, opts)
this.$dom = this.opts.dom;
this.content = this.opts.content;
this.timeScale = this.opts.timeScale;
this.color = this.opts.color;
this.canvas = this.$dom.find('canvas');
this.width = this.$dom.width();
this.height = this.$dom.height();
this.ctx = this.canvas.get(0).getContext('2d');
var pi = Math.PI;
var o_x = 30.5; // origin coordinates
var o_y = 185.5;
var scale_width = 700;
var each_width = parseInt(scale_width/(2 * this.timeScale.length));
var each_height = 25;
var scale_height = each_height * 6;
var point_radius = 2.5; // radius of the small dots
var o_temp = 25; // temperature at the origin
var y_height = 0; // vertical progress of each point during the animation
var arr_pos = []; // stores the coordinates of each point
this.makeScale = function(){
var ctx = this.ctx;
ctx.save();
ctx.translate(o_x, o_y);
// temperature labels
ctx.beginPath();
ctx.font = '10px Arial';
ctx.fillStyle = self.color.gray;
ctx.textBaseline = 'middle';
ctx.textAlign = 'right';
ctx.fillText(o_temp, -10, 0);
ctx.closePath();
// horizontal temperature gridlines
ctx.beginPath();
for( var i=1; i<7; i++){
ctx.fillText(o_temp + 5 * i , -10, -i * each_height);
ctx.moveTo(0, -i * each_height);
ctx.lineTo(scale_width, -i * each_height);
}
ctx.lineWidth = 1;
ctx.strokeStyle = self.color.l_gray;
ctx.stroke();
ctx.closePath();
ctx.restore();
};
this.drawTemp = function(y_height){
var ctx = this.ctx;
ctx.save();
ctx.translate(o_x, o_y);
for(var i=0; i<self.content.length; i++){
var temp_x = i * each_width;
var ny = self.content[i].values - o_temp;
var temp_y = -ny * 5 * y_height;
if( i != self.content.length - 1 ){
var nny = self.content[i+1].values - o_temp;
var temp_ny = -nny * 5 * y_height;
}
if( y_height >= 1 ){
arr_pos.push({x: temp_x, y: temp_y, values: self.content[i].values});
}
// shaded temperature band
ctx.beginPath();
ctx.moveTo( temp_x, 0);
ctx.lineTo( temp_x, temp_y);
ctx.lineTo( (i+1) * each_width, temp_ny);
ctx.lineTo( (i+1) * each_width, 0);
ctx.lineTo( temp_x, 0);
ctx.fillStyle = 'rgba(89, 103, 107, 0.05)';
ctx.fill();
ctx.closePath();
// vertical line
ctx.beginPath();
ctx.moveTo(temp_x, 0);
ctx.lineTo(temp_x, temp_y);
ctx.strokeStyle = self.color.l_gray;
ctx.lineWidth = 1;
ctx.stroke();
ctx.closePath();
// line connecting adjacent points (none after the last point)
if( i != self.content.length - 1 ){
ctx.beginPath();
ctx.moveTo(temp_x, temp_y);
ctx.lineTo( (i+1) * each_width, temp_ny);
ctx.strokeStyle = self.color.black;
ctx.lineWidth = 1;
ctx.stroke();
ctx.closePath();
}
// white backing for the temperature dot
ctx.beginPath();
ctx.arc(temp_x, temp_y, point_radius-0.5, 0, 2*pi);
ctx.fillStyle = '#fff';
ctx.fill();
ctx.closePath();
// temperature dot
ctx.beginPath();
ctx.arc(temp_x, temp_y, point_radius-0.5, 0, 2*pi);
ctx.strokeStyle = self.color.black;
ctx.stroke();
ctx.closePath();
}
ctx.restore();
};
this.makeOy = function(){
var ctx = this.ctx;
ctx.save();
ctx.translate(o_x, o_y);
ctx.beginPath();
ctx.moveTo(0, 0);
ctx.lineTo(scale_width, 0);
ctx.strokeStyle = self.color.black;
ctx.stroke();
ctx.closePath();
ctx.beginPath();
for(var i=0; i<this.timeScale.length; i++){
ctx.font = '10px Arial';<|fim▁hole|> ctx.closePath();
ctx.beginPath();
for(var j=0; j<2 * this.timeScale.length + 1; j+=2){
ctx.arc(j * each_width, 0, point_radius, 0, 2*pi);
ctx.fillStyle = self.color.black;
}
ctx.fill();
ctx.closePath();
ctx.restore();
};
// mouse hover
this.makeHover = function(pos){
var ctx = this.ctx;
ctx.save();
ctx.translate(o_x, o_y);
ctx.beginPath();
ctx.arc(pos.x, pos.y, point_radius+0.5, 0, 2*pi);
ctx.fillStyle = '#fff';
ctx.fill();
ctx.closePath();
ctx.beginPath();
ctx.arc(pos.x, pos.y, point_radius+0.5, 0, 2*pi);
ctx.strokeStyle = self.color.blue;
ctx.stroke();
ctx.closePath();
ctx.beginPath();
ctx.arc(pos.x, pos.y, 1.5, 0, 2*pi);
ctx.fillStyle = self.color.blue;
ctx.fill();
ctx.closePath();
var r = 2; // corner radius
var r_width = 36; // tooltip box width
var r_height = 16; // tooltip box height
var a_width = 7; // arrow width
var a_height = 3; // arrow height
var radius = 10;
// temperature tooltip box
ctx.beginPath();
var a_x = Math.floor(pos.x) - 0.5;
var a_y = Math.floor(pos.y) - 25.5;
ctx.moveTo(a_x, a_y);
ctx.arcTo(r_width/2 + a_x, a_y, r_width/2 + a_x, 1 - a_y, r);
ctx.arcTo(r_width/2 + a_x, r_height + a_y, r_width/2 + a_x - 1, r_height + a_y, r);
ctx.lineTo( a_width/2 + a_x, r_height + a_y);
ctx.lineTo( a_x, r_height + a_height + a_y);
ctx.lineTo( a_x - a_width/2, r_height + a_y);
ctx.arcTo(a_x - r_width/2, r_height + a_y, a_x - r_width/2, r_height + a_y - 1, r);
ctx.arcTo(a_x - r_width/2, a_y, a_x - r_width/2 + 1, a_y, r);
ctx.lineTo(a_x, a_y);
ctx.fillStyle = self.color.blue;
ctx.fill();
ctx.font = '12px Arial';
//ctx.font = '12px "Helvitica Neue" lighter';
//ctx.font = '12px "Helvitica Neue", Helvitica, Arial, "Microsoft YaHei", sans-serif lighter';
ctx.textAlign = 'center';
ctx.fillStyle = '#fff';
ctx.fillText(pos.values, a_x, Math.floor(pos.y) - 13);
ctx.closePath();
ctx.restore();
};
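// animation loop: grow the curve to full height in 2% steps, then stop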
this.run = function(){
if( y_height < 100 ){
y_height += 2;
self.ctx.clearRect(0, 0, self.width, self.height);
self.makeScale();
self.drawTemp(y_height/100);
self.makeOy();
self.animation = requestAnimationFrame(self.run);
} else {
cancelAnimationFrame(this.animation);
}
};
this.animation = requestAnimationFrame(this.run);
this.canvas.on('mousemove', function(ev){
if( y_height >= 100 ){
var mouse = F.getMousePos(ev, $(this));
// position relative to the chart origin
var pos = { x: mouse.x - o_x, y: mouse.y - o_y };
var now_one = Math.ceil( (pos.x - each_width/2) / each_width);
if( pos.x > 0 && pos.y < 0 ){
self.ctx.clearRect(0, 0, self.width, self.height);
self.makeScale();
self.drawTemp(1);
self.makeOy();
self.makeHover(arr_pos[now_one]);
}
}
});
};
var drawWater = new batteryWater({
dom: $box,
timeScale: ['网络视频', '本地视频','电子书', '微博', '拍照', '游戏', '微信', '网页', '通话', '音乐'],
content: [
{name: '起始点亮', values: '29.20'},
{name: '网络视频1', values: '33.30'},
{name: '网络视频2', values: '33.60'},
{name: '本地视频1', values: '32.50'},
{name: '本地视频2', values: '31.80'},
{name: '电子书1', values: '33.30'},
{name: '电子书2', values: '32.50'},
{name: '微博1', values: '33.40'},
{name: '微博2', values: '33.70'},
{name: '拍照1', values: '37.30'},
{name: '拍照2', values: '38.30'},
{name: '游戏1', values: '38.50'},
{name: '游戏2', values: '38.00'},
{name: '微信1', values: '35.60'},
{name: '微信2', values: '40.00'},
{name: '网页1', values: '40.00'},
{name: '网页2', values: '33.20'},
{name: '通话1', values: '29.50'},
{name: '通话2', values: '29.60'},
{name: '音乐1', values: '37.00'},
{name: '音乐2', values: '37.00'},
],
color: {
blue: '#0096ff',
green: '#44be05',
yellow: '#ffc411',
red: '#f86117',
black: '#59676b',
gray: '#b3b3b3',
l_gray: '#e2e5e7'
}
});<|fim▁end|>
|
ctx.textAlign = 'center';
ctx.fillStyle = self.color.black;
ctx.fillText(this.timeScale[i], (2 * i + 1)* each_width, 20);
}
|
<|file_name|>struct_failed_image.go<|end_file_name|><|fim▁begin|>package mts<|fim▁hole|>
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// FailedImage is a nested struct in mts response
type FailedImage struct {
Code string `json:"Code" xml:"Code"`
Success string `json:"Success" xml:"Success"`
ImageFile ImageFile `json:"ImageFile" xml:"ImageFile"`
}<|fim▁end|>
| |
<|file_name|>rules.py<|end_file_name|><|fim▁begin|># Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import Iterable, Optional, Tuple
from pants.backend.python.target_types import PythonRequirementsField, PythonSources
from pants.backend.python.typecheck.mypy.skip_field import SkipMyPyField
from pants.backend.python.typecheck.mypy.subsystem import MyPy
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
PexRequirements,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.typecheck import TypecheckRequest, TypecheckResult, TypecheckResults
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.fs import CreateDigest, Digest, DigestContents, FileContent, MergeDigests
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target, TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.docutil import bracketed_docs_url
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class MyPyFieldSet(FieldSet):
required_fields = (PythonSources,)
sources: PythonSources
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipMyPyField).value
@dataclass(frozen=True)
class MyPyPartition:
root_targets: FrozenOrderedSet[Target]
closure: FrozenOrderedSet[Target]
interpreter_constraints: InterpreterConstraints
python_version_already_configured: bool
class MyPyRequest(TypecheckRequest):
field_set_type = MyPyFieldSet
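# Builds the MyPy command line: point MyPy at the target venv's interpreter, then
# pass user args, the config file, an optional --python-version, and the file list.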
def generate_argv(
mypy: MyPy,
typechecked_venv_pex: VenvPex,
*,
file_list_path: str,
python_version: Optional[str],
) -> Tuple[str, ...]:
args = [f"--python-executable={typechecked_venv_pex.python.argv0}", *mypy.args]
if mypy.config:
args.append(f"--config-file={mypy.config}")
if python_version:
args.append(f"--python-version={python_version}")
args.append(f"@{file_list_path}")
return tuple(args)
def check_and_warn_if_python_version_configured(
*, config: Optional[FileContent], args: Tuple[str, ...]
) -> bool:
configured = []
if config and b"python_version" in config.content:
configured.append(
f"`python_version` in {config.path} (which is used because of the "
"`[mypy].config` option)"
)
if "--py2" in args:
configured.append("`--py2` in the `--mypy-args` option")
if any(arg.startswith("--python-version") for arg in args):
configured.append("`--python-version` in the `--mypy-args` option")
if configured:
formatted_configured = " and you set ".join(configured)
logger.warning(
f"You set {formatted_configured}. Normally, Pants would automatically set this for you "
"based on your code's interpreter constraints "
f"({bracketed_docs_url('python-interpreter-compatibility')}). Instead, it will "
"use what you set.\n\n(Automatically setting the option allows Pants to partition your "
"targets by their constraints, so that, for example, you can run MyPy on Python 2-only "
"code and Python 3-only code at the same time. This feature may no longer work.)"
)
return bool(configured)
def determine_python_files(files: Iterable[str]) -> Tuple[str, ...]:
"""We run over all .py and .pyi files, but .pyi files take precedence.
MyPy will error if we say to run over the same module with both its .py and .pyi files, so we
must be careful to only use the .pyi stub.
"""
result: OrderedSet[str] = OrderedSet()
for f in files:<|fim▁hole|> py_file = f[:-1] # That is, strip the `.pyi` suffix to be `.py`.
result.discard(py_file)
result.add(f)
elif f.endswith(".py"):
pyi_file = f + "i"
if pyi_file not in result:
result.add(f)
return tuple(result)
@rule
async def mypy_typecheck_partition(partition: MyPyPartition, mypy: MyPy) -> TypecheckResult:
plugin_target_addresses = await Get(Addresses, UnparsedAddressInputs, mypy.source_plugins)
plugin_transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest(plugin_target_addresses)
)
plugin_requirements = PexRequirements.create_from_requirement_fields(
plugin_tgt[PythonRequirementsField]
for plugin_tgt in plugin_transitive_targets.closure
if plugin_tgt.has_field(PythonRequirementsField)
)
# If the user did not set `--python-version` already, we set it ourselves based on their code's
# interpreter constraints. This determines what AST is used by MyPy.
python_version = (
None
if partition.python_version_already_configured
else partition.interpreter_constraints.minimum_python_version()
)
# MyPy requires 3.5+ to run, but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6,
# and 3.7. However, typed-ast does not understand 3.8+, so instead we must run MyPy with
# Python 3.8+ when relevant. We only do this if <3.8 can't be used, as we don't want a
# loose requirement like `>=3.6` to result in requiring Python 3.8+, which would error if
# 3.8+ is not installed on the machine.
tool_interpreter_constraints = (
partition.interpreter_constraints
if (
mypy.options.is_default("interpreter_constraints")
and partition.interpreter_constraints.requires_python38_or_newer()
)
else InterpreterConstraints(mypy.interpreter_constraints)
)
plugin_sources_get = Get(
PythonSourceFiles, PythonSourceFilesRequest(plugin_transitive_targets.closure)
)
closure_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
roots_sources_get = Get(
SourceFiles, SourceFilesRequest(tgt.get(PythonSources) for tgt in partition.root_targets)
)
requirements_pex_get = Get(
Pex,
PexFromTargetsRequest,
PexFromTargetsRequest.for_requirements(
(tgt.address for tgt in partition.root_targets),
hardcoded_interpreter_constraints=partition.interpreter_constraints,
internal_only=True,
),
)
# TODO(John Sirois): Scope the extra requirements to the partition.
# Right now we just use a global set of extra requirements and these might not be compatible
# with all partitions. See: https://github.com/pantsbuild/pants/issues/11556
mypy_extra_requirements_pex_get = Get(
Pex,
PexRequest(
output_filename="mypy_extra_requirements.pex",
internal_only=True,
requirements=PexRequirements(mypy.extra_requirements),
interpreter_constraints=partition.interpreter_constraints,
),
)
mypy_pex_get = Get(
VenvPex,
PexRequest(
output_filename="mypy.pex",
internal_only=True,
main=mypy.main,
requirements=PexRequirements((*mypy.all_requirements, *plugin_requirements)),
interpreter_constraints=tool_interpreter_constraints,
),
)
config_files_get = Get(ConfigFiles, ConfigFilesRequest, mypy.config_request)
(
plugin_sources,
closure_sources,
roots_sources,
mypy_pex,
requirements_pex,
mypy_extra_requirements_pex,
config_files,
) = await MultiGet(
plugin_sources_get,
closure_sources_get,
roots_sources_get,
mypy_pex_get,
requirements_pex_get,
mypy_extra_requirements_pex_get,
config_files_get,
)
python_files = determine_python_files(roots_sources.snapshot.files)
file_list_path = "__files.txt"
file_list_digest_request = Get(
Digest,
CreateDigest([FileContent(file_list_path, "\n".join(python_files).encode())]),
)
typechecked_venv_pex_request = Get(
VenvPex,
PexRequest(
output_filename="typechecked_venv.pex",
internal_only=True,
pex_path=[requirements_pex, mypy_extra_requirements_pex],
interpreter_constraints=partition.interpreter_constraints,
),
)
typechecked_venv_pex, file_list_digest = await MultiGet(
typechecked_venv_pex_request, file_list_digest_request
)
merged_input_files = await Get(
Digest,
MergeDigests(
[
file_list_digest,
plugin_sources.source_files.snapshot.digest,
closure_sources.source_files.snapshot.digest,
typechecked_venv_pex.digest,
config_files.snapshot.digest,
]
),
)
all_used_source_roots = sorted(
set(itertools.chain(plugin_sources.source_roots, closure_sources.source_roots))
)
env = {
"PEX_EXTRA_SYS_PATH": ":".join(all_used_source_roots),
"MYPYPATH": ":".join(all_used_source_roots),
}
result = await Get(
FallibleProcessResult,
VenvPexProcess(
mypy_pex,
argv=generate_argv(
mypy,
typechecked_venv_pex,
file_list_path=file_list_path,
python_version=python_version,
),
input_digest=merged_input_files,
extra_env=env,
description=f"Run MyPy on {pluralize(len(python_files), 'file')}.",
level=LogLevel.DEBUG,
),
)
return TypecheckResult.from_fallible_process_result(
result, partition_description=str(sorted(str(c) for c in partition.interpreter_constraints))
)
# TODO(#10864): Improve performance, e.g. by leveraging the MyPy cache.
@rule(desc="Typecheck using MyPy", level=LogLevel.DEBUG)
async def mypy_typecheck(
request: MyPyRequest, mypy: MyPy, python_setup: PythonSetup
) -> TypecheckResults:
if mypy.skip:
return TypecheckResults([], typechecker_name="MyPy")
# We batch targets by their interpreter constraints to ensure, for example, that all Python 2
# targets run together and all Python 3 targets run together. We can only do this by setting
# the `--python-version` option, but we allow the user to set it as a safety valve. We warn if
# they've set the option.
config_files = await Get(ConfigFiles, ConfigFilesRequest, mypy.config_request)
config_content = await Get(DigestContents, Digest, config_files.snapshot.digest)
python_version_configured = check_and_warn_if_python_version_configured(
config=next(iter(config_content), None), args=mypy.args
)
# When determining how to batch by interpreter constraints, we must consider the entire
# transitive closure to get the final resulting constraints.
# TODO(#10863): Improve the performance of this.
transitive_targets_per_field_set = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address]))
for field_set in request.field_sets
)
interpreter_constraints_to_transitive_targets = defaultdict(set)
for transitive_targets in transitive_targets_per_field_set:
interpreter_constraints = InterpreterConstraints.create_from_targets(
transitive_targets.closure, python_setup
) or InterpreterConstraints(mypy.interpreter_constraints)
interpreter_constraints_to_transitive_targets[interpreter_constraints].add(
transitive_targets
)
partitions = []
for interpreter_constraints, all_transitive_targets in sorted(
interpreter_constraints_to_transitive_targets.items()
):
combined_roots: OrderedSet[Target] = OrderedSet()
combined_closure: OrderedSet[Target] = OrderedSet()
for transitive_targets in all_transitive_targets:
combined_roots.update(transitive_targets.roots)
combined_closure.update(transitive_targets.closure)
partitions.append(
MyPyPartition(
FrozenOrderedSet(combined_roots),
FrozenOrderedSet(combined_closure),
interpreter_constraints,
python_version_already_configured=python_version_configured,
)
)
partitioned_results = await MultiGet(
Get(TypecheckResult, MyPyPartition, partition) for partition in partitions
)
return TypecheckResults(partitioned_results, typechecker_name="MyPy")
def rules():
return [
*collect_rules(),
UnionRule(TypecheckRequest, MyPyRequest),
*pex_from_targets.rules(),
]<|fim▁end|>
|
if f.endswith(".pyi"):
|
<|file_name|>run-tests-spec.ts<|end_file_name|><|fim▁begin|>import Omni = require('../../lib/omni-sharp-server/omni');
import {Observable, CompositeDisposable} from "rx";
import {setupFeature, restoreBuffers, openEditor} from "../test-helpers";
describe('Run Tests', () => {
setupFeature(['features/run-tests']);
it('adds commands', () => {
var disposable = new CompositeDisposable();
runs(() => {
var commands: any = atom.commands;
expect(commands.registeredCommands['omnisharp-atom:run-all-tests']).toBeTruthy();
expect(commands.registeredCommands['omnisharp-atom:run-fixture-tests']).toBeTruthy();
expect(commands.registeredCommands['omnisharp-atom:run-single-test']).toBeTruthy();
expect(commands.registeredCommands['omnisharp-atom:run-last-test']).toBeTruthy();<|fim▁hole|> disposable.dispose();
});
});
// TODO: Test functionality
});<|fim▁end|>
| |
<|file_name|>handlers.rs<|end_file_name|><|fim▁begin|>use x11::xlib;
use window_system::WindowSystem;
use libc::{c_ulong};
pub struct KeyPressedHandler;
pub struct MapRequestHandler;
fn create_some_window(window_system: &WindowSystem, width: u32, height: u32, x: i32, y: i32) -> c_ulong {
let border_width = 2;
unsafe {
let border = xlib::XWhitePixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let background = xlib::XBlackPixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let window = xlib::XCreateSimpleWindow(window_system.display,
window_system.root,
x,
y,
width,
height,
border_width,
border,background);
xlib::XSelectInput(window_system.display,
window,
xlib::SubstructureNotifyMask | xlib::SubstructureRedirectMask);
return window;
}
}
impl MapRequestHandler {
pub fn new() -> MapRequestHandler {
return MapRequestHandler;
}
pub fn handle(&self, event: xlib::XEvent, window_system: &WindowSystem) {
let event = xlib::XMapRequestEvent::from(event);
let height: u32;
let width: u32;
let mut x: i32 = 0;
let y: i32 = 0;
if window_system.count.get() == 0 {
width = window_system.info.width as u32;
height = window_system.info.height as u32;
}
else {
width = (window_system.info.width / 2) as u32;
height = window_system.info.height as u32;
x = width as i32;
}
// create frame as a new parent for the window to be mapped<|fim▁hole|> unsafe {
// resize window to fit parent
xlib::XResizeWindow(window_system.display, event.window, width as u32, height as u32);
// make frame window parent of window to be mapped
xlib::XReparentWindow(window_system.display, event.window, frame, 0, 0);
// show frame
xlib::XMapWindow(window_system.display, frame);
// show window inside frame
xlib::XMapWindow(window_system.display, event.window);
}
window_system.count.set(window_system.count.get() + 1);
}
}
impl KeyPressedHandler {
pub fn new() -> KeyPressedHandler {
return KeyPressedHandler;
}
pub fn handle(&self, event: xlib::XEvent) {
let event = xlib::XKeyPressedEvent::from(event);
println!("KeyPressed {}", event.keycode);
}
}<|fim▁end|>
|
let frame = create_some_window(window_system, width, height, x, y);
|
<|file_name|>infoblox.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# (c) 2018, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import json
import argparse
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.net_tools.nios.api import get_connector
from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
try:
# disable urllib3 warnings so as to not interfere with printing to stdout
# which is read by ansible
import urllib3
urllib3.disable_warnings()
except ImportError:
sys.stdout.write('missing required library: urllib3\n')
sys.exit(-1)
CONFIG_FILES = [
'/etc/ansible/infoblox.yaml',
'/etc/ansible/infoblox.yml'<|fim▁hole|> parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true',
help='List host records from NIOS for use in Ansible')
parser.add_argument('--host',
help='List meta data about single host (not used)')
return parser.parse_args()
def main():
args = parse_args()
for config_file in CONFIG_FILES:
if os.path.exists(config_file):
break
else:
sys.stdout.write('unable to locate config file at /etc/ansible/infoblox.yaml\n')
sys.exit(-1)
try:
loader = DataLoader()
config = loader.load_from_file(config_file)
provider = config.get('provider') or {}
connector = get_connector(**provider)
except Exception as exc:
sys.stdout.write(to_text(exc))
sys.exit(-1)
if args.host:
host_filter = {'name': args.host}
else:
host_filter = {}
config_filters = config.get('filters')
if config_filters.get('view') is not None:
host_filter['view'] = config_filters['view']
if config_filters.get('extattrs'):
extattrs = normalize_extattrs(config_filters['extattrs'])
else:
extattrs = {}
hostvars = {}
inventory = {
'_meta': {
'hostvars': hostvars
}
}
return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
hosts = connector.get_object('record:host',
host_filter,
extattrs=extattrs,
return_fields=return_fields)
if hosts:
for item in hosts:
view = item['view']
name = item['name']
if view not in inventory:
inventory[view] = {'hosts': []}
inventory[view]['hosts'].append(name)
hostvars[name] = {
'view': view
}
if item.get('extattrs'):
for key, value in iteritems(flatten_extattrs(item['extattrs'])):
if key.startswith('ansible_'):
hostvars[name][key] = value
else:
if 'extattrs' not in hostvars:
hostvars[name]['extattrs'] = {}
hostvars[name]['extattrs'][key] = value
sys.stdout.write(json.dumps(inventory, indent=4))
sys.exit(0)
if __name__ == '__main__':
main()<|fim▁end|>
|
]
def parse_args():
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod config;
mod node;
mod step;
mod token;
<|fim▁hole|>pub use self::config::dump_config;
pub use self::node::dump_bare_node;
pub use self::node::dump_node;
pub use self::step::dump_step;
pub use self::token::dump_token;<|fim▁end|>
| |
<|file_name|>unlit_vertex.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>#[derive(Copy, Clone)]
pub struct UnlitVertex {
pub position: [f32; 3],
pub color: [f32; 3],
}
implement_vertex!(UnlitVertex, position, color);<|fim▁end|>
| |
<|file_name|>notetranslator_tests.py<|end_file_name|><|fim▁begin|>from nose.tools import *
from DeckMaker.notetranslator import NoteTranslator
def setup():
print "SETUP!"
def teardown():
print "TEAR DOWN!"
def test_basic():
t = NoteTranslator()
assert_equal(t.GetMidiCodeForHumans("E5"),64)
assert_equal(t.GetMidiCodeForHumans("C1"),12)
assert_equal(t.GetMidiCodeForHumans("Ab6"),80)
assert_equal(t.GetMidiCodeForHumans("Gb7"),90)
assert_equal(t.GetMidiCodeForHumans("D#2"),27)
pass
def test_hex():
t = NoteTranslator()
<|fim▁hole|> assert_equal(t.GetHexString(t.GetMidiCodeForHumans("Gb7")),"5a")
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("D#2")),"1b")
pass
def test_GetTriadCodes():
t = NoteTranslator()
assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("C4"), "minor", 3),[48, 53, 56])
assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("Ab2"), "major", 2),[32, 40, 35])
assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("G#6"), "minor", 1),[80, 83, 87])
def test_GetTriadHexCodeStrings():
t = NoteTranslator()
assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("C4"), "major", 1),['30', '34', '37'])
assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("Ab2"), "major", 2),['20', '28', '23'])
assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("G#6"), "minor", 1),['50', '53', '57'])<|fim▁end|>
|
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("E5")),"40")
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("C1")),"c")
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("Ab6")),"50")
|
<|file_name|>zmqabstractnotifier.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2015-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <zmq/zmqabstractnotifier.h>
#include <util.h>
CZMQAbstractNotifier::~CZMQAbstractNotifier()
{
assert(!psocket);
}
bool CZMQAbstractNotifier::NotifyBlock(const CBlockIndex * /*CBlockIndex*/)
{
return true;
}
bool CZMQAbstractNotifier::NotifyTransaction(const CTransaction &/*transaction*/)
{
return true;
}
bool CZMQAbstractNotifier::NotifyTransactionLock(const CTransactionRef &/*transaction*/)
{<|fim▁hole|> return true;
}<|fim▁end|>
| |
<|file_name|>set_scheduling.py<|end_file_name|><|fim▁begin|># Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting scheduling for virtual machine instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
class SetSchedulingInstances(base_classes.NoOutputAsyncMutator):
"""Set scheduling options for Google Compute Engine virtual machine instances.
"""
@staticmethod
def Args(parser):
restart_on_failure = parser.add_argument(
'--restart-on-failure',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='Restart instances if they are terminated by Compute Engine.')
restart_on_failure.detailed_help = """\
The instances will be restarted if they are terminated by Compute '
Engine. This does not affect terminations performed by the user.'
"""
instance_flags.AddMaintenancePolicyArgs(parser)
instance_flags.INSTANCE_ARG.AddArgument(parser)
@property
def service(self):
return self.compute.instances
@property
def method(self):
return 'SetScheduling'
@property
def resource_type(self):
return 'instances'<|fim▁hole|>
def CreateRequests(self, args):
"""Returns a list of request necessary for setting scheduling options."""
instance_ref = instance_flags.INSTANCE_ARG.ResolveAsResource(
args, self.resources, scope_lister=flags.GetDefaultScopeLister(
self.compute_client, self.project))
scheduling_options = self.messages.Scheduling()
scheduling_options.automaticRestart = args.restart_on_failure
if args.maintenance_policy:
scheduling_options.onHostMaintenance = (
self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
args.maintenance_policy))
request = self.messages.ComputeInstancesSetSchedulingRequest(
instance=instance_ref.Name(),
project=self.project,
scheduling=scheduling_options,
zone=instance_ref.zone)
return [request]
SetSchedulingInstances.detailed_help = {
'brief': ('Set scheduling options for Google Compute Engine virtual '
'machines'),
'DESCRIPTION': """\
*${command}* is used to configure scheduling options for Google Compute
Engine virtual machines.
""",
}<|fim▁end|>
| |
<|file_name|>KafkaKey.java<|end_file_name|><|fim▁begin|>package org.apache.hadoop.hive.kafka.camus;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;
/**
* The key for the MapReduce job that pulls from Kafka. Contains offsets and the
* checksum.
*/
public class KafkaKey implements WritableComparable<KafkaKey>, IKafkaKey {
public static final Text SERVER = new Text("server");
public static final Text SERVICE = new Text("service");
public static KafkaKey DUMMY_KEY = new KafkaKey();
private String leaderId = "";
private int partition = 0;
private long beginOffset = 0;
private long offset = 0;
private long checksum = 0;
private String topic = "";
private long time = 0;
private String server = "";
private String service = "";
private MapWritable partitionMap = new MapWritable();
/**
* dummy empty constructor
*/
public KafkaKey() {
this("dummy", "0", 0, 0, 0, 0);
}
public KafkaKey(KafkaKey other) {
this.partition = other.partition;
this.beginOffset = other.beginOffset;
this.offset = other.offset;
this.checksum = other.checksum;
this.topic = other.topic;
this.time = other.time;
this.server = other.server;
this.service = other.service;
this.partitionMap = new MapWritable(other.partitionMap);
}
public KafkaKey(String topic, String leaderId, int partition) {
this.set(topic, leaderId, partition, 0, 0, 0);
}
public KafkaKey(String topic, String leaderId, int partition, long beginOffset, long offset) {
this.set(topic, leaderId, partition, beginOffset, offset, 0);
}
public KafkaKey(String topic, String leaderId, int partition, long beginOffset, long offset, long checksum) {
this.set(topic, leaderId, partition, beginOffset, offset, checksum);
}
public void set(String topic, String leaderId, int partition, long beginOffset, long offset, long checksum) {
this.leaderId = leaderId;
this.partition = partition;
this.beginOffset = beginOffset;
this.offset = offset;
this.checksum = checksum;
this.topic = topic;
this.time = System.currentTimeMillis(); // if event can't be decoded,
// this time will be used for
// debugging.
}
public void clear() {
leaderId = "";
partition = 0;
beginOffset = 0;
offset = 0;
checksum = 0;
topic = "";
time = 0;
server = "";
service = "";
partitionMap = new MapWritable();
}
public String getServer() {
return partitionMap.get(SERVER).toString();
}
public void setServer(String newServer) {
partitionMap.put(SERVER, new Text(newServer));
}
public String getService() {
return partitionMap.get(SERVICE).toString();
}
public void setService(String newService) {
partitionMap.put(SERVICE, new Text(newService));
}
public long getTime() {
return time;
}
public void setTime(long time) {
this.time = time;
}
public String getTopic() {
return topic;
}
public String getLeaderId() {
return leaderId;
}
public int getPartition() {
return this.partition;
}
public long getBeginOffset() {
return this.beginOffset;
}
public void setOffset(long offset) {
this.offset = offset;
}
public long getOffset() {
return this.offset;
}
public long getChecksum() {
return this.checksum;
}
@Override
public long getMessageSize() {
Text key = new Text("message.size");
if (this.partitionMap.containsKey(key))
return ((LongWritable) this.partitionMap.get(key)).get();
else
return 1024; //default estimated size
}
public void setMessageSize(long messageSize) {
Text key = new Text("message.size");
put(key, new LongWritable(messageSize));
}
public void put(Writable key, Writable value) {
this.partitionMap.put(key, value);
}
public void addAllPartitionMap(MapWritable partitionMap) {
this.partitionMap.putAll(partitionMap);
}
public MapWritable getPartitionMap() {
return partitionMap;
}
@Override
public void readFields(DataInput in) throws IOException {
this.leaderId = UTF8.readString(in);
this.partition = in.readInt();
this.beginOffset = in.readLong();
this.offset = in.readLong();
this.checksum = in.readLong();
this.topic = in.readUTF();
this.time = in.readLong();
this.server = in.readUTF(); // left for legacy
this.service = in.readUTF(); // left for legacy
this.partitionMap = new MapWritable();
try {
this.partitionMap.readFields(in);
} catch (IOException e) {
this.setServer(this.server);
this.setService(this.service);
}
}
@Override
public void write(DataOutput out) throws IOException {
UTF8.writeString(out, this.leaderId);
out.writeInt(this.partition);
out.writeLong(this.beginOffset);
out.writeLong(this.offset);
out.writeLong(this.checksum);
out.writeUTF(this.topic);<|fim▁hole|> this.partitionMap.write(out);
}
@Override
public int compareTo(KafkaKey o) {
if (partition != o.partition) {
return partition = o.partition;
} else {
if (offset > o.offset) {
return 1;
} else if (offset < o.offset) {
return -1;
} else {
if (checksum > o.checksum) {
return 1;
} else if (checksum < o.checksum) {
return -1;
} else {
return 0;
}
}
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("topic=");
builder.append(topic);
builder.append(" partition=");
builder.append(partition);
builder.append("leaderId=");
builder.append(leaderId);
builder.append(" server=");
builder.append(server);
builder.append(" service=");
builder.append(service);
builder.append(" beginOffset=");
builder.append(beginOffset);
builder.append(" offset=");
builder.append(offset);
builder.append(" msgSize=");
builder.append(getMessageSize());
builder.append(" server=");
builder.append(server);
builder.append(" checksum=");
builder.append(checksum);
builder.append(" time=");
builder.append(time);
for (Map.Entry<Writable, Writable> e : partitionMap.entrySet()) {
builder.append(" " + e.getKey() + "=");
builder.append(e.getValue().toString());
}
return builder.toString();
}
}<|fim▁end|>
|
out.writeLong(this.time);
out.writeUTF(this.server); // left for legacy
out.writeUTF(this.service); // left for legacy
|
<|file_name|>roman-numerals-macro.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:roman_numerals.rs
// ignore-stage1
#![feature(phase)]
#[phase(plugin)]
extern crate roman_numerals;<|fim▁hole|>
pub fn main() {
assert_eq!(rn!(MMXV), 2015);
assert_eq!(rn!(MCMXCIX), 1999);
assert_eq!(rn!(XXV), 25);
assert_eq!(rn!(MDCLXVI), 1666);
assert_eq!(rn!(MMMDCCCLXXXVIII), 3888);
assert_eq!(rn!(MMXIV), 2014);
}<|fim▁end|>
| |
<|file_name|>he_dlg.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
MODX Evolution 1.1 = 6c5e02783a79b572e09102b05854077e
|
<|file_name|>CommentAdapter.java<|end_file_name|><|fim▁begin|>package gitmad.bitter.ui;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.TextView;
import gitmad.bitter.R;
import gitmad.bitter.model.Comment;
import gitmad.bitter.model.User;
import java.util.Map;
/**
* Created by prabh on 10/19/2015.
*/
public class CommentAdapter extends ArrayAdapter<Comment> {<|fim▁hole|> private Map<Comment, User> commentAuthors;
public CommentAdapter(Context context, Comment[] comments, Map<Comment,
User> commentAuthors) {
super(context, 0, comments); // TODO is the zero here correct?
this.comments = comments;
this.commentAuthors = commentAuthors;
}
public View getView(int position, View convertView, ViewGroup parent) {
Comment comment = comments[position];
if (convertView == null) {
convertView = LayoutInflater.from(getContext()).inflate(R.layout
.view_comment, parent, false);
}
TextView userText = (TextView) convertView.findViewById(R.id.user_text);
TextView commentText = (TextView) convertView.findViewById(R.id
.comment_text);
userText.setText(commentAuthors.get(comment).getName());
commentText.setText(comment.getText());
return convertView;
}
}<|fim▁end|>
|
private Comment[] comments;
|
<|file_name|>TestTimeCollectionTest.java<|end_file_name|><|fim▁begin|>package ru.job4j;
<|fim▁hole|>import org.junit.Test;
import java.util.*;
/**
* Test class.
* @author agavrikov
* @since 13.07.2017
* @version 1
*/
public class TestTimeCollectionTest {
/**
* Tests the add method.
*/
@Test
public void add() {
TestTimeCollection methods = new TestTimeCollection();
List<String> linkedList = new LinkedList<String>();
long timeStart = new Date().getTime();
long timeEnd = methods.add(linkedList, 1000000);
System.out.println(timeEnd - timeStart);
List<String> arrayList = new ArrayList<String>();
timeStart = new Date().getTime();
timeEnd = methods.add(arrayList, 1000000);
System.out.println(timeEnd - timeStart);
Set<String> treeSet = new TreeSet<String>();
timeStart = new Date().getTime();
timeEnd = methods.add(treeSet, 1000000);
System.out.println(timeEnd - timeStart);
}
/**
* Tests the delete method.
*/
@Test
public void delete() {
TestTimeCollection methods = new TestTimeCollection();
List<String> linkedList = new LinkedList<String>();
methods.add(linkedList, 100000);
long timeStart = new Date().getTime();
long timeEnd = methods.delete(linkedList, 10000);
System.out.println(timeEnd - timeStart);
List<String> arrayList = new ArrayList<String>();
methods.add(arrayList, 100000);
timeStart = new Date().getTime();
timeEnd = methods.delete(arrayList, 10000);
System.out.println(timeEnd - timeStart);
Set<String> treeSet = new TreeSet<String>();
methods.add(treeSet, 100000);
timeStart = new Date().getTime();
timeEnd = methods.delete(treeSet, 10000);
System.out.println(timeEnd - timeStart);
}
}<|fim▁end|>
| |
<|file_name|>p2p_disconnect_ban.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
class DisconnectBanTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point<|fim▁hole|> assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 3)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, node_id=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(node_id=id1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()<|fim▁end|>
|
self.nodes[1].setban(subnet="127.0.0.1", command="add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
from . import submission
<|fim▁hole|>from .submission.submit import SubmitTarget
from .submission.submit import PathType
from .submission.submit import SubmitConfig
from .submission.submit import submit_run
from .submission.submit import get_path_from_template
from .submission.submit import convert_path
from .submission.submit import make_run_dir_path
from .util import EasyDict
submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.<|fim▁end|>
|
from .submission.run_context import RunContext
|
<|file_name|>client.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright © 2017 papamitra
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*<|fim▁hole|> * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "client.h"
#include "resource.h"
namespace karuta {
std::unique_ptr<Resource> Client::resource_create(
const struct wl_interface* interface, uint32_t version, uint32_t id) {
struct wl_resource* resource =
wl_resource_create(client_, interface, version, id);
if (!resource) {
wl_client_post_no_memory(client_);
return std::unique_ptr<Resource>();
}
return std::unique_ptr<Resource>(new Resource{resource});
}
} // karuta<|fim▁end|>
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
<|file_name|>channel_1000_uvplot.py<|end_file_name|><|fim▁begin|>import os
import matplotlib.pyplot as plt
import numpy as np
from plotting_styles import onecolumn_figure, default_figure
from paths import paper1_figures_path
'''
Make a UV plot of the 1000th HI channel.
'''
uvw = np.load("/mnt/MyRAID/M33/VLA/14B-088/HI/"
"14B-088_HI_LSRK.ms.contsub_channel_1000.uvw.npy")
onecolumn_figure()
fig = plt.figure()
ax = fig.add_subplot(111) # , rasterized=True)
# plt.hexbin(uvw[0], uvw[1], bins='log', cmap='afmhot_r')
ax.scatter(uvw[0], uvw[1], s=0.1, color='k', rasterized=True)
plt.xlabel("U (m)")<|fim▁hole|>plt.ylim([-3200, 3200])
plt.grid()
plt.tight_layout()
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.pdf"))
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.png"))
plt.close()
default_figure()<|fim▁end|>
|
plt.ylabel("V (m)")
plt.xlim([-3200, 3500])
|
<|file_name|>request_forwarding_service.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go.
// source: request_forwarding_service.proto
// DO NOT EDIT!
/*
Package vault is a generated protocol buffer package.
It is generated from these files:
request_forwarding_service.proto
It has these top-level messages:
*/
package vault
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import forwarding "github.com/hashicorp/vault/helper/forwarding"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for RequestForwarding service
type RequestForwardingClient interface {
ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error)<|fim▁hole|>type requestForwardingClient struct {
cc *grpc.ClientConn
}
func NewRequestForwardingClient(cc *grpc.ClientConn) RequestForwardingClient {
return &requestForwardingClient{cc}
}
func (c *requestForwardingClient) ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error) {
out := new(forwarding.Response)
err := grpc.Invoke(ctx, "/vault.RequestForwarding/ForwardRequest", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for RequestForwarding service
type RequestForwardingServer interface {
ForwardRequest(context.Context, *forwarding.Request) (*forwarding.Response, error)
}
func RegisterRequestForwardingServer(s *grpc.Server, srv RequestForwardingServer) {
s.RegisterService(&_RequestForwarding_serviceDesc, srv)
}
func _RequestForwarding_ForwardRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(forwarding.Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(RequestForwardingServer).ForwardRequest(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/vault.RequestForwarding/ForwardRequest",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(RequestForwardingServer).ForwardRequest(ctx, req.(*forwarding.Request))
}
return interceptor(ctx, in, info, handler)
}
var _RequestForwarding_serviceDesc = grpc.ServiceDesc{
ServiceName: "vault.RequestForwarding",
HandlerType: (*RequestForwardingServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ForwardRequest",
Handler: _RequestForwarding_ForwardRequest_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "request_forwarding_service.proto",
}
func init() { proto.RegisterFile("request_forwarding_service.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 151 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x28, 0x4a, 0x2d, 0x2c,
0x4d, 0x2d, 0x2e, 0x89, 0x4f, 0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xc9, 0xcc, 0x4b, 0x8f, 0x2f,
0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4b,
0x2c, 0xcd, 0x29, 0x91, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5,
0xcf, 0x48, 0x2c, 0xce, 0xc8, 0x4c, 0xce, 0x2f, 0x2a, 0xd0, 0x07, 0xcb, 0xe9, 0x67, 0xa4, 0xe6,
0x14, 0xa4, 0x16, 0xe9, 0x23, 0x8c, 0xd0, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0x18, 0x60, 0x14,
0xc4, 0x25, 0x18, 0x04, 0xb1, 0xc4, 0x0d, 0xae, 0x40, 0xc8, 0x96, 0x8b, 0x0f, 0xca, 0x83, 0xca,
0x09, 0x09, 0xeb, 0x21, 0xf4, 0xeb, 0x41, 0x05, 0xa5, 0x44, 0x50, 0x05, 0x8b, 0x0b, 0xf2, 0xf3,
0x8a, 0x53, 0x95, 0x18, 0x92, 0xd8, 0xc0, 0x46, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x81,
0xce, 0x3f, 0x7f, 0xbf, 0x00, 0x00, 0x00,
}<|fim▁end|>
|
}
|
<|file_name|>_calculate_exchange_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CalculateExchangeOperations:
"""CalculateExchangeOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.reservations.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _post_initial(
self,
body: "_models.CalculateExchangeRequest",
**kwargs: Any
) -> Optional["_models.CalculateExchangeOperationResultResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.CalculateExchangeOperationResultResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._post_initial.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'CalculateExchangeRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response)
if response.status_code == 202:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_post_initial.metadata = {'url': '/providers/Microsoft.Capacity/calculateExchange'} # type: ignore
async def begin_post(
self,
body: "_models.CalculateExchangeRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.CalculateExchangeOperationResultResponse"]:
"""Calculates the refund amounts and price of the new purchases.
Calculates price for exchanging ``Reservations`` if there are no policy errors.
:param body: Request containing purchases and refunds that need to be executed.
:type body: ~azure.mgmt.reservations.models.CalculateExchangeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CalculateExchangeOperationResultResponse or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.reservations.models.CalculateExchangeOperationResultResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CalculateExchangeOperationResultResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:<|fim▁hole|> )
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_post.metadata = {'url': '/providers/Microsoft.Capacity/calculateExchange'} # type: ignore<|fim▁end|>
|
raw_result = await self._post_initial(
body=body,
cls=lambda x,y,z: x,
**kwargs
|
<|file_name|>GPUComputationRenderer.js<|end_file_name|><|fim▁begin|>/**
* @author yomboprime https://github.com/yomboprime
*
* GPUComputationRenderer, based on SimulationRenderer by zz85
*
* The GPUComputationRenderer uses the concept of variables. These variables are RGBA float textures that hold 4 floats
* for each compute element (texel).
*
* Each variable has a fragment shader that defines the computation made to obtain the variable in question.
* You can use as many variables as you need, and declare dependencies so you can use textures of other variables in the shader
* (the sampler uniforms are added automatically). Most variables will need themselves as a dependency.
*
* The renderer actually has two render targets per variable, to implement ping-pong. Textures from the current frame are used
* as inputs to render the textures of the next frame.
*
* The render targets of the variables can be used as input textures for your visualization shaders.
*
* Variable names should be valid identifiers and should not collide with THREE GLSL used identifiers.
* A common approach is to prefix the variable name with 'texture', e.g. texturePosition, textureVelocity...
*
* The size of the computation (sizeX * sizeY) is defined as 'resolution' automatically in the shader. For example:
* #define resolution vec2( 1024.0, 1024.0 )
*
* -------------
*
* Basic use:
*
* // Initialization...
*
* // Create computation renderer
* var gpuCompute = new GPUComputationRenderer( 1024, 1024, renderer );
*
* // Create initial state float textures
* var pos0 = gpuCompute.createTexture();
* var vel0 = gpuCompute.createTexture();
* // and fill in here the texture data...
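* // (illustrative sketch, not part of the original snippet: createTexture()
* // returns a DataTexture whose Float32Array is exposed as image.data,
* // holding 4 floats per texel, so it can be filled like this)
* // var data = pos0.image.data;
* // for ( var i = 0; i < data.length; i += 4 ) {
* //     data[ i + 0 ] = Math.random(); // R, e.g. initial x
* //     data[ i + 1 ] = Math.random(); // G, e.g. initial y
* //     data[ i + 2 ] = 0.0;           // B
* //     data[ i + 3 ] = 1.0;           // A
* // }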
*
* // Add texture variables
* var velVar = gpuCompute.addVariable( "textureVelocity", fragmentShaderVel, pos0 );
* var posVar = gpuCompute.addVariable( "texturePosition", fragmentShaderPos, vel0 );
*
* // Add variable dependencies
* gpuCompute.setVariableDependencies( velVar, [ velVar, posVar ] );
* gpuCompute.setVariableDependencies( posVar, [ velVar, posVar ] );
*
* // Add custom uniforms
* velVar.material.uniforms.time = { value: 0.0 };
*
* // Check for completeness
* var error = gpuCompute.init();
* if ( error !== null ) {
* console.error( error );
* }
*
*
* // In each frame...
*
* // Compute!
* gpuCompute.compute();
*
* // Update texture uniforms in your visualization materials with the gpu renderer output
* myMaterial.uniforms.myTexture.value = gpuCompute.getCurrentRenderTarget( posVar ).texture;
*
* // Do your rendering
* renderer.render( myScene, myCamera );
*
* -------------
*
* Also, you can use utility functions to create ShaderMaterial and perform computations (rendering between textures)
* Note that the shaders can have multiple input textures.
*
* var myFilter1 = gpuCompute.createShaderMaterial( myFilterFragmentShader1, { theTexture: { value: null } } );
* var myFilter2 = gpuCompute.createShaderMaterial( myFilterFragmentShader2, { theTexture: { value: null } } );
*
* var inputTexture = gpuCompute.createTexture();
*
* // Fill in here inputTexture...
*
* myFilter1.uniforms.theTexture.value = inputTexture;
*
* var myRenderTarget = gpuCompute.createRenderTarget();
* myFilter2.uniforms.theTexture.value = myRenderTarget.texture;
*
* var outputRenderTarget = gpuCompute.createRenderTarget();
*
* // Now use the output texture where you want:
* myMaterial.uniforms.map.value = outputRenderTarget.texture;
*
* // And compute each frame, before rendering to screen:
* gpuCompute.doRenderTarget( myFilter1, myRenderTarget );
* gpuCompute.doRenderTarget( myFilter2, outputRenderTarget );
*
*
*
* @param {int} sizeX Computation problem size is always 2d: sizeX * sizeY elements.
* @param {int} sizeY Computation problem size is always 2d: sizeX * sizeY elements.
* @param {WebGLRenderer} renderer The renderer
*/
import {
Camera,
ClampToEdgeWrapping,
DataTexture,
FloatType,
HalfFloatType,
Mesh,
NearestFilter,
PlaneBufferGeometry,
RGBAFormat,
Scene,
ShaderMaterial,
WebGLRenderTarget
} from "./three.module.js";
var GPUComputationRenderer = function ( sizeX, sizeY, renderer ) {
this.variables = [];
this.currentTextureIndex = 0;
var scene = new Scene();
var camera = new Camera();
camera.position.z = 1;
var passThruUniforms = {
passThruTexture: { value: null }
};
var passThruShader = createShaderMaterial( getPassThroughFragmentShader(), passThruUniforms );
var mesh = new Mesh( new PlaneBufferGeometry( 2, 2 ), passThruShader );
scene.add( mesh );
this.addVariable = function ( variableName, computeFragmentShader, initialValueTexture ) {
var material = this.createShaderMaterial( computeFragmentShader );
var variable = {
name: variableName,
initialValueTexture: initialValueTexture,
material: material,
dependencies: null,
renderTargets: [],
wrapS: null,
wrapT: null,
minFilter: NearestFilter,
magFilter: NearestFilter
};
this.variables.push( variable );
return variable;
};
this.setVariableDependencies = function ( variable, dependencies ) {
variable.dependencies = dependencies;
};
this.init = function () {
if ( ! renderer.extensions.get( "OES_texture_float" ) &&
! renderer.capabilities.isWebGL2 ) {
return "No OES_texture_float support for float textures.";
}
if ( renderer.capabilities.maxVertexTextures === 0 ) {
return "No support for vertex shader textures.";
}
for ( var i = 0; i < this.variables.length; i ++ ) {
var variable = this.variables[ i ];
// Creates render targets and initializes them with the input texture
variable.renderTargets[ 0 ] = this.createRenderTarget( sizeX, sizeY, variable.wrapS, variable.wrapT, variable.minFilter, variable.magFilter );
variable.renderTargets[ 1 ] = this.createRenderTarget( sizeX, sizeY, variable.wrapS, variable.wrapT, variable.minFilter, variable.magFilter );
this.renderTexture( variable.initialValueTexture, variable.renderTargets[ 0 ] );
this.renderTexture( variable.initialValueTexture, variable.renderTargets[ 1 ] );
// Adds dependencies uniforms to the ShaderMaterial
var material = variable.material;
var uniforms = material.uniforms;
if ( variable.dependencies !== null ) {
for ( var d = 0; d < variable.dependencies.length; d ++ ) {
var depVar = variable.dependencies[ d ];
if ( depVar.name !== variable.name ) {
// Checks if variable exists
var found = false;
for ( var j = 0; j < this.variables.length; j ++ ) {
if ( depVar.name === this.variables[ j ].name ) {
found = true;
break;
}
}
if ( ! found ) {
return "Variable dependency not found. Variable=" + variable.name + ", dependency=" + depVar.name;
}
}
uniforms[ depVar.name ] = { value: null };
material.fragmentShader = "\nuniform sampler2D " + depVar.name + ";\n" + material.fragmentShader;
}
}
}
this.currentTextureIndex = 0;
return null;
};
this.compute = function () {
var currentTextureIndex = this.currentTextureIndex;
var nextTextureIndex = this.currentTextureIndex === 0 ? 1 : 0;
for ( var i = 0, il = this.variables.length; i < il; i ++ ) {
var variable = this.variables[ i ];
// Sets texture dependencies uniforms
if ( variable.dependencies !== null ) {
var uniforms = variable.material.uniforms;
for ( var d = 0, dl = variable.dependencies.length; d < dl; d ++ ) {
var depVar = variable.dependencies[ d ];
uniforms[ depVar.name ].value = depVar.renderTargets[ currentTextureIndex ].texture;
}
}
// Performs the computation for this variable
this.doRenderTarget( variable.material, variable.renderTargets[ nextTextureIndex ] );
}
this.currentTextureIndex = nextTextureIndex;
};
this.getCurrentRenderTarget = function ( variable ) {
return variable.renderTargets[ this.currentTextureIndex ];
};
this.getAlternateRenderTarget = function ( variable ) {
return variable.renderTargets[ this.currentTextureIndex === 0 ? 1 : 0 ];
};
function addResolutionDefine( materialShader ) {
materialShader.defines.resolution = 'vec2( ' + sizeX.toFixed( 1 ) + ', ' + sizeY.toFixed( 1 ) + " )";
}
this.addResolutionDefine = addResolutionDefine;
// The following functions can be used to compute things manually
function createShaderMaterial( computeFragmentShader, uniforms ) {
uniforms = uniforms || {};
var material = new ShaderMaterial( {
uniforms: uniforms,
vertexShader: getPassThroughVertexShader(),
fragmentShader: computeFragmentShader
} );
addResolutionDefine( material );
return material;
}
this.createShaderMaterial = createShaderMaterial;
<|fim▁hole|> sizeXTexture = sizeXTexture || sizeX;
sizeYTexture = sizeYTexture || sizeY;
wrapS = wrapS || ClampToEdgeWrapping;
wrapT = wrapT || ClampToEdgeWrapping;
minFilter = minFilter || NearestFilter;
magFilter = magFilter || NearestFilter;
var renderTarget = new WebGLRenderTarget( sizeXTexture, sizeYTexture, {
wrapS: wrapS,
wrapT: wrapT,
minFilter: minFilter,
magFilter: magFilter,
format: RGBAFormat,
type: ( /(iPad|iPhone|iPod)/g.test( navigator.userAgent ) ) ? HalfFloatType : FloatType,
stencilBuffer: false,
depthBuffer: false
} );
return renderTarget;
};
this.createTexture = function () {
var a = new Float32Array( sizeX * sizeY * 4 );
var texture = new DataTexture( a, sizeX, sizeY, RGBAFormat, FloatType );
texture.needsUpdate = true;
return texture;
};
this.renderTexture = function ( input, output ) {
// Takes a texture and renders it out into a render target
// input = Texture
// output = RenderTarget
passThruUniforms.passThruTexture.value = input;
this.doRenderTarget( passThruShader, output );
passThruUniforms.passThruTexture.value = null;
};
this.doRenderTarget = function ( material, output ) {
var currentRenderTarget = renderer.getRenderTarget();
mesh.material = material;
renderer.setRenderTarget( output );
renderer.render( scene, camera );
mesh.material = passThruShader;
renderer.setRenderTarget( currentRenderTarget );
};
// Shaders
function getPassThroughVertexShader() {
return "void main() {\n" +
"\n" +
" gl_Position = vec4( position, 1.0 );\n" +
"\n" +
"}\n";
}
function getPassThroughFragmentShader() {
return "uniform sampler2D passThruTexture;\n" +
"\n" +
"void main() {\n" +
"\n" +
" vec2 uv = gl_FragCoord.xy / resolution.xy;\n" +
"\n" +
" gl_FragColor = texture2D( passThruTexture, uv );\n" +
"\n" +
"}\n";
}
};
export { GPUComputationRenderer };<|fim▁end|>
|
this.createRenderTarget = function ( sizeXTexture, sizeYTexture, wrapS, wrapT, minFilter, magFilter ) {
|
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>SECRET_KEY = 'not-anymore'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'<|fim▁hole|>USE_I18N = True
USE_L10N = True
USE_TZ = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
INSTALLED_APPS = [
'reverse_unique',
'reverse_unique_tests',
]<|fim▁end|>
| |
<|file_name|>selective-preloading-strategy.service.spec.ts<|end_file_name|><|fim▁begin|>import { TestBed, inject } from '@angular/core/testing';
import { SelectivePreloadingStrategyService } from './selective-preloading-strategy.service';
describe('SelectivePreloadingStrategyService', () => {
beforeEach(() => {
TestBed.configureTestingModule({
providers: [SelectivePreloadingStrategyService]
});
});
it('should be created', inject([SelectivePreloadingStrategyService], (service: SelectivePreloadingStrategyService) => {<|fim▁hole|> expect(service).toBeTruthy();
}));
});<|fim▁end|>
| |
<|file_name|>tf_export_test.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf_export tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
def _test_function(unused_arg=0):
pass
def _test_function2(unused_arg=0):
pass
class TestClassA(object):
pass
class TestClassB(TestClassA):
pass
class ValidateExportTest(test.TestCase):
"""Tests for tf_export class."""
class MockModule(object):
def __init__(self, name):
self.__name__ = name
def setUp(self):
self._modules = []
def tearDown(self):
for name in self._modules:
del sys.modules[name]
self._modules = []
for symbol in [_test_function, _test_function2, TestClassA, TestClassB]:
if hasattr(symbol, '_tf_api_names'):
del symbol._tf_api_names
if hasattr(symbol, '_tf_api_names_v1'):
del symbol._tf_api_names_v1
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
sys.modules[name] = mock_module
self._modules.append(name)
return mock_module
def testExportSingleFunction(self):
export_decorator = tf_export.tf_export('nameA', 'nameB')
decorated_function = export_decorator(_test_function)
self.assertEquals(decorated_function, _test_function)
self.assertEquals(('nameA', 'nameB'), decorated_function._tf_api_names)
def testExportMultipleFunctions(self):
export_decorator1 = tf_export.tf_export('nameA', 'nameB')
export_decorator2 = tf_export.tf_export('nameC', 'nameD')
decorated_function1 = export_decorator1(_test_function)
decorated_function2 = export_decorator2(_test_function2)
self.assertEquals(decorated_function1, _test_function)
self.assertEquals(decorated_function2, _test_function2)
self.assertEquals(('nameA', 'nameB'), decorated_function1._tf_api_names)
self.assertEquals(('nameC', 'nameD'), decorated_function2._tf_api_names)
def testExportClasses(self):
export_decorator_a = tf_export.tf_export('TestClassA1')
export_decorator_a(TestClassA)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
export_decorator_b = tf_export.tf_export('TestClassB1')
export_decorator_b(TestClassB)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertEquals(('TestClassB1',), TestClassB._tf_api_names)
def testExportSingleConstant(self):
module1 = self._CreateMockModule('module1')
export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator.export_constant('module1', 'test_constant')
self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
module1._tf_api_constants)
def testExportMultipleConstants(self):
module1 = self._CreateMockModule('module1')
module2 = self._CreateMockModule('module2')
test_constant1 = 123
test_constant2 = 'abc'
test_constant3 = 0.5
export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
export_decorator1.export_constant('module1', test_constant1)
export_decorator2.export_constant('module2', test_constant2)
export_decorator3.export_constant('module2', test_constant3)
self.assertEquals([(('NAME_A', 'NAME_B'), 123)],
module1._tf_api_constants)
self.assertEquals([(('NAME_C', 'NAME_D'), 'abc'),
(('NAME_E', 'NAME_F'), 0.5)],
module2._tf_api_constants)
def testRaisesExceptionIfAlreadyHasAPINames(self):
_test_function._tf_api_names = ['abc']
export_decorator = tf_export.tf_export('nameA', 'nameB')
with self.assertRaises(tf_export.SymbolAlreadyExposedError):
export_decorator(_test_function)
def testRaisesExceptionIfInvalidSymbolName(self):
# TensorFlow code is not allowed to export symbols under package
# tf.estimator
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('estimator.invalid')
# All symbols exported by Estimator must be under tf.estimator package.
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('Estimator.invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid.estimator')
def testRaisesExceptionIfInvalidV1SymbolName(self):
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('valid', v1=['estimator.invalid'])
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('estimator.valid', v1=['invalid'])
def testOverridesFunction(self):
_test_function2._tf_api_names = ['abc']
export_decorator = tf_export.tf_export(
'nameA', 'nameB', overrides=[_test_function2])
export_decorator(_test_function)
# _test_function overrides _test_function2. So, _tf_api_names
# should be removed from _test_function2.
self.assertFalse(hasattr(_test_function2, '_tf_api_names'))
def testMultipleDecorators(self):
def get_wrapper(func):
def wrapper(*unused_args, **unused_kwargs):
pass
return tf_decorator.make_decorator(func, wrapper)
decorated_function = get_wrapper(_test_function)
export_decorator = tf_export.tf_export('nameA', 'nameB')
exported_function = export_decorator(decorated_function)
self.assertEquals(decorated_function, exported_function)
self.assertEquals(('nameA', 'nameB'), _test_function._tf_api_names)<|fim▁hole|>if __name__ == '__main__':
test.main()<|fim▁end|>
| |
<|file_name|>Main.java<|end_file_name|><|fim▁begin|>package me.giantcrack.gs;
import me.giantcrack.eco.BalanceCmd;
import me.giantcrack.eco.BuyCmd;
import me.giantcrack.eco.EcoFile;
import me.giantcrack.eco.EconomyCmd;
import me.giantcrack.eco.SellCmd;
import me.giantcrack.eco.ValueCmd;
import org.bukkit.Bukkit;
import org.bukkit.ChatColor;
import org.bukkit.command.Command;
import org.bukkit.command.CommandSender;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.player.PlayerJoinEvent;
import org.bukkit.plugin.java.JavaPlugin;
public class Main extends JavaPlugin implements Listener {
public void onEnable() {
getCommand("buy").setExecutor(new BuyCmd());
getCommand("sell").setExecutor(new SellCmd());
getCommand("economy").setExecutor(new EconomyCmd());
getCommand("value").setExecutor(new ValueCmd());
getCommand("balance").setExecutor(new BalanceCmd());
getCommand("add").setExecutor(new AddItemCmd());
getCommand("edit").setExecutor(new EditItemCmd());
if (Config.getInstance().get("buysell") == null) {
Config.getInstance().set("buysell", 0.5);
}
ItemManager.getInstance().setUp();
Bukkit.getServer().getPluginManager().registerEvents(this, this);
}
public void onLoad() {
EcoFile.getInstance().setup(this);
ConverterFile.getInstance().setup(this);
Config.getInstance().setup(this);
}
@EventHandler
public void onPlayerJoin(PlayerJoinEvent e) {
if (EcoFile.getInstance().get("Economy." + e.getPlayer().getUniqueId() + ".balance") == null) {
EcoFile.getInstance().setBalance(e.getPlayer(), 0.0);
}
}
public boolean onCommand(CommandSender sender, Command cmd, String commandLabel, String[] args) {
if (cmd.getName().equalsIgnoreCase("convertitems")) {
if (sender.isOp()) {
ConverterFile.getInstance().convertOldConfig();
sender.sendMessage(ChatColor.GREEN + "File Converted!");
return true;
} else {
sender.sendMessage(ChatColor.RED + "You don't have permission!");
return true;
}
}
return false;
}
/***
* TODO LIST
*1.Finish Commands
<|fim▁hole|>
}<|fim▁end|>
|
*2.Test
*3.Do lava plugin
*/
|
<|file_name|>train.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from transH import TransH
import pickle
import numpy as np
import sys<|fim▁hole|>
def main():
if len(sys.argv) != 3:
print '[Usage] python train.py train_data validation_data'
exit(0)
train_data, valid_data = sys.argv[1:]
X, E, R = loadData(train_data)
V = loadData(valid_data, E=E, R=R, mode='valid')
# parameters
gamma = 1
k = 50
alpha = 0.1
b = 5000
c = 0.25
transH = TransH(len(E), len(R), gamma, k, alpha, b, c)
transH.fit(X, validationset=V)
w = open('transH.model', 'w')
pickle.dump((transH, E, R), w)
w.close()
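# Note added for clarity (not in the original source): loadData() below turns a
# TSV file of "head<TAB>relation<TAB>tail" triples into index triples. In
# 'train' mode it also builds the entity map E and relation map R on the fly;
# in 'valid' mode the maps built during training are reused, so validation
# triples share the same indices as the training set.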
def loadData(file_path, E=None, R=None, mode='train'):
if mode == 'train':
E, R = {}, {}
e_ind, r_ind = 0, 0
X = []
f = open(file_path, 'r')
for line in f:
h, r, t = line.strip().split('\t')
if not h in E:
E[h] = e_ind
e_ind += 1
if not t in E:
E[t] = e_ind
e_ind +=1
if not r in R:
R[r] = r_ind
r_ind += 1
X.append((E[h], R[r], E[t]))
f.close()
return np.array(X), E, R
elif mode == 'valid':
X = []
f = open(file_path, 'r')
for line in f:
h, r, t = line.strip().split('\t')
X.append((E[h], R[r], E[t]))
f.close()
return np.array(X)
if __name__ == "__main__":
main()<|fim▁end|>
| |
<|file_name|>issue-64391.rs<|end_file_name|><|fim▁begin|>// Regression test for Issue #64391. The goal here is that this
// function compiles. In the past, due to incorrect drop order for<|fim▁hole|>// `drop-order/drop-order-for-temporary-in-tail-return-expr.rs`.
//
// check-pass
// edition:2018
async fn add(x: u32, y: u32) -> u32 {
async { x + y }.await
}
fn main() { }<|fim▁end|>
|
// temporaries in the tail expression, we failed to compile this
// example. The drop order itself is directly tested in
|
<|file_name|>cd8b0318-8942-4a64-b2c9-ee7c253d6b7d-1445822636718.js<|end_file_name|><|fim▁begin|>jQuery("#simulation")
.on("click", ".s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d .click", function(event, data) {
var jEvent, jFirer, cases;
if(data === undefined) { data = event; }
jEvent = jimEvent(event);
jFirer = jEvent.getEventFirer();
if(jFirer.is("#s-Label_58")) {
cases = [
{
"blocks": [
{
"actions": [
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_58": {
"attributes": {
"font-size": "12.0pt",
"font-family": "Roboto-Regular,Arial"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_58 .valign": {
"attributes": {
"vertical-align": "middle",
"text-align": "left"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_58 span": {
"attributes": {
"color": "#80B8F1",
"text-align": "left",
"text-decoration": "none",
"font-family": "Roboto-Regular,Arial",
"font-size": "12.0pt"
}
}
} ],
"exectype": "serial",
"delay": 0
},
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_59": {
"attributes": {
"font-size": "20.0pt",
"font-family": "IOS8-Icons-Regular,Arial"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_59 .valign": {
"attributes": {
"vertical-align": "middle",
"text-align": "left"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_59 span": {
"attributes": {
"color": "#80B8F1",
"text-align": "left",
"text-decoration": "none",
"font-family": "IOS8-Icons-Regular,Arial",
"font-size": "20.0pt"
}
}
} ],
"exectype": "serial",
"delay": 0
},
{
"action": "jimPause",
"parameter": {
"pause": 300
},
"exectype": "serial",
"delay": 0
},
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_58": {
"attributes": {
"font-size": "12.0pt",
"font-family": "Roboto-Regular,Arial"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_58 .valign": {
"attributes": {
"vertical-align": "middle",
"text-align": "left"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_58 span": {
"attributes": {
"color": "#007DFF",
"text-align": "left",
"text-decoration": "none",
"font-family": "Roboto-Regular,Arial",
"font-size": "12.0pt"
}
}
} ],
"exectype": "serial",
"delay": 0
},
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_59": {
"attributes": {
"font-size": "20.0pt",
"font-family": "IOS8-Icons-Regular,Arial"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_59 .valign": {
"attributes": {
"vertical-align": "middle",
"text-align": "left"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Label_59 span": {
"attributes": {
"color": "#157EFB",
"text-align": "left",
"text-decoration": "none",
"font-family": "IOS8-Icons-Regular,Arial",
"font-size": "20.0pt"
}
}
} ],
"exectype": "serial",
"delay": 0
}
]
}
],
"exectype": "serial",
"delay": 0
}
];
event.data = data;
jEvent.launchCases(cases);
} else if(jFirer.is("#s-cover")) {
cases = [
{
"blocks": [
{
"actions": [
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-cover": {
"attributes": {
"opacity": "0.75"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-cover": {
"attributes-ie": {
"-ms-filter": "progid:DXImageTransform.Microsoft.Alpha(Opacity=75)",
"filter": "alpha(opacity=75)"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-cover": {
"attributes-ie8lte": {
"-ms-filter": "progid:DXImageTransform.Microsoft.Alpha(Opacity=75)",
"filter": "alpha(opacity=75)"
}
}
} ],
"exectype": "serial",
"delay": 0
},
{
"action": "jimPause",
"parameter": {
"pause": 300<|fim▁hole|> "delay": 0
},
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-cover": {
"attributes": {
"opacity": "1.0"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-cover": {
"attributes-ie": {
"-ms-filter": "progid:DXImageTransform.Microsoft.Alpha(Opacity=100)",
"filter": "alpha(opacity=100)"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-cover": {
"attributes-ie8lte": {
"-ms-filter": "progid:DXImageTransform.Microsoft.Alpha(Opacity=100)",
"filter": "alpha(opacity=100)"
}
}
} ],
"exectype": "serial",
"delay": 0
}
]
}
],
"exectype": "serial",
"delay": 0
}
];
event.data = data;
jEvent.launchCases(cases);
} else if(jFirer.is("#s-Hotspot_1")) {
cases = [
{
"blocks": [
{
"actions": [
{
"action": "jimNavigation",
"parameter": {
"target": "screens/6709a53d-60b3-4498-bf73-977706fff4da"
},
"exectype": "serial",
"delay": 0
}
]
}
],
"exectype": "serial",
"delay": 0
}
];
event.data = data;
jEvent.launchCases(cases);
} else if(jFirer.is("#s-Hotspot_3")) {
cases = [
{
"blocks": [
{
"actions": [
{
"action": "jimNavigation",
"parameter": {
"target": "screens/27852e19-fc20-4cac-8d96-13d00ac70f75"
},
"exectype": "serial",
"delay": 0
}
]
}
],
"exectype": "serial",
"delay": 0
}
];
event.data = data;
jEvent.launchCases(cases);
} else if(jFirer.is("#s-Button_1")) {
cases = [
{
"blocks": [
{
"actions": [
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Button_1": {
"attributes": {
"font-size": "12.0pt",
"font-family": "Roboto-Regular,Arial"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Button_1 .valign": {
"attributes": {
"vertical-align": "middle",
"text-align": "center"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Button_1 span": {
"attributes": {
"color": "#80B8F1",
"text-align": "center",
"text-decoration": "none",
"font-family": "Roboto-Regular,Arial",
"font-size": "12.0pt"
}
}
} ],
"exectype": "serial",
"delay": 0
},
{
"action": "jimPause",
"parameter": {
"pause": 300
},
"exectype": "serial",
"delay": 0
},
{
"action": "jimChangeStyle",
"parameter": [ {
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Button_1": {
"attributes": {
"font-size": "12.0pt",
"font-family": "Roboto-Regular,Arial"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Button_1 .valign": {
"attributes": {
"vertical-align": "middle",
"text-align": "center"
}
}
},{
"#s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d #s-Button_1 span": {
"attributes": {
"color": "#007DFF",
"text-align": "center",
"text-decoration": "none",
"font-family": "Roboto-Regular,Arial",
"font-size": "12.0pt"
}
}
} ],
"exectype": "serial",
"delay": 0
}
]
}
],
"exectype": "serial",
"delay": 0
}
];
event.data = data;
jEvent.launchCases(cases);
}
})
.on("pageload", ".s-cd8b0318-8942-4a64-b2c9-ee7c253d6b7d .pageload", function(event, data) {
var jEvent, jFirer, cases;
if(data === undefined) { data = event; }
jEvent = jimEvent(event);
jFirer = jEvent.getEventFirer();
if(jFirer.is("#s-Label_35")) {
cases = [
{
"blocks": [
{
"actions": [
{
"action": "jimSetValue",
"parameter": {
"target": "#s-Label_35",
"value": {
"action": "jimConcat",
"parameter": [ {
"action": "jimSubstring",
"parameter": [ {
"action": "jimSystemTime"
},"0","5" ]
}," PM" ]
}
},
"exectype": "serial",
"delay": 0
}
]
}
],
"exectype": "serial",
"delay": 0
}
];
event.data = data;
jEvent.launchCases(cases);
}
});<|fim▁end|>
|
},
"exectype": "serial",
|
<|file_name|>objectclick2.py<|end_file_name|><|fim▁begin|>from server.util import ScriptManager
<|fim▁hole|>def objectClick2_11758(player, obId, obX, obY):
player.getPA().openUpBank()<|fim▁end|>
|
def objectClick2_2213(player, obId, obX, obY):
player.getPA().openUpBank()
|
<|file_name|>SystemsList.js<|end_file_name|><|fim▁begin|>import React, { Component } from 'react';
import classnames from 'classnames';
import withStyles from 'isomorphic-style-loader/lib/withStyles';
import LinearProgress from 'material-ui/LinearProgress';
import history from '../../core/history';
import Link from '../../components/Link/Link';
import Image from '../../components/Image';
import { Grid, Row, Col, ListGroup, ListGroupItem } from 'react-bootstrap';
import { AutoAffix } from 'react-overlays';
import s from './SystemsList.css';
import configs from '../../config/systems.json';
const System = ({name, title, image, available, visible, onClick}) => {
const classNames = classnames(s.availabilityCheckIcon, "fa", available ? 'fa-check' : 'fa-close', available ? s.available : s.unavailable);
return (
<div key={`system-${name}`}
className={classnames(s.system, !visible ? s.hidden : s.show )}
onClick={() => onClick(`/system/${name}`)}
>
<Image src={image} alt={title} />
<div className={s.systemTitle}>{title}</div>
<i className={classNames} />
</div>
)
};
class Systems extends Component
{
constructor(...props) {
super(...props);
this.state = {
show: 'available'
};
this.onHandleClick = ::this.onHandleClick;
this.filter = ::this.filter;
}
onHandleClick(url) {
history.push(url);
}
filter(system) {
const { checkList } = this.props;
if (this.state.show == 'all') {
return true;
} else if (this.state.show == 'available' && checkList[system.name]) {
return true;
} else if (this.state.show == 'not_available' && !checkList[system.name]) {
return true;
}
return false;
}
setFilter(filter) {
this.setState({ show: filter });
}
renderSystems() {
const { isChecking, checkList } = this.props;
if (isChecking || checkList == undefined) {
return (<LinearProgress />);
}
return (
<Row className={s.list}>
<Col xs={12} md={8}>
{
configs.map((system) => {
const isAvailable = checkList[system.name];
const isVisible = this.filter(system);
return (
<System key={`emu-${system.name}`} {...system}
available={isAvailable}
visible={isVisible}
onClick={this.onHandleClick}
/>
)
})
}
</Col>
<Col xs={6} md={3}>
<AutoAffix viewportOffsetTop={15} container={this}>
<ListGroup>
<ListGroupItem onClick={this.setFilter.bind(this, 'available')}
active={this.state.show == 'available'}
>
Show only available systems
</ListGroupItem>
<ListGroupItem onClick={this.setFilter.bind(this, 'not_available')}
active={this.state.show == 'not_available'}
>
Show only not available systems
</ListGroupItem>
<ListGroupItem onClick={this.setFilter.bind(this, 'all')}
active={this.state.show == 'all'}
>
Show all systems
</ListGroupItem>
</ListGroup>
</AutoAffix>
</Col>
</Row>
);
}
render() {
return (
<div className={s.container}>
<h1>Systems</h1>
<Grid>
{this.renderSystems()}
</Grid>
</div>
)
}<|fim▁hole|>export default withStyles(s)(Systems);<|fim▁end|>
|
}
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from . import geodis_api_ws
from . import geodis_api_find_localite_ws
from . import geodis_api_edi
from . import geodis_api_rest_ws
from . import geodis_encoder_ws
from . import geodis_encoder_edi
from . import geodis_encoder_rest_ws
from . import geodis_decoder_ws
from . import geodis_decoder_rest_ws
from . import geodis_transport_ws
from . import geodis_transport_edi
from . import geodis_transport_rest_ws<|fim▁end|>
|
from . import geodis
from . import geodis_common_ws
|
<|file_name|>ip.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Internet Protocol (IP) addresses.
//!
//! This module contains functions useful for parsing, formatting, and
//! manipulating IP addresses.
#![allow(missing_docs)]
pub use self::IpAddr::*;
use fmt;
use io::{mod, IoResult, IoError};
use io::net;
use iter::Iterator;
use option::{Option, None, Some};
use result::{Ok, Err};
use str::{FromStr, StrPrelude};
use slice::{CloneSlicePrelude, SlicePrelude};
use vec::Vec;
pub type Port = u16;
#[deriving(PartialEq, Eq, Clone, Hash)]
pub enum IpAddr {
Ipv4Addr(u8, u8, u8, u8),
Ipv6Addr(u16, u16, u16, u16, u16, u16, u16, u16)
}
impl fmt::Show for IpAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
Ipv4Addr(a, b, c, d) =>
write!(fmt, "{}.{}.{}.{}", a, b, c, d),
// Ipv4 Compatible address
Ipv6Addr(0, 0, 0, 0, 0, 0, g, h) => {
write!(fmt, "::{}.{}.{}.{}", (g >> 8) as u8, g as u8,
(h >> 8) as u8, h as u8)
}
// Ipv4-Mapped address
Ipv6Addr(0, 0, 0, 0, 0, 0xFFFF, g, h) => {
write!(fmt, "::FFFF:{}.{}.{}.{}", (g >> 8) as u8, g as u8,
(h >> 8) as u8, h as u8)
}
Ipv6Addr(a, b, c, d, e, f, g, h) =>
write!(fmt, "{:x}:{:x}:{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
a, b, c, d, e, f, g, h)
}
}
}
#[deriving(PartialEq, Eq, Clone, Hash)]
pub struct SocketAddr {
pub ip: IpAddr,
pub port: Port,
}
impl fmt::Show for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.ip {
Ipv4Addr(..) => write!(f, "{}:{}", self.ip, self.port),
Ipv6Addr(..) => write!(f, "[{}]:{}", self.ip, self.port),
}
}
}
struct Parser<'a> {
// parsing as ASCII, so can use byte array
s: &'a [u8],
pos: uint,
}
impl<'a> Parser<'a> {
fn new(s: &'a str) -> Parser<'a> {
Parser {
s: s.as_bytes(),
pos: 0,
}
}
fn is_eof(&self) -> bool {
self.pos == self.s.len()
}
// Commit only if parser returns Some
fn read_atomically<T>(&mut self, cb: |&mut Parser| -> Option<T>)
-> Option<T> {
let pos = self.pos;
let r = cb(self);
if r.is_none() {
self.pos = pos;
}
r
}
// Commit only if parser read till EOF
fn read_till_eof<T>(&mut self, cb: |&mut Parser| -> Option<T>)
-> Option<T> {
self.read_atomically(|p| {
match cb(p) {
Some(x) => if p.is_eof() {Some(x)} else {None},
None => None,
}
})
}
// Return result of first successful parser
fn read_or<T>(&mut self, parsers: &mut [|&mut Parser| -> Option<T>])
-> Option<T> {
for pf in parsers.iter_mut() {
match self.read_atomically(|p: &mut Parser| (*pf)(p)) {
Some(r) => return Some(r),
None => {}
}
}
None
}
// Apply 3 parsers sequentially
fn read_seq_3<A,
B,
C>(
&mut self,
pa: |&mut Parser| -> Option<A>,
pb: |&mut Parser| -> Option<B>,
pc: |&mut Parser| -> Option<C>)
-> Option<(A, B, C)> {
self.read_atomically(|p| {
let a = pa(p);
let b = if a.is_some() { pb(p) } else { None };
let c = if b.is_some() { pc(p) } else { None };
match (a, b, c) {
(Some(a), Some(b), Some(c)) => Some((a, b, c)),
_ => None
}
})
}
// Read next char
fn read_char(&mut self) -> Option<char> {
if self.is_eof() {
None
} else {
let r = self.s[self.pos] as char;
self.pos += 1;
Some(r)
}
}
// Return char and advance iff next char is equal to requested
fn read_given_char(&mut self, c: char) -> Option<char> {
self.read_atomically(|p| {
match p.read_char() {
Some(next) if next == c => Some(next),
_ => None,
}
})
}
// Read digit
fn read_digit(&mut self, radix: u8) -> Option<u8> {
fn parse_digit(c: char, radix: u8) -> Option<u8> {
let c = c as u8;
// assuming radix is either 10 or 16
if c >= b'0' && c <= b'9' {
Some(c - b'0')
} else if radix > 10 && c >= b'a' && c < b'a' + (radix - 10) {
Some(c - b'a' + 10)
} else if radix > 10 && c >= b'A' && c < b'A' + (radix - 10) {
Some(c - b'A' + 10)
} else {
None
}
}
self.read_atomically(|p| {
p.read_char().and_then(|c| parse_digit(c, radix))
})
}
fn read_number_impl(&mut self, radix: u8, max_digits: u32, upto: u32) -> Option<u32> {
let mut r = 0u32;
let mut digit_count = 0;
loop {
match self.read_digit(radix) {
Some(d) => {
r = r * (radix as u32) + (d as u32);
digit_count += 1;
if digit_count > max_digits || r >= upto {
return None
}
}
None => {
if digit_count == 0 {
return None
} else {
return Some(r)
}
}
};
}
}
// Read number, failing if max_digits of number value exceeded
fn read_number(&mut self, radix: u8, max_digits: u32, upto: u32) -> Option<u32> {
self.read_atomically(|p| p.read_number_impl(radix, max_digits, upto))
}
fn read_ipv4_addr_impl(&mut self) -> Option<IpAddr> {
let mut bs = [0u8, ..4];
let mut i = 0;
while i < 4 {
if i != 0 && self.read_given_char('.').is_none() {
return None;
}
let octet = self.read_number(10, 3, 0x100).map(|n| n as u8);
match octet {
Some(d) => bs[i] = d,
None => return None,
};
i += 1;
}
Some(Ipv4Addr(bs[0], bs[1], bs[2], bs[3]))
}
// Read IPv4 address
fn read_ipv4_addr(&mut self) -> Option<IpAddr> {
self.read_atomically(|p| p.read_ipv4_addr_impl())
}
fn read_ipv6_addr_impl(&mut self) -> Option<IpAddr> {
fn ipv6_addr_from_head_tail(head: &[u16], tail: &[u16]) -> IpAddr {
assert!(head.len() + tail.len() <= 8);
let mut gs = [0u16, ..8];
gs.clone_from_slice(head);
gs[mut 8 - tail.len() .. 8].clone_from_slice(tail);
Ipv6Addr(gs[0], gs[1], gs[2], gs[3], gs[4], gs[5], gs[6], gs[7])
}
fn read_groups(p: &mut Parser, groups: &mut [u16, ..8], limit: uint) -> (uint, bool) {
let mut i = 0;
while i < limit {
if i < limit - 1 {
let ipv4 = p.read_atomically(|p| {
if i == 0 || p.read_given_char(':').is_some() {
p.read_ipv4_addr()
} else {
None
}
});
match ipv4 {
Some(Ipv4Addr(a, b, c, d)) => {
groups[i + 0] = (a as u16 << 8) | (b as u16);
groups[i + 1] = (c as u16 << 8) | (d as u16);
return (i + 2, true);
}
_ => {}
}
}
let group = p.read_atomically(|p| {
if i == 0 || p.read_given_char(':').is_some() {
p.read_number(16, 4, 0x10000).map(|n| n as u16)
} else {
None
}
});
match group {
Some(g) => groups[i] = g,
None => return (i, false)
}
i += 1;
}
(i, false)
}
let mut head = [0u16, ..8];
let (head_size, head_ipv4) = read_groups(self, &mut head, 8);
if head_size == 8 {
return Some(Ipv6Addr(
head[0], head[1], head[2], head[3],
head[4], head[5], head[6], head[7]))
}
// IPv4 part is not allowed before `::`
if head_ipv4 {
return None
}
// read `::` if previous code parsed less than 8 groups
if !self.read_given_char(':').is_some() || !self.read_given_char(':').is_some() {
return None;
}
let mut tail = [0u16, ..8];
let (tail_size, _) = read_groups(self, &mut tail, 8 - head_size);
Some(ipv6_addr_from_head_tail(head[..head_size], tail[..tail_size]))
}
fn read_ipv6_addr(&mut self) -> Option<IpAddr> {
self.read_atomically(|p| p.read_ipv6_addr_impl())
}
fn read_ip_addr(&mut self) -> Option<IpAddr> {
let ipv4_addr = |p: &mut Parser| p.read_ipv4_addr();
let ipv6_addr = |p: &mut Parser| p.read_ipv6_addr();
self.read_or(&mut [ipv4_addr, ipv6_addr])
}
fn read_socket_addr(&mut self) -> Option<SocketAddr> {
let ip_addr = |p: &mut Parser| {
let ipv4_p = |p: &mut Parser| p.read_ip_addr();
let ipv6_p = |p: &mut Parser| {
let open_br = |p: &mut Parser| p.read_given_char('[');
let ip_addr = |p: &mut Parser| p.read_ipv6_addr();
let clos_br = |p: &mut Parser| p.read_given_char(']');
p.read_seq_3::<char, IpAddr, char>(open_br, ip_addr, clos_br)
.map(|t| match t { (_, ip, _) => ip })
};
p.read_or(&mut [ipv4_p, ipv6_p])
};
let colon = |p: &mut Parser| p.read_given_char(':');
let port = |p: &mut Parser| p.read_number(10, 5, 0x10000).map(|n| n as u16);
// host, colon, port
self.read_seq_3::<IpAddr, char, u16>(ip_addr, colon, port)
.map(|t| match t { (ip, _, port) => SocketAddr { ip: ip, port: port } })
}
}
impl FromStr for IpAddr {
fn from_str(s: &str) -> Option<IpAddr> {
Parser::new(s).read_till_eof(|p| p.read_ip_addr())
}
}
impl FromStr for SocketAddr {
fn from_str(s: &str) -> Option<SocketAddr> {
Parser::new(s).read_till_eof(|p| p.read_socket_addr())
}
}
/// A trait for objects which can be converted or resolved to one or more `SocketAddr` values.
///
/// Implementing types must provide at least one of the `to_socket_addr` or `to_socket_addr_all`
/// methods; the trivial counterpart is then available automatically.
///
/// This trait is used for generic address resolution when constructing network objects.
/// By default it is implemented for the following types:
///
/// * `SocketAddr` - `to_socket_addr` is identity function.
///
/// * `(IpAddr, u16)` - `to_socket_addr` constructs `SocketAddr` trivially.
///
/// * `(&str, u16)` - the string should be either a string representation of an IP address
/// expected by `FromStr` implementation for `IpAddr` or a host name.
///
/// For the former, `to_socket_addr_all` returns a vector with a single element corresponding
/// to that IP address joined with the given port.
///
/// For the latter, it tries to resolve the host name and returns a vector of all IP addresses
/// for the host name, each joined with the given port.
///
/// * `&str` - the string should be either a string representation of a `SocketAddr` as
/// expected by its `FromStr` implementation or a `<host_name>:<port>` pair,
/// where `<port>` is a `u16` value.
///
/// For the former, `to_socket_addr_all` returns a vector with a single element corresponding
/// to that socket address.
///
/// For the latter, it tries to resolve the host name and returns a vector of all IP addresses
/// for the host name, each joined with the port.
///
///
/// This trait allows constructing network objects like `TcpStream` or `UdpSocket` easily with
/// values of various types for the bind/connection address. It is needed because sometimes
/// one type is more appropriate than the other: for simple uses a string like `"localhost:12345"`
/// is much nicer than manual construction of the corresponding `SocketAddr`, but sometimes
/// `SocketAddr` value is *the* main source of the address, and converting it to some other type
/// (e.g. a string) just for it to be converted back to `SocketAddr` in constructor methods
/// is pointless.
///
/// Some examples:
///
/// ```rust,no_run
/// # #![allow(unused_must_use)]
///
/// use std::io::{TcpStream, TcpListener};
/// use std::io::net::udp::UdpSocket;
/// use std::io::net::ip::{Ipv4Addr, SocketAddr};
///
/// fn main() {
/// // The following lines are equivalent modulo possible "localhost" name resolution
/// // differences
/// let tcp_s = TcpStream::connect(SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 12345 });
/// let tcp_s = TcpStream::connect((Ipv4Addr(127, 0, 0, 1), 12345u16));
/// let tcp_s = TcpStream::connect(("127.0.0.1", 12345u16));
/// let tcp_s = TcpStream::connect(("localhost", 12345u16));
/// let tcp_s = TcpStream::connect("127.0.0.1:12345");
/// let tcp_s = TcpStream::connect("localhost:12345");
///
/// // TcpListener::bind(), UdpSocket::bind() and UdpSocket::send_to() behave similarly
/// let tcp_l = TcpListener::bind("localhost:12345");
///
/// let mut udp_s = UdpSocket::bind(("127.0.0.1", 23451u16)).unwrap();
/// udp_s.send_to([7u8, 7u8, 7u8].as_slice(), (Ipv4Addr(127, 0, 0, 1), 23451u16));
/// }
/// ```
pub trait ToSocketAddr {
    /// Converts this object to a single socket address value.
///
/// If more than one value is available, this method returns the first one. If no
/// values are available, this method returns an `IoError`.
///
/// By default this method delegates to `to_socket_addr_all` method, taking the first
/// item from its result.
fn to_socket_addr(&self) -> IoResult<SocketAddr> {
self.to_socket_addr_all()
.and_then(|v| v.into_iter().next().ok_or_else(|| IoError {
kind: io::InvalidInput,
desc: "no address available",
detail: None
}))
}
/// Converts this object to all available socket address values.
///
    /// Some values, like a host name string, naturally correspond to multiple IP addresses.
/// This method tries to return all available addresses corresponding to this object.
///
/// By default this method delegates to `to_socket_addr` method, creating a singleton
/// vector from its result.
#[inline]
fn to_socket_addr_all(&self) -> IoResult<Vec<SocketAddr>> {
self.to_socket_addr().map(|a| vec![a])
}
}
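// A minimal sketch (commented out, not part of this module) of implementing the
// trait for a custom type: providing just `to_socket_addr` is enough, since
// `to_socket_addr_all` falls back to a singleton vector. `LoopbackPort` is a
// hypothetical type invented for illustration.
//
// struct LoopbackPort(u16);
//
// impl ToSocketAddr for LoopbackPort {
//     fn to_socket_addr(&self) -> IoResult<SocketAddr> {
//         let LoopbackPort(port) = *self;
//         Ok(SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: port })
//     }
//     // to_socket_addr_all() is supplied by the default implementation
// }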
impl ToSocketAddr for SocketAddr {
#[inline]
fn to_socket_addr(&self) -> IoResult<SocketAddr> { Ok(*self) }
}
impl ToSocketAddr for (IpAddr, u16) {
#[inline]
fn to_socket_addr(&self) -> IoResult<SocketAddr> {
let (ip, port) = *self;
Ok(SocketAddr { ip: ip, port: port })
}
}
fn resolve_socket_addr(s: &str, p: u16) -> IoResult<Vec<SocketAddr>> {
net::get_host_addresses(s)
.map(|v| v.into_iter().map(|a| SocketAddr { ip: a, port: p }).collect())
}
fn parse_and_resolve_socket_addr(s: &str) -> IoResult<Vec<SocketAddr>> {
macro_rules! try_opt(
($e:expr, $msg:expr) => (
match $e {
Some(r) => r,
None => return Err(IoError {
kind: io::InvalidInput,
desc: $msg,
detail: None
})
}
)
)
// split the string by ':' and convert the second part to u16
let mut parts_iter = s.rsplitn(2, ':');
let port_str = try_opt!(parts_iter.next(), "invalid socket address");
let host = try_opt!(parts_iter.next(), "invalid socket address");
let port: u16 = try_opt!(FromStr::from_str(port_str), "invalid port value");
resolve_socket_addr(host, port)
}
impl<'a> ToSocketAddr for (&'a str, u16) {
fn to_socket_addr_all(&self) -> IoResult<Vec<SocketAddr>> {
let (host, port) = *self;
// try to parse the host as a regular IpAddr first
match FromStr::from_str(host) {
Some(addr) => return Ok(vec![SocketAddr {
ip: addr,
port: port
}]),
None => {}
}
resolve_socket_addr(host, port)
}
}
// accepts strings like 'localhost:12345'
impl<'a> ToSocketAddr for &'a str {
fn to_socket_addr(&self) -> IoResult<SocketAddr> {
// try to parse as a regular SocketAddr first
match FromStr::from_str(*self) {
Some(addr) => return Ok(addr),
None => {}
}
parse_and_resolve_socket_addr(*self)
.and_then(|v| v.into_iter().next()
.ok_or_else(|| IoError {
kind: io::InvalidInput,
desc: "no address available",
detail: None
})
)
}
fn to_socket_addr_all(&self) -> IoResult<Vec<SocketAddr>> {
// try to parse as a regular SocketAddr first
match FromStr::from_str(*self) {
Some(addr) => return Ok(vec![addr]),
None => {}
}
parse_and_resolve_socket_addr(*self)
}
}
#[cfg(test)]
mod test {
use prelude::*;
use super::*;
use str::FromStr;
#[test]
fn test_from_str_ipv4() {
assert_eq!(Some(Ipv4Addr(127, 0, 0, 1)), FromStr::from_str("127.0.0.1"));
assert_eq!(Some(Ipv4Addr(255, 255, 255, 255)), FromStr::from_str("255.255.255.255"));
assert_eq!(Some(Ipv4Addr(0, 0, 0, 0)), FromStr::from_str("0.0.0.0"));
// out of range
let none: Option<IpAddr> = FromStr::from_str("256.0.0.1");
assert_eq!(None, none);
// too short
let none: Option<IpAddr> = FromStr::from_str("255.0.0");
assert_eq!(None, none);
// too long
let none: Option<IpAddr> = FromStr::from_str("255.0.0.1.2");
assert_eq!(None, none);
// no number between dots
let none: Option<IpAddr> = FromStr::from_str("255.0..1");
assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv6() {
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 0)), FromStr::from_str("0:0:0:0:0:0:0:0"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1)), FromStr::from_str("0:0:0:0:0:0:0:1"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1)), FromStr::from_str("::1"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 0)), FromStr::from_str("::"));
assert_eq!(Some(Ipv6Addr(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)),
FromStr::from_str("2a02:6b8::11:11"));
// too long group
let none: Option<IpAddr> = FromStr::from_str("::00000");
assert_eq!(None, none);
// too short
let none: Option<IpAddr> = FromStr::from_str("1:2:3:4:5:6:7");
assert_eq!(None, none);
// too long
let none: Option<IpAddr> = FromStr::from_str("1:2:3:4:5:6:7:8:9");
assert_eq!(None, none);
// triple colon
let none: Option<IpAddr> = FromStr::from_str("1:2:::6:7:8");
assert_eq!(None, none);
// two double colons
let none: Option<IpAddr> = FromStr::from_str("1:2::6::8");
assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv4_in_ipv6() {
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0, 49152, 545)),
FromStr::from_str("::192.0.2.33"));
assert_eq!(Some(Ipv6Addr(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)),
FromStr::from_str("::FFFF:192.0.2.33"));
assert_eq!(Some(Ipv6Addr(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)),
FromStr::from_str("64:ff9b::192.0.2.33"));
assert_eq!(Some(Ipv6Addr(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)),
FromStr::from_str("2001:db8:122:c000:2:2100:192.0.2.33"));
// colon after v4
let none: Option<IpAddr> = FromStr::from_str("::127.0.0.1:");
assert_eq!(None, none);
// not enough groups
let none: Option<IpAddr> = FromStr::from_str("1.2.3.4.5:127.0.0.1");
assert_eq!(None, none);
// too many groups
let none: Option<IpAddr> =
FromStr::from_str("1.2.3.4.5:6:7:127.0.0.1");
assert_eq!(None, none);
}
#[test]
fn test_from_str_socket_addr() {
assert_eq!(Some(SocketAddr { ip: Ipv4Addr(77, 88, 21, 11), port: 80 }),
FromStr::from_str("77.88.21.11:80"));
assert_eq!(Some(SocketAddr { ip: Ipv6Addr(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), port: 53 }),
FromStr::from_str("[2a02:6b8:0:1::1]:53"));
assert_eq!(Some(SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0x7F00, 1), port: 22 }),
FromStr::from_str("[::127.0.0.1]:22"));
// without port
let none: Option<SocketAddr> = FromStr::from_str("127.0.0.1");
assert_eq!(None, none);
// without port
let none: Option<SocketAddr> = FromStr::from_str("127.0.0.1:");
assert_eq!(None, none);
// wrong brackets around v4
let none: Option<SocketAddr> = FromStr::from_str("[127.0.0.1]:22");
assert_eq!(None, none);
// port out of range
let none: Option<SocketAddr> = FromStr::from_str("127.0.0.1:123456");
assert_eq!(None, none);
}
#[test]
fn ipv6_addr_to_string() {
let a1 = Ipv6Addr(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
assert!(a1.to_string() == "::ffff:192.0.2.128".to_string() ||
a1.to_string() == "::FFFF:192.0.2.128".to_string());
assert_eq!(Ipv6Addr(8, 9, 10, 11, 12, 13, 14, 15).to_string(),
"8:9:a:b:c:d:e:f".to_string());
}
#[test]
fn to_socket_addr_socketaddr() {
let a = SocketAddr { ip: Ipv4Addr(77, 88, 21, 11), port: 12345 };
assert_eq!(Ok(a), a.to_socket_addr());
assert_eq!(Ok(vec![a]), a.to_socket_addr_all());
}
#[test]
fn to_socket_addr_ipaddr_u16() {
let a = Ipv4Addr(77, 88, 21, 11);
let p = 12345u16;
let e = SocketAddr { ip: a, port: p };
assert_eq!(Ok(e), (a, p).to_socket_addr());
assert_eq!(Ok(vec![e]), (a, p).to_socket_addr_all());
}
#[test]
fn to_socket_addr_str_u16() {
let a = SocketAddr { ip: Ipv4Addr(77, 88, 21, 11), port: 24352 };
assert_eq!(Ok(a), ("77.88.21.11", 24352u16).to_socket_addr());
assert_eq!(Ok(vec![a]), ("77.88.21.11", 24352u16).to_socket_addr_all());
let a = SocketAddr { ip: Ipv6Addr(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), port: 53 };
assert_eq!(Ok(a), ("2a02:6b8:0:1::1", 53).to_socket_addr());
assert_eq!(Ok(vec![a]), ("2a02:6b8:0:1::1", 53).to_socket_addr_all());
let a = SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 23924 };<|fim▁hole|> assert!(("localhost", 23924u16).to_socket_addr_all().unwrap().contains(&a));
}
#[test]
fn to_socket_addr_str() {
let a = SocketAddr { ip: Ipv4Addr(77, 88, 21, 11), port: 24352 };
assert_eq!(Ok(a), "77.88.21.11:24352".to_socket_addr());
assert_eq!(Ok(vec![a]), "77.88.21.11:24352".to_socket_addr_all());
let a = SocketAddr { ip: Ipv6Addr(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), port: 53 };
assert_eq!(Ok(a), "[2a02:6b8:0:1::1]:53".to_socket_addr());
assert_eq!(Ok(vec![a]), "[2a02:6b8:0:1::1]:53".to_socket_addr_all());
let a = SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 23924 };
assert!("localhost:23924".to_socket_addr_all().unwrap().contains(&a));
}
}<|fim▁end|>
| |
<|file_name|>extension_sync_event_observer.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/extensions/api/sync_file_system/extension_sync_event_observer.h"
#include "base/lazy_instance.h"
#include "chrome/browser/extensions/api/sync_file_system/sync_file_system_api_helpers.h"
#include "chrome/browser/extensions/extension_service.h"
#include "chrome/browser/sync_file_system/sync_event_observer.h"
#include "chrome/browser/sync_file_system/sync_file_system_service.h"
#include "chrome/browser/sync_file_system/sync_file_system_service_factory.h"
#include "chrome/browser/sync_file_system/syncable_file_system_util.h"
#include "chrome/common/extensions/api/sync_file_system.h"
#include "content/public/browser/browser_context.h"
#include "extensions/browser/event_router.h"
#include "extensions/browser/extension_system.h"
#include "extensions/browser/extension_system_provider.h"
#include "extensions/browser/extensions_browser_client.h"
#include "webkit/browser/fileapi/file_system_url.h"
#include "webkit/common/fileapi/file_system_util.h"
using sync_file_system::SyncEventObserver;
namespace extensions {
static base::LazyInstance<
BrowserContextKeyedAPIFactory<ExtensionSyncEventObserver> > g_factory =
LAZY_INSTANCE_INITIALIZER;
// static
BrowserContextKeyedAPIFactory<ExtensionSyncEventObserver>*
ExtensionSyncEventObserver::GetFactoryInstance() {
return g_factory.Pointer();
}
ExtensionSyncEventObserver::ExtensionSyncEventObserver(
content::BrowserContext* context)
: browser_context_(context), sync_service_(NULL) {}
void ExtensionSyncEventObserver::InitializeForService(
sync_file_system::SyncFileSystemService* sync_service) {
DCHECK(sync_service);
if (sync_service_ != NULL) {
DCHECK_EQ(sync_service_, sync_service);
return;
}
sync_service_ = sync_service;
sync_service_->AddSyncEventObserver(this);
}
ExtensionSyncEventObserver::~ExtensionSyncEventObserver() {}
void ExtensionSyncEventObserver::Shutdown() {
if (sync_service_ != NULL)
sync_service_->RemoveSyncEventObserver(this);
}
std::string ExtensionSyncEventObserver::GetExtensionId(
const GURL& app_origin) {
const Extension* app = ExtensionSystem::Get(browser_context_)
->extension_service()
->GetInstalledApp(app_origin);
if (!app) {
// The app is uninstalled or disabled.
return std::string();
}
return app->id();
}
void ExtensionSyncEventObserver::OnSyncStateUpdated(
const GURL& app_origin,
sync_file_system::SyncServiceState state,
const std::string& description) {
// Convert state and description into SyncState Object.
api::sync_file_system::ServiceInfo service_info;
service_info.state = SyncServiceStateToExtensionEnum(state);
service_info.description = description;
scoped_ptr<base::ListValue> params(
api::sync_file_system::OnServiceStatusChanged::Create(service_info));
BroadcastOrDispatchEvent(
app_origin,
api::sync_file_system::OnServiceStatusChanged::kEventName,
params.Pass());
}
void ExtensionSyncEventObserver::OnFileSynced(
const fileapi::FileSystemURL& url,
sync_file_system::SyncFileStatus status,
sync_file_system::SyncAction action,
sync_file_system::SyncDirection direction) {
scoped_ptr<base::ListValue> params(new base::ListValue());
// For now we always assume events come only for files (not directories).
params->Append(CreateDictionaryValueForFileSystemEntry(
url, sync_file_system::SYNC_FILE_TYPE_FILE));
// Status, SyncAction and any optional notes to go here.
api::sync_file_system::FileStatus status_enum =
SyncFileStatusToExtensionEnum(status);
api::sync_file_system::SyncAction action_enum =
SyncActionToExtensionEnum(action);
api::sync_file_system::SyncDirection direction_enum =
SyncDirectionToExtensionEnum(direction);
params->AppendString(api::sync_file_system::ToString(status_enum));
params->AppendString(api::sync_file_system::ToString(action_enum));
params->AppendString(api::sync_file_system::ToString(direction_enum));
BroadcastOrDispatchEvent(
url.origin(),
api::sync_file_system::OnFileStatusChanged::kEventName,
params.Pass());
}
void ExtensionSyncEventObserver::BroadcastOrDispatchEvent(
const GURL& app_origin,
const std::string& event_name,
scoped_ptr<base::ListValue> values) {
  // Check whether the event should be broadcast to all listening
  // extensions or sent to a specific extension ID.
bool broadcast_mode = app_origin.is_empty();
EventRouter* event_router =
ExtensionSystem::Get(browser_context_)->event_router();
DCHECK(event_router);
scoped_ptr<Event> event(new Event(event_name, values.Pass()));
event->restrict_to_browser_context = browser_context_;
// No app_origin, broadcast to all listening extensions for this event name.
if (broadcast_mode) {<|fim▁hole|> return;
}
// Dispatch to single extension ID.
const std::string extension_id = GetExtensionId(app_origin);
if (extension_id.empty())
return;
event_router->DispatchEventToExtension(extension_id, event.Pass());
}
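// A sketch of how callers reach the two paths above; the argument values are
// illustrative, not taken from this file. An empty origin broadcasts to every
// listener, while a real app origin targets a single extension.
//
//   observer->OnSyncStateUpdated(GURL(), state, "idle");       // broadcast
//   observer->OnSyncStateUpdated(app_origin, state, "synced");  // single extension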
template <>
void BrowserContextKeyedAPIFactory<
ExtensionSyncEventObserver>::DeclareFactoryDependencies() {
DependsOn(sync_file_system::SyncFileSystemServiceFactory::GetInstance());
DependsOn(ExtensionsBrowserClient::Get()->GetExtensionSystemFactory());
}
} // namespace extensions<|fim▁end|>
|
event_router->BroadcastEvent(event.Pass());
|
<|file_name|>schema2.rs<|end_file_name|><|fim▁begin|>use types::{ColumnType};
pub trait Schema {
fn len(&self) -> usize;
fn name(&self, index: usize) -> &str;
fn ctype(&self, index: usize) -> ColumnType;
fn nullable(&self, index: usize) -> bool;
}
#[derive(Clone)]
pub struct Schema2 {
pub names: Vec<String>,
pub types: Vec<ColumnType>,
pub nullable: Vec<bool>,
}
impl Schema2 {
pub fn new() -> Self {
Schema2 {
names: Vec::new(),
types: Vec::new(),
nullable: Vec::new(),
}
}
pub fn add(&mut self,
name: &str,
ctype: ColumnType,
nullable: bool) {
self.names.push(name.to_string());
self.types.push(ctype);
self.nullable.push(nullable);
}
pub fn set_nullable(&mut self, index: usize, nullability: bool) {
self.nullable[index] = nullability;
}
}
impl Schema for Schema2 {
fn len(&self) -> usize {
self.names.len()
}
fn name(&self, index: usize) -> &str {
self.names[index].as_str()
}
fn ctype(&self, index: usize) -> ColumnType {<|fim▁hole|> }
fn nullable(&self, index: usize) -> bool {
self.nullable[index]
}
}<|fim▁end|>
|
self.types[index]
|
<|file_name|>actions.ts<|end_file_name|><|fim▁begin|>import { Observable } from 'rxjs/Observable';
import { Store } from '../types';
import { REHYDRATE } from 'redux-persist/constants';
import 'rxjs/operator/map';
export const appError = (error: any): Store.Action => ({
type: 'APP_ERROR',
payload: { error },<|fim▁hole|> payload: { online },
});
export const appShowMenu = (menuShown: boolean): Store.Action => ({
type: 'APP_SHOW_MENU',
payload: { menuShown },
});
export const toggleBaseline = (): Store.Action => ({
type: 'TOGGLE_BASELINE',
});
export const setTheme = (theme: string): Store.Action => ({
type: 'SET_THEME',
payload: { theme },
});
const appStartedEpic = (action$: any, deps: Store.Deps) => {
const { getState } = deps;
// const appOnline$ = Observable.create(observer => {
// const onValue = snap => {
// const online = snap.val();
// if (online === getState().app.online) {
// return;
// }
// observer.next(appOnline(online));
// };
// });
// return action$
// .filter((action: Store.Action) => action.type === 'APP_STARTED')
// .mergeMap(() => Observable.merge(appOnline$));
return action$.ofType(REHYDRATE)
.map(appOnline);
};
export const epics = [appStartedEpic];<|fim▁end|>
|
});
export const appOnline = (online: boolean): Store.Action => ({
type: 'APP_ONLINE',
|
<|file_name|>jabbersearch.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="de" version="2.0">
<context>
<name>JabberSearch</name>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="32"/>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="214"/>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="299"/>
<source>Jabber Search</source>
<translation>Jabber Suche</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="33"/>
<source>Allows to search in the Jabber network</source>
<translation>Erlaubt die Suche im Jabber-Netzwerk</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="199"/>
<source>Search</source>
<translation>Suche</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="215"/>
<source>First Name</source>
<translation>Vorname</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="216"/>
<source>Last Name</source>
<translation>Nachname</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="217"/>
<source>Nickname</source>
<translation>Spitzname</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="218"/>
<source>Email Address</source>
<translation>E-Mail Adresse</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/jabbersearch.cpp" line="300"/>
<source>Supports the searching of the information</source>
<translation>Unterstützt das Suchen der Information</translation>
</message>
</context>
<context>
<name>SearchDialog</name>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="103"/>
<source>Search in %1</source>
<translation>Suche in %1</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="131"/>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="164"/>
<source>Waiting for host response ...</source>
<translation>Warte auf Host Antwort...</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="136"/>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="169"/>
<source>Error: Can't send request to host.</source>
<translation>Fehler: Kann Anfrage nicht zum Host senden.</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="196"/>
<source>Disco info</source>
<translation>Disco Info</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="205"/>
<source>Add Contact</source>
<translation>Kontakt hinzufügen</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="214"/>
<source>vCard</source>
<translation>vCard</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.cpp" line="299"/>
<source>Requested operation failed: %1</source>
<translation>Angeforderte Operation fehlgeschlagen: %1</translation>
</message>
</context>
<context>
<name>SearchDialogClass</name>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="49"/>
<source>First:</source>
<translation>Vorname:</translation>
</message>
<message><|fim▁hole|> <location filename="../../plugins/jabbersearch/searchdialog.ui" line="62"/>
<source>Last:</source>
<translation>Nachname:</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="75"/>
<source>Nick:</source>
<translation>Spitzname:</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="88"/>
<source>Email:</source>
<translation>E-mail:</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="147"/>
<source>JID</source>
<translation>JID</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="152"/>
<source>First</source>
<translation>Vorname</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="157"/>
<source>Last</source>
<translation>Nachname</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="162"/>
<source>Nick</source>
<translation>Spitzname</translation>
</message>
<message>
<location filename="../../plugins/jabbersearch/searchdialog.ui" line="167"/>
<source>EMail</source>
<translation>E-Mail</translation>
</message>
</context>
</TS><|fim▁end|>
| |
<|file_name|>commands.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
import urllib3
import urllib
import xml.etree.ElementTree as etree
from superdesk.io.iptc import subject_codes
from datetime import datetime
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, ITEM_STATE, CONTENT_STATE
from superdesk.utc import utc
from superdesk.io.commands.update_ingest import process_iptc_codes
from superdesk.etree import get_text_word_count
# The older content does not contain an anpa category, so we derive it from the
# publication name
pubnames = {
'International Sport': 'S',
'Racing': 'R',
'Parliamentary Press Releases': 'P',
'Features': 'C',
'Financial News': 'F',
'General': 'A',
'aap Features': 'C',
'aap International News': 'I',
'aap Australian Sport': 'S',
'Australian General News': 'A',
'Asia Pulse Full': 'I',
'AFR Summary': 'A',
'Australian Sport': 'T',
'PR Releases': 'J',
'Entertainment News': 'E',
'Special Events': 'Y',
'Asia Pulse': 'I',
'aap International Sport': 'S',
'Emergency Services': 'A',
'BRW Summary': 'A',
'FBM Summary': 'A',
'aap Australian General News': 'A',
'International News': 'I',
'aap Financial News': 'F',
'Asia Pulse Basic': 'I',
'Political News': 'P',
'Advisories': 'V'
}
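# A short sketch of the fallback described above: when a legacy item carries no
# anpa category, the publication name is mapped through this table (the values
# shown come directly from the table).
#
#   pubnames.get('Racing')               # -> 'R'
#   pubnames.get('Unknown publication')  # -> None, category stays unset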
class AppImportTextArchiveCommand(superdesk.Command):
option_list = (
superdesk.Option('--start', '-strt', dest='start_id', required=False),
superdesk.Option('--user', '-usr', dest='user', required=True),
superdesk.Option('--password', '-pwd', dest='password', required=True),
superdesk.Option('--url_root', '-url', dest='url', required=True),
superdesk.Option('--query', '-qry', dest='query', required=True),
superdesk.Option('--count', '-c', dest='limit', required=False)
)
def run(self, start_id, user, password, url, query, limit):
print('Starting text archive import at {}'.format(start_id))
self._user = user
self._password = password
self._id = int(start_id)
self._url_root = url
self._query = urllib.parse.quote(query)
if limit is not None:
self._limit = int(limit)
else:
self._limit = None
self._api_login()
x = self._get_bunch(self._id)
while x:
self._process_bunch(x)
x = self._get_bunch(self._id)
if self._limit is not None and self._limit <= 0:
break
print('finished text archive import')
def _api_login(self):
self._http = urllib3.PoolManager()
credentials = '?login[username]={}&login[password]={}'.format(self._user, self._password)
url = self._url_root + credentials
r = self._http.urlopen('GET', url, headers={'Content-Type': 'application/xml'})
self._headers = {'cookie': r.getheader('set-cookie')}
self._anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
def _get_bunch(self, id):
url = self._url_root + \
'archives/txtarch?search_docs[struct_query]=(DCDATA_ID<{0})&search_docs[query]='.format(id)
url += self._query
url += '&search_docs[format]=full&search_docs[pagesize]=500&search_docs[page]=1'
url += '&search_docs[sortorder]=DCDATA_ID%20DESC'
print(url)
retries = 3
while retries > 0:
r = self._http.request('GET', url, headers=self._headers)
if r.status == 200:
e = etree.fromstring(r.data)
# print(str(r.data))
count = int(e.find('doc_count').text)
                if count > 0:
                    print('count : {}'.format(count))
                    return e
                # no documents left; stop rather than re-requesting the same page forever
                return None
else:
self._api_login()
retries -= 1
return None
def _get_head_value(self, doc, field):
el = doc.find('dcdossier/document/head/' + field)
if el is not None:
return el.text
return None
def _addkeywords(self, key, doc, item):
code = self._get_head_value(doc, key)
if code:
if 'keywords' not in item:
item['keywords'] = []
item['keywords'].append(code)
def _process_bunch(self, x):
# x.findall('dc_rest_docs/dc_rest_doc')[0].get('href')
for doc in x.findall('dc_rest_docs/dc_rest_doc'):
print(doc.get('href'))
id = doc.find('dcdossier').get('id')
if int(id) < self._id:
self._id = int(id)
item = {}
item['guid'] = doc.find('dcdossier').get('guid')
# if the item has been modified in the archive then it is due to a kill
# there is an argument that this item should not be imported at all
if doc.find('dcdossier').get('created') != doc.find('dcdossier').get('modified'):
item[ITEM_STATE] = CONTENT_STATE.KILLED
else:
item[ITEM_STATE] = CONTENT_STATE.PUBLISHED
value = datetime.strptime(self._get_head_value(doc, 'PublicationDate'), '%Y%m%d%H%M%S')
item['firstcreated'] = utc.normalize(value) if value.tzinfo else value
item['versioncreated'] = item['firstcreated']
item['unique_id'] = doc.find('dcdossier').get('unique')
item['ingest_id'] = id
item['source'] = self._get_head_value(doc, 'Agency')
self._addkeywords('AsiaPulseCodes', doc, item)
byline = self._get_head_value(doc, 'Byline')
if byline:
item['byline'] = byline
# item['service'] = self._get_head_value(doc,'Service')
category = self._get_head_value(doc, 'Category')
if not category:
publication_name = self._get_head_value(doc, 'PublicationName')
if publication_name in pubnames:
category = pubnames[publication_name]
if category:
anpacategory = {}
anpacategory['qcode'] = category
for anpa_category in self._anpa_categories['items']:
if anpacategory['qcode'].lower() == anpa_category['qcode'].lower():
anpacategory = {'qcode': anpacategory['qcode'], 'name': anpa_category['name']}
break
item['anpa_category'] = [anpacategory]
self._addkeywords('CompanyCodes', doc, item)
type = self._get_head_value(doc, 'Format')
if type == 'x':
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
elif type == 't':
item[ITEM_TYPE] = CONTENT_TYPE.PREFORMATTED
else:
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
<|fim▁hole|> item['keyword'] = self._get_head_value(doc, 'Keyword')
item['ingest_provider_sequence'] = self._get_head_value(doc, 'Sequence')
            original_source = self._get_head_value(doc, 'Author')
            if original_source:
                item['original_source'] = original_source
item['headline'] = self._get_head_value(doc, 'Headline')
code = self._get_head_value(doc, 'SubjectRefNum')
if code and len(code) == 7:
code = '0' + code
if code and code in subject_codes:
item['subject'] = []
item['subject'].append({'qcode': code, 'name': subject_codes[code]})
try:
process_iptc_codes(item, None)
except:
pass
slug = self._get_head_value(doc, 'SLUG')
if slug:
item['slugline'] = slug
else:
item['slugline'] = self._get_head_value(doc, 'Keyword')
# self._addkeywords('Takekey', doc, item)
take_key = self._get_head_value(doc, 'Takekey')
if take_key:
item['anpa_take_key'] = take_key
self._addkeywords('Topic', doc, item)
self._addkeywords('Selectors', doc, item)
el = doc.find('dcdossier/document/body/BodyText')
if el is not None:
story = el.text
if item[ITEM_TYPE] == CONTENT_TYPE.TEXT:
story = story.replace('\n ', '<br><br>')
story = story.replace('\n', '<br>')
item['body_html'] = story
else:
item['body_html'] = story
try:
item['word_count'] = get_text_word_count(item['body_html'])
except:
pass
item['pubstatus'] = 'usable'
item['allow_post_publish_actions'] = False
res = superdesk.get_resource_service('published')
original = res.find_one(req=None, guid=item['guid'])
if not original:
res.post([item])
else:
res.patch(original['_id'], item)
if self._limit:
self._limit -= 1
# print(item)
superdesk.command('app:import_text_archive', AppImportTextArchiveCommand())<|fim▁end|>
| |
<|file_name|>randomUtils.py<|end_file_name|><|fim▁begin|># Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains the random number generating methods used in the framework.
created on 07/15/2017
@author: talbpaul
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import math
import threading
from collections import deque, defaultdict
import numpy as np
from utils.utils import findCrowModule
from utils import mathUtils
# in general, we will use Crow for now, but let's make it easy to switch just in case it is helpful eventually.
# The numpy stochastic environment cannot pass the tests at this point
stochasticEnv = 'crow'
#stochasticEnv = 'numpy'
class BoxMullerGenerator:
"""
Iterator class for the Box-Muller transform
"""
def __init__(self):
"""
Constructor.
@ In, engine, instance, optional, random number generator
@ Out, None
"""
self.queue = defaultdict(deque)
self.__queueLock = threading.RLock()
def generate(self,engine=None):
"""
Yields a normally-distributed pseudorandom value
@ In, engine, instance, optional, random number generator
@ Out, generate, float, random value
"""
with self.__queueLock:
if len(self.queue[engine]) == 0:
#calculate new values
self.queue[engine].extend(self.createSamples(engine=engine))
val = self.queue[engine].pop()
return val
def createSamples(self,engine=None):
"""
Sample calculator. Because Box Muller does batches of 2, add them to a queue.
@ In, engine, instance, optional, random number generator.
@ Out, (z1,z2), tuple, two independent random values
"""
u1,u2 = random(2,engine=engine)
z1 = np.sqrt(-2.*np.log(u1))*np.cos(2.*np.pi*u2)
z2 = np.sqrt(-2.*np.log(u1))*np.sin(2.*np.pi*u2)
return z1,z2
def testSampling(self, n=1e5,engine=None):
"""
Tests distribution of samples over a large number.
@ In, n, int, optional, number of samples to test with
@ In, engine, instance, optional, random number generator
@ Out, mean, float, mean of sample set
@ Out, stdev, float, standard deviation of sample set
"""
n = int(n)
samples = np.array([self.generate(engine=engine) for _ in range(n)])
mean = np.average(samples)
stdev = np.std(samples)
return mean,stdev
if stochasticEnv == 'numpy':
npStochEnv = np.random.RandomState()
else:
crowStochEnv = findCrowModule('randomENG').RandomClass()
# this is needed for now since we need to split the stoch environments
distStochEnv = findCrowModule('distribution1D').DistributionContainer.instance()
boxMullerGen = BoxMullerGenerator()
#
# Utilities
#
#
def randomSeed(value, seedBoth=False, engine=None):
"""
Function to get a random seed
@ In, value, float, the seed
    @ In, seedBoth, bool, optional, if True then seed both random environments
    @ In, engine, instance, optional, random number generator
@ Out, None
"""
  # we need a flag telling us whether the global numpy stochastic environment needs to be changed
  replaceGlobalEnv = False
## choose an engine if it is none
if engine is None:
if stochasticEnv == 'crow':
distStochEnv.seedRandom(value)
engine=crowStochEnv
elif stochasticEnv == 'numpy':
replaceGlobalEnv=True
global npStochEnv
      # the global npStochEnv is needed in the numpy environment here
      # to prevent a referenced-before-assignment error in the local scope
engine = npStochEnv
if isinstance(engine, np.random.RandomState):
engine = np.random.RandomState(value)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
engine.seed(value)
if seedBoth:
np.random.seed(value+1) # +1 just to prevent identical seed sets
if stochasticEnv== 'numpy' and replaceGlobalEnv:
npStochEnv= engine
if replaceGlobalEnv:
print('randomUtils: Global random number seed has been changed to',value)
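# A reproducibility sketch (not executed here): reseeding with the same value
# replays the same stream of draws from random().
#
#   randomSeed(42)
#   a = random(dim=3)
#   randomSeed(42)
#   b = random(dim=3)   # a and b should match element-wise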
def random(dim=1, samples=1, keepMatrix=False, engine=None):
"""
Function to get a single random value, an array of random values, or a matrix of random values, on [0,1]
@ In, dim, int, optional, dimensionality of samples
@ In, samples, int, optional, number of arrays to deliver
@ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
@ In, engine, instance, optional, random number generator
    @ Out, vals, float, uniform random number on [0,1) (or np.array with size [dim] if dim>1, or np.array with size [samples,dim] if samples>1)
"""
engine = getEngine(engine)
dim = int(dim)
samples = int(samples)
if isinstance(engine, np.random.RandomState):
vals = engine.rand(samples,dim)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
vals = np.zeros([samples, dim])
for i in range(len(vals)):
for j in range(len(vals[0])):
vals[i][j] = engine.random()
# regardless of stoch env
if keepMatrix:
return vals
else:
return _reduceRedundantListing(vals, (samples, dim))<|fim▁hole|>def randomNormal(size=(1,), keepMatrix=False, engine=None):
"""
Function to get a single random value, an array of random values, or a matrix of random values, normally distributed
@ In, size, int or tuple, optional, shape of the samples to return
(if int, an array of samples will be returned if size>1, otherwise a float if keepMatrix is false)
@ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
@ In, engine, instance, optional, random number generator
@ Out, vals, float, random normal number (or np.array with size [n] if n>1, or np.array with size [n,samples] if sampels>1)
"""
engine = getEngine(engine)
if isinstance(size, int):
size = (size, )
if isinstance(engine, np.random.RandomState):
vals = engine.randn(*size)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
vals = np.zeros(np.prod(size))
for i in range(len(vals)):
vals[i] = boxMullerGen.generate(engine=engine)
vals.shape = size
if keepMatrix:
return vals
else:
return _reduceRedundantListing(vals,size)
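# A shape-behavior sketch for random() and randomNormal(): with keepMatrix=False,
# _reduceRedundantListing collapses singleton results.
#
#   randomNormal()              # a single float
#   randomNormal(size=5)        # 1-D np.array of 5 values
#   randomNormal(size=(3, 5))   # np.array with shape (3, 5)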
def randomMultivariateNormal(cov, size=1, mean=None):
"""
Provides a random sample from a multivariate distribution.
@ In, cov, np.array, covariance matrix (must be square, positive definite)
@ In, size, int, optional, number of samples to return
@ In, mean, np.array, means for distributions (must be length of 1 side of covar matrix == len(cov[0]))
@ Out, vals, np.array, array of samples with size [n_samples, len(cov[0])]
"""
dims = cov.shape[0]
if mean is None:
mean = np.zeros(dims)
eps = 10 * sys.float_info.epsilon
covEps = cov + eps * np.identity(dims)
decomp = np.linalg.cholesky(covEps)
randSamples = randomNormal(size=(dims, size)).reshape((dims, size))
vals = mean + np.dot(decomp, randSamples)
return vals
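# A usage sketch with an illustrative covariance. Per the docstring, columns of
# the result are samples; passing the mean as a column vector keeps NumPy
# broadcasting aligned with the [dims, size] shape.
#
#   cov = np.array([[1.0, 0.5], [0.5, 1.0]])
#   mean = np.zeros((2, 1))
#   pts = randomMultivariateNormal(cov, size=5, mean=mean)   # shape (2, 5)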
def randomIntegers(low, high, caller=None, engine=None):
"""
Function to get a random integer
@ In, low, int, low boundary
@ In, high, int, upper boundary
@ In, caller, instance, optional, object requesting the random integers
@ In, engine, instance, optional, optional, random number generator
@ Out, rawInt, int, random int
"""
engine = getEngine(engine)
if isinstance(engine, np.random.RandomState):
return engine.randint(low, high=high+1)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
intRange = high - low + 1.0
rawNum = low + random(engine=engine)*intRange
rawInt = math.floor(rawNum)
if rawInt < low or rawInt > high:
if caller:
caller.raiseAMessage("Random int out of range")
rawInt = max(low, min(rawInt, high))
return rawInt
else:
raise TypeError('Engine type not recognized! {}'.format(type(engine)))
def randomChoice(array, size = 1, replace = True, engine = None):
"""
Generates a random sample or a sequence of random samples from a given array-like (list or such) or N-D array
This equivalent to np.random.choice but extending the functionality to N-D arrays
@ In, array, list or np.ndarray, the array from which to pick
@ In, size, int, optional, the number of samples to return
@ In, replace, bool, optional, allows replacement if True, default is True
@ In, engine, instance, optional, optional, random number generator
@ Out, selected, object, the random choice (1 element) or a list of elements
"""
assert(hasattr(array,"shape") or isinstance(array,list))
if not replace:
if hasattr(array,"shape"):
raise RuntimeError("Option with replace False not available for ndarrays")
if len(array) < size:
raise RuntimeError("array size < of number of requested samples (size)")
sel = []
coords = array
for _ in range(size):
if hasattr(array,"shape"):
coord = tuple([randomIntegers(0, dim-1, engine=engine) for dim in coords.shape])
sel.append(coords[coord])
else:
sel.append(coords[randomIntegers(0, len(coords)-1, engine=engine)])
if not replace:
coords.remove(sel[-1])
selected = sel[0] if size == 1 else sel
return selected
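# A usage sketch: unlike np.random.choice, the array may be N-D; replace=False is
# only supported for plain lists (note that it removes picked items from the
# input list in place).
#
#   randomChoice(np.arange(12).reshape(3, 4))           # one randomly picked element
#   randomChoice([1, 2, 3, 4], size=2, replace=False)   # two distinct elements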
def randomPermutation(l,caller,engine=None):
"""
Function to get a random permutation
@ In, l, list, list to be permuted
@ In, caller, instance, the caller
@ In, engine, instance, optional, random number generator
@ Out, newList, list, randomly permuted list
"""
engine = getEngine(engine)
if isinstance(engine, np.random.RandomState):
return engine.permutation(l)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
newList = []
oldList = l[:]
while len(oldList) > 0:
newList.append(oldList.pop(randomIntegers(0,len(oldList)-1,caller,engine=engine)))
return newList
def randPointsOnHypersphere(dim,samples=1,r=1,keepMatrix=False,engine=None):
"""
obtains random points on the surface of a hypersphere of dimension "n" with radius "r".
see http://www.sciencedirect.com/science/article/pii/S0047259X10001211
"On decompositional algorithms for uniform sampling from n-spheres and n-balls", Harman and Lacko, 2010, J. Multivariate Analysis
@ In, dim, int, the dimensionality of the hypersphere
@ In, samples, int, optional, the number of samples desired
@ In, r, float, optional, the radius of the hypersphere
@ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
@ In, engine, instance, optional, random number generator
@ Out, pts, np.array(np.array(float)), random points on the surface of the hypersphere [sample#][pt]
"""
engine=getEngine(engine)
## first fill random samples
pts = randomNormal(size=(samples, dim),keepMatrix=True,engine=engine)
## extend radius, place inside sphere through normalization
rnorm = float(r)/np.linalg.norm(pts,axis=1)
pts *= rnorm[:,np.newaxis]
#TODO if all values in any given sample are 0,
# this produces an unphysical result, so we should resample;
# however, this probability is miniscule and the speed benefits of skipping checking loop seems worth it.
if keepMatrix:
return pts
else:
return _reduceRedundantListing(pts,(samples, dim))
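# A quick property sketch: surface points of radius r should all have Euclidean
# norm ~r, up to floating-point error.
#
#   pts = randPointsOnHypersphere(3, samples=100, r=2.0, keepMatrix=True)
#   assert np.allclose(np.linalg.norm(pts, axis=1), 2.0)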
def randPointsInHypersphere(dim,samples=1,r=1,keepMatrix=False,engine=None):
"""
obtains a random point internal to a hypersphere of dimension "n" with radius "r"
see http://www.sciencedirect.com/science/article/pii/S0047259X10001211
"On decompositional algorithms for uniform sampling from n-spheres and n-balls", Harman and Lacko, 2010, J. Multivariate Analysis
@ In, dim, int, the dimensionality of the hypersphere
    @ In, samples, int, optional, the number of samples desired
    @ In, r, float, optional, the radius of the hypersphere
    @ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
    @ In, engine, instance, optional, random number generator
    @ Out, pts, np.array(np.array(float)), random points inside the hypersphere [sample#][pt]
"""
engine=getEngine(engine)
#sample on surface of n+2-sphere and discard the last two dimensions
pts = randPointsOnHypersphere(dim+2,samples=samples,r=r,keepMatrix=True,engine=engine)[:,:-2]
if keepMatrix:
return pts
else:
return _reduceRedundantListing(pts,(samples, dim))
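# A companion sketch for the ball sampler built on the n+2-sphere trick above:
# interior points have norm <= r.
#
#   pts = randPointsInHypersphere(3, samples=100, r=1.0, keepMatrix=True)
#   assert np.all(np.linalg.norm(pts, axis=1) <= 1.0 + 1e-12)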
def newRNG(env=None):
"""
Provides a new instance of the random number generator.
@ In, env, string, optional, type of random number generator. Defaults to global option stored in "stochasticEnv".
@ Out, engine, object, RNG producer
"""
if env is None:
env = stochasticEnv
if env == 'crow':
engine = findCrowModule('randomENG').RandomClass()
elif env == 'numpy':
engine = np.random.RandomState()
return engine
### internal utilities ###
def _reduceRedundantListing(data,size):
"""
Adjusts data to be intuitive for developers.
- if np.prod(size) => dim = samples = 1: returns a float
- if size[1,...,n] > 1 but size[0] (samples) = 1: returns a 1D numpy array of floats
- otherwise: returns a numpy array indexed by the original shape
@ In, data, numpy.array, n-dimensional array indexed by [sample, :, ...,n]
@ In, dim, int, dimensionality of each sample
@ In, samples, int, number of samples taken
@ Out, data, np.array, shape and size described above in method description.
"""
if np.prod(size) == 1: #user expects single float
return data.flatten()[0]
elif size[0]==1: #user expects array of floats (or matrix)
return data[0]
else:
return data
def getEngine(eng):
"""
Choose an engine if it is none and raise error if engine type not recognized
    @ In, eng, instance or None, random number generator (None selects the global engine)
    @ Out, eng, instance, random number generator
"""
if eng is None:
if stochasticEnv == 'numpy':
eng = npStochEnv
elif stochasticEnv == 'crow':
eng = crowStochEnv
if not isinstance(eng, np.random.RandomState) and not isinstance(eng, findCrowModule('randomENG').RandomClass):
raise TypeError('Engine type not recognized! {}'.format(type(eng)))
return eng
def randomPerpendicularVector(vector):
"""
Finds a random vector perpendicular to the given vector
Uses definition of dot product orthogonality:
0 = sum_i (p_i * g_i)
p_i = rand() forall i != n
p_n = -1/g_n * sum_i(p_i * g_i) forall i != n
@ In, vector, np.array, ND vector
@ Out, perp, np.array, perpendicular vector
"""
# sanity check
numNonZero = np.count_nonzero(vector)
if not numNonZero:
raise RuntimeError('Provided vector is the zero vector!')
N = len(vector)
indices = np.arange(N)
nonZeroMap = vector != 0
# choose a random NONZERO index to be dependent (don't divide by zero, mate)
depIndex = indices[nonZeroMap][randomIntegers(0, numNonZero - 1, None)]
# random values for all but chosen variable
perp = randomNormal(N)
# cheat some math, zero out the random index term by setting the perp value to 0
perp[depIndex] = 0
dotProd = np.dot(vector, perp)
perp[depIndex] = - dotProd / vector[depIndex]
return perp<|fim▁end|>
| |
<|file_name|>vis_eye_video_overlay.py<|end_file_name|><|fim▁begin|>'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import sys, os, platform
from glob import glob
import cv2
import numpy as np
from file_methods import Persistent_Dict
from pyglui import ui
from player_methods import transparent_image_overlay
from plugin import Plugin
from copy import copy
# helpers/utils
from version_utils import VersionFormat
#capture
from video_capture import EndofVideoFileError,FileSeekError,FileCaptureError,File_Source
#mouse
from glfw import glfwGetCursorPos,glfwGetWindowSize,glfwGetCurrentContext
from methods import normalize,denormalize
#logging
import logging
logger = logging.getLogger(__name__)
def get_past_timestamp(idx,timestamps):
"""
recursive function to find the most recent valid timestamp in the past
"""
if idx == 0:
# if at the beginning, we can't go back in time.
return get_future_timestamp(idx,timestamps)
if timestamps[idx]:
res = timestamps[idx][-1]
return res
else:
return get_past_timestamp(idx-1,timestamps)
def get_future_timestamp(idx,timestamps):
"""
recursive function to find most recent valid timestamp in the future
"""
if idx == len(timestamps)-1:
# if at the end, we can't go further into the future.
return get_past_timestamp(idx,timestamps)
elif timestamps[idx]:
return timestamps[idx][0]
else:
idx = min(len(timestamps),idx+1)
return get_future_timestamp(idx,timestamps)
def get_nearest_timestamp(past_timestamp,future_timestamp,world_timestamp):
dt_past = abs(past_timestamp-world_timestamp)
    dt_future = abs(future_timestamp-world_timestamp) # abs probably not necessary here, but kept for sanity
if dt_past < dt_future:
return past_timestamp
else:
return future_timestamp
def correlate_eye_world(eye_timestamps,world_timestamps):
"""
This function takes a list of eye timestamps and world timestamps
and correlates one eye frame per world frame
Returns a mapping that correlates a single eye frame index with each world frame index.
Up and downsampling is used to achieve this mapping.
"""
# return framewise mapping as a list
e_ts = eye_timestamps
w_ts = list(world_timestamps)
eye_frames_by_timestamp = dict(zip(e_ts,range(len(e_ts))))
eye_timestamps_by_world_index = [[] for i in world_timestamps]
frame_idx = 0
try:
current_e_ts = e_ts.pop(0)
except:
logger.warning("No eye timestamps found.")
return eye_timestamps_by_world_index
while e_ts:
        # if the current eye timestamp is before the mean of the current and next world frame timestamps
try:
t_between_frames = ( w_ts[frame_idx]+w_ts[frame_idx+1] ) / 2.
except IndexError:
break
if current_e_ts <= t_between_frames:
eye_timestamps_by_world_index[frame_idx].append(current_e_ts)
current_e_ts = e_ts.pop(0)
else:
frame_idx+=1
idx = 0
eye_world_frame_map = []
    # some entries in `eye_timestamps_by_world_index` might be empty -- no correlated eye timestamp --
    # so we will show either the previous or the next frame, whichever is temporally closest
for candidate,world_ts in zip(eye_timestamps_by_world_index,w_ts):
# if there is no candidate, then assign it to the closest timestamp
if not candidate:
# get most recent timestamp, either in the past or future
e_past_ts = get_past_timestamp(idx,eye_timestamps_by_world_index)
e_future_ts = get_future_timestamp(idx,eye_timestamps_by_world_index)
eye_world_frame_map.append(eye_frames_by_timestamp[get_nearest_timestamp(e_past_ts,e_future_ts,world_ts)])
else:
# TODO - if there is a list of len > 1 - then we should check which is the temporally closest timestamp
eye_world_frame_map.append(eye_frames_by_timestamp[eye_timestamps_by_world_index[idx][-1]])
idx += 1
return eye_world_frame_map
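# An illustrative sketch (timestamps are made up, and the eye list is consumed
# in place): eye frames denser than world frames map one eye index per world
# frame, with empty slots falling back to the temporally closest neighbour.
#
#   eye_ts = [0.00, 0.01, 0.02, 0.03, 0.04]
#   world_ts = [0.00, 0.02, 0.04]
#   correlate_eye_world(eye_ts, world_ts)   # -> [1, 3, 3] with these inputs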
class Vis_Eye_Video_Overlay(Plugin):
"""docstring This plugin allows the user to overlay the eye recording on the recording of his field of vision
Features: flip video across horiz/vert axes, click and drag around interface, scale video size from 20% to 100%,
show only 1 or 2 or both eyes
features updated by Andrew June 2015
"""
def __init__(self,g_pool,alpha=0.6,eye_scale_factor=.5,move_around=0,mirror={'0':False,'1':False}, flip={'0':False,'1':False},pos=[(640,10),(10,10)]):
super().__init__(g_pool)
self.order = .6
self.menu = None
# user controls
self.alpha = alpha #opacity level of eyes
self.eye_scale_factor = eye_scale_factor #scale
        self.showeyes = 0,1 # modes: (0,1) shows both eyes; reduced to (0,) when only one eye recording exists
        self.move_around = move_around # boolean: whether the overlay may be dragged around the screen
        self.video_size = [0,0] # displayed size of the eye recording (depends on scaling)
#variables specific to each eye
self.eye_frames = []
self.eye_world_frame_map = []
self.eye_cap = []
self.mirror = mirror #do we horiz flip first eye
self.flip = flip #do we vert flip first eye
self.pos = [list(pos[0]),list(pos[1])] #positions of 2 eyes
self.drag_offset = [None,None]
# load eye videos and eye timestamps
if VersionFormat(self.g_pool.meta_info['Capture Software Version'][1:]) < VersionFormat('0.4'):
eye_video_path = os.path.join(g_pool.rec_dir,'eye.avi'),'None'
eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye_timestamps.npy'),'None'
else:<|fim▁hole|> #try to load eye video and ts for each eye.
for video,ts in zip(eye_video_path,eye_timestamps_path):
try:
self.eye_cap.append(File_Source(self.g_pool,source_path=glob(video)[0],timestamps=np.load(ts)))
except(IndexError,FileCaptureError):
pass
else:
self.eye_frames.append(self.eye_cap[-1].get_frame())
try:
eye_timestamps = list(np.load(ts))
except:
pass
else:
self.eye_world_frame_map.append(correlate_eye_world(eye_timestamps,g_pool.timestamps))
if len(self.eye_cap) == 2:
logger.debug("Loaded binocular eye video data.")
elif len(self.eye_cap) == 1:
logger.debug("Loaded monocular eye video data")
self.showeyes = (0,)
else:
logger.error("Could not load eye video.")
self.alive = False
return
def unset_alive(self):
self.alive = False
def init_gui(self):
# initialize the menu
self.menu = ui.Scrolling_Menu('Eye Video Overlay')
self.update_gui()
self.g_pool.gui.append(self.menu)
def update_gui(self):
self.menu.elements[:] = []
self.menu.append(ui.Button('Close',self.unset_alive))
self.menu.append(ui.Info_Text('Show the eye video overlaid on top of the world video. Eye1 is usually the right eye'))
self.menu.append(ui.Slider('alpha',self,min=0.0,step=0.05,max=1.0,label='Opacity'))
self.menu.append(ui.Slider('eye_scale_factor',self,min=0.2,step=0.1,max=1.0,label='Video Scale'))
self.menu.append(ui.Switch('move_around',self,label="Move Overlay"))
if len(self.eye_cap) == 2:
self.menu.append(ui.Selector('showeyes',self,label='Show',selection=[(0,),(1,),(0,1)],labels= ['eye 1','eye 2','both'],setter=self.set_showeyes))
if 0 in self.showeyes:
self.menu.append(ui.Switch('0',self.mirror,label="Eye 1: Horiz. Flip"))
self.menu.append(ui.Switch('0',self.flip,label="Eye 1: Vert. Flip"))
if 1 in self.showeyes:
self.menu.append(ui.Switch('1',self.mirror,label="Eye 2: Horiz Flip"))
self.menu.append(ui.Switch('1',self.flip,label="Eye 2: Vert Flip"))
def set_showeyes(self,new_mode):
#everytime we choose eye setting (either use eye 1, 2, or both, updates the gui menu to remove certain options from list)
self.showeyes = new_mode
self.update_gui()
def deinit_gui(self):
if self.menu:
self.g_pool.gui.remove(self.menu)
self.menu = None
def update(self,frame,events):
for eye_index in self.showeyes:
requested_eye_frame_idx = self.eye_world_frame_map[eye_index][frame.index]
#1. do we need a new frame?
if requested_eye_frame_idx != self.eye_frames[eye_index].index:
# do we need to seek?
if requested_eye_frame_idx == self.eye_cap[eye_index].get_frame_index()+1:
                    # if we just need to seek by one frame, it's faster to read one frame and throw it away.
_ = self.eye_cap[eye_index].get_frame()
if requested_eye_frame_idx != self.eye_cap[eye_index].get_frame_index():
# only now do I need to seek
self.eye_cap[eye_index].seek_to_frame(requested_eye_frame_idx)
# reading the new eye frame frame
try:
self.eye_frames[eye_index] = self.eye_cap[eye_index].get_frame()
except EndofVideoFileError:
logger.warning("Reached the end of the eye video for eye video {}.".format(eye_index))
else:
#our old frame is still valid because we are doing upsampling
pass
#2. dragging image
if self.drag_offset[eye_index] is not None:
pos = glfwGetCursorPos(glfwGetCurrentContext())
pos = normalize(pos,glfwGetWindowSize(glfwGetCurrentContext()))
pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
self.pos[eye_index][0] = pos[0]+self.drag_offset[eye_index][0]
self.pos[eye_index][1] = pos[1]+self.drag_offset[eye_index][1]
else:
self.video_size = [round(self.eye_frames[eye_index].width*self.eye_scale_factor), round(self.eye_frames[eye_index].height*self.eye_scale_factor)]
#3. keep in image bounds, do this even when not dragging because the image video_sizes could change.
self.pos[eye_index][1] = min(frame.img.shape[0]-self.video_size[1],max(self.pos[eye_index][1],0)) #frame.img.shape[0] is height, frame.img.shape[1] is width of screen
self.pos[eye_index][0] = min(frame.img.shape[1]-self.video_size[0],max(self.pos[eye_index][0],0))
#4. flipping images, converting to greyscale
eye_gray = cv2.cvtColor(self.eye_frames[eye_index].img,cv2.COLOR_BGR2GRAY) #auto gray scaling
eyeimage = cv2.resize(eye_gray,(0,0),fx=self.eye_scale_factor, fy=self.eye_scale_factor)
if self.mirror[str(eye_index)]:
eyeimage = np.fliplr(eyeimage)
if self.flip[str(eye_index)]:
eyeimage = np.flipud(eyeimage)
#5. finally overlay the image
x,y = int(self.pos[eye_index][0]),int(self.pos[eye_index][1])
transparent_image_overlay((x,y),cv2.cvtColor(eyeimage,cv2.COLOR_GRAY2BGR),frame.img,self.alpha)
def on_click(self,pos,button,action):
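        """On mouse-down inside an overlay (with moving enabled) start dragging it; any other click clears the drag offsets."""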
if self.move_around == 1 and action == 1:
for eye_index in self.showeyes:
if self.pos[eye_index][0] < pos[0] < self.pos[eye_index][0]+self.video_size[0] and self.pos[eye_index][1] < pos[1] < self.pos[eye_index][1] + self.video_size[1]:
self.drag_offset[eye_index] = self.pos[eye_index][0]-pos[0],self.pos[eye_index][1]-pos[1]
return
else:
self.drag_offset = [None,None]
def get_init_dict(self):
        return {'alpha':self.alpha,'eye_scale_factor':self.eye_scale_factor,'move_around':self.move_around,'mirror':self.mirror,'flip':self.flip,'pos':self.pos}
def cleanup(self):
""" called when the plugin gets terminated.
This happens either voluntarily or forced.
if you have a GUI or glfw window destroy it here.
"""
self.deinit_gui()<|fim▁end|>
|
eye_video_path = os.path.join(g_pool.rec_dir,'eye0.*'),os.path.join(g_pool.rec_dir,'eye1.*')
eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye0_timestamps.npy'),os.path.join(g_pool.rec_dir,'eye1_timestamps.npy')
|
<|file_name|>keras_future.py<|end_file_name|><|fim▁begin|>import keras
import keras.layers
import keras.models
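# Compatibility helpers: Keras 2 replaced the functional merge() API with
# dedicated layer classes (Concatenate, Add) and renamed Model's keyword
# arguments, so each helper picks whichever API the installed version offers.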
def concatenate(x):
if hasattr(keras.layers, 'Concatenate'):
return keras.layers.Concatenate()(x)
else:
return keras.layers.merge(x, mode='concat')
def add(x):
if hasattr(keras.layers, 'Add'):
return keras.layers.Add()(x)
else:
return keras.layers.merge(x, mode='sum')
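# Example usage (tensor names are illustrative):
#   merged = concatenate([branch_a, branch_b])
#   summed = add([branch_a, branch_b])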
<|fim▁hole|>
def Model(input, output, **kwargs):
if int(keras.__version__.split('.')[0]) >= 2:
return keras.models.Model(inputs=input, outputs=output, **kwargs)
else:
return keras.models.Model(input=input, output=output, **kwargs)<|fim▁end|>
| |
<|file_name|>AzureStore.ts<|end_file_name|><|fim▁begin|>/*
Copyright (C) 2017 Cloudbase Solutions SRL<|fim▁hole|>License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import { observable, action } from 'mobx'
import cookie from 'js-cookie'
import AzureSource from '../sources/AzureSource'
import type { Assessment, VmItem, AzureLocation } from '../@types/Assessment'
import type { NetworkMap } from '../@types/Network'
import type { Endpoint } from '../@types/Endpoint'
export type LocalData = {
endpoint: Endpoint,
sourceEndpoint: Endpoint | null,
connectionInfo: any,
resourceGroupName: string,
locationName: string,
assessmentName: string,
groupName: string,
projectName: string,
selectedVmSizes: { [prop: string]: string },
selectedVms: string[],
selectedNetworks: NetworkMap[],
[prop: string]: any,
}
class AzureLocalStorage {
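  // Persists the assessment wizard state in the browser's localStorage,
  // keyed by the project id cookie so each project keeps its own entries.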
static loadLocalData(assessmentName: string): LocalData | undefined {
const localDataArray: LocalData[] = JSON.parse(localStorage.getItem(`assessments-${cookie.get('projectId') || ''}`) || '[]')
return localDataArray.find(a => a.assessmentName === assessmentName)
}
static setLocalData(data: LocalData) {
const localDataArray: LocalData[] = JSON.parse(localStorage.getItem(`assessments-${cookie.get('projectId') || ''}`) || '[]')
const assessmentIndex = localDataArray.findIndex(a => a.assessmentName === data.assessmentName)
if (assessmentIndex > -1) {
localDataArray.splice(assessmentIndex, 1)
}
localDataArray.push(data)
localStorage.setItem(`assessments-${cookie.get('projectId') || ''}`, JSON.stringify(localDataArray))
}
}
class AzureStore {
@observable authenticating: boolean = false
@observable loadingResourceGroups: boolean = false
@observable assessmentResourceGroups: Assessment['group'][] = []
@observable coriolisResourceGroups: string[] = []
@observable loadingAssessments: boolean = false
@observable loadingAssessmentDetails: boolean = false
@observable assessmentDetails: Assessment | null = null
@observable assessments: Assessment[] = []
@observable loadingAssessedVms: boolean = false
@observable assessedVms: VmItem[] = []
@observable loadingVmSizes: boolean = false
// @observable vmSizes: VmSize[] = []
@observable assessmentsProjectId: string = ''
@observable locations: AzureLocation[] = []
@observable localData: LocalData | null | undefined = null
@observable vmSizes: string[] = []
@action loadLocalData(assessmentName: string): boolean {
this.localData = AzureLocalStorage.loadLocalData(assessmentName)
return Boolean(this.localData)
}
@action setLocalData(data: LocalData) {
const newData = data
newData.selectedVmSizes = data.selectedVmSizes || {}
newData.selectedVms = data.selectedVms || []
newData.selectedNetworks = data.selectedNetworks || []
this.localData = newData
AzureLocalStorage.setLocalData(newData)
}
@action updateResourceGroup(resourceGroupName: string) {
if (!this.localData) {
return
}
this.localData.resourceGroupName = resourceGroupName
AzureLocalStorage.setLocalData(this.localData)
}
@action updateNetworkMap(selectedNetworks: NetworkMap[]) {
if (!this.localData) {
return
}
this.localData.selectedNetworks = selectedNetworks
AzureLocalStorage.setLocalData(this.localData)
}
@action updateSourceEndpoint(sourceEndpoint: Endpoint | null) {
if (!this.localData) {
return
}
this.localData.sourceEndpoint = sourceEndpoint
AzureLocalStorage.setLocalData(this.localData)
}
@action updateSelectedVms(selectedVms: string[]) {
if (!this.localData) {
return
}
this.localData.selectedVms = selectedVms
AzureLocalStorage.setLocalData(this.localData)
}
@action updateVmSize(vmId: string, vmSize: string) {
if (!this.localData) {
return
}
this.localData.selectedVmSizes[vmId] = vmSize
    AzureLocalStorage.setLocalData(this.localData)
}
@action updateVmSizes(vmSizes: { [prop: string]: string }) {
if (!this.localData) {
return
}
this.localData.selectedVmSizes = vmSizes
AzureLocalStorage.setLocalData(this.localData)
}
@action updateLocation(locationName: string) {
if (!this.localData) {
return
}
this.localData.locationName = locationName
AzureLocalStorage.setLocalData(this.localData)
}
@action updateTargetEndpoint(endpoint: Endpoint) {
if (!this.localData) {
return
}
this.localData.endpoint = endpoint
AzureLocalStorage.setLocalData(this.localData)
}
@action async authenticate(connectionInfo: any): Promise<void> {
this.authenticating = true
try {
await AzureSource.authenticate(connectionInfo)
this.authenticating = false
return Promise.resolve()
} catch (e) {
this.authenticating = false
return Promise.reject()
}
}
@action async getResourceGroups(subscriptionId: string): Promise<void> {
this.loadingResourceGroups = true
try {
const groups = await AzureSource.getResourceGroups(subscriptionId)
this.loadingResourceGroups = false
this.assessmentResourceGroups = groups
} catch (e) {
this.loadingResourceGroups = false
}
}
@action isLoadedForCurrentProject() {
return this.assessmentsProjectId === (cookie.get('projectId') || 'null')
}
@action async getAssessments(
subscriptionId: string,
resourceGroupName: string,
projectId: string,
options?: { backgroundLoading: boolean, skipLog?: boolean },
): Promise<void> {
let cookieProjectId = cookie.get('projectId') || 'null'
if (projectId !== cookieProjectId) {
return Promise.resolve()
}
if (!options || !options.backgroundLoading) {
this.loadingAssessments = true
}
const assessments = await AzureSource
.getAssessments(subscriptionId, resourceGroupName, options && options.skipLog)
this.loadingAssessments = false
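    // Re-read the cookie: the user may have switched projects while the
    // request was in flight, in which case the stale response is discarded.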
cookieProjectId = cookie.get('projectId') || 'null'
if (projectId !== cookieProjectId) {
return Promise.resolve()
}
this.assessmentsProjectId = cookieProjectId
this.assessments = assessments
return Promise.resolve()
}
@action async getAssessmentDetails(info: Assessment): Promise<void> {
this.loadingAssessmentDetails = true
try {
const assessment = await AzureSource.getAssessmentDetails(info)
this.loadingAssessmentDetails = false
this.assessmentDetails = assessment
} catch (e) {
this.loadingAssessmentDetails = false
}
}
@action saveLocations(locations: AzureLocation[]) {
this.locations = locations
}
@action saveResourceGroups(resourceGroups: string[]) {
this.coriolisResourceGroups = resourceGroups
}
@action saveTargetVmSizes(targetVmSizes: string[]) {
this.vmSizes = targetVmSizes
}
@action setLocation(location: string) {
if (!this.localData || this.localData.locationName) {
return
}
this.localData.locationName = location
}
@action clearAssessmentDetails() {
this.assessmentDetails = null
this.assessedVms = []
}
@action async getAssessedVms(info: Assessment): Promise<void> {
this.loadingAssessedVms = true
try {
const vms = await AzureSource.getAssessedVms(info)
this.loadingAssessedVms = false
this.assessedVms = vms
} catch (e) {
this.loadingAssessedVms = false
}
}
// @action getVmSizes(info: Assessment): Promise<void> {
// this.loadingVmSizes = true
// return AzureSource.getVmSizes(info).then((sizes: VmSize[]) => {
// this.loadingVmSizes = false
// this.vmSizes = sizes
// }).catch(() => {
// this.loadingVmSizes = false
// })
// }
@action clearAssessedVms() {
this.assessedVms = []
}
@action clearAssessments() {
this.assessmentResourceGroups = []
this.assessments = []
this.locations = []
this.coriolisResourceGroups = []
}
}
export default new AzureStore()<|fim▁end|>
|
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
|
<|file_name|>resize_gcp.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node
const fs = require('fs');
const Gcp = require('../static/app/js/classes/Gcp');
const argv = process.argv.slice(2);
function die(s){
console.log(s);
process.exit(1);
}
if (argv.length != 2){
die(`Usage: ./resize_gcp.js <path/to/gcp_file.txt> <JSON encoded image-->ratio map>`);
}
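// Example invocation (file name and ratios are hypothetical):
//   ./resize_gcp.js gcp_list.txt '{"IMG_0001.JPG": 0.5, "IMG_0002.JPG": 0.5}'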
const [inputFile, jsonMap] = argv;
if (!fs.existsSync(inputFile)){
die('File does not exist: ' + inputFile);
}
const originalGcp = new Gcp(fs.readFileSync(inputFile, 'utf8'));
try{<|fim▁hole|> const map = JSON.parse(jsonMap);
const newGcp = originalGcp.resize(map, true);
console.log(newGcp.toString());
}catch(e){
die("Not a valid JSON string: " + jsonMap);
}<|fim▁end|>
| |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>import { FormatLong } from '../../../types'
import buildFormatLongFn from '../../../_lib/buildFormatLongFn/index'
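// Long-form date/time patterns for this locale; the CLDR numbers in the
// comments reference the chart entries the patterns were taken from.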
const dateFormats = {
full: 'EEEE, do MMMM, y', // CLDR #1787
long: 'do MMMM, y', // CLDR #1788
medium: 'd MMM, y', // CLDR #1789
short: 'dd/MM/yyyy', // CLDR #1790
}
const timeFormats = {
full: 'h:mm:ss a zzzz', // CLDR #1791
long: 'h:mm:ss a z', // CLDR #1792
medium: 'h:mm:ss a', // CLDR #1793<|fim▁hole|> full: "{{date}} 'को' {{time}}", // CLDR #1795
long: "{{date}} 'को' {{time}}", // CLDR #1796
medium: '{{date}}, {{time}}', // CLDR #1797
short: '{{date}}, {{time}}', // CLDR #1798
}
const formatLong: FormatLong = {
date: buildFormatLongFn({
formats: dateFormats,
defaultWidth: 'full',
}),
time: buildFormatLongFn({
formats: timeFormats,
defaultWidth: 'full',
}),
dateTime: buildFormatLongFn({
formats: dateTimeFormats,
defaultWidth: 'full',
}),
}
export default formatLong<|fim▁end|>
|
short: 'h:mm a', // CLDR #1794
}
const dateTimeFormats = {
|
<|file_name|>fontselect.py<|end_file_name|><|fim▁begin|># AnalogClock's font selector for setup dialog
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.
import wx
from wx.lib.newevent import NewEvent
from wx.lib.buttons import GenButton
#----------------------------------------------------------------------------
(FontSelectEvent, EVT_FONTSELECT) = NewEvent()
#----------------------------------------------------------------------------
class FontSelect(GenButton):
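    """Button that opens a font dialog and posts a FontSelectEvent when the user picks a new font."""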
def __init__(self, parent, size=(75, 21), value=None):
GenButton.__init__(self, parent, wx.ID_ANY, label="Select...",
<|fim▁hole|>
self.parent = parent
self.SetValue(value)
self.parent.Bind(wx.EVT_BUTTON, self.OnClick, self)
def GetValue(self):
return self.value
def SetValue(self, value):
if value is None:
value = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.value = value
def OnClick(self, event):
data = wx.FontData()
data.EnableEffects(False)
font = self.value; font.SetPointSize(10)
data.SetInitialFont(font)
dlg = wx.FontDialog(self, data)
changed = dlg.ShowModal() == wx.ID_OK
if changed:
data = dlg.GetFontData()
self.value = data.GetChosenFont()
self.Refresh()
dlg.Destroy()
if changed:
nevt = FontSelectEvent(id=self.GetId(), obj=self, val=self.value)
wx.PostEvent(self.parent, nevt)
#
##
### eof<|fim▁end|>
|
size=size)
self.SetBezelWidth(1)
|
<|file_name|>solvers.py<|end_file_name|><|fim▁begin|>"""
This module contains solvers for all kinds of equations,
algebraic or transcendental.
"""
import warnings
from collections import defaultdict
from types import GeneratorType
from ..core import (Add, Dummy, E, Equality, Expr, Float, Function, Ge, I,
Integer, Lambda, Mul, Symbol, expand_log, expand_mul,
expand_power_exp, nan, nfloat, pi, preorder_traversal,
sympify)
from ..core.assumptions import check_assumptions
from ..core.compatibility import (default_sort_key, is_sequence, iterable,
ordered)
from ..core.function import AppliedUndef
from ..core.logic import fuzzy_and
from ..core.relational import Relational
from ..functions import (Abs, Max, Min, Piecewise, acos, arg, asin, atan,
atan2, cos, exp, im, log, piecewise_fold, re, sin,
sqrt, tan)
from ..functions.elementary.hyperbolic import HyperbolicFunction
from ..functions.elementary.trigonometric import TrigonometricFunction
from ..logic import false, true
from ..matrices import Matrix, zeros
from ..polys import Poly, RootOf, factor, roots
from ..polys.polyerrors import PolynomialError
from ..simplify import (denom, logcombine, nsimplify, posify, powdenest,
powsimp, simplify)
from ..simplify.fu import TR1
from ..simplify.sqrtdenest import unrad
from ..utilities import filldedent
from ..utilities.iterables import uniq
from .polysys import solve_linear_system, solve_poly_system, solve_surd_system
from .utils import checksol
__all__ = 'solve', 'solve_linear', 'minsolve_linear_system'
def denoms(eq, symbols=None):
"""Return (recursively) set of all denominators that appear in eq
that contain any symbol in iterable ``symbols``; if ``symbols`` is
None (default) then all denominators will be returned.
Examples
========
>>> denoms(x/y)
{y}
>>> denoms(x/(y*z))
{y, z}
>>> denoms(3/x + y/z)
{x, z}
>>> denoms(x/2 + y/z)
{2, z}
"""
pot = preorder_traversal(eq)
dens = set()
for p in pot:
den = denom(p)
if den == 1:
continue
for d in Mul.make_args(den):
dens.add(d)
if not symbols:
return dens
rv = []
for d in dens:
free = d.free_symbols
if any(s in free for s in symbols):
rv.append(d)
return set(rv)
def solve(f, *symbols, **flags):
r"""Algebraically solves equation or system of equations.
Parameters
==========
f : Expr, Equality or iterable of above
All expressions are assumed to be equal to 0.
\*symbols : tuple
        If no symbols are given (empty tuple), the free symbols
        of the expressions will be used.
\*\*flags : dict
A dictionary of following parameters:
check : bool, optional
If False, don't do any testing of solutions. Default is
            True, i.e. the solutions are checked, and those that don't
            satisfy the given assumptions on the symbols solved for, or
            that make any denominator zero, are automatically excluded.
warn : bool, optional
Show a warning if :func:`~diofant.solvers.utils.checksol`
could not conclude. Default is False.
simplify : bool, optional
Enable simplification (default) for all but polynomials of
order 3 or greater before returning them and (if check is
not False) use the general simplify function on the solutions
and the expression obtained when they are substituted into the
function which should be zero.
rational : bool or None, optional
If True, recast Floats as Rational. If None (default),
Floats will be recast as rationals but the answer will be
recast as Floats. If the flag is False then nothing
will be done to the Floats.
cubics, quartics, quintics : bool, optional
Return explicit solutions (with radicals, which can be quite
long) when, respectively, cubic, quartic or quintic expressions
are encountered. Default is True. If False,
:class:`~diofant.polys.rootoftools.RootOf` instances will
be returned instead.
Examples
========
Single equation:
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(x**2 - 1)
[{x: -1}, {x: 1}]
We could restrict solutions by using assumptions:
>>> p = Symbol('p', positive=True)
>>> solve(p**2 - 1)
[{p: 1}]
Several equations:
>>> solve((x + 5*y - 2, -3*x + 6*y - 15))
[{x: -3, y: 1}]
>>> solve((x + 5*y - 2, -3*x + 6*y - z))
[{x: -5*z/21 + 4/7, y: z/21 + 2/7}]
No solution:
>>> solve([x + 3, x - 3])
[]
Notes
=====
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save one from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method.
>>> solve(f(x) - x, f(x))
[{f(x): x}]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[{Derivative(f(x), x): x + f(x)}]
See Also
========
diofant.solvers.recurr.rsolve : solving recurrence equations
diofant.solvers.ode.dsolve : solving differential equations
diofant.solvers.inequalities.reduce_inequalities : solving inequalities
"""
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
ordered_symbols = (symbols and symbols[0] and
(isinstance(symbols[0], (Dummy, Symbol)) or
is_sequence(symbols[0], include=GeneratorType)))
f, symbols = (_sympified_list(w) for w in [f, symbols])
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, Equality):
if 'ImmutableMatrix' in (type(a).__name__ for a in fi.args):
f[i] = fi.lhs - fi.rhs
else:
f[i] = Add(fi.lhs, -fi.rhs, evaluate=False)
elif isinstance(fi, Relational):
raise ValueError(f'Only expressions or equalities supported, got {fi}')
elif isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction),
lambda w: w.rewrite(exp))
# replace min/max:
f[i] = f[i].replace(lambda w: isinstance(w, (Min, Max)),
lambda w: w.rewrite(Piecewise))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = Integer(0)
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# preprocess symbol(s)
###########################################################################
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not (p.is_number or p.is_Add or p.is_Mul) or \
isinstance(p, AppliedUndef):
symbols.add(p)
pot.skip() # don't go any deeper
symbols = list(symbols)
# supply dummy symbols so solve(3) behaves like solve(3, x)
for i in range(len(f) - len(symbols)):
symbols.append(Dummy())
ordered_symbols = False
elif len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
# real/imag handling -----------------------------
w = Dummy('w')
piece = Lambda(w, Piecewise((w, Ge(w, 0)), (-w, True)))
for i, fi in enumerate(f):
# Abs
reps = []
for a in fi.atoms(Abs):
if not a.has(*symbols):
continue
if a.args[0].is_extended_real is None and a.args[0].is_imaginary is not True:
raise NotImplementedError(f'solving {a} when the argument '
'is not real or imaginary.')
reps.append((a, piece(a.args[0]) if a.args[0].is_extended_real else
piece(a.args[0]*I)))
fi = fi.subs(reps)
# arg
_arg = [a for a in fi.atoms(arg) if a.has(*symbols)]
fi = fi.xreplace({a: atan(im(a.args[0])/re(a.args[0])) for a in _arg})
# save changes
f[i] = fi
# see if re(s) or im(s) appear
irf = []
for s in symbols:
if s.is_extended_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in f):
irf.append((s, re(s) + I*im(s)))
if irf:
for s, rhs in irf:
for i, fi in enumerate(f):
f[i] = fi.xreplace({s: rhs})
f.append(s - rhs)
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
# end of real/imag handling -----------------------------
symbols = list(uniq(symbols))
if not ordered_symbols:
# we do this to make the results returned canonical in case f
# contains a system of nonlinear equations; all other cases should
# be unambiguous
symbols = sorted(symbols, key=default_sort_key)
# we can solve for non-symbol entities by replacing them with Dummy symbols
symbols_new = []
symbol_swapped = False
for i, s in enumerate(symbols):
if s.is_Symbol:
s_new = s
else:
symbol_swapped = True
s_new = Dummy(f'X{i:d}')
symbols_new.append(s_new)
if symbol_swapped:
swap_sym = list(zip(symbols, symbols_new))
f = [fi.subs(swap_sym) for fi in f]
symbols = symbols_new
swap_sym = {v: k for k, v in swap_sym}
else:
swap_sym = {}
# this is needed in the next two events
symset = set(symbols)
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pot.skip()
elif (isinstance(p, bool) or not p.args or p in symset or
p.is_Add or p.is_Mul or p.is_Pow or p.is_Function or
isinstance(p, RootOf)) and p.func not in (re, im):
pass
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
pot.skip()
del seen
non_inverts = {d: Dummy() for d in non_inverts}
f = [fi.subs(non_inverts) for fi in f]
non_inverts = [(v, k.subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# piecewise_fold might cancel denominators, so be sure to check them.
piecewise_dens = set()
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
for i, fi in enumerate(f):
if any(e.has(*symbols) for e in fi.atoms(Piecewise)):
piecewise_dens |= denoms(fi, symbols)
f[i] = piecewise_fold(fi)
if all(_ == 0 for _ in f):
return [{}]
#
# try to get a solution
###########################################################################
if bare_f and len(symbols) == 1:
solution = [{symbols[0]: s} for s in _solve(f[0], symbols[0], **flags)]
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
solution = [{k: v.subs(non_inverts) for k, v in s.items()}
for s in solution]
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if symbol_swapped:
symbols = [swap_sym[k] for k in symbols]
if solution:
for i, sol in enumerate(solution):
solution[i] = {swap_sym[k]: v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
def test_assumptions(sol):
return fuzzy_and([check_assumptions(sol[sym], **sym._assumptions)
for sym in sol])
solution = [s for s in solution if test_assumptions(s) is not False]
warn = flags.get('warn', False)
got_None = [s for s in solution if not test_assumptions(s)]
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
can't be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
solution = [s for s in solution if
all(not checksol(den, s, **flags) for den in piecewise_dens)]
#
# done
###########################################################################
# Make sure that a list of solutions is ordered in a canonical way.
solution.sort(key=default_sort_key)
return solution
def _solve(f, symbol, **flags):
"""Return a checked solution for f in terms of one or more of the
symbols. A list (possibly empty) should be returned.
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised.
"""
not_impl_msg = 'No algorithms are implemented to solve equation %s'
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
flags['check'] = checkdens = check = flags.pop('check', True)
# build up solutions if f is a Mul
if f.is_Mul:
result = set()
for m in f.args:
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = denoms(f, [symbol])
result = [s for s in result if
all(not checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end; solutions for each
# factor were already checked and simplified
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for n, (expr, cond) in enumerate(f.args):
candidates = _solve(piecewise_fold(expr), symbol, **flags)
for candidate in candidates:
if candidate in result:
continue
try:
v = (cond == true) or cond.subs({symbol: candidate})
except TypeError:
v = False
if v != false:
# Only include solutions that do not match the condition
# of any previous pieces.
matches_other_piece = False
for other_n, (other_expr, other_cond) in enumerate(f.args): # pragma: no branch
if other_n == n:
break
try:
if other_cond.subs({symbol: candidate}) == true:
matches_other_piece = True
break
except TypeError:
pass
if not matches_other_piece:
v = v == true or v.doit()
if isinstance(v, Relational):
v = v.canonical
result.add(Piecewise(
(candidate, v),
(nan, True)
))
check = False
flags['simplify'] = False
else:
# first see if it really depends on symbol and whether there
# is a linear solution
f_num, sol = solve_linear(f, symbol)
if symbol not in f_num.free_symbols:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
poly = Poly(f_num)
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.denominator
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c != 1: # c could be a Float
return b**ee, c.denominator
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = {b for b in bases if b.is_Function}
trig = {_ for _ in funcs if
isinstance(_, TrigonometricFunction)}
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
# don't check the rewritten form --check
# solutions in the un-rewritten form below
flags['check'] = False
result = _solve(newf, symbol, **flags)
flags['check'] = check
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs({f1: t})
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = []
for sol in cv_sols:
sols.append(cv_inv.subs({t: sol}))
result = list(ordered(sols))
if result is False:
msg = f'multiple generators {gens}'
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs({exp(x): y}) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs({exp(x): y}) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(lambda w: w.is_Pow, _expand).subs({u: t})
assert not ftry.has(symbol)
soln = _solve(ftry, t, **flags)
sols = []
for sol in soln:
for i in inv:
sols.append(i.subs({t: sol}))
result = list(ordered(sols))
else:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
poly = Poly(f_num, gens[0], extension=False)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
deg = poly.degree()
flags['tsolve'] = True
solvers = {k: flags.get(k, True) for k in
('cubics', 'quartics', 'quintics')}
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
soln = poly.all_roots()
else:
soln = list(soln)
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered({i.subs({t: s}) for i in iv for s in soln}))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
u = unrad(f_num, symbol)
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = {inv.subs({isym: xi}) for xi in _solve(eq, isym, **flags)}
else:
rv = set(_solve(eq, symbol, **flags))
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = denoms(f, [symbol])
result = [s for s in result if
all(not checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result
def _solve_system(exprs, symbols, **flags):
"""Return a checked solution for list of exprs in terms of one or more
of the symbols. A list of dict's (possibly empty) should be returned.
"""
if len(symbols) != 1 and len(exprs) == 1:
f = exprs[0]
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
# find first successful solution
failed = []
got_s = set()
result = []
for s in symbols:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
polys = []
surds = []
dens = set()
failed = []
result = [{}]
solved_syms = []
algebraic = False
inversions = False
checkdens = check = flags.get('check', True)
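    # Classify each equation after inversion: polynomial, algebraic with
    # radicals ("surds"), or anything else ("failed", handled last).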
for j, g in enumerate(exprs):
dens.update(denoms(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
if exprs[j] not in (+g, -g):
inversions = True
g = g.as_numer_denom()[0]
poly = g.as_poly(*symbols)
if poly is not None:
polys.append(poly)
elif g.is_algebraic_expr(*symbols):
surds.append(g)
else:
failed.append(g)
if surds:
result = solve_surd_system([_.as_expr() for _ in polys] +
surds, *symbols)
solved_syms = list(set().union(*[set(r) for r in result]))
elif polys and all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary {symbols: values} or None
result = solve_linear_system(matrix, *symbols, **flags)
solved_syms = list(result) if result else []
result = [result] if result else [{}]
elif polys:
result = solve_poly_system(polys, *symbols)
solved_syms = list(set().union(*[set(r) for r in result]))
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = (e.free_symbols - solved_syms) & legal
if sort:
rv = list(rv)
rv.sort(key=default_sort_key)
return rv
solved_syms = set(solved_syms) # set of symbols we have solved for
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
u = Dummy() # used in solution checking
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, {u: eq2}, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
soln = _solve(eq2, s, **flags)
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
# solution for s in being added in-place
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs({s: sol})
# and add this new solution
rnew[s] = sol
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit: # pragma: no cover
raise NotImplementedError(f'could not solve {eq2}')
else:
result = newresult
assert not any(b in bad_results for b in result)
else:
algebraic = True
default_simplify = bool(failed) # rely on system-solvers to simplify
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and (inversions or not algebraic):
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
return [r for r in result if r]
def solve_linear(f, x):
r"""
Solve equation ``f`` wrt variable ``x``.
Returns
=======
tuple
``(x, solution)``, if there is a linear solution, ``(0, 1)`` if
``f`` is independent of the symbol ``x``, ``(0, 0)`` if solution set
any denominator of ``f`` to zero or ``(numerator, denominator)``
of ``f``, if it's a nonlinear expression wrt ``x``.
Examples
========
>>> solve_linear(1/x - y**2, x)
(x, y**(-2))
>>> solve_linear(x**2/y**2 - 3, x)
(x**2 - 3*y**2, y**2)
>>> solve_linear(y, x)
(0, 1)
>>> solve_linear(1/(1/x - 2), x)
(0, 0)
"""
if not x.is_Symbol:
raise ValueError(f'{x} is not a Symbol')
f = f.replace(lambda e: e.is_Derivative, lambda e: e.doit())
n, d = res = f.as_numer_denom()
poly = n.as_poly(x, extension=False)
if poly is not None and poly.is_linear:
a, b = n.expand().coeff(x, 1), n.expand().coeff(x, 0)
if a != 0 and d.subs({x: -b/a}) != 0:
res = (x, -b/a)
if not n.simplify().has(x):
res = Integer(0), Integer(1)
if x == res[0] and any(checksol(_, {x: res[1]}) for _ in denoms(f, [x])):
res = Integer(0), Integer(0)
return res
def minsolve_linear_system(system, *symbols, **flags):
r"""Find a particular solution to a linear system.
In particular, try to find a solution with the minimal possible number
of non-zero variables. This is a very computationally hard problem.
Parameters
==========
system : Matrix
Nx(M+1) matrix, which means it has to be in augmented form.
\*symbols : list
List of M Symbol’s.
\*\*flags : dict
A dictionary of following parameters:
quick : boolean, optional
If True, a heuristic is used. Otherwise (default) a naive
algorithm with exponential complexity is used.
"""
quick = flags.get('quick', False)
# Check if there are any non-zero solutions at all
s0 = solve_linear_system(system, *symbols, **flags)
if not s0 or all(v == 0 for v in s0.values()):
return s0
if quick:
# We just solve the system and try to heuristically find a nice
# solution.
s = solve_linear_system(system, *symbols)
def update(determined, solution):
delete = []
for k, v in solution.items():
solution[k] = v.subs(determined)
if not solution[k].free_symbols:
delete.append(k)
determined[k] = solution[k]
for k in delete:
del solution[k]
determined = {}
update(determined, s)
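        # Greedy pass: repeatedly pick the remaining expression with the
        # most free symbols and pin one of its symbols (preferring zero),
        # until every symbol is determined.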
while s:
# NOTE sort by default_sort_key to get deterministic result
k = max((k for k in s.values()),
key=lambda x: (len(x.free_symbols), default_sort_key(x)))
x = max(k.free_symbols, key=default_sort_key)
if len(k.free_symbols) != 1:
determined[x] = Integer(0)
else:
val = solve(k)[0][x]
if val == 0 and all(v.subs({x: val}) == 0 for v in s.values()):
determined[x] = Integer(1)
else:
determined[x] = val
update(determined, s)
return determined
else:
# We try to select n variables which we want to be non-zero.
# All others will be assumed zero. We try to solve the modified system.
# If there is a non-trivial solution, just set the free variables to
# one. If we do this for increasing n, trying all combinations of
# variables, we will find an optimal solution.
# We speed up slightly by starting at one less than the number of
# variables the quick method manages.
from itertools import combinations
from ..utilities.misc import debug
N = len(symbols)
bestsol = minsolve_linear_system(system, *symbols, quick=True)
n0 = len([x for x in bestsol.values() if x != 0])
for n in range(n0 - 1, 1, -1):
debug(f'minsolve: {n}')
thissol = None
for nonzeros in combinations(list(range(N)), n):
subm = Matrix([system[:, i].T for i in nonzeros] + [system[:, -1].T]).T
s = solve_linear_system(subm, *[symbols[i] for i in nonzeros])
if s and not all(v == 0 for v in s.values()):
subs = [(symbols[v], Integer(1)) for v in nonzeros]
for k, v in s.items():
s[k] = v.subs(subs)
for sym in symbols:
if sym not in s:
if symbols.index(sym) in nonzeros:
s[sym] = Integer(1)
else:
s[sym] = Integer(0)
thissol = s
break
if thissol is None:
break
bestsol = thissol
return bestsol
# these are functions that have multiple inverse values per period
multi_inverses = {
sin: lambda x: (asin(x), pi - asin(x)),
cos: lambda x: (acos(x), 2*pi - acos(x)),
}
def _tsolve(eq, sym, **flags):
"""
Helper for _solve that solves a transcendental equation with respect
to the given symbol. Various equations containing powers and logarithms,
can be solved.
There is currently no guarantee that all solutions will be returned or
that a real solution will be favored over a complex one.
Either a list of potential solutions will be returned or None will be
returned (in the case that no method was known to get a solution
for the equation). All other errors (like the inability to cast an
expression as a Poly) are unhandled.
Examples
========
>>> _tsolve(3**(2*x + 5) - 4, x)
[-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)]
>>> _tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
from .bivariate import bivariate_type, _solve_lambert, _filtered_gens
if 'tsolve_saw' not in flags:
flags['tsolve_saw'] = []
if eq in flags['tsolve_saw']:
return
else:
flags['tsolve_saw'].append(eq)
rhs, lhs = _invert(eq, sym)
if lhs == sym:
return [rhs]
try:
if lhs.is_Add:
# it's time to try factoring; powdenest is used
# to try get powers in standard form for better factoring
f = factor(powdenest(lhs - rhs))
if f.is_Mul:
return _solve(f, sym, **flags)
if rhs:
f = logcombine(lhs, force=flags.get('force', True))
if f.count(log) != lhs.count(log):
if isinstance(f, log):
return _solve(f.args[0] - exp(rhs), sym, **flags)
else:
raise NotImplementedError
elif lhs.is_Pow:
if lhs.exp.is_Integer and lhs - rhs != eq:
return _solve(lhs - rhs, sym, **flags)
elif sym not in lhs.exp.free_symbols:
return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags)
elif not rhs and sym in lhs.exp.free_symbols:
# f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at
# the same place
sol_base = _solve(lhs.base, sym, **flags)
return list(ordered(set(sol_base) -
set(_solve(lhs.exp, sym, **flags))))
elif (rhs != 0 and
lhs.base.is_positive and
lhs.exp.is_extended_real):
return _solve(lhs.exp*log(lhs.base) - log(rhs), sym, **flags)
elif lhs.base == 0 and rhs == 1:
return _solve(lhs.exp, sym, **flags)
elif lhs.is_Mul and rhs.is_positive:
llhs = expand_log(log(lhs))
if llhs.is_Add:
return _solve(llhs - log(rhs), sym, **flags)
elif lhs.is_Function and len(lhs.args) == 1 and lhs.func in multi_inverses:
# sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3))
soln = []
for i in multi_inverses[lhs.func](rhs):
soln.extend(_solve(lhs.args[0] - i, sym, **flags))
return list(ordered(soln))
rewrite = lhs.rewrite(exp)
if rewrite != lhs:
return _solve(rewrite - rhs, sym, **flags)
except NotImplementedError:
pass
# maybe it is a lambert pattern
if flags.pop('bivariate', True):
# lambert forms may need some help being recognized, e.g. changing
# 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1
# to 2**(3*x) + (x*log(2) + 1)**3
g = _filtered_gens(eq.as_poly(), sym)
up_or_log = set()
for gi in g:
if gi.is_Pow and gi.base is E or isinstance(gi, log):
up_or_log.add(gi)
elif gi.is_Pow:
gisimp = powdenest(expand_power_exp(gi))
if gisimp.is_Pow and sym in gisimp.exp.free_symbols:
up_or_log.add(gi)
eq_down = expand_log(expand_power_exp(eq)).subs(
dict(zip(up_or_log, [0]*len(up_or_log))))
eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down))
rhs, lhs = _invert(eq, sym)
if lhs.has(sym):
try:
poly = lhs.as_poly()
g = _filtered_gens(poly, sym)
return _solve_lambert(lhs - rhs, sym, g)
except NotImplementedError:
# maybe it's a convoluted function
if len(g) == 2:
try:
gpu = bivariate_type(lhs - rhs, *g)
if gpu is None:
raise NotImplementedError
g, p, u = gpu
flags['bivariate'] = False
inversion = _tsolve(g - u, sym, **flags)
if inversion:
sol = _solve(p, u, **flags)
return list(ordered({i.subs({u: s})
for i in inversion for s in sol}))
else:
raise NotImplementedError
except NotImplementedError:
pass
else:
pass
if flags.pop('force', True):
flags['force'] = False
pos, reps = posify(lhs - rhs)
for u, s in reps.items():
if s == sym:
break
else:
u = sym
if pos.has(u):
try:
soln = _solve(pos, u, **flags)
return list(ordered([s.subs(reps) for s in soln]))
except NotImplementedError:
pass
def _invert(eq, *symbols, **kwargs):
"""Return tuple (i, d) where ``i`` is independent of ``symbols`` and ``d``
contains symbols. ``i`` and ``d`` are obtained after recursively using
algebraic inversion until an uninvertible ``d`` remains. If there are no
free symbols then ``d`` will be zero. Some (but not necessarily all)
solutions to the expression ``i - d`` will be related to the solutions of
the original expression.
Examples
========
>>> _invert(x - 3)
(3, x)
>>> _invert(3)
(3, 0)
>>> _invert(2*cos(x) - 1)
(1/2, cos(x))
>>> _invert(sqrt(x) - 3)
(3, sqrt(x))
>>> _invert(sqrt(x) + y, x)
(-y, sqrt(x))
>>> _invert(sqrt(x) + y, y)
(-sqrt(x), y)
>>> _invert(sqrt(x) + y, x, y)
(0, sqrt(x) + y)
If there is more than one symbol in a power's base and the exponent
is not an Integer, then the principal root will be used for the
inversion:
>>> _invert(sqrt(x + y) - 2)
(4, x + y)
If the exponent is an integer, setting ``integer_power`` to True
will force the principal root to be selected:
>>> _invert(x**2 - 4, integer_power=True)
(2, x)
"""
eq = sympify(eq)
free = eq.free_symbols
if not symbols:
symbols = free
if not free & set(symbols):
return eq, Integer(0)
dointpow = bool(kwargs.get('integer_power', False))
lhs = eq
rhs = Integer(0)
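    # Repeatedly peel invertible layers off lhs (additive and multiplicative
    # constants, exponentials, powers, invertible functions) onto rhs until
    # lhs stops changing.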
while True:
was = lhs
while True:
indep, dep = lhs.as_independent(*symbols)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep == 0:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep == 1:
break
lhs = dep
rhs /= indep
# collect like-terms in symbols<|fim▁hole|> terms[d].append(i)
if any(len(v) > 1 for v in terms.values()):
args = []
for d, i in terms.items():
if len(i) > 1:
args.append(Add(*i)*d)
else:
args.append(i[0]*d)
lhs = Add(*args)
# if it's a two-term Add with rhs = 0 and two powers we can get the
# dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3
if lhs.is_Add and not rhs and len(lhs.args) == 2 and \
not lhs.is_polynomial(*symbols):
a, b = ordered(lhs.args)
ai, ad = a.as_independent(*symbols)
bi, bd = b.as_independent(*symbols)
if any(i.is_Pow for i in (ad, bd)):
a_base, a_exp = ad.as_base_exp()
b_base, b_exp = bd.as_base_exp()
if a_base == b_base:
# a = -b
lhs = powsimp(powdenest(ad/bd))
rhs = -bi/ai
else:
rat = ad/bd
_lhs = powsimp(ad/bd)
if _lhs != rat:
lhs = _lhs
rhs = -bi/ai
if ai*bi == -1:
if all(
isinstance(i, Function) for i in (ad, bd)) and \
ad.func == bd.func and len(ad.args) == len(bd.args):
if len(ad.args) == 1:
lhs = ad.args[0] - bd.args[0]
else:
# should be able to solve
# f(x, y) == f(2, 3) -> x == 2
# f(x, x + y) == f(2, 3) -> x == 2 or x == 3 - y
raise NotImplementedError('equal function with more than 1 argument')
elif lhs.is_Mul and any(a.is_Pow for a in lhs.args):
lhs = powsimp(powdenest(lhs))
if lhs.is_Function:
if hasattr(lhs, 'inverse') and len(lhs.args) == 1:
# -1
# f(x) = g -> x = f (g)
#
# /!\ inverse should not be defined if there are multiple values
# for the function -- these are handled in _tsolve
#
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
elif isinstance(lhs, atan2):
y, x = lhs.args
lhs = 2*atan(y/(sqrt(x**2 + y**2) + x))
if lhs.is_Pow and lhs.base is E:
rhs = log(rhs)
lhs = lhs.exp
if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0:
lhs = 1/lhs
rhs = 1/rhs
# base**a = b -> base = b**(1/a) if
# a is an Integer and dointpow=True (this gives real branch of root)
# a is not an Integer and the equation is multivariate and the
# base has more than 1 symbol in it
    # The rationale for this is that right now the multi-system solver
    # doesn't try to resolve generators to see, for example, if the whole
# system is written in terms of sqrt(x + y) so it will just fail, so we
# do that step here.
if lhs.is_Pow and (
lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and
len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1):
rhs = rhs**(1/lhs.exp)
lhs = lhs.base
if lhs == was:
break
return rhs, lhs<|fim▁end|>
|
if lhs.is_Add:
terms = defaultdict(list)
for a in lhs.args:
i, d = a.as_independent(*symbols)
|
<|file_name|>random.cpp<|end_file_name|><|fim▁begin|>/*************************************************************************
> File Name: random.cpp
> Author: qiaoyihan
> Email: yihqiao@126
> Created Time: Sun May 15 11:20:00 2016
************************************************************************/
#include<iostream>
#include<string>
#include<cstdlib>
#include<ctime>
using std::cout;
using std::endl;
using std::string;
int main()<|fim▁hole|> std::srand(std::time(0)); // use current time as seed for random generator
cout << std::rand() % 3 << endl;
return 0;
}<|fim▁end|>
|
{
|
<|file_name|>security.go<|end_file_name|><|fim▁begin|>package security
import (
"crypto/md5"
"fmt"
"golang.org/x/crypto/blowfish"
"log"
"bytes"
"crypto/rand"
"crypto/rsa"
"os"
"encoding/gob"
"encoding/pem"
"crypto/x509"
"time"
"math/big"
"crypto/x509/pkix"
)
func Md5Hash(str string) {
hash := md5.New()
bts := []byte(str + "\n")
hash.Write(bts)
hashVal := hash.Sum(nil)
hashSize := hash.Size()
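	// print the 16-byte digest as four big-endian 32-bit words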
for n := 0; n < hashSize; n += 4 {
var val uint32
		val = uint32(hashVal[n])<<24 +
			uint32(hashVal[n+1])<<16 +
			uint32(hashVal[n+2])<<8 +
			uint32(hashVal[n+3])
fmt.Printf("%x", val)
}
fmt.Println()
}
// Note: blowfish is a 64-bit block cipher; this demo only encrypts and
// decrypts the first 8-byte block, so inputs longer than 8 bytes are truncated.
func BlowFish(str string) {
key := []byte("super secret and long key")
cipher, err := blowfish.NewCipher(key)
if err != nil {
log.Fatal(err)
}
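	// pad the plaintext so at least one full 8-byte cipher block is available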
src := []byte(str + "\n\n\n\n\n\n\n")
var enc [256]byte
cipher.Encrypt(enc[0:], src)
fmt.Println("Encoded", enc)
var decrypt [8] byte
cipher.Decrypt(decrypt[0:], enc[0:])
result := bytes.NewBuffer(nil)
result.Write(decrypt[0:8])
fmt.Println(string(result.Bytes()))
}
func GenRsaKey() {
reader := rand.Reader
bitSize := 512
key, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
log.Fatal(err)
}
fmt.Println("Private key primes", key.Primes[0].String(), key.Primes[1].String())
fmt.Println("Private key exponent", key.D.String())
publicKey := key.PublicKey
fmt.Println("Public key modulus", publicKey.N.String())
fmt.Println("Public key exponent", publicKey.E)
saveGobKey("private.key", key)
saveGobKey("public.key", publicKey)
savePemKey("private.pem", key)
}
func LoadRsaKey() {
var key rsa.PrivateKey
loadKey("private.key", &key)
fmt.Println("Private key primes", key.Primes[0].String(), key.Primes[1].String())
fmt.Println("Private key exponent", key.D.String())
var publicKey rsa.PublicKey
loadKey("public.key", &publicKey)
fmt.Println("Public key modulus", publicKey.N.String())<|fim▁hole|>
func GenX509() {
random := rand.Reader
var key rsa.PrivateKey
loadKey("private.key", &key)
now := time.Now()
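	// validity window: time.Duration counts nanoseconds, so this literal is one year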
then := now.Add(60 * 60 * 24 * 365 * 1000 * 1000 * 1000)
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: "borscht.com",
Organization: []string{"Borscht Systems AG"},
},
NotBefore: now,
NotAfter: then,
SubjectKeyId: []byte{1, 2, 3, 4},
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
BasicConstraintsValid: true,
IsCA: true,
DNSNames: []string{"borscht.com", "localhost"},
}
derBytes, err := x509.CreateCertificate(random, &template, &template, &key.PublicKey, &key)
if err != nil {
log.Fatal(err)
}
certCerFile, err := os.Create("certificate.cer")
if err != nil {
log.Fatal(err)
}
certCerFile.Write(derBytes)
certCerFile.Close()
certPemFile, err := os.Create("certificate.pem")
if err != nil {
log.Fatal(err)
}
pem.Encode(certPemFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certPemFile.Close()
keyPemFile, err := os.Create("private.pem")
if err != nil {
log.Fatal(err)
}
pem.Encode(keyPemFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(&key)})
keyPemFile.Close()
}
func LoadX509() {
certCerFile, err := os.Open("certificate.cer")
if err != nil {
log.Fatal(err)
}
derBytes := make([]byte, 1000)
count, err := certCerFile.Read(derBytes)
if err != nil {
log.Fatal(err)
}
certCerFile.Close()
	// trim the buffer to the number of bytes actually read
cert, err := x509.ParseCertificate(derBytes[0:count])
if err != nil {
log.Fatal(err)
}
fmt.Printf("Name %s\n", cert.Subject.CommonName)
fmt.Printf("Not before %s\n", cert.NotBefore.String())
fmt.Printf("Not after %s\n", cert.NotAfter.String())
}
func saveGobKey(fileName string, key interface{}) {
outFile, err := os.Create(fileName)
if err != nil {
log.Fatal(err)
}
encoder := gob.NewEncoder(outFile)
err = encoder.Encode(key)
if err != nil {
log.Fatal(err)
}
outFile.Close()
}
func savePemKey(fileName string, key *rsa.PrivateKey) {
outFile, err := os.Create(fileName)
if err != nil {
log.Fatal(err)
}
var privateKey = &pem.Block{Type: "RSA Private Key", Bytes: x509.MarshalPKCS1PrivateKey(key)}
pem.Encode(outFile, privateKey)
outFile.Close()
}
func loadKey(fileName string, key interface{}) {
inFile, err := os.Open(fileName)
if err != nil {
log.Fatal(err)
}
decoder := gob.NewDecoder(inFile)
err = decoder.Decode(key)
if err != nil {
log.Fatal(err)
}
inFile.Close()
}<|fim▁end|>
|
fmt.Println("Public key exponent", publicKey.E)
}
|
<|file_name|>xqueue.py<|end_file_name|><|fim▁begin|>"""
Fixture to configure XQueue response.
"""
import json
import requests
from common.test.acceptance.fixtures import XQUEUE_STUB_URL
class XQueueResponseFixtureError(Exception):
"""
Error occurred while configuring the stub XQueue.
"""
pass
class XQueueResponseFixture(object):
"""
Configure the XQueue stub's response to submissions.
"""
def __init__(self, pattern, response_dict):
"""
Configure XQueue stub to POST `response_dict` (a dictionary)
back to the LMS when it receives a submission that contains the string
`pattern`.
Remember that there is one XQueue stub shared by all the tests;
if possible, you should have tests use unique queue names
to avoid conflict between tests running in parallel.
"""
self._pattern = pattern
self._response_dict = response_dict
def install(self):
"""
Configure the stub via HTTP.
"""
url = XQUEUE_STUB_URL + "/set_config"
# Configure the stub to respond to submissions to our queue
payload = {self._pattern: json.dumps(self._response_dict)}
response = requests.put(url, data=payload)
<|fim▁hole|> raise XQueueResponseFixtureError(
u"Could not configure XQueue stub for queue '{1}'. Status code: {2}".format(
self._pattern, self._response_dict))<|fim▁end|>
|
if not response.ok:
|
<|file_name|>DevelopmentLoaderConfig.java<|end_file_name|><|fim▁begin|>/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.server;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import io.airlift.configuration.Config;
import io.airlift.resolver.ArtifactResolver;
import javax.validation.constraints.NotNull;
import java.util.List;
public class DevelopmentLoaderConfig
{
private static final Splitter SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();
private List<String> plugins = ImmutableList.of();
private String mavenLocalRepository = ArtifactResolver.USER_LOCAL_REPO;
private List<String> mavenRemoteRepository = ImmutableList.of(ArtifactResolver.MAVEN_CENTRAL_URI);
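// For illustration, hypothetical etc/config.properties entries that the
// @Config annotations below map onto these fields:
//   plugin.bundles=../trino-example-http/pom.xml
//   maven.repo.local=/home/user/.m2/repository
//   maven.repo.remote=https://repo1.maven.org/maven2/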
public List<String> getPlugins()
{
return plugins;
}<|fim▁hole|> this.plugins = ImmutableList.copyOf(plugins);
return this;
}
@Config("plugin.bundles")
public DevelopmentLoaderConfig setPlugins(String plugins)
{
this.plugins = SPLITTER.splitToList(plugins);
return this;
}
@NotNull
public String getMavenLocalRepository()
{
return mavenLocalRepository;
}
@Config("maven.repo.local")
public DevelopmentLoaderConfig setMavenLocalRepository(String mavenLocalRepository)
{
this.mavenLocalRepository = mavenLocalRepository;
return this;
}
@NotNull
public List<String> getMavenRemoteRepository()
{
return mavenRemoteRepository;
}
public DevelopmentLoaderConfig setMavenRemoteRepository(List<String> mavenRemoteRepository)
{
this.mavenRemoteRepository = mavenRemoteRepository;
return this;
}
@Config("maven.repo.remote")
public DevelopmentLoaderConfig setMavenRemoteRepository(String mavenRemoteRepository)
{
this.mavenRemoteRepository = ImmutableList.copyOf(Splitter.on(',').omitEmptyStrings().trimResults().split(mavenRemoteRepository));
return this;
}
}<|fim▁end|>
|
public DevelopmentLoaderConfig setPlugins(List<String> plugins)
{
|
<|file_name|>PlayerController.py<|end_file_name|><|fim▁begin|>import json
from urllib.parse import unquote
from gi.repository import Totem
from Controllers.BaseController import BaseController
from OpenSubtitleHasher import *
class PlayerController(BaseController):
def __init__(self, server, request):
BaseController.__init__(self, server, request)
self.player = self.server.player
self.mrl = self.player.get_current_mrl()
def actionStatus(self):
self.dumpPlayerStatus()
def actionVolume(self, volume):
self.player.set_volume(float(volume))
self.dumpPlayerStatus()<|fim▁hole|> self.player.remote_command(Totem.RemoteCommand.SEEK_FORWARD, self.mrl)
self.dumpPlayerStatus()
def actionBackward(self):
self.player.remote_command(Totem.RemoteCommand.SEEK_BACKWARD, self.mrl)
self.dumpPlayerStatus()
def actionSeek(self, time):
self.player.seek_time(int(time), True)
self.dumpPlayerStatus()
def actionToggleFullscreen(self):
self.player.remote_command(Totem.RemoteCommand.FULLSCREEN, self.mrl)
self.dumpPlayerStatus()
def actionNext(self):
self.player.seek_next()
self.dumpPlayerStatus()
def actionPrevious(self):
self.player.seek_previous()
self.dumpPlayerStatus()
def actionPlayPause(self):
self.player.play_pause()
self.dumpPlayerStatus()
def actionStop(self):
self.player.stop()
self.dumpPlayerStatus()
def actionRemote(self, command):
command = Totem.RemoteCommand.__dict__[command]
self.player.remote_command(command, self.mrl)
self.dumpPlayerStatus()
def actionHash(self):
if self.mrl is None:
self.request.send_response(404)
self.request.send_header('Content-Type', 'text/plain')
self.request.end_headers()
self.request.wfile.write(bytes('No movie found', 'UTF-8'))
return
filename = unquote(self.mrl[7:])
filehash = getOpenSubtitleHash(filename)
self.request.send_response(200)
self.request.send_header('Content-Type', 'text/plain')
self.request.end_headers()
self.request.wfile.write(bytes(filehash, 'UTF-8'))
def dumpPlayerStatus(self):
self.request.send_response(200)
self.request.send_header('Content-Type', 'application/json')
self.request.end_headers()
self.request.wfile.write(bytes(json.dumps(self.getPlayerStatus(), indent=4), 'UTF-8'))
def getPlayerStatus(self):
status = {
"current_time": self.player.props.current_time,
"stream_length": self.player.props.stream_length,
"state": self.player.props.playing,
"volume": self.player.get_volume(),
"display_name": self.player.props.current_display_name,
"fullscreen": self.player.props.fullscreen,
"mrl": self.player.props.current_mrl,
"content_type": self.player.props.current_content_type
}
return status<|fim▁end|>
|
def actionForward(self):
|
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.admin import site as admin_site
class Language(models.Model):
"""
Language model
"""
name = models.CharField(max_length=64)
code = models.CharField(max_length=6, db_column='shortcut')
visible = models.BooleanField(default=True)<|fim▁hole|> return "%s - %s" % (self.code, self.name)
admin_site.register(Language)<|fim▁end|>
|
def __str__(self):
|
<|file_name|>chat.ts<|end_file_name|><|fim▁begin|>// Copyright (c) ppy Pty Ltd <[email protected]>. Licensed under the GNU Affero General Public License v3.0.
// See the LICENCE file in the repository root for full licence text.
import { PresenceJSON, SendToJSON } from 'chat/chat-api-responses';
import MainView from 'chat/main-view';
import * as _ from 'lodash';
import Channel from 'models/chat/channel';
import core from 'osu-core-singleton';
const dataStore = core.dataStore;
const presence: PresenceJSON = osu.parseJson('json-presence');
if (!_.isEmpty(presence)) {
// initial population of channel/presence data
dataStore.channelStore.updatePresence(presence);
}
reactTurbolinks.register('chat', MainView, () => {
let initialChannel: number | undefined;
const sendTo: SendToJSON = osu.parseJson('json-sendto');
if (!_.isEmpty(sendTo)) {
const target = dataStore.userStore.getOrCreate(sendTo.target.id, sendTo.target); // pre-populate userStore with target
let channel = dataStore.channelStore.findPM(target.id);
if (channel) {
initialChannel = channel.channelId;
} else if (!target.is(core.currentUser)) {
channel = Channel.newPM(target);
channel.moderated = !sendTo.can_message; // TODO: move can_message to a user prop?
dataStore.channelStore.channels.set(channel.channelId, channel);
dataStore.channelStore.loaded = true;
initialChannel = channel.channelId;
}
} else if (dataStore.channelStore.loaded) {
const hasNonPmChannels = dataStore.channelStore.nonPmChannels.length > 0;
const hasPmChannels = dataStore.channelStore.pmChannels.length > 0;
if (hasNonPmChannels) {
initialChannel = dataStore.channelStore.nonPmChannels[0].channelId;<|fim▁hole|>
return {
dataStore: core.dataStore,
initialChannel,
worker: core.chatWorker,
};
});<|fim▁end|>
|
} else if (hasPmChannels) {
initialChannel = dataStore.channelStore.pmChannels[0].channelId;
}
}
|
<|file_name|>cpu.rs<|end_file_name|><|fim▁begin|>use void::Void;
use io::{self, Write};
pub use cpu::*;
use arch::keyboard::Keyboard;
static DEFAULT_KEYBOARD: Keyboard = Keyboard {
callback: ::put_char,
control_port: Port(0x64),
data_port: Port(0x60),
};
pub unsafe fn init() {
set_gdt(&*GDT);
// Reload segment registers after lgdt
set_cs(SegmentSelector::new(1, PrivilegeLevel::Ring0));
let ds = SegmentSelector::new(2, PrivilegeLevel::Ring0);
set_ds(ds);
set_es(ds);
set_fs(ds);
set_gs(ds);
set_ss(ds);
PIC::master().remap_to(0x20);
PIC::slave().remap_to(0x28);
set_idt(&*IDT);
}
fn acknowledge_irq(_: u32) {
PIC::master().control_port.out8(0x20); //TODO(ryan) ugly and only for master PIC
}
pub unsafe fn test_interrupt() {
asm!("int 0x15" :::: "volatile", "intel");
}
macro_rules! make_handler {
($num:expr, $name:ident, $body:expr) => {{
fn body () {
$body
}
#[naked]
unsafe extern "C" fn $name () {
asm!(concat!(
"push esp", "\n\t",
"mov ebp, esp", "\n\t",
"pusha", "\n\t",
"call $0", "\n\t",
"popa", "\n\t",
"leave", "\n\t",
"iretd", "\n\t")
:: "s" (body as fn()) :: "volatile", "intel");
}
IdtEntry::new($name, PrivilegeLevel::Ring0, true)
}};
($num:expr, $name:ident, EX, $title:expr) => {
make_handler!($num, $name, {
panic!("Exception {:#04x}: {}", $num, $title)
})
};
($num:expr, $name:ident) => {
make_handler!($num, $name, {
panic!("interrupt with no handler: {:#04x}", $num)
})
}
}
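// For illustration (a sketch of what the arms above expand to):
// `make_handler!(0x21, interrupt_handler_0x21, { ... })` defines a naked
// `unsafe extern "C" fn interrupt_handler_0x21()` whose inline asm saves the
// registers, calls the Rust body on its own frame, restores the registers and
// returns with `iretd`, then wraps the function pointer in an `IdtEntry`.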
// TODO should be real statics
lazy_static! {
static ref GDT: [GdtEntry; 3] = {[
GdtEntry::NULL,
GdtEntry::new(0 as *const (),
0xFFFFFFFF,
GdtAccess::Executable | GdtAccess::NotTss,
PrivilegeLevel::Ring0),
GdtEntry::new(0 as *const (),
0xFFFFFFFF,
GdtAccess::Writable | GdtAccess::NotTss,
PrivilegeLevel::Ring0),
//gdt.add_entry( = {.base=&myTss, .limit=sizeof(myTss), .type=0x89}; // You can use LTR(0x18)
]};
static ref IDT: [IdtEntry; 256] = {[
make_handler!(0x00, interrupt_handler_0x00, EX, "Divide by zero"),
make_handler!(0x01, interrupt_handler_0x01, EX, "Debug"),
make_handler!(0x02, interrupt_handler_0x02, EX, "Non-maskable Interrupt"),
make_handler!(0x03, interrupt_handler_0x03, EX, "Breakpoint"),
make_handler!(0x04, interrupt_handler_0x04, EX, "Overflow"),
make_handler!(0x05, interrupt_handler_0x05, EX, "Bound Range Exceeded"),
make_handler!(0x06, interrupt_handler_0x06, EX, "Invalid Opcode"),
make_handler!(0x07, interrupt_handler_0x07, EX, "Device Not Available"),
make_handler!(0x08, interrupt_handler_0x08, EX, "Double Fault"),
make_handler!(0x09, interrupt_handler_0x09),
make_handler!(0x0a, interrupt_handler_0x0a, EX, "Invalid TSS"),
make_handler!(0x0b, interrupt_handler_0x0b, EX, "Segment Not Present"),
make_handler!(0x0c, interrupt_handler_0x0c, EX, "Stack-Segment Fault"),
make_handler!(0x0d, interrupt_handler_0x0d, EX, "General Protection Fault"),
make_handler!(0x0e, interrupt_handler_0x0e, EX, "Page Fault"),
make_handler!(0x0f, interrupt_handler_0x0f, EX, "x87 Floating Point Exception"),
make_handler!(0x10, interrupt_handler_0x10),
make_handler!(0x11, interrupt_handler_0x11),
make_handler!(0x12, interrupt_handler_0x12),
make_handler!(0x13, interrupt_handler_0x13),
make_handler!(0x14, interrupt_handler_0x14),
make_handler!(0x15, interrupt_handler_0x15, {
debug!("In test interrupt handler");
}),
make_handler!(0x16, interrupt_handler_0x16),
make_handler!(0x17, interrupt_handler_0x17),
make_handler!(0x18, interrupt_handler_0x18),
make_handler!(0x19, interrupt_handler_0x19),
make_handler!(0x1a, interrupt_handler_0x1a),
make_handler!(0x1b, interrupt_handler_0x1b),
make_handler!(0x1c, interrupt_handler_0x1c),
make_handler!(0x1d, interrupt_handler_0x1d),
make_handler!(0x1e, interrupt_handler_0x1e),
make_handler!(0x1f, interrupt_handler_0x1f),
make_handler!(0x20, interrupt_handler_0x20, {
// Timer, just ignore
}),
make_handler!(0x21, interrupt_handler_0x21, {
DEFAULT_KEYBOARD.got_interrupted();
acknowledge_irq(0x21);
}),
make_handler!(0x22, interrupt_handler_0x22),
make_handler!(0x23, interrupt_handler_0x23),
make_handler!(0x24, interrupt_handler_0x24),
make_handler!(0x25, interrupt_handler_0x25),
make_handler!(0x26, interrupt_handler_0x26),
make_handler!(0x27, interrupt_handler_0x27),
make_handler!(0x28, interrupt_handler_0x28),
make_handler!(0x29, interrupt_handler_0x29),
make_handler!(0x2a, interrupt_handler_0x2a),
make_handler!(0x2b, interrupt_handler_0x2b),
make_handler!(0x2c, interrupt_handler_0x2c),
make_handler!(0x2d, interrupt_handler_0x2d),
make_handler!(0x2e, interrupt_handler_0x2e),
make_handler!(0x2f, interrupt_handler_0x2f),
make_handler!(0x30, interrupt_handler_0x30),
make_handler!(0x31, interrupt_handler_0x31),
make_handler!(0x32, interrupt_handler_0x32),
make_handler!(0x33, interrupt_handler_0x33),
make_handler!(0x34, interrupt_handler_0x34),
make_handler!(0x35, interrupt_handler_0x35),
make_handler!(0x36, interrupt_handler_0x36),
make_handler!(0x37, interrupt_handler_0x37),
make_handler!(0x38, interrupt_handler_0x38),
make_handler!(0x39, interrupt_handler_0x39),
make_handler!(0x3a, interrupt_handler_0x3a),
make_handler!(0x3b, interrupt_handler_0x3b),
make_handler!(0x3c, interrupt_handler_0x3c),
make_handler!(0x3d, interrupt_handler_0x3d),
make_handler!(0x3e, interrupt_handler_0x3e),
make_handler!(0x3f, interrupt_handler_0x3f),
make_handler!(0x40, interrupt_handler_0x40),
make_handler!(0x41, interrupt_handler_0x41),
make_handler!(0x42, interrupt_handler_0x42),
make_handler!(0x43, interrupt_handler_0x43),
make_handler!(0x44, interrupt_handler_0x44),
make_handler!(0x45, interrupt_handler_0x45),
make_handler!(0x46, interrupt_handler_0x46),
make_handler!(0x47, interrupt_handler_0x47),
make_handler!(0x48, interrupt_handler_0x48),
make_handler!(0x49, interrupt_handler_0x49),
make_handler!(0x4a, interrupt_handler_0x4a),
make_handler!(0x4b, interrupt_handler_0x4b),
make_handler!(0x4c, interrupt_handler_0x4c),
make_handler!(0x4d, interrupt_handler_0x4d),
make_handler!(0x4e, interrupt_handler_0x4e),
make_handler!(0x4f, interrupt_handler_0x4f),
make_handler!(0x50, interrupt_handler_0x50),
make_handler!(0x51, interrupt_handler_0x51),
make_handler!(0x52, interrupt_handler_0x52),
make_handler!(0x53, interrupt_handler_0x53),
make_handler!(0x54, interrupt_handler_0x54),
make_handler!(0x55, interrupt_handler_0x55),
make_handler!(0x56, interrupt_handler_0x56),
make_handler!(0x57, interrupt_handler_0x57),
make_handler!(0x58, interrupt_handler_0x58),
make_handler!(0x59, interrupt_handler_0x59),
make_handler!(0x5a, interrupt_handler_0x5a),
make_handler!(0x5b, interrupt_handler_0x5b),
make_handler!(0x5c, interrupt_handler_0x5c),
make_handler!(0x5d, interrupt_handler_0x5d),
make_handler!(0x5e, interrupt_handler_0x5e),
make_handler!(0x5f, interrupt_handler_0x5f),
make_handler!(0x60, interrupt_handler_0x60),
make_handler!(0x61, interrupt_handler_0x61),
make_handler!(0x62, interrupt_handler_0x62),
make_handler!(0x63, interrupt_handler_0x63),
make_handler!(0x64, interrupt_handler_0x64),
make_handler!(0x65, interrupt_handler_0x65),
make_handler!(0x66, interrupt_handler_0x66),
make_handler!(0x67, interrupt_handler_0x67),
make_handler!(0x68, interrupt_handler_0x68),
make_handler!(0x69, interrupt_handler_0x69),
make_handler!(0x6a, interrupt_handler_0x6a),
make_handler!(0x6b, interrupt_handler_0x6b),
make_handler!(0x6c, interrupt_handler_0x6c),
make_handler!(0x6d, interrupt_handler_0x6d),
make_handler!(0x6e, interrupt_handler_0x6e),
make_handler!(0x6f, interrupt_handler_0x6f),
make_handler!(0x70, interrupt_handler_0x70),
make_handler!(0x71, interrupt_handler_0x71),
make_handler!(0x72, interrupt_handler_0x72),
make_handler!(0x73, interrupt_handler_0x73),
make_handler!(0x74, interrupt_handler_0x74),
make_handler!(0x75, interrupt_handler_0x75),
make_handler!(0x76, interrupt_handler_0x76),
make_handler!(0x77, interrupt_handler_0x77),
make_handler!(0x78, interrupt_handler_0x78),
make_handler!(0x79, interrupt_handler_0x79),
make_handler!(0x7a, interrupt_handler_0x7a),
make_handler!(0x7b, interrupt_handler_0x7b),
make_handler!(0x7c, interrupt_handler_0x7c),
make_handler!(0x7d, interrupt_handler_0x7d),
make_handler!(0x7e, interrupt_handler_0x7e),
make_handler!(0x7f, interrupt_handler_0x7f),
make_handler!(0x80, interrupt_handler_0x80),
make_handler!(0x81, interrupt_handler_0x81),
make_handler!(0x82, interrupt_handler_0x82),
make_handler!(0x83, interrupt_handler_0x83),
make_handler!(0x84, interrupt_handler_0x84),
make_handler!(0x85, interrupt_handler_0x85),
make_handler!(0x86, interrupt_handler_0x86),
make_handler!(0x87, interrupt_handler_0x87),
make_handler!(0x88, interrupt_handler_0x88),
make_handler!(0x89, interrupt_handler_0x89),
make_handler!(0x8a, interrupt_handler_0x8a),
make_handler!(0x8b, interrupt_handler_0x8b),
make_handler!(0x8c, interrupt_handler_0x8c),
make_handler!(0x8d, interrupt_handler_0x8d),
make_handler!(0x8e, interrupt_handler_0x8e),
make_handler!(0x8f, interrupt_handler_0x8f),
make_handler!(0x90, interrupt_handler_0x90),
make_handler!(0x91, interrupt_handler_0x91),
make_handler!(0x92, interrupt_handler_0x92),
make_handler!(0x93, interrupt_handler_0x93),
make_handler!(0x94, interrupt_handler_0x94),
make_handler!(0x95, interrupt_handler_0x95),
make_handler!(0x96, interrupt_handler_0x96),
make_handler!(0x97, interrupt_handler_0x97),
make_handler!(0x98, interrupt_handler_0x98),
make_handler!(0x99, interrupt_handler_0x99),
make_handler!(0x9a, interrupt_handler_0x9a),
make_handler!(0x9b, interrupt_handler_0x9b),
make_handler!(0x9c, interrupt_handler_0x9c),
make_handler!(0x9d, interrupt_handler_0x9d),
make_handler!(0x9e, interrupt_handler_0x9e),
make_handler!(0x9f, interrupt_handler_0x9f),
make_handler!(0xa0, interrupt_handler_0xa0),
make_handler!(0xa1, interrupt_handler_0xa1),
make_handler!(0xa2, interrupt_handler_0xa2),
make_handler!(0xa3, interrupt_handler_0xa3),
make_handler!(0xa4, interrupt_handler_0xa4),
make_handler!(0xa5, interrupt_handler_0xa5),
make_handler!(0xa6, interrupt_handler_0xa6),
make_handler!(0xa7, interrupt_handler_0xa7),
make_handler!(0xa8, interrupt_handler_0xa8),
make_handler!(0xa9, interrupt_handler_0xa9),
make_handler!(0xaa, interrupt_handler_0xaa),
make_handler!(0xab, interrupt_handler_0xab),
make_handler!(0xac, interrupt_handler_0xac),
make_handler!(0xad, interrupt_handler_0xad),
make_handler!(0xae, interrupt_handler_0xae),
make_handler!(0xaf, interrupt_handler_0xaf),
make_handler!(0xb0, interrupt_handler_0xb0),
make_handler!(0xb1, interrupt_handler_0xb1),
make_handler!(0xb2, interrupt_handler_0xb2),
make_handler!(0xb3, interrupt_handler_0xb3),
make_handler!(0xb4, interrupt_handler_0xb4),
make_handler!(0xb5, interrupt_handler_0xb5),
make_handler!(0xb6, interrupt_handler_0xb6),
make_handler!(0xb7, interrupt_handler_0xb7),
make_handler!(0xb8, interrupt_handler_0xb8),
make_handler!(0xb9, interrupt_handler_0xb9),
make_handler!(0xba, interrupt_handler_0xba),
make_handler!(0xbb, interrupt_handler_0xbb),
make_handler!(0xbc, interrupt_handler_0xbc),
make_handler!(0xbd, interrupt_handler_0xbd),
make_handler!(0xbe, interrupt_handler_0xbe),
make_handler!(0xbf, interrupt_handler_0xbf),
make_handler!(0xc0, interrupt_handler_0xc0),
make_handler!(0xc1, interrupt_handler_0xc1),
make_handler!(0xc2, interrupt_handler_0xc2),
make_handler!(0xc3, interrupt_handler_0xc3),
make_handler!(0xc4, interrupt_handler_0xc4),
make_handler!(0xc5, interrupt_handler_0xc5),
make_handler!(0xc6, interrupt_handler_0xc6),
make_handler!(0xc7, interrupt_handler_0xc7),
make_handler!(0xc8, interrupt_handler_0xc8),
make_handler!(0xc9, interrupt_handler_0xc9),
make_handler!(0xca, interrupt_handler_0xca),
make_handler!(0xcb, interrupt_handler_0xcb),
make_handler!(0xcc, interrupt_handler_0xcc),
make_handler!(0xcd, interrupt_handler_0xcd),
make_handler!(0xce, interrupt_handler_0xce),
make_handler!(0xcf, interrupt_handler_0xcf),
make_handler!(0xd0, interrupt_handler_0xd0),
make_handler!(0xd1, interrupt_handler_0xd1),
make_handler!(0xd2, interrupt_handler_0xd2),
make_handler!(0xd3, interrupt_handler_0xd3),
make_handler!(0xd4, interrupt_handler_0xd4),
make_handler!(0xd5, interrupt_handler_0xd5),
make_handler!(0xd6, interrupt_handler_0xd6),
make_handler!(0xd7, interrupt_handler_0xd7),
make_handler!(0xd8, interrupt_handler_0xd8),
make_handler!(0xd9, interrupt_handler_0xd9),
make_handler!(0xda, interrupt_handler_0xda),
make_handler!(0xdb, interrupt_handler_0xdb),
make_handler!(0xdc, interrupt_handler_0xdc),
make_handler!(0xdd, interrupt_handler_0xdd),
make_handler!(0xde, interrupt_handler_0xde),
make_handler!(0xdf, interrupt_handler_0xdf),
make_handler!(0xe0, interrupt_handler_0xe0),
make_handler!(0xe1, interrupt_handler_0xe1),
make_handler!(0xe2, interrupt_handler_0xe2),
make_handler!(0xe3, interrupt_handler_0xe3),
make_handler!(0xe4, interrupt_handler_0xe4),
make_handler!(0xe5, interrupt_handler_0xe5),
make_handler!(0xe6, interrupt_handler_0xe6),
make_handler!(0xe7, interrupt_handler_0xe7),
make_handler!(0xe8, interrupt_handler_0xe8),
make_handler!(0xe9, interrupt_handler_0xe9),
make_handler!(0xea, interrupt_handler_0xea),
make_handler!(0xeb, interrupt_handler_0xeb),
make_handler!(0xec, interrupt_handler_0xec),
make_handler!(0xed, interrupt_handler_0xed),
make_handler!(0xee, interrupt_handler_0xee),
make_handler!(0xef, interrupt_handler_0xef),
make_handler!(0xf0, interrupt_handler_0xf0),
make_handler!(0xf1, interrupt_handler_0xf1),
make_handler!(0xf2, interrupt_handler_0xf2),
make_handler!(0xf3, interrupt_handler_0xf3),
make_handler!(0xf4, interrupt_handler_0xf4),
make_handler!(0xf5, interrupt_handler_0xf5),
make_handler!(0xf6, interrupt_handler_0xf6),
make_handler!(0xf7, interrupt_handler_0xf7),
make_handler!(0xf8, interrupt_handler_0xf8),
make_handler!(0xf9, interrupt_handler_0xf9),
make_handler!(0xfa, interrupt_handler_0xfa),
make_handler!(0xfb, interrupt_handler_0xfb),
make_handler!(0xfc, interrupt_handler_0xfc),
make_handler!(0xfd, interrupt_handler_0xfd),
make_handler!(0xfe, interrupt_handler_0xfe),
make_handler!(0xff, interrupt_handler_0xff),
]};
}
struct PIC {
control_port: Port,
mask_port: Port,
is_master: bool
}
impl PIC {
fn master() -> PIC {
PIC { control_port: Port::new(0x20), mask_port: Port::new(0x21), is_master: true}
}
fn slave() -> PIC {
PIC { control_port: Port::new(0xA0), mask_port: Port::new(0xA1), is_master: false}
}
unsafe fn remap_to(&mut self, start: u8) {
let icw1 = 0x11;
let icw4 = 0x1;
let enable_all = 0x00;
let typ = if self.is_master { 0x2 } else { 0x4 };
self.control_port.out8(icw1);
self.mask_port.write(&[start, typ, icw4, enable_all]).ok();
}
}
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Debug)]
pub struct Port(u16);
impl Port {
pub const fn new(number: u16) -> Port {
Port(number)
}
pub fn in8(self) -> u8 {
unsafe { ::cpu::in8(self.0) }
}
pub fn out8(self, num: u8) {
unsafe { ::cpu::out8(self.0, num) }
}
pub fn in16(self) -> u16 {
unsafe { ::cpu::in16(self.0) }
}
pub fn out16(self, num: u16) {
unsafe { ::cpu::out16(self.0, num) }
}
pub fn in32(self) -> u32 {
unsafe { ::cpu::in32(self.0) }
}
pub fn out32(self, num: u32) {
unsafe { ::cpu::out32(self.0, num) }
}
pub fn io_wait() {
Port::new(0x80).out8(0);
}
}
impl io::Read for Port
{
type Err = Void;<|fim▁hole|> fn read(&mut self, buf: &mut [u8]) -> Result<usize, Void> {
Ok(match *buf {
[] => 0,
[ref mut a, _..] => {
*a = self.in8();
1
}
})
}
fn read_all<E>(&mut self, buf: &mut [u8]) -> Result<(), E> {
for el in buf.iter_mut() {
*el = self.in8();
}
Ok(())
}
}
impl io::Write for Port
{
type Err = Void;
fn write(&mut self, buf: &[u8]) -> Result<usize, Void> {
Ok(match *buf {
[] => 0,
[a, _..] => {
self.out8(a);
1
}
})
}
fn write_all<E>(&mut self, buf: &[u8]) -> Result<(), E> {
for el in buf.iter() {
self.out8(*el);
}
Ok(())
}
}<|fim▁end|>
| |
<|file_name|>instance_nexus.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ScriptMgr.h"
#include "InstanceScript.h"
#include "nexus.h"
#include "Player.h"
#define NUMBER_OF_ENCOUNTERS 4
enum Factions
{
FACTION_HOSTILE_FOR_ALL = 16
};
class instance_nexus : public InstanceMapScript
{
public:
instance_nexus() : InstanceMapScript("instance_nexus", 576) { }
InstanceScript* GetInstanceScript(InstanceMap* map) const override
{
return new instance_nexus_InstanceMapScript(map);
}
struct instance_nexus_InstanceMapScript : public InstanceScript
{
instance_nexus_InstanceMapScript(Map* map) : InstanceScript(map)
{
SetHeaders(DataHeader);
memset(&m_auiEncounter, 0, sizeof(m_auiEncounter));
}
uint32 m_auiEncounter[NUMBER_OF_ENCOUNTERS];
ObjectGuid Anomalus;
ObjectGuid Keristrasza;
ObjectGuid AnomalusContainmentSphere;
ObjectGuid OrmoroksContainmentSphere;
ObjectGuid TelestrasContainmentSphere;
std::string strInstData;
void OnCreatureCreate(Creature* creature) override
{
Map::PlayerList const &players = instance->GetPlayers();
uint32 TeamInInstance = 0;
if (!players.isEmpty())
{
if (Player* player = players.begin()->GetSource())
TeamInInstance = player->GetTeam();
}
switch (creature->GetEntry())
{
case 26763:
Anomalus = creature->GetGUID();
break;
case 26723:
Keristrasza = creature->GetGUID();
break;
// Alliance NPCs are spawned by default; if you are Alliance, you will fight against Horde NPCs.
case 26800:
{
if (ServerAllowsTwoSideGroups())
creature->setFaction(FACTION_HOSTILE_FOR_ALL);
if (TeamInInstance == ALLIANCE)
creature->UpdateEntry(26799);
break;
}
case 26802:
{
if (ServerAllowsTwoSideGroups())
creature->setFaction(FACTION_HOSTILE_FOR_ALL);
if (TeamInInstance == ALLIANCE)
creature->UpdateEntry(26801);
break;
}
case 26805:
{
if (ServerAllowsTwoSideGroups())
creature->setFaction(FACTION_HOSTILE_FOR_ALL);
if (TeamInInstance == ALLIANCE)
creature->UpdateEntry(26803);
break;
}
case 27949:
{
if (ServerAllowsTwoSideGroups())
creature->setFaction(FACTION_HOSTILE_FOR_ALL);
if (TeamInInstance == ALLIANCE)
creature->UpdateEntry(27947);
break;
}
case 26796:
{
if (ServerAllowsTwoSideGroups())
creature->setFaction(FACTION_HOSTILE_FOR_ALL);
if (TeamInInstance == ALLIANCE)
creature->UpdateEntry(26798);
break;
}
}
}
void OnGameObjectCreate(GameObject* go) override
{
switch (go->GetEntry())
{
case 188527:
{
AnomalusContainmentSphere = go->GetGUID();
if (m_auiEncounter[1] == DONE)
go->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
break;
}
case 188528:
{
OrmoroksContainmentSphere = go->GetGUID();
if (m_auiEncounter[2] == DONE)
go->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
break;
}
case 188526:
{
TelestrasContainmentSphere = go->GetGUID();
if (m_auiEncounter[0] == DONE)
go->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
break;
}
}
}
uint32 GetData(uint32 identifier) const override
{
switch (identifier)
{
case DATA_MAGUS_TELESTRA_EVENT: return m_auiEncounter[0];
case DATA_ANOMALUS_EVENT: return m_auiEncounter[1];
case DATA_ORMOROK_EVENT: return m_auiEncounter[2];
case DATA_KERISTRASZA_EVENT: return m_auiEncounter[3];
}
return 0;
}
void SetData(uint32 identifier, uint32 data) override
{
switch (identifier)
{
case DATA_MAGUS_TELESTRA_EVENT:
{
if (data == DONE)
{
GameObject* Sphere = instance->GetGameObject(TelestrasContainmentSphere);
if (Sphere)
Sphere->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
}
m_auiEncounter[0] = data;
break;
}
case DATA_ANOMALUS_EVENT:
{
if (data == DONE)
{
if (GameObject* Sphere = instance->GetGameObject(AnomalusContainmentSphere))
Sphere->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
}
m_auiEncounter[1] = data;
break;
}
case DATA_ORMOROK_EVENT:
{
if (data == DONE)
{
if (GameObject* Sphere = instance->GetGameObject(OrmoroksContainmentSphere))
Sphere->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
}
m_auiEncounter[2] = data;
break;
}
case DATA_KERISTRASZA_EVENT:
m_auiEncounter[3] = data;
break;
}
if (data == DONE)
{
OUT_SAVE_INST_DATA;
std::ostringstream saveStream;
saveStream << m_auiEncounter[0] << ' ' << m_auiEncounter[1] << ' ' << m_auiEncounter[2] << ' '
<< m_auiEncounter[3];
strInstData = saveStream.str();
SaveToDB();
OUT_SAVE_INST_DATA_COMPLETE;
}
}
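// For illustration: with all four encounters completed, the stream built in
// SetData() above yields "3 3 3 3" (space-separated encounter states,
// assuming the usual EncounterState values where DONE == 3).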
ObjectGuid GetGuidData(uint32 uiIdentifier) const override
{
switch (uiIdentifier)
{
case DATA_ANOMALUS: return Anomalus;
case DATA_KERISTRASZA: return Keristrasza;
case ANOMALUS_CONTAINMET_SPHERE: return AnomalusContainmentSphere;
case ORMOROKS_CONTAINMET_SPHERE: return OrmoroksContainmentSphere;
case TELESTRAS_CONTAINMET_SPHERE: return TelestrasContainmentSphere;
}
return ObjectGuid::Empty;
}
std::string GetSaveData() override
{
return strInstData;
}
<|fim▁hole|> {
OUT_LOAD_INST_DATA_FAIL;
return;
}
OUT_LOAD_INST_DATA(chrIn);
std::istringstream loadStream(chrIn);
loadStream >> m_auiEncounter[0] >> m_auiEncounter[1] >> m_auiEncounter[2] >> m_auiEncounter[3];
for (uint8 i = 0; i < NUMBER_OF_ENCOUNTERS; ++i)
if (m_auiEncounter[i] == IN_PROGRESS)
m_auiEncounter[i] = NOT_STARTED;
OUT_LOAD_INST_DATA_COMPLETE;
}
};
};
void AddSC_instance_nexus()
{
new instance_nexus();
}<|fim▁end|>
|
void Load(const char *chrIn)
{
if (!chrIn)
|
<|file_name|>parallel.rs<|end_file_name|><|fim▁begin|>//! Functions for parallel processing of record sets and records.
//!
//! Sequences are read and processed in batches (`RecordSet`) because sending
//! data across channels has a performance impact. The process works as follows:
//!
//! * Sequence parsing is done in a background thread
//! * Record sets are sent to worker threads, where expensive operations take
//! place (e.g. sequence analysis).
//! * The results are sent to the main thread along with the record sets.
//! * The record sets are recycled by sending them back to the background
//! reader.
//!
//! # Per-record processing
//!
//! The easiest to use are the functions, which operate directly on sequence
//! records without having to deal with record sets:
//!
//! * [`read_process_fasta_records`](read_process_fasta_records)
//! * [`read_process_fastq_records`](read_process_fastq_records)
//! * [`read_process_fastx_records`](read_process_fastx_records)
//!
//! They are specific for the given sequence format, but it is possible to
//! generate functions for other types using the
//! [`parallel_record_impl`](parallel_record_impl) macro.
//!
//! ## Example
//!
//! This example filters sequences by the occurrence of a pattern:
//!
//! ```no_run
//! use seq_io::prelude::*;
//! use seq_io::fastq::{Reader,Record};
//! use seq_io::parallel::read_process_fastq_records;
//! use std::fs::File;
//! use std::io::BufWriter;
//!
//! let reader = Reader::from_path("seqs.fastq").unwrap();
//! let mut writer = BufWriter::new(File::create("filtered.fastq").unwrap());
//!
//! read_process_fastq_records(reader, 4, 2,
//! |record, found| { // runs in worker
//! *found = record.seq().windows(3).position(|s| s == b"AAA").is_some();
//! },
//! |record, found| { // runs in main thread
//! if *found {
//! record.write(&mut writer).unwrap();
//! }
//! // Some(value) will stop the reader, and the value will be returned.
//! // In the case of never stopping, we need to give the compiler a hint about the
//! // type parameter, thus the special 'turbofish' notation is needed.
//! None::<()>
//! }).unwrap();
//! ```
//!
//! # Record set processing
//!
//! It is still possible to directly work with record sets using the following
//! generic functions:
//!
//! * [`read_process_recordsets`](read_process_recordsets)
//! * [`read_process_recordsets_init`](read_process_recordsets_init)
//!
//! ## Example
//!
//! This example searches for the first occurrence of a sequence pattern and
//! then stops the parser.
//!
//! ```no_run
//! use seq_io::prelude::*;
//! use seq_io::fastq;
//! use seq_io::parallel::read_process_recordsets;
//!
//! let reader = fastq::Reader::from_path("seqs.fastq").unwrap();
//!
//! read_process_recordsets(reader, 4, 2,
//! |record_set, position| {
//! // This function does the heavy work.
//! // The code is not necessarily very efficient, just for demonstration.
//! for (i, record) in record_set.into_iter().enumerate() {
//! if let Some(pos) = record.seq().windows(3).position(|s| s == b"AAA") {
//! *position = Some((i, pos));
//! return;
//! }
//! }
//! *position = None;
//! }, |mut record_sets| {
//! // This function runs in the main thread. It provides a streaming iterator over
//! // record sets and the corresponding return values from the worker function
//! // (not necessarily in the same order as in the file)
//! while let Some(result) = record_sets.next() {
//! let (record_set, position) = result?;
//! if let Some(&(i, pos)) = position.as_ref() {
//! let record = record_set.into_iter().nth(i).unwrap();
//! println!("Found AAA in record {} at position {}", record.id().unwrap(), pos);
//! return Ok(());
//! }
//! }
//! // Here, we need to give the compiler a type hint about the returned
//! // result, since it is not smart enough to infer it.
//! // In real-world programs, this may be less of an issue because the
//! // returned result type is often known.
//! Ok::<_, fastq::Error>(())
//! }
//! ).expect("FASTQ reading error");
//! ```
use crate::core::{QualRecordPosition, SeqRecordPosition};
use crate::{fasta, fastq, fastx};
use std::sync::mpsc;
/// A simple trait required to be implemented for readers fed into the
/// functions in this module.
pub trait RecordSetReader {
type RecordSet: Send;
type Err: Send;
fn fill_data(&mut self, record: &mut Self::RecordSet) -> Result<bool, Self::Err>;
}
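// For illustration, a hand-written implementation for a hypothetical reader
// type (the real ones are generated by `impl_parallel_reader!` at the bottom
// of this file) would look roughly like:
//
// impl RecordSetReader for MyReader {
//     type RecordSet = MyRecordSet;
//     type Err = MyError;
//     fn fill_data(&mut self, rset: &mut MyRecordSet) -> Result<bool, MyError> {
//         // fill `rset` with the next batch; Ok(false) signals end of input
//         self.read_record_set(rset)
//     }
// }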
/// This function reads record sets and processes them in parallel threads.
///
/// * It takes a [`RecordSetReader`](RecordSetReader), which reads data into
/// record sets in a background thread.
/// * These are then sent to `n_workers` worker threads, where the heavy work
/// is done in the `work` closure.
/// * Once ready, the record sets and work results are sent to the main thread
/// and provided to the `func` closure. They won't necessarily arrive in the
/// same order as they were read.
pub fn read_process_recordsets<R, W, F, O, Out>(
reader: R,
n_workers: u32,
queue_len: usize,
work: W,
func: F,
) -> Out
where
R: RecordSetReader + Send,
R::RecordSet: Default + Send,
O: Default + Send,
W: Send + Sync + Fn(&mut R::RecordSet, &mut O),
F: FnOnce(ParallelDataSets<R::RecordSet, R::Err, O>) -> Out,
{
read_process_recordsets_init(|| Ok::<_, ()>(reader), n_workers, queue_len, work, func).unwrap()
}
/// Like [`read_process_recordsets`](read_process_recordsets), but additionally
/// allows initiating the reader in the background thread using a closure
/// (`reader_init`).
/// This is useful for readers, which don't implement `Send`.
/// The `reader_init` closure has to return a result. Errors are returned from
/// the main function without being mixed with reading errors. This may lead to
/// nested `Result` being returned if the `func` closure returns `Result`.
pub fn read_process_recordsets_init<R, Ri, Ei, W, F, O, Out>(
reader_init: Ri,
n_workers: u32,
queue_len: usize,
work: W,
func: F,
) -> Result<Out, Ei>
where
R: RecordSetReader,
Ri: Send + FnOnce() -> Result<R, Ei>,
R::RecordSet: Default + Send,
O: Default + Send,
W: Send + Sync + Fn(&mut R::RecordSet, &mut O),
F: FnOnce(ParallelDataSets<R::RecordSet, R::Err, O>) -> Out,
Ei: Send,
{
let (done_send, done_recv) = mpsc::sync_channel(queue_len);
let (empty_send, empty_recv) = mpsc::sync_channel(queue_len);
crossbeam::scope(|scope| {
let handle = scope.spawn::<_, Result<(), Ei>>(move |_| {
let mut reader = reader_init()?;
let mut pool = scoped_threadpool::Pool::new(n_workers);
pool.scoped(|pool_scope| {
let work = &work;
loop {
// recycle an old RecordSet sent back after use
let (mut data, mut out) = if let Ok(r) = empty_recv.recv() {
r
} else {
// ParallelDataSets dropped -> stop
return;
};
let done_send = done_send.clone();
match reader.fill_data(&mut data) {
Ok(has_data) => {
if !has_data {
break;
}
// expensive work carried out by func()
pool_scope.execute(move || {
work(&mut data, &mut out);
done_send.send(Some(Ok((data, out)))).ok();
});
}
Err(e) => {
done_send.send(Some(Err(e))).ok();
break;
}
}
}
pool_scope.join_all();
done_send.send(None).ok();
});
Ok(())
});
for _ in 0..queue_len {
if empty_send
.send((R::RecordSet::default(), O::default()))
.is_err()
{
break;
}
}
let dsets: ParallelDataSets<R::RecordSet, R::Err, O> = ParallelDataSets {
empty_send,
done_recv,
current_recordset: (R::RecordSet::default(), O::default()),
};
let out = func(dsets);
handle.join().unwrap()?;
Ok(out)
})
.unwrap()
}
pub struct ParallelDataSets<D, E, O = ()>
where
D: Send,
E: Send,
O: Send,
{
empty_send: mpsc::SyncSender<(D, O)>,
done_recv: mpsc::Receiver<Option<Result<(D, O), E>>>,
current_recordset: (D, O),
}
impl<D, E, O> ParallelDataSets<D, E, O>
where
D: Send,
E: Send,
O: Send,
{
/// Returns a tuple of the next processed record set and processing results,
/// if present.
pub fn next(&mut self) -> Option<Result<(&mut D, &mut O), E>> {
self.done_recv.recv().unwrap().map(move |result| {
match result {
Ok(d) => {
let prev_rset = std::mem::replace(&mut self.current_recordset, d);
self.empty_send.send(prev_rset).ok(); // error: channel closed is not a problem, happens after calling stop()
Ok((&mut self.current_recordset.0, &mut self.current_recordset.1))
}
Err(e) => Err(e),
}
})
}
}
/// Allows generating functions equivalent to the `read_process_xy_records`
/// functions in this crate for your own types. This is rather a workaround
/// because the generic approach ([`read_process_records_init`](read_process_records_init))
/// does currently not work.
///
/// * `$format`: String specifying the name of the sequence format
/// (for generated documentation)
/// * `$name`: name of the generated function
/// * `$name_init`: name of another generated function, which takes a closure
/// initializing the readers in the background thread.
/// * <X: Trait, ...>: Optional set of trait bounds to be added to the
/// functions. If none are to be added, specify `<>`.
/// * `$RecordSet`: record set type (see [`RecordSetReader::RecordSet`](RecordSetReader::RecordSet)).
/// In addition to the trait requirements, `&$RecordSet` needs to implement
/// `IntoIterator<Item=$Record>`.
/// * `$Record`: record type returned by the record set iterator.
/// * `$Error`: reading error type ([`RecordSetReader::Err`](RecordSetReader::Err))
#[macro_export]
macro_rules! parallel_record_impl {
($format:expr, $name:ident, $name_init:ident,
( $($bounds:tt)* ),
$RecordSet:ty, $Record:ty, $Error:ty) => {
_parallel_record_impl!(
$format,
$name,
$name_init,
($($bounds)*),
$RecordSet,
$Record,
$Error,
concat!("[`", stringify!($name), "`](", stringify!($name), ")")
);
};
}
macro_rules! _parallel_record_impl {
($format:expr, $name:ident, $name_init:ident,
( $($bounds:tt)* ),
$RecordSet:ty, $Record:ty, $Error:ty,
$name_link:expr) => {
/// This function wraps [`read_process_recordsets`](read_process_recordsets),
/// hiding the complexity related to record sets and allowing it to
/// directly work on
#[doc = $format]
/// sequence records.
///
/// Apart from this, the process is similar:
///
/// * The records are read (as part of record sets) in a background
/// thread.
/// * Then they are sent to `n_workers` worker threads. Work is done
/// in the `work` closure supplied to this function.
/// * Once ready, records an results are sent to the main thread,
/// where they are supplied to the `func` closure. The order of the
/// records may be different.
pub fn $name<R, $($bounds)*, W, F, O, Out>(
reader: R,
n_workers: u32,
queue_len: usize,
work: W,
func: F,
) -> Result<Option<Out>, R::Err>
where
R: RecordSetReader<RecordSet = $RecordSet, Err = $Error> + Send,
O: Default + Send,
W: Send + Sync + Fn($Record, &mut O),
F: FnMut($Record, &mut O) -> Option<Out>,
{
let out: Result<_, $Error> = $name_init(
|| Ok(reader), n_workers, queue_len, work, func
);
out
}
/// Like
#[doc = $name_link]
///, but instead of a [`RecordSetReader`](RecordSetReader), it takes a
/// closure (`reader_init`) returning an `RecordSetReader` instance.
/// This allows using readers that don't implement `Send`.
/// `reader_init` should return a result. The error type needs to
/// implement `From<RecordSetReader::Err>`
///
pub fn $name_init<R, Ri, $($bounds)*, W, F, O, Out, E>(
reader_init: Ri,
n_workers: u32,
queue_len: usize,
work: W,
mut func: F,
) -> Result<Option<Out>, E>
where
R: RecordSetReader<RecordSet = $RecordSet, Err = $Error>,
Ri: Send + FnOnce() -> Result<R, E>,
O: Default + Send,
W: Send + Sync + Fn($Record, &mut O),
F: FnMut($Record, &mut O) -> Option<Out>,
E: Send + From<R::Err>,
{
read_process_recordsets_init(
reader_init,
n_workers,
queue_len,
|rset: &mut $RecordSet, output: &mut Vec<O>| {
let mut record_iter = rset.into_iter();
for (out, record) in output.iter_mut().zip(&mut record_iter) {
work(record, out);
}
for record in record_iter {
output.push(O::default());
work(record, output.last_mut().unwrap());
}
},
|mut records| {
while let Some(result) = records.next() {
let (rset, out) = result?;
for (record, o) in rset.into_iter().zip(out.iter_mut()) {
if let Some(out) = func(record, o) {
return Ok(Some(out));
}
}
}
Ok(None)
},
).and_then(From::from)
}
};
}
parallel_record_impl!(
"FASTA",<|fim▁hole|> read_process_fasta_records,
read_process_fasta_records_init,
(S: SeqRecordPosition + Send + Sync),
fasta::RecordSet<S>,
fasta::RefRecord<S>,
fasta::Error
);
parallel_record_impl!(
"FASTQ",
read_process_fastq_records,
read_process_fastq_records_init,
(S: QualRecordPosition + Send + Sync),
fastq::RecordSet<S>,
fastq::RefRecord<S>,
fastq::Error
);
parallel_record_impl!(
"FASTX",
read_process_fastx_records,
read_process_fastx_records_init,
(S: QualRecordPosition + Send + Sync),
fastx::RecordSet<S>,
fastx::RefRecord<S>,
fastx::Error
);
/// Using this function currently does not work due to a
/// [compiler bug](https://github.com/rust-lang/rust/issues/62529).
///
/// [`read_process_fasta_records`](read_process_fasta_records),
/// [`read_process_fastq_records`](read_process_fastq_records) and
/// [`read_process_fastx_records`](read_process_fastx_records)
/// provide the same functionality for now
/// (implemented using [`parallel_record_impl`](parallel_record_impl) macro).
pub fn read_process_records_init<R, Ri, W, F, O, Out, E>(
reader_init: Ri,
n_workers: u32,
queue_len: usize,
work: W,
mut func: F,
) -> Result<Option<Out>, E>
where
R: RecordSetReader,
Ri: Send + FnOnce() -> Result<R, E>,
R::RecordSet: Default + Send,
for<'a> &'a R::RecordSet: IntoIterator + Send,
O: Default + Send,
W: Send + Sync + Fn(<&R::RecordSet as IntoIterator>::Item, &mut O),
F: FnMut(<&R::RecordSet as IntoIterator>::Item, &mut O) -> Option<Out>,
E: From<<R as RecordSetReader>::Err> + Send,
{
read_process_recordsets_init(
reader_init,
n_workers,
queue_len,
|rset, out: &mut Vec<O>| {
let mut record_iter = rset.into_iter();
for mut d in (&mut record_iter).zip(out.iter_mut()) {
work(d.0, &mut d.1);
}
for record in record_iter {
out.push(O::default());
work(record, out.last_mut().unwrap());
}
},
|mut records| {
while let Some(result) = records.next() {
let (rset, out) = result?;
for (record, o) in rset.into_iter().zip(out.iter_mut()) {
if let Some(out) = func(record, o) {
return Ok(Some(out));
}
}
}
Ok(None)
},
)
.and_then(From::from)
}
macro_rules! impl_parallel_reader {
($($l:lifetime)?; $SeqReader:ty, $RecordPositionTrait:path, $RecordSet:ty, $Error:ty, $read_fn:ident) => {
impl<$($l,)? R, P, S> RecordSetReader for $SeqReader
where
R: std::io::Read,
P: crate::policy::BufPolicy + Send,
S: $RecordPositionTrait + Send + Sync
{
type RecordSet = $RecordSet;
type Err = $Error;
fn fill_data(&mut self, rset: &mut $RecordSet) -> Result<bool, $Error> {
self.$read_fn(rset)
}
}
}
}
impl_parallel_reader!(; fasta::Reader<R, P, S>, SeqRecordPosition, fasta::RecordSet<S>, fasta::Error, read_record_set);
impl_parallel_reader!(; fasta::single_line::Reader<R, P, S>, SeqRecordPosition, fasta::RecordSet<S>, fasta::Error, read_record_set);
impl_parallel_reader!(; fastq::Reader<R, P, S>, QualRecordPosition, fastq::RecordSet<S>, fastq::Error, read_record_set);
impl_parallel_reader!(; fastq::multiline::Reader<R, P, S>, QualRecordPosition, fastq::RecordSet<S>, fastq::Error, read_record_set);
impl_parallel_reader!(; fastx::Reader<R, P, S>, QualRecordPosition, fastx::RecordSet<S>, fastx::Error, read_record_set);
impl_parallel_reader!(; fastx::multiline_qual::Reader<R, P, S>, QualRecordPosition, fastx::RecordSet<S>, fastx::Error, read_record_set);
impl_parallel_reader!('a ; &'a mut (dyn fastx::dynamic::FastxReader<R, P, S> + Send), QualRecordPosition, fastx::RecordSet<S>, fastx::Error, read_record_set_fastx);
impl_parallel_reader!('a ; Box<dyn fastx::dynamic::FastxReader<R, P, S> + Send + 'a>, QualRecordPosition, fastx::RecordSet<S>, fastx::Error, read_record_set_fastx);<|fim▁end|>
| |
<|file_name|>ExOh.py<|end_file_name|><|fim▁begin|>def ExOh(str):
temp = list(str)
xcount = 0
ocount = 0
for c in temp:
if c == "x":
xcount += 1
if c == "o":
ocount += 1
if xcount == ocount:
print "true"
else:
print "false"
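# Equivalent check using str.count (sketch; same Python 2 output as above):
#   print "true" if str.count("x") == str.count("o") else "false"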
<|fim▁hole|><|fim▁end|>
|
ExOh(raw_input())
|
<|file_name|>tests.options.compilers.js<|end_file_name|><|fim▁begin|>import path from 'path'
<|fim▁hole|>
context.it = function (name, callback) {
if (callback) {
return it(...arguments);
} else {
callback = name
name = path.basename(file, '.js')
return it(name, callback)
}
}<|fim▁end|>
|
let { context, file, mocha, options } = module.parent.context
let { it } = context
|
<|file_name|>constant.js<|end_file_name|><|fim▁begin|>'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _rest = require('./internal/rest');
var _rest2 = _interopRequireDefault(_rest);
var _initialParams = require('./internal/initialParams');
var _initialParams2 = _interopRequireDefault(_initialParams);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
/**
* Returns a function that when called, calls-back with the values provided.
* Useful as the first function in a [`waterfall`]{@link module:ControlFlow.waterfall}, or for plugging values in to
* [`auto`]{@link module:ControlFlow.auto}.
*
* @name constant
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {...*} arguments... - Any number of arguments to automatically invoke
* callback with.
* @returns {AsyncFunction} Returns a function that when invoked, automatically
* invokes the callback with the previous given arguments.
* @example
*
* async.waterfall([
* async.constant(42),
* function (value, next) {
* // value === 42
* },
* //...
* ], callback);
*
* async.waterfall([
* async.constant(filename, "utf8"),
* fs.readFile,
* function (fileData, next) {
* //...<|fim▁hole|> *
* async.auto({
* hostname: async.constant("https://server.net/"),
* port: findFreePort,
* launchServer: ["hostname", "port", function (options, cb) {
* startServer(options, cb);
* }],
* //...
* }, callback);
*/
exports.default = (0, _rest2.default)(function (values) {
var args = [null].concat(values);
return (0, _initialParams2.default)(function (ignoredArgs, callback) {
return callback.apply(this, args);
});
});
module.exports = exports['default'];<|fim▁end|>
|
* }
* //...
* ], callback);
|
<|file_name|>ModStatsDumper.java<|end_file_name|><|fim▁begin|>package mezz.texturedump.dumpers;
import com.google.gson.stream.JsonWriter;
import net.minecraft.client.renderer.texture.TextureAtlas;
import net.minecraftforge.fml.ModList;
import net.minecraftforge.fml.StartupMessageManager;
import net.minecraftforge.forgespi.language.IModFileInfo;
import net.minecraftforge.forgespi.language.IModInfo;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.annotation.Nullable;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class ModStatsDumper {
private static final Logger LOGGER = LogManager.getLogger();
public Path saveModStats(String name, TextureAtlas map, Path modStatsDir) throws IOException {
Map<String, Long> modPixelCounts = map.texturesByName.values().stream()
.collect(Collectors.groupingBy(
sprite -> sprite.getName().getNamespace(),
Collectors.summingLong(sprite -> (long) sprite.getWidth() * sprite.getHeight()))
);
final long totalPixels = modPixelCounts.values().stream().mapToLong(longValue -> longValue).sum();
final String filename = name + "_mod_statistics";
Path output = modStatsDir.resolve(filename + ".js");
List<Map.Entry<String, Long>> sortedEntries = modPixelCounts.entrySet().stream()
.sorted(Collections.reverseOrder(Map.Entry.comparingByValue()))
.collect(Collectors.toList());<|fim▁hole|> JsonWriter jsonWriter = new JsonWriter(fileWriter);
jsonWriter.setIndent(" ");
jsonWriter.beginArray();
{
for (Map.Entry<String, Long> modPixels : sortedEntries) {
String resourceDomain = modPixels.getKey();
long pixelCount = modPixels.getValue();
writeModStatisticsObject(jsonWriter, resourceDomain, pixelCount, totalPixels);
}
}
jsonWriter.endArray();
jsonWriter.close();
fileWriter.close();
LOGGER.info("Saved mod statistics to {}.", output.toString());
return output;
}
private static void writeModStatisticsObject(JsonWriter jsonWriter, String resourceDomain, long pixelCount, long totalPixels) throws IOException {
IModInfo modInfo = getModMetadata(resourceDomain);
String modName = modInfo != null ? modInfo.getDisplayName() : "";
jsonWriter.beginObject()
.name("resourceDomain").value(resourceDomain)
.name("pixelCount").value(pixelCount)
.name("percentOfTextureMap").value(pixelCount * 100f / totalPixels)
.name("modName").value(modName)
.name("url").value(getModConfigValue(modInfo, "displayURL"))
.name("issueTrackerUrl").value(getModConfigValue(modInfo, "issueTrackerURL"));
jsonWriter.name("authors").beginArray();
{
String authors = getModConfigValue(modInfo, "authors");
if (!authors.isEmpty()) {
String[] authorList = authors.split(",");
for (String author : authorList) {
jsonWriter.value(author.trim());
}
}
}
jsonWriter.endArray();
jsonWriter.endObject();
}
private static String getModConfigValue(@Nullable IModInfo modInfo, String key) {
if (modInfo == null) {
return "";
}
Map<String, Object> modConfig = modInfo.getModProperties();
Object value = modConfig.getOrDefault(key, "");
if (value instanceof String) {
return (String) value;
}
return "";
}
@Nullable
private static IModInfo getModMetadata(String resourceDomain) {
ModList modList = ModList.get();
IModFileInfo modFileInfo = modList.getModFileById(resourceDomain);
if (modFileInfo == null) {
return null;
}
return modFileInfo.getMods()
.stream()
.findFirst()
.orElse(null);
}
}<|fim▁end|>
|
StartupMessageManager.addModMessage("Dumping Mod TextureMap Statistics");
FileWriter fileWriter = new FileWriter(output.toFile());
fileWriter.write("var modStatistics = \n//Start of Data\n");
|
<|file_name|>without_loop_counters.rs<|end_file_name|><|fim▁begin|>#![warn(clippy::needless_range_loop, clippy::manual_memcpy)]
const LOOP_OFFSET: usize = 5000;<|fim▁hole|> for i in 0..src.len() {
dst[i] = src[i];
}
// dst offset memcpy
for i in 0..src.len() {
dst[i + 10] = src[i];
}
// src offset memcpy
for i in 0..src.len() {
dst[i] = src[i + 10];
}
// src offset memcpy
for i in 11..src.len() {
dst[i] = src[i - 10];
}
// overwrite entire dst
for i in 0..dst.len() {
dst[i] = src[i];
}
// manual copy with branch - can't easily convert to memcpy!
for i in 0..src.len() {
dst[i] = src[i];
if dst[i] > 5 {
break;
}
}
// multiple copies - suggest two memcpy statements
for i in 10..256 {
dst[i] = src[i - 5];
dst2[i + 500] = src[i]
}
// this is a reversal - the copy lint shouldn't be triggered
for i in 10..LOOP_OFFSET {
dst[i + LOOP_OFFSET] = src[LOOP_OFFSET - i];
}
let some_var = 5;
// Offset in variable
for i in 10..LOOP_OFFSET {
dst[i + LOOP_OFFSET] = src[i - some_var];
}
// Non continuous copy - don't trigger lint
for i in 0..10 {
dst[i + i] = src[i];
}
let src_vec = vec![1, 2, 3, 4, 5];
let mut dst_vec = vec![0, 0, 0, 0, 0];
// make sure vectors are supported
for i in 0..src_vec.len() {
dst_vec[i] = src_vec[i];
}
// lint should not trigger when either
// source or destination type is not
// slice-like, like DummyStruct
struct DummyStruct(i32);
impl ::std::ops::Index<usize> for DummyStruct {
type Output = i32;
fn index(&self, _: usize) -> &i32 {
&self.0
}
}
let src = DummyStruct(5);
let mut dst_vec = vec![0; 10];
for i in 0..10 {
dst_vec[i] = src[i];
}
// Simplify suggestion (issue #3004)
let src = [0, 1, 2, 3, 4];
let mut dst = [0, 0, 0, 0, 0, 0];
let from = 1;
for i in from..from + src.len() {
dst[i] = src[i - from];
}
for i in from..from + 3 {
dst[i] = src[i - from];
}
#[allow(clippy::identity_op)]
for i in 0..5 {
dst[i - 0] = src[i];
}
#[allow(clippy::reversed_empty_ranges)]
for i in 0..0 {
dst[i] = src[i];
}
// `RangeTo` `for` loop - don't trigger lint
for i in 0.. {
dst[i] = src[i];
}
}
#[warn(clippy::needless_range_loop, clippy::manual_memcpy)]
pub fn manual_clone(src: &[String], dst: &mut [String]) {
for i in 0..src.len() {
dst[i] = src[i].clone();
}
}
fn main() {}<|fim▁end|>
|
pub fn manual_copy(src: &[i32], dst: &mut [i32], dst2: &mut [i32]) {
// plain manual memcpy
|
<|file_name|>test.rs<|end_file_name|><|fim▁begin|>use std::thread::{self, sleep_ms};
use std::sync::{Arc};
use std::sync::atomic::{AtomicUsize};
use std::sync::atomic::Ordering::{SeqCst};
use spsc::unbounded::{new};
use super::{Select, Selectable};
fn ms_sleep(ms: i64) {
sleep_ms(ms as u32);
}
#[test]
fn no_wait_one() {
let (send, recv) = new();
send.send(1u8).unwrap();
let select = Select::new();
select.add(&recv);
assert!(select.wait(&mut [0]).len() == 1);
}
#[test]
fn wait_one() {
let (send, recv) = new();
thread::spawn(move || {
ms_sleep(100);
send.send(1u8).unwrap();
});
let select = Select::new();
select.add(&recv);
assert!(select.wait(&mut [0]) == &mut [recv.id()][..]);
}
#[test]
fn ready_list_one() {
let (send, recv) = new();
let select = Select::new();
select.add(&recv);
send.send(1u8).unwrap();
assert!(select.wait_timeout(&mut [0], None) == Some(&mut [recv.id()][..]));
}
#[test]
fn no_wait_two() {
let (send, recv) = new();
let (send2, recv2) = new();
send.send(1u8).unwrap();
send2.send(1u8).unwrap();
let select = Select::new();
select.add(&recv);
select.add(&recv2);
assert!(select.wait(&mut [0, 0]).len() == 2);
}
#[test]
fn wait_two() {
let (send, recv) = new();
let (send2, recv2) = new();
thread::spawn(move || {
ms_sleep(100);
send.send(1u8).unwrap();
});
thread::spawn(move || {
ms_sleep(200);
send2.send(1u8).unwrap();
});
let select = Select::new();
select.add(&recv);
select.add(&recv2);
let mut saw1 = false;
'outer: loop {
let mut buf = [0, 0];
for &mut id in select.wait(&mut buf) {
if id == recv.id() && recv.recv_sync().is_err() {
saw1 = true;
}
if id == recv2.id() && recv2.recv_sync().is_err() {
break 'outer;
}
}
}
assert!(saw1);
}
#[test]
fn select_wrong_thread() {
// Check that cross thread selecting works.
<|fim▁hole|> let (send2, recv2) = new();
let id1 = recv1.id();
let id2 = recv2.id();
let select1 = Arc::new(Select::new());
let select2 = select1.clone();
let thread = thread::scoped(move || {
select2.add(&recv2);
send2.send(1u8).unwrap();
ms_sleep(100);
// clear the second channel so that wait below will remove it from the ready list
recv2.recv_sync().unwrap();
assert_eq!(select2.wait(&mut [0, 0]), &mut [id1][..]);
});
select1.add(&recv1);
assert_eq!(select1.wait(&mut [0, 0]), &mut [id2][..]);
send1.send(2u8).unwrap();
// make sure that we wait for the other thread before dropping anything else
drop(thread);
}
#[test]
fn select_chance() {
// Check that only one selecting thread wakes up.
let counter1 = Arc::new(AtomicUsize::new(0));
let counter2 = counter1.clone();
let counter3 = counter1.clone();
let (send, recv) = new();
let select1 = Arc::new(Select::new());
let select2 = select1.clone();
select1.add(&recv);
thread::spawn(move || {
select1.wait(&mut []);
counter2.fetch_add(1, SeqCst);
});
thread::spawn(move || {
select2.wait(&mut []);
counter3.fetch_add(1, SeqCst);
});
ms_sleep(100);
send.send(1u8).unwrap();
ms_sleep(100);
assert_eq!(counter1.swap(0, SeqCst), 1);
}<|fim▁end|>
|
let (send1, recv1) = new();
|
<|file_name|>types.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base types used by other parts of libcloud
"""
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.common.types import InvalidCredsError, InvalidCredsException
__all__ = [
"Provider",
"NodeState",
"DeploymentError",
"DeploymentException",
# @@TR: should the unused imports below be exported?
"LibcloudError",
"MalformedResponseError",
"InvalidCredsError",
"InvalidCredsException",
"DEPRECATED_RACKSPACE_PROVIDERS",
"OLD_CONSTANT_TO_NEW_MAPPING"<|fim▁hole|> """
Defines for each of the supported providers
:cvar DUMMY: Example provider
:cvar EC2_US_EAST: Amazon AWS US N. Virginia
:cvar EC2_US_WEST: Amazon AWS US N. California
:cvar EC2_EU_WEST: Amazon AWS EU Ireland
:cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers
:cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers
:cvar GCE: Google Compute Engine
:cvar GOGRID: GoGrid
:cvar VPSNET: VPS.net
:cvar LINODE: Linode.com
:cvar VCLOUD: vmware vCloud
:cvar RIMUHOSTING: RimuHosting.com
:cvar ECP: Enomaly
:cvar IBM: IBM Developer Cloud
:cvar OPENNEBULA: OpenNebula.org
:cvar DREAMHOST: DreamHost Private Server
:cvar ELASTICHOSTS: ElasticHosts.com
:cvar CLOUDSIGMA: CloudSigma
:cvar NIMBUS: Nimbus
:cvar BLUEBOX: Bluebox
:cvar OPSOURCE: Opsource Cloud
:cvar DIMENSIONDATA: Dimension Data Cloud
:cvar NINEFOLD: Ninefold
:cvar TERREMARK: Terremark
:cvar EC2_US_WEST_OREGON: Amazon AWS US West 2 (Oregon)
:cvar CLOUDSTACK: CloudStack
:cvar CLOUDSIGMA_US: CloudSigma US Las Vegas
:cvar LIBVIRT: Libvirt driver
:cvar JOYENT: Joyent driver
:cvar VCL: VCL driver
:cvar KTUCLOUD: kt ucloud driver
:cvar GRIDSPOT: Gridspot driver
:cvar ABIQUO: Abiquo driver
:cvar NEPHOSCALE: NephoScale driver
:cvar EXOSCALE: Exoscale driver.
:cvar IKOULA: Ikoula driver.
:cvar OUTSCALE_SAS: Outscale SAS driver.
:cvar OUTSCALE_INC: Outscale INC driver.
:cvar PROFIT_BRICKS: ProfitBricks driver.
:cvar VULTR: vultr driver.
:cvar AZURE: Azure driver.
:cvar AURORACOMPUTE: Aurora Compute driver.
"""
AZURE = 'azure'
DUMMY = 'dummy'
EC2 = 'ec2_us_east'
RACKSPACE = 'rackspace'
GCE = 'gce'
GOGRID = 'gogrid'
VPSNET = 'vpsnet'
LINODE = 'linode'
VCLOUD = 'vcloud'
RIMUHOSTING = 'rimuhosting'
VOXEL = 'voxel'
SOFTLAYER = 'softlayer'
EUCALYPTUS = 'eucalyptus'
ECP = 'ecp'
IBM = 'ibm'
OPENNEBULA = 'opennebula'
DREAMHOST = 'dreamhost'
ELASTICHOSTS = 'elastichosts'
BRIGHTBOX = 'brightbox'
CLOUDSIGMA = 'cloudsigma'
NIMBUS = 'nimbus'
BLUEBOX = 'bluebox'
GANDI = 'gandi'
OPSOURCE = 'opsource'
DIMENSIONDATA = 'dimensiondata'
OPENSTACK = 'openstack'
SKALICLOUD = 'skalicloud'
SERVERLOVE = 'serverlove'
NINEFOLD = 'ninefold'
TERREMARK = 'terremark'
CLOUDSTACK = 'cloudstack'
LIBVIRT = 'libvirt'
JOYENT = 'joyent'
VCL = 'vcl'
KTUCLOUD = 'ktucloud'
GRIDSPOT = 'gridspot'
RACKSPACE_FIRST_GEN = 'rackspace_first_gen'
HOSTVIRTUAL = 'hostvirtual'
ABIQUO = 'abiquo'
DIGITAL_OCEAN = 'digitalocean'
NEPHOSCALE = 'nephoscale'
CLOUDFRAMES = 'cloudframes'
EXOSCALE = 'exoscale'
IKOULA = 'ikoula'
OUTSCALE_SAS = 'outscale_sas'
OUTSCALE_INC = 'outscale_inc'
VSPHERE = 'vsphere'
PROFIT_BRICKS = 'profitbricks'
VULTR = 'vultr'
AURORACOMPUTE = 'aurora_compute'
PACKET = 'packet'
RUNABOVE = 'runabove'
# OpenStack based providers
HPCLOUD = 'hpcloud'
CLOUDWATT = 'cloudwatt'
KILI = 'kili'
ONAPP = 'onapp'
# Deprecated constants which are still supported
EC2_US_EAST = 'ec2_us_east'
EC2_EU = 'ec2_eu_west' # deprecated name
EC2_EU_WEST = 'ec2_eu_west'
EC2_US_WEST = 'ec2_us_west'
EC2_AP_SOUTHEAST = 'ec2_ap_southeast'
EC2_AP_NORTHEAST = 'ec2_ap_northeast'
EC2_US_WEST_OREGON = 'ec2_us_west_oregon'
EC2_SA_EAST = 'ec2_sa_east'
EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2'
ELASTICHOSTS_UK1 = 'elastichosts_uk1'
ELASTICHOSTS_UK2 = 'elastichosts_uk2'
ELASTICHOSTS_US1 = 'elastichosts_us1'
ELASTICHOSTS_US2 = 'elastichosts_us2'
ELASTICHOSTS_US3 = 'elastichosts_us3'
ELASTICHOSTS_CA1 = 'elastichosts_ca1'
ELASTICHOSTS_AU1 = 'elastichosts_au1'
ELASTICHOSTS_CN1 = 'elastichosts_cn1'
CLOUDSIGMA_US = 'cloudsigma_us'
# Deprecated constants which aren't supported anymore
RACKSPACE_UK = 'rackspace_uk'
RACKSPACE_NOVA_BETA = 'rackspace_nova_beta'
RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw'
RACKSPACE_NOVA_LON = 'rackspace_nova_lon'
RACKSPACE_NOVA_ORD = 'rackspace_nova_ord'
# Removed
# SLICEHOST = 'slicehost'
DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK,
Provider.RACKSPACE_NOVA_BETA,
Provider.RACKSPACE_NOVA_DFW,
Provider.RACKSPACE_NOVA_LON,
Provider.RACKSPACE_NOVA_ORD]
OLD_CONSTANT_TO_NEW_MAPPING = {
Provider.RACKSPACE: Provider.RACKSPACE_FIRST_GEN,
Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN,
Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE
}
class NodeState(object):
"""
Standard states for a node
:cvar RUNNING: Node is running.
:cvar REBOOTING: Node is rebooting.
:cvar TERMINATED: Node is terminated. This node can't be started later on.
:cvar STOPPED: Node is stopped. This node can be started later on.
:cvar PENDING: Node is pending.
:cvar SUSPENDED: Node is suspended.
:cvar ERROR: Node is in an error state. Usually no operations can be performed
on the node once it ends up in the error state.
:cvar PAUSED: Node is paused.
:cvar UNKNOWN: Node state is unknown.
"""
RUNNING = 0
REBOOTING = 1
TERMINATED = 2
PENDING = 3
UNKNOWN = 4
STOPPED = 5
SUSPENDED = 6
ERROR = 7
PAUSED = 8
@classmethod
def tostring(cls, value):
values = cls.__dict__
values = dict([(key, string) for key, string in values.items() if
not key.startswith('__')])
for item_key, item_value in values.items():
if value == item_value:
return item_key
@classmethod
def fromstring(cls, value):
return getattr(cls, value.upper(), None)
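# Illustrative round-trip, not part of libcloud itself; it assumes only the
# NodeState definition above:
#   NodeState.tostring(NodeState.RUNNING)  # -> 'RUNNING'
#   NodeState.fromstring('stopped')        # -> 5 (NodeState.STOPPED)
#   NodeState.fromstring('bogus')          # -> None (getattr default)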
class StorageVolumeState(object):
"""
Standard states of a StorageVolume
"""
AVAILABLE = "available"
ERROR = "error"
INUSE = "in_use"
CREATING = "creating"
DELETING = "deleting"
DELETED = "deleted"
BACKUP = "backup"
ATTACHING = "attaching"
UNKNOWN = "unknown"
class VolumeSnapshotState(object):
"""
Standard states of VolumeSnapshots
"""
AVAILABLE = 0
ERROR = 1
CREATING = 2
DELETING = 3
RESTORING = 4
UNKNOWN = 5
class Architecture(object):
"""
Image and size architectures.
:cvar I386: i386 (32 bit)
:cvar X86_64: x86_64 (64 bit)
"""
I386 = 0
X86_X64 = 1
class DeploymentError(LibcloudError):
"""
Exception used when a Deployment Task failed.
:ivar node: :class:`Node` on which this exception happened, you might want
to call :func:`Node.destroy`
"""
def __init__(self, node, original_exception=None, driver=None):
self.node = node
self.value = original_exception
self.driver = driver
def __str__(self):
return self.__repr__()
def __repr__(self):
return (('<DeploymentError: node=%s, error=%s, driver=%s>'
% (self.node.id, str(self.value), str(self.driver))))
class KeyPairError(LibcloudError):
error_type = 'KeyPairError'
def __init__(self, name, driver):
self.name = name
self.value = 'Key pair with name %s does not exist' % (name)
super(KeyPairError, self).__init__(value=self.value, driver=driver)
def __str__(self):
return self.__repr__()
def __repr__(self):
return ('<%s name=%s, value=%s, driver=%s>' %
(self.error_type, self.name, self.value, self.driver.name))
class KeyPairDoesNotExistError(KeyPairError):
error_type = 'KeyPairDoesNotExistError'
"""Deprecated alias of :class:`DeploymentException`"""
DeploymentException = DeploymentError<|fim▁end|>
|
]
class Provider(object):
|
<|file_name|>musicabstractfloatwidget.cpp<|end_file_name|><|fim▁begin|>#include "musicabstractfloatwidget.h"
MusicAbstractFloatWidget::MusicAbstractFloatWidget(QWidget *parent)
: QLabel(parent)
{
m_animation = new QPropertyAnimation(this, "geometry", this);
m_animation->setDuration(500);
m_blockAnimation = false;
}
MusicAbstractFloatWidget::~MusicAbstractFloatWidget()
{
delete m_animation;
}
void MusicAbstractFloatWidget::animationIn()
{
m_animation->setStartValue(m_rectOut);
m_animation->setEndValue(m_rectIn);
m_animation->start();
}
void MusicAbstractFloatWidget::animationOut()
{
m_animation->setStartValue(m_rectIn);
m_animation->setEndValue(m_rectOut);
m_animation->start();
}
#if TTK_QT_VERSION_CHECK(6,0,0)
void MusicAbstractFloatWidget::enterEvent(QEnterEvent *event)
#else
void MusicAbstractFloatWidget::enterEvent(QEvent *event)
#endif
{
QLabel::enterEvent(event);
if(!m_blockAnimation)
{
animationIn();
}
}
<|fim▁hole|> QLabel::leaveEvent(event);
if(!m_blockAnimation)
{
animationOut();
}
}<|fim▁end|>
|
void MusicAbstractFloatWidget::leaveEvent(QEvent *event)
{
|
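// Illustrative sketch, not from the TTK sources: m_rectIn/m_rectOut are the
// shown and hidden geometries that animationIn()/animationOut() interpolate
// between, so a derived widget would typically initialize them once, e.g.:
//   m_rectIn  = QRect(0, parentWidget()->height() - 40, parentWidget()->width(), 40); // visible
//   m_rectOut = QRect(0, parentWidget()->height(),      parentWidget()->width(), 40); // hidden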
<|file_name|>reference-link-dialog.js<|end_file_name|><|fim▁begin|>/*!
* Reference link dialog plugin for Editor.md
*
* @file reference-link-dialog.js
* @author pandao
* @version 1.2.1
* @updateTime 2015-06-09
* {@link https://github.com/pandao/editor.md}
* @license MIT
*/
(function () {
var factory = function (exports) {
var pluginName = "reference-link-dialog";
var ReLinkId = 1;
exports.fn.referenceLinkDialog = function () {
var _this = this;
var cm = this.cm;
var lang = this.lang;
var editor = this.editor;
var settings = this.settings;
var cursor = cm.getCursor();
var selection = cm.getSelection();
var dialogLang = lang.dialog.referenceLink;
var classPrefix = this.classPrefix;
var dialogName = classPrefix + pluginName, dialog;
cm.focus();
if (editor.find("." + dialogName).length < 1) {
var dialogHTML = "<div class=\"" + classPrefix + "form\">" +
"<label>" + dialogLang.name + "</label>" +
"<input type=\"text\" value=\"[" + ReLinkId + "]\" data-name />" +
"<br/>" +
"<label>" + dialogLang.urlId + "</label>" +
"<input type=\"text\" data-url-id />" +
"<br/>" +
"<label>" + dialogLang.url + "</label>" +
"<input type=\"text\" value=\"http://\" data-url />" +
"<br/>" +
"<label>" + dialogLang.urlTitle + "</label>" +
"<input type=\"text\" value=\"" + selection + "\" data-title />" +
"<br/>" +
"</div>";
dialog = this.createDialog({
name: dialogName,
title: dialogLang.title,
width: 380,
height: 296,
content: dialogHTML,
mask: settings.dialogShowMask,
drag: settings.dialogDraggable,
lockScreen: settings.dialogLockScreen,
maskStyle: {
opacity: settings.dialogMaskOpacity,
backgroundColor: settings.dialogMaskBgColor
},
buttons: {
enter: [lang.buttons.enter, function () {
var name = this.find("[data-name]").val();
var url = this.find("[data-url]").val();
var rid = this.find("[data-url-id]").val();
var title = this.find("[data-title]").val();
if (name === "") {
alert(dialogLang.nameEmpty);
return false;
}
if (rid === "") {
alert(dialogLang.idEmpty);
return false;
}
if (url === "http://" || url === "") {
alert(dialogLang.urlEmpty);
return false;
}
//cm.replaceSelection("[" + title + "][" + name + "]\n[" + name + "]: " + url + "");
cm.replaceSelection("[" + name + "][" + rid + "]");
if (selection === "") {
cm.setCursor(cursor.line, cursor.ch + 1);
}
title = (title === "") ? "" : " \"" + title + "\"";
cm.setValue(cm.getValue() + "\n[" + rid + "]: " + url + title + "");
this.hide().lockScreen(false).hideMask();
return false;
}],
cancel: [lang.buttons.cancel, function () {
this.hide().lockScreen(false).hideMask();
return false;
}]
}
});
}
dialog = editor.find("." + dialogName);
dialog.find("[data-name]").val("[" + ReLinkId + "]");
dialog.find("[data-url-id]").val("");
dialog.find("[data-url]").val("http://");
dialog.find("[data-title]").val(selection);
this.dialogShowMask(dialog);
this.dialogLockScreen();
dialog.show();
ReLinkId++;
};
};
// CommonJS/Node.js
if (typeof require === "function" && typeof exports === "object" && typeof module === "object") {
module.exports = factory;
}
else if (typeof define === "function") // AMD/CMD/Sea.js
{
if (define.amd) { // for Require.js
define(["editormd"], function (editormd) {
factory(editormd);
});
} else { // for Sea.js
define(function (require) {
var editormd = require("./../../editormd");
factory(editormd);
});
}
}
else {
factory(window.editormd);<|fim▁hole|> }
})();<|fim▁end|>
| |
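/* For reference (not in the plugin source): given name "foo", url id "1",
 * url "http://example.com" and title "bar", the enter handler above emits
 * reference-style Markdown in two places:
 *   [foo][1]                        // inserted at the cursor
 *   [1]: http://example.com "bar"   // appended to the end of the document
 */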
<|file_name|>multiwindow.rs<|end_file_name|><|fim▁begin|>#[cfg(target_os = "android")]
#[macro_use]
extern crate android_glue;
extern crate glutin;
use std::thread;
mod support;
#[cfg(target_os = "android")]
android_start!(main);
#[cfg(not(feature = "window"))]
fn main() { println!("This example requires glutin to be compiled with the `window` feature"); }
#[cfg(feature = "window")]
fn main() {
let window1 = glutin::Window::new().unwrap();
let window2 = glutin::Window::new().unwrap();
let window3 = glutin::Window::new().unwrap();
let t1 = thread::spawn(move || {
run(window1, (0.0, 1.0, 0.0, 1.0));
});
let t2 = thread::spawn(move || {
run(window2, (0.0, 0.0, 1.0, 1.0));
});
let t3 = thread::spawn(move || {
run(window3, (1.0, 0.0, 0.0, 1.0));
});<|fim▁hole|> let _ = t2.join();
let _ = t3.join();
}
#[cfg(feature = "window")]
fn run(window: glutin::Window, color: (f32, f32, f32, f32)) {
unsafe { window.make_current() };
let context = support::load(&window);
while !window.is_closed() {
context.draw_frame(color);
window.swap_buffers();
window.wait_events().next();
}
}<|fim▁end|>
|
let _ = t1.join();
|
<|file_name|>base.ts<|end_file_name|><|fim▁begin|>import {
RegistrationType,
IDynamicDependency,
Factory,
Initializer,
Disposer
} from './common'
import { IContainer } from '../../build'
import { OwnerType, ScopeType } from '../../common'
export interface IRegistrationBase {
id: string
service: {}
factory?: Factory<{}>
factoryType?: {}
factoryValue?: {}
name?: string
scope?: ScopeType
owner: OwnerType
initializer?: Initializer<{}>
disposer?: Disposer<{}>
<|fim▁hole|> args: {}[]
params: {}[]
container?: IContainer
instance?: {}
registrationType?: RegistrationType
isLazy: boolean
dependenciesValue: IDynamicDependency[]
cloneFor: (container: IContainer) => IRegistrationBase
clone: () => IRegistrationBase
copyDependency: (dependency: IDynamicDependency) => void
checkRegistrationType:() => void
}<|fim▁end|>
| |
<|file_name|>util_bsd.go<|end_file_name|><|fim▁begin|>// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd netbsd openbsd<|fim▁hole|>import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TIOCGETA
const ioctlWriteTermios = unix.TIOCSETA<|fim▁end|>
|
package terminal
|
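// Illustrative sketch, not part of the original package: the two constants
// above are the ioctl request numbers used to read and write the terminal
// state via golang.org/x/sys/unix, e.g.:
//   termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
//   // ... tweak termios flags ...
//   err = unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)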
<|file_name|>MainActivity.java<|end_file_name|><|fim▁begin|>/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Curt Binder
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package info.curtbinder.reefangel.phone;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.res.Configuration;
import android.os.Bundle;
import android.os.Handler;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentTransaction;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.ActionBar;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.app.ActionBarDrawerToggle;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.WindowManager;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.ListView;
import android.widget.Toast;
import java.util.List;
import info.curtbinder.reefangel.wizard.SetupWizardActivity;
public class MainActivity extends AppCompatActivity
implements ActionBar.OnNavigationListener {
// public static final int REQUEST_EXIT = 1;
// public static final int RESULT_EXIT = 1024;
private static final String OPENED_KEY = "OPENED_KEY";
private static final String STATE_CHECKED = "DRAWER_CHECKED";
private static final String PREVIOUS_CHECKED = "PREVIOUS";
// do not switch selected profile when restoring the application state
private static boolean fRestoreState = false;
public final String TAG = MainActivity.class.getSimpleName();
private RAApplication raApp;
private String[] mNavTitles;
private Toolbar mToolbar;
private DrawerLayout mDrawerLayout;
private ListView mDrawerList;
private ActionBarDrawerToggle mDrawerToggle;
private Boolean opened = null;
private int mOldPosition = -1;
private Boolean fCanExit = false;
private Fragment mHistoryContent = null;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
raApp = (RAApplication) getApplication();
raApp.raprefs.setDefaultPreferences();
// Set the Theme before the layout is instantiated
//Utils.onActivityCreateSetTheme(this, raApp.raprefs.getSelectedTheme());
setContentView(R.layout.activity_main);
// Check for first run
if (raApp.isFirstRun()) {
Intent i = new Intent(this, SetupWizardActivity.class);
i.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
startActivity(i);
finish();
}
// Load any saved position
int position = 0;
if (savedInstanceState != null) {
position = savedInstanceState.getInt(STATE_CHECKED, 0);
Log.d(TAG, "Restore, position: " + position);
if (position == 3) {
// history fragment
mHistoryContent = getSupportFragmentManager().getFragment(savedInstanceState, "HistoryGraphFragment");
}
}
setupToolbar();
setupNavDrawer();
updateActionBar();
selectItem(position);
// launch a new thread to show the drawer on very first app launch
new Thread(new Runnable() {
@Override
public void run() {
opened = raApp.raprefs.getBoolean(OPENED_KEY, false);
if (!opened) {
mDrawerLayout.openDrawer(mDrawerList);
}
}
}).start();
}
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
// get the checked item and subtract off one to get the actual position
// the same logic applies that is used in the DrawerItemClickedListener.onItemClicked
int position = mDrawerList.getCheckedItemPosition() - 1;
outState.putInt(STATE_CHECKED, position);
if (position == 3) {
getSupportFragmentManager().putFragment(outState, "HistoryGraphFragment", mHistoryContent);
}
}
@Override
protected void onResume() {
super.onResume();
fCanExit = false;
fRestoreState = true;
setNavigationList();
if (raApp.raprefs.isKeepScreenOnEnabled()) {
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
}
// last thing we do is display the changelog if necessary
// TODO add in a preference check for displaying changelog on app startup
raApp.displayChangeLog(this);
}
@Override
protected void onPause() {
super.onPause();
}
private void setupToolbar() {
mToolbar = (Toolbar) findViewById(R.id.toolbar);
mToolbar.setTitle("");
setSupportActionBar(mToolbar);
}
private void setupNavDrawer() {
// get the string array for the navigation items
mNavTitles = getResources().getStringArray(R.array.nav_items);
// locate the navigation drawer items in the layout
mDrawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);
mDrawerList = (ListView) findViewById(R.id.left_drawer);
// set a custom shadow that overlays the main content when the drawer
// opens
mDrawerLayout.setDrawerShadow(R.drawable.drawer_shadow,
GravityCompat.START);
// add in the logo header
View header = getLayoutInflater().inflate(R.layout.drawer_list_header, null);
mDrawerList.addHeaderView(header, null, false);
// set the adapter for the navigation list view
ArrayAdapter<String> adapter =
new ArrayAdapter<String>(this, R.layout.drawer_list_item,
mNavTitles);
mDrawerList.setAdapter(adapter);
mDrawerList.setOnItemClickListener(new DrawerItemClickListener());
// setup the toggling for the drawer
mDrawerToggle =
new MyDrawerToggle(this, mDrawerLayout, mToolbar,
R.string.drawer_open, R.string.drawer_close);
mDrawerLayout.setDrawerListener(mDrawerToggle);
}
private void setNavigationList() {
// set list navigation items
final ActionBar ab = getSupportActionBar();
Context context = ab.getThemedContext();
int arrayID;
if (raApp.isAwayProfileEnabled()) {
arrayID = R.array.profileLabels;
} else {
arrayID = R.array.profileLabelsHomeOnly;
}
ArrayAdapter<CharSequence> list =
ArrayAdapter.createFromResource(context, arrayID,
R.layout.support_simple_spinner_dropdown_item);
ab.setListNavigationCallbacks(list, this);
ab.setSelectedNavigationItem(raApp.getSelectedProfile());
}
private void updateActionBar() {
// update actionbar
final ActionBar ab = getSupportActionBar();
ab.setNavigationMode(ActionBar.NAVIGATION_MODE_LIST);
}
@Override
public boolean onNavigationItemSelected(int itemPosition, long itemId) {
// only switch profiles when the user changes the navigation item,
// not when the navigation list state is restored
if (!fRestoreState) {
raApp.setSelectedProfile(itemPosition);
} else {
fRestoreState = false;
}
return true;
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
super.onConfigurationChanged(newConfig);
mDrawerToggle.onConfigurationChanged(newConfig);
}
@Override
protected void onPostCreate(Bundle savedInstanceState) {
super.onPostCreate(savedInstanceState);
// sync the toggle state after onRestoreInstanceState has occurred
mDrawerToggle.syncState();
}
@Override
public void onBackPressed() {
/*
When the back button is pressed, this function is called.
If the drawer is open, check it and cancel it here.
Calling super.onBackPressed() causes the BackStackChangeListener to be called
*/
// Log.d(TAG, "onBackPressed");
if (mDrawerLayout.isDrawerOpen(mDrawerList)) {
// Log.d(TAG, "drawer open, closing");
mDrawerLayout.closeDrawer(mDrawerList);
return;
}
if ( !fCanExit ) {
Toast.makeText(this, R.string.messageExitNotification, Toast.LENGTH_SHORT).show();
fCanExit = true;
Handler h = new Handler();
h.postDelayed(new Runnable() {
@Override
public void run() {
// Log.d(TAG, "Disabling exit flag");
fCanExit = false;
}
}, 2000);
return;
}
super.onBackPressed();
}
private void updateContent(int position) {
if (position != mOldPosition) {
// update the main content by replacing fragments
Fragment fragment;
switch (position) {
default:
case 0:
fragment = StatusFragment.newInstance();
break;
case 1:
fragment = MemoryFragment.newInstance(raApp.raprefs.useOldPre10MemoryLocations());
break;
case 2:
fragment = NotificationsFragment.newInstance();
break;
case 3:
//fragment = HistoryFragment.newInstance();
// TODO check the restoration of the fragment content
if (mHistoryContent != null ) {
fragment = mHistoryContent;
} else {
// mHistoryContent = HistoryGraphFragment.newInstance();
mHistoryContent = HistoryMultipleGraphFragment.newInstance();
fragment = mHistoryContent;
}
break;
case 4:
fragment = ErrorsFragment.newInstance();
break;
case 5:
fragment = DateTimeFragment.newInstance();
break;
}
Log.d(TAG, "UpdateContent: " + position);
FragmentTransaction ft =
getSupportFragmentManager().beginTransaction();
ft.replace(R.id.content_frame, fragment);
ft.commit();
mOldPosition = position;
}
}
public void selectItem(int position) {
// Log.d(TAG, "selectItem: " + position);
updateContent(position);
highlightItem(position);
mDrawerLayout.closeDrawer(mDrawerList);
}
public void highlightItem(int position) {
// Log.d(TAG, "highlightItem: " + position);
// since we are using a header for the list, the first
// item/position in the list is the header. our header is non-selectable
// so in order for us to have the proper item in our list selected, we must
// increase the position by 1. this same logic is applied to the
// DrawerItemClickedListener.onItemClicked
mDrawerList.setItemChecked(position + 1, true);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.global, menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
// pass the event to ActionBarDrawerToggle, if it returns true,
// then it has handled the app icon touch event
if (mDrawerToggle.onOptionsItemSelected(item)) {
return true;
}
// handle the rest of the action bar items here
switch (item.getItemId()) {
case R.id.action_settings:
Fragment f = getSupportFragmentManager().findFragmentById(R.id.content_frame);
if (f instanceof StatusFragment) {
// the current fragment is the status fragment
Log.d(TAG, "Status Fragment is current");
((StatusFragment) f).reloadPages();
}
startActivity(new Intent(this, SettingsActivity.class));
// startActivityForResult(new Intent(this, SettingsActivity.class), REQUEST_EXIT);
return true;
default:
return super.onOptionsItemSelected(item);
}
}
// @Override
// protected void onActivityResult(int requestCode, int resultCode, Intent data) {
// Log.d(TAG, "onActivityResult");
// if (requestCode == REQUEST_EXIT) {
// if (resultCode == RESULT_EXIT) {
// this.finish();
// }
// }
// }
// called whenever we call invalidateOptionsMenu()
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
/*
This function is called after invalidateOptionsMenu is called.
This happens when the Navigation drawer is opened and closed.
*/
boolean open = mDrawerLayout.isDrawerOpen(mDrawerList);
hideMenuItems(menu, open);
return super.onPrepareOptionsMenu(menu);
}
private void hideMenuItems(Menu menu, boolean open) {
// hide the menu item(s) when the drawer is open
// Refresh button on Status page
MenuItem mi = menu.findItem(R.id.action_refresh);
if ( mi != null )
mi.setVisible(!open);
// Add button on Notification page
mi = menu.findItem(R.id.action_add_notification);
if ( mi != null )
mi.setVisible(!open);
// Delete button on Notification page
mi = menu.findItem(R.id.action_delete_notification);
if ( mi != null )
mi.setVisible(!open);
// Delete button on Error page
mi = menu.findItem(R.id.menu_delete);
if ( mi != null )
mi.setVisible(!open);
// hide buttons on History / Chart page
mi = menu.findItem(R.id.action_configure_chart);
if (mi != null)
mi.setVisible(!open);
mi = menu.findItem(R.id.action_refresh_chart);
if (mi != null)
mi.setVisible(!open);
}
private class MyDrawerToggle extends ActionBarDrawerToggle {
public MyDrawerToggle(Activity activity, DrawerLayout drawerLayout,<|fim▁hole|> super(activity, drawerLayout, toolbar,
openDrawerContentDescRes, closeDrawerContentDescRes);
}
@Override
public void onDrawerClosed(View drawerView) {
super.onDrawerClosed(drawerView);
// Log.d(TAG, "DrawerClosed");
invalidateOptionsMenu();
if (opened != null && !opened) {
// drawer closed for the first time ever,
// set that it has been closed
opened = true;
raApp.raprefs.set(OPENED_KEY, true);
}
}
@Override
public void onDrawerOpened(View drawerView) {
super.onDrawerOpened(drawerView);
// getSupportActionBar().setTitle(R.string.app_name);
invalidateOptionsMenu();
}
}
private class DrawerItemClickListener implements
ListView.OnItemClickListener {
@Override
public void onItemClick(
AdapterView<?> parent,
View view,
int position,
long id) {
// Perform action when a drawer item is selected
// Log.d(TAG, "onDrawerItemClick: " + position);
// when we have a list header, it counts as a position in the list
// the first position to be exact. so we have to decrease the
// position by 1 to get the proper item chosen in our list
selectItem(position - 1);
}
}
}<|fim▁end|>
|
Toolbar toolbar,
int openDrawerContentDescRes,
int closeDrawerContentDescRes) {
|
<|file_name|>0006_scheduleexperience.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models<|fim▁hole|>
class Migration(migrations.Migration):
dependencies = [
('schedules', '0005_auto_20171010_1722'),
]
operations = [
migrations.CreateModel(
name='ScheduleExperience',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('experience_type', models.PositiveSmallIntegerField(default=0, choices=[(0, b'Recurring Nudge and Upgrade Reminder'), (1, b'Course Updates')])),
('schedule', models.OneToOneField(related_name='experience', to='schedules.Schedule')),
],
),
]<|fim▁end|>
| |
<|file_name|>fix_intern.py<|end_file_name|><|fim▁begin|># Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
<|fim▁hole|>from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name(u"sys"), Name(u"intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
touch_import(None, u'sys', node)
return new<|fim▁end|>
|
from .. import pytree
from .. import fixer_base
|
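# Illustrative before/after for the fixer above (not part of lib2to3 itself):
#   before:  cached = intern(name)
#   after:   cached = sys.intern(name)   # touch_import also adds `import sys`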
<|file_name|>contact.py<|end_file_name|><|fim▁begin|>from model.group_address import Address_data
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
print(err)  # the getopt module has no usage(); report the parse error instead
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*7
return prefix + "".join([random.choice(symbols) for x in range(random.randrange(maxlen))])
def random_numbers(maxlen):
numbers = string.digits + " "*2 + "(" + ")" + "-"
return "".join([random.choice(numbers) for x in range(maxlen)])
def random_mail(domen, maxlen):
value = string.ascii_letters + string.digits
return "".join([random.choice(value) for x in range(random.randrange(maxlen))]) + domen
testdata = [
Address_data(firstname=random_string("firstname", 20), middlename=random_string("", 1),
lastname=random_string("lastname", 20), nickname=random_string("nickname", 20),
company=random_string("company", 20), address=random_string("address", 20),
home_phone=random_numbers(10), mobile_phone=random_numbers(10), work_phone=random_numbers(10),
fax_phone=random_numbers(10), email_1=random_mail("@mail.ru", 10), email_2=random_mail("@mail.ru", 10),
home_page=random_string("page", 15))
for x in range(n)
]
constant = [
Address_data(firstname="firstname", middlename="middlename", lastname="lastname", nickname="nickname",
company="company", address="address", home_phone="7874177", mobile_phone="784541212",
work_phone="8776464321", fax_phone="874845421", email_1="[email protected]", email_2="[email protected]",<|fim▁hole|>
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
#with open(file, "w") as out:
# out.write(json.dumps(testdata, default=lambda x: x.__dict__, indent=2))
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))<|fim▁end|>
|
home_page="www.page.com")
]
|
<|file_name|>BenchmarkTest12371.java<|end_file_name|><|fim▁begin|>/**
* OWASP Benchmark Project v1.1
*
* This file is part of the Open Web Application Security Project (OWASP)
* Benchmark Project. For details, please see
* <a href="https://www.owasp.org/index.php/Benchmark">https://www.owasp.org/index.php/Benchmark</a>.
*
* The Benchmark is free software: you can redistribute it and/or modify it under the terms
* of the GNU General Public License as published by the Free Software Foundation, version 2.
*
* The Benchmark is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details
*
* @author Dave Wichers <a href="https://www.aspectsecurity.com">Aspect Security</a><|fim▁hole|>* @created 2015
*/
package org.owasp.benchmark.testcode;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
@WebServlet("/BenchmarkTest12371")
public class BenchmarkTest12371 extends HttpServlet {
private static final long serialVersionUID = 1L;
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
doPost(request, response);
}
@Override
public void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
String[] values = request.getParameterValues("foo");
String param;
if (values != null && values.length != 0)
param = request.getParameterValues("foo")[0];
else param = null;
String bar = new Test().doSomething(param);
java.io.FileOutputStream fos = new java.io.FileOutputStream(new java.io.File(org.owasp.benchmark.helpers.Utils.testfileDir + bar));
} // end doPost
private class Test {
public String doSomething(String param) throws ServletException, IOException {
// Chain a bunch of propagators in sequence
String a13109 = param; //assign
StringBuilder b13109 = new StringBuilder(a13109); // stick in stringbuilder
b13109.append(" SafeStuff"); // append some safe content
b13109.replace(b13109.length()-"Chars".length(),b13109.length(),"Chars"); //replace some of the end content
java.util.HashMap<String,Object> map13109 = new java.util.HashMap<String,Object>();
map13109.put("key13109", b13109.toString()); // put in a collection
String c13109 = (String)map13109.get("key13109"); // get it back out
String d13109 = c13109.substring(0,c13109.length()-1); // extract most of it
String e13109 = new String( new sun.misc.BASE64Decoder().decodeBuffer(
new sun.misc.BASE64Encoder().encode( d13109.getBytes() ) )); // B64 encode and decode it
String f13109 = e13109.split(" ")[0]; // split it on a space
org.owasp.benchmark.helpers.ThingInterface thing = org.owasp.benchmark.helpers.ThingFactory.createThing();
String g13109 = "barbarians_at_the_gate"; // This is static so this whole flow is 'safe'
String bar = thing.doSomething(g13109); // reflection
return bar;
}
} // end innerclass Test
} // end DataflowThruInnerClass<|fim▁end|>
| |
<|file_name|>dataTables.fixedHeader.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*! FixedHeader 2.1.2
* ©2010-2014 SpryMedia Ltd - datatables.net/license
*/
/**
* @summary FixedHeader
* @description Fix a table's header or footer, so it is always visible while
* Scrolling
* @version 2.1.2
* @file dataTables.fixedHeader.js
* @author SpryMedia Ltd (www.sprymedia.co.uk)
* @contact www.sprymedia.co.uk/contact
* @copyright Copyright 2009-2014 SpryMedia Ltd.
*
* This source file is free software, available under the following license:
* MIT license - http://datatables.net/license/mit
*
* This source file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.
*
* For details please refer to: http://www.datatables.net
*/
/* Global scope for FixedHeader for backwards compatibility - will be removed
* in future. Not documented in 1.1.x.
*/
/* Global scope for FixedHeader */
var FixedHeader;
(function(window, document, undefined) {
var factory = function( $, DataTable ) {
"use strict";
/*
* Function: FixedHeader
* Purpose: Provide 'fixed' header, footer and columns for a DataTable
* Returns: object:FixedHeader - must be called with 'new'
* Inputs: mixed:mTable - target table
* @param {object} dt DataTables instance or HTML table node. With DataTables
* 1.10 this can also be a jQuery collection (with just a single table in its
* result set), a jQuery selector, DataTables API instance or settings
* object.
* @param {object} [oInit] initialisation settings, with the following
* properties (each optional)
* * bool:top - fix the header (default true)
* * bool:bottom - fix the footer (default false)
* * int:left - fix the left column(s) (default 0)
* * int:right - fix the right column(s) (default 0)
* * int:zTop - fixed header zIndex
* * int:zBottom - fixed footer zIndex
* * int:zLeft - fixed left zIndex
* * int:zRight - fixed right zIndex
*/
FixedHeader = function ( mTable, oInit ) {
/* Sanity check - you just know it will happen */
if ( !(this instanceof FixedHeader) )
{
alert( "FixedHeader warning: FixedHeader must be initialised with the 'new' keyword." );
return;
}
var that = this;
var oSettings = {
"aoCache": [],
"oSides": {
"top": true,
"bottom": false,
"left": 0,
"right": 0
},
"oZIndexes": {
"top": 104,
"bottom": 103,
"left": 102,
"right": 101
},
"oCloneOnDraw": {
"top": false,
"bottom": false,
"left": true,
"right": true
},
"oMes": {
"iTableWidth": 0,
"iTableHeight": 0,
"iTableLeft": 0,
"iTableRight": 0, /* note this is left+width, not actually "right" */
"iTableTop": 0,
"iTableBottom": 0 /* note this is top+height, not actually "bottom" */
},
"oOffset": {
"top": 0
},
"nTable": null,
"bFooter": false,
"bInitComplete": false
};
/*
* Function: fnGetSettings
* Purpose: Get the settings for this object
* Returns: object: - settings object
* Inputs: -
*/
this.fnGetSettings = function () {
return oSettings;
};
/*
* Function: fnUpdate
* Purpose: Update the positioning and copies of the fixed elements
* Returns: -
* Inputs: -
*/
this.fnUpdate = function () {
this._fnUpdateClones();
this._fnUpdatePositions();
};
/*
* Function: fnPosition
* Purpose: Update the positioning of the fixed elements
* Returns: -
* Inputs: -
*/
this.fnPosition = function () {
this._fnUpdatePositions();
};
var dt = $.fn.dataTable.Api ?
new $.fn.dataTable.Api( mTable ).settings()[0] :
mTable.fnSettings();
dt._oPluginFixedHeader = this;
/* Let's do it */
this.fnInit( dt, oInit );
};
/*
* Variable: FixedHeader
* Purpose: Prototype for FixedHeader
* Scope: global
*/
FixedHeader.prototype = {
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Initialisation
*/
/*
* Function: fnInit
* Purpose: The "constructor"
* Returns: -
* Inputs: {as FixedHeader function}
*/
fnInit: function ( oDtSettings, oInit )
{
var s = this.fnGetSettings();
var that = this;
/* Record the user definable settings */
this.fnInitSettings( s, oInit );
if ( oDtSettings.oScroll.sX !== "" || oDtSettings.oScroll.sY !== "" )
{
alert( "FixedHeader 2 is not supported with DataTables' scrolling mode at this time" );
return;
}
s.nTable = oDtSettings.nTable;
oDtSettings.aoDrawCallback.unshift( {
"fn": function () {
FixedHeader.fnMeasure();
that._fnUpdateClones.call(that);
that._fnUpdatePositions.call(that);
},
"sName": "FixedHeader"
} );
s.bFooter = $('>tfoot', s.nTable).length > 0;
/* Add the 'sides' that are fixed */
if ( s.oSides.top )
{
s.aoCache.push( that._fnCloneTable( "fixedHeader", "FixedHeader_Header", that._fnCloneThead ) );
}
if ( s.oSides.bottom )
{
s.aoCache.push( that._fnCloneTable( "fixedFooter", "FixedHeader_Footer", that._fnCloneTfoot ) );
}
if ( s.oSides.left )
{
s.aoCache.push( that._fnCloneTable( "fixedLeft", "FixedHeader_Left", that._fnCloneTLeft, s.oSides.left ) );
}
if ( s.oSides.right )
{
s.aoCache.push( that._fnCloneTable( "fixedRight", "FixedHeader_Right", that._fnCloneTRight, s.oSides.right ) );
}
/* Event listeners for window movement */
FixedHeader.afnScroll.push( function () {
that._fnUpdatePositions.call(that);
} );
$(window).resize( function () {
FixedHeader.fnMeasure();
that._fnUpdateClones.call(that);
that._fnUpdatePositions.call(that);
} );
$(s.nTable)
.on('column-reorder.dt', function () {
FixedHeader.fnMeasure();
that._fnUpdateClones( true );
that._fnUpdatePositions();
} )
.on('column-visibility.dt', function () {
FixedHeader.fnMeasure();
that._fnUpdateClones( true );
that._fnUpdatePositions();
} );
/* Get things right to start with */
FixedHeader.fnMeasure();
that._fnUpdateClones();
that._fnUpdatePositions();
s.bInitComplete = true;
},
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Support functions
*/
/*
* Function: fnInitSettings
* Purpose: Take the user's settings and copy them to our local store
* Returns: -
* Inputs: object:s - the local settings object
* object:oInit - the user's settings object
*/
fnInitSettings: function ( s, oInit )
{
if ( oInit !== undefined )
{
if ( oInit.top !== undefined ) {
s.oSides.top = oInit.top;
}
if ( oInit.bottom !== undefined ) {
s.oSides.bottom = oInit.bottom;
}
if ( typeof oInit.left == 'boolean' ) {
s.oSides.left = oInit.left ? 1 : 0;
}
else if ( oInit.left !== undefined ) {
s.oSides.left = oInit.left;
}
if ( typeof oInit.right == 'boolean' ) {
s.oSides.right = oInit.right ? 1 : 0;
}
else if ( oInit.right !== undefined ) {
s.oSides.right = oInit.right;
}
if ( oInit.zTop !== undefined ) {
s.oZIndexes.top = oInit.zTop;
}
if ( oInit.zBottom !== undefined ) {
s.oZIndexes.bottom = oInit.zBottom;
}
if ( oInit.zLeft !== undefined ) {
s.oZIndexes.left = oInit.zLeft;
}
if ( oInit.zRight !== undefined ) {
s.oZIndexes.right = oInit.zRight;
}
if ( oInit.offsetTop !== undefined ) {
s.oOffset.top = oInit.offsetTop;
}
if ( oInit.alwaysCloneTop !== undefined ) {
s.oCloneOnDraw.top = oInit.alwaysCloneTop;
}
if ( oInit.alwaysCloneBottom !== undefined ) {
s.oCloneOnDraw.bottom = oInit.alwaysCloneBottom;
}
if ( oInit.alwaysCloneLeft !== undefined ) {
s.oCloneOnDraw.left = oInit.alwaysCloneLeft;
}
if ( oInit.alwaysCloneRight !== undefined ) {
s.oCloneOnDraw.right = oInit.alwaysCloneRight;
}
}
},
/*
* Function: _fnCloneTable
* Purpose: Clone the table node and do basic initialisation
* Returns: -
* Inputs: -
*/
_fnCloneTable: function ( sType, sClass, fnClone, iCells )
{
var s = this.fnGetSettings();
var nCTable;
/* We know that the table _MUST_ has a DIV wrapped around it, because this is simply how
* DataTables works. Therefore, we can set this to be relatively position (if it is not
* alreadu absolute, and use this as the base point for the cloned header
*/
if ( $(s.nTable.parentNode).css('position') != "absolute" )
{
s.nTable.parentNode.style.position = "relative";
}
/* Just a shallow clone will do - we only want the table node */
nCTable = s.nTable.cloneNode( false );
nCTable.removeAttribute( 'id' );
var nDiv = document.createElement( 'div' );
nDiv.style.position = "absolute";
nDiv.style.top = "0px";
nDiv.style.left = "0px";
nDiv.className += " FixedHeader_Cloned "+sType+" "+sClass;
/* Set the zIndexes */
if ( sType == "fixedHeader" )
{
nDiv.style.zIndex = s.oZIndexes.top;
}
if ( sType == "fixedFooter" )
{
nDiv.style.zIndex = s.oZIndexes.bottom;
}
if ( sType == "fixedLeft" )
{
nDiv.style.zIndex = s.oZIndexes.left;
}
else if ( sType == "fixedRight" )
{
nDiv.style.zIndex = s.oZIndexes.right;
}
/* remove margins since we are going to position it absolutely */
nCTable.style.margin = "0";
/* Insert the newly cloned table into the DOM, on top of the "real" header */
nDiv.appendChild( nCTable );
document.body.appendChild( nDiv );
return {
"nNode": nCTable,
"nWrapper": nDiv,
"sType": sType,
"sPosition": "",
"sTop": "",
"sLeft": "",
"fnClone": fnClone,
"iCells": iCells
};
},
/*
* Function: _fnMeasure
* Purpose: Get the current positioning of the table in the DOM
* Returns: -
* Inputs: -
*/
_fnMeasure: function ()
{
var
s = this.fnGetSettings(),
m = s.oMes,
jqTable = $(s.nTable),
oOffset = jqTable.offset(),
iParentScrollTop = this._fnSumScroll( s.nTable.parentNode, 'scrollTop' ),
iParentScrollLeft = this._fnSumScroll( s.nTable.parentNode, 'scrollLeft' );
m.iTableWidth = jqTable.outerWidth();
m.iTableHeight = jqTable.outerHeight();
m.iTableLeft = oOffset.left + s.nTable.parentNode.scrollLeft;
m.iTableTop = oOffset.top + iParentScrollTop;
m.iTableRight = FixedHeader.oDoc.iWidth - m.iTableLeft - m.iTableWidth;
m.iTableBottom = FixedHeader.oDoc.iHeight - m.iTableTop - m.iTableHeight;
},
/*
* Function: _fnSumScroll
* Purpose: Sum node parameters all the way to the top<|fim▁hole|> * string:side - scrollTop or scrollLeft
*/
_fnSumScroll: function ( n, side )
{
var i = n[side];
while ( n = n.parentNode )
{
if ( n.nodeName == 'HTML' || n.nodeName == 'BODY' )
{
break;
}
i = n[side];
}
return i;
},
/*
* Function: _fnUpdatePositions
* Purpose: Loop over the fixed elements for this table and update their positions
* Returns: -
* Inputs: -
*/
_fnUpdatePositions: function ()
{
var s = this.fnGetSettings();
this._fnMeasure();
for ( var i=0, iLen=s.aoCache.length ; i<iLen ; i++ )
{
if ( s.aoCache[i].sType == "fixedHeader" )
{
this._fnScrollFixedHeader( s.aoCache[i] );
}
else if ( s.aoCache[i].sType == "fixedFooter" )
{
this._fnScrollFixedFooter( s.aoCache[i] );
}
else if ( s.aoCache[i].sType == "fixedLeft" )
{
this._fnScrollHorizontalLeft( s.aoCache[i] );
}
else
{
this._fnScrollHorizontalRight( s.aoCache[i] );
}
}
},
/*
* Function: _fnUpdateClones
* Purpose: Loop over the fixed elements for this table and call their cloning functions
* Returns: -
* Inputs: -
*/
_fnUpdateClones: function ( full )
{
var s = this.fnGetSettings();
if ( full ) {
// This is a little bit of a hack to force a full clone draw. When
// `full` is set to true, we want to reclone the source elements,
// regardless of the clone-on-draw settings
s.bInitComplete = false;
}
for ( var i=0, iLen=s.aoCache.length ; i<iLen ; i++ )
{
s.aoCache[i].fnClone.call( this, s.aoCache[i] );
}
if ( full ) {
s.bInitComplete = true;
}
},
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Scrolling functions
*/
/*
* Function: _fnScrollHorizontalLeft
* Purpose: Update the positioning of the scrolling elements
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnScrollHorizontalRight: function ( oCache )
{
var
s = this.fnGetSettings(),
oMes = s.oMes,
oWin = FixedHeader.oWin,
oDoc = FixedHeader.oDoc,
nTable = oCache.nWrapper,
iFixedWidth = $(nTable).outerWidth();
if ( oWin.iScrollRight < oMes.iTableRight )
{
/* Fully right aligned */
this._fnUpdateCache( oCache, 'sPosition', 'absolute', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', oMes.iTableTop+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', (oMes.iTableLeft+oMes.iTableWidth-iFixedWidth)+"px", 'left', nTable.style );
}
else if ( oMes.iTableLeft < oDoc.iWidth-oWin.iScrollRight-iFixedWidth )
{
/* Middle */
this._fnUpdateCache( oCache, 'sPosition', 'fixed', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', (oMes.iTableTop-oWin.iScrollTop)+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', (oWin.iWidth-iFixedWidth)+"px", 'left', nTable.style );
}
else
{
/* Fully left aligned */
this._fnUpdateCache( oCache, 'sPosition', 'absolute', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', oMes.iTableTop+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', oMes.iTableLeft+"px", 'left', nTable.style );
}
},
/*
* Function: _fnScrollHorizontalLeft
* Purpose: Update the positioning of the scrolling elements
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnScrollHorizontalLeft: function ( oCache )
{
var
s = this.fnGetSettings(),
oMes = s.oMes,
oWin = FixedHeader.oWin,
oDoc = FixedHeader.oDoc,
nTable = oCache.nWrapper,
iCellWidth = $(nTable).outerWidth();
if ( oWin.iScrollLeft < oMes.iTableLeft )
{
/* Fully left align */
this._fnUpdateCache( oCache, 'sPosition', 'absolute', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', oMes.iTableTop+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', oMes.iTableLeft+"px", 'left', nTable.style );
}
else if ( oWin.iScrollLeft < oMes.iTableLeft+oMes.iTableWidth-iCellWidth )
{
this._fnUpdateCache( oCache, 'sPosition', 'fixed', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', (oMes.iTableTop-oWin.iScrollTop)+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', "0px", 'left', nTable.style );
}
else
{
/* Fully right align */
this._fnUpdateCache( oCache, 'sPosition', 'absolute', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', oMes.iTableTop+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', (oMes.iTableLeft+oMes.iTableWidth-iCellWidth)+"px", 'left', nTable.style );
}
},
/*
* Function: _fnScrollFixedFooter
* Purpose: Update the positioning of the scrolling elements
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnScrollFixedFooter: function ( oCache )
{
var
s = this.fnGetSettings(),
oMes = s.oMes,
oWin = FixedHeader.oWin,
oDoc = FixedHeader.oDoc,
nTable = oCache.nWrapper,
iTheadHeight = $("thead", s.nTable).outerHeight(),
iCellHeight = $(nTable).outerHeight();
if ( oWin.iScrollBottom < oMes.iTableBottom )
{
/* Below */
this._fnUpdateCache( oCache, 'sPosition', 'absolute', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', (oMes.iTableTop+oMes.iTableHeight-iCellHeight)+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', oMes.iTableLeft+"px", 'left', nTable.style );
}
else if ( oWin.iScrollBottom < oMes.iTableBottom+oMes.iTableHeight-iCellHeight-iTheadHeight )
{
this._fnUpdateCache( oCache, 'sPosition', 'fixed', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', (oWin.iHeight-iCellHeight)+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', (oMes.iTableLeft-oWin.iScrollLeft)+"px", 'left', nTable.style );
}
else
{
/* Above */
this._fnUpdateCache( oCache, 'sPosition', 'absolute', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', (oMes.iTableTop+iCellHeight)+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', oMes.iTableLeft+"px", 'left', nTable.style );
}
},
/*
* Function: _fnScrollFixedHeader
* Purpose: Update the positioning of the scrolling elements
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnScrollFixedHeader: function ( oCache )
{
var
s = this.fnGetSettings(),
oMes = s.oMes,
oWin = FixedHeader.oWin,
oDoc = FixedHeader.oDoc,
nTable = oCache.nWrapper,
iTbodyHeight = 0,
anTbodies = s.nTable.getElementsByTagName('tbody');
for (var i = 0; i < anTbodies.length; ++i) {
iTbodyHeight += anTbodies[i].offsetHeight;
}
if ( oMes.iTableTop > oWin.iScrollTop + s.oOffset.top )
{
/* Above the table */
this._fnUpdateCache( oCache, 'sPosition', "absolute", 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', oMes.iTableTop+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', oMes.iTableLeft+"px", 'left', nTable.style );
}
else if ( oWin.iScrollTop + s.oOffset.top > oMes.iTableTop+iTbodyHeight )
{
/* At the bottom of the table */
this._fnUpdateCache( oCache, 'sPosition', "absolute", 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', (oMes.iTableTop+iTbodyHeight)+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', oMes.iTableLeft+"px", 'left', nTable.style );
}
else
{
/* In the middle of the table */
this._fnUpdateCache( oCache, 'sPosition', 'fixed', 'position', nTable.style );
this._fnUpdateCache( oCache, 'sTop', s.oOffset.top+"px", 'top', nTable.style );
this._fnUpdateCache( oCache, 'sLeft', (oMes.iTableLeft-oWin.iScrollLeft)+"px", 'left', nTable.style );
}
},
/*
* Function: _fnUpdateCache
* Purpose: Check the cache and update cache and value if needed
* Returns: -
* Inputs: object:oCache - local cache object
* string:sCache - cache property
* string:sSet - value to set
* string:sProperty - object property to set
* object:oObj - object to update
*/
_fnUpdateCache: function ( oCache, sCache, sSet, sProperty, oObj )
{
if ( oCache[sCache] != sSet )
{
oObj[sProperty] = sSet;
oCache[sCache] = sSet;
}
},
/**
* Copy the classes of all child nodes from one element to another. This implies
* that the two have identical structure - no error checking is performed to that
* fact.
* @param {element} source Node to copy classes from
* @param {element} dest Node to copy classes too
*/
_fnClassUpdate: function ( source, dest )
{
var that = this;
if ( source.nodeName.toUpperCase() === "TR" || source.nodeName.toUpperCase() === "TH" ||
source.nodeName.toUpperCase() === "TD" || source.nodeName.toUpperCase() === "SPAN" )
{
dest.className = source.className;
}
$(source).children().each( function (i) {
that._fnClassUpdate( $(source).children()[i], $(dest).children()[i] );
} );
},
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Cloning functions
*/
/*
* Function: _fnCloneThead
* Purpose: Clone the thead element
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnCloneThead: function ( oCache )
{
var s = this.fnGetSettings();
var nTable = oCache.nNode;
if ( s.bInitComplete && !s.oCloneOnDraw.top )
{
this._fnClassUpdate( $('thead', s.nTable)[0], $('thead', nTable)[0] );
return;
}
/* Set the wrapper width to match that of the cloned table */
var iDtWidth = $(s.nTable).outerWidth();
oCache.nWrapper.style.width = iDtWidth+"px";
nTable.style.width = iDtWidth+"px";
/* Remove any children the cloned table has */
while ( nTable.childNodes.length > 0 )
{
$('thead th', nTable).unbind( 'click' );
nTable.removeChild( nTable.childNodes[0] );
}
/* Clone the DataTables header */
var nThead = $('thead', s.nTable).clone(true)[0];
nTable.appendChild( nThead );
/* Copy the widths across - apparently a clone isn't good enough for this */
var a = [];
var b = [];
$("thead>tr th", s.nTable).each( function (i) {
a.push( $(this).width() );
} );
$("thead>tr td", s.nTable).each( function (i) {
b.push( $(this).width() );
} );
$("thead>tr th", s.nTable).each( function (i) {
$("thead>tr th:eq("+i+")", nTable).width( a[i] );
$(this).width( a[i] );
} );
$("thead>tr td", s.nTable).each( function (i) {
$("thead>tr td:eq("+i+")", nTable).width( b[i] );
$(this).width( b[i] );
} );
// Stop DataTables 1.9 from putting a focus ring on the headers when
// clicked to sort
$('th.sorting, th.sorting_desc, th.sorting_asc', nTable).bind( 'click', function () {
this.blur();
} );
},
/*
* Function: _fnCloneTfoot
* Purpose: Clone the tfoot element
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnCloneTfoot: function ( oCache )
{
var s = this.fnGetSettings();
var nTable = oCache.nNode;
/* Set the wrapper width to match that of the cloned table */
oCache.nWrapper.style.width = $(s.nTable).outerWidth()+"px";
/* Remove any children the cloned table has */
while ( nTable.childNodes.length > 0 )
{
nTable.removeChild( nTable.childNodes[0] );
}
/* Clone the DataTables footer */
var nTfoot = $('tfoot', s.nTable).clone(true)[0];
nTable.appendChild( nTfoot );
/* Copy the widths across - apparently a clone isn't good enough for this */
$("tfoot:eq(0)>tr th", s.nTable).each( function (i) {
$("tfoot:eq(0)>tr th:eq("+i+")", nTable).width( $(this).width() );
} );
$("tfoot:eq(0)>tr td", s.nTable).each( function (i) {
$("tfoot:eq(0)>tr td:eq("+i+")", nTable).width( $(this).width() );
} );
},
/*
* Function: _fnCloneTLeft
* Purpose: Clone the left column(s)
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnCloneTLeft: function ( oCache )
{
var s = this.fnGetSettings();
var nTable = oCache.nNode;
var nBody = $('tbody', s.nTable)[0];
/* Remove any children the cloned table has */
while ( nTable.childNodes.length > 0 )
{
nTable.removeChild( nTable.childNodes[0] );
}
/* Is this the most efficient way to do this - it looks horrible... */
nTable.appendChild( $("thead", s.nTable).clone(true)[0] );
nTable.appendChild( $("tbody", s.nTable).clone(true)[0] );
if ( s.bFooter )
{
nTable.appendChild( $("tfoot", s.nTable).clone(true)[0] );
}
/* Remove unneeded cells */
var sSelector = 'gt(' + (oCache.iCells - 1) + ')';
$('thead tr', nTable).each( function (k) {
$('th:' + sSelector, this).remove();
} );
$('tfoot tr', nTable).each( function (k) {
$('th:' + sSelector, this).remove();
} );
$('tbody tr', nTable).each( function (k) {
$('td:' + sSelector, this).remove();
} );
this.fnEqualiseHeights( 'thead', nBody.parentNode, nTable );
this.fnEqualiseHeights( 'tbody', nBody.parentNode, nTable );
this.fnEqualiseHeights( 'tfoot', nBody.parentNode, nTable );
var iWidth = 0;
for (var i = 0; i < oCache.iCells; i++) {
iWidth += $('thead tr th:eq(' + i + ')', s.nTable).outerWidth();
}
nTable.style.width = iWidth+"px";
oCache.nWrapper.style.width = iWidth+"px";
},
/*
* Function: _fnCloneTRight
* Purpose: Clone the right most column(s)
* Returns: -
* Inputs: object:oCache - the cached values for this fixed element
*/
_fnCloneTRight: function ( oCache )
{
var s = this.fnGetSettings();
var nBody = $('tbody', s.nTable)[0];
var nTable = oCache.nNode;
var iCols = $('tbody tr:eq(0) td', s.nTable).length;
/* Remove any children the cloned table has */
while ( nTable.childNodes.length > 0 )
{
nTable.removeChild( nTable.childNodes[0] );
}
/* Is this the most efficient way to do this - it looks horrible... */
nTable.appendChild( $("thead", s.nTable).clone(true)[0] );
nTable.appendChild( $("tbody", s.nTable).clone(true)[0] );
if ( s.bFooter )
{
nTable.appendChild( $("tfoot", s.nTable).clone(true)[0] );
}
$('thead tr th:lt('+(iCols-oCache.iCells)+')', nTable).remove();
$('tfoot tr th:lt('+(iCols-oCache.iCells)+')', nTable).remove();
/* Remove unneeded cells */
$('tbody tr', nTable).each( function (k) {
$('td:lt('+(iCols-oCache.iCells)+')', this).remove();
} );
this.fnEqualiseHeights( 'thead', nBody.parentNode, nTable );
this.fnEqualiseHeights( 'tbody', nBody.parentNode, nTable );
this.fnEqualiseHeights( 'tfoot', nBody.parentNode, nTable );
var iWidth = 0;
for (var i = 0; i < oCache.iCells; i++) {
iWidth += $('thead tr th:eq('+(iCols-1-i)+')', s.nTable).outerWidth();
}
nTable.style.width = iWidth+"px";
oCache.nWrapper.style.width = iWidth+"px";
},
/**
* Equalise the heights of the rows in a given table node in a cross browser way. Note that this
* is more or less lifted as is from FixedColumns
* @method fnEqualiseHeights
* @returns void
* @param {string} parent Node type - thead, tbody or tfoot
* @param {element} original Original node to take the heights from
* @param {element} clone Copy the heights to
* @private
*/
"fnEqualiseHeights": function ( parent, original, clone )
{
var that = this;
var originals = $(parent +' tr', original);
var height;
$(parent+' tr', clone).each( function (k) {
height = originals.eq( k ).css('height');
// This is nasty :-(. IE has a sub-pixel error even when setting
// the height below (the Firefox fix) which causes the fixed column
// to go out of alignment. Need to add a pixel before the assignment
// Can this be feature detected? Not sure how...
if ( navigator.appName == 'Microsoft Internet Explorer' ) {
height = parseInt( height, 10 ) + 1;
}
$(this).css( 'height', height );
// For Firefox to work, we need to also set the height of the
// original row, to the value that we read from it! Otherwise there
// is a sub-pixel rounding error
originals.eq( k ).css( 'height', height );
} );
}
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Static properties and methods
* We use these for speed! This information is common to all instances of FixedHeader, so no
 * point in having them calculated and stored for each different instance.
*/
/*
* Variable: oWin
* Purpose: Store information about the window positioning
* Scope: FixedHeader
*/
FixedHeader.oWin = {
"iScrollTop": 0,
"iScrollRight": 0,
"iScrollBottom": 0,
"iScrollLeft": 0,
"iHeight": 0,
"iWidth": 0
};
/*
* Variable: oDoc
* Purpose: Store information about the document size
* Scope: FixedHeader
*/
FixedHeader.oDoc = {
"iHeight": 0,
"iWidth": 0
};
/*
* Variable: afnScroll
* Purpose: Array of functions that are to be used for the scrolling components
* Scope: FixedHeader
*/
FixedHeader.afnScroll = [];
/*
* Function: fnMeasure
* Purpose: Update the measurements for the window and document
* Returns: -
* Inputs: -
*/
FixedHeader.fnMeasure = function ()
{
var
jqWin = $(window),
jqDoc = $(document),
oWin = FixedHeader.oWin,
oDoc = FixedHeader.oDoc;
oDoc.iHeight = jqDoc.height();
oDoc.iWidth = jqDoc.width();
oWin.iHeight = jqWin.height();
oWin.iWidth = jqWin.width();
oWin.iScrollTop = jqWin.scrollTop();
oWin.iScrollLeft = jqWin.scrollLeft();
oWin.iScrollRight = oDoc.iWidth - oWin.iScrollLeft - oWin.iWidth;
oWin.iScrollBottom = oDoc.iHeight - oWin.iScrollTop - oWin.iHeight;
};
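/* Illustrative sketch (not part of the plugin): once fnMeasure() has run,
 * the cached metrics can be read straight off the static objects, for
 * example to see how many pixels of the document remain to the right:
 *
 *   FixedHeader.fnMeasure();
 *   var pxHiddenRight = FixedHeader.oWin.iScrollRight;
 */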
FixedHeader.version = "2.1.2";
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Global processing
*/
/*
* Just one 'scroll' event handler in FixedHeader, which calls the required components. This is
 * done as an optimisation, to reduce calculation and propagation time
*/
$(window).scroll( function () {
FixedHeader.fnMeasure();
for ( var i=0, iLen=FixedHeader.afnScroll.length ; i<iLen ; i++ ) {
FixedHeader.afnScroll[i]();
}
} );
$.fn.dataTable.FixedHeader = FixedHeader;
$.fn.DataTable.FixedHeader = FixedHeader;
return FixedHeader;
}; // /factory
// Define as an AMD module if possible
if ( typeof define === 'function' && define.amd ) {
define( ['jquery', 'datatables'], factory );
}
else if ( typeof exports === 'object' ) {
// Node/CommonJS
factory( require('jquery'), require('datatables') );
}
else if ( jQuery && !jQuery.fn.dataTable.FixedHeader ) {
// Otherwise simply initialise as normal, stopping multiple evaluation
factory( jQuery, jQuery.fn.dataTable );
}
})(window, document);<|fim▁end|>
|
* Returns: int: sum
* Inputs: node:n - node to consider
|
<|file_name|>longbeach_crime_stats.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Sat Feb 22 12:07:53 2014
@author: Gouthaman Balaraman
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
import numpy as np
import os
#####################################################
# A bunch of constants used throughout the script. #
#####################################################
_curdir = os.path.abspath(os.path.curdir)
_posdat = re.compile(r'(\w+):(\d+)px')
_topdat = re.compile(r'top:(\d+)px')
_leftdat = re.compile(r'left:(\d+)px')
# this is the full format with all columns; The numbers here bracket the columns
maptbl_long = [(0,75),(75,145),(145,212),(212,283),(283,350),(350,418),(418,486),
(486,554),(554,621),(621,688),(688,756),(756,823),(823,890),(890,958),
(958,1026),(1026,1094),(1094,1199)]
# This provides a mapping to the column with the text
mptbltxt = ['RD','MURDER','MANSLTR','FORCED_RAPE','ROBBERY','AGGRAV_ASSAULT',
'BURGLARY_RES','BURGLARY_COM','AUTO_BURG','GRAND_THEFT','PETTY_THEFT',
'BIKE_THEFT','AUTO_THEFT','ARSON','TOTAL_PART1','TOTAL_PART2','GRAND_TOTAL']
#this a truncate version I found for some months; The numbers here bracket the columns
maptbl_short=[(0,133),(133,194.5),(194.5,264),(264,329),(329,396),(396,466),(466,531),
(531,597),(597,667.5),(667.5,736),(736,803),(803,871),(871,938),(938,1004),(1004,1300)
]
def load_html(filename):
soup = BeautifulSoup(file(filename).read())
return soup
def grab_pages(soup):
return soup.body.find_all('div')
def cleanup_data(data):
# remove non-breaking spaces (&nbsp; / \xa0)
data = data.replace(u'\xa0','')
return data
def create_buckets(arr):
'''
Here we bin the rows based on 'top' value
'''
sarr = np.sort(arr)
# coarseness ; this is used to separate different rows
crsns = 10  # np.mean(sdiff)
s = 0
prev = sarr[0]
buckets = []
for sa in sarr[1:]:
if sa-prev>crsns:
e = (sa+prev)*0.5
buckets.append((s,e))
s = e
prev = sa
#else
buckets.append((s,s+40))
return [buckets,[i for i,y in enumerate(buckets)]]
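def _example_create_buckets():
    # Illustrative sketch, not part of the original script (the _example_
    # name is hypothetical): four 'top' offsets split into two row bands
    # because 150 - 102 exceeds the coarseness of 10px.
    buckets, labels = create_buckets(np.array([100, 102, 150, 153]))
    # buckets -> [(0, 126.0), (126.0, 166.0)]; labels -> [0, 1]
    return buckets, labels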
def create_frame(pnodes,mptbl,mptbltxt,lftmrkr):
'''
For a given page, here I use the position to tag it with a column number.
Then a data frame is created, and the pivot option is used to construct
back a proper table that resembles the actual data set.
'''
df = pd.DataFrame(pnodes)
[tmptbl,tmptblval] = create_buckets(df.top.unique()) # buckets for top
dval = []
for t in tmptbl:
dvlst = df[(df["top"]>=t[0])&(df["top"]<=t[1])&(df['left']<lftmrkr)]['content'].values
#dval.append(dvlst[0] if len(dvlst)>0 else u'RD')
cval = dvlst[0] if len(dvlst)>0 else u'RD'
dval.append(cval)
#df[(df["top"]>=t[0])&(df["top"]<=t[1])]['rowval'] = cval
df['row'] = df['top'].map(lambda g:
[
dval[i] for i,x in enumerate(tmptbl)
if ((x[0]<=g)and(g<=x[1])) or None
][0]
)
dfs = df[df['row']!='RD']
dlst, dcnt = [], []
for i,v in dfs.iterrows():
if v.left<lftmrkr:
dcnt.append(v.content)
dlst.append(v.top)
dfs['column'] = dfs['left'].map(lambda g: [mptbltxt[i] for i,x in enumerate(mptbl)
if ((x[0]<=g)and(g<=x[1]))][0])
pvt = dfs.pivot(index='row',columns='column',values='content')
pvt.fillna(0,inplace=True)
for c in pvt.columns:
try:
pvt[c] = pvt[c].astype(int)
except:
pass
return pvt
'''
# this didn't work; need to check later
def grab_monthlypdfs():
domain='http://www.longbeach.gov'
url = 'http://www.longbeach.gov/police/statistics.asp'
res = requests.get(url)
sp = BeautifulSoup(res.text)
tbody = sp.find_all('tbody')
links = tbody[3].find_all('a')
pdfdir = os.path.join(_curdir,'files','PDF')
if not os.path.exists(pdfdir):
os.makedirs(pdfdir)
for l in links:
title = '_'.join( l['title'].split(" ") )
print title
try:
res = requests.get(domain+l['href'],stream=True)
pdffile = os.path.join(pdfdir,title+'.pdf')
with open(pdffile,'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
print 'FAILED: '+str(e)+l['title']+" "+l['href']
'''
def extract_nodes(p,lftmrkr):
'''
This is the code that extracts the beautiful soup html document into
a bunch of nodes for easy processing
'''
nodes = p.find_all('p' )
dlist = []
nextdat = {}
for node in nodes:
ddict = {}
attrs = node.attrs
attrssty = attrs.get('style','')
attrscls = attrs.get('class','')
if attrscls and attrscls[0] in ('ft01', 'ft03'):
posns = _posdat.findall(attrssty)
if len(posns) == 2:
k,v = zip(*posns)
if ('top' in k ) and ('left' in k):
if nextdat != {}:
nextdat['top'] = int(v[0]) if k[0] == 'top' else int(v[1])
ddict = nextdat
nextdat = {}
ddict[k[0]] = int(v[0])
ddict[k[1]] = int(v[1])
cont = node.contents
if len(cont) == 1 :
ddict['content'] = cont[0].replace('\xa0','0')
elif len(cont)==3:
ddict['content'] = cont[0].replace('\xa0','0')
nextdat['content'] = cont[2].replace('\xa0','0')<|fim▁hole|> dlist.append(ddict)
return dlist
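# Illustrative sketch of the dicts extract_nodes returns (values are
# hypothetical): {'top': 120, 'left': 80, 'content': u'3'} -- a pixel
# position plus the cell text, which create_frame later bins into rows
# and columns.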
def create_html(pdffile):
'''
Given a pdf file, this calls pdftohtml.exe to convert to html
'''
try:
pdftohtml = "pdftohtml.exe "
htmldir = os.path.join(_curdir,'files','HTML')
if not os.path.exists(htmldir):
os.makedirs(htmldir)
pdffile = os.path.abspath(pdffile)
fileprefix = os.path.split(pdffile)[1].split('.pdf')[0]
cmd = pdftohtml+pdffile+" -c -noframes "+os.path.join(htmldir,fileprefix+".html")
print cmd
os.system(cmd)
except Exception as e:
print str(e)
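# Illustrative sketch (hypothetical paths): for January_2013.pdf the command
# assembled above, assuming pdftohtml.exe is on the PATH, looks like:
#
#   pdftohtml.exe D:\data\January_2013.pdf -c -noframes files\HTML\January_2013.html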
def convert_all_pdfs(pdfdir):
'''
Convenient method to loop over all the pdf files. Calls create_html
file in a loop.
'''
for f in os.listdir(pdfdir):
if f.endswith('.pdf'):
create_html(os.path.join(pdfdir,f))
def _finalize_dataframe(ddf):
'''
Does some clean-up and a checksum to validate the data. This is a basic
check. Nothing is guaranteed!
'''
# do a checksum test
if 'TOTAL_PART1' in ddf.columns:
checksum = np.sum(\
np.power(
ddf[mptbltxt[1:14]].astype(int).sum(axis=1) -
ddf['TOTAL_PART1'].astype(int)
,2)
)
if checksum:
print "Failed check sum test "+str(checksum)
else:
print "Passed checksum test"
# reorder the columns
if len(ddf.columns) == 17:
ddf = ddf[mptbltxt]
else:
ddf = ddf[mptbltxt[:15]]
del ddf['RD']
ddf.index.name = 'RD'
return ddf
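def _example_checksum():
    # Illustrative sketch, not part of the original script: the checksum in
    # _finalize_dataframe demands that the Part-1 crime columns sum to
    # TOTAL_PART1 row by row; a clean parse yields zero squared error.
    df = pd.DataFrame({'MURDER': [1], 'ROBBERY': [2], 'TOTAL_PART1': [3]})
    diff = df[['MURDER', 'ROBBERY']].sum(axis=1) - df['TOTAL_PART1']
    return int(np.power(diff, 2).sum())  # 0 -> passes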
def create_csv(htmlfile):
'''
This creates the csv file given a html file
'''
try:
print "Converting "+htmlfile
soup = load_html(htmlfile)
pages = grab_pages(soup)
num_nodes = len(pages[0])
leftmrkr = 75 if num_nodes > 440 else 133 # to handle two pdf formats
mptbl = maptbl_long if num_nodes > 440 else maptbl_short
#filetype = 1 if num_nodes > 480 else 0 # 1 if long type else 0
pvts = []
for i,p in enumerate(pages):
print 'Page-'+str(i)
dlist = extract_nodes(p,leftmrkr)
#df = create_frame(dlist,mptbl0,mptbltxt,leftmrkr)
df = create_frame(dlist,mptbl,mptbltxt,leftmrkr)
pvts.append(df)
ddf = pd.concat(pvts)
exclrows = set(['0' + str(i) for i in range(2000, 2020)]) | set(['%CHG'])
exclrows = exclrows & set(ddf.index)
ddf.drop(exclrows,inplace=True)
ddf.fillna(0,inplace=True)
#cleanup
ddf = _finalize_dataframe(ddf)
csvdir = os.path.join(_curdir,'files','CSV')
if not os.path.exists(csvdir):
os.makedirs(csvdir)
htmlfile = os.path.abspath(htmlfile)
fileprefix = os.path.split(htmlfile)[1].split('.html')[0]
csvfile = os.path.join(csvdir,fileprefix+".csv")
ddf.to_csv(csvfile)
except Exception as e:
print str(e)
def convert_all_htmls(htmldir):
'''
This is a top level driver which calls create_csv in a loop
'''
for f in os.listdir(htmldir):
if f.endswith('.html'):
create_csv(os.path.join(htmldir,f))
#break
if __name__=='__main__':
'''
Here is a complete example to loop over all pdfs and create all csvs.
>>>pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
>>>convert_all_pdfs(pdfdir)
>>>htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
>>>convert_all_htmls(htmldir)
Or you can do individual file conversions:
>>>pdffile = os.path.join(pdfdir,'January_2013.pdf')
>>>create_html(pdffile)
'''
# Convert pdfs to html
pdfdir = "D:\\Development\\Python\\CrimeData\\files\\PDF"
pdffile = os.path.join(pdfdir,'January_2013.pdf')
create_html(pdffile)
#convert_all_pdfs(pdfdir)
# Then convert html to csv
htmldir = "D:\\Development\\Python\\CrimeData\\files\\HTML"
html = os.path.join(htmldir,'January_2013.html')
create_csv(html)
#convert_all_htmls(htmldir)<|fim▁end|>
|
nextdat['left'] = int(v[1])if k[1] == 'left' else int(v[0])
#if (ddict['left']<lftmrkr) and (ddict['content']!= 'RD'):
# currrd = ddict['content']
#ddict['rd'] = currrd
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import os
from djangomaster.core import autodiscover
from djangomaster.sites import mastersite
def get_version():
path = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(path, 'version.txt')
return open(path).read().strip()<|fim▁hole|>
__version__ = get_version()
def get_urls():
autodiscover()
return mastersite.urlpatterns, 'djangomaster', 'djangomaster'
urls = get_urls()<|fim▁end|>
| |
<|file_name|>collada_dom.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#<|fim▁hole|># met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Collada DOM 1.3.0 tool for SCons."""
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
env.Append(CCFLAGS=[
'-I$COLLADA_DIR/include',
'-I$COLLADA_DIR/include/1.4',
])<|fim▁end|>
|
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
|
<|file_name|>plots.py<|end_file_name|><|fim▁begin|>import matplotlib.pyplot as plt
import numpy as np
import scalpplot
from scalpplot import plot_scalp
from positions import POS_10_5
from scipy import signal
def plot_timeseries(frames, time=None, offset=None, color='k', linestyle='-'):
frames = np.asarray(frames)
if offset is None:
offset = np.max(np.std(frames, axis=0)) * 3
if time is None:
time = np.arange(frames.shape[0])
plt.plot(time, frames - np.mean(frames, axis=0) +
np.arange(frames.shape[1]) * offset, color=color, ls=linestyle)
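def _example_plot_timeseries():
    # Illustrative sketch, not part of the original module: plot four
    # channels of noise sampled at a hypothetical 100 Hz clock.
    frames = np.random.randn(500, 4)
    plot_timeseries(frames, time=np.arange(500) / 100.0, color='b')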
def plot_scalpgrid(scalps, sensors, locs=POS_10_5, width=None,
clim=None, cmap=None, titles=None):
'''
Plots a grid with scalpplots. Scalps contains the different scalps in the
rows, sensors contains the names for the columns of scalps, locs is a dict
that maps the sensor-names to locations.
Width determines the width of the grid that contains the plots. Cmap selects
a colormap, for example plt.cm.RdBu_r is very useful for AUC-ROC plots.
Clim is a list containing the minimum and maximum value mapped to a color.
Titles is an optional list with titles for each subplot.
Returns a list with subplots for further manipulation.<|fim▁hole|> nscalps = scalps.shape[0]
subplots = []
if not width:
width = int(min(8, np.ceil(np.sqrt(nscalps))))
height = int(np.ceil(nscalps/float(width)))
if not clim:
clim = [np.min(scalps), np.max(scalps)]
plt.clf()
for i in range(nscalps):
subplots.append(plt.subplot(height, width, i + 1))
plot_scalp(scalps[i], sensors, locs, clim=clim, cmap=cmap)
if titles:
plt.title(titles[i])
# plot colorbar next to last scalp
bb = plt.gca().get_position()
plt.colorbar(cax=plt.axes([bb.xmax + bb.width/10, bb.ymin, bb.width/10,
bb.height]), ticks=np.linspace(clim[0], clim[1], 5).round(2))
return subplots<|fim▁end|>
|
'''
scalps = np.asarray(scalps)
assert scalps.ndim == 2
|
<|file_name|>use.py<|end_file_name|><|fim▁begin|>##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#<|fim▁hole|># This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "add package to environment using dotkit"
section = "environment"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to use with dotkit')
def use(parser, args):
print_module_placeholder_help()<|fim▁end|>
| |
<|file_name|>input_handler_proxy.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/renderer/input/input_handler_proxy.h"
#include "base/auto_reset.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "content/common/input/did_overscroll_params.h"
#include "content/common/input/web_input_event_traits.h"
#include "content/public/common/content_switches.h"
#include "content/renderer/input/input_handler_proxy_client.h"
#include "content/renderer/input/input_scroll_elasticity_controller.h"
#include "third_party/WebKit/public/platform/Platform.h"
#include "third_party/WebKit/public/web/WebInputEvent.h"
#include "ui/events/latency_info.h"
#include "ui/gfx/geometry/point_conversions.h"
using blink::WebFloatPoint;
using blink::WebFloatSize;
using blink::WebGestureEvent;
using blink::WebInputEvent;
using blink::WebMouseEvent;
using blink::WebMouseWheelEvent;
using blink::WebPoint;
using blink::WebTouchEvent;
using blink::WebTouchPoint;
namespace {
// Maximum time between a fling event's timestamp and the first |Animate| call
// for the fling curve to use the fling timestamp as the initial animation time.
// Two frames allows a minor delay between event creation and the first animate.
const double kMaxSecondsFromFlingTimestampToFirstAnimate = 2. / 60.;
// Threshold for determining whether a fling scroll delta should have caused the
// client to scroll.
const float kScrollEpsilon = 0.1f;
// Minimum fling velocity required for the active fling and new fling for the
// two to accumulate.
const double kMinBoostFlingSpeedSquare = 350. * 350.;
// Minimum velocity for the active touch scroll to preserve (boost) an active
// fling for which cancellation has been deferred.
const double kMinBoostTouchScrollSpeedSquare = 150 * 150.;
// Timeout window after which the active fling will be cancelled if no scrolls
// or flings of sufficient velocity relative to the current fling are received.
// The default value on Android native views is 40ms, but we use a slightly
// increased value to accomodate small IPC message delays.
const double kFlingBoostTimeoutDelaySeconds = 0.045;
gfx::Vector2dF ToClientScrollIncrement(const WebFloatSize& increment) {
return gfx::Vector2dF(-increment.width, -increment.height);
}
double InSecondsF(const base::TimeTicks& time) {
return (time - base::TimeTicks()).InSecondsF();
}
bool ShouldSuppressScrollForFlingBoosting(
const gfx::Vector2dF& current_fling_velocity,
const WebGestureEvent& scroll_update_event,
double time_since_last_boost_event) {
DCHECK_EQ(WebInputEvent::GestureScrollUpdate, scroll_update_event.type);
gfx::Vector2dF dx(scroll_update_event.data.scrollUpdate.deltaX,
scroll_update_event.data.scrollUpdate.deltaY);
if (gfx::DotProduct(current_fling_velocity, dx) <= 0)
return false;
if (time_since_last_boost_event < 0.001)
return true;
// TODO(jdduke): Use |scroll_update_event.data.scrollUpdate.velocity{X,Y}|.
// The scroll must be of sufficient velocity to maintain the active fling.
const gfx::Vector2dF scroll_velocity =
gfx::ScaleVector2d(dx, 1. / time_since_last_boost_event);
if (scroll_velocity.LengthSquared() < kMinBoostTouchScrollSpeedSquare)
return false;
return true;
}
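// Worked example with illustrative numbers (not from any spec): given an
// active fling of (300, 0) px/s, a scroll delta of (3, 0) px arriving 16ms
// after the last boost event implies a scroll velocity of ~187 px/s in the
// same direction. That clears the 150 px/s floor above, so the update is
// suppressed and the deferred fling stays eligible for boosting.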
bool ShouldBoostFling(const gfx::Vector2dF& current_fling_velocity,
const WebGestureEvent& fling_start_event) {
DCHECK_EQ(WebInputEvent::GestureFlingStart, fling_start_event.type);
gfx::Vector2dF new_fling_velocity(
fling_start_event.data.flingStart.velocityX,
fling_start_event.data.flingStart.velocityY);
if (gfx::DotProduct(current_fling_velocity, new_fling_velocity) <= 0)
return false;
if (current_fling_velocity.LengthSquared() < kMinBoostFlingSpeedSquare)
return false;
if (new_fling_velocity.LengthSquared() < kMinBoostFlingSpeedSquare)
return false;
return true;
}
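// Worked example with illustrative numbers: an active fling of (400, 0)
// px/s and a new fling of (500, 0) px/s point the same way and each exceed
// the 350 px/s minimum, so ShouldBoostFling() returns true and the caller
// accumulates the velocities to (900, 0) px/s.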
WebGestureEvent ObtainGestureScrollBegin(const WebGestureEvent& event) {
WebGestureEvent scroll_begin_event = event;
scroll_begin_event.type = WebInputEvent::GestureScrollBegin;
scroll_begin_event.data.scrollBegin.deltaXHint = 0;
scroll_begin_event.data.scrollBegin.deltaYHint = 0;
return scroll_begin_event;
}
void ReportInputEventLatencyUma(const WebInputEvent& event,
const ui::LatencyInfo& latency_info) {
if (!(event.type == WebInputEvent::GestureScrollBegin ||
event.type == WebInputEvent::GestureScrollUpdate ||
event.type == WebInputEvent::GesturePinchBegin ||
event.type == WebInputEvent::GesturePinchUpdate ||
event.type == WebInputEvent::GestureFlingStart)) {
return;
}
ui::LatencyInfo::LatencyMap::const_iterator it =
latency_info.latency_components().find(std::make_pair(
ui::INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT, 0));
if (it == latency_info.latency_components().end())
return;
base::TimeDelta delta = base::TimeTicks::Now() - it->second.event_time;
for (size_t i = 0; i < it->second.event_count; ++i) {
switch (event.type) {
case blink::WebInputEvent::GestureScrollBegin:
UMA_HISTOGRAM_CUSTOM_COUNTS(
"Event.Latency.RendererImpl.GestureScrollBegin",
delta.InMicroseconds(), 1, 1000000, 100);
break;
case blink::WebInputEvent::GestureScrollUpdate:
UMA_HISTOGRAM_CUSTOM_COUNTS(
// So named for historical reasons.
"Event.Latency.RendererImpl.GestureScroll2",
delta.InMicroseconds(), 1, 1000000, 100);
break;
case blink::WebInputEvent::GesturePinchBegin:
UMA_HISTOGRAM_CUSTOM_COUNTS(
"Event.Latency.RendererImpl.GesturePinchBegin",
delta.InMicroseconds(), 1, 1000000, 100);
break;
case blink::WebInputEvent::GesturePinchUpdate:
UMA_HISTOGRAM_CUSTOM_COUNTS(
"Event.Latency.RendererImpl.GesturePinchUpdate",
delta.InMicroseconds(), 1, 1000000, 100);
break;
case blink::WebInputEvent::GestureFlingStart:
UMA_HISTOGRAM_CUSTOM_COUNTS(
"Event.Latency.RendererImpl.GestureFlingStart",
delta.InMicroseconds(), 1, 1000000, 100);
break;
default:
NOTREACHED();
break;
}
}
}
} // namespace
namespace content {
InputHandlerProxy::InputHandlerProxy(cc::InputHandler* input_handler,
InputHandlerProxyClient* client)
: client_(client),
input_handler_(input_handler),
deferred_fling_cancel_time_seconds_(0),
#ifndef NDEBUG
expect_scroll_update_end_(false),
#endif
gesture_scroll_on_impl_thread_(false),
gesture_pinch_on_impl_thread_(false),
fling_may_be_active_on_main_thread_(false),
disallow_horizontal_fling_scroll_(false),
disallow_vertical_fling_scroll_(false),
has_fling_animation_started_(false),
uma_latency_reporting_enabled_(base::TimeTicks::IsHighResolution()) {
DCHECK(client);
input_handler_->BindToClient(this);
smooth_scroll_enabled_ = base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableSmoothScrolling);
cc::ScrollElasticityHelper* scroll_elasticity_helper =
input_handler_->CreateScrollElasticityHelper();
if (scroll_elasticity_helper) {
scroll_elasticity_controller_.reset(
new InputScrollElasticityController(scroll_elasticity_helper));
}
}
InputHandlerProxy::~InputHandlerProxy() {}
void InputHandlerProxy::WillShutdown() {
scroll_elasticity_controller_.reset();
input_handler_ = NULL;
client_->WillShutdown();
}
InputHandlerProxy::EventDisposition
InputHandlerProxy::HandleInputEventWithLatencyInfo(
const WebInputEvent& event,
ui::LatencyInfo* latency_info) {
DCHECK(input_handler_);
if (uma_latency_reporting_enabled_)
ReportInputEventLatencyUma(event, *latency_info);
TRACE_EVENT_FLOW_STEP0("input,benchmark",
"LatencyInfo.Flow",
TRACE_ID_DONT_MANGLE(latency_info->trace_id()),
"HandleInputEventImpl");
scoped_ptr<cc::SwapPromiseMonitor> latency_info_swap_promise_monitor =
input_handler_->CreateLatencyInfoSwapPromiseMonitor(latency_info);
InputHandlerProxy::EventDisposition disposition = HandleInputEvent(event);
return disposition;
}
InputHandlerProxy::EventDisposition InputHandlerProxy::HandleInputEvent(
const WebInputEvent& event) {
DCHECK(input_handler_);
TRACE_EVENT1("input,benchmark", "InputHandlerProxy::HandleInputEvent",
"type", WebInputEventTraits::GetName(event.type));
if (FilterInputEventForFlingBoosting(event))
return DID_HANDLE;
switch (event.type) {
case WebInputEvent::MouseWheel:
return HandleMouseWheel(static_cast<const WebMouseWheelEvent&>(event));
case WebInputEvent::GestureScrollBegin:
return HandleGestureScrollBegin(
static_cast<const WebGestureEvent&>(event));
case WebInputEvent::GestureScrollUpdate:
return HandleGestureScrollUpdate(
static_cast<const WebGestureEvent&>(event));
case WebInputEvent::GestureScrollEnd:
return HandleGestureScrollEnd(static_cast<const WebGestureEvent&>(event));
case WebInputEvent::GesturePinchBegin: {
DCHECK(!gesture_pinch_on_impl_thread_);
const WebGestureEvent& gesture_event =
static_cast<const WebGestureEvent&>(event);
if (gesture_event.sourceDevice == blink::WebGestureDeviceTouchpad &&
input_handler_->HaveWheelEventHandlersAt(
gfx::Point(gesture_event.x, gesture_event.y))) {
return DID_NOT_HANDLE;
} else {
input_handler_->PinchGestureBegin();
gesture_pinch_on_impl_thread_ = true;
return DID_HANDLE;
}
}
case WebInputEvent::GesturePinchEnd:
if (gesture_pinch_on_impl_thread_) {
gesture_pinch_on_impl_thread_ = false;
input_handler_->PinchGestureEnd();
return DID_HANDLE;
} else {
return DID_NOT_HANDLE;
}
case WebInputEvent::GesturePinchUpdate: {
if (gesture_pinch_on_impl_thread_) {
const WebGestureEvent& gesture_event =
static_cast<const WebGestureEvent&>(event);
if (gesture_event.data.pinchUpdate.zoomDisabled)
return DROP_EVENT;
input_handler_->PinchGestureUpdate(
gesture_event.data.pinchUpdate.scale,
gfx::Point(gesture_event.x, gesture_event.y));
return DID_HANDLE;
} else {
return DID_NOT_HANDLE;
}
}
case WebInputEvent::GestureFlingStart:
return HandleGestureFlingStart(
*static_cast<const WebGestureEvent*>(&event));
case WebInputEvent::GestureFlingCancel:
if (CancelCurrentFling())
return DID_HANDLE;
else if (!fling_may_be_active_on_main_thread_)
return DROP_EVENT;
return DID_NOT_HANDLE;
case WebInputEvent::TouchStart:
return HandleTouchStart(static_cast<const WebTouchEvent&>(event));
case WebInputEvent::MouseMove: {
const WebMouseEvent& mouse_event =
static_cast<const WebMouseEvent&>(event);
// TODO(tony): Ignore when mouse buttons are down?
// TODO(davemoore): This should never happen, but bug #326635 showed some
// surprising crashes.
CHECK(input_handler_);
input_handler_->MouseMoveAt(gfx::Point(mouse_event.x, mouse_event.y));
return DID_NOT_HANDLE;
}
default:
if (WebInputEvent::isKeyboardEventType(event.type)) {
// Only call |CancelCurrentFling()| if a fling was active, as it will
// otherwise disrupt an in-progress touch scroll.
if (fling_curve_)
CancelCurrentFling();
}
break;
}
return DID_NOT_HANDLE;
}
InputHandlerProxy::EventDisposition InputHandlerProxy::HandleMouseWheel(
const WebMouseWheelEvent& wheel_event) {
InputHandlerProxy::EventDisposition result = DID_NOT_HANDLE;
cc::InputHandlerScrollResult scroll_result;
// TODO(ccameron): The rail information should be pushed down into
// InputHandler.
gfx::Vector2dF scroll_delta(
wheel_event.railsMode != WebInputEvent::RailsModeVertical
? -wheel_event.deltaX
: 0,
wheel_event.railsMode != WebInputEvent::RailsModeHorizontal
? -wheel_event.deltaY
: 0);
if (wheel_event.scrollByPage) {
// TODO(jamesr): We don't properly handle scroll by page in the compositor
// thread, so punt it to the main thread. http://crbug.com/236639
result = DID_NOT_HANDLE;
} else if (!wheel_event.canScroll) {
// Wheel events with |canScroll| == false will not trigger scrolling,
// only event handlers. Forward to the main thread.
result = DID_NOT_HANDLE;
} else if (smooth_scroll_enabled_) {
cc::InputHandler::ScrollStatus scroll_status =
input_handler_->ScrollAnimated(gfx::Point(wheel_event.x, wheel_event.y),
scroll_delta);
switch (scroll_status) {
case cc::InputHandler::SCROLL_STARTED:
result = DID_HANDLE;
break;
case cc::InputHandler::SCROLL_IGNORED:
result = DROP_EVENT;
break;
default:
result = DID_NOT_HANDLE;
break;
}
} else {
cc::InputHandler::ScrollStatus scroll_status = input_handler_->ScrollBegin(
gfx::Point(wheel_event.x, wheel_event.y), cc::InputHandler::WHEEL);
switch (scroll_status) {
case cc::InputHandler::SCROLL_STARTED: {
TRACE_EVENT_INSTANT2("input",
"InputHandlerProxy::handle_input wheel scroll",
TRACE_EVENT_SCOPE_THREAD, "deltaX",
scroll_delta.x(), "deltaY", scroll_delta.y());
gfx::Point scroll_point(wheel_event.x, wheel_event.y);
scroll_result = input_handler_->ScrollBy(scroll_point, scroll_delta);
HandleOverscroll(scroll_point, scroll_result);
input_handler_->ScrollEnd();
result = scroll_result.did_scroll ? DID_HANDLE : DROP_EVENT;
break;
}
case cc::InputHandler::SCROLL_IGNORED:
// TODO(jamesr): This should be DROP_EVENT, but in cases where we fail
// to properly sync scrollability it's safer to send the event to the
// main thread. Change back to DROP_EVENT once we have synchronization
// bugs sorted out.
result = DID_NOT_HANDLE;
break;
case cc::InputHandler::SCROLL_UNKNOWN:
case cc::InputHandler::SCROLL_ON_MAIN_THREAD:
result = DID_NOT_HANDLE;
break;
case cc::InputHandler::ScrollStatusCount:
NOTREACHED();
break;
}
}
// Send the event and its disposition to the elasticity controller to update
// the over-scroll animation. If the event is to be handled on the main
// thread, the event and its disposition will be sent to the elasticity
// controller after being handled on the main thread.
if (scroll_elasticity_controller_ && result != DID_NOT_HANDLE) {
// Note that the call to the elasticity controller is made asynchronously,
// to minimize divergence between main thread and impl thread event
// handling paths.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&InputScrollElasticityController::ObserveWheelEventAndResult,
scroll_elasticity_controller_->GetWeakPtr(), wheel_event,
scroll_result));
}
return result;
}
InputHandlerProxy::EventDisposition InputHandlerProxy::HandleGestureScrollBegin(
const WebGestureEvent& gesture_event) {
if (gesture_scroll_on_impl_thread_)
CancelCurrentFling();
#ifndef NDEBUG
DCHECK(!expect_scroll_update_end_);
expect_scroll_update_end_ = true;
#endif
cc::InputHandler::ScrollStatus scroll_status;
if (gesture_event.data.scrollBegin.targetViewport) {
scroll_status = input_handler_->RootScrollBegin(cc::InputHandler::GESTURE);
} else {
scroll_status = input_handler_->ScrollBegin(
gfx::Point(gesture_event.x, gesture_event.y),
cc::InputHandler::GESTURE);
}
UMA_HISTOGRAM_ENUMERATION("Renderer4.CompositorScrollHitTestResult",
scroll_status,
cc::InputHandler::ScrollStatusCount);
switch (scroll_status) {
case cc::InputHandler::SCROLL_STARTED:
TRACE_EVENT_INSTANT0("input",
"InputHandlerProxy::handle_input gesture scroll",
TRACE_EVENT_SCOPE_THREAD);
gesture_scroll_on_impl_thread_ = true;
return DID_HANDLE;
case cc::InputHandler::SCROLL_UNKNOWN:
case cc::InputHandler::SCROLL_ON_MAIN_THREAD:
return DID_NOT_HANDLE;
case cc::InputHandler::SCROLL_IGNORED:
return DROP_EVENT;
case cc::InputHandler::ScrollStatusCount:
NOTREACHED();
break;
}
return DID_NOT_HANDLE;
}
InputHandlerProxy::EventDisposition
InputHandlerProxy::HandleGestureScrollUpdate(
const WebGestureEvent& gesture_event) {
#ifndef NDEBUG
DCHECK(expect_scroll_update_end_);
#endif
if (!gesture_scroll_on_impl_thread_ && !gesture_pinch_on_impl_thread_)
return DID_NOT_HANDLE;
gfx::Point scroll_point(gesture_event.x, gesture_event.y);
gfx::Vector2dF scroll_delta(-gesture_event.data.scrollUpdate.deltaX,
-gesture_event.data.scrollUpdate.deltaY);
cc::InputHandlerScrollResult scroll_result = input_handler_->ScrollBy(
scroll_point, scroll_delta);
HandleOverscroll(scroll_point, scroll_result);
return scroll_result.did_scroll ? DID_HANDLE : DROP_EVENT;
}
InputHandlerProxy::EventDisposition InputHandlerProxy::HandleGestureScrollEnd(
const WebGestureEvent& gesture_event) {
#ifndef NDEBUG
DCHECK(expect_scroll_update_end_);
expect_scroll_update_end_ = false;
#endif
input_handler_->ScrollEnd();
if (!gesture_scroll_on_impl_thread_)
return DID_NOT_HANDLE;
gesture_scroll_on_impl_thread_ = false;
return DID_HANDLE;
}
InputHandlerProxy::EventDisposition InputHandlerProxy::HandleGestureFlingStart(
const WebGestureEvent& gesture_event) {
cc::InputHandler::ScrollStatus scroll_status;
if (gesture_event.sourceDevice == blink::WebGestureDeviceTouchpad) {
if (gesture_event.data.flingStart.targetViewport) {
scroll_status = input_handler_->RootScrollBegin(
cc::InputHandler::NON_BUBBLING_GESTURE);
} else {
scroll_status = input_handler_->ScrollBegin(
gfx::Point(gesture_event.x, gesture_event.y),
cc::InputHandler::NON_BUBBLING_GESTURE);
}
} else {
if (!gesture_scroll_on_impl_thread_)
scroll_status = cc::InputHandler::SCROLL_ON_MAIN_THREAD;
else
scroll_status = input_handler_->FlingScrollBegin();
}
#ifndef NDEBUG
expect_scroll_update_end_ = false;
#endif
switch (scroll_status) {
case cc::InputHandler::SCROLL_STARTED: {
if (gesture_event.sourceDevice == blink::WebGestureDeviceTouchpad)
input_handler_->ScrollEnd();
const float vx = gesture_event.data.flingStart.velocityX;
const float vy = gesture_event.data.flingStart.velocityY;
current_fling_velocity_ = gfx::Vector2dF(vx, vy);
DCHECK(!current_fling_velocity_.IsZero());
fling_curve_.reset(client_->CreateFlingAnimationCurve(
gesture_event.sourceDevice,
WebFloatPoint(vx, vy),
blink::WebSize()));
disallow_horizontal_fling_scroll_ = !vx;
disallow_vertical_fling_scroll_ = !vy;
TRACE_EVENT_ASYNC_BEGIN2("input",
"InputHandlerProxy::HandleGestureFling::started",
this,
"vx",
vx,
"vy",
vy);
// Note that the timestamp will only be used to kickstart the animation if
// it's sufficiently close to the timestamp of the first call to |Animate()|.
has_fling_animation_started_ = false;
fling_parameters_.startTime = gesture_event.timeStampSeconds;
fling_parameters_.delta = WebFloatPoint(vx, vy);
fling_parameters_.point = WebPoint(gesture_event.x, gesture_event.y);
fling_parameters_.globalPoint =
WebPoint(gesture_event.globalX, gesture_event.globalY);
fling_parameters_.modifiers = gesture_event.modifiers;
fling_parameters_.sourceDevice = gesture_event.sourceDevice;
input_handler_->SetNeedsAnimateInput();
return DID_HANDLE;
}
case cc::InputHandler::SCROLL_UNKNOWN:
case cc::InputHandler::SCROLL_ON_MAIN_THREAD: {
TRACE_EVENT_INSTANT0("input",
"InputHandlerProxy::HandleGestureFling::"
"scroll_on_main_thread",
TRACE_EVENT_SCOPE_THREAD);
gesture_scroll_on_impl_thread_ = false;
fling_may_be_active_on_main_thread_ = true;
return DID_NOT_HANDLE;
}
case cc::InputHandler::SCROLL_IGNORED: {
TRACE_EVENT_INSTANT0(
"input",
"InputHandlerProxy::HandleGestureFling::ignored",
TRACE_EVENT_SCOPE_THREAD);
gesture_scroll_on_impl_thread_ = false;
if (gesture_event.sourceDevice == blink::WebGestureDeviceTouchpad) {
// We still pass the curve to the main thread if there's nothing
// scrollable, in case something
// registers a handler before the curve is over.
return DID_NOT_HANDLE;
}
return DROP_EVENT;
}
case cc::InputHandler::ScrollStatusCount:
NOTREACHED();
break;
}
return DID_NOT_HANDLE;
}
InputHandlerProxy::EventDisposition InputHandlerProxy::HandleTouchStart(
const blink::WebTouchEvent& touch_event) {
for (size_t i = 0; i < touch_event.touchesLength; ++i) {
if (touch_event.touches[i].state != WebTouchPoint::StatePressed)
continue;
if (input_handler_->DoTouchEventsBlockScrollAt(
gfx::Point(touch_event.touches[i].position.x,
touch_event.touches[i].position.y))) {
// TODO(rbyers): We should consider still sending the touch events to
// main asynchronously (crbug.com/455539).
return DID_NOT_HANDLE;
}
}
return DROP_EVENT;
}
bool InputHandlerProxy::FilterInputEventForFlingBoosting(
const WebInputEvent& event) {
if (!WebInputEvent::isGestureEventType(event.type))
return false;
if (!fling_curve_) {
DCHECK(!deferred_fling_cancel_time_seconds_);
return false;
}
const WebGestureEvent& gesture_event =
static_cast<const WebGestureEvent&>(event);
if (gesture_event.type == WebInputEvent::GestureFlingCancel) {
if (gesture_event.data.flingCancel.preventBoosting)
return false;
if (current_fling_velocity_.LengthSquared() < kMinBoostFlingSpeedSquare)
return false;
TRACE_EVENT_INSTANT0("input",
"InputHandlerProxy::FlingBoostStart",
TRACE_EVENT_SCOPE_THREAD);
deferred_fling_cancel_time_seconds_ =
event.timeStampSeconds + kFlingBoostTimeoutDelaySeconds;
return true;
}
// A fling is either inactive or is "free spinning", i.e., has yet to be
// interrupted by a touch gesture, in which case there is nothing to filter.
if (!deferred_fling_cancel_time_seconds_)
return false;
// Gestures from a different source should immediately interrupt the fling.
if (gesture_event.sourceDevice != fling_parameters_.sourceDevice) {
CancelCurrentFling();
return false;
}
switch (gesture_event.type) {
case WebInputEvent::GestureTapCancel:
case WebInputEvent::GestureTapDown:
return false;
case WebInputEvent::GestureScrollBegin:
if (!input_handler_->IsCurrentlyScrollingLayerAt(
gfx::Point(gesture_event.x, gesture_event.y),
fling_parameters_.sourceDevice == blink::WebGestureDeviceTouchpad
? cc::InputHandler::NON_BUBBLING_GESTURE
: cc::InputHandler::GESTURE)) {
CancelCurrentFling();
return false;
}
// TODO(jdduke): Use |gesture_event.data.scrollBegin.delta{X,Y}Hint| to
// determine if the ScrollBegin should immediately cancel the fling.
ExtendBoostedFlingTimeout(gesture_event);
return true;
case WebInputEvent::GestureScrollUpdate: {
const double time_since_last_boost_event =
event.timeStampSeconds - last_fling_boost_event_.timeStampSeconds;
if (ShouldSuppressScrollForFlingBoosting(current_fling_velocity_,
gesture_event,
time_since_last_boost_event)) {
ExtendBoostedFlingTimeout(gesture_event);
return true;
}
CancelCurrentFling();
return false;
}
case WebInputEvent::GestureScrollEnd:
// Clear the last fling boost event *prior* to fling cancellation,
// preventing insertion of a synthetic GestureScrollBegin.
last_fling_boost_event_ = WebGestureEvent();
CancelCurrentFling();
return true;
case WebInputEvent::GestureFlingStart: {
DCHECK_EQ(fling_parameters_.sourceDevice, gesture_event.sourceDevice);
bool fling_boosted =
fling_parameters_.modifiers == gesture_event.modifiers &&
ShouldBoostFling(current_fling_velocity_, gesture_event);
gfx::Vector2dF new_fling_velocity(
gesture_event.data.flingStart.velocityX,
gesture_event.data.flingStart.velocityY);
DCHECK(!new_fling_velocity.IsZero());
if (fling_boosted)
current_fling_velocity_ += new_fling_velocity;
else
current_fling_velocity_ = new_fling_velocity;<|fim▁hole|> deferred_fling_cancel_time_seconds_ = 0;
disallow_horizontal_fling_scroll_ = !velocity.x;
disallow_vertical_fling_scroll_ = !velocity.y;
last_fling_boost_event_ = WebGestureEvent();
fling_curve_.reset(client_->CreateFlingAnimationCurve(
gesture_event.sourceDevice,
velocity,
blink::WebSize()));
fling_parameters_.startTime = gesture_event.timeStampSeconds;
fling_parameters_.delta = velocity;
fling_parameters_.point = WebPoint(gesture_event.x, gesture_event.y);
fling_parameters_.globalPoint =
WebPoint(gesture_event.globalX, gesture_event.globalY);
TRACE_EVENT_INSTANT2("input",
fling_boosted ? "InputHandlerProxy::FlingBoosted"
: "InputHandlerProxy::FlingReplaced",
TRACE_EVENT_SCOPE_THREAD,
"vx",
current_fling_velocity_.x(),
"vy",
current_fling_velocity_.y());
// The client expects balanced calls between a consumed GestureFlingStart
// and |DidStopFlinging()|. TODO(jdduke): Provide a count parameter to
// |DidStopFlinging()| and only send after the accumulated fling ends.
client_->DidStopFlinging();
return true;
}
default:
// All other types of gestures (taps, presses, etc...) will complete the
// deferred fling cancellation.
CancelCurrentFling();
return false;
}
}
void InputHandlerProxy::ExtendBoostedFlingTimeout(
const blink::WebGestureEvent& event) {
TRACE_EVENT_INSTANT0("input",
"InputHandlerProxy::ExtendBoostedFlingTimeout",
TRACE_EVENT_SCOPE_THREAD);
deferred_fling_cancel_time_seconds_ =
event.timeStampSeconds + kFlingBoostTimeoutDelaySeconds;
last_fling_boost_event_ = event;
}
void InputHandlerProxy::Animate(base::TimeTicks time) {
if (scroll_elasticity_controller_)
scroll_elasticity_controller_->Animate(time);
if (!fling_curve_)
return;
double monotonic_time_sec = InSecondsF(time);
if (deferred_fling_cancel_time_seconds_ &&
monotonic_time_sec > deferred_fling_cancel_time_seconds_) {
CancelCurrentFling();
return;
}
client_->DidAnimateForInput();
if (!has_fling_animation_started_) {
has_fling_animation_started_ = true;
// Guard against invalid, future or sufficiently stale start times, as there
// are no guarantees fling event and animation timestamps are compatible.
if (!fling_parameters_.startTime ||
monotonic_time_sec <= fling_parameters_.startTime ||
monotonic_time_sec >= fling_parameters_.startTime +
kMaxSecondsFromFlingTimestampToFirstAnimate) {
fling_parameters_.startTime = monotonic_time_sec;
input_handler_->SetNeedsAnimateInput();
return;
}
}
bool fling_is_active =
fling_curve_->apply(monotonic_time_sec - fling_parameters_.startTime,
this);
if (disallow_vertical_fling_scroll_ && disallow_horizontal_fling_scroll_)
fling_is_active = false;
if (fling_is_active) {
input_handler_->SetNeedsAnimateInput();
} else {
TRACE_EVENT_INSTANT0("input",
"InputHandlerProxy::animate::flingOver",
TRACE_EVENT_SCOPE_THREAD);
CancelCurrentFling();
}
}
void InputHandlerProxy::MainThreadHasStoppedFlinging() {
fling_may_be_active_on_main_thread_ = false;
client_->DidStopFlinging();
}
void InputHandlerProxy::ReconcileElasticOverscrollAndRootScroll() {
if (scroll_elasticity_controller_)
scroll_elasticity_controller_->ReconcileStretchAndScroll();
}
void InputHandlerProxy::HandleOverscroll(
const gfx::Point& causal_event_viewport_point,
const cc::InputHandlerScrollResult& scroll_result) {
DCHECK(client_);
if (!scroll_result.did_overscroll_root)
return;
TRACE_EVENT2("input",
"InputHandlerProxy::DidOverscroll",
"dx",
scroll_result.unused_scroll_delta.x(),
"dy",
scroll_result.unused_scroll_delta.y());
DidOverscrollParams params;
params.accumulated_overscroll = scroll_result.accumulated_root_overscroll;
params.latest_overscroll_delta = scroll_result.unused_scroll_delta;
params.current_fling_velocity =
ToClientScrollIncrement(current_fling_velocity_);
params.causal_event_viewport_point = causal_event_viewport_point;
if (fling_curve_) {
static const int kFlingOverscrollThreshold = 1;
disallow_horizontal_fling_scroll_ |=
std::abs(params.accumulated_overscroll.x()) >=
kFlingOverscrollThreshold;
disallow_vertical_fling_scroll_ |=
std::abs(params.accumulated_overscroll.y()) >=
kFlingOverscrollThreshold;
}
client_->DidOverscroll(params);
}
bool InputHandlerProxy::CancelCurrentFling() {
if (CancelCurrentFlingWithoutNotifyingClient()) {
client_->DidStopFlinging();
return true;
}
return false;
}
bool InputHandlerProxy::CancelCurrentFlingWithoutNotifyingClient() {
bool had_fling_animation = fling_curve_;
if (had_fling_animation &&
fling_parameters_.sourceDevice == blink::WebGestureDeviceTouchscreen) {
input_handler_->ScrollEnd();
TRACE_EVENT_ASYNC_END0(
"input",
"InputHandlerProxy::HandleGestureFling::started",
this);
}
TRACE_EVENT_INSTANT1("input",
"InputHandlerProxy::CancelCurrentFling",
TRACE_EVENT_SCOPE_THREAD,
"had_fling_animation",
had_fling_animation);
fling_curve_.reset();
has_fling_animation_started_ = false;
gesture_scroll_on_impl_thread_ = false;
current_fling_velocity_ = gfx::Vector2dF();
fling_parameters_ = blink::WebActiveWheelFlingParameters();
if (deferred_fling_cancel_time_seconds_) {
deferred_fling_cancel_time_seconds_ = 0;
WebGestureEvent last_fling_boost_event = last_fling_boost_event_;
last_fling_boost_event_ = WebGestureEvent();
if (last_fling_boost_event.type == WebInputEvent::GestureScrollBegin ||
last_fling_boost_event.type == WebInputEvent::GestureScrollUpdate) {
// Synthesize a GestureScrollBegin, as the original was suppressed.
HandleInputEvent(ObtainGestureScrollBegin(last_fling_boost_event));
}
}
return had_fling_animation;
}
bool InputHandlerProxy::TouchpadFlingScroll(
const WebFloatSize& increment) {
WebMouseWheelEvent synthetic_wheel;
synthetic_wheel.type = WebInputEvent::MouseWheel;
synthetic_wheel.deltaX = increment.width;
synthetic_wheel.deltaY = increment.height;
synthetic_wheel.hasPreciseScrollingDeltas = true;
synthetic_wheel.x = fling_parameters_.point.x;
synthetic_wheel.y = fling_parameters_.point.y;
synthetic_wheel.globalX = fling_parameters_.globalPoint.x;
synthetic_wheel.globalY = fling_parameters_.globalPoint.y;
synthetic_wheel.modifiers = fling_parameters_.modifiers;
InputHandlerProxy::EventDisposition disposition =
HandleInputEvent(synthetic_wheel);
switch (disposition) {
case DID_HANDLE:
return true;
case DROP_EVENT:
break;
case DID_NOT_HANDLE:
TRACE_EVENT_INSTANT0("input",
"InputHandlerProxy::scrollBy::AbortFling",
TRACE_EVENT_SCOPE_THREAD);
// If we got a DID_NOT_HANDLE, that means we need to deliver wheels on the
// main thread. In this case we need to schedule a commit and transfer the
// fling curve over to the main thread and run the rest of the wheels from
// there. This can happen when flinging a page that contains a scrollable
// subarea that we can't scroll on the thread if the fling starts outside
// the subarea but then is flung "under" the pointer.
client_->TransferActiveWheelFlingAnimation(fling_parameters_);
fling_may_be_active_on_main_thread_ = true;
CancelCurrentFlingWithoutNotifyingClient();
break;
}
return false;
}
bool InputHandlerProxy::scrollBy(const WebFloatSize& increment,
const WebFloatSize& velocity) {
WebFloatSize clipped_increment;
WebFloatSize clipped_velocity;
if (!disallow_horizontal_fling_scroll_) {
clipped_increment.width = increment.width;
clipped_velocity.width = velocity.width;
}
if (!disallow_vertical_fling_scroll_) {
clipped_increment.height = increment.height;
clipped_velocity.height = velocity.height;
}
current_fling_velocity_ = clipped_velocity;
// Early out if the increment is zero, but avoid early termination if the
// velocity is still non-zero.
if (clipped_increment == WebFloatSize())
return clipped_velocity != WebFloatSize();
TRACE_EVENT2("input",
"InputHandlerProxy::scrollBy",
"x",
clipped_increment.width,
"y",
clipped_increment.height);
bool did_scroll = false;
switch (fling_parameters_.sourceDevice) {
case blink::WebGestureDeviceTouchpad:
did_scroll = TouchpadFlingScroll(clipped_increment);
break;
case blink::WebGestureDeviceTouchscreen: {
clipped_increment = ToClientScrollIncrement(clipped_increment);
cc::InputHandlerScrollResult scroll_result = input_handler_->ScrollBy(
fling_parameters_.point, clipped_increment);
HandleOverscroll(fling_parameters_.point, scroll_result);
did_scroll = scroll_result.did_scroll;
} break;
}
if (did_scroll) {
fling_parameters_.cumulativeScroll.width += clipped_increment.width;
fling_parameters_.cumulativeScroll.height += clipped_increment.height;
}
// It's possible the provided |increment| is sufficiently small as to not
// trigger a scroll, e.g., with a trivial time delta between fling updates.
// Return true in this case to prevent early fling termination.
if (std::abs(clipped_increment.width) < kScrollEpsilon &&
std::abs(clipped_increment.height) < kScrollEpsilon)
return true;
return did_scroll;
}
} // namespace content<|fim▁end|>
|
WebFloatPoint velocity(current_fling_velocity_.x(),
current_fling_velocity_.y());
|
<|file_name|>flshinc.py<|end_file_name|><|fim▁begin|>"""Tool to loop over fls_h.inc files. Based on nens/asc.py and NumPy
masked arrays. Stripped out all unnecessary flexibility.
Usage:
# Opens zipfile if path ends with zip; inside it opens the only file,
# or raises ValueError if there are several. Currently we need to no
# data value passed in because we don't get it from the file; you may
# need to use some asc file present to get one.
flsh = flshinc.Flsh(path, no_data_value=-999.0)
geo_transform = flsh.geo_transform() # Format same as GDAL's, in
# Rijksdriehoek probably
cellsize_in_m2 = geo_transform[1]*geo_transform[1]
for timestamp, grid in flsh:
print("Total inundated area at timestamp {0}: {1} m2".format(
timestamp, numpy.greater(grid, 0).sum() * cellsize_in_m2))
Extra boolean options to Flsh:
one_per_hour: only yield the first grid of each hour (assumes
timestamp is in hours)
mutate: constantly yield the same grid object. Means that previously
yielded grids change. Faster because no copies are made, but
only use when you understand the risk.
If anything unexpected is encountered in a file, a possibly cryptic
ValueError is raised.
"""
# Python 3 is coming to town
from __future__ import print_function, unicode_literals
from __future__ import absolute_import, division
import logging
import math
import numpy
import numpy.ma
import zipfile
from flooding_lib.util import files
logger = logging.getLogger(__name__)
def splitline(f):
return f.readline().decode('utf8').strip().split()
def ints(f):
return [int(i) for i in splitline(f)]
def floats(f):
return [float(fl) for fl in splitline(f)]
def distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 +
(p1[1] - p2[1]) ** 2)
def check(line, expected):
if line[:len(expected)] != expected:
raise ValueError("line {0} was expected to start with {1}".
format(line, expected))
def y0_is_south(header, helper_geotransform):
if helper_geotransform:
helper_y0 = helper_geotransform[3]
# In old FLS files, header['y0'] is the y value of the
# southwest corner, in newer ones it's the y of the northwest
# corner. We have no way to distinguish them based on the FLS
# file alone.
# The helper geotransform's y0 is always at the north of the
# region. If it is sufficiently northwards of the FLS' y0,
# the y0 must be to the south. "Sufficient" is defined as at
# least 10% of the FLS height -- I'm afraid that without that
# margin, we're going to find maxwaterdepth grids that are a
# tiny bit to the north of the FLS, that would cause false
# souths.
north_of_fls_y0 = (
header['y0'] + 0.1 * (header['nrows'] * header['dx']))
if helper_y0 > north_of_fls_y0:
return True
return False
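def _example_y0_is_south():
    # Illustrative sketch with hypothetical numbers: a grid of 100 rows of
    # 25 m cells starting at y0=440000. A helper (north-edge) y0 of 445000
    # lies above y0 + 10% of the grid height (440250), so y0 is the south
    # edge and the function returns True.
    header = {'y0': 440000.0, 'nrows': 100, 'dx': 25.0}
    return y0_is_south(header, [0.0, 25.0, 0.0, 445000.0, 0.0, -25.0])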
class Flsh(object):
def __init__(
self, path, no_data_value=-999.0, one_per_hour=False,
mutate=False, helper_geotransform=None):
self.path = path
self.no_data_value = no_data_value
self.one_per_hour = one_per_hour
self.mutate = mutate
self.helper_geotransform = helper_geotransform
def geo_transform(self):
header = self._parse_header()
# y0 can be north or south, dy is positive or negative depending
if y0_is_south(header, self.helper_geotransform):
y0 = header['y0'] + (header['nrows'] * header['dx'])
else:
y0 = header['y0']
return [header['x0'], header['dx'], 0.0,
y0, 0.0, -header['dx']]
def get_classes(self):
header = self._parse_header()
return header['classes']
def _open_path(self):
if self.path.endswith('.zip'):
try:
zipf = zipfile.ZipFile(self.path)
namelist = zipf.namelist()
if len(namelist) != 1:
raise ValueError(
"Can only open .zip files with 1 file inside, "
"{p} has {n}.".format(p=self.path, n=len(namelist)))
return zipf.open(namelist[0], mode='rU')
except zipfile.BadZipfile:
raise ValueError(
"{} ends in .zip but can't be opened as one."
.format(self.path))
else:
return file(self.path, 'rU')
@property
def ncols(self):
return self._parse_header()['ncols']
@property
def nrows(self):
return self._parse_header()['nrows']
def _parse_header(self):
if hasattr(self, '_header'):<|fim▁hole|> self.f = self._open_path()
# 1: dimensions
while True:
try:
check(
splitline(self.f),
['MAIN', 'DIMENSIONS', 'MMAX', 'NMAX'])
break
except ValueError:
pass
colrowline = splitline(self.f)
try:
ncols, nrows = [int(c) for c in colrowline]
except ValueError:
if colrowline[0] == '***':
nrows, ncols = self.find_max_col()
# logger.debug("nrows={0} ncols={1}".format(nrows, ncols))
# 2: grid
while True:
try:
spl = splitline(self.f)
check(spl, ['GRID'])
break
except ValueError:
pass
grid = floats(self.f)
spl = spl[1:]
dx = grid[spl.index('DX')]
x0 = grid[spl.index('X0')]
y0 = grid[spl.index('Y0')]
# logger.debug("dx={0} x0={1} y0={2}".format(dx, x0, y0))
# 3: classes
while True:
try:
check(
splitline(self.f),
['CLASSES', 'OF', 'INCREMENTAL', 'FILE'])
break
except ValueError:
pass
classes = []
line = splitline(self.f)
while line != ['ENDCLASSES']:
classes += [[float(fl) for fl in line]]
line = splitline(self.f)
# logger.debug("classes: {0}".format(classes))
self._header = {
'nrows': nrows,
'ncols': ncols,
'dx': dx,
'x0': x0,
'y0': y0,
'classes': classes,
}
return self._header
def find_max_col(self):
opened = self._open_path()
maxcol = 0
maxrow = 0
for line in opened:
line = line.strip().decode('utf8').split()
if not line or '.' in line[0]:
continue
try:
row, col, value = [int(elem) for elem in line]
except ValueError:
continue
maxcol = max(maxcol, col)
maxrow = max(maxrow, row)
logger.debug("Found max col: {}".format(maxcol))
logger.debug("Found max row: {}".format(maxrow))
return maxcol, maxrow
def __iter__(self):
header = self._parse_header()
the_array = numpy.zeros((header['nrows'] + 1, header['ncols'] + 1))
current_timestamp = False
yield_this_grid = False
last_yielded_hour = None
for line in self.f:
line = line.strip().decode('utf8').split()
if not line or '.' in line[0]:
if yield_this_grid:
if self.mutate:
yield current_timestamp, the_array
else:
yield current_timestamp, numpy.array(the_array)
last_yielded_hour = int(current_timestamp)
if not line:
# End of file
return
# Start of a new timestamp
timestamp, _, class_column = line[:3]
current_timestamp = float(timestamp)
class_column = int(class_column) - 1
yield_this_grid = (
not self.one_per_hour
or int(current_timestamp) != last_yielded_hour)
else:
row, col, classvalue = [int(l) for l in line]
if classvalue == 0:
value = 0.0
else:
value = header['classes'][classvalue - 1][class_column]
try:
the_array[-col, row - 1] = value
except IndexError:
print(the_array.shape)
print("col: {}".format(col))
print("row: {}".format(row))
raise
self.f.close() # When the file is closed, it can be deleted
# on Windows
def save_grid_to_image(grid, path, classes, colormap, geo_transform=None):
"""Save this grid as an image.
Assumes that all values in the grid are values that come from
one of the classes. Translates the values in the classes to colors
from the colormap, then finds all the places in the grid that are
equal to that class and sets all those to the right color.
Because of the above (classes) this save functions is not exactly
the same as the ColorMap.apply_to_grid() and files.save_geopng()
functions.
The type of image is decided by the path, but I only test with
PNG."""
classvalues = set()
for classline in classes:
for value in classline:
classvalues.add(value)
class_to_color = dict()
for classvalue in classvalues:
class_to_color[classvalue] = (
colormap.value_to_color(classvalue) or (0, 0, 0, 0))
n, m = grid.shape
colorgrid = numpy.zeros((4, n, m), dtype=numpy.uint8)
redgrid = numpy.zeros((n, m))
greengrid = numpy.zeros((n, m))
bluegrid = numpy.zeros((n, m))
for classvalue, color in class_to_color.items():
mask = (grid == classvalue)
redgrid += mask * color[0]
greengrid += mask * color[1]
bluegrid += mask * color[2]
colorgrid[0] = redgrid
colorgrid[1] = greengrid
colorgrid[2] = bluegrid
# Colored pixels get opacity 255, non-colored pixels opacity 0
# (transparent)
colorgrid[3] = (
((redgrid > 0) | (greengrid > 0) | (bluegrid > 0)) * 255)
files.save_geopng(path, colorgrid, geo_transform)<|fim▁end|>
|
return self._header
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
__author__ = 'Marek Stępniowski, <[email protected]>'<|fim▁hole|><|fim▁end|>
|
__version__ = '0.1'
|