// ===== File: SectionType.js =====
import Element from './Element'
export default class extends Element {
constructor (
val
) {
super({ 'w:type': {} })
    if (val) this.setVal(val) // `val || null` was redundant: val is truthy here
}
setVal (value) {
    if (value) {
      this.src['w:type']['@w:val'] = value
}
else {
delete this.src['w:type']['@w:val']
}
}
getVal () {
return this.src['w:type']['@w:val'] || null
}
}
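// Added example (sketch, not part of SectionType.js): round-tripping the
// 'w:val' attribute, assuming the default export is imported as `SectionType`
// (the name is illustrative; the class is an anonymous default export):
//   const st = new SectionType('nextPage')
//   st.getVal()      // -> 'nextPage'
//   st.setVal(null)  // deletes '@w:val'
//   st.getVal()      // -> null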
# ===== File: __init__.py (GroupEng) =====
#!/usr/bin/python
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# GroupEng is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GroupEng is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
# ===== File: command_line.py =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import tornadoredis
from repi_server import RepiServer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', help='Name of Repi client', type=str,
default='master')
parser.add_argument('-H', '--host', help='Redis host (default: localhost)',
type=str, default='localhost')
parser.add_argument('-p', '--port', help='Redis port (default: 6379)',
type=int, default=6379)
    parser.add_argument('-P', '--http-port', help='HTTP port (default: 8888)',
                        type=int, default=8888)
    parser.add_argument('-ns', '--namespace',
                        help='Redis namespace (default: repi)', type=str,
default='repi')
parser.add_argument('-i', '--info-channel',
help='Redis general PubSub channel (default: cluster)',
type=str, default='cluster')
args = parser.parse_args()
name = args.name
redis_host = args.host
redis_port = args.port
http_port = args.http_port
namespace = args.namespace
info_channel = args.info_channel
if not namespace.endswith(':'):
namespace = '{}:'.format(namespace)
r = tornadoredis.Client(host=redis_host, port=redis_port)
repi_server = RepiServer(r, name=name, namespace=namespace,
info_channel=info_channel, port=http_port)
repi_server.run()
if __name__ == '__main__':
    main()
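# --- Added example (sketch, not part of command_line.py): how the namespace
# --- flag parses and gets its trailing colon; the argv values are hypothetical.
import argparse

_parser = argparse.ArgumentParser()
_parser.add_argument('-ns', '--namespace', type=str, default='repi')
_args = _parser.parse_args(['-ns', 'jobs'])

_namespace = _args.namespace
if not _namespace.endswith(':'):
    _namespace = '{}:'.format(_namespace)
print(_namespace)  # "jobs:" -- every Redis key the server writes shares this prefix
# --- end added example ---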
# ===== File: version.py (pyflink) =====
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
The pyflink version will be consistent with the flink version and follow PEP 440.

.. seealso:: https://www.python.org/dev/peps/pep-0440
"""
__version__ = "1.13.dev0" |
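# --- Added example (sketch, not part of version.py): checking the string
# --- against PEP 440 with the third-party `packaging` library.
from packaging.version import Version

_v = Version("1.13.dev0")
print(_v.is_devrelease)  # True  -- ".dev0" marks a PEP 440 development release
print(_v.release)        # (1, 13)
# --- end added example ---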
# ===== File: genComponsTemplate.py =====
import sys
import os
import io
from pkg_resources import parse_version
import wx
if parse_version(wx.__version__) < parse_version('2.9'):
tmpApp = wx.PySimpleApp()
else:
tmpApp = wx.App(False)
from psychopy import experiment
from psychopy.experiment.components import getAllComponents
# usage: generate or compare all Component.param settings & options
# motivation: catch deviations introduced during refactoring
# use --out to re-generate componsTemplate.txt
# ignore attributes that are there because inherit from object
ignoreObjectAttribs = True
# should not need a wx.App with fetchIcons=False
try:
allComp = getAllComponents(fetchIcons=False)
except Exception:
import wx
if parse_version(wx.__version__) < parse_version('2.9'):
tmpApp = wx.PySimpleApp()
else:
tmpApp = wx.App(False)
try:
from psychopy.app import localization
except Exception:
pass # not needed if can't import it
allComp = getAllComponents(fetchIcons=False)
exp = experiment.Experiment()
relPath = os.path.join(os.path.split(__file__)[0], 'componsTemplate.txt')
if '--out' not in sys.argv:
with io.open(relPath, 'r', encoding='utf-8-sig') as f:
        target = f.read()
targetLines = target.splitlines()
targetTag = {}
for line in targetLines:
try:
t, val = line.split(':',1)
targetTag[t] = val
except ValueError:
            # need more than one value to unpack; this is a weak way to
            # handle multi-line default values, e.g. TextComponent.text.default
            # (see the toy example at the end of this file)
targetTag[t] += '\n' + line # previous t value
else:
outfile = open(relPath,'w')
param = experiment.Param('', '') # want its namespace
ignore = ['__doc__', '__init__', '__module__', '__str__', 'next']
if '--out' not in sys.argv:
# these are for display only (cosmetic) but no harm in gathering initially:
ignore += ['hint',
'label', # comment-out to not ignore labels when checking
'categ'
]
for field in dir(param):
if field.startswith("__"):
ignore.append(field)
fields = set(dir(param)).difference(ignore)
mismatches = []
for compName in sorted(allComp):
comp = allComp[compName](parentName='x', exp=exp)
order = '%s.order:%s' % (compName, eval("comp.order"))
out = [order]
if '--out' in sys.argv:
outfile.write(order+'\n')
elif not order+'\n' in target:
tag = order.split(':', 1)[0]
try:
err = order + ' <== ' + targetTag[tag]
except IndexError: # missing
err = order + ' <==> NEW (no matching param in original)'
print(err)
mismatches.append(err)
for parName in sorted(comp.params):
# default is what you get from param.__str__, which returns its value
default = '%s.%s.default:%s' % (compName, parName, comp.params[parName])
out.append(default)
lineFields = []
for field in sorted(fields):
if parName == 'name' and field == 'updates':
continue
# ignore: never want to change the name *during an experiment*
# the default name.updates value varies across components
# skip private attributes
if field.startswith("_"):
continue
# get value of the field
fieldValue = str(eval("comp.params[parName].%s" % field))
# remove memory address from the string representation
if "at 0x" in fieldValue:
fieldValue = fieldValue.split(" at 0x")[0] + ">"
f = '%s.%s.%s:%s' % (compName, parName, field, fieldValue)
lineFields.append(f)
for line in [default] + lineFields:
if '--out' in sys.argv:
if not ignoreObjectAttribs:
outfile.write(line+'\n')
else:
if (not ":<built-in method __" in line and
not ":<method-wrapper '__" in line and
not ":<bound method " in line):
outfile.write(line+'\n')
elif not line+'\n' in target:
# mismatch, so report on the tag from orig file
# match checks tag + multi-line
# because line is multi-line and target is whole file
tag = line.split(':', 1)[0]
try:
err = line + ' <== ' + targetTag[tag]
except KeyError: # missing
err = line + ' <==> NEW (no matching param in original)'
print(err)
mismatches.append(err)
# return mismatches
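# --- Added example (sketch, not part of genComponsTemplate.py): toy run of the
# --- "tag:value" accumulation above, showing how a ValueError on split() glues
# --- continuation lines onto the previous tag (the weak multi-line handling).
_lines = ["text.default:first line", "second line", "text.order:['a']"]
_tags = {}
for _line in _lines:
    try:
        _t, _val = _line.split(':', 1)
        _tags[_t] = _val
    except ValueError:  # no ':' -> treat as a continuation of the previous value
        _tags[_t] += '\n' + _line
print(_tags["text.default"])  # "first line\nsecond line"
# --- end added example ---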
# ===== File: buildPy2exe.py (Syncplay) =====
#!/usr/bin/env python3
#coding:utf8
# *** TROUBLESHOOTING ***
# 1) If you get the error "ImportError: No module named zope.interface" then add an empty __init__.py file to the PYTHONDIR/Lib/site-packages/zope directory
# 2) It is expected that you will have NSIS 3 from http://nsis.sourceforge.net installed.
import codecs
import sys
# try:
# if (sys.version_info.major != 2) or (sys.version_info.minor < 7):
# raise Exception("You must build Syncplay with Python 2.7!")
# except AttributeError:
# import warnings
# warnings.warn("You must build Syncplay with Python 2.7!")
from glob import glob
import os
import subprocess
from string import Template
from distutils.core import setup
try:
from py2exe.build_exe import py2exe
except ImportError:
from py2exe.distutils_buildexe import py2exe
import syncplay
from syncplay.messages import getMissingStrings
missingStrings = getMissingStrings()
if missingStrings is not None and missingStrings != "":
import warnings
warnings.warn("MISSING/UNUSED STRINGS DETECTED:\n{}".format(missingStrings))
def get_nsis_path():
bin_name = "makensis.exe"
from winreg import HKEY_LOCAL_MACHINE as HKLM
from winreg import KEY_READ, KEY_WOW64_32KEY, OpenKey, QueryValueEx
try:
nsisreg = OpenKey(HKLM, "Software\\NSIS", 0, KEY_READ | KEY_WOW64_32KEY)
if QueryValueEx(nsisreg, "VersionMajor")[0] >= 3:
return "{}\\{}".format(QueryValueEx(nsisreg, "")[0], bin_name)
else:
raise Exception("You must install NSIS 3 or later.")
except WindowsError:
return bin_name
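# Added note: get_nsis_path() returns e.g. "C:\Program Files (x86)\NSIS\makensis.exe"
# when NSIS 3+ is registered (hypothetical path), or the bare "makensis.exe"
# (relying on PATH) when the registry key is absent.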
NSIS_COMPILE = get_nsis_path()
OUT_DIR = "syncplay_v{}".format(syncplay.version)
SETUP_SCRIPT_PATH = "syncplay_setup.nsi"
NSIS_SCRIPT_TEMPLATE = r"""
!include LogicLib.nsh
!include nsDialogs.nsh
!include FileFunc.nsh
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\English.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\Polish.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\Russian.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\German.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\Italian.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\Spanish.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\PortugueseBR.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\Portuguese.nlf"
LoadLanguageFile "$${NSISDIR}\Contrib\Language files\Turkish.nlf"
Unicode true
Name "Syncplay $version"
OutFile "Syncplay-$version-Setup.exe"
InstallDir $$PROGRAMFILES\Syncplay
RequestExecutionLevel admin
ManifestDPIAware false
XPStyle on
Icon syncplay\resources\icon.ico ;Change DIR
SetCompressor /SOLID lzma
VIProductVersion "$version.0"
VIAddVersionKey /LANG=$${LANG_ENGLISH} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_ENGLISH} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_ENGLISH} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_ENGLISH} "FileDescription" "Syncplay"
VIAddVersionKey /LANG=$${LANG_POLISH} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_POLISH} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_POLISH} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_POLISH} "FileDescription" "Syncplay"
VIAddVersionKey /LANG=$${LANG_RUSSIAN} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_RUSSIAN} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_RUSSIAN} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_RUSSIAN} "FileDescription" "Syncplay"
VIAddVersionKey /LANG=$${LANG_ITALIAN} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_ITALIAN} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_ITALIAN} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_ITALIAN} "FileDescription" "Syncplay"
VIAddVersionKey /LANG=$${LANG_SPANISH} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_SPANISH} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_SPANISH} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_SPANISH} "FileDescription" "Syncplay"
VIAddVersionKey /LANG=$${LANG_PORTUGUESEBR} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_PORTUGUESEBR} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_PORTUGUESEBR} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_PORTUGUESEBR} "FileDescription" "Syncplay"
VIAddVersionKey /LANG=$${LANG_PORTUGUESE} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_PORTUGUESE} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_PORTUGUESE} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_PORTUGUESE} "FileDescription" "Syncplay"
VIAddVersionKey /LANG=$${LANG_TURKISH} "ProductName" "Syncplay"
VIAddVersionKey /LANG=$${LANG_TURKISH} "FileVersion" "$version.0"
VIAddVersionKey /LANG=$${LANG_TURKISH} "LegalCopyright" "Syncplay"
VIAddVersionKey /LANG=$${LANG_TURKISH} "FileDescription" "Syncplay"
LangString ^SyncplayLanguage $${LANG_ENGLISH} "en"
LangString ^Associate $${LANG_ENGLISH} "Associate Syncplay with multimedia files."
LangString ^Shortcut $${LANG_ENGLISH} "Create Shortcuts in following locations:"
LangString ^StartMenu $${LANG_ENGLISH} "Start Menu"
LangString ^Desktop $${LANG_ENGLISH} "Desktop"
LangString ^QuickLaunchBar $${LANG_ENGLISH} "Quick Launch Bar"
LangString ^AutomaticUpdates $${LANG_ENGLISH} "Check for updates automatically"
LangString ^UninstConfig $${LANG_ENGLISH} "Delete configuration file."
LangString ^SyncplayLanguage $${LANG_POLISH} "pl"
LangString ^Associate $${LANG_POLISH} "Skojarz Syncplaya z multimediami"
LangString ^Shortcut $${LANG_POLISH} "Utworz skroty w nastepujacych miejscach:"
LangString ^StartMenu $${LANG_POLISH} "Menu Start"
LangString ^Desktop $${LANG_POLISH} "Pulpit"
LangString ^QuickLaunchBar $${LANG_POLISH} "Pasek szybkiego uruchamiania"
LangString ^UninstConfig $${LANG_POLISH} "Usun plik konfiguracyjny."
LangString ^SyncplayLanguage $${LANG_RUSSIAN} "ru"
LangString ^Associate $${LANG_RUSSIAN} "Ассоциировать Syncplay с видеофайлами"
LangString ^Shortcut $${LANG_RUSSIAN} "Создать ярлыки:"
LangString ^StartMenu $${LANG_RUSSIAN} "в меню Пуск"
LangString ^Desktop $${LANG_RUSSIAN} "на рабочем столе"
LangString ^QuickLaunchBar $${LANG_RUSSIAN} "в меню быстрого запуска"
LangString ^AutomaticUpdates $${LANG_RUSSIAN} "Проверять обновления автоматически"; TODO: Confirm Russian translation ("Check for updates automatically")
LangString ^UninstConfig $${LANG_RUSSIAN} "Удалить файл настроек."
LangString ^SyncplayLanguage $${LANG_GERMAN} "de"
LangString ^Associate $${LANG_GERMAN} "Syncplay als Standardprogramm für Multimedia-Dateien verwenden."
LangString ^Shortcut $${LANG_GERMAN} "Erstelle Verknüpfungen an folgenden Orten:"
LangString ^StartMenu $${LANG_GERMAN} "Startmenü"
LangString ^Desktop $${LANG_GERMAN} "Desktop"
LangString ^QuickLaunchBar $${LANG_GERMAN} "Schnellstartleiste"
LangString ^AutomaticUpdates $${LANG_GERMAN} "Automatisch nach Updates suchen";
LangString ^UninstConfig $${LANG_GERMAN} "Konfigurationsdatei löschen."
LangString ^SyncplayLanguage $${LANG_ITALIAN} "it"
LangString ^Associate $${LANG_ITALIAN} "Associa Syncplay con i file multimediali."
LangString ^Shortcut $${LANG_ITALIAN} "Crea i collegamenti nei percorsi seguenti:"
LangString ^StartMenu $${LANG_ITALIAN} "Menu Start"
LangString ^Desktop $${LANG_ITALIAN} "Desktop"
LangString ^QuickLaunchBar $${LANG_ITALIAN} "Barra di avvio rapido"
LangString ^AutomaticUpdates $${LANG_ITALIAN} "Controllo automatico degli aggiornamenti"
LangString ^UninstConfig $${LANG_ITALIAN} "Cancella i file di configurazione."
LangString ^SyncplayLanguage $${LANG_SPANISH} "es"
LangString ^Associate $${LANG_SPANISH} "Asociar Syncplay con archivos multimedia."
LangString ^Shortcut $${LANG_SPANISH} "Crear accesos directos en las siguientes ubicaciones:"
LangString ^StartMenu $${LANG_SPANISH} "Menú de inicio"
LangString ^Desktop $${LANG_SPANISH} "Escritorio"
LangString ^QuickLaunchBar $${LANG_SPANISH} "Barra de acceso rápido"
LangString ^AutomaticUpdates $${LANG_SPANISH} "Buscar actualizaciones automáticamente"
LangString ^UninstConfig $${LANG_SPANISH} "Borrar archivo de configuración."
LangString ^SyncplayLanguage $${LANG_PORTUGUESEBR} "pt_BR"
LangString ^Associate $${LANG_PORTUGUESEBR} "Associar Syncplay aos arquivos multimídia."
LangString ^Shortcut $${LANG_PORTUGUESEBR} "Criar atalhos nos seguintes locais:"
LangString ^StartMenu $${LANG_PORTUGUESEBR} "Menu Iniciar"
LangString ^Desktop $${LANG_PORTUGUESEBR} "Área de trabalho"
LangString ^QuickLaunchBar $${LANG_PORTUGUESEBR} "Barra de acesso rápido"
LangString ^AutomaticUpdates $${LANG_PORTUGUESEBR} "Verificar atualizações automaticamente"
LangString ^UninstConfig $${LANG_PORTUGUESEBR} "Deletar arquivo de configuração."
LangString ^SyncplayLanguage $${LANG_PORTUGUESE} "pt_PT"
LangString ^Associate $${LANG_PORTUGUESE} "Associar Syncplay aos ficheiros multimédia."
LangString ^Shortcut $${LANG_PORTUGUESE} "Criar atalhos nos seguintes locais:"
LangString ^StartMenu $${LANG_PORTUGUESE} "Menu Iniciar"
LangString ^Desktop $${LANG_PORTUGUESE} "Área de trabalho"
LangString ^QuickLaunchBar $${LANG_PORTUGUESE} "Barra de acesso rápido"
LangString ^AutomaticUpdates $${LANG_PORTUGUESE} "Verificar atualizações automaticamente"
LangString ^UninstConfig $${LANG_PORTUGUESE} "Apagar ficheiro de configuração."
LangString ^SyncplayLanguage $${LANG_TURKISH} "tr"
LangString ^Associate $${LANG_TURKISH} "Syncplay'i ortam dosyalarıyla ilişkilendirin."
LangString ^Shortcut $${LANG_TURKISH} "Aşağıdaki konumlarda kısayollar oluşturun:"
LangString ^StartMenu $${LANG_TURKISH} "Başlangıç menüsü"
LangString ^Desktop $${LANG_TURKISH} "Masaüstü"
LangString ^QuickLaunchBar $${LANG_TURKISH} "Hızlı Başlatma Çubuğu"
LangString ^AutomaticUpdates $${LANG_TURKISH} "Güncellemeleri otomatik denetle"
LangString ^UninstConfig $${LANG_TURKISH} "Yapılandırma dosyasını silin."
; Remove text to save space
LangString ^ClickInstall $${LANG_GERMAN} " "
PageEx license
LicenseData syncplay\resources\license.rtf
PageExEnd
Page custom DirectoryCustom DirectoryCustomLeave
Page instFiles
UninstPage custom un.installConfirm un.installConfirmLeave
UninstPage instFiles
Var Dialog
Var Icon_Syncplay
Var Icon_Syncplay_Handle
;Var CheckBox_Associate
Var CheckBox_AutomaticUpdates
Var CheckBox_StartMenuShortcut
Var CheckBox_DesktopShortcut
Var CheckBox_QuickLaunchShortcut
;Var CheckBox_Associate_State
Var CheckBox_AutomaticUpdates_State
Var CheckBox_StartMenuShortcut_State
Var CheckBox_DesktopShortcut_State
Var CheckBox_QuickLaunchShortcut_State
Var Button_Browse
Var Directory
Var GroupBox_DirSub
Var Label_Text
Var Label_Shortcut
Var Label_Size
Var Label_Space
Var Text_Directory
Var Uninst_Dialog
Var Uninst_Icon
Var Uninst_Icon_Handle
Var Uninst_Label_Directory
Var Uninst_Label_Text
Var Uninst_Text_Directory
Var Uninst_CheckBox_Config
Var Uninst_CheckBox_Config_State
Var Size
Var SizeHex
Var AvailibleSpace
Var AvailibleSpaceGiB
Var Drive
Var VLC_Directory
;!macro APP_ASSOCIATE EXT FileCLASS DESCRIPTION COMMANDTEXT COMMAND
; WriteRegStr HKCR ".$${EXT}" "" "$${FileCLASS}"
; WriteRegStr HKCR "$${FileCLASS}" "" `$${DESCRIPTION}`
; WriteRegStr HKCR "$${FileCLASS}\shell" "" "open"
; WriteRegStr HKCR "$${FileCLASS}\shell\open" "" `$${COMMANDTEXT}`
; WriteRegStr HKCR "$${FileCLASS}\shell\open\command" "" `$${COMMAND}`
;!macroend
!macro APP_UNASSOCIATE EXT FileCLASS
; Backup the previously associated File class
ReadRegStr $$R0 HKCR ".$${EXT}" `$${FileCLASS}_backup`
WriteRegStr HKCR ".$${EXT}" "" "$$R0"
DeleteRegKey HKCR `$${FileCLASS}`
!macroend
;!macro ASSOCIATE EXT
; !insertmacro APP_ASSOCIATE "$${EXT}" "Syncplay.$${EXT}" "$$INSTDIR\Syncplay.exe,%1%" \
; "Open with Syncplay" "$$INSTDIR\Syncplay.exe $$\"%1$$\""
;!macroend
!macro UNASSOCIATE EXT
!insertmacro APP_UNASSOCIATE "$${EXT}" "Syncplay.$${EXT}"
!macroend
;Prevents running more than one instance of the installer and sets the default state of the checkboxes
Function .onInit
System::Call 'kernel32::CreateMutexA(i 0, i 0, t "SyncplayMutex") i .r1 ?e'
Pop $$R0
StrCmp $$R0 0 +3
MessageBox MB_OK|MB_ICONEXCLAMATION "The installer is already running."
Abort
;StrCpy $$CheckBox_Associate_State $${BST_CHECKED}
StrCpy $$CheckBox_StartMenuShortcut_State $${BST_CHECKED}
Call GetSize
Call DriveSpace
Call Language
FunctionEnd
;Language selection dialog
Function Language
Push ""
Push $${LANG_ENGLISH}
Push English
Push $${LANG_POLISH}
Push Polski
Push $${LANG_RUSSIAN}
Push Русский
Push $${LANG_GERMAN}
Push Deutsch
Push $${LANG_ITALIAN}
Push Italiano
Push $${LANG_SPANISH}
Push Español
Push $${LANG_PORTUGUESEBR}
Push 'Português do Brasil'
Push $${LANG_PORTUGUESE}
Push 'Português de Portugal'
Push $${LANG_TURKISH}
Push 'Türkçe'
Push A ; A means auto count languages
LangDLL::LangDialog "Language Selection" "Please select the language of Syncplay and the installer"
Pop $$LANGUAGE
StrCmp $$LANGUAGE "cancel" 0 +2
Abort
FunctionEnd
Function DirectoryCustom
nsDialogs::Create 1018
Pop $$Dialog
GetFunctionAddress $$R8 DirectoryCustomLeave
nsDialogs::OnBack $$R8
$${NSD_CreateIcon} 0u 0u 22u 20u ""
Pop $$Icon_Syncplay
$${NSD_SetIconFromInstaller} $$Icon_Syncplay $$Icon_Syncplay_Handle
$${NSD_CreateLabel} 25u 0u 241u 34u "$$(^DirText)"
Pop $$Label_Text
$${NSD_CreateText} 8u 38u 187u 12u "$$INSTDIR"
Pop $$Text_Directory
$${NSD_SetFocus} $$Text_Directory
$${NSD_CreateBrowseButton} 202u 37u 55u 14u "$$(^BrowseBtn)"
Pop $$Button_Browse
$${NSD_OnClick} $$Button_Browse DirectoryBrowseDialog
$${NSD_CreateGroupBox} 1u 27u 264u 30u "$$(^DirSubText)"
Pop $$GroupBox_DirSub
$${NSD_CreateLabel} 0u 122u 132 8u "$$(^SpaceRequired)$$SizeMB"
Pop $$Label_Size
$${NSD_CreateLabel} 321u 122u 132 8u "$$(^SpaceAvailable)$$AvailibleSpaceGiB.$$AvailibleSpaceGB"
Pop $$Label_Space
;$${NSD_CreateCheckBox} 8u 59u 187u 10u "$$(^Associate)"
;Pop $$CheckBox_Associate
$${NSD_CreateCheckBox} 8u 72u 250u 10u "$$(^AutomaticUpdates)"
Pop $$CheckBox_AutomaticUpdates
$${NSD_Check} $$CheckBox_AutomaticUpdates
$${NSD_CreateLabel} 8u 95u 187u 10u "$$(^Shortcut)"
Pop $$Label_Shortcut
$${NSD_CreateCheckbox} 8u 105u 70u 10u "$$(^StartMenu)"
Pop $$CheckBox_StartMenuShortcut
$${NSD_CreateCheckbox} 78u 105u 70u 10u "$$(^Desktop)"
Pop $$CheckBox_DesktopShortcut
$${NSD_CreateCheckbox} 158u 105u 130u 10u "$$(^QuickLaunchBar)"
Pop $$CheckBox_QuickLaunchShortcut
;$${If} $$CheckBox_Associate_State == $${BST_CHECKED}
; $${NSD_Check} $$CheckBox_Associate
;$${EndIf}
$${If} $$CheckBox_StartMenuShortcut_State == $${BST_CHECKED}
$${NSD_Check} $$CheckBox_StartMenuShortcut
$${EndIf}
$${If} $$CheckBox_DesktopShortcut_State == $${BST_CHECKED}
$${NSD_Check} $$CheckBox_DesktopShortcut
$${EndIf}
$${If} $$CheckBox_QuickLaunchShortcut_State == $${BST_CHECKED}
$${NSD_Check} $$CheckBox_QuickLaunchShortcut
$${EndIf}
$${If} $$CheckBox_AutomaticUpdates_State == $${BST_CHECKED}
$${NSD_Check} $$CheckBox_AutomaticUpdates
$${EndIf}
nsDialogs::Show
$${NSD_FreeIcon} $$Icon_Syncplay_Handle
FunctionEnd
Function DirectoryCustomLeave
$${NSD_GetText} $$Text_Directory $$INSTDIR
;$${NSD_GetState} $$CheckBox_Associate $$CheckBox_Associate_State
$${NSD_GetState} $$CheckBox_AutomaticUpdates $$CheckBox_AutomaticUpdates_State
$${NSD_GetState} $$CheckBox_StartMenuShortcut $$CheckBox_StartMenuShortcut_State
$${NSD_GetState} $$CheckBox_DesktopShortcut $$CheckBox_DesktopShortcut_State
$${NSD_GetState} $$CheckBox_QuickLaunchShortcut $$CheckBox_QuickLaunchShortcut_State
FunctionEnd
Function DirectoryBrowseDialog
nsDialogs::SelectFolderDialog $$(^DirBrowseText)
Pop $$Directory
$${If} $$Directory != error
StrCpy $$INSTDIR $$Directory
$${NSD_SetText} $$Text_Directory $$INSTDIR
Call DriveSpace
$${NSD_SetText} $$Label_Space "$$(^SpaceAvailable)$$AvailibleSpaceGiB.$$AvailibleSpaceGB"
$${EndIf}
Abort
FunctionEnd
Function GetSize
StrCpy $$Size "$totalSize"
IntOp $$Size $$Size / 1024
IntFmt $$SizeHex "0x%08X" $$Size
IntOp $$Size $$Size / 1024
FunctionEnd
;Calculates Free Space on HDD
Function DriveSpace
StrCpy $$Drive $$INSTDIR 1
$${DriveSpace} "$$Drive:\" "/D=F /S=M" $$AvailibleSpace
IntOp $$AvailibleSpaceGiB $$AvailibleSpace / 1024
IntOp $$AvailibleSpace $$AvailibleSpace % 1024
IntOp $$AvailibleSpace $$AvailibleSpace / 102
FunctionEnd
Function InstallOptions
;$${If} $$CheckBox_Associate_State == $${BST_CHECKED}
; Call Associate
; DetailPrint "Associated Syncplay with multimedia files"
;$${EndIf}
$${If} $$CheckBox_StartMenuShortcut_State == $${BST_CHECKED}
CreateDirectory $$SMPROGRAMS\Syncplay
SetOutPath "$$INSTDIR"
CreateShortCut "$$SMPROGRAMS\Syncplay\Syncplay.lnk" "$$INSTDIR\Syncplay.exe" ""
CreateShortCut "$$SMPROGRAMS\Syncplay\Syncplay Server.lnk" "$$INSTDIR\syncplayServer.exe" ""
CreateShortCut "$$SMPROGRAMS\Syncplay\Uninstall.lnk" "$$INSTDIR\Uninstall.exe" ""
WriteINIStr "$$SMPROGRAMS\Syncplay\SyncplayWebsite.url" "InternetShortcut" "URL" "https://syncplay.pl"
$${EndIf}
$${If} $$CheckBox_DesktopShortcut_State == $${BST_CHECKED}
SetOutPath "$$INSTDIR"
CreateShortCut "$$DESKTOP\Syncplay.lnk" "$$INSTDIR\Syncplay.exe" ""
$${EndIf}
$${If} $$CheckBox_QuickLaunchShortcut_State == $${BST_CHECKED}
SetOutPath "$$INSTDIR"
CreateShortCut "$$QUICKLAUNCH\Syncplay.lnk" "$$INSTDIR\Syncplay.exe" ""
$${EndIf}
FunctionEnd
;Associates extensions with Syncplay
;Function Associate
; !insertmacro ASSOCIATE avi
; !insertmacro ASSOCIATE mpg
; !insertmacro ASSOCIATE mpeg
; !insertmacro ASSOCIATE mpe
; !insertmacro ASSOCIATE m1v
; !insertmacro ASSOCIATE m2v
; !insertmacro ASSOCIATE mpv2
; !insertmacro ASSOCIATE mp2v
; !insertmacro ASSOCIATE mkv
; !insertmacro ASSOCIATE mp4
; !insertmacro ASSOCIATE m4v
; !insertmacro ASSOCIATE mp4v
; !insertmacro ASSOCIATE 3gp
; !insertmacro ASSOCIATE 3gpp
; !insertmacro ASSOCIATE 3g2
; !insertmacro ASSOCIATE 3pg2
; !insertmacro ASSOCIATE flv
; !insertmacro ASSOCIATE f4v
; !insertmacro ASSOCIATE rm
; !insertmacro ASSOCIATE wmv
; !insertmacro ASSOCIATE swf
; !insertmacro ASSOCIATE rmvb
; !insertmacro ASSOCIATE divx
; !insertmacro ASSOCIATE amv
;FunctionEnd
Function WriteRegistry
Call GetSize
WriteRegStr HKLM SOFTWARE\Syncplay "Install_Dir" "$$INSTDIR"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "DisplayName" "Syncplay"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "InstallLocation" "$$INSTDIR"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "UninstallString" '"$$INSTDIR\uninstall.exe"'
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "DisplayIcon" "$$INSTDIR\resources\icon.ico"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "Publisher" "Syncplay"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "DisplayVersion" "$version"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "URLInfoAbout" "https://syncplay.pl/"
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "NoModify" 1
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "NoRepair" 1
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "EstimatedSize" "$$SizeHex"
WriteINIStr $$APPDATA\syncplay.ini general language $$(^SyncplayLanguage)
$${If} $$CheckBox_AutomaticUpdates_State == $${BST_CHECKED}
WriteINIStr $$APPDATA\syncplay.ini general CheckForUpdatesAutomatically "True"
$${Else}
WriteINIStr $$APPDATA\syncplay.ini general CheckForUpdatesAutomatically "False"
$${EndIf}
FunctionEnd
Function un.installConfirm
nsDialogs::Create 1018
Pop $$Uninst_Dialog
$${NSD_CreateIcon} 0u 1u 22u 20u ""
Pop $$Uninst_Icon
$${NSD_SetIconFromInstaller} $$Uninst_Icon $$Uninst_Icon_Handle
$${NSD_CreateLabel} 0u 45u 55u 8u "$$(^UninstallingSubText)"
Pop $$Uninst_Label_Directory
$${NSD_CreateLabel} 25u 0u 241u 34u "$$(^UninstallingText)"
Pop $$Uninst_Label_Text
ReadRegStr $$INSTDIR HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "InstallLocation"
$${NSD_CreateText} 56u 43u 209u 12u "$$INSTDIR"
Pop $$Uninst_Text_Directory
EnableWindow $$Uninst_Text_Directory 0
$${NSD_CreateCheckBox} 0u 60u 250u 10u "$$(^UninstConfig)"
Pop $$Uninst_CheckBox_Config
nsDialogs::Show
$${NSD_FreeIcon} $$Uninst_Icon_Handle
FunctionEnd
Function un.installConfirmLeave
$${NSD_GetState} $$Uninst_CheckBox_Config $$Uninst_CheckBox_Config_State
FunctionEnd
Function un.AssociateDel
!insertmacro UNASSOCIATE avi
!insertmacro UNASSOCIATE mpg
!insertmacro UNASSOCIATE mpeg
!insertmacro UNASSOCIATE mpe
!insertmacro UNASSOCIATE m1v
!insertmacro UNASSOCIATE m2v
!insertmacro UNASSOCIATE mpv2
!insertmacro UNASSOCIATE mp2v
!insertmacro UNASSOCIATE mkv
!insertmacro UNASSOCIATE mp4
!insertmacro UNASSOCIATE m4v
!insertmacro UNASSOCIATE mp4v
!insertmacro UNASSOCIATE 3gp
!insertmacro UNASSOCIATE 3gpp
!insertmacro UNASSOCIATE 3g2
!insertmacro UNASSOCIATE 3pg2
!insertmacro UNASSOCIATE flv
!insertmacro UNASSOCIATE f4v
!insertmacro UNASSOCIATE rm
!insertmacro UNASSOCIATE wmv
!insertmacro UNASSOCIATE swf
!insertmacro UNASSOCIATE rmvb
!insertmacro UNASSOCIATE divx
!insertmacro UNASSOCIATE amv
FunctionEnd
Function un.InstallOptions
Delete $$SMPROGRAMS\Syncplay\Syncplay.lnk
Delete "$$SMPROGRAMS\Syncplay\Syncplay Server.lnk"
Delete $$SMPROGRAMS\Syncplay\Uninstall.lnk
Delete $$SMPROGRAMS\Syncplay\SyncplayWebsite.url
RMDir $$SMPROGRAMS\Syncplay
Delete $$DESKTOP\Syncplay.lnk
Delete $$QUICKLAUNCH\Syncplay.lnk
ReadRegStr $$VLC_Directory HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay" "VLCInstallLocation"
IfFileExists "$$VLC_Directory\lua\intf\syncplay.lua" 0 +2
Delete $$VLC_Directory\lua\intf\syncplay.lua
FunctionEnd
Section "Install"
SetOverwrite on
SetOutPath $$INSTDIR
WriteUninstaller uninstall.exe
$installFiles
Call InstallOptions
Call WriteRegistry
SectionEnd
Section "Uninstall"
Call un.AssociateDel
Call un.InstallOptions
$uninstallFiles
DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Syncplay"
DeleteRegKey HKLM SOFTWARE\Syncplay
Delete $$INSTDIR\uninstall.exe
RMDir $$INSTDIR\Syncplay\\resources\lua\intf
RMDir $$INSTDIR\Syncplay\\resources\lua
RMDir $$INSTDIR\Syncplay\\resources
RMDir $$INSTDIR\resources
RMDir $$INSTDIR\lib
RMDir $$INSTDIR
$${If} $$Uninst_CheckBox_Config_State == $${BST_CHECKED}
IfFileExists "$$APPDATA\.syncplay" 0 +2
Delete $$APPDATA\.syncplay
IfFileExists "$$APPDATA\syncplay.ini" 0 +2
Delete $$APPDATA\syncplay.ini
$${EndIf}
SectionEnd
"""
class NSISScript(object):
def create(self):
fileList, totalSize = self.getBuildDirContents(OUT_DIR)
print("Total size eq: {}".format(totalSize))
installFiles = self.prepareInstallListTemplate(fileList)
uninstallFiles = self.prepareDeleteListTemplate(fileList)
if os.path.isfile(SETUP_SCRIPT_PATH):
raise RuntimeError("Cannot create setup script, file exists at {}".format(SETUP_SCRIPT_PATH))
contents = Template(NSIS_SCRIPT_TEMPLATE).substitute(
version=syncplay.version,
uninstallFiles=uninstallFiles,
installFiles=installFiles,
totalSize=totalSize,
)
with codecs.open(SETUP_SCRIPT_PATH, "w", "utf-8-sig") as outfile:
outfile.write(contents)
def compile(self):
if not os.path.isfile(NSIS_COMPILE):
return "makensis.exe not found, won't create the installer"
subproc = subprocess.Popen([NSIS_COMPILE, SETUP_SCRIPT_PATH], env=os.environ)
subproc.communicate()
retcode = subproc.returncode
os.remove(SETUP_SCRIPT_PATH)
if retcode:
raise RuntimeError("NSIS compilation return code: %d" % retcode)
def getBuildDirContents(self, path):
fileList = {}
totalSize = 0
for root, _, files in os.walk(path):
totalSize += sum(os.path.getsize(os.path.join(root, file_)) for file_ in files)
for file_ in files:
new_root = root.replace(OUT_DIR, "").strip("\\")
if new_root not in fileList:
fileList[new_root] = []
fileList[new_root].append(file_)
return fileList, totalSize
def prepareInstallListTemplate(self, fileList):
create = []
for dir_ in fileList.keys():
create.append('SetOutPath "$INSTDIR\\{}"'.format(dir_))
for file_ in fileList[dir_]:
create.append('FILE "{}\\{}\\{}"'.format(OUT_DIR, dir_, file_))
return "\n".join(create)
def prepareDeleteListTemplate(self, fileList):
delete = []
        for dir_ in fileList.keys():
            for file_ in fileList[dir_]:
                delete.append('DELETE "$INSTDIR\\{}\\{}"'.format(dir_, file_))
            # RMdir must reference the directory, not the last file (bug fix),
            # and runs once per directory after its files are deleted
            delete.append('RMdir "$INSTDIR\\{}"'.format(dir_))
        return "\n".join(delete)
def pruneUnneededLibraries():
from pathlib import Path
cwd = os.getcwd()
libDir = cwd + '\\' + OUT_DIR + '\\lib\\'
unneededModules = ['PySide2.Qt3D*', 'PySide2.QtAxContainer.pyd', 'PySide2.QtCharts.pyd', 'PySide2.QtConcurrent.pyd',
'PySide2.QtDataVisualization.pyd', 'PySide2.QtHelp.pyd', 'PySide2.QtLocation.pyd',
'PySide2.QtMultimedia.pyd', 'PySide2.QtMultimediaWidgets.pyd', 'PySide2.QtOpenGL.pyd',
'PySide2.QtPositioning.pyd', 'PySide2.QtPrintSupport.pyd', 'PySide2.QtQml.pyd',
'PySide2.QtQuick.pyd', 'PySide2.QtQuickWidgets.pyd', 'PySide2.QtScxml.pyd', 'PySide2.QtSensors.pyd',
'PySide2.QtSql.pyd', 'PySide2.QtSvg.pyd', 'PySide2.QtTest.pyd', 'PySide2.QtTextToSpeech.pyd',
'PySide2.QtUiTools.pyd', 'PySide2.QtWebChannel.pyd', 'PySide2.QtWebEngine.pyd',
'PySide2.QtWebEngineCore.pyd', 'PySide2.QtWebEngineWidgets.pyd', 'PySide2.QtWebSockets.pyd',
'PySide2.QtWinExtras.pyd', 'PySide2.QtXml.pyd', 'PySide2.QtXmlPatterns.pyd']
unneededLibs = ['Qt53D*', 'Qt5Charts.dll', 'Qt5Concurrent.dll', 'Qt5DataVisualization.dll', 'Qt5Gamepad.dll', 'Qt5Help.dll',
'Qt5Location.dll', 'Qt5Multimedia.dll', 'Qt5MultimediaWidgets.dll', 'Qt5OpenGL.dll', 'Qt5Positioning.dll',
'Qt5PrintSupport.dll', 'Qt5Quick.dll', 'Qt5QuickWidgets.dll', 'Qt5Scxml.dll', 'Qt5Sensors.dll', 'Qt5Sql.dll',
'Qt5Svg.dll', 'Qt5Test.dll', 'Qt5TextToSpeech.dll', 'Qt5WebChannel.dll', 'Qt5WebEngine.dll',
'Qt5WebEngineCore.dll', 'Qt5WebEngineWidgets.dll', 'Qt5WebSockets.dll', 'Qt5WinExtras.dll', 'Qt5Xml.dll',
'Qt5XmlPatterns.dll']
windowsDLL = ['MSVCP140.dll', 'VCRUNTIME140.dll']
deleteList = unneededModules + unneededLibs + windowsDLL
deleteList.append('api-*')
for filename in deleteList:
for p in Path(libDir).glob(filename):
p.unlink()
def copyQtPlugins(paths):
import shutil
from PySide2 import QtCore
basePath = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.PluginsPath)
basePath = basePath.replace('/', '\\')
destBase = os.getcwd() + '\\' + OUT_DIR
for elem in paths:
elemDir, elemName = os.path.split(elem)
source = basePath + '\\' + elem
dest = destBase + '\\' + elem
destDir = destBase + '\\' + elemDir
os.makedirs(destDir, exist_ok=True)
shutil.copy(source, dest)
class build_installer(py2exe):
def run(self):
py2exe.run(self)
print('*** deleting unnecessary libraries and modules ***')
pruneUnneededLibraries()
print('*** copying qt plugins ***')
copyQtPlugins(qt_plugins)
script = NSISScript()
script.create()
print("*** compiling the NSIS setup script ***")
script.compile()
print("*** DONE ***")
guiIcons = glob('syncplay/resources/*.ico') + glob('syncplay/resources/*.png') + ['syncplay/resources/spinner.mng']
resources = [
"syncplay/resources/syncplayintf.lua",
"syncplay/resources/license.rtf",
"syncplay/resources/third-party-notices.rtf"
]
resources.extend(guiIcons)
intf_resources = ["syncplay/resources/lua/intf/syncplay.lua"]
qt_plugins = ['platforms\\qwindows.dll', 'styles\\qwindowsvistastyle.dll']
common_info = dict(
name='Syncplay',
version=syncplay.version,
author='Uriziel',
author_email='[email protected]',
description='Syncplay',
)
info = dict(
common_info,
windows=[{
"script": "syncplayClient.py",
"icon_resources": [(1, "syncplay\\resources\\icon.ico")],
'dest_base': "Syncplay"},
],
console=['syncplayServer.py'],
# *** If you wish to make the Syncplay client use console mode (for --no-gui to work) then comment out the above two lines and uncomment the following line:
# console=['syncplayServer.py', {"script":"syncplayClient.py", "icon_resources":[(1, "resources\\icon.ico")], 'dest_base': "Syncplay"}],
options={
'py2exe': {
'dist_dir': OUT_DIR,
'packages': 'PySide2, cffi, OpenSSL, certifi',
'includes': 'twisted, sys, encodings, datetime, os, time, math, urllib, ast, unicodedata, _ssl, win32pipe, win32file',
'excludes': 'venv, doctest, pdb, unittest, win32clipboard, win32pdh, win32security, win32trace, win32ui, winxpgui, win32process, Tkinter',
'dll_excludes': 'msvcr71.dll, MSVCP90.dll, POWRPROF.dll',
'optimize': 2,
'compressed': 1
}
},
data_files=[("resources", resources), ("resources/lua/intf", intf_resources)],
zipfile="lib/libsync",
cmdclass={"py2exe": build_installer},
)
sys.argv.extend(['py2exe'])
setup(**info)
// ===== File: macro-expansion.rs =====
#[macro_use]
extern crate dcrate;
fn main() {
// This ensures that the expansion of nested macros works correctly.
example!(inner!(b" "));
// ^^^^^^^^^^^^^^^^^^^^^^^ERR(<1.44.0-beta) no method named `missing`
// ^^^^^^^^^^^^^^^^^^^^^^^ERR(<1.44.0-beta) this error originates in a macro outside of the current crate
// ^^^^^^^^^^^^^^^^^^^^^^^ERR(>=1.39.0-beta,<1.44.0-beta) method not found
// ^^^^^^^^^^^^^^^^^^^^^^^HELP(>=1.44.0-beta) in this macro invocation
// ^^^^^^^^^^^^^^^^^^^^^^^MSG(>=1.44.0-beta) See Primary: lib.rs:10
}
# ===== File: config.py =====
import os
import sys
from utils.SinfonierConstants import Environment as EnvConst
SINFONIER_API_NAME = os.environ[EnvConst.SINFONIER_ENV_KEY]
if SINFONIER_API_NAME == EnvConst.DEVELOP_ENVIRONMENT:
from environmentConfig.Develop import *
elif SINFONIER_API_NAME == EnvConst.PROD_ENVIRONMENT:
from environmentConfig.Production import *
elif SINFONIER_API_NAME == EnvConst.DOCKER_ENVIRONMENT:
from environmentConfig.Docker import *
else:
    sys.exit('ERROR: Environment not found: ' + EnvConst.SINFONIER_ENV_KEY)
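# Added example (sketch): the environment must be chosen via the env var
# *before* this module is imported; the actual variable name is defined in
# SinfonierConstants and is not shown here.
#   import os
#   os.environ["SINFONIER_ENV"] = "docker"  # hypothetical key/value
#   import config                           # now pulls in environmentConfig.Docker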
# ===== File: test_floating_ips.py (Quark) =====
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import mock
import netaddr
from neutron.common import exceptions as ex
from quark.db import models
from quark import exceptions as q_ex
from quark.plugin_modules import floating_ips
from quark.tests import test_quark_plugin
class TestRemoveFloatingIPs(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, flip=None):
flip_model = None
if flip:
flip_model = models.IPAddress()
flip_model.update(flip)
with contextlib.nested(
mock.patch("quark.db.api.floating_ip_find"),
mock.patch("quark.db.api.floating_ip_disassociate_fixed_ip"),
mock.patch("quark.db.api.port_disassociate_ip"),
mock.patch("quark.db.api.ip_address_deallocate"),
mock.patch("quark.ipam.QuarkIpam.deallocate_ip_address"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".remove_floating_ip")
) as (flip_find, db_fixed_ip_disassoc, db_port_disassoc, db_dealloc,
mock_dealloc, mock_remove_flip):
flip_find.return_value = flip_model
yield
def test_delete_floating_by_ip_address_id(self):
flip = dict(id=1, address=3232235876, address_readable="192.168.1.100",
subnet_id=1, network_id=2, version=4, used_by_tenant_id=1,
network=dict(ipam_strategy="ANY"))
with self._stubs(flip=flip):
self.plugin.delete_floatingip(self.context, 1)
def test_delete_floating_by_when_ip_address_does_not_exists_fails(self):
with self._stubs():
with self.assertRaises(q_ex.FloatingIpNotFound):
self.plugin.delete_floatingip(self.context, 1)
class TestFloatingIPUtilityMethods(test_quark_plugin.TestQuarkPlugin):
def test_get_next_available_fixed_ip_with_single_fixed_ip(self):
port = models.Port()
port.update(dict(id=1))
fixed_ip_addr = netaddr.IPAddress('192.168.0.1')
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
port.ip_addresses.append(fixed_ip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1')
def test_get_next_available_fixed_ip_with_mult_fixed_ips(self):
port = models.Port()
port.update(dict(id=1))
for ip_addr in ["192.168.0.1", "192.168.0.2", "192.168.0.3"]:
fixed_ip_addr = netaddr.IPAddress(ip_addr)
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
port.ip_addresses.append(fixed_ip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1')
def test_get_next_available_fixed_ip_with_no_avail_fixed_ips(self):
port = models.Port()
port.update(dict(id=1))
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
flip_addr = netaddr.IPAddress("10.0.0.1")
flip = models.IPAddress()
flip.update(dict(address_type="floating",
address=int(flip_addr),
version=4,
address_readable=str(flip_addr),
allocated_at=datetime.datetime.now()))
flip.fixed_ip = fixed_ip
port.ip_addresses.append(fixed_ip)
port.ip_addresses.append(flip)
fixed_ip_addr = netaddr.IPAddress("192.168.0.2")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
flip_addr = netaddr.IPAddress("10.0.0.2")
flip = models.IPAddress()
flip.update(dict(address_type="floating",
address=int(flip_addr),
version=4,
address_readable=str(flip_addr),
allocated_at=datetime.datetime.now()))
flip.fixed_ip = fixed_ip
port.ip_addresses.append(fixed_ip)
port.ip_addresses.append(flip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip, None)
def test_get_next_available_fixed_ip_with_avail_fixed_ips(self):
port = models.Port()
port.update(dict(id=1))
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
flip_addr = netaddr.IPAddress("10.0.0.1")
flip = models.IPAddress()
flip.update(dict(address_type="floating",
address=int(flip_addr),
version=4,
address_readable=str(flip_addr),
allocated_at=datetime.datetime.now()))
flip.fixed_ip = fixed_ip
port.ip_addresses.append(fixed_ip)
port.ip_addresses.append(flip)
fixed_ip_addr = netaddr.IPAddress("192.168.0.2")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
port.ip_addresses.append(fixed_ip)
port.ip_addresses.append(flip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip["address_readable"], "192.168.0.2")
class TestCreateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, flip=None, port=None, ips=None, network=None):
port_model = None
if port:
port_model = models.Port()
port_model.update(dict(port=port))
if ips:
for ip in ips:
ip_model = models.IPAddress()
ip_model.update(ip)
addr_type = ip.get("address_type")
if addr_type == "floating" and "fixed_ip_addr" in ip:
fixed_ip = models.IPAddress()
fixed_ip.update(next(ip_addr for ip_addr in ips
if (ip_addr["address_readable"] ==
ip["fixed_ip_addr"])))
ip_model.fixed_ip = fixed_ip
port_model.ip_addresses.append(ip_model)
flip_model = None
if flip:
flip_model = models.IPAddress()
flip_model.update(flip)
net_model = None
if network:
net_model = models.Network()
net_model.update(network)
def _alloc_ip(context, new_addr, net_id, port_m, *args, **kwargs):
new_addr.append(flip_model)
def _port_assoc(context, ports, addr, enable_port=None):
addr.ports = ports
return addr
def _flip_fixed_ip_assoc(context, addr, fixed_ip):
addr.fixed_ip = fixed_ip
return addr
with contextlib.nested(
mock.patch("quark.db.api.floating_ip_find"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.port_find"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".register_floating_ip"),
mock.patch("quark.db.api.port_associate_ip"),
mock.patch("quark.db.api.floating_ip_associate_fixed_ip")
) as (flip_find, net_find, port_find, alloc_ip, mock_reg_flip,
port_assoc, fixed_ip_assoc):
flip_find.return_value = flip_model
net_find.return_value = net_model
port_find.return_value = port_model
alloc_ip.side_effect = _alloc_ip
port_assoc.side_effect = _port_assoc
fixed_ip_assoc.side_effect = _flip_fixed_ip_assoc
yield
def test_create_with_a_port(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(flip=floating_ip, port=port,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip["fixed_ip_address"], "192.168.0.1")
def test_create_without_a_port(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
with self._stubs(flip=floating_ip, port=None,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"], port_id=None)
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip.get("fixed_ip_address"), None)
def test_create_with_fixed_ip_specified(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ips = []
for ip_addr in ["192.168.0.1", "192.168.0.2"]:
fixed_ip_addr = netaddr.IPAddress(ip_addr)
fixed_ips.append(dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(flip=floating_ip, port=port,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"],
port_id=port["id"], fixed_ip_address="192.168.0.2")
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip["fixed_ip_address"], "192.168.0.2")
def test_create_with_floating_ip_specified(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
port = dict(id=2)
with self._stubs(flip=floating_ip, port=port,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"],
port_id=port["id"], floating_ip_address="10.0.0.1")
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip["fixed_ip_address"], "192.168.0.1")
def test_create_without_network_id_fails(self):
with self._stubs():
with self.assertRaises(ex.BadRequest):
request = dict(port_id=2, floating_ip_address="10.0.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_with_invalid_network_fails(self):
with self._stubs():
with self.assertRaises(ex.NetworkNotFound):
request = dict(floating_network_id=123,
port_id=2, floating_ip_address="10.0.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_with_invalid_port_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
with self._stubs(network=network):
with self.assertRaises(ex.PortNotFound):
request = dict(floating_network_id=network["id"],
port_id=2, floating_ip_address="10.0.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_with_invalid_fixed_ip_for_port_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=fixed_ips, network=network):
with self.assertRaises(
q_ex.FixedIpDoesNotExistsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"],
fixed_ip_address="192.168.0.2")
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["address_readable"], "10.0.0.1")
self.assertEqual(flip.fixed_ip["address_readable"],
"192.168.0.2")
def test_create_with_port_and_fixed_ip_with_existing_flip_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now(),
fixed_ip_addr="192.168.0.1")
ips = [fixed_ip, floating_ip]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=ips, network=network):
with self.assertRaises(
q_ex.PortAlreadyContainsFloatingIp):
request = dict(floating_network_id=network["id"],
port_id=port["id"],
fixed_ip_address="192.168.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_when_port_has_no_fixed_ips_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")<|fim▁hole|> with self._stubs(port=port, network=network):
with self.assertRaises(
q_ex.NoAvailableFixedIpsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_when_port_has_no_available_fixed_ips_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now(),
fixed_ip_addr="192.168.0.1")
ips = [fixed_ip, floating_ip]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=ips, network=network):
with self.assertRaises(
q_ex.NoAvailableFixedIpsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
class TestUpdateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, flip=None, curr_port=None, new_port=None, ips=None):
curr_port_model = None
if curr_port:
curr_port_model = models.Port()
curr_port_model.update(curr_port)
new_port_model = None
if new_port:
new_port_model = models.Port()
new_port_model.update(new_port)
if ips:
for ip in ips:
ip_model = models.IPAddress()
ip_model.update(ip)
addr_type = ip.get("address_type")
if addr_type == "floating" and "fixed_ip_addr" in ip:
fixed_ip = models.IPAddress()
fixed_ip.update(next(ip_addr for ip_addr in ips
if (ip_addr["address_readable"] ==
ip["fixed_ip_addr"])))
ip_model.fixed_ip = fixed_ip
new_port_model.ip_addresses.append(ip_model)
flip_model = None
if flip:
flip_model = models.IPAddress()
flip_model.update(flip)
if curr_port_model:
flip_model.ports = [curr_port_model]
fixed_ip = flip.get("fixed_ip_address")
if fixed_ip:
addr = netaddr.IPAddress(fixed_ip)
fixed_ip_model = models.IPAddress()
fixed_ip_model.update(dict(address_readable=fixed_ip,
address=int(addr), version=4,
address_type="fixed"))
flip_model.fixed_ip = fixed_ip_model
def _find_port(context, id, **kwargs):
return (curr_port_model if (curr_port_model and
id == curr_port_model.id)
else new_port_model)
def _flip_assoc(context, addr, fixed_ip):
addr.fixed_ip = fixed_ip
return addr
def _flip_disassoc(context, addr):
addr.fixed_ip = None
return addr
def _port_assoc(context, ports, addr, enable_ports=None):
addr.ports = ports
return addr
def _port_dessoc(context, ports, addr):
addr.associations = []
addr.ports = []
return addr
with contextlib.nested(
mock.patch("quark.db.api.floating_ip_find"),
mock.patch("quark.db.api.port_find"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".register_floating_ip"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".update_floating_ip"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".remove_floating_ip"),
mock.patch("quark.db.api.port_associate_ip"),
mock.patch("quark.db.api.port_disassociate_ip"),
mock.patch("quark.db.api.floating_ip_associate_fixed_ip"),
mock.patch("quark.db.api.floating_ip_disassociate_fixed_ip")
) as (flip_find, port_find, reg_flip, update_flip, rem_flip,
port_assoc, port_dessoc, flip_assoc, flip_dessoc):
flip_find.return_value = flip_model
port_find.side_effect = _find_port
port_assoc.side_effect = _port_assoc
port_dessoc.side_effect = _port_dessoc
flip_assoc.side_effect = _flip_assoc
flip_dessoc.side_effect = _flip_disassoc
yield
def test_update_with_new_port_and_no_previous_port(self):
new_port = dict(id="2")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
ips = [fixed_ip]
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port, ips=ips):
content = dict(port_id=new_port["id"])
ret = self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
self.assertEqual(ret["fixed_ip_address"], "192.168.0.1")
self.assertEqual(ret["port_id"], new_port["id"])
def test_update_with_new_port(self):
curr_port = dict(id="1")
new_port = dict(id="2")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
ips = [fixed_ip]
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, curr_port=curr_port,
new_port=new_port, ips=ips):
content = dict(port_id=new_port["id"])
ret = self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
self.assertEqual(ret["fixed_ip_address"], "192.168.0.1")
self.assertEqual(ret["port_id"], new_port["id"])
def test_update_with_no_port(self):
curr_port = dict(id="1")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, curr_port=curr_port):
content = dict(port_id=None)
ret = self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
self.assertEqual(ret.get("fixed_ip_address"), None)
self.assertEqual(ret.get("port_id"), None)
def test_update_with_non_existent_port_should_fail(self):
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip):
with self.assertRaises(ex.PortNotFound):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_port_with_no_fixed_ip_avail_should_fail(self):
new_port = dict(id="123")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port):
with self.assertRaises(q_ex.NoAvailableFixedIpsForPort):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_same_port_should_fail(self):
new_port = dict(id="123")
curr_port = dict(id="123")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port, curr_port=curr_port):
with self.assertRaises(q_ex.PortAlreadyAssociatedToFloatingIp):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_when_port_has_a_different_flip_should_fail(self):
new_port = dict(id="123")
floating_ip_addr = netaddr.IPAddress("192.168.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now())
ips = [floating_ip]
curr_port = dict(id="456")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port,
curr_port=curr_port, ips=ips):
with self.assertRaises(q_ex.PortAlreadyContainsFloatingIp):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_no_port_and_no_previous_port_should_fail(self):
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip):
with self.assertRaises(q_ex.FloatingIpUpdateNoPortIdSupplied):
content = dict(port_id=None)
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_missing_port_id_param_should_fail(self):
with self._stubs():
with self.assertRaises(ex.BadRequest):
content = {}
self.plugin.update_floatingip(self.context, "123",
dict(floatingip=content))<|fim▁end|> | |
<|file_name|>util.rs<|end_file_name|><|fim▁begin|>use parking_lot::Mutex;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tokio::task;
/// Unwraps an `Option`, returning `Ok(())` from the enclosing function on `None`.
#[macro_export]
macro_rules! try_opt_r {
($x:expr) => {
match $x {
Some(value) => value,
None => return Ok(()),
}
};
}
/// Unwraps an `Option`, returning early with `()` from the enclosing function on `None`.
#[macro_export]
macro_rules! try_opt_ret {
($x:expr) => {
match $x {
Some(value) => value,
None => return,
}
};
}
pub fn hm_from_mins<F>(total_mins: F) -> String
where
F: Into<f32>,
{<|fim▁hole|>
let hours = (total_mins / 60.0).floor() as u8;
let minutes = (total_mins % 60.0).floor() as u8;
format!("{:02}:{:02}H", hours, minutes)
}
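// Illustrative test for hm_from_mins (not part of the original file): given
// the format! above, 90 minutes should render as "01:30H". The sample values
// are assumptions chosen for the example.
#[cfg(test)]
mod hm_from_mins_tests {
    use super::*;

    #[test]
    fn formats_total_minutes_as_hours_and_minutes() {
        assert_eq!(hm_from_mins(90.0_f32), "01:30H");
        assert_eq!(hm_from_mins(5.0_f32), "00:05H");
    }
}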
pub type ArcMutex<T> = Arc<Mutex<T>>;
pub fn arc_mutex<T>(value: T) -> ArcMutex<T> {
Arc::new(Mutex::new(value))
}
pub struct ScopedTask<T>(task::JoinHandle<T>);
impl<T> Deref for ScopedTask<T> {
type Target = task::JoinHandle<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for ScopedTask<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T> From<task::JoinHandle<T>> for ScopedTask<T> {
fn from(task: task::JoinHandle<T>) -> Self {
Self(task)
}
}
impl<T> Drop for ScopedTask<T> {
fn drop(&mut self) {
self.0.abort();
}
}<|fim▁end|> | let total_mins = total_mins.into(); |
<|file_name|>0003_auto_20150703_0843.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('user', '0002_auto_20150703_0836'),
]
operations = [
migrations.AlterField(
model_name='user',
name='followers',<|fim▁hole|><|fim▁end|> | field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, related_name='followers_rel_+'),
),
] |
<|file_name|>selectionRendererFactory.js<|end_file_name|><|fim▁begin|>/**
* ag-grid - Advanced Data Grid / Data Table supporting Javascript / React / AngularJS / Web Components
* @version v4.1.5
* @link http://www.ag-grid.com/
* @license MIT
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
var context_1 = require("./context/context");
var rowNode_1 = require("./entities/rowNode");
var renderedRow_1 = require("./rendering/renderedRow");
var utils_1 = require('./utils');
var SelectionRendererFactory = (function () {
function SelectionRendererFactory() {
}
SelectionRendererFactory.prototype.createSelectionCheckbox = function (rowNode, addRenderedRowEventListener) {
var eCheckbox = document.createElement('input');
eCheckbox.type = "checkbox";
eCheckbox.name = "name";
eCheckbox.className = 'ag-selection-checkbox';
utils_1.Utils.setCheckboxState(eCheckbox, rowNode.isSelected());
eCheckbox.addEventListener('click', function (event) { return event.stopPropagation(); });
eCheckbox.addEventListener('change', function () {
var newValue = eCheckbox.checked;
            // both branches performed the identical call; a single call covers
            // the selected and deselected cases
            rowNode.setSelected(newValue);
});
var selectionChangedCallback = function () { return utils_1.Utils.setCheckboxState(eCheckbox, rowNode.isSelected()); };
rowNode.addEventListener(rowNode_1.RowNode.EVENT_ROW_SELECTED, selectionChangedCallback);
addRenderedRowEventListener(renderedRow_1.RenderedRow.EVENT_RENDERED_ROW_REMOVED, function () {
rowNode.removeEventListener(rowNode_1.RowNode.EVENT_ROW_SELECTED, selectionChangedCallback);
});
return eCheckbox;
};
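    // Usage sketch (hypothetical wiring, not part of the original file): a row
    // renderer would typically append the checkbox to a cell and forward row
    // lifecycle events so the selection listener gets cleaned up:
    //   var eCheckbox = factory.createSelectionCheckbox(rowNode, function (event, listener) {
    //       renderedRow.addEventListener(event, listener);
    //   });
    //   eCell.appendChild(eCheckbox);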
SelectionRendererFactory = __decorate([
context_1.Bean('selectionRendererFactory'), <|fim▁hole|>})();
exports.SelectionRendererFactory = SelectionRendererFactory;<|fim▁end|> | __metadata('design:paramtypes', [])
], SelectionRendererFactory);
return SelectionRendererFactory; |
<|file_name|>test_sync.py<|end_file_name|><|fim▁begin|>"""Test class for Custom Sync UI
:Requirement: Sync
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Repositories
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_string
from nailgun import entities
from robottelo import manifests
from robottelo.api.utils import enable_rhrepo_and_fetchid
from robottelo.constants import (
    DISTRO_RHEL6,
    DISTRO_RHEL7,
DOCKER_REGISTRY_HUB,
DOCKER_UPSTREAM_NAME,
FAKE_1_YUM_REPO,
FEDORA27_OSTREE_REPO,
REPOS,
REPOSET,
REPO_TYPE,
PRDS,
)
from robottelo.decorators import (
fixture,
run_in_one_thread,
skip_if_not_set,
tier2,
upgrade,
skip_if_bug_open,
)
from robottelo.decorators.host import skip_if_os
from robottelo.products import (
RepositoryCollection,
RHELCloudFormsTools,
SatelliteCapsuleRepository,
)
@fixture(scope='module')
def module_org():
return entities.Organization().create()
@fixture(scope='module')
def module_custom_product(module_org):
return entities.Product(organization=module_org).create()
@fixture(scope='module')
def module_org_with_manifest():
org = entities.Organization().create()
manifests.upload_manifest_locked(org.id)
return org
@tier2
def test_positive_sync_custom_repo(session, module_custom_product):<|fim▁hole|> :expectedresults: Sync procedure is successful
:CaseImportance: Critical
"""
repo = entities.Repository(
url=FAKE_1_YUM_REPO, product=module_custom_product).create()
with session:
results = session.sync_status.synchronize([
(module_custom_product.name, repo.name)])
assert len(results) == 1
assert results[0] == 'Syncing Complete.'
@run_in_one_thread
@skip_if_not_set('fake_manifest')
@tier2
@upgrade
def test_positive_sync_rh_repos(session, module_org_with_manifest):
"""Create Content RedHat Sync with two repos.
:id: e30f6509-0b65-4bcc-a522-b4f3089d3911
:expectedresults: Sync procedure for RedHat Repos is successful
:CaseLevel: Integration
"""
repos = (
SatelliteCapsuleRepository(cdn=True),
RHELCloudFormsTools(cdn=True)
)
distros = [DISTRO_RHEL7, DISTRO_RHEL6]
repo_collections = [
RepositoryCollection(distro=distro, repositories=[repo])
for distro, repo in zip(distros, repos)
]
for repo_collection in repo_collections:
repo_collection.setup(module_org_with_manifest.id, synchronize=False)
repo_paths = [
(
repo.repo_data['product'],
repo.repo_data.get('releasever'),
repo.repo_data.get('arch'),
repo.repo_data['name'],
)
for repo in repos
]
with session:
session.organization.select(org_name=module_org_with_manifest.name)
results = session.sync_status.synchronize(repo_paths)
assert len(results) == len(repo_paths)
assert all([result == 'Syncing Complete.' for result in results])
@skip_if_bug_open('bugzilla', 1625783)
@skip_if_os('RHEL6')
@tier2
@upgrade
def test_positive_sync_custom_ostree_repo(session, module_custom_product):
"""Create custom ostree repository and sync it.
:id: e4119b9b-0356-4661-a3ec-e5807224f7d2
:expectedresults: ostree repo should be synced successfully
:CaseLevel: Integration
"""
repo = entities.Repository(
content_type='ostree',
url=FEDORA27_OSTREE_REPO,
product=module_custom_product,
unprotected=False,
).create()
with session:
results = session.sync_status.synchronize([
(module_custom_product.name, repo.name)])
assert len(results) == 1
assert results[0] == 'Syncing Complete.'
@run_in_one_thread
@skip_if_bug_open('bugzilla', 1625783)
@skip_if_os('RHEL6')
@skip_if_not_set('fake_manifest')
@tier2
@upgrade
def test_positive_sync_rh_ostree_repo(session, module_org_with_manifest):
"""Sync CDN based ostree repository.
:id: 4d28fff0-5fda-4eee-aa0c-c5af02c31de5
:Steps:
1. Import a valid manifest
2. Enable the OStree repo and sync it
:expectedresults: ostree repo should be synced successfully from CDN
:CaseLevel: Integration
"""
enable_rhrepo_and_fetchid(
basearch=None,
org_id=module_org_with_manifest.id,
product=PRDS['rhah'],
repo=REPOS['rhaht']['name'],
reposet=REPOSET['rhaht'],
releasever=None,
)
with session:
session.organization.select(org_name=module_org_with_manifest.name)
results = session.sync_status.synchronize([
(PRDS['rhah'], REPOS['rhaht']['name'])])
assert len(results) == 1
assert results[0] == 'Syncing Complete.'
@tier2
@upgrade
def test_positive_sync_docker_via_sync_status(session, module_org):
"""Create custom docker repo and sync it via the sync status page.
:id: 00b700f4-7e52-48ed-98b2-e49b3be102f2
:expectedresults: Sync procedure for specific docker repository is
successful
:CaseLevel: Integration
"""
product = entities.Product(organization=module_org).create()
repo_name = gen_string('alphanumeric')
with session:
session.repository.create(
product.name,
{'name': repo_name,
'repo_type': REPO_TYPE['docker'],
'repo_content.upstream_url': DOCKER_REGISTRY_HUB,
'repo_content.upstream_repo_name': DOCKER_UPSTREAM_NAME}
)
assert session.repository.search(product.name, repo_name)[0]['Name'] == repo_name
result = session.sync_status.synchronize([(product.name, repo_name)])
assert result[0] == 'Syncing Complete.'<|fim▁end|> | """Create Content Custom Sync with minimal input parameters
:id: 00fb0b04-0293-42c2-92fa-930c75acee89
|
<|file_name|>test_myplugin.py<|end_file_name|><|fim▁begin|>#
# coding=utf-8
import cmd2_myplugin
from cmd2 import (
cmd2,
)
######
#
# define a class which uses our plugin and some convenience functions
#
######
class MyApp(cmd2_myplugin.MyPluginMixin, cmd2.Cmd):
"""Simple subclass of cmd2.Cmd with our SayMixin plugin included."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@cmd2_myplugin.empty_decorator
def do_empty(self, args):
self.poutput("running the empty command")
#
# You can't use a fixture to instantiate your app if you want to use
# to use the capsys fixture to capture the output. cmd2.Cmd sets
# internal variables to sys.stdout and sys.stderr on initialization
# and then uses those internal variables instead of sys.stdout. It does
# this so you can redirect output from within the app. The capsys fixture
# can't capture the output properly in this scenario.
#<|fim▁hole|>def init_app():
app = MyApp()
return app
#####
#
# unit tests
#
#####
def test_say(capsys):
# call our initialization function instead of using a fixture
app = init_app()
# run our mixed in command
app.onecmd_plus_hooks('say hello')
# use the capsys fixture to retrieve the output on stdout and stderr
out, err = capsys.readouterr()
# make our assertions
assert out == 'in postparsing hook\nhello\n'
assert not err
def test_decorator(capsys):
# call our initialization function instead of using a fixture
app = init_app()
# run one command in the app
app.onecmd_plus_hooks('empty')
# use the capsys fixture to retrieve the output on stdout and stderr
out, err = capsys.readouterr()
# make our assertions
assert out == 'in postparsing hook\nin the empty decorator\nrunning the empty command\n'
assert not err<|fim▁end|> | # If you have extensive initialization needs, create a function
# to initialize your cmd2 application.
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![crate_name = "webdriver_server"]
#![crate_type = "rlib"]
#![deny(unsafe_code)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde;
mod keys;
use base64;
use crate::keys::keycodes_to_keys;
use euclid::TypedSize2D;
use hyper::Method;
use image::{DynamicImage, ImageFormat, RgbImage};
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use msg::constellation_msg::{BrowsingContextId, TopLevelBrowsingContextId, TraversalDirection};
use net_traits::image::base::PixelFormat;
use regex::Captures;
use script_traits::webdriver_msg::{LoadStatus, WebDriverCookieError, WebDriverFrameId};
use script_traits::webdriver_msg::{
WebDriverJSError, WebDriverJSResult, WebDriverJSValue, WebDriverScriptCommand,
};
use script_traits::{ConstellationMsg, LoadData, WebDriverCommandMsg};
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use serde_json::{self, Value};
use servo_channel::Sender;
use servo_config::prefs::{PrefValue, PREFS};
use servo_url::ServoUrl;
use std::borrow::ToOwned;
use std::collections::BTreeMap;
use std::fmt;
use std::net::{SocketAddr, SocketAddrV4};
use std::thread;
use std::time::Duration;
use uuid::Uuid;
use webdriver::command::{
AddCookieParameters, GetParameters, JavascriptCommandParameters, LocatorParameters,
};
use webdriver::command::{SendKeysParameters, SwitchToFrameParameters, TimeoutsParameters};
use webdriver::command::{
WebDriverCommand, WebDriverExtensionCommand, WebDriverMessage, WindowRectParameters,
};
use webdriver::common::{Cookie, Date, LocatorStrategy, WebElement};
use webdriver::error::{ErrorStatus, WebDriverError, WebDriverResult};
use webdriver::httpapi::WebDriverExtensionRoute;
use webdriver::response::{CookieResponse, CookiesResponse};
use webdriver::response::{ElementRectResponse, NewSessionResponse, ValueResponse};
use webdriver::response::{WebDriverResponse, WindowRectResponse};
use webdriver::server::{self, Session, WebDriverHandler};
fn extension_routes() -> Vec<(Method, &'static str, ServoExtensionRoute)> {
return vec![
(
Method::POST,
"/session/{sessionId}/servo/prefs/get",
ServoExtensionRoute::GetPrefs,
),
(
Method::POST,
"/session/{sessionId}/servo/prefs/set",
ServoExtensionRoute::SetPrefs,
),
(
Method::POST,
"/session/{sessionId}/servo/prefs/reset",
ServoExtensionRoute::ResetPrefs,
),
];
}
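// Example request against the first route above (the session id and pref name
// are hypothetical):
//   POST /session/7f2c0b3a/servo/prefs/get
//   {"prefs": ["shell.homepage"]}
// The server dispatches this to ServoExtensionRoute::GetPrefs, whose body is
// parsed into GetPrefsParameters further below.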
fn cookie_msg_to_cookie(cookie: cookie::Cookie) -> Cookie {
Cookie {
name: cookie.name().to_owned(),
value: cookie.value().to_owned(),
path: cookie.path().map(|s| s.to_owned()),
domain: cookie.domain().map(|s| s.to_owned()),
expiry: cookie
.expires()
.map(|time| Date(time.to_timespec().sec as u64)),
secure: cookie.secure().unwrap_or(false),
httpOnly: cookie.http_only().unwrap_or(false),
}
}
pub fn start_server(port: u16, constellation_chan: Sender<ConstellationMsg>) {
let handler = Handler::new(constellation_chan);
thread::Builder::new()
.name("WebdriverHttpServer".to_owned())
.spawn(move || {
let address = SocketAddrV4::new("0.0.0.0".parse().unwrap(), port);
match server::start(SocketAddr::V4(address), handler, &extension_routes()) {
Ok(listening) => info!("WebDriver server listening on {}", listening.socket),
Err(_) => panic!("Unable to start WebDriver HTTPD server"),
}
})
.expect("Thread spawning failed");
}
/// Represents the current WebDriver session and holds relevant session state.
struct WebDriverSession {
id: Uuid,
browsing_context_id: BrowsingContextId,
top_level_browsing_context_id: TopLevelBrowsingContextId,
    /// Time in milliseconds to wait for injected scripts to run before
    /// interrupting them.
    script_timeout: u64,
/// Time to wait for a page to finish loading upon navigation.
load_timeout: u64,
/// Time to wait for the element location strategy when retrieving elements, and when
/// waiting for an element to become interactable.
implicit_wait_timeout: u64,
}
impl WebDriverSession {
pub fn new(
browsing_context_id: BrowsingContextId,
top_level_browsing_context_id: TopLevelBrowsingContextId,
) -> WebDriverSession {
WebDriverSession {
id: Uuid::new_v4(),
browsing_context_id: browsing_context_id,
top_level_browsing_context_id: top_level_browsing_context_id,
script_timeout: 30_000,
load_timeout: 300_000,
implicit_wait_timeout: 0,
}
}
}
struct Handler {
session: Option<WebDriverSession>,
constellation_chan: Sender<ConstellationMsg>,
resize_timeout: u32,
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum ServoExtensionRoute {
GetPrefs,
SetPrefs,
ResetPrefs,
}
impl WebDriverExtensionRoute for ServoExtensionRoute {
type Command = ServoExtensionCommand;
fn command(
&self,
_captures: &Captures,
body_data: &Value,
) -> WebDriverResult<WebDriverCommand<ServoExtensionCommand>> {
let command = match *self {
ServoExtensionRoute::GetPrefs => {
let parameters: GetPrefsParameters = serde_json::from_value(body_data.clone())?;
ServoExtensionCommand::GetPrefs(parameters)
},
ServoExtensionRoute::SetPrefs => {
let parameters: SetPrefsParameters = serde_json::from_value(body_data.clone())?;
ServoExtensionCommand::SetPrefs(parameters)
},
ServoExtensionRoute::ResetPrefs => {
let parameters: GetPrefsParameters = serde_json::from_value(body_data.clone())?;
ServoExtensionCommand::ResetPrefs(parameters)
},
};
Ok(WebDriverCommand::Extension(command))
}
}
#[derive(Clone, Debug, PartialEq)]
enum ServoExtensionCommand {
GetPrefs(GetPrefsParameters),
SetPrefs(SetPrefsParameters),
ResetPrefs(GetPrefsParameters),
}
impl WebDriverExtensionCommand for ServoExtensionCommand {
fn parameters_json(&self) -> Option<Value> {
match *self {
ServoExtensionCommand::GetPrefs(ref x) => serde_json::to_value(x).ok(),
ServoExtensionCommand::SetPrefs(ref x) => serde_json::to_value(x).ok(),
ServoExtensionCommand::ResetPrefs(ref x) => serde_json::to_value(x).ok(),
}
}
}
struct SendableWebDriverJSValue(pub WebDriverJSValue);
impl Serialize for SendableWebDriverJSValue {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self.0 {
WebDriverJSValue::Undefined => serializer.serialize_unit(),
WebDriverJSValue::Null => serializer.serialize_unit(),
WebDriverJSValue::Boolean(x) => serializer.serialize_bool(x),
WebDriverJSValue::Number(x) => serializer.serialize_f64(x),
WebDriverJSValue::String(ref x) => serializer.serialize_str(&x),
}
}
}
#[derive(Clone, Debug, PartialEq)]
struct WebDriverPrefValue(pub PrefValue);
impl Serialize for WebDriverPrefValue {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self.0 {
PrefValue::Boolean(b) => serializer.serialize_bool(b),
PrefValue::String(ref s) => serializer.serialize_str(&s),
PrefValue::Number(f) => serializer.serialize_f64(f),
PrefValue::Missing => serializer.serialize_unit(),
}
}
}
impl<'de> Deserialize<'de> for WebDriverPrefValue {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor;
impl<'de> ::serde::de::Visitor<'de> for Visitor {
type Value = WebDriverPrefValue;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("preference value")
}
fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
where
E: ::serde::de::Error,
{
Ok(WebDriverPrefValue(PrefValue::Number(value)))
}
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
where
E: ::serde::de::Error,
{
Ok(WebDriverPrefValue(PrefValue::Number(value as f64)))
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: ::serde::de::Error,
{
Ok(WebDriverPrefValue(PrefValue::Number(value as f64)))
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: ::serde::de::Error,
{
Ok(WebDriverPrefValue(PrefValue::String(value.to_owned())))
}
fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E>
where
E: ::serde::de::Error,
{
Ok(WebDriverPrefValue(PrefValue::Boolean(value)))
}
}
deserializer.deserialize_any(Visitor)
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
struct GetPrefsParameters {
prefs: Vec<String>,
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
struct SetPrefsParameters {
#[serde(deserialize_with = "map_to_vec")]
prefs: Vec<(String, WebDriverPrefValue)>,
}
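// On the wire, `prefs` is a JSON object rather than a list of pairs;
// `map_to_vec` below flattens it into tuples. An illustrative payload (pref
// names and values are assumptions):
//   {"prefs": {"shell.homepage": "about:blank", "some.flag": false}}
// deserializes to
//   vec![("shell.homepage", String(..)), ("some.flag", Boolean(false))].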
fn map_to_vec<'de, D>(de: D) -> Result<Vec<(String, WebDriverPrefValue)>, D::Error>
where
D: Deserializer<'de>,
{
de.deserialize_map(TupleVecMapVisitor)
}
struct TupleVecMapVisitor;
impl<'de> Visitor<'de> for TupleVecMapVisitor {
type Value = Vec<(String, WebDriverPrefValue)>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a map")
}
#[inline]
fn visit_unit<E>(self) -> Result<Self::Value, E> {
Ok(Vec::new())
}
#[inline]
fn visit_map<T>(self, mut access: T) -> Result<Self::Value, T::Error>
where
T: MapAccess<'de>,
{
let mut values = Vec::new();
while let Some((key, value)) = access.next_entry()? {
values.push((key, value));
}
Ok(values)
}
}
impl Handler {
pub fn new(constellation_chan: Sender<ConstellationMsg>) -> Handler {
Handler {
session: None,
constellation_chan: constellation_chan,
resize_timeout: 500,
}
}
fn focus_top_level_browsing_context_id(&self) -> WebDriverResult<TopLevelBrowsingContextId> {
debug!("Getting focused context.");
let interval = 20;
let iterations = 30_000 / interval;
let (sender, receiver) = ipc::channel().unwrap();
for _ in 0..iterations {
let msg = ConstellationMsg::GetFocusTopLevelBrowsingContext(sender.clone());
self.constellation_chan.send(msg).unwrap();
// Wait until the document is ready before returning the top-level browsing context id.
if let Some(x) = receiver.recv().unwrap() {
debug!("Focused context is {}", x);
return Ok(x);
}
thread::sleep(Duration::from_millis(interval));
}
debug!("Timed out getting focused context.");
Err(WebDriverError::new(
ErrorStatus::Timeout,
"Failed to get window handle",
))
}
fn session(&self) -> WebDriverResult<&WebDriverSession> {
match self.session {
Some(ref x) => Ok(x),
None => Err(WebDriverError::new(
ErrorStatus::SessionNotCreated,
"Session not created",
)),
}
}
fn session_mut(&mut self) -> WebDriverResult<&mut WebDriverSession> {
match self.session {
Some(ref mut x) => Ok(x),
None => Err(WebDriverError::new(
ErrorStatus::SessionNotCreated,
"Session not created",
)),
}
}
fn handle_new_session(&mut self) -> WebDriverResult<WebDriverResponse> {
debug!("new session");
if self.session.is_none() {
let top_level_browsing_context_id = self.focus_top_level_browsing_context_id()?;
let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id);
let session = WebDriverSession::new(browsing_context_id, top_level_browsing_context_id);
let mut capabilities = serde_json::Map::new();
capabilities.insert("browserName".to_owned(), serde_json::to_value("servo")?);
capabilities.insert("browserVersion".to_owned(), serde_json::to_value("0.0.1")?);
capabilities.insert(
"acceptInsecureCerts".to_owned(),
serde_json::to_value(false)?,
);
let response =
NewSessionResponse::new(session.id.to_string(), Value::Object(capabilities));
debug!("new session created {}.", session.id);
self.session = Some(session);
Ok(WebDriverResponse::NewSession(response))
} else {
debug!("new session failed.");
Err(WebDriverError::new(
ErrorStatus::UnknownError,
"Session already created",
))
}
}
fn handle_delete_session(&mut self) -> WebDriverResult<WebDriverResponse> {
self.session = None;
Ok(WebDriverResponse::DeleteSession)
}
fn browsing_context_script_command(
&self,
cmd_msg: WebDriverScriptCommand,
) -> WebDriverResult<()> {
let browsing_context_id = self.session()?.browsing_context_id;
let msg = ConstellationMsg::WebDriverCommand(WebDriverCommandMsg::ScriptCommand(
browsing_context_id,
cmd_msg,
));
self.constellation_chan.send(msg).unwrap();
Ok(())
}
fn top_level_script_command(&self, cmd_msg: WebDriverScriptCommand) -> WebDriverResult<()> {
let browsing_context_id =
BrowsingContextId::from(self.session()?.top_level_browsing_context_id);
let msg = ConstellationMsg::WebDriverCommand(WebDriverCommandMsg::ScriptCommand(
browsing_context_id,
cmd_msg,
));
self.constellation_chan.send(msg).unwrap();
Ok(())
}
fn handle_get(&self, parameters: &GetParameters) -> WebDriverResult<WebDriverResponse> {
let url = match ServoUrl::parse(¶meters.url[..]) {
Ok(url) => url,
Err(_) => {
return Err(WebDriverError::new(
ErrorStatus::InvalidArgument,
"Invalid URL",
))
},
};
let top_level_browsing_context_id = self.session()?.top_level_browsing_context_id;
let (sender, receiver) = ipc::channel().unwrap();
let load_data = LoadData::new(url, None, None, None);
let cmd_msg =
WebDriverCommandMsg::LoadUrl(top_level_browsing_context_id, load_data, sender.clone());
self.constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
self.wait_for_load(sender, receiver)
}
fn wait_for_load(
&self,
sender: IpcSender<LoadStatus>,
receiver: IpcReceiver<LoadStatus>,
) -> WebDriverResult<WebDriverResponse> {
let timeout = self.session()?.load_timeout;
thread::spawn(move || {
thread::sleep(Duration::from_millis(timeout));
let _ = sender.send(LoadStatus::LoadTimeout);
});
// wait to get a load event
match receiver.recv().unwrap() {
LoadStatus::LoadComplete => Ok(WebDriverResponse::Void),
LoadStatus::LoadTimeout => {
Err(WebDriverError::new(ErrorStatus::Timeout, "Load timed out"))
},
}
}
fn handle_current_url(&self) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
self.top_level_script_command(WebDriverScriptCommand::GetUrl(sender))?;
let url = receiver.recv().unwrap();
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(url.as_str())?,
)))
}
fn handle_window_size(&self) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let top_level_browsing_context_id = self.session()?.top_level_browsing_context_id;
let cmd_msg = WebDriverCommandMsg::GetWindowSize(top_level_browsing_context_id, sender);
self.constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
let window_size = receiver.recv().unwrap();
let vp = window_size.initial_viewport;
let window_size_response = WindowRectResponse {
x: 0,
y: 0,
width: vp.width as i32,
height: vp.height as i32,
};
Ok(WebDriverResponse::WindowRect(window_size_response))
}
fn handle_set_window_size(
&self,
params: &WindowRectParameters,
) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
        let width = params.width.unwrap_or(0);
        let height = params.height.unwrap_or(0);
let size = TypedSize2D::new(width as u32, height as u32);
let top_level_browsing_context_id = self.session()?.top_level_browsing_context_id;
let cmd_msg =
WebDriverCommandMsg::SetWindowSize(top_level_browsing_context_id, size, sender.clone());
self.constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
let timeout = self.resize_timeout;
let constellation_chan = self.constellation_chan.clone();
thread::spawn(move || {
// On timeout, we send a GetWindowSize message to the constellation,
// which will give the current window size.
thread::sleep(Duration::from_millis(timeout as u64));
let cmd_msg = WebDriverCommandMsg::GetWindowSize(top_level_browsing_context_id, sender);
constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
});
let window_size = receiver.recv().unwrap();
let vp = window_size.initial_viewport;
let window_size_response = WindowRectResponse {
x: 0,
y: 0,
width: vp.width as i32,
height: vp.height as i32,
};
Ok(WebDriverResponse::WindowRect(window_size_response))
}
fn handle_is_enabled(&self, element: &WebElement) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
self.top_level_script_command(WebDriverScriptCommand::IsEnabled(
element.id.clone(),
sender,
))?;
match receiver.recv().unwrap() {
Ok(is_enabled) => Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(is_enabled)?,
))),
Err(_) => Err(WebDriverError::new(
ErrorStatus::StaleElementReference,
"Element not found",
)),
}
}
fn handle_is_selected(&self, element: &WebElement) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
self.top_level_script_command(WebDriverScriptCommand::IsSelected(
element.id.clone(),
sender,
))?;
match receiver.recv().unwrap() {
Ok(is_selected) => Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(is_selected)?,
))),
Err(_) => Err(WebDriverError::new(
ErrorStatus::StaleElementReference,
"Element not found",
)),
}
}
fn handle_go_back(&self) -> WebDriverResult<WebDriverResponse> {
let top_level_browsing_context_id = self.session()?.top_level_browsing_context_id;
let direction = TraversalDirection::Back(1);
let msg = ConstellationMsg::TraverseHistory(top_level_browsing_context_id, direction);
self.constellation_chan.send(msg).unwrap();
Ok(WebDriverResponse::Void)
}
fn handle_go_forward(&self) -> WebDriverResult<WebDriverResponse> {
let top_level_browsing_context_id = self.session()?.top_level_browsing_context_id;
let direction = TraversalDirection::Forward(1);
let msg = ConstellationMsg::TraverseHistory(top_level_browsing_context_id, direction);
self.constellation_chan.send(msg).unwrap();
Ok(WebDriverResponse::Void)
}
fn handle_refresh(&self) -> WebDriverResult<WebDriverResponse> {
let top_level_browsing_context_id = self.session()?.top_level_browsing_context_id;
let (sender, receiver) = ipc::channel().unwrap();
let cmd_msg = WebDriverCommandMsg::Refresh(top_level_browsing_context_id, sender.clone());
self.constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
self.wait_for_load(sender, receiver)
}
fn handle_title(&self) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
self.top_level_script_command(WebDriverScriptCommand::GetTitle(sender))?;
let value = receiver.recv().unwrap();
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(value)?,
)))
}
fn handle_window_handle(&self) -> WebDriverResult<WebDriverResponse> {
// For now we assume there's only one window so just use the session
// id as the window id
        let handle = self.session()?.id.to_string();
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(handle)?,
)))
}
fn handle_window_handles(&self) -> WebDriverResult<WebDriverResponse> {
// For now we assume there's only one window so just use the session
// id as the window id
        let handles = vec![serde_json::to_value(self.session()?.id.to_string())?];
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(handles)?,
)))
}
fn handle_find_element(
&self,
parameters: &LocatorParameters,
) -> WebDriverResult<WebDriverResponse> {
if parameters.using != LocatorStrategy::CSSSelector {
return Err(WebDriverError::new(
ErrorStatus::UnsupportedOperation,
"Unsupported locator strategy",
));
}
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::FindElementCSS(parameters.value.clone(), sender);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(value) => {
let value_resp = serde_json::to_value(
value.map(|x| serde_json::to_value(WebElement::new(x)).unwrap()),
)?;
Ok(WebDriverResponse::Generic(ValueResponse(value_resp)))
},
Err(_) => Err(WebDriverError::new(
ErrorStatus::InvalidSelector,
"Invalid selector",
)),
}
}
fn handle_switch_to_frame(
&mut self,
parameters: &SwitchToFrameParameters,
) -> WebDriverResult<WebDriverResponse> {
use webdriver::common::FrameId;
let frame_id = match parameters.id {
None => {
let session = self.session_mut()?;
session.browsing_context_id =
BrowsingContextId::from(session.top_level_browsing_context_id);
return Ok(WebDriverResponse::Void);
},
Some(FrameId::Short(ref x)) => WebDriverFrameId::Short(*x),
Some(FrameId::Element(ref x)) => WebDriverFrameId::Element(x.id.clone()),
};
self.switch_to_frame(frame_id)
}
fn handle_switch_to_parent_frame(&mut self) -> WebDriverResult<WebDriverResponse> {
self.switch_to_frame(WebDriverFrameId::Parent)
}
fn switch_to_frame(
&mut self,
frame_id: WebDriverFrameId,
) -> WebDriverResult<WebDriverResponse> {
if let WebDriverFrameId::Short(_) = frame_id {
return Err(WebDriverError::new(
ErrorStatus::UnsupportedOperation,
"Selecting frame by id not supported",
));
}
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetBrowsingContextId(frame_id, sender);
self.browsing_context_script_command(cmd)?;
        let browsing_context_id = receiver.recv().unwrap().map_err(|_| {
            WebDriverError::new(ErrorStatus::NoSuchFrame, "Frame does not exist")
        })?;
self.session_mut()?.browsing_context_id = browsing_context_id;
Ok(WebDriverResponse::Void)
}
fn handle_find_elements(
&self,
parameters: &LocatorParameters,
) -> WebDriverResult<WebDriverResponse> {
if parameters.using != LocatorStrategy::CSSSelector {
return Err(WebDriverError::new(
ErrorStatus::UnsupportedOperation,
"Unsupported locator strategy",
));
}
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::FindElementsCSS(parameters.value.clone(), sender);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(value) => {
let resp_value: Vec<Value> = value
.into_iter()
.map(|x| serde_json::to_value(WebElement::new(x)).unwrap())
.collect();
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(resp_value)?,
)))
},
Err(_) => Err(WebDriverError::new(
ErrorStatus::InvalidSelector,
"Invalid selector",
)),
}
}
// https://w3c.github.io/webdriver/webdriver-spec.html#get-element-rect
fn handle_element_rect(&self, element: &WebElement) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetElementRect(element.id.clone(), sender);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(rect) => {
let response = ElementRectResponse {
x: rect.origin.x,
y: rect.origin.y,
width: rect.size.width,
height: rect.size.height,
};
Ok(WebDriverResponse::ElementRect(response))
},
Err(_) => Err(WebDriverError::new(
ErrorStatus::StaleElementReference,
"Unable to find element in document",
)),
}
}
fn handle_element_text(&self, element: &WebElement) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetElementText(element.id.clone(), sender);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(value) => Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(value)?,
))),
Err(_) => Err(WebDriverError::new(
ErrorStatus::StaleElementReference,
"Unable to find element in document",
)),
}
}
fn handle_active_element(&self) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetActiveElement(sender);
self.browsing_context_script_command(cmd)?;
let value = receiver
.recv()
.unwrap()
.map(|x| serde_json::to_value(WebElement::new(x)).unwrap());
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(value)?,
)))
}
fn handle_element_tag_name(&self, element: &WebElement) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetElementTagName(element.id.clone(), sender);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(value) => Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(value)?,
))),
Err(_) => Err(WebDriverError::new(
ErrorStatus::StaleElementReference,
"Unable to find element in document",
)),
}
}<|fim▁hole|> element: &WebElement,
name: &str,
) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetElementAttribute(
element.id.clone(),
name.to_owned(),
sender,
);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(value) => Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(value)?,
))),
Err(_) => Err(WebDriverError::new(
ErrorStatus::StaleElementReference,
"Unable to find element in document",
)),
}
}
fn handle_element_css(
&self,
element: &WebElement,
name: &str,
) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd =
WebDriverScriptCommand::GetElementCSS(element.id.clone(), name.to_owned(), sender);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(value) => Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(value)?,
))),
Err(_) => Err(WebDriverError::new(
ErrorStatus::StaleElementReference,
"Unable to find element in document",
)),
}
}
fn handle_get_cookies(&self) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetCookies(sender);
self.browsing_context_script_command(cmd)?;
let cookies = receiver.recv().unwrap();
let response = cookies
.into_iter()
.map(|cookie| cookie_msg_to_cookie(cookie.into_inner()))
.collect::<Vec<Cookie>>();
Ok(WebDriverResponse::Cookies(CookiesResponse(response)))
}
fn handle_get_cookie(&self, name: &str) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::GetCookie(name.to_owned(), sender);
self.browsing_context_script_command(cmd)?;
let cookies = receiver.recv().unwrap();
        // `.next().unwrap()` would panic when no cookie matches `name`;
        // surface a WebDriver error instead.
        let response = cookies
            .into_iter()
            .map(|cookie| cookie_msg_to_cookie(cookie.into_inner()))
            .next()
            .ok_or_else(|| WebDriverError::new(ErrorStatus::NoSuchCookie, "Cookie not found"))?;
Ok(WebDriverResponse::Cookie(CookieResponse(response)))
}
fn handle_add_cookie(
&self,
params: &AddCookieParameters,
) -> WebDriverResult<WebDriverResponse> {
let (sender, receiver) = ipc::channel().unwrap();
let cookie = cookie::Cookie::build(params.name.to_owned(), params.value.to_owned())
.secure(params.secure)
.http_only(params.httpOnly);
let cookie = match params.domain {
Some(ref domain) => cookie.domain(domain.to_owned()),
_ => cookie,
};
let cookie = match params.path {
Some(ref path) => cookie.path(path.to_owned()).finish(),
_ => cookie.finish(),
};
let cmd = WebDriverScriptCommand::AddCookie(cookie, sender);
self.browsing_context_script_command(cmd)?;
match receiver.recv().unwrap() {
Ok(_) => Ok(WebDriverResponse::Void),
Err(response) => match response {
WebDriverCookieError::InvalidDomain => Err(WebDriverError::new(
ErrorStatus::InvalidCookieDomain,
"Invalid cookie domain",
)),
WebDriverCookieError::UnableToSetCookie => Err(WebDriverError::new(
ErrorStatus::UnableToSetCookie,
"Unable to set cookie",
)),
},
}
}
fn handle_set_timeouts(
&mut self,
parameters: &TimeoutsParameters,
) -> WebDriverResult<WebDriverResponse> {
        let session = self.session_mut()?;
if let Some(timeout) = parameters.script {
session.script_timeout = timeout
}
if let Some(timeout) = parameters.page_load {
session.load_timeout = timeout
}
if let Some(timeout) = parameters.implicit {
session.implicit_wait_timeout = timeout
}
Ok(WebDriverResponse::Void)
}
fn handle_execute_script(
&self,
parameters: &JavascriptCommandParameters,
) -> WebDriverResult<WebDriverResponse> {
let func_body = ¶meters.script;
let args_string = "";
// This is pretty ugly; we really want something that acts like
// new Function() and then takes the resulting function and executes
// it with a vec of arguments.
let script = format!("(function() {{ {} }})({})", func_body, args_string);
let (sender, receiver) = ipc::channel().unwrap();
let command = WebDriverScriptCommand::ExecuteScript(script, sender);
self.browsing_context_script_command(command)?;
let result = receiver.recv().unwrap();
self.postprocess_js_result(result)
}
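    // For reference, the format! above turns a submitted script body such as
    // "return 1 + 1;" into an immediately-invoked function:
    //   (function() { return 1 + 1; })()
    // which is why bare `return` statements in the script are legal.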
fn handle_execute_async_script(
&self,
parameters: &JavascriptCommandParameters,
) -> WebDriverResult<WebDriverResponse> {
let func_body = ¶meters.script;
let args_string = "window.webdriverCallback";
let script = format!(
"setTimeout(webdriverTimeout, {}); (function(callback) {{ {} }})({})",
self.session()?.script_timeout,
func_body,
args_string
);
let (sender, receiver) = ipc::channel().unwrap();
let command = WebDriverScriptCommand::ExecuteAsyncScript(script, sender);
self.browsing_context_script_command(command)?;
let result = receiver.recv().unwrap();
self.postprocess_js_result(result)
}
fn postprocess_js_result(
&self,
result: WebDriverJSResult,
) -> WebDriverResult<WebDriverResponse> {
match result {
Ok(value) => Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(SendableWebDriverJSValue(value))?,
))),
Err(WebDriverJSError::Timeout) => Err(WebDriverError::new(ErrorStatus::Timeout, "")),
Err(WebDriverJSError::UnknownType) => Err(WebDriverError::new(
ErrorStatus::UnsupportedOperation,
"Unsupported return type",
)),
Err(WebDriverJSError::BrowsingContextNotFound) => Err(WebDriverError::new(
ErrorStatus::JavascriptError,
"Pipeline id not found in browsing context",
)),
}
}
fn handle_element_send_keys(
&self,
element: &WebElement,
keys: &SendKeysParameters,
) -> WebDriverResult<WebDriverResponse> {
let browsing_context_id = self.session()?.browsing_context_id;
let (sender, receiver) = ipc::channel().unwrap();
let cmd = WebDriverScriptCommand::FocusElement(element.id.clone(), sender);
let cmd_msg = WebDriverCommandMsg::ScriptCommand(browsing_context_id, cmd);
self.constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
// TODO: distinguish the not found and not focusable cases
        receiver.recv().unwrap().map_err(|_| {
            WebDriverError::new(
                ErrorStatus::StaleElementReference,
                "Element not found or not focusable",
            )
        })?;
let keys = keycodes_to_keys(&keys.text);
// TODO: there's a race condition caused by the focus command and the
// send keys command being two separate messages,
// so the constellation may have changed state between them.
let cmd_msg = WebDriverCommandMsg::SendKeys(browsing_context_id, keys);
self.constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
Ok(WebDriverResponse::Void)
}
fn handle_take_screenshot(&self) -> WebDriverResult<WebDriverResponse> {
let mut img = None;
let top_level_id = self.session()?.top_level_browsing_context_id;
let interval = 1000;
let iterations = 30_000 / interval;
for _ in 0..iterations {
let (sender, receiver) = ipc::channel().unwrap();
let cmd_msg = WebDriverCommandMsg::TakeScreenshot(top_level_id, sender);
self.constellation_chan
.send(ConstellationMsg::WebDriverCommand(cmd_msg))
.unwrap();
if let Some(x) = receiver.recv().unwrap() {
img = Some(x);
break;
};
thread::sleep(Duration::from_millis(interval))
}
let img = match img {
Some(img) => img,
None => {
return Err(WebDriverError::new(
ErrorStatus::Timeout,
"Taking screenshot timed out",
))
},
};
// The compositor always sends RGB pixels.
assert_eq!(
img.format,
PixelFormat::RGB8,
"Unexpected screenshot pixel format"
);
let rgb = RgbImage::from_raw(img.width, img.height, img.bytes.to_vec()).unwrap();
let mut png_data = Vec::new();
DynamicImage::ImageRgb8(rgb)
.write_to(&mut png_data, ImageFormat::PNG)
.unwrap();
let encoded = base64::encode(&png_data);
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(encoded)?,
)))
}
fn handle_get_prefs(
&self,
parameters: &GetPrefsParameters,
) -> WebDriverResult<WebDriverResponse> {
let prefs = parameters
.prefs
.iter()
.map(|item| (item.clone(), serde_json::to_value(PREFS.get(item)).unwrap()))
.collect::<BTreeMap<_, _>>();
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(prefs)?,
)))
}
fn handle_set_prefs(
&self,
parameters: &SetPrefsParameters,
) -> WebDriverResult<WebDriverResponse> {
for &(ref key, ref value) in parameters.prefs.iter() {
PREFS.set(key, value.0.clone());
}
Ok(WebDriverResponse::Void)
}
fn handle_reset_prefs(
&self,
parameters: &GetPrefsParameters,
) -> WebDriverResult<WebDriverResponse> {
        let prefs = if parameters.prefs.is_empty() {
PREFS.reset_all();
BTreeMap::new()
} else {
parameters
.prefs
.iter()
.map(|item| {
(
item.clone(),
serde_json::to_value(PREFS.reset(item)).unwrap(),
)
})
.collect::<BTreeMap<_, _>>()
};
Ok(WebDriverResponse::Generic(ValueResponse(
serde_json::to_value(prefs)?,
)))
}
}
impl WebDriverHandler<ServoExtensionRoute> for Handler {
fn handle_command(
&mut self,
_session: &Option<Session>,
msg: WebDriverMessage<ServoExtensionRoute>,
) -> WebDriverResult<WebDriverResponse> {
// Unless we are trying to create a new session, we need to ensure that a
// session has previously been created
match msg.command {
WebDriverCommand::NewSession(_) => {},
_ => {
self.session()?;
},
}
match msg.command {
WebDriverCommand::NewSession(_) => self.handle_new_session(),
WebDriverCommand::DeleteSession => self.handle_delete_session(),
WebDriverCommand::AddCookie(ref parameters) => self.handle_add_cookie(parameters),
WebDriverCommand::Get(ref parameters) => self.handle_get(parameters),
WebDriverCommand::GetCurrentUrl => self.handle_current_url(),
WebDriverCommand::GetWindowRect => self.handle_window_size(),
WebDriverCommand::SetWindowRect(ref size) => self.handle_set_window_size(size),
WebDriverCommand::IsEnabled(ref element) => self.handle_is_enabled(element),
WebDriverCommand::IsSelected(ref element) => self.handle_is_selected(element),
WebDriverCommand::GoBack => self.handle_go_back(),
WebDriverCommand::GoForward => self.handle_go_forward(),
WebDriverCommand::Refresh => self.handle_refresh(),
WebDriverCommand::GetTitle => self.handle_title(),
WebDriverCommand::GetWindowHandle => self.handle_window_handle(),
WebDriverCommand::GetWindowHandles => self.handle_window_handles(),
WebDriverCommand::SwitchToFrame(ref parameters) => {
self.handle_switch_to_frame(parameters)
},
WebDriverCommand::SwitchToParentFrame => self.handle_switch_to_parent_frame(),
WebDriverCommand::FindElement(ref parameters) => self.handle_find_element(parameters),
WebDriverCommand::FindElements(ref parameters) => self.handle_find_elements(parameters),
WebDriverCommand::GetNamedCookie(ref name) => self.handle_get_cookie(name),
WebDriverCommand::GetCookies => self.handle_get_cookies(),
WebDriverCommand::GetActiveElement => self.handle_active_element(),
WebDriverCommand::GetElementRect(ref element) => self.handle_element_rect(element),
WebDriverCommand::GetElementText(ref element) => self.handle_element_text(element),
WebDriverCommand::GetElementTagName(ref element) => {
self.handle_element_tag_name(element)
},
WebDriverCommand::GetElementAttribute(ref element, ref name) => {
self.handle_element_attribute(element, name)
},
WebDriverCommand::GetCSSValue(ref element, ref name) => {
self.handle_element_css(element, name)
},
WebDriverCommand::ExecuteScript(ref x) => self.handle_execute_script(x),
WebDriverCommand::ExecuteAsyncScript(ref x) => self.handle_execute_async_script(x),
WebDriverCommand::ElementSendKeys(ref element, ref keys) => {
self.handle_element_send_keys(element, keys)
},
WebDriverCommand::SetTimeouts(ref x) => self.handle_set_timeouts(x),
WebDriverCommand::TakeScreenshot => self.handle_take_screenshot(),
WebDriverCommand::Extension(ref extension) => match *extension {
ServoExtensionCommand::GetPrefs(ref x) => self.handle_get_prefs(x),
ServoExtensionCommand::SetPrefs(ref x) => self.handle_set_prefs(x),
ServoExtensionCommand::ResetPrefs(ref x) => self.handle_reset_prefs(x),
},
_ => Err(WebDriverError::new(
ErrorStatus::UnsupportedOperation,
"Command not implemented",
)),
}
}
fn delete_session(&mut self, _session: &Option<Session>) {
// Servo doesn't support multiple sessions, so we exit on session deletion
        // Ignore send failures here: the constellation may already be
        // shutting down, and panicking via unwrap() would defeat the `let _`.
        let _ = self.constellation_chan.send(ConstellationMsg::Exit);
self.session = None;
}
}<|fim▁end|> |
fn handle_element_attribute(
&self, |
<|file_name|>portfolio.py<|end_file_name|><|fim▁begin|># encoding: utf-8
# (c) 2019 Open Risk (https://www.openriskmanagement.com)
#
# portfolioAnalytics is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of correlationMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
""" This module provides simple functionality for holding portfolio data for calculation purposes.
* Portfolio_ implements a simple portfolio data container
"""
import numpy as np
class Portfolio(object):
""" The _`Portfolio` object implements a simple portfolio data structure. See `loan tape <https://www.openriskmanual.org/wiki/Loan_Tape>`_ for more general structures.
"""<|fim▁hole|>
    def __init__(self, psize=0, rating=None, exposure=None, factor=None):
"""Initialize portfolio.
:param psize: initialization values
:param rating: list of default probabilities
:param exposure: list of exposures (numerical values, e.g. `Exposure At Default <https://www.openriskmanual.org/wiki/Exposure_At_Default>`_
:param factor: list of factor indices (those should match the factors used e.g. in a correlation matrix
:type psize: int
:type rating: list of floats
:type exposure: list of floats
:type factor: list of int
:returns: returns a Portfolio object
:rtype: object
.. note:: The initialization in itself does not validate if the provided values form indeed valid portfolio data
"""
        self.psize = psize
        # None defaults avoid Python's shared mutable default argument pitfall
        # (loadjson appends in place, which would otherwise leak state across
        # instances); each instance gets its own fresh lists.
        self.exposure = exposure if exposure is not None else []
        self.rating = rating if rating is not None else []
        self.factor = factor if factor is not None else []
def loadjson(self, data):
"""Load portfolio data from JSON object.
The data format for the input json object is a list of dictionaries as follows
[{"ID":"1","PD":"0.015","EAD":"40","FACTOR":0},
...
{"ID":"2","PD":"0.286","EAD":"20","FACTOR":0}]
"""
self.psize = len(data)
for x in data:
self.exposure.append(float(x['EAD']))
self.rating.append(float(x['PD']))
self.factor.append(x['FACTOR'])
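    # Usage sketch built from the docstring's own sample rows (illustrative only):
    #   p = Portfolio()
    #   p.loadjson([{"ID": "1", "PD": "0.015", "EAD": "40", "FACTOR": 0},
    #               {"ID": "2", "PD": "0.286", "EAD": "20", "FACTOR": 0}])
    #   N, avg_pd = p.preprocess_portfolio()  # N == 2, avg_pd ~= 0.105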
def preprocess_portfolio(self):
"""
Produce some portfolio statistics like total number of entities and exposure weighted average probability of default
        :return: tuple (N, p) with the number of entities and the exposure-weighted average probability of default
"""
N = self.psize
Total_Exposure = np.sum(self.exposure)
p = np.inner(self.rating, self.exposure) / Total_Exposure
return N, p<|fim▁end|> | |
<|file_name|>SyncDisabled.js<|end_file_name|><|fim▁begin|>import React from 'react';
import pure from 'recompose/pure';
import SvgIcon from 'material-ui/SvgIcon';
let SyncDisabled = props =>
<SvgIcon {...props}>
<path d="M10 6.35V4.26c-.8.21-1.55.54-2.23.96l1.46 1.46c.25-.12.5-.24.77-.33zm-7.14-.94l2.36 2.36C4.45 8.99 4 10.44 4 12c0 2.21.91 4.2 2.36 5.64L4 20h6v-6l-2.24 2.24C6.68 15.15 6 13.66 6 12c0-1 .25-1.94.68-2.77l8.08 8.08c-.25.13-.5.25-.77.34v2.09c.8-.21 1.55-.54 2.23-.96l2.36 2.36 1.27-1.27L4.14 4.14 2.86 5.41zM20 4h-6v6l2.24-2.24C17.32 8.85 18 10.34 18 12c0 1-.25 1.94-.68 2.77l1.46 1.46C19.55 15.01 20 13.56 20 12c0-2.21-.91-4.2-2.36-5.64L20 4z" />
</SvgIcon>;
SyncDisabled = pure(SyncDisabled);
SyncDisabled.muiName = 'SvgIcon';
<|fim▁hole|><|fim▁end|> | export default SyncDisabled; |
<|file_name|>JarFilter.java<|end_file_name|><|fim▁begin|>package mnm.mcpackager.gui;
import java.io.File;
import javax.swing.filechooser.FileFilter;
public class JarFilter extends FileFilter {
@Override
public boolean accept(File f) {
if (f.isDirectory())
return true;
        // getExtension() returns null for files without an extension, so
        // compare against the literal to avoid a NullPointerException.
        return "jar".equalsIgnoreCase(getExtension(f));
}
@Override
public String getDescription() {
return "Jar Archives";
}
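    // Usage sketch (typical Swing wiring, not part of the original file):
    //   JFileChooser chooser = new JFileChooser();
    //   chooser.setFileFilter(new JarFilter());
    //   chooser.showOpenDialog(parent);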
private String getExtension(File f) {
String ext = null;
String s = f.getName();
int i = s.lastIndexOf('.');
if (i > 0 && i < s.length() - 1)
ext = s.substring(i + 1).toLowerCase();
return ext;<|fim▁hole|><|fim▁end|> | }
} |
<|file_name|>GnomeProxySearchStrategy.java<|end_file_name|><|fim▁begin|>package com.btr.proxy.search.desktop.gnome;
import java.io.File;
import java.io.IOException;
import java.net.ProxySelector;
import java.util.Properties;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import com.btr.proxy.search.ProxySearchStrategy;
import com.btr.proxy.selector.direct.NoProxySelector;
import com.btr.proxy.selector.fixed.FixedProxySelector;
import com.btr.proxy.selector.misc.ProtocolDispatchSelector;
import com.btr.proxy.selector.whitelist.ProxyBypassListSelector;
import com.btr.proxy.util.EmptyXMLResolver;
import com.btr.proxy.util.Logger;
import com.btr.proxy.util.PlatformUtil;
import com.btr.proxy.util.ProxyException;
import com.btr.proxy.util.ProxyUtil;
import com.btr.proxy.util.Logger.LogLevel;
/*****************************************************************************
* Loads the Gnome proxy settings from the Gnome GConf settings.
* <p>
* The following settings are extracted from the configuration that is stored
* in <i>.gconf</i> folder found in the user's home directory:
* </p>
* <ul>
* <li><i>/system/http_proxy/use_http_proxy</i> -> bool used only by gnome-vfs </li>
* <li><i>/system/http_proxy/host</i> -> string "my-proxy.example.com" without "http://"</li>
* <li><i>/system/http_proxy/port</i> -> int</li>
* <li><i>/system/http_proxy/use_authentication</i> -> bool</li>
* <li><i>/system/http_proxy/authentication_user</i> -> string</li>
* <li><i>/system/http_proxy/authentication_password</i> -> string</li>
* <li><i>/system/http_proxy/ignore_hosts</i> -> list-of-string</li>
* <li><i>/system/proxy/mode</i> -> string THIS IS THE CANONICAL KEY; SEE BELOW</li>
* <li><i>/system/proxy/secure_host</i> -> string "proxy-for-https.example.com"</li>
* <li><i>/system/proxy/secure_port</i> -> int</li>
* <li><i>/system/proxy/ftp_host</i> -> string "proxy-for-ftp.example.com"</li>
* <li><i>/system/proxy/ftp_port</i> -> int</li>
* <li><i>/system/proxy/socks_host</i> -> string "proxy-for-socks.example.com"</li>
* <li><i>/system/proxy/socks_port</i> -> int</li>
* <li><i>/system/proxy/autoconfig_url</i> -> string "http://proxy-autoconfig.example.com"</li>
* </ul>
* <i>/system/proxy/mode</i> can be either:<br/>
* "none" -> No proxy is used<br/>
* "manual" -> The user's configuration values are used (/system/http_proxy/{host,port,etc.})<br/>
* "auto" -> The "/system/proxy/autoconfig_url" key is used <br/>
* <p>
* GNOME Proxy_configuration settings are explained
* <a href="http://en.opensuse.org/GNOME/Proxy_configuration">here</a> in detail
* </p><|fim▁hole|> ****************************************************************************/
public class GnomeProxySearchStrategy implements ProxySearchStrategy {
/*************************************************************************
* ProxySelector
* @see java.net.ProxySelector#ProxySelector()
************************************************************************/
public GnomeProxySearchStrategy() {
super();
}
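	/*************************************************************************
	 * Usage sketch (illustrative, not part of the original file): install the
	 * detected selector as the JVM-wide default when one was found.
	 *
	 *   ProxySelector selector = new GnomeProxySearchStrategy().getProxySelector();
	 *   if (selector != null) {
	 *       ProxySelector.setDefault(selector);
	 *   }
	 ************************************************************************/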
/*************************************************************************
* Loads the proxy settings and initializes a proxy selector for the Gnome
* proxy settings.
* @return a configured ProxySelector, null if none is found.
* @throws ProxyException on file reading error.
************************************************************************/
public ProxySelector getProxySelector() throws ProxyException {
Logger.log(getClass(), LogLevel.TRACE, "Detecting Gnome proxy settings");
Properties settings = readSettings();
String type = settings.getProperty("/system/proxy/mode");
ProxySelector result = null;
if (type == null) {
String useProxy = settings.getProperty("/system/http_proxy/use_http_proxy");
if (useProxy == null) {
return null;
}
type = Boolean.parseBoolean(useProxy)?"manual":"none";
}
if ("none".equals(type)) {
Logger.log(getClass(), LogLevel.TRACE, "Gnome uses no proxy");
result = NoProxySelector.getInstance();
}
if ("manual".equals(type)) {
Logger.log(getClass(), LogLevel.TRACE, "Gnome uses manual proxy settings");
result = setupFixedProxySelector(settings);
}
if ("auto".equals(type)) {
String pacScriptUrl = settings.getProperty("/system/proxy/autoconfig_url", "");
Logger.log(getClass(), LogLevel.TRACE, "Gnome uses autodetect script {0}", pacScriptUrl);
result = ProxyUtil.buildPacSelectorForUrl(pacScriptUrl);
}
// Wrap into white-list filter?
String noProxyList = settings.getProperty("/system/http_proxy/ignore_hosts", null);
if (result != null && noProxyList != null && noProxyList.trim().length() > 0) {
Logger.log(getClass(), LogLevel.TRACE, "Gnome uses proxy bypass list: {0}", noProxyList);
result = new ProxyBypassListSelector(noProxyList, result);
}
return result;
}
/*************************************************************************
* Load the proxy settings from the gconf settings XML file.
* @return the loaded settings stored in a properties object.
* @throws ProxyException on processing error.
************************************************************************/
public Properties readSettings() throws ProxyException {
Properties settings = new Properties();
try {
parseSettings("/system/proxy/", settings);
parseSettings("/system/http_proxy/", settings);
} catch (IOException e) {
Logger.log(getClass(), LogLevel.ERROR, "Gnome settings file error.", e);
throw new ProxyException(e);
}
return settings;
}
/*************************************************************************
* Finds the Gnome GConf settings file.
* @param context the gconf context to parse.
     * @return the settings file, or null if it does not exist.
************************************************************************/
private File findSettingsFile(String context) {
// Normally we should inspect /etc/gconf/<version>/path to find out where the actual file is.
// But for normal systems this is always stored in .gconf folder in the user's home directory.
File userDir = new File(PlatformUtil.getUserHomeDir());
// Build directory path for context
StringBuilder path = new StringBuilder();
String[] parts = context.split("/");
for (String part : parts) {
path.append(part);
path.append(File.separator);
}
File settingsFile = new File(userDir, ".gconf"+File.separator+path.toString()+"%gconf.xml");
if (!settingsFile.exists()) {
Logger.log(getClass(), LogLevel.WARNING, "Gnome settings: {0} not found.", settingsFile);
return null;
}
return settingsFile;
}
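    /*
     * For reference, a sketch of what a gconf settings file such as
     * ~/.gconf/system/http_proxy/%gconf.xml typically looks like (the values
     * below are illustrative, not taken from a real machine):
     *
     *   <?xml version="1.0"?>
     *   <gconf>
     *     <entry name="host" mtime="0" type="string">
     *       <stringvalue>my-proxy.example.com</stringvalue>
     *     </entry>
     *     <entry name="port" mtime="0" type="int" value="3128"/>
     *     <entry name="use_http_proxy" mtime="0" type="bool" value="true"/>
     *     <entry name="ignore_hosts" mtime="0" type="list" ltype="string">
     *       <li type="string"><stringvalue>localhost</stringvalue></li>
     *     </entry>
     *   </gconf>
     *
     * parseSettings() below iterates over the <entry> nodes and getEntryValue()
     * decodes each node according to its "type" attribute, yielding properties
     * such as "/system/http_proxy/host" -> "my-proxy.example.com".
     */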
/*************************************************************************
     * Parse the fixed proxy settings and build a chained ProxySelector
     * for this configuration.
* @param settings the proxy settings to evaluate.
************************************************************************/
private ProxySelector setupFixedProxySelector(Properties settings) {
if (!hasProxySettings(settings)) {
return null;
}
ProtocolDispatchSelector ps = new ProtocolDispatchSelector();
installHttpSelector(settings, ps);
if (useForAllProtocols(settings)) {
ps.setFallbackSelector(ps.getSelector("http"));
} else {
installSecureSelector(settings, ps);
installFtpSelector(settings, ps);
installSocksSelector(settings, ps);
}
return ps;
}
/*************************************************************************
* Check if the http proxy should also be used for all other protocols.
* @param settings to inspect.
* @return true if only one proxy is configured else false.
************************************************************************/
private boolean useForAllProtocols(Properties settings) {
return Boolean.parseBoolean(
settings.getProperty("/system/http_proxy/use_same_proxy", "false"));
}
/*************************************************************************
* Checks if we have Proxy configuration settings in the properties.
* @param settings to inspect.
* @return true if we have found Proxy settings.
************************************************************************/
private boolean hasProxySettings(Properties settings) {
String proxyHost = settings.getProperty("/system/http_proxy/host", null);
return proxyHost != null && proxyHost.length() > 0;
}
/*************************************************************************
* Install a http proxy from the given settings.
* @param settings to inspect
* @param ps the dispatch selector to configure.
* @throws NumberFormatException
************************************************************************/
private void installHttpSelector(Properties settings,
ProtocolDispatchSelector ps) throws NumberFormatException {
String proxyHost = settings.getProperty("/system/http_proxy/host", null);
int proxyPort = Integer.parseInt(settings.getProperty("/system/http_proxy/port", "0").trim());
if (proxyHost != null && proxyHost.length() > 0 && proxyPort > 0) {
Logger.log(getClass(), LogLevel.TRACE, "Gnome http proxy is {0}:{1}", proxyHost, proxyPort);
ps.setSelector("http", new FixedProxySelector(proxyHost.trim(), proxyPort));
}
}
/*************************************************************************
* Install a socks proxy from the given settings.
* @param settings to inspect
* @param ps the dispatch selector to configure.
* @throws NumberFormatException
************************************************************************/
private void installSocksSelector(Properties settings,
ProtocolDispatchSelector ps) throws NumberFormatException {
String proxyHost = settings.getProperty("/system/proxy/socks_host", null);
int proxyPort = Integer.parseInt(settings.getProperty("/system/proxy/socks_port", "0").trim());
if (proxyHost != null && proxyHost.length() > 0 && proxyPort > 0) {
Logger.log(getClass(), LogLevel.TRACE, "Gnome socks proxy is {0}:{1}", proxyHost, proxyPort);
ps.setSelector("socks", new FixedProxySelector(proxyHost.trim(), proxyPort));
}
}
/*************************************************************************
     * Install a ftp proxy from the given settings.
     * @param settings to inspect
     * @param ps the dispatch selector to configure.
     * @throws NumberFormatException
************************************************************************/
private void installFtpSelector(Properties settings,
ProtocolDispatchSelector ps) throws NumberFormatException {
String proxyHost = settings.getProperty("/system/proxy/ftp_host", null);
int proxyPort = Integer.parseInt(settings.getProperty("/system/proxy/ftp_port", "0").trim());
if (proxyHost != null && proxyHost.length() > 0 && proxyPort > 0) {
Logger.log(getClass(), LogLevel.TRACE, "Gnome ftp proxy is {0}:{1}", proxyHost, proxyPort);
ps.setSelector("ftp", new FixedProxySelector(proxyHost.trim(), proxyPort));
}
}
/*************************************************************************
     * Install a secure (https/sftp) proxy from the given settings.
     * @param settings to inspect
     * @param ps the dispatch selector to configure.
     * @throws NumberFormatException
************************************************************************/
private void installSecureSelector(Properties settings,
ProtocolDispatchSelector ps) throws NumberFormatException {
String proxyHost = settings.getProperty("/system/proxy/secure_host", null);
int proxyPort = Integer.parseInt(settings.getProperty("/system/proxy/secure_port", "0").trim());
if (proxyHost != null && proxyHost.length() > 0 && proxyPort > 0) {
Logger.log(getClass(), LogLevel.TRACE, "Gnome secure proxy is {0}:{1}", proxyHost, proxyPort);
ps.setSelector("https", new FixedProxySelector(proxyHost.trim(), proxyPort));
ps.setSelector("sftp", new FixedProxySelector(proxyHost.trim(), proxyPort));
}
}
/*************************************************************************
* Parse the settings file and extract all network.proxy.* settings from it.
* @param context the gconf context to parse.
* @param settings the settings object to fill.
* @return the parsed properties.
* @throws IOException on read error.
************************************************************************/
private Properties parseSettings(String context, Properties settings) throws IOException {
// Read settings from file
File settingsFile = findSettingsFile(context);
if (settingsFile == null) {
return settings;
}
try {
DocumentBuilder documentBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
documentBuilder.setEntityResolver(new EmptyXMLResolver());
Document doc = documentBuilder.parse(settingsFile);
Element root = doc.getDocumentElement();
Node entry = root.getFirstChild();
while (entry != null) {
if ("entry".equals(entry.getNodeName()) && entry instanceof Element) {
String entryName = ((Element)entry).getAttribute("name");
settings.setProperty(context+entryName, getEntryValue((Element) entry));
}
entry = entry.getNextSibling();
}
} catch (SAXException e) {
Logger.log(getClass(), LogLevel.ERROR, "Gnome settings parse error", e);
throw new IOException(e.getMessage());
} catch (ParserConfigurationException e) {
Logger.log(getClass(), LogLevel.ERROR, "Gnome settings parse error", e);
throw new IOException(e.getMessage());
}
return settings;
}
/*************************************************************************
* Parse an entry value from a given entry node.
* @param entry the XML node to inspect.
* @return the value, null if it has no value.
************************************************************************/
private String getEntryValue(Element entry) {
String type = entry.getAttribute("type");
if ("int".equals(type) || "bool".equals(type)) {
return entry.getAttribute("value");
}
if ("string".equals(type)) {
NodeList list = entry.getElementsByTagName("stringvalue");
if (list.getLength() > 0) {
return list.item(0).getTextContent();
}
}
if ("list".equals(type)) {
StringBuilder result = new StringBuilder();
NodeList list = entry.getElementsByTagName("li");
// Build comma separated list of items
for (int i = 0; i < list.getLength(); i++) {
if (result.length() > 0) {
result.append(",");
}
result.append(getEntryValue((Element) list.item(i)));
}
return result.toString();
}
return null;
}
}<|fim▁end|> | * @author Bernd Rosstauscher ([email protected]) Copyright 2009 |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Copyright (c) 2013-2014 Ben Croston
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""This test suite assumes the following circuit is connected:
GND_PIN = 6
LED_PIN = 12 (with resistor to 0v)
SWITCH_PIN = 18 (with 0.1 uF capacitor around switch) to 0v
LOOP_IN = 16 connected with 1K resistor to LOOP_OUT
LOOP_OUT = 22
"""
import sys
import warnings
import time
from threading import Timer
import RPi.GPIO as GPIO
if sys.version[:3] == '2.6':
import unittest2 as unittest
else:
import unittest
GND_PIN = 6
LED_PIN = 12
LED_PIN_BCM = 18
SWITCH_PIN = 18
LOOP_IN = 16
LOOP_OUT = 22
# Test starts with 'AAA' so that it is run first
class TestAAASetup(unittest.TestCase):
def runTest(self):
# Test mode not set (BOARD or BCM) exception
with self.assertRaises(RuntimeError) as e:
GPIO.setup(LED_PIN, GPIO.OUT)
self.assertEqual(str(e.exception), 'Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)')
GPIO.setmode(GPIO.BOARD)
# Test not set as OUTPUT message
with self.assertRaises(RuntimeError) as e:
GPIO.output(LED_PIN, GPIO.HIGH)
self.assertEqual(str(e.exception), 'The GPIO channel has not been set up as an OUTPUT')
GPIO.setup(LED_PIN, GPIO.IN)
# Test setup(..., pull_up_down=GPIO.HIGH) raises exception
with self.assertRaises(ValueError):
GPIO.setup(LED_PIN, GPIO.IN, pull_up_down=GPIO.HIGH)
# Test 'already in use' warning
GPIO.cleanup()
with open('/sys/class/gpio/export','wb') as f:
f.write(str(LED_PIN_BCM).encode())
with open('/sys/class/gpio/gpio%s/direction'%LED_PIN_BCM,'wb') as f:
f.write(b'out')
with open('/sys/class/gpio/gpio%s/value'%LED_PIN_BCM,'wb') as f:
f.write(b'1')
with warnings.catch_warnings(record=True) as w:
GPIO.setup(LED_PIN, GPIO.OUT) # generate 'already in use' warning
self.assertEqual(w[0].category, RuntimeWarning)
with open('/sys/class/gpio/unexport','wb') as f:
f.write(str(LED_PIN_BCM).encode())
GPIO.cleanup()
# test initial value of high reads back as high
GPIO.setup(LED_PIN, GPIO.OUT, initial=GPIO.HIGH)
self.assertEqual(GPIO.input(LED_PIN), GPIO.HIGH)
GPIO.cleanup()
# test initial value of low reads back as low
GPIO.setup(LED_PIN, GPIO.OUT, initial=GPIO.LOW)
self.assertEqual(GPIO.input(LED_PIN), GPIO.LOW)
GPIO.cleanup()
class TestInputOutput(unittest.TestCase):
def test_outputread(self):
"""Test that an output() can be input()"""
GPIO.setup(LED_PIN, GPIO.OUT)
GPIO.output(LED_PIN, GPIO.HIGH)
self.assertEqual(GPIO.input(LED_PIN), GPIO.HIGH)
GPIO.output(LED_PIN, GPIO.LOW)
self.assertEqual(GPIO.input(LED_PIN), GPIO.LOW)
GPIO.cleanup()
def test_loopback(self):
"""Test output loops back to another input"""
GPIO.setup(LOOP_IN, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
GPIO.setup(LOOP_OUT, GPIO.OUT, initial=GPIO.LOW)
self.assertEqual(GPIO.input(LOOP_IN), GPIO.LOW)
GPIO.output(LOOP_OUT, GPIO.HIGH)
self.assertEqual(GPIO.input(LOOP_IN), GPIO.HIGH)
GPIO.cleanup()
def test_output_on_input(self):
"""Test output() can not be done on input"""
GPIO.setup(SWITCH_PIN, GPIO.IN)
with self.assertRaises(RuntimeError):
GPIO.output(SWITCH_PIN, GPIO.LOW)
GPIO.cleanup()
class TestSoftPWM(unittest.TestCase):
def runTest(self):
GPIO.setup(LED_PIN, GPIO.OUT)
pwm = GPIO.PWM(LED_PIN, 50)
pwm.start(100)
print "\nPWM tests"
response = raw_input('Is the LED on (y/n) ? ').upper()
self.assertEqual(response,'Y')
pwm.start(0)
response = raw_input('Is the LED off (y/n) ? ').upper()
self.assertEqual(response,'Y')
print "LED Brighten/fade test..."
for i in range(0,3):
for x in range(0,101,5):
pwm.ChangeDutyCycle(x)
time.sleep(0.1)
for x in range(100,-1,-5):
pwm.ChangeDutyCycle(x)
time.sleep(0.1)
pwm.stop()
response = raw_input('Did it work (y/n) ? ').upper()
self.assertEqual(response,'Y')
GPIO.cleanup()
class TestSetWarnings(unittest.TestCase):
def test_alreadyinuse(self):
"""Test 'already in use' warning"""
GPIO.setwarnings(False)
with open('/sys/class/gpio/export','wb') as f:
f.write(str(LED_PIN_BCM).encode())
with open('/sys/class/gpio/gpio%s/direction'%LED_PIN_BCM,'wb') as f:
f.write(b'out')
with open('/sys/class/gpio/gpio%s/value'%LED_PIN_BCM,'wb') as f:
f.write(b'1')
with warnings.catch_warnings(record=True) as w:
GPIO.setup(LED_PIN, GPIO.OUT) # generate 'already in use' warning
self.assertEqual(len(w),0) # should be no warnings
with open('/sys/class/gpio/unexport','wb') as f:
f.write(str(LED_PIN_BCM).encode())
GPIO.cleanup()
GPIO.setwarnings(True)
with open('/sys/class/gpio/export','wb') as f:
f.write(str(LED_PIN_BCM).encode())
with open('/sys/class/gpio/gpio%s/direction'%LED_PIN_BCM,'wb') as f:
f.write(b'out')
with open('/sys/class/gpio/gpio%s/value'%LED_PIN_BCM,'wb') as f:
f.write(b'1')
with warnings.catch_warnings(record=True) as w:
GPIO.setup(LED_PIN, GPIO.OUT) # generate 'already in use' warning
self.assertEqual(w[0].category, RuntimeWarning)
with open('/sys/class/gpio/unexport','wb') as f:
f.write(str(LED_PIN_BCM).encode())
GPIO.cleanup()
def test_cleanupwarning(self):
"""Test initial GPIO.cleanup() produces warning"""
GPIO.setwarnings(False)
GPIO.setup(SWITCH_PIN, GPIO.IN)
with warnings.catch_warnings(record=True) as w:
GPIO.cleanup()
self.assertEqual(len(w),0) # no warnings
GPIO.cleanup()
self.assertEqual(len(w),0) # no warnings
GPIO.setwarnings(True)
GPIO.setup(SWITCH_PIN, GPIO.IN)
with warnings.catch_warnings(record=True) as w:
GPIO.cleanup()
self.assertEqual(len(w),0) # no warnings
GPIO.cleanup()
self.assertEqual(w[0].category, RuntimeWarning) # a warning
class TestVersions(unittest.TestCase):
def test_rpi_revision(self):
if GPIO.RPI_REVISION == 0:
revision = 'Compute Module'
elif GPIO.RPI_REVISION == 1:
revision = 'revision 1'
elif GPIO.RPI_REVISION == 2:
revision = 'revision 2'
elif GPIO.RPI_REVISION == 3:
revision = 'Model B+'
else:
revision = '**undetected**'
response = raw_input('\nThis board appears to be a %s - is this correct (y/n) ? '%revision).upper()
self.assertEqual(response, 'Y')
def test_gpio_version(self):
response = raw_input('\nRPi.GPIO version %s - is this correct (y/n) ? '%GPIO.VERSION).upper()
self.assertEqual(response, 'Y')
class TestGPIOFunction(unittest.TestCase):
def runTest(self):
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED_PIN_BCM, GPIO.IN)
self.assertEqual(GPIO.gpio_function(LED_PIN_BCM), GPIO.IN)
GPIO.setup(LED_PIN_BCM, GPIO.OUT)<|fim▁hole|>
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LED_PIN, GPIO.IN)
self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.IN)
GPIO.setup(LED_PIN, GPIO.OUT)
self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.OUT)
def tearDown(self):
GPIO.cleanup()
class TestSwitchBounce(unittest.TestCase):
def __init__(self, *a, **k):
unittest.TestCase.__init__(self, *a, **k)
self.switchcount = 0
def cb(self,chan):
self.switchcount += 1
print 'Button press',self.switchcount
def setUp(self):
GPIO.setup(SWITCH_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def test_switchbounce(self):
self.switchcount = 0
print "\nSwitch bounce test. Press switch at least 10 times and count..."
GPIO.add_event_detect(SWITCH_PIN, GPIO.FALLING, callback=self.cb, bouncetime=200)
while self.switchcount < 10:
time.sleep(1)
GPIO.remove_event_detect(SWITCH_PIN)
def test_event_detected(self):
self.switchcount = 0
print "\nGPIO.event_detected() switch bounce test. Press switch at least 10 times and count..."
GPIO.add_event_detect(SWITCH_PIN, GPIO.FALLING, bouncetime=200)
while self.switchcount < 10:
if GPIO.event_detected(SWITCH_PIN):
self.switchcount += 1
print 'Button press',self.switchcount
GPIO.remove_event_detect(SWITCH_PIN)
def tearDown(self):
GPIO.cleanup()
class TestEdgeDetection(unittest.TestCase):
def setUp(self):
GPIO.setup(LOOP_IN, GPIO.IN)
GPIO.setup(LOOP_OUT, GPIO.OUT)
def testWaitForEdgeWithCallback(self):
def cb():
raise Exception("Callback should not be called")
def makehigh():
GPIO.output(LOOP_OUT, GPIO.HIGH)
GPIO.output(LOOP_OUT, GPIO.LOW)
t = Timer(0.1, makehigh)
GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
t.start()
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING)
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.add_event_callback(LOOP_IN, callback=cb)
with self.assertRaises(RuntimeError):
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING)
GPIO.remove_event_detect(LOOP_IN)
def testWaitForEventSwitchbounce(self):
def bounce():
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.2)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.output(LOOP_OUT, GPIO.LOW)
t1 = Timer(0.1, bounce)
t1.start()
starttime = time.time()
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=100)
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=100)
finishtime = time.time()
self.assertGreater(finishtime-starttime, 0.2)
def testInvalidBouncetime(self):
with self.assertRaises(ValueError):
GPIO.add_event_detect(LOOP_IN, GPIO.RISING, bouncetime=-1)
with self.assertRaises(ValueError):
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=-1)
GPIO.add_event_detect(LOOP_IN, GPIO.RISING, bouncetime=123)
with self.assertRaises(RuntimeError):
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=321)
GPIO.remove_event_detect(LOOP_IN)
def testAlreadyAdded(self):
GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
with self.assertRaises(RuntimeError):
GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
with self.assertRaises(RuntimeError):
GPIO.wait_for_edge(LOOP_IN, GPIO.FALLING)
GPIO.remove_event_detect(LOOP_IN)
def testHighLowEvent(self):
with self.assertRaises(ValueError):
GPIO.add_event_detect(LOOP_IN, GPIO.LOW)
with self.assertRaises(ValueError):
GPIO.add_event_detect(LOOP_IN, GPIO.HIGH)
def testFallingEventDetected(self):
GPIO.output(LOOP_OUT, GPIO.HIGH)
GPIO.add_event_detect(LOOP_IN, GPIO.FALLING)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), True)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.remove_event_detect(LOOP_IN)
def testRisingEventDetected(self):
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), True)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.remove_event_detect(LOOP_IN)
def testBothEventDetected(self):
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.add_event_detect(LOOP_IN, GPIO.BOTH)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), True)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), True)
GPIO.remove_event_detect(LOOP_IN)
def testWaitForRising(self):
def makehigh():
GPIO.output(LOOP_OUT, GPIO.HIGH)
GPIO.output(LOOP_OUT, GPIO.LOW)
t = Timer(0.1, makehigh)
t.start()
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING)
def testWaitForFalling(self):
def makelow():
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.output(LOOP_OUT, GPIO.HIGH)
t = Timer(0.1, makelow)
t.start()
GPIO.wait_for_edge(LOOP_IN, GPIO.FALLING)
def testExceptionInCallback(self):
self.run_cb = False
def cb(channel):
with self.assertRaises(ZeroDivisionError):
self.run_cb = True
a = 1/0
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.add_event_detect(LOOP_IN, GPIO.RISING, callback=cb)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
self.assertEqual(self.run_cb, True)
GPIO.remove_event_detect(LOOP_IN)
def testAddEventCallback(self):
def cb(channel):
self.callback_count += 1
# falling test
self.callback_count = 0
GPIO.output(LOOP_OUT, GPIO.HIGH)
GPIO.add_event_detect(LOOP_IN, GPIO.FALLING)
GPIO.add_event_callback(LOOP_IN, cb)
time.sleep(0.01)
for i in range(2048):
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.001)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.001)
GPIO.remove_event_detect(LOOP_IN)
self.assertEqual(self.callback_count, 2048)
# rising test
self.callback_count = 0
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.add_event_detect(LOOP_IN, GPIO.RISING, callback=cb)
time.sleep(0.01)
for i in range(2048):
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.001)
GPIO.remove_event_detect(LOOP_IN)
self.assertEqual(self.callback_count, 2048)
# both test
self.callback_count = 0
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.add_event_detect(LOOP_IN, GPIO.BOTH, callback=cb)
time.sleep(0.01)
for i in range(2048):
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.001)
GPIO.remove_event_detect(LOOP_IN)
self.assertEqual(self.callback_count, 4096)
def testEventOnOutput(self):
with self.assertRaises(RuntimeError):
GPIO.add_event_detect(LOOP_OUT, GPIO.FALLING)
def tearDown(self):
GPIO.cleanup()
class TestCleanup(unittest.TestCase):
def test_cleanall(self):
GPIO.setup(LOOP_OUT, GPIO.OUT)
GPIO.setup(LED_PIN, GPIO.OUT)
self.assertEqual(GPIO.gpio_function(LOOP_OUT), GPIO.OUT)
self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.OUT)
GPIO.cleanup()
self.assertEqual(GPIO.gpio_function(LOOP_OUT), GPIO.IN)
self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.IN)
def test_cleanone(self):
GPIO.setup(LOOP_OUT, GPIO.OUT)
GPIO.setup(LED_PIN, GPIO.OUT)
self.assertEqual(GPIO.gpio_function(LOOP_OUT), GPIO.OUT)
self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.OUT)
GPIO.cleanup(LOOP_OUT)
self.assertEqual(GPIO.gpio_function(LOOP_OUT), GPIO.IN)
self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.OUT)
GPIO.cleanup(LED_PIN)
self.assertEqual(GPIO.gpio_function(LOOP_OUT), GPIO.IN)
self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.IN)
#def test_suite():
# suite = unittest.TestLoader().loadTestsFromModule()
# return suite
if __name__ == '__main__':
unittest.main()<|fim▁end|> | self.assertEqual(GPIO.gpio_function(LED_PIN_BCM), GPIO.OUT) |
<|file_name|>nc.py<|end_file_name|><|fim▁begin|>################################################################################
# nc.py
#
# Base class for NC code creation
# And global functions for calling current creator
#
# Hirutso Enni, 2009-01-13
# altered by Dan Falck 2010-08-04
# added tap() arguments Michael Haberler 2010-10-07
################################################################################
ncOFF = 0
ncLEFT = -1
ncRIGHT = +1
ncCW = -1
ncCCW = +1
ncMIST = 1
ncFLOOD = 2
################################################################################
class Creator:
def __init__(self):
pass
############################################################################
## Internals
def file_open(self, name):
self.file = open(name, 'w')
self.filename = name
def file_close(self):
self.file.close()
def write(self, s):
self.file.write(s)
############################################################################
## Programs
def program_begin(self, id, name=''):
"""Begin a program"""
pass
def add_stock(self, type_name, params):
pass
def program_stop(self, optional=False):
"""Stop the machine"""
pass
def program_end(self):
"""End the program"""
pass
def flush_nc(self):
"""Flush all pending codes"""
pass
############################################################################
## Subprograms
def sub_begin(self, id, name=''):
"""Begin a subprogram"""
pass
def sub_call(self, id):
"""Call a subprogram"""
pass
def sub_end(self):
"""Return from a subprogram"""
pass
############################################################################
## Settings
def imperial(self):
"""Set imperial units"""
pass
def metric(self):
"""Set metric units"""
pass
def absolute(self):
"""Set absolute coordinates"""
pass
def incremental(self):
"""Set incremental coordinates"""
pass
def polar(self, on=True):
"""Set polar coordinates"""
pass
def set_plane(self, plane):
"""Set plane"""
pass
def set_temporary_origin(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Set temporary origin G92"""
pass
def remove_temporary_origin(self):
"""Remote temporary origin G92.1"""
pass
############################################################################
## Tools
def tool_change(self, id):
"""Change the tool"""
pass
def tool_defn(self, id, name='', params=None):
"""Define a tool"""
pass
def offset_radius(self, id, radius=None):
"""Set tool radius offsetting"""
pass
def offset_length(self, id, length=None):
"""Set tool length offsetting"""
pass
def current_tool(self):
return None
############################################################################
## Datums
def datum_shift(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Shift the datum"""
pass
def datum_set(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Set the datum"""
pass
def workplane(self, id):
"""Set the workplane"""
pass
def clearanceplane(self,z=None):
"""set clearance plane"""
pass
############################################################################
## APT360 like Transformation Definitions
## These definitions were created while looking at Irvin Kraal's book on APT
    ## - Numerical Control Programming in APT - page 211
def matrix(self,a1=None,b1=None,c1=None,a2=None,b2=None,c2=None,a3=None,b3=None,c3=None):
"""Create a matrix for transformations"""
pass
def translate(self,x=None,y=None,z=None):
"""Translate in x,y,z direction"""
pass
def rotate(self,xyrot=None,yzrot=None,zxrot=None,angle=None):
"""Rotate about a coordinate axis"""
pass
def scale(self,k=None):
"""Scale by factor k"""
pass
def matrix_product(self,matrix1=None,matrix2=None):
"""Create matrix that is the product of two other matrices"""
pass
def mirror_plane(self,plane1=None,plane2=None,plane3=None):
"""Mirror image about one or more coordinate planes"""
pass
def mirror_line(self,line=None):
"""Mirror about a line"""
pass
############################################################################
## Rates + Modes
def feedrate(self, f):
"""Set the feedrate"""
pass
def feedrate_hv(self, fh, fv):
"""Set the horizontal and vertical feedrates"""
pass
def spindle(self, s, clockwise=True):
"""Set the spindle speed"""
pass
def coolant(self, mode=0):
"""Set the coolant mode"""
pass
def gearrange(self, gear=0):
"""Set the gear range"""
pass
############################################################################
## Moves
def rapid(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Rapid move"""
pass
def feed(self, x=None, y=None, z=None, a = None, b = None, c = None):
"""Feed move"""
pass
def arc_cw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None):
"""Clockwise arc move"""
pass
def arc_ccw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None):
"""Counterclockwise arc move"""
pass
def dwell(self, t):
"""Dwell"""
pass
def rapid_home(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Rapid relative to home position"""
pass
def rapid_unhome(self):
"""Return from rapid home"""
pass
def set_machine_coordinates(self):
"""Set machine coordinates"""
pass
############################################################################
## Cutter radius compensation
def use_CRC(self):
"""CRC"""
return False
############################################################################
## Cycles
def pattern(self):
"""Simple pattern eg. circle, rect"""
pass
def pocket(self):
"""Pocket routine"""
pass
def profile(self):
"""Profile routine"""
pass
def drill(self, x=None, y=None, dwell=None, depthparams = None, retract_mode=None, spindle_mode=None, internal_coolant_on=None, rapid_to_clearance=None):
"""Drilling routines"""
pass
# original prototype was:
# def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None):
#
# current call is like so:
# tap(x=10, y=10, z=0, tap_mode=0, depth=12.7, standoff=6.35, direction=0, pitch=1.25)
# just add tap_mode & direction parameters
def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None):
"""Tapping routines"""
pass
def bore(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, feed_in=None, feed_out=None, stoppos=None, shift_back=None, shift_right=None, backbore=False, stop=False):
"""Boring routines"""
pass
def end_canned_cycle(self):
pass
############################################################################
## Misc
def comment(self, text):
"""Insert a comment"""
pass
def insert(self, text):
"""APT style INSERT statement"""
pass
def block_delete(self, on=False):
"""block to ignore if block delete switch is on"""
pass
def variable(self, id):
"""Insert a variable"""
pass
def variable_set(self, id, value):
"""Set a variable"""
pass
def probe_linear_centre_outside(self, x1=None, y1=None, depth=None, x2=None, y2=None ):
pass
def probe_single_point(self, point_along_edge_x=None, point_along_edge_y=None, depth=None, retracted_point_x=None, retracted_point_y=None, destination_point_x=None, destination_point_y=None, intersection_variable_x=None, intersection_variable_y=None, probe_offset_x_component=None, probe_offset_y_component=None ):
pass
def probe_downward_point(self, x=None, y=None, depth=None, intersection_variable_z=None):
pass
def report_probe_results(self, x1=None, y1=None, z1=None, x2=None, y2=None, z2=None, x3=None, y3=None, z3=None, x4=None, y4=None, z4=None, x5=None, y5=None, z5=None, x6=None, y6=None, z6=None, xml_file_name=None ):
pass
def open_log_file(self, xml_file_name=None ):
pass
def log_coordinate(self, x=None, y=None, z=None):
pass
def log_message(self, message=None):
pass
def close_log_file(self):
pass
def rapid_to_midpoint(self, x1=None, y1=None, z1=None, x2=None, y2=None, z2=None):
pass
def rapid_to_intersection(self, x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub):
pass
def rapid_to_rotated_coordinate(self, x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final):
pass
def set_path_control_mode(self, mode, motion_blending_tolerance, naive_cam_tolerance ):
pass
############################################################################
## NC code creator for additive machines like RepRap
def wipe(self):
"""wipe routine"""
pass
def extruder_on(self):
"""Turn on the extruder"""
pass
def extruder_off(self):
"""turn off the extruder"""
pass
def set_extruder_flowrate(self, flowrate):
"""Set the flowrate for the extruder"""
pass
def extruder_temp(self, temp):
"""Set the extruder temp in celsius"""
pass
def fan_on(self):
"""turn on the cooling fan"""
pass
def fan_off(self):
"""turn off the cooling fan"""
pass
def build_bed_temp(self, temp):
"""Set the bed temp in celsius"""
pass
def chamber_temp(self, temp):
"""Set the chamber temp in celsius"""
pass
def begin_ncblock(self):
# if the moves have come from backplotting nc code, then the nc code text can be given with these three functions
pass
def end_ncblock(self):
pass
def add_text(self, s, col, cdata):
pass
################################################################################
creator = Creator()
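# The module-level functions below simply delegate to this singleton. A
# machine-specific post-processor is expected to replace it before emitting
# any output; a minimal sketch (class name and method body are illustrative):
#
#   import nc
#
#   class MyMachine(nc.Creator):
#       def rapid(self, x=None, y=None, z=None, a=None, b=None, c=None):
#           self.write('G00 X%s Y%s\n' % (x, y))
#
#   nc.creator = MyMachine()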
############################################################################
## Internals
def write(s):
creator.write(s)
def output(filename):
creator.file_open(filename)
############################################################################
## Programs
def program_begin(id, name=''):
creator.program_begin(id, name)<|fim▁hole|> creator.add_stock(type_name, params)
def program_stop(optional=False):
creator.program_stop(optional)
def program_end():
creator.program_end()
def flush_nc():
creator.flush_nc()
############################################################################
## Subprograms
def sub_begin(id, name=''):
creator.sub_begin(id, name)
def sub_call(id):
creator.sub_call(id)
def sub_end():
creator.sub_end()
############################################################################
## Settings
def imperial():
creator.imperial()
def metric():
creator.metric()
def absolute():
creator.absolute()
def incremental():
creator.incremental()
def polar(on=True):
creator.polar(on)
def set_plane(plane):
creator.set_plane(plane)
def set_temporary_origin(x=None, y=None, z=None, a=None, b=None, c=None):
creator.set_temporary_origin(x,y,z,a,b,c)
def remove_temporary_origin():
creator.remove_temporary_origin()
############################################################################
## Tools
def tool_change(id):
creator.tool_change(id)
def tool_defn(id, name='', params=None):
creator.tool_defn(id, name, params)
def offset_radius(id, radius=None):
creator.offset_radius(id, radius)
def offset_length(id, length=None):
creator.offset_length(id, length)
def current_tool():
    return creator.current_tool()
############################################################################
## Datums
def datum_shift(x=None, y=None, z=None, a=None, b=None, c=None):
creator.datum_shift(x, y, z, a, b, c)
def datum_set(x=None, y=None, z=None, a=None, b=None, c=None):
creator.datum_set(x, y, z, a, b, c)
def workplane(id):
creator.workplane(id)
def clearanceplane(z=None):
creator.clearanceplane(z)
############################################################################
## APT360 like Transformation Definitions
## These definitions were created while looking at Irvin Kraal's book on APT
## - Numerical Control Programming in APT - page 211
def matrix(a1=None,b1=None,c1=None,a2=None,b2=None,c2=None,a3=None,b3=None,c3=None):
creator.matrix(a1,b1,c1,a2,b2,c2,a3,b3,c3)
def translate(x=None,y=None,z=None):
creator.translate(x,y,z)
def rotate(xyrot=None,yzrot=None,zxrot=None,angle=None):
creator.rotate(xyrot,yzrot,zxrot,angle)
def scale(k=None):
creator.scale(k)
def matrix_product(matrix1=None,matrix2=None):
creator.matrix_product(matrix1,matrix2)
def mirror_plane(plane1=None,plane2=None,plane3=None):
creator.mirror_plane(plane1,plane2,plane3)
def mirror_line(line=None):
creator.mirror_line(line)
############################################################################
## Rates + Modes
def feedrate(f):
creator.feedrate(f)
def feedrate_hv(fh, fv):
creator.feedrate_hv(fh, fv)
def spindle(s, clockwise=True):
creator.spindle(s, clockwise)
def coolant(mode=0):
creator.coolant(mode)
def gearrange(gear=0):
creator.gearrange(gear)
############################################################################
## Moves
def rapid(x=None, y=None, z=None, a=None, b=None, c=None):
creator.rapid(x, y, z, a, b, c)
def feed(x=None, y=None, z=None, a = None, b = None, c = None):
    creator.feed(x, y, z, a, b, c)
def arc_cw(x=None, y=None, z=None, i=None, j=None, k=None, r=None):
creator.arc_cw(x, y, z, i, j, k, r)
def arc_ccw(x=None, y=None, z=None, i=None, j=None, k=None, r=None):
creator.arc_ccw(x, y, z, i, j, k, r)
def dwell(t):
creator.dwell(t)
def rapid_home(x=None, y=None, z=None, a=None, b=None, c=None):
creator.rapid_home(x, y, z, a, b, c)
def rapid_unhome():
creator.rapid_unhome()
def set_machine_coordinates():
creator.set_machine_coordinates()
############################################################################
## Cutter radius compensation
def use_CRC():
return creator.use_CRC()
def CRC_nominal_path():
return creator.CRC_nominal_path()
def start_CRC(left = True, radius = 0.0):
creator.start_CRC(left, radius)
def end_CRC():
creator.end_CRC()
############################################################################
## Cycles
def pattern():
creator.pattern()
def pocket():
creator.pocket()
def profile():
creator.profile()
def drill(x=None, y=None, dwell=None, depthparams = None, retract_mode=None, spindle_mode=None, internal_coolant_on=None, rapid_to_clearance=None):
creator.drill(x, y, dwell, depthparams, retract_mode, spindle_mode, internal_coolant_on, rapid_to_clearance)
def tap(x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None):
creator.tap(x, y, z, zretract, depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction)
def bore(x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, feed_in=None, feed_out=None, stoppos=None, shift_back=None, shift_right=None, backbore=False, stop=False):
    creator.bore(x, y, z, zretract, depth, standoff, dwell_bottom, feed_in, feed_out, stoppos, shift_back, shift_right, backbore, stop)
def end_canned_cycle():
creator.end_canned_cycle()
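# Helper that builds a list of peck-drilling depths: it steps down from
# 'first' by 'step', but never steps past 'last', repeating the final depth
# until 'count' entries are produced. For example (illustrative values):
#
#   peck(4, 10.0, 4.0, 2.0) -> [10.0, 8.0, 6.0, 6.0]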
def peck(count, first, last=None, step=0.0):
pecks = []
peck = first
if (last == None) : last = first
for i in range(0,count):
pecks.append(peck)
if (peck - step > last) : peck -= step
return pecks
############################################################################
## Misc
def comment(text):
creator.comment(text)
def insert(text):
creator.insert(text)
def block_delete(on=False):
creator.block_delete(on)
def variable(id):
creator.variable(id)
def variable_set(id, value):
creator.variable_set(id, value)
def probe_single_point(point_along_edge_x=None, point_along_edge_y=None, depth=None, retracted_point_x=None, retracted_point_y=None, destination_point_x=None, destination_point_y=None, intersection_variable_x=None, intersection_variable_y=None, probe_offset_x_component=None, probe_offset_y_component=None ):
creator.probe_single_point(point_along_edge_x, point_along_edge_y, depth, retracted_point_x, retracted_point_y, destination_point_x, destination_point_y, intersection_variable_x, intersection_variable_y, probe_offset_x_component, probe_offset_y_component )
def probe_downward_point(x=None, y=None, depth=None, intersection_variable_z=None):
creator.probe_downward_point(x, y, depth, intersection_variable_z)
def report_probe_results(x1=None, y1=None, z1=None, x2=None, y2=None, z2=None, x3=None, y3=None, z3=None, x4=None, y4=None, z4=None, x5=None, y5=None, z5=None, x6=None, y6=None, z6=None, xml_file_name=None ):
creator.report_probe_results(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4, x5, y5, z5, x6, y6, z6, xml_file_name)
def open_log_file(xml_file_name=None ):
creator.open_log_file(xml_file_name)
def log_coordinate(x=None, y=None, z=None):
creator.log_coordinate(x, y, z)
def log_message(message=None):
creator.log_message(message)
def close_log_file():
creator.close_log_file()
def rapid_to_midpoint(x1=None, y1=None, z1=None, x2=None, y2=None, z2=None):
creator.rapid_to_midpoint(x1, y1, z1, x2, y2, z2)
def rapid_to_intersection(x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub):
creator.rapid_to_intersection(x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub)
def rapid_to_rotated_coordinate(x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final):
creator.rapid_to_rotated_coordinate(x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final)
def set_path_control_mode(mode, motion_blending_tolerance, naive_cam_tolerance ):
creator.set_path_control_mode(mode, motion_blending_tolerance, naive_cam_tolerance )
############################################################################
## NC code creator for additive machines like RepRap
def wipe():
creator.wipe()
def extruder_on():
creator.extruder_on()
def extruder_off():
creator.extruder_off()
def set_extruder_flowrate(flowrate):
creator.set_extruder_flowrate(flowrate)
def extruder_temp(temp=None):
creator.extruder_temp(temp)
def fan_on():
creator.fan_on()
def fan_off():
creator.fan_off()
def build_bed_temp(temp=None):
creator.build_bed_temp(temp)
def chamber_temp(temp=None):
creator.chamber_temp(temp)<|fim▁end|> |
def add_stock(type_name, params): |
<|file_name|>views__.py<|end_file_name|><|fim▁begin|># coding: utf-8
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.db.models import Q
from django.contrib.auth.models import User
from django.forms import ModelForm
from .models import Image, Album, Tag
def main(request):
"""Main listing."""
context = RequestContext(request)
albums = Album.objects.all()
if not request.user.is_authenticated():
albums = albums.filter(public=True)
paginator = Paginator(albums, 4)
try:
page = int(request.GET.get("page", '1'))
except ValueError:
page = 1
try:
albums = paginator.page(page)
except (InvalidPage, EmptyPage):
albums = paginator.page(paginator.num_pages)
for album in albums.object_list:
album.images = album.image_set.all()[:4]
#album.images = album.image_set.all()
context_dict = {'albums':albums}
return render_to_response("photo/list.html", context_dict, context)
def album(request, pk, view="thumbnails"):
"""Album listing."""
# Code without Slideshow
"""album = Album.objects.get(pk=pk)
if not album.public and not request.user.is_authenticated():
return HttpResponse("Error: you need to be logged in to view this album.")
images = album.image_set.all()
paginator = Paginator(images, 30)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
images = paginator.page(page)
except (InvalidPage, EmptyPage):
images = paginator.page(paginator.num_pages)"""
#Write another code for Slideshow realization
num_images = 30
if view == "full": num_images = 10
album = Album.objects.get(pk=pk)
images = album.image_set.all()
paginator = Paginator(images, num_images)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
images = paginator.page(page)
except (InvalidPage, EmptyPage):
images = paginator.page(paginator.num_pages)
<|fim▁hole|> # add list of tags as string and list of album objects to each image object
for img in images.object_list:
tags = [x[1] for x in img.tags.values_list()]
img.tag_lst = ", ".join(tags)
img.album_lst = [x[1] for x in img.albums.values_list()]
context = RequestContext(request)
context_dict = dict(album=album, images=images, view=view, albums=Album.objects.all())
#context_dict.update(csrf(request))
return render_to_response("photo/album.html", context_dict, context )
def image(request, pk):
"""Image page."""
img = Image.objects.get(pk=pk)
context = RequestContext(request)
    context_dict = dict(image=img, backurl=request.META.get("HTTP_REFERER", "/"))
return render_to_response("photo/image.html", context_dict, context)
def update(request):
"""Update image title, rating, tags, albums."""
p = request.POST
images = defaultdict(dict)
# create dictionary of properties for each image
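    # e.g. POST keys "title-7", "rating-7", "tags-7" and "album-7" are
    # regrouped per image pk into {'7': {'title': ..., 'rating': ...,
    # 'tags': ..., 'albums': [...]}} (the pk value 7 is illustrative)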
for k, v in p.items():
if k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
k, pk = k.split('-')
images[pk][k] = v
elif k.startswith("album"):
pk = k.split('-')[1]
images[pk]["albums"] = p.getlist(k)
# process properties, assign to image objects and save
for k, d in images.items():
image = Image.objects.get(pk=k)
image.title = d["title"]
image.rating = int(d["rating"])
# tags - assign or create if a new tag!
tags = d["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
lst.append(Tag.objects.get_or_create(tag=t)[0])
image.tags = lst
if "albums" in d:
image.albums = d["albums"]
image.save()
    return HttpResponseRedirect(request.META.get("HTTP_REFERER", "/"))
#@login_required
def search(request):
"""Search, filter, sort images."""
context = RequestContext(request)
context_dict = dict( albums=Album.objects.all(), authors=User.objects.all())
    # If this is the first visit via the Search link, just render the page without doing any filtering
if request.method == 'GET' and not request.GET.get("page"):
return render_to_response("photo/search.html", context_dict, context)
    # From this point on we are handling a POST or a GET(?page) request
try:
page = int(request.GET.get("page", '1'))
except ValueError:
page = 1
p = request.POST
images = defaultdict(dict)
# init parameters
parameters = {}
keys = ['title', 'filename', 'rating_from', 'rating_to', 'width_from',
'width_to', 'height_from', 'height_to', 'tags', 'view', 'user', 'sort', 'asc_desc']
for k in keys:
parameters[k] = ''
parameters["album"] = []
# create dictionary of properties for each image and a dict of search/filter parameters
for k, v in p.items():
if k == "album":
parameters[k] = [int(x) for x in p.getlist(k)]
elif k in parameters:
parameters[k] = v
elif k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
k, pk = k.split('-')
images[pk][k] = v
elif k.startswith("album"):
pk = k.split('-')[1]
images[pk]["albums"] = p.getlist(k)
# save or restore parameters from session
if page != 1 and "parameters" in request.session:
parameters = request.session["parameters"]
else:
request.session["parameters"] = parameters
results = update_and_filter(images, parameters)
# make paginator
paginator = Paginator(results, 20)
try:
results = paginator.page(page)
except (InvalidPage, EmptyPage):
results = paginator.page(paginator.num_pages)
# add list of tags as string and list of album names to each image object
for img in results.object_list:
tags = [x[1] for x in img.tags.values_list()]
img.tag_lst = ", ".join(tags)
img.album_lst = [x[1] for x in img.albums.values_list()]
context_dict['results'] = results
context_dict['prm'] = parameters
return render_to_response("photo/search.html", context_dict, context)
def update_and_filter(images, p):
"""Update image data if changed, filter results through parameters and return results list."""
# process properties, assign to image objects and save
for k, d in images.items():
image = Image.objects.get(pk=k)
image.title = d["title"]
image.rating = int(d["rating"])
# tags - assign or create if a new tag!
tags = d["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
lst.append(Tag.objects.get_or_create(tag=t)[0])
image.tags = lst
if "albums" in d:
image.albums = d["albums"]
image.save()
# filter results by parameters
results = Image.objects.all()
if p["title"] : results = results.filter(title__icontains=p["title"])
if p["filename"] : results = results.filter(image__icontains=p["filename"])
if p["rating_from"] : results = results.filter(rating__gte=int(p["rating_from"]))
if p["rating_to"] : results = results.filter(rating__lte=int(p["rating_to"]))
if p["width_from"] : results = results.filter(width__gte=int(p["width_from"]))
if p["width_to"] : results = results.filter(width__lte=int(p["width_to"]))
if p["height_from"] : results = results.filter(height__gte=int(p["height_from"]))
if p["height_to"] : results = results.filter(height__lte=int(p["height_to"]))
if p["tags"]:
tags = p["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
results = results.filter(tags=Tag.objects.get(tag=t))
if p["album"]:
lst = p["album"]
or_query = Q(albums=lst[0])
for album in lst[1:]:
or_query = or_query | Q(albums=album)
results = results.filter(or_query).distinct()
return results<|fim▁end|> | |
<|file_name|>PartialSortedKeyStatisticsAttributeIndexTest.java<|end_file_name|><|fim▁begin|>/**
* Copyright 2012-2015 Niall Gallagher
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.googlecode.cqengine.index.support;
import com.googlecode.cqengine.query.QueryFactory;
import com.googlecode.cqengine.testutil.Car;
import org.junit.Test;
import static com.googlecode.cqengine.query.QueryFactory.noQueryOptions;
import static org.mockito.Mockito.*;<|fim▁hole|>
/**
* Tests for {@link PartialSortedKeyStatisticsAttributeIndex}.
*
* @author niall.gallagher
*/
public class PartialSortedKeyStatisticsAttributeIndexTest {
@Test
public void testGetDistinctKeys1() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getDistinctKeys(noQueryOptions());
verify(backingIndex, times(1)).getDistinctKeys(noQueryOptions());
}
@Test
public void testGetDistinctKeys2() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getDistinctKeys(1, true, 2, true, noQueryOptions());
verify(backingIndex, times(1)).getDistinctKeys(1, true, 2, true, noQueryOptions());
}
@Test
public void testGetDistinctKeysDescending1() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getDistinctKeysDescending(noQueryOptions());
verify(backingIndex, times(1)).getDistinctKeysDescending(noQueryOptions());
}
@Test
public void testGetDistinctKeysDescending2() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getDistinctKeysDescending(1, true, 2, true, noQueryOptions());
verify(backingIndex, times(1)).getDistinctKeysDescending(1, true, 2, true, noQueryOptions());
}
@Test
public void testGetStatisticsForDistinctKeysDescending() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getStatisticsForDistinctKeysDescending(noQueryOptions());
verify(backingIndex, times(1)).getStatisticsForDistinctKeysDescending(noQueryOptions());
}
@Test
public void testGetKeysAndValues1() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getKeysAndValues(noQueryOptions());
verify(backingIndex, times(1)).getKeysAndValues(noQueryOptions());
}
@Test
public void testGetKeysAndValues2() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getKeysAndValues(1, true, 2, true, noQueryOptions());
verify(backingIndex, times(1)).getKeysAndValues(1, true, 2, true, noQueryOptions());
}
@Test
    public void testGetKeysAndValuesDescending1() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getKeysAndValuesDescending(noQueryOptions());
verify(backingIndex, times(1)).getKeysAndValuesDescending(noQueryOptions());
}
@Test
    public void testGetKeysAndValuesDescending2() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getKeysAndValuesDescending(1, true, 2, true, noQueryOptions());
verify(backingIndex, times(1)).getKeysAndValuesDescending(1, true, 2, true, noQueryOptions());
}
@Test
public void testGetCountForKey() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getCountForKey(1, noQueryOptions());
verify(backingIndex, times(1)).getCountForKey(1, noQueryOptions());
}
@Test
public void testGetCountOfDistinctKeys() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getCountOfDistinctKeys(noQueryOptions());
verify(backingIndex, times(1)).getCountOfDistinctKeys(noQueryOptions());
}
@Test
public void testGetStatisticsForDistinctKeys() {
SortedKeyStatisticsAttributeIndex<Integer, Car> backingIndex = mockBackingIndex();
PartialSortedKeyStatisticsAttributeIndex<Integer, Car> index = wrapWithPartialIndex(backingIndex);
index.getStatisticsForDistinctKeys(noQueryOptions());
verify(backingIndex, times(1)).getStatisticsForDistinctKeys(noQueryOptions());
}
static PartialSortedKeyStatisticsAttributeIndex<Integer, Car> wrapWithPartialIndex(final SortedKeyStatisticsAttributeIndex<Integer, Car> mockedBackingIndex) {
return new PartialSortedKeyStatisticsAttributeIndex<Integer, Car>(Car.CAR_ID, QueryFactory.between(Car.CAR_ID, 2, 5)) {
@Override
protected SortedKeyStatisticsAttributeIndex<Integer, Car> createBackingIndex() {
return mockedBackingIndex;
}
};
}
@SuppressWarnings("unchecked")
static SortedKeyStatisticsAttributeIndex<Integer, Car> mockBackingIndex() {
return mock(SortedKeyStatisticsAttributeIndex.class);
}
}<|fim▁end|> | |
<|file_name|>access_control_firerole.py<|end_file_name|><|fim▁begin|>## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Access Control FireRole."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
"""These functions are for realizing a firewall like role definition for extending
webaccess to connect user to roles using every infos about users.
"""
import re
import cPickle
from zlib import compress, decompress
import sys
import time
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.access_control_config import InvenioWebAccessFireroleError
from invenio.dbquery import run_sql, blob_to_string
from invenio.config import CFG_CERN_SITE
from invenio.access_control_config import CFG_ACC_EMPTY_ROLE_DEFINITION_SRC, \<|fim▁hole|>
def compile_role_definition(firerole_def_src):
""" Given a text in which every row contains a rule it returns the compiled
object definition.
Rules have the following syntax:
allow|deny [not] field {list of one or more (double)quoted string or regexp}
or allow|deny any
Every row may contain a # sign followed by a comment which are discarded.
Field could be any key contained in a user_info dictionary. If the key does
not exist in the dictionary, the rule is skipped.
The first rule which matches return.
"""
line = 0
ret = []
default_allow_p = False
if not firerole_def_src or not firerole_def_src.strip():
firerole_def_src = CFG_ACC_EMPTY_ROLE_DEFINITION_SRC
for row in firerole_def_src.split('\n'):
line += 1
row = row.strip()
if not row:
continue
clean_row = _no_comment_re.sub('', row)
if clean_row:
g = _any_rule_re.match(clean_row)
if g:
default_allow_p = g.group('command').lower() == 'allow'
break
g = _rule_re.match(clean_row)
if g:
allow_p = g.group('command').lower() == 'allow'
not_p = g.group('not') != None
field = g.group('field').lower()
# Renaming groups to group
for alias_item in _aliasTable:
if field in alias_item:
field = alias_item[0]
break
if field.startswith('precached_'):
raise InvenioWebAccessFireroleError("Error while compiling rule %s (line %s): %s is a reserved key and can not be used in FireRole rules!" % (row, line, field))
expressions = g.group('expression')+g.group('more_expressions')
expressions_list = []
for expr in _expressions_re.finditer(expressions):
expr = expr.group()
if field in ('from', 'until'):
try:
expressions_list.append((False, time.mktime(time.strptime(expr[1:-1], '%Y-%m-%d'))))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid date with format YYYY-MM-DD because %s!" % (row, line, expr, msg))
elif expr[0] == '/':
try:
expressions_list.append((True, re.compile(expr[1:-1], re.I)))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid re because %s!" % (row, line, expr, msg))
else:
if field == 'remote_ip' and '/' in expr[1:-1]:
try:
expressions_list.append((False, _ip_matcher_builder(expr[1:-1])))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid ip group because %s!" % (row, line, expr, msg))
else:
expressions_list.append((False, expr[1:-1]))
expressions_list = tuple(expressions_list)
if field in ('from', 'until'):
if len(expressions_list) != 1:
raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): exactly one date is expected when using 'from' or 'until', but %s were found" % (row, line, len(expressions_list)))
if not_p:
raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): 'not' is not allowed when using 'from' or 'until'" % (row, line))
ret.append((allow_p, not_p, field, expressions_list))
else:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): not a valid rule!" % (row, line))
return (default_allow_p, tuple(ret))
def repair_role_definitions():
""" Try to rebuild compiled serialized definitions from their respectives
sources. This is needed in case Python break back compatibility.
"""
definitions = run_sql("SELECT id, firerole_def_src FROM accROLE")
for role_id, firerole_def_src in definitions:
run_sql("UPDATE accROLE SET firerole_def_ser=%s WHERE id=%s", (serialize(compile_role_definition(firerole_def_src)), role_id))
def store_role_definition(role_id, firerole_def_ser, firerole_def_src):
""" Store a compiled serialized definition and its source in the database
    alongside the role to which it belongs.
@param role_id: the role_id
@param firerole_def_ser: the serialized compiled definition
@param firerole_def_src: the sources from which the definition was taken
"""
run_sql("UPDATE accROLE SET firerole_def_ser=%s, firerole_def_src=%s WHERE id=%s", (firerole_def_ser, firerole_def_src, role_id))
def load_role_definition(role_id):
""" Load the definition corresponding to a role. If the compiled definition
    is corrupted, it tries to repair the definitions from their sources and then
    tries again to return the definition.
@param role_id:
@return: a deserialized compiled role definition
"""
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
try:
return deserialize(res[0][0])
except Exception:
## Something bad might have happened? (Update of Python?)
repair_role_definitions()
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
return deserialize(res[0][0])
return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
def acc_firerole_extract_emails(firerole_def_obj):
"""
Best effort function to extract all the possible email addresses
authorized by the given firerole.
"""
authorized_emails = set()
try:
default_allow_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
if not_p:
continue
if field == 'group':
for reg_p, expr in expressions_list:
if reg_p:
continue
if CFG_CERN_SITE and expr.endswith(' [CERN]'):
                        authorized_emails.add(expr[:-len(' [CERN]')].lower().strip() + '@cern.ch')
emails = run_sql("SELECT user.email FROM usergroup JOIN user_usergroup ON usergroup.id=user_usergroup.id_usergroup JOIN user ON user.id=user_usergroup.id_user WHERE usergroup.name=%s", (expr, ))
for email in emails:
authorized_emails.add(email[0].lower().strip())
elif field == 'email':
for reg_p, expr in expressions_list:
if reg_p:
continue
authorized_emails.add(expr.lower().strip())
elif field == 'uid':
for reg_p, expr in expressions_list:
if reg_p:
continue
email = run_sql("SELECT email FROM user WHERE id=%s", (expr, ))
if email:
authorized_emails.add(email[0][0].lower().strip())
return authorized_emails
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
def acc_firerole_check_user(user_info, firerole_def_obj):
""" Given a user_info dictionary, it matches the rules inside the deserializez
compiled definition in order to discover if the current user match the roles
corresponding to this definition.
@param user_info: a dict produced by collect_user_info which contains every
info about a user
@param firerole_def_obj: a compiled deserialized definition produced by
compile_role_defintion
@return: True if the user match the definition, False otherwise.
"""
try:
default_allow_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
group_p = field == 'group' # Is it related to group?
ip_p = field == 'remote_ip' # Is it related to Ips?
until_p = field == 'until' # Is it related to dates?
from_p = field == 'from' # Idem.
next_expr_p = False # Silly flag to break 2 for cycles
if not user_info.has_key(field) and not from_p and not until_p:
continue
for reg_p, expr in expressions_list: # For every element in the rule
if group_p: # Special case: groups
if reg_p: # When it is a regexp
for group in user_info[field]: # iterate over every group
if expr.match(group): # if it matches
if not_p: # if must not match
next_expr_p = True # let's skip to next expr
break
else: # Ok!
return allow_p
if next_expr_p:
break # I said: let's skip to next rule ;-)
elif expr.lower() in [group.lower() for group in user_info[field]]: # Simple expression then just check for expr in groups
if not_p: # If expr is in groups then if must not match
break # let's skip to next expr
else: # Ok!
return allow_p
elif reg_p: # Not a group, then easier. If it's a regexp
if expr.match(user_info[field]): # if it matches
if not_p: # If must not match
break # Let's skip to next expr
else:
return allow_p # Ok!
elif ip_p and type(expr) == type(()): # If it's just a simple expression but an IP!
if _ipmatch(user_info['remote_ip'], expr): # Then if Ip matches
if not_p: # If must not match
break # let's skip to next expr
else:
return allow_p # ok!
elif until_p:
if time.time() <= expr:
if allow_p:
break
else:
return False
elif allow_p:
return False
else:
break
elif from_p:
if time.time() >= expr:
if allow_p:
break
else:
return False
elif allow_p:
return False
else:
break
elif expr.lower() == user_info[field].lower(): # Finally the easiest one!!
if not_p: # ...
break
else: # ...
return allow_p # ...
if not_p and not next_expr_p: # Nothing has matched and we got not
return allow_p # Then the whole rule matched!
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
    return default_allow_p # By default we allow ;-) it's an Open Access project
def serialize(firerole_def_obj):
""" Serialize and compress a definition."""
if firerole_def_obj == CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ:
return CFG_ACC_EMPTY_ROLE_DEFINITION_SER
elif firerole_def_obj:
return compress(cPickle.dumps(firerole_def_obj, -1))
else:
return CFG_ACC_EMPTY_ROLE_DEFINITION_SER
def deserialize(firerole_def_ser):
""" Deserialize and decompress a definition."""
if firerole_def_ser:
return cPickle.loads(decompress(blob_to_string(firerole_def_ser)))
else:
return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
# IMPLEMENTATION
# Comment finder
_no_comment_re = re.compile(r'[\s]*(?<!\\)#.*')
# Rule dissecter
_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(?:(?P<not>not)[\s]+)?(?P<field>[\w]+)[\s]+(?P<expression>(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/)(?P<more_expressions>([\s]*,[\s]*((?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/))*)(?:[\s]*(?<!\\).*)?', re.I)
_any_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(any|all)[\s]*', re.I)
# Sub expression finder
_expressions_re = re.compile(r'(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/')
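# Example (illustrative): for the row
#     allow not email "john@example.org", /.*@cern\.ch/ # some comment
# _no_comment_re strips the trailing comment, _rule_re captures
# command='allow', not='not', field='email' and the expression list, and
# _expressions_re then yields the quoted string and the regexp in turn.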
def _mkip (ip):
""" Compute a numerical value for a dotted IP """
num = 0L
for i in map (int, ip.split ('.')):
num = (num << 8) + i
return num
_full = 2L ** 32 - 1
_aliasTable = (('group', 'groups'), )
def _ip_matcher_builder(group):
""" Compile a string "ip/bitmask" (i.e. 127.0.0.0/24)
@param group: a classical "ip/bitmask" string
@return: a tuple containing the gip and mask in a binary version.
"""
gip, gmk = group.split('/')
gip = _mkip(gip)
gmk = int(gmk)
mask = (_full - (2L ** (32 - gmk) - 1))
if not (gip & mask == gip):
raise InvenioWebAccessFireroleError, "Netmask does not match IP (%Lx %Lx)" % (gip, mask)
return (gip, mask)
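# Worked example (values are illustrative): _ip_matcher_builder('192.168.1.0/24')
# returns (0xc0a80100L, 0xffffff00L), and _ipmatch('192.168.1.42', that_tuple)
# is True because 0xc0a8012a & 0xffffff00 == 0xc0a80100.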
def _ipmatch(ip, ip_matcher):
""" Check if an ip matches an ip_group.
@param ip: the ip to check
@param ip_matcher: a compiled ip_group produced by ip_matcher_builder
@return: True if ip matches, False otherwise
"""
return _mkip(ip) & ip_matcher[1] == ip_matcher[0]<|fim▁end|> | CFG_ACC_EMPTY_ROLE_DEFINITION_SER, CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
# INTERFACE |
<|file_name|>all_test.go<|end_file_name|><|fim▁begin|>// Copyright 2014 The b Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package b
import (
"bytes"
"fmt"
"io"
"math"
"path"
"runtime"
"runtime/debug"
"strings"
"testing"
"github.com/flynn/flynn/Godeps/_workspace/src/github.com/cznic/mathutil"
"github.com/flynn/flynn/Godeps/_workspace/src/github.com/cznic/strutil"
)
var caller = func(s string, va ...interface{}) {
_, fn, fl, _ := runtime.Caller(2)
fmt.Printf("%s:%d: ", path.Base(fn), fl)
fmt.Printf(s, va...)
fmt.Println()
}
func dbg(s string, va ...interface{}) {
if s == "" {
s = strings.Repeat("%v ", len(va))
}
_, fn, fl, _ := runtime.Caller(1)
fmt.Printf("%s:%d: ", path.Base(fn), fl)
fmt.Printf(s, va...)
fmt.Println()
}
func TODO(...interface{}) string {
_, fn, fl, _ := runtime.Caller(1)
return fmt.Sprintf("TODO: %s:%d:\n", path.Base(fn), fl)
}
func use(...interface{}) {}
// ============================================================================
func isNil(p interface{}) bool {
switch x := p.(type) {
case *x:
if x == nil {
return true
}
case *d:
if x == nil {
return true
}
}
return false
}
func (t *Tree) dump() string {
var buf bytes.Buffer
f := strutil.IndentFormatter(&buf, "\t")
num := map[interface{}]int{}
visited := map[interface{}]bool{}
handle := func(p interface{}) int {
if isNil(p) {
return 0
}
if n, ok := num[p]; ok {
return n
}
n := len(num) + 1
num[p] = n
return n
}
var pagedump func(interface{}, string)
pagedump = func(p interface{}, pref string) {
if isNil(p) || visited[p] {
return
}
visited[p] = true
switch x := p.(type) {
case *x:
h := handle(p)
n := 0
for i, v := range x.x {
if v.ch != nil || v.k != nil {
n = i + 1
}
}
f.Format("%sX#%d(%p) n %d:%d {", pref, h, x, x.c, n)
a := []interface{}{}
for i, v := range x.x[:n] {
a = append(a, v.ch)
if i != 0 {
f.Format(" ")
}
f.Format("(C#%d K %v)", handle(v.ch), v.k)
}
f.Format("}\n")
for _, p := range a {
pagedump(p, pref+". ")
}
case *d:
h := handle(p)
n := 0
for i, v := range x.d {
if v.k != nil || v.v != nil {
n = i + 1
}
}
f.Format("%sD#%d(%p) P#%d N#%d n %d:%d {", pref, h, x, handle(x.p), handle(x.n), x.c, n)
for i, v := range x.d[:n] {
if i != 0 {
f.Format(" ")
}
f.Format("%v:%v", v.k, v.v)
}
f.Format("}\n")
}
}
pagedump(t.r, "")
s := buf.String()
if s != "" {
s = s[:len(s)-1]
}
return s
}
func rng() *mathutil.FC32 {
x, err := mathutil.NewFC32(math.MinInt32/4, math.MaxInt32/4, false)
if err != nil {
panic(err)
}
return x
}
func cmp(a, b interface{}) int {
return a.(int) - b.(int)
}
func TestGet0(t *testing.T) {
r := TreeNew(cmp)
if g, e := r.Len(), 0; g != e {
t.Fatal(g, e)
}
_, ok := r.Get(42)
if ok {
t.Fatal(ok)
}
}
func TestSetGet0(t *testing.T) {
r := TreeNew(cmp)
set := r.Set
set(42, 314)
if g, e := r.Len(), 1; g != e {
t.Fatal(g, e)
}
v, ok := r.Get(42)
if !ok {
t.Fatal(ok)
}
if g, e := v.(int), 314; g != e {
t.Fatal(g, e)
}
set(42, 278)
if g, e := r.Len(), 1; g != e {
t.Fatal(g, e)
}
v, ok = r.Get(42)
if !ok {
t.Fatal(ok)
}
if g, e := v.(int), 278; g != e {
t.Fatal(g, e)
}
set(420, 0.5)
if g, e := r.Len(), 2; g != e {
t.Fatal(g, e)
}
v, ok = r.Get(42)
if !ok {
t.Fatal(ok)
}
if g, e := v.(int), 278; g != e {
t.Fatal(g, e)
}
v, ok = r.Get(420)
if !ok {
t.Fatal(ok)
}
if g, e := v.(float64), 0.5; g != e {
t.Fatal(g, e)
}
}<|fim▁hole|>
func TestSetGet1(t *testing.T) {
const N = 40000
for _, x := range []int{0, -1, 0x555555, 0xaaaaaa, 0x333333, 0xcccccc, 0x314159} {
r := TreeNew(cmp)
set := r.Set
a := make([]int, N)
for i := range a {
a[i] = (i ^ x) << 1
}
for i, k := range a {
set(k, k^x)
if g, e := r.Len(), i+1; g != e {
t.Fatal(i, g, e)
}
}
for i, k := range a {
v, ok := r.Get(k)
if !ok {
t.Fatal(i, k, v, ok)
}
if g, e := v.(int), k^x; g != e {
t.Fatal(i, g, e)
}
k |= 1
_, ok = r.Get(k)
if ok {
t.Fatal(i, k)
}
}
for _, k := range a {
r.Set(k, (k^x)+42)
}
for i, k := range a {
v, ok := r.Get(k)
if !ok {
t.Fatal(i, k, v, ok)
}
if g, e := v.(int), k^x+42; g != e {
t.Fatal(i, g, e)
}
k |= 1
_, ok = r.Get(k)
if ok {
t.Fatal(i, k)
}
}
}
}
func TestPrealloc(*testing.T) {
const n = 2e6
rng := rng()
a := make([]int, n)
for i := range a {
a[i] = rng.Next()
}
r := TreeNew(cmp)
for _, v := range a {
r.Set(v, 0)
}
r.Close()
}
func BenchmarkSetSeq1e3(b *testing.B) {
benchmarkSetSeq(b, 1e3)
}
func BenchmarkSetSeq1e4(b *testing.B) {
benchmarkSetSeq(b, 1e4)
}
func BenchmarkSetSeq1e5(b *testing.B) {
benchmarkSetSeq(b, 1e5)
}
func BenchmarkSetSeq1e6(b *testing.B) {
benchmarkSetSeq(b, 1e6)
}
func benchmarkSetSeq(b *testing.B, n int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
r := TreeNew(cmp)
debug.FreeOSMemory()
b.StartTimer()
for j := 0; j < n; j++ {
r.Set(j, j)
}
b.StopTimer()
r.Close()
}
b.StopTimer()
}
func BenchmarkGetSeq1e3(b *testing.B) {
benchmarkGetSeq(b, 1e3)
}
func BenchmarkGetSeq1e4(b *testing.B) {
benchmarkGetSeq(b, 1e4)
}
func BenchmarkGetSeq1e5(b *testing.B) {
benchmarkGetSeq(b, 1e5)
}
func BenchmarkGetSeq1e6(b *testing.B) {
benchmarkGetSeq(b, 1e6)
}
func benchmarkGetSeq(b *testing.B, n int) {
r := TreeNew(cmp)
for i := 0; i < n; i++ {
r.Set(i, i)
}
debug.FreeOSMemory()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j := 0; j < n; j++ {
r.Get(j)
}
}
b.StopTimer()
r.Close()
}
func BenchmarkSetRnd1e3(b *testing.B) {
benchmarkSetRnd(b, 1e3)
}
func BenchmarkSetRnd1e4(b *testing.B) {
benchmarkSetRnd(b, 1e4)
}
func BenchmarkSetRnd1e5(b *testing.B) {
benchmarkSetRnd(b, 1e5)
}
func BenchmarkSetRnd1e6(b *testing.B) {
benchmarkSetRnd(b, 1e6)
}
func benchmarkSetRnd(b *testing.B, n int) {
rng := rng()
a := make([]int, n)
for i := range a {
a[i] = rng.Next()
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
r := TreeNew(cmp)
debug.FreeOSMemory()
b.StartTimer()
for _, v := range a {
r.Set(v, 0)
}
b.StopTimer()
r.Close()
}
b.StopTimer()
}
func BenchmarkGetRnd1e3(b *testing.B) {
benchmarkGetRnd(b, 1e3)
}
func BenchmarkGetRnd1e4(b *testing.B) {
benchmarkGetRnd(b, 1e4)
}
func BenchmarkGetRnd1e5(b *testing.B) {
benchmarkGetRnd(b, 1e5)
}
func BenchmarkGetRnd1e6(b *testing.B) {
benchmarkGetRnd(b, 1e6)
}
func benchmarkGetRnd(b *testing.B, n int) {
r := TreeNew(cmp)
rng := rng()
a := make([]int, n)
for i := range a {
a[i] = rng.Next()
}
for _, v := range a {
r.Set(v, 0)
}
debug.FreeOSMemory()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, v := range a {
r.Get(v)
}
}
b.StopTimer()
r.Close()
}
func TestSetGet2(t *testing.T) {
const N = 40000
for _, x := range []int{0, -1, 0x555555, 0xaaaaaa, 0x333333, 0xcccccc, 0x314159} {
rng := rng()
r := TreeNew(cmp)
set := r.Set
a := make([]int, N)
for i := range a {
a[i] = (rng.Next() ^ x) << 1
}
for i, k := range a {
set(k, k^x)
if g, e := r.Len(), i+1; g != e {
t.Fatal(i, x, g, e)
}
}
for i, k := range a {
v, ok := r.Get(k)
if !ok {
t.Fatal(i, k, v, ok)
}
if g, e := v.(int), k^x; g != e {
t.Fatal(i, g, e)
}
k |= 1
_, ok = r.Get(k)
if ok {
t.Fatal(i, k)
}
}
for _, k := range a {
r.Set(k, (k^x)+42)
}
for i, k := range a {
v, ok := r.Get(k)
if !ok {
t.Fatal(i, k, v, ok)
}
if g, e := v.(int), k^x+42; g != e {
t.Fatal(i, g, e)
}
k |= 1
_, ok = r.Get(k)
if ok {
t.Fatal(i, k)
}
}
}
}
func TestSetGet3(t *testing.T) {
r := TreeNew(cmp)
set := r.Set
var i int
for i = 0; ; i++ {
set(i, -i)
if _, ok := r.r.(*x); ok {
break
}
}
for j := 0; j <= i; j++ {
set(j, j)
}
for j := 0; j <= i; j++ {
v, ok := r.Get(j)
if !ok {
t.Fatal(j)
}
if g, e := v.(int), j; g != e {
t.Fatal(g, e)
}
}
}
func TestDelete0(t *testing.T) {
r := TreeNew(cmp)
if ok := r.Delete(0); ok {
t.Fatal(ok)
}
if g, e := r.Len(), 0; g != e {
t.Fatal(g, e)
}
r.Set(0, 0)
if ok := r.Delete(1); ok {
t.Fatal(ok)
}
if g, e := r.Len(), 1; g != e {
t.Fatal(g, e)
}
if ok := r.Delete(0); !ok {
t.Fatal(ok)
}
if g, e := r.Len(), 0; g != e {
t.Fatal(g, e)
}
if ok := r.Delete(0); ok {
t.Fatal(ok)
}
r.Set(0, 0)
r.Set(1, 1)
if ok := r.Delete(1); !ok {
t.Fatal(ok)
}
if g, e := r.Len(), 1; g != e {
t.Fatal(g, e)
}
if ok := r.Delete(1); ok {
t.Fatal(ok)
}
if ok := r.Delete(0); !ok {
t.Fatal(ok)
}
if g, e := r.Len(), 0; g != e {
t.Fatal(g, e)
}
if ok := r.Delete(0); ok {
t.Fatal(ok)
}
r.Set(0, 0)
r.Set(1, 1)
if ok := r.Delete(0); !ok {
t.Fatal(ok)
}
if g, e := r.Len(), 1; g != e {
t.Fatal(g, e)
}
if ok := r.Delete(0); ok {
t.Fatal(ok)
}
if ok := r.Delete(1); !ok {
t.Fatal(ok)
}
if g, e := r.Len(), 0; g != e {
t.Fatal(g, e)
}
if ok := r.Delete(1); ok {
t.Fatal(ok)
}
}
func TestDelete1(t *testing.T) {
const N = 130000
for _, x := range []int{0, -1, 0x555555, 0xaaaaaa, 0x333333, 0xcccccc, 0x314159} {
r := TreeNew(cmp)
set := r.Set
a := make([]int, N)
for i := range a {
a[i] = (i ^ x) << 1
}
for _, k := range a {
set(k, 0)
}
for i, k := range a {
ok := r.Delete(k)
if !ok {
t.Fatal(i, x, k)
}
if g, e := r.Len(), N-i-1; g != e {
t.Fatal(i, g, e)
}
}
}
}
func BenchmarkDelSeq1e3(b *testing.B) {
benchmarkDelSeq(b, 1e3)
}
func BenchmarkDelSeq1e4(b *testing.B) {
benchmarkDelSeq(b, 1e4)
}
func BenchmarkDelSeq1e5(b *testing.B) {
benchmarkDelSeq(b, 1e5)
}
func BenchmarkDelSeq1e6(b *testing.B) {
benchmarkDelSeq(b, 1e6)
}
func benchmarkDelSeq(b *testing.B, n int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
r := TreeNew(cmp)
for i := 0; i < n; i++ {
r.Set(i, i)
}
debug.FreeOSMemory()
b.StartTimer()
for j := 0; j < n; j++ {
r.Delete(j)
}
}
b.StopTimer()
}
func BenchmarkDelRnd1e3(b *testing.B) {
benchmarkDelRnd(b, 1e3)
}
func BenchmarkDelRnd1e4(b *testing.B) {
benchmarkDelRnd(b, 1e4)
}
func BenchmarkDelRnd1e5(b *testing.B) {
benchmarkDelRnd(b, 1e5)
}
func BenchmarkDelRnd1e6(b *testing.B) {
benchmarkDelRnd(b, 1e6)
}
func benchmarkDelRnd(b *testing.B, n int) {
rng := rng()
a := make([]int, n)
for i := range a {
a[i] = rng.Next()
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
r := TreeNew(cmp)
for _, v := range a {
r.Set(v, 0)
}
debug.FreeOSMemory()
b.StartTimer()
for _, v := range a {
r.Delete(v)
}
b.StopTimer()
r.Close()
}
b.StopTimer()
}
func TestDelete2(t *testing.T) {
const N = 100000
for _, x := range []int{0, -1, 0x555555, 0xaaaaaa, 0x333333, 0xcccccc, 0x314159} {
r := TreeNew(cmp)
set := r.Set
a := make([]int, N)
rng := rng()
for i := range a {
a[i] = (rng.Next() ^ x) << 1
}
for _, k := range a {
set(k, 0)
}
for i, k := range a {
ok := r.Delete(k)
if !ok {
t.Fatal(i, x, k)
}
if g, e := r.Len(), N-i-1; g != e {
t.Fatal(i, g, e)
}
}
}
}
func TestEnumeratorNext(t *testing.T) {
// seeking within 3 keys: 10, 20, 30
table := []struct {
k int
hit bool
keys []int
}{
{5, false, []int{10, 20, 30}},
{10, true, []int{10, 20, 30}},
{15, false, []int{20, 30}},
{20, true, []int{20, 30}},
{25, false, []int{30}},
{30, true, []int{30}},
{35, false, []int{}},
}
for i, test := range table {
up := test.keys
r := TreeNew(cmp)
r.Set(10, 100)
r.Set(20, 200)
r.Set(30, 300)
for verChange := 0; verChange < 16; verChange++ {
en, hit := r.Seek(test.k)
if g, e := hit, test.hit; g != e {
t.Fatal(i, g, e)
}
j := 0
for {
if verChange&(1<<uint(j)) != 0 {
r.Set(20, 200)
}
k, v, err := en.Next()
if err != nil {
if err != io.EOF {
t.Fatal(i, err)
}
break
}
if j >= len(up) {
t.Fatal(i, j, verChange)
}
if g, e := k.(int), up[j]; g != e {
t.Fatal(i, j, verChange, g, e)
}
if g, e := v.(int), 10*up[j]; g != e {
t.Fatal(i, g, e)
}
j++
}
if g, e := j, len(up); g != e {
t.Fatal(i, j, g, e)
}
}
}
}
func TestEnumeratorPrev(t *testing.T) {
// seeking within 3 keys: 10, 20, 30
table := []struct {
k int
hit bool
keys []int
}{
{5, false, []int{10}},
{10, true, []int{10}},
{15, false, []int{20, 10}},
{20, true, []int{20, 10}},
{25, false, []int{30, 20, 10}},
{30, true, []int{30, 20, 10}},
{35, false, []int{}},
}
for i, test := range table {
dn := test.keys
r := TreeNew(cmp)
r.Set(10, 100)
r.Set(20, 200)
r.Set(30, 300)
for verChange := 0; verChange < 16; verChange++ {
en, hit := r.Seek(test.k)
if g, e := hit, test.hit; g != e {
t.Fatal(i, g, e)
}
j := 0
for {
if verChange&(1<<uint(j)) != 0 {
r.Set(20, 200)
}
k, v, err := en.Prev()
if err != nil {
if err != io.EOF {
t.Fatal(i, err)
}
break
}
if j >= len(dn) {
t.Fatal(i, j, verChange)
}
if g, e := k.(int), dn[j]; g != e {
t.Fatal(i, j, verChange, g, e)
}
if g, e := v.(int), 10*dn[j]; g != e {
t.Fatal(i, g, e)
}
j++
}
if g, e := j, len(dn); g != e {
t.Fatal(i, j, g, e)
}
}
}
}
func BenchmarkSeekSeq1e3(b *testing.B) {
benchmarkSeekSeq(b, 1e3)
}
func BenchmarkSeekSeq1e4(b *testing.B) {
benchmarkSeekSeq(b, 1e4)
}
func BenchmarkSeekSeq1e5(b *testing.B) {
benchmarkSeekSeq(b, 1e5)
}
func BenchmarkSeekSeq1e6(b *testing.B) {
benchmarkSeekSeq(b, 1e6)
}
func benchmarkSeekSeq(b *testing.B, n int) {
for i := 0; i < b.N; i++ {
b.StopTimer()
t := TreeNew(cmp)
for j := 0; j < n; j++ {
t.Set(j, 0)
}
debug.FreeOSMemory()
b.StartTimer()
for j := 0; j < n; j++ {
e, _ := t.Seek(j)
e.Close()
}
b.StopTimer()
t.Close()
}
b.StopTimer()
}
func BenchmarkSeekRnd1e3(b *testing.B) {
benchmarkSeekRnd(b, 1e3)
}
func BenchmarkSeekRnd1e4(b *testing.B) {
benchmarkSeekRnd(b, 1e4)
}
func BenchmarkSeekRnd1e5(b *testing.B) {
benchmarkSeekRnd(b, 1e5)
}
func BenchmarkSeekRnd1e6(b *testing.B) {
benchmarkSeekRnd(b, 1e6)
}
func benchmarkSeekRnd(b *testing.B, n int) {
r := TreeNew(cmp)
rng := rng()
a := make([]int, n)
for i := range a {
a[i] = rng.Next()
}
for _, v := range a {
r.Set(v, 0)
}
debug.FreeOSMemory()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, v := range a {
e, _ := r.Seek(v)
e.Close()
}
}
b.StopTimer()
r.Close()
}
func BenchmarkNext1e3(b *testing.B) {
benchmarkNext(b, 1e3)
}
func BenchmarkNext1e4(b *testing.B) {
benchmarkNext(b, 1e4)
}
func BenchmarkNext1e5(b *testing.B) {
benchmarkNext(b, 1e5)
}
func BenchmarkNext1e6(b *testing.B) {
benchmarkNext(b, 1e6)
}
func benchmarkNext(b *testing.B, n int) {
t := TreeNew(cmp)
for i := 0; i < n; i++ {
t.Set(i, 0)
}
debug.FreeOSMemory()
b.ResetTimer()
for i := 0; i < b.N; i++ {
en, err := t.SeekFirst()
if err != nil {
b.Fatal(err)
}
m := 0
for {
if _, _, err = en.Next(); err != nil {
break
}
m++
}
if m != n {
b.Fatal(m)
}
}
b.StopTimer()
t.Close()
}
func BenchmarkPrev1e3(b *testing.B) {
benchmarkPrev(b, 1e3)
}
func BenchmarkPrev1e4(b *testing.B) {
benchmarkPrev(b, 1e4)
}
func BenchmarkPrev1e5(b *testing.B) {
benchmarkPrev(b, 1e5)
}
func BenchmarkPrev1e6(b *testing.B) {
benchmarkPrev(b, 1e6)
}
func benchmarkPrev(b *testing.B, n int) {
t := TreeNew(cmp)
for i := 0; i < n; i++ {
t.Set(i, 0)
}
debug.FreeOSMemory()
b.ResetTimer()
for i := 0; i < b.N; i++ {
en, err := t.SeekLast()
if err != nil {
b.Fatal(err)
}
m := 0
for {
if _, _, err = en.Prev(); err != nil {
break
}
m++
}
if m != n {
b.Fatal(m)
}
}
	b.StopTimer()
	t.Close()
}
func TestSeekFirst0(t *testing.T) {
b := TreeNew(cmp)
_, err := b.SeekFirst()
if g, e := err, io.EOF; g != e {
t.Fatal(g, e)
}
}
func TestSeekFirst1(t *testing.T) {
b := TreeNew(cmp)
b.Set(1, 10)
en, err := b.SeekFirst()
if err != nil {
t.Fatal(err)
}
k, v, err := en.Next()
if k != 1 || v != 10 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Next()
if err == nil {
t.Fatal(k, v, err)
}
}
func TestSeekFirst2(t *testing.T) {
b := TreeNew(cmp)
b.Set(1, 10)
b.Set(2, 20)
en, err := b.SeekFirst()
if err != nil {
t.Fatal(err)
}
k, v, err := en.Next()
if k != 1 || v != 10 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Next()
if k != 2 || v != 20 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Next()
if err == nil {
t.Fatal(k, v, err)
}
}
func TestSeekFirst3(t *testing.T) {
b := TreeNew(cmp)
b.Set(2, 20)
b.Set(3, 30)
b.Set(1, 10)
en, err := b.SeekFirst()
if err != nil {
t.Fatal(err)
}
k, v, err := en.Next()
if k != 1 || v != 10 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Next()
if k != 2 || v != 20 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Next()
if k != 3 || v != 30 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Next()
if err == nil {
t.Fatal(k, v, err)
}
}
func TestSeekLast0(t *testing.T) {
b := TreeNew(cmp)
_, err := b.SeekLast()
if g, e := err, io.EOF; g != e {
t.Fatal(g, e)
}
}
func TestSeekLast1(t *testing.T) {
b := TreeNew(cmp)
b.Set(1, 10)
en, err := b.SeekLast()
if err != nil {
t.Fatal(err)
}
k, v, err := en.Prev()
if k != 1 || v != 10 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Prev()
if err == nil {
t.Fatal(k, v, err)
}
}
func TestSeekLast2(t *testing.T) {
b := TreeNew(cmp)
b.Set(1, 10)
b.Set(2, 20)
en, err := b.SeekLast()
if err != nil {
t.Fatal(err)
}
k, v, err := en.Prev()
if k != 2 || v != 20 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Prev()
if k != 1 || v != 10 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Prev()
if err == nil {
t.Fatal(k, v, err)
}
}
func TestSeekLast3(t *testing.T) {
b := TreeNew(cmp)
b.Set(2, 20)
b.Set(3, 30)
b.Set(1, 10)
en, err := b.SeekLast()
if err != nil {
t.Fatal(err)
}
k, v, err := en.Prev()
if k != 3 || v != 30 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Prev()
if k != 2 || v != 20 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Prev()
if k != 1 || v != 10 || err != nil {
t.Fatal(k, v, err)
}
k, v, err = en.Prev()
if err == nil {
t.Fatal(k, v, err)
}
}
func TestPut(t *testing.T) {
tab := []struct {
pre []int // even index: K, odd index: V
newK int // Put(newK, ...
oldV int // Put()->oldV
exists bool // upd(exists)
write bool // upd()->write
post []int // even index: K, odd index: V
}{
// 0
{
[]int{},
1, 0, false, false,
[]int{},
},
{
[]int{},
1, 0, false, true,
[]int{1, -1},
},
{
[]int{1, 10},
0, 0, false, false,
[]int{1, 10},
},
{
[]int{1, 10},
0, 0, false, true,
[]int{0, -1, 1, 10},
},
{
[]int{1, 10},
1, 10, true, false,
[]int{1, 10},
},
// 5
{
[]int{1, 10},
1, 10, true, true,
[]int{1, -1},
},
{
[]int{1, 10},
2, 0, false, false,
[]int{1, 10},
},
{
[]int{1, 10},
2, 0, false, true,
[]int{1, 10, 2, -1},
},
}
for iTest, test := range tab {
tr := TreeNew(cmp)
for i := 0; i < len(test.pre); i += 2 {
k, v := test.pre[i], test.pre[i+1]
tr.Set(k, v)
}
oldV, written := tr.Put(test.newK, func(old interface{}, exists bool) (newV interface{}, write bool) {
if g, e := exists, test.exists; g != e {
t.Fatal(iTest, g, e)
}
if exists {
if g, e := old.(int), test.oldV; g != e {
t.Fatal(iTest, g, e)
}
}
return -1, test.write
})
if test.exists {
if g, e := oldV.(int), test.oldV; g != e {
t.Fatal(iTest, g, e)
}
}
if g, e := written, test.write; g != e {
t.Fatal(iTest, g, e)
}
n := len(test.post)
en, err := tr.SeekFirst()
if err != nil {
if n == 0 && err == io.EOF {
continue
}
t.Fatal(iTest, err)
}
for i := 0; i < len(test.post); i += 2 {
k, v, err := en.Next()
if err != nil {
t.Fatal(iTest, err)
}
if g, e := k.(int), test.post[i]; g != e {
t.Fatal(iTest, g, e)
}
if g, e := v.(int), test.post[i+1]; g != e {
t.Fatal(iTest, g, e)
}
}
_, _, err = en.Next()
if g, e := err, io.EOF; g != e {
t.Fatal(iTest, g, e)
}
}
}<|fim▁end|> | |
<|file_name|>test.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|>var defaultEnvConfig = require('./default');
module.exports = {
db: {
uri: process.env.MONGOHQ_URL || process.env.MONGODB_URI || 'mongodb://' + (process.env.DB_1_PORT_27017_TCP_ADDR || 'localhost') + '/flipflop-test',
options: {
user: '',
pass: ''
},
// Enable mongoose debug mode
debug: process.env.MONGODB_DEBUG || false
},
log: {
// logging with Morgan - https://github.com/expressjs/morgan
// Can specify one of 'combined', 'common', 'dev', 'short', 'tiny'
// format: 'dev'
// fileLogger: {
// directoryPath: process.cwd(),
// fileName: 'app.log',
// maxsize: 10485760,
// maxFiles: 2,
// json: false
// }
},
port: process.env.PORT || 3001,
app: {
title: defaultEnvConfig.app.title + ' - Test Environment'
},
uploads: {
profile: {
image: {
dest: './modules/users/client/img/profile/uploads/',
limits: {
fileSize: 100000 // Limit filesize (100kb) for testing purposes
}
}
}
},
facebook: {
clientID: process.env.FACEBOOK_ID || 'APP_ID',
clientSecret: process.env.FACEBOOK_SECRET || 'APP_SECRET',
callbackURL: '/api/auth/facebook/callback'
},
twitter: {
username: '@TWITTER_USERNAME',
clientID: process.env.TWITTER_KEY || 'CONSUMER_KEY',
clientSecret: process.env.TWITTER_SECRET || 'CONSUMER_SECRET',
callbackURL: '/api/auth/twitter/callback'
},
google: {
clientID: process.env.GOOGLE_ID || 'APP_ID',
clientSecret: process.env.GOOGLE_SECRET || 'APP_SECRET',
callbackURL: '/api/auth/google/callback'
},
linkedin: {
clientID: process.env.LINKEDIN_ID || 'APP_ID',
clientSecret: process.env.LINKEDIN_SECRET || 'APP_SECRET',
callbackURL: '/api/auth/linkedin/callback'
},
github: {
clientID: process.env.GITHUB_ID || 'APP_ID',
clientSecret: process.env.GITHUB_SECRET || 'APP_SECRET',
callbackURL: '/api/auth/github/callback'
},
paypal: {
clientID: process.env.PAYPAL_ID || 'CLIENT_ID',
clientSecret: process.env.PAYPAL_SECRET || 'CLIENT_SECRET',
callbackURL: '/api/auth/paypal/callback',
sandbox: true
},
mailer: {
from: process.env.MAILER_FROM || 'MAILER_FROM',
options: {
service: process.env.MAILER_SERVICE_PROVIDER || 'MAILER_SERVICE_PROVIDER',
auth: {
user: process.env.MAILER_EMAIL_ID || 'MAILER_EMAIL_ID',
pass: process.env.MAILER_PASSWORD || 'MAILER_PASSWORD'
}
}
},
seedDB: {
seed: process.env.MONGO_SEED === 'true',
options: {
logResults: process.env.MONGO_SEED_LOG_RESULTS !== 'false',
seedUser: {
username: process.env.MONGO_SEED_USER_USERNAME || 'seeduser',
provider: 'local',
email: process.env.MONGO_SEED_USER_EMAIL || '[email protected]',
firstName: 'User',
lastName: 'Local',
displayName: 'User Local',
roles: ['user']
},
seedAdmin: {
username: process.env.MONGO_SEED_ADMIN_USERNAME || 'seedadmin',
provider: 'local',
email: process.env.MONGO_SEED_ADMIN_EMAIL || '[email protected]',
firstName: 'Admin',
lastName: 'Local',
displayName: 'Admin Local',
roles: ['user', 'admin']
}
}
}
};<|fim▁end|> | |
<|file_name|>SlotFake.java<|end_file_name|><|fim▁begin|>/*
* This file is part of TechReborn, licensed under the MIT License (MIT).
*
* Copyright (c) 2020 TechReborn
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:<|fim▁hole|> * The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package reborncore.client.gui.slots;
import net.minecraft.inventory.Inventory;
import net.minecraft.item.ItemStack;
public class SlotFake extends BaseSlot {
public boolean mCanInsertItem;
public boolean mCanStackItem;
public int mMaxStacksize = 127;
public SlotFake(Inventory itemHandler, int par2, int par3, int par4, boolean aCanInsertItem,
boolean aCanStackItem, int aMaxStacksize) {
super(itemHandler, par2, par3, par4);
this.mCanInsertItem = aCanInsertItem;
this.mCanStackItem = aCanStackItem;
this.mMaxStacksize = aMaxStacksize;
}
@Override
public boolean canInsert(ItemStack par1ItemStack) {
return this.mCanInsertItem;
}
@Override
public int getMaxStackAmount() {
return this.mMaxStacksize;
}
@Override
public boolean hasStack() {
return false;
}
@Override
public ItemStack takeStack(int par1) {
return !this.mCanStackItem ? ItemStack.EMPTY : super.takeStack(par1);
}
@Override
public boolean canWorldBlockRemove() {
return false;
}
}<|fim▁end|> | * |
<|file_name|>uname.rs<|end_file_name|><|fim▁begin|>#![crate_name = "uu_uname"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Joao Oliveira <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: uname (GNU coreutils) 8.21 */
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use std::ffi::CStr;
use std::io::Write;
use std::mem::uninitialized;
use uucore::c_types::utsname;
struct Uts {
sysname: String,
nodename: String,
release: String,
version: String,
machine: String
}
extern {
fn uname(uts: *mut utsname);
}
unsafe fn string_from_c_str(ptr: *const i8) -> String {
String::from_utf8_lossy(CStr::from_ptr(ptr as *const std::os::raw::c_char).to_bytes()).to_string()
}
unsafe fn getuname() -> Uts {
let mut uts: utsname = uninitialized();
uname(&mut uts);
Uts {
sysname: string_from_c_str(uts.sysname.as_ptr() as *const i8),
nodename: string_from_c_str(uts.nodename.as_ptr() as *const i8),
release: string_from_c_str(uts.release.as_ptr() as *const i8),
version: string_from_c_str(uts.version.as_ptr() as *const i8),
machine: string_from_c_str(uts.machine.as_ptr() as *const i8)
}
}
static NAME: &'static str = "uname";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("a", "all", "Behave as though all of the options -mnrsv were specified.");
opts.optflag("m", "machine", "print the machine hardware name.");
opts.optflag("n", "nodename", "print the nodename (the nodename may be a name that the system is known by to a communications network).");
opts.optflag("p", "processor", "print the machine processor architecture name.");
opts.optflag("r", "release", "print the operating system release.");
opts.optflag("s", "sysname", "print the operating system name.");
opts.optflag("v", "version", "print the operating system version.");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTIONS]", NAME);
println!("");
print!("{}", opts.usage("The uname utility writes symbols representing one or more system characteristics to the standard output."));
return 0;
}
let uname = unsafe { getuname() };
let mut output = String::new();
if matches.opt_present("sysname") || matches.opt_present("all")
|| !matches.opts_present(&["nodename".to_owned(), "release".to_owned(), "version".to_owned(), "machine".to_owned()]) {
output.push_str(uname.sysname.as_ref());
output.push_str(" ");
}
if matches.opt_present("nodename") || matches.opt_present("all") {
output.push_str(uname.nodename.as_ref());
output.push_str(" ");
}
if matches.opt_present("release") || matches.opt_present("all") {
output.push_str(uname.release.as_ref());
output.push_str(" ");
}
if matches.opt_present("version") || matches.opt_present("all") {
output.push_str(uname.version.as_ref());<|fim▁hole|> if matches.opt_present("machine") || matches.opt_present("all") {
output.push_str(uname.machine.as_ref());
output.push_str(" ");
}
println!("{}", output.trim());
0
}<|fim▁end|> | output.push_str(" ");
} |
<|file_name|>download.go<|end_file_name|><|fim▁begin|>package main
// github.com/cheggaaa/pb
import (
"fmt"
"os"
"path/filepath"
"strconv"
"time"
"github.com/cavaliercoder/grab"
"github.com/fatih/color"
"github.com/gosuri/uilive"
rss "github.com/jteeuwen/go-pkg-rss"
)
func getRss(podcast *Podcast) (*rss.Feed, error) {
feed := rss.New(1, true, nil, nil)
if err := feed.Fetch(podcast.Url, nil); err != nil {
return nil, err
}
return feed, nil
}
func getRssName(url string) (string, error) {
feed := rss.New(1, true, nil, nil)
if err := feed.Fetch(url, nil); err != nil {
return "", err
}
return feed.Channels[0].Title, nil
}
func syncPodcasts(startDate time.Time, nameOrID string, count int, checkMode bool) error {
allReqs := [][]*grab.Request{}
podcasts := []*Podcast{}
if nameOrID == "" {
podcasts = cfg.GetAllPodcasts()
} else {
p, err := cfg.GetPodcastByNameOrID(nameOrID)
if err != nil {
return err
}
podcasts = append(podcasts, p)
}
for n, podcast := range podcasts {
var podcastList []*DownloadItem
filter := MakeFilter(podcast)
filter.Count = count
filter.StartDate = startDate
// download rss
feed, err := getRss(podcast)
if err != nil {
printPodcastInfo(podcast, podcastList, n+1, err)
continue
}
if len(feed.Channels) == 0 {
log.Warnf(fmt.Sprintf("No channels in %s", podcast.Name))
continue
}
// filter
podcastList, err = filter.FilterItems(feed.Channels[0])
if err != nil {
printPodcastInfo(podcast, podcastList, n+1, err)
continue
}
		if checkMode {
printPodcastInfo(podcast, podcastList, n+1, err)
continue
}
// check for emptiness
if len(podcastList) == 0 {
log.Printf("%s : %s, %d files", color.CyanString("EMPTY"), podcast.Name, len(podcastList))
continue
}
// create download requests
allReqs = append(allReqs, createRequests(podcast, podcastList))
}
	if !checkMode {
startDownload(allReqs)
for _, podcast := range podcasts {
// FIXME: put right date according to rss or Item PubDate
podcast.LastSynced = time.Now()
if err := cfg.UpdatePodcast(podcast); err != nil {
return err
}
}
}
return nil
}
func printPodcastInfo(podcast *Podcast, podcastList []*DownloadItem, index int, err error) {
status := ""
num := color.MagentaString("[" + strconv.Itoa(index) + "] ")
if err != nil {
status = color.RedString("FAIL")
} else {
color.GreenString("OK")
}
log.Printf("%s %s", num, podcast.Name)
log.Printf("\t* Url : %s %s", podcast.Url, status)
if err != nil {
log.Warnf("Error: %s", err)
} else {
log.Printf("\t* Awaiting files : %d", len(podcastList))
for k, podcast := range podcastList {
log.Printf("\t\t* [%d] : %s", k, podcast.ItemTitle)
}
}
}
func createRequests(podcast *Podcast, podcastList []*DownloadItem) []*grab.Request {
reqs := []*grab.Request{}
for _, entry := range podcastList {
// create dir for each entry, path is set in filter
// according to rules in configuration
entryDownloadPath := filepath.Join(podcast.DownloadPath, entry.Dir)
if !fileExists(entryDownloadPath) {
if err := os.MkdirAll(entryDownloadPath, 0777); err != nil {
				// log.Fatal exits the process, so there is nothing to continue to
				log.Fatal(err)
}
}
req, _ := grab.NewRequest(entry.Url)
req.Filename = filepath.Join(entryDownloadPath, entry.Filename)
req.Size = uint64(entry.Size)
req.RemoveOnError = true
reqs = append(reqs, req)
}
return reqs
}
func startDownload(downloadReqs [][]*grab.Request) {
requestCount := len(downloadReqs)
statusQueue := make(chan *downloadStatus, requestCount)
doneQueue := make(chan bool, requestCount)
client := grab.NewClient()
go func() {
// wait while all requests will be in queue
for i := 0; i < requestCount; i++ {
<-doneQueue
}
// close channels
close(statusQueue)
close(doneQueue)
}()
totalFiles := 0
for _, podcastReq := range downloadReqs {
totalFiles += len(podcastReq)
go func(requests []*grab.Request) {
curPosition := 0
podcastTotal := len(requests)
for _, req := range requests {
			// increase position, used for printing<|fim▁hole|>
resp := <-client.DoAsync(req)
// send results to monitoring channel
statusQueue <- &downloadStatus{
Total: podcastTotal,
Current: curPosition,
Response: resp,
}
				// ensure files are downloaded one by one, so wait for completion
for !resp.IsComplete() {
time.Sleep(500 * time.Microsecond)
}
}
}(podcastReq)
}
checkDownloadProgress(statusQueue, totalFiles)
log.Infof("%d files downloaded.\n", totalFiles)
}
type downloadStatus struct {
Total int // total requests count
Current int // current position
Response *grab.Response
}
func checkDownloadProgress(respch <-chan *downloadStatus, reqCount int) {
timer := time.NewTicker(200 * time.Millisecond)
ui := uilive.New()
completed := 0
responses := make([]*downloadStatus, 0)
ui.Start()
for completed < reqCount {
select {
case resp := <-respch:
if resp != nil {
responses = append(responses, resp)
}
case <-timer.C:
// print completed requests
for i, resp := range responses {
if resp != nil && resp.Response.IsComplete() {
if resp.Response.Error != nil {
showProgressError(ui, resp)
} else {
showProgressDone(ui, resp)
}
responses[i] = nil
completed++
}
}
// print in progress requests
for _, resp := range responses {
if resp != nil {
showProgressProc(ui, resp)
}
}
}
}
timer.Stop()
ui.Stop()
}
func bytesToMb(bytesCount uint64) float64 {
return float64(bytesCount) / float64(1024*1024)
}
func showProgressError(ui *uilive.Writer, status *downloadStatus) {
fmt.Fprintf(ui.Bypass(), "Error downloading %s: %v\n",
status.Response.Request.URL(),
status.Response.Error)
}
func showProgressDone(ui *uilive.Writer, status *downloadStatus) {
fmt.Fprintf(ui.Bypass(),
"Finished %s [%d/%d] %0.2f / %0.2f Mb (%d%%)\n",
status.Response.Filename,
status.Current, status.Total,
bytesToMb(status.Response.BytesTransferred()),
bytesToMb(status.Response.Size),
int(100*status.Response.Progress()))
}
func showProgressProc(ui *uilive.Writer, status *downloadStatus) {
fmt.Fprintf(ui, "Downloading %s [%d/%d] %0.2f / %0.2f Mb (%d%%)\n",
status.Response.Filename,
status.Current, status.Total,
bytesToMb(status.Response.BytesTransferred()),
bytesToMb(status.Response.Size),
int(100*status.Response.Progress()))
}<|fim▁end|> | curPosition++
|
<|file_name|>SingleRangeElem.d.ts<|end_file_name|><|fim▁begin|>import { IRangeElem } from './IRangeElem';
import { IIterator } from '../../base/iterator';<|fim▁hole|> get step(): number;
get to(): number;
get isAll(): boolean;
get isSingle(): boolean;
get isUnbound(): boolean;
size(size?: number): number;
clone(): SingleRangeElem;
contains(value: number, size?: number): boolean;
reverse(): SingleRangeElem;
invert(index: number, size?: number): number;
iter(size?: number): IIterator<number>;
get __iterator__(): IIterator<number>;
toString(): string;
}<|fim▁end|> | export declare class SingleRangeElem implements IRangeElem {
readonly from: number;
constructor(from: number); |
<|file_name|>client.js<|end_file_name|><|fim▁begin|>;(function(root) {
/**
* Constructs a new cross storage client given the url to a hub. By default,
* an iframe is created within the document body that points to the url. It
* also accepts an options object, which may include a timeout, frameId, and
* promise. The timeout, in milliseconds, is applied to each request and
* defaults to 5000ms. The options object may also include a frameId,
* identifying an existing frame on which to install its listeners. If the
* promise key is supplied the constructor for a Promise, that Promise library
* will be used instead of the default window.Promise.
*
* @example
* var storage = new CrossStorageClient('https://store.example.com/hub.html');
*
* @example
* var storage = new CrossStorageClient('https://store.example.com/hub.html', {
* timeout: 5000,
* frameId: 'storageFrame'
* });
*
* @constructor
*
* @param {string} url The url to a cross storage hub
* @param {object} [opts] An optional object containing additional options,
* including timeout, frameId, and promise
*
* @property {string} _id A UUID v4 id
* @property {function} _promise The Promise object to use
* @property {string} _frameId The id of the iFrame pointing to the hub url
* @property {string} _origin The hub's origin
* @property {object} _requests Mapping of request ids to callbacks
* @property {bool} _connected Whether or not it has connected
* @property {bool} _closed Whether or not the client has closed
* @property {int} _count Number of requests sent
* @property {function} _listener The listener added to the window
* @property {Window} _hub The hub window
*/
function CrossStorageClient(url, opts) {
opts = opts || {};
this._id = CrossStorageClient._generateUUID();
this._promise = opts.promise || Promise;
this._frameId = opts.frameId || 'CrossStorageClient-' + this._id;
this._origin = CrossStorageClient._getOrigin(url);
this._requests = {};
this._connected = false;
this._closed = false;
this._count = 0;
this._timeout = opts.timeout || 5000;
this._listener = null;
this._installListener();
var frame;
if (opts.frameId) {
frame = document.getElementById(opts.frameId);
}
// If using a passed iframe, poll the hub for a ready message
if (frame) {
this._poll();
}
// Create the frame if not found or specified
frame = frame || this._createFrame(url);
this._hub = frame.contentWindow;
}
/**
* The styles to be applied to the generated iFrame. Defines a set of properties
* that hide the element by positioning it outside of the visible area, and
* by modifying its display.
*
* @member {Object}
*/
CrossStorageClient.frameStyle = {
display: 'none',
position: 'absolute',
top: '-999px',
left: '-999px'
};
/**
* Returns the origin of an url, with cross browser support. Accommodates
* the lack of location.origin in IE, as well as the discrepancies in the
* inclusion of the port when using the default port for a protocol, e.g.
* 443 over https. Defaults to the origin of window.location if passed a
* relative path.
*
* @param {string} url The url to a cross storage hub
* @returns {string} The origin of the url
*/
CrossStorageClient._getOrigin = function(url) {
var uri, protocol, origin;
uri = document.createElement('a');
uri.href = url;
if (!uri.host) {
uri = window.location;
}
if (!uri.protocol || uri.protocol === ':') {
protocol = window.location.protocol;
} else {
protocol = uri.protocol;
}
origin = protocol + '//' + uri.host;
origin = origin.replace(/:80$|:443$/, '');
return origin;
};
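  // Illustrative behavior (hypothetical urls): both of the following
  // normalize to 'https://store.example.com', since the default https port
  // is stripped by the replace above:
  //
  //   CrossStorageClient._getOrigin('https://store.example.com/hub.html');
  //   CrossStorageClient._getOrigin('https://store.example.com:443/hub.html');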
/**
* UUID v4 generation, taken from: http://stackoverflow.com/questions/
* 105034/how-to-create-a-guid-uuid-in-javascript/2117523#2117523
*
* @returns {string} A UUID v4 string
*/
CrossStorageClient._generateUUID = function() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
var r = Math.random() * 16|0, v = c == 'x' ? r : (r&0x3|0x8);
return v.toString(16);
});
};
/**
* Returns a promise that is fulfilled when a connection has been established
* with the cross storage hub. Its use is required to avoid sending any
* requests prior to initialization being complete.
*
* @returns {Promise} A promise that is resolved on connect
*/
CrossStorageClient.prototype.onConnect = function() {
var client = this;
if (this._connected) {
return this._promise.resolve();
} else if (this._closed) {
return this._promise.reject(new Error('CrossStorageClient has closed'));
}
// Queue connect requests for client re-use
if (!this._requests.connect) {
this._requests.connect = [];
}
return new this._promise(function(resolve, reject) {
var timeout = setTimeout(function() {
reject(new Error('CrossStorageClient could not connect'));
}, client._timeout);
client._requests.connect.push(function(err) {
clearTimeout(timeout);
if (err) return reject(err);
resolve();
});
});
};
/**
* Sets a key to the specified value, optionally accepting a ttl to passively
* expire the key after a number of milliseconds. Returns a promise that is
* fulfilled on success, or rejected if any errors setting the key occurred,
* or the request timed out.
*
* @param {string} key The key to set
* @param {*} value The value to assign
* @param {int} ttl Time to live in milliseconds
* @returns {Promise} A promise that is settled on hub response or timeout
*/
CrossStorageClient.prototype.set = function(key, value, ttl) {
return this._request('set', {
key: key,
value: value,
ttl: ttl
});
};
/**
* Accepts one or more keys for which to retrieve their values. Returns a
* promise that is settled on hub response or timeout. On success, it is
* fulfilled with the value of the key if only passed a single argument.
* Otherwise it's resolved with an array of values. On failure, it is rejected
* with the corresponding error message.
*
* @param {...string} key The key to retrieve
* @returns {Promise} A promise that is settled on hub response or timeout
*/
CrossStorageClient.prototype.get = function(key) {
var args = Array.prototype.slice.call(arguments);
return this._request('get', {keys: args});
};
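  // Illustrative usage (hypothetical keys): wait for onConnect before
  // issuing any of the storage requests defined in this file.
  //
  //   storage.onConnect().then(function() {
  //     return storage.set('key1', 'foo');
  //   }).then(function() {
  //     return storage.get('key1');
  //   }).then(function(value) {
  //     console.log(value); // 'foo'
  //   })['catch'](function(err) {
  //     // connection and timeout errors land here
  //   });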
/**
* Accepts one or more keys for deletion. Returns a promise that is settled on
* hub response or timeout.
*
* @param {...string} key The key to delete
* @returns {Promise} A promise that is settled on hub response or timeout
*/
CrossStorageClient.prototype.del = function() {
var args = Array.prototype.slice.call(arguments);
return this._request('del', {keys: args});
};
/**
* Returns a promise that, when resolved, indicates that all localStorage
* data has been cleared.
*
* @returns {Promise} A promise that is settled on hub response or timeout
*/
CrossStorageClient.prototype.clear = function() {
return this._request('clear');
};
/**
* Returns a promise that, when resolved, passes an array of all keys
* currently in storage.
*
* @returns {Promise} A promise that is settled on hub response or timeout
*/
CrossStorageClient.prototype.getKeys = function() {
return this._request('getKeys');
};
/**
* Deletes the iframe and sets the connected state to false. The client can
* no longer be used after being invoked.
*/
CrossStorageClient.prototype.close = function() {
var frame = document.getElementById(this._frameId);
if (frame) {
frame.parentNode.removeChild(frame);
}
// Support IE8 with detachEvent
if (window.removeEventListener) {
window.removeEventListener('message', this._listener, false);
} else {
window.detachEvent('onmessage', this._listener);
}
this._connected = false;
this._closed = true;
};
/**
* Installs the necessary listener for the window message event. When a message
* is received, the client's _connected status is changed to true, and the
* onConnect promise is fulfilled. Given a response message, the callback
* corresponding to its request is invoked. If response.error holds a truthy
* value, the promise associated with the original request is rejected with
* the error. Otherwise the promise is fulfilled and passed response.result.
*
* @private
*/
CrossStorageClient.prototype._installListener = function() {
var client = this;
this._listener = function(message) {
var i, error, response;
// Ignore invalid messages, those not from the correct hub, or when
// the client has closed
if (client._closed || !message.data || typeof message.data !== 'string' ||
message.origin !== client._origin) {
return;
}
// LocalStorage isn't available in the hub
if (message.data === 'cross-storage:unavailable') {
if (!client._closed) client.close();
if (!client._requests.connect) return;
error = new Error('Closing client. Could not access localStorage in hub.');
for (i = 0; i < client._requests.connect.length; i++) {
client._requests.connect[i](error);
}
return;
}
// Handle initial connection
if (message.data.indexOf('cross-storage:') !== -1 && !client._connected) {
client._connected = true;
if (!client._requests.connect) return;
for (i = 0; i < client._requests.connect.length; i++) {
client._requests.connect[i](error);
}
delete client._requests.connect;
}
if (message.data === 'cross-storage:ready') return;
// All other messages
try {
response = JSON.parse(message.data);
} catch(e) {
return;
}
if (!response.id) return;
if (client._requests[response.id]) {
client._requests[response.id](response.error, response.result);
}
};
// Support IE8 with attachEvent
if (window.addEventListener) {
window.addEventListener('message', this._listener, false);
} else {
window.attachEvent('onmessage', this._listener);
}
};
/**
* Invoked when a frame id was passed to the client, rather than allowing
* the client to create its own iframe. Polls the hub for a ready event to
* establish a connected state.
*/
CrossStorageClient.prototype._poll = function() {<|fim▁hole|>
client = this;
interval = setInterval(function() {
if (client._connected) return clearInterval(interval);
if (!client._hub) return;
client._hub.postMessage('cross-storage:poll', client._origin);
}, 1000);
};
/**
* Creates a new iFrame containing the hub. Applies the necessary styles to
* hide the element from view, prior to adding it to the document body.
* Returns the created element.
*
* @private
*
* @param {string} url The url to the hub
* returns {HTMLIFrameElement} The iFrame element itself
*/
CrossStorageClient.prototype._createFrame = function(url) {
var frame, key;
frame = window.document.createElement('iframe');
frame.id = this._frameId;
// Style the iframe
for (key in CrossStorageClient.frameStyle) {
if (CrossStorageClient.frameStyle.hasOwnProperty(key)) {
frame.style[key] = CrossStorageClient.frameStyle[key];
}
}
window.document.body.appendChild(frame);
frame.src = url;
return frame;
};
/**
* Sends a message containing the given method and params to the hub. Stores
* a callback in the _requests object for later invocation on message, or
* deletion on timeout. Returns a promise that is settled in either instance.
*
* @private
*
* @param {string} method The method to invoke
* @param {*} params The arguments to pass
* @returns {Promise} A promise that is settled on hub response or timeout
*/
CrossStorageClient.prototype._request = function(method, params) {
var req, client;
if (this._closed) {
return this._promise.reject(new Error('CrossStorageClient has closed'));
}
client = this;
client._count++;
req = {
id: this._id + ':' + client._count,
method: 'cross-storage:' + method,
params: params
};
return new this._promise(function(resolve, reject) {
var timeout, originalToJSON;
      // Timeout if a response isn't received within the configured
      // timeout (5000ms by default)
timeout = setTimeout(function() {
if (!client._requests[req.id]) return;
delete client._requests[req.id];
reject(new Error('Timeout: could not perform ' + req.method));
}, client._timeout);
// Add request callback
client._requests[req.id] = function(err, result) {
clearTimeout(timeout);
if (err) return reject(new Error(err));
resolve(result);
};
    // In case we have a broken Array.prototype.toJSON, e.g. due to
    // old versions of the Prototype.js library
if (Array.prototype.toJSON) {
originalToJSON = Array.prototype.toJSON;
Array.prototype.toJSON = null;
}
// Send serialized message
client._hub.postMessage(JSON.stringify(req), client._origin);
// Restore original toJSON
if (originalToJSON) {
Array.prototype.toJSON = originalToJSON;
}
});
};
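/**
 * Illustrative usage sketch (not part of this file). It assumes the
 * public onConnect()/get() wrappers that typically accompany this
 * client; the hub URL below is hypothetical.
 *
 *   var storage = new CrossStorageClient('https://hub.example.com/hub.html');
 *   storage.onConnect().then(function() {
 *     return storage.get('token');
 *   }).then(function(value) {
 *     // value arrived via the _request/_listener round trip above
 *   });
 */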
/**
* Export for various environments.
*/
if (typeof module !== 'undefined' && module.exports) {
module.exports = CrossStorageClient;
} else if (typeof exports !== 'undefined') {
exports.CrossStorageClient = CrossStorageClient;
} else if (typeof define === 'function' && define.amd) {
define('CrossStorageClient', [], function() {
return CrossStorageClient;
});
} else {
root.CrossStorageClient = CrossStorageClient;
}
}(this));<|fim▁end|> | var client, interval; |
<|file_name|>test_version.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
<|fim▁hole|><|fim▁end|> | API_VERSION = 'v1'
DOMAIN = {'contacts': {}} |
<|file_name|>Utf8StringModule.java<|end_file_name|><|fim▁begin|>package com.github.mlk.queue.codex;
import com.github.mlk.queue.Queuify;
import com.github.mlk.queue.implementation.Module;
public class Utf8StringModule implements Module {
public static Utf8StringModule utfStrings() {
return new Utf8StringModule();
}
<|fim▁hole|> @Override
public void bind(Queuify.Builder builder) {
builder.encoder(new StringEncoder())
.decoder(new StringDecoder());
}
}<|fim▁end|> | |
<|file_name|>fwidgets.py<|end_file_name|><|fim▁begin|>from plow.gui.manifest import QtCore, QtGui
from plow.gui.util import formatDateTime, formatDuration
<|fim▁hole|> "Decimal",
"DateTime",
"PillWidget",
"Checkbox"
]
class FormWidget(QtGui.QWidget):
"""
The base class for all form widgets.
"""
__LOCKED_PIX = None
    def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QGridLayout(self)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
self._widget = None
self.__status = QtGui.QLabel(self)
self.__status.setContentsMargins(5, 0, 0, 0)
layout.addWidget(self.__status, 0, 2)
if not FormWidget.__LOCKED_PIX:
FormWidget.__LOCKED_PIX = QtGui.QPixmap(":/images/locked.png")
FormWidget.__LOCKED_PIX = FormWidget.__LOCKED_PIX.scaled(
QtCore.QSize(12, 12), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
def setReadOnly(self, value):
self._setReadOnly(value)
if value:
self.__status.setPixmap(FormWidget.__LOCKED_PIX)
else:
self.__status.setText("")
def setSuffix(self, value):
self._setSuffix(value)
def _setSuffix(self, value):
self.layout().addWidget(QtGui.QLabel(value), 0, 1)
def _setReadOnly(self, value):
pass
def setWidget(self, widget):
self._widget = widget
self.layout().addWidget(widget, 0, 0)
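# Illustrative sketch only (not part of plow): a minimal FormWidget
# subclass just builds its inner widget and forwards read-only state.
# The "_ExampleMemo" name is hypothetical.
class _ExampleMemo(FormWidget):
    def __init__(self, text, parent=None):
        FormWidget.__init__(self, parent)
        self.setWidget(QtGui.QTextEdit(text, self))
    def _setReadOnly(self, value):
        self._widget.setReadOnly(value)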
class Text(FormWidget):
def __init__(self, text, parent=None):
FormWidget.__init__(self, parent)
self.setWidget(QtGui.QLineEdit(text, self))
self._widget.setFocusPolicy(QtCore.Qt.NoFocus)
self._widget.setCursorPosition(1)
def _setReadOnly(self, value):
self._widget.setReadOnly(value)
class Number(FormWidget):
def __init__(self, value, parent=None):
FormWidget.__init__(self, parent)
widget = QtGui.QSpinBox(self)
widget.setMinimum(0)
widget.setMaximum(1000000)
widget.setMinimumWidth(100)
widget.setValue(value)
self.setWidget(widget)
self._widget.setFocusPolicy(QtCore.Qt.NoFocus)
def _setReadOnly(self, value):
self._widget.setReadOnly(value)
self._widget.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
def _setSuffix(self, value):
self._widget.setSuffix(value)
class Decimal(FormWidget):
def __init__(self, value, parent=None):
FormWidget.__init__(self, parent)
widget = QtGui.QDoubleSpinBox(self)
widget.setValue(value)
self.setWidget(widget)
widget.setMinimumWidth(100)
self._widget.setFocusPolicy(QtCore.Qt.NoFocus)
def _setReadOnly(self, value):
self._widget.setReadOnly(value)
self._widget.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
def _setSuffix(self, value):
self._widget.setSuffix(value)
class DateTime(FormWidget):
def __init__(self, value, parent=None):
FormWidget.__init__(self, parent)
self.setWidget(QtGui.QLabel(formatDateTime(value), self))
class Duration(FormWidget):
def __init__(self, times, parent=None):
FormWidget.__init__(self, parent)
self.setWidget(QtGui.QLabel(formatDuration(times[0], times[1]), self))
class PillWidget(FormWidget):
def __init__(self, value, parent):
FormWidget.__init__(self, parent)
data, color = value
self.label = QtGui.QLabel(data, self)
self.label.setStyleSheet("border: 1px solid #222222; background-color: %s; border-radius: 6px;" % color)
self.label.setMinimumWidth(100)
self.setWidget(self.label)
class Checkbox(FormWidget):
def __init__(self, bvalue, parent=None):
FormWidget.__init__(self, parent)
self.setWidget(QtGui.QCheckBox(self))
self._widget.setCheckState(QtCore.Qt.Checked if bvalue else QtCore.Qt.Unchecked)
self._widget.setFocusPolicy(QtCore.Qt.NoFocus)
def _setReadOnly(self, value):
        # QCheckBox has no setReadOnly(); approximate read-only state by
        # disabling user input instead.
        self._widget.setEnabled(not value)<|fim▁end|>
"Text",
"Number", |
<|file_name|>aresource.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.<|fim▁hole|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Weblate wrapper around translate-toolkit formats to add missing
functionality.
"""
import json
from translate.storage.jsonl10n import JsonFile as JsonFileTT
class JsonFile(JsonFileTT):
"""
Workaround ttkit bug on not including added units in saved file.
This is fixed in 1.13.0
"""
def __str__(self):
data = {}
# This is really broken for many reasons, but works for
# simple JSON files.
for unit in self.units:
data[unit.getid().lstrip('.')] = unit.source
return json.dumps(
data, sort_keys=True, indent=4, ensure_ascii=False
).encode('utf-8')<|fim▁end|> | #
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
<|file_name|>source.js<|end_file_name|><|fim▁begin|>(function($)
{
$.Redactor.prototype.source = function()
{
return {
init: function()
{
var button = this.button.addFirst('html', 'HTML');
this.button.setIcon(button, '<i class="re-icon-html"></i>');
this.button.addCallback(button, this.source.toggle);
var style = {
'width': '100%',
'margin': '0',
'background': '#1d1d1d',
'box-sizing': 'border-box',
'color': '#ccc',
'font-size': '15px',
'outline': 'none',
'padding': '20px',
'line-height': '24px',
'font-family': 'Consolas, Menlo, Monaco, "Courier New", monospace'
};
this.source.$textarea = $('<textarea />');
this.source.$textarea.css(style).hide();
if (this.opts.type === 'textarea')
{
this.core.box().append(this.source.$textarea);
}
else
{
this.core.box().after(this.source.$textarea);
}
this.core.element().on('destroy.callback.redactor', $.proxy(function()
{
this.source.$textarea.remove();
}, this));
},
toggle: function()
{
return (this.source.$textarea.hasClass('open')) ? this.source.hide() : this.source.show();
},
setCaretOnShow: function()
{
this.source.offset = this.offset.get();
				// Remember the scroll position on this.source so that show()
				// can restore it after the editor is hidden.
				this.source.scroll = $(window).scrollTop();
// caret position sync
this.source.start = 0;
this.source.end = 0;
var $editorDiv = $("<div/>").append($.parseHTML(this.core.editor().html(), document, true));
var $selectionMarkers = $editorDiv.find("span.redactor-selection-marker");
if ($selectionMarkers.length > 0)
{
var editorHtml = $editorDiv.html().replace(/&/g, '&');
if ($selectionMarkers.length === 1)
{
this.source.start = this.utils.strpos(editorHtml, $editorDiv.find("#selection-marker-1").prop("outerHTML"));
this.source.end = this.source.start;
}
else if ($selectionMarkers.length === 2)
{
this.source.start = this.utils.strpos(editorHtml, $editorDiv.find("#selection-marker-1").prop("outerHTML"));
this.source.end = this.utils.strpos(editorHtml, $editorDiv.find("#selection-marker-2").prop("outerHTML")) - $editorDiv.find("#selection-marker-1").prop("outerHTML").toString().length;
}
}
},
setCaretOnHide: function(html)
{
this.source.start = this.source.$textarea.get(0).selectionStart;
this.source.end = this.source.$textarea.get(0).selectionEnd;
// if selection starts from end
if (this.source.start > this.source.end && this.source.end > 0)
{
var tempStart = this.source.end;
var tempEnd = this.source.start;
this.source.start = tempStart;
this.source.end = tempEnd;
}
this.source.start = this.source.enlargeOffset(html, this.source.start);
this.source.end = this.source.enlargeOffset(html, this.source.end);
html = html.substr(0, this.source.start) + this.marker.html(1) + html.substr(this.source.start);
if (this.source.end > this.source.start)
{
var markerLength = this.marker.html(1).toString().length;
html = html.substr(0, this.source.end + markerLength) + this.marker.html(2) + html.substr(this.source.end + markerLength);
}
return html;
},
hide: function()
{
this.source.$textarea.removeClass('open').hide();
this.source.$textarea.off('.redactor-source');
var code = this.source.$textarea.val();
<|fim▁hole|> this.button.enableAll();
this.core.editor().show().focus();
this.selection.restore();
this.placeholder.enable();
this.core.callback('visual');
},
show: function()
{
this.selection.save();
this.source.setCaretOnShow();
var height = this.core.editor().height();
var code = this.code.get();
// callback
code = this.core.callback('source', code);
this.core.editor().hide();
this.button.disableAll('html');
this.source.$textarea.val(code).height(height).addClass('open').show();
this.source.$textarea.on('keyup.redactor-source', $.proxy(function()
{
if (this.opts.type === 'textarea')
{
this.core.textarea().val(this.source.$textarea.val());
}
}, this));
this.marker.remove();
				$(window).scrollTop(this.source.scroll);
if (this.source.$textarea[0].setSelectionRange)
{
this.source.$textarea[0].setSelectionRange(this.source.start, this.source.end);
}
this.source.$textarea[0].scrollTop = 0;
setTimeout($.proxy(function()
{
this.source.$textarea.focus();
}, this), 0);
},
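			// Illustrative note: enlargeOffset nudges an offset that landed
			// inside a tag to just past its closing '>'. For '<p>hi</p>', an
			// offset of 1 (inside '<p>') becomes 3, while an offset in plain
			// text is returned unchanged.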
enlargeOffset: function(html, offset)
{
var htmlLength = html.length;
var c = 0;
if (html[offset] === '>')
{
c++;
}
else
{
for(var i = offset; i <= htmlLength; i++)
{
c++;
if (html[i] === '>')
{
break;
}
else if (html[i] === '<' || i === htmlLength)
{
c = 0;
break;
}
}
}
return offset + c;
}
};
};
})(jQuery);<|fim▁end|> | code = this.paragraphize.load(code);
code = this.source.setCaretOnHide(code);
this.code.start(code); |
<|file_name|>feature-gate-allow-internal-unstable.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT<|fim▁hole|>// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_macros)]
#[allow_internal_unstable] //~ ERROR allow_internal_unstable side-steps
macro_rules! foo {
() => {}
}
fn main() {}<|fim▁end|> | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"encoding/csv"
"flag"
"fmt"
"io"
"os"
"strconv"
"time"
"github.com/golang/glog"
)
var defaultValue = flag.String("default", "0", "Default value to assign before the first merge point from the second file")
func main() {
flag.Parse()
if len(flag.Args()) != 3 {
fmt.Printf("USAGE: merger <file to merge into> <file to merge> <output file>\n")
return
}
first := openCSVRead(flag.Arg(0))
second := openCSVRead(flag.Arg(1))
output := openCSVWrite(flag.Arg(2))
	// Merge the first lines, which usually contain the titles.
titles, err := first.Read()
if err != nil {
glog.Fatal(err)
}
secondTitles, err := second.Read()
if err != nil {
glog.Fatal(err)
}
	// The first column of the second file is the timestamp, so only the
	// remaining columns are appended to the merged header.
err = output.Write(append(titles, secondTitles[1:]...))
if err != nil {
glog.Fatal(err)
}
// Grab first merge point.
oldValue := *defaultValue
mergeTime, mergeValue, err := getLine(second)
if err != nil {
glog.Fatal(err)
}
<|fim▁hole|> for {
// Read line from the first file.
values, err := first.Read()
if err != nil {
if err == io.EOF {
break
}
glog.Errorf("Failed to parse line from first file: %v", err)
continue
}
curTime, err := parseTime(values[0])
if err != nil {
glog.Errorf("Failed to parse time of line %v: %v", values, err)
continue
}
// Use the old value until we reach the new merge time.
// Zero merge time means no more content in the file.
if !mergeTime.IsZero() && !curTime.Before(mergeTime) {
oldValue = mergeValue
mergeTime, mergeValue, err = getLine(second)
if err != nil {
if err == io.EOF {
mergeTime = zero
} else {
glog.Errorf("Failed to read line from second file: %v", err)
}
}
}
// Append the second file's content into the first.
err = output.Write(append(values, oldValue))
if err != nil {
glog.Errorf("Failed to write output to file: %v", err)
}
output.Flush()
}
}
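// Illustrative example with hypothetical data: given a first file of
//
//	time,cpu
//	1,10
//	2,20
//	3,30
//
// and a second file of
//
//	time,mem
//	2,512
//
// running with -default=0 produces:
//
//	time,cpu,mem
//	1,10,0
//	2,20,512
//	3,30,512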
var zero time.Time
// Get a line from the second file.
func getLine(r *csv.Reader) (time.Time, string, error) {
record, err := r.Read()
if err != nil {
return zero, "", err
}
if len(record) != 2 {
return zero, "", fmt.Errorf("record had unexpected amount of fields: %v", record)
}
unixTime, err := parseTime(record[0])
if err != nil {
return zero, "", err
}
return unixTime, record[1], nil
}
// Parse time from a string with a UNIX time.
func parseTime(timeStr string) (time.Time, error) {
unixTime, err := strconv.ParseInt(timeStr, 10, 64)
if err != nil {
return zero, fmt.Errorf("failed to parse UNIX timestamp from %q: %v", timeStr, err)
}
return time.Unix(unixTime, 0), nil
}
// Open a CSV file for R/W and create if it doesn't exist.
func openCSVWrite(filename string) *csv.Writer {
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
glog.Fatalf("Failed to open %q: %v", filename, err)
}
return csv.NewWriter(file)
}
// Open a CSV file for reading.
func openCSVRead(filename string) *csv.Reader {
file, err := os.Open(filename)
if err != nil {
glog.Fatalf("Failed to open %q: %v", filename, err)
}
return csv.NewReader(file)
}<|fim▁end|> | // Merge second file into first file. |
<|file_name|>CustomerPojoService.java<|end_file_name|><|fim▁begin|><|fim▁hole|> * under the Apache License Version 2.0 (release version 0.8.0)
* http://www.apache.org/licenses/LICENSE-2.0
*
* Copyright (c) Hoteia, 2012-2014
* http://www.hoteia.com - http://twitter.com/hoteia - [email protected]
*
*/
package org.hoteia.qalingo.core.service.pojo;
import java.util.List;
import java.util.Set;
import org.dozer.Mapper;
import org.hoteia.qalingo.core.domain.Customer;
import org.hoteia.qalingo.core.domain.CustomerMarketArea;
import org.hoteia.qalingo.core.domain.CustomerWishlist;
import org.hoteia.qalingo.core.domain.MarketArea;
import org.hoteia.qalingo.core.pojo.customer.CustomerPojo;
import org.hoteia.qalingo.core.pojo.customer.CustomerWishlistPojo;
import org.hoteia.qalingo.core.pojo.util.mapper.PojoUtil;
import org.hoteia.qalingo.core.service.CustomerService;
import org.hoteia.qalingo.core.service.MarketService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
@Service("customerPojoService")
@Transactional(readOnly = true)
public class CustomerPojoService {
private final Logger logger = LoggerFactory.getLogger(getClass());
@Autowired
private Mapper dozerBeanMapper;
@Autowired
protected MarketService marketService;
@Autowired
private CustomerService customerService;
public List<CustomerPojo> getAllCustomers() {
List<Customer> customers = customerService.findCustomers();
logger.debug("Found {} customers", customers.size());
return PojoUtil.mapAll(dozerBeanMapper, customers, CustomerPojo.class);
}
public CustomerPojo getCustomerById(final String id) {
Customer customer = customerService.getCustomerById(id);
logger.debug("Found customer {} for id {}", customer, id);
return customer == null ? null : dozerBeanMapper.map(customer, CustomerPojo.class);
}
public CustomerPojo getCustomerByLoginOrEmail(final String usernameOrEmail) {
Customer customer = customerService.getCustomerByLoginOrEmail(usernameOrEmail);
logger.debug("Found customer {} for usernameOrEmail {}", customer, usernameOrEmail);
return customer == null ? null : dozerBeanMapper.map(customer, CustomerPojo.class);
}
public CustomerPojo getCustomerByPermalink(final String permalink) {
Customer customer = customerService.getCustomerByPermalink(permalink);
logger.debug("Found customer {} for usernameOrEmail {}", customer, permalink);
return customer == null ? null : dozerBeanMapper.map(customer, CustomerPojo.class);
}
@Transactional
public void saveOrUpdate(final CustomerPojo customerJsonPojo) throws Exception {
Customer customer = dozerBeanMapper.map(customerJsonPojo, Customer.class);
logger.info("Saving customer {}", customer);
customerService.saveOrUpdateCustomer(customer);
}
public List<CustomerWishlistPojo> getWishlist(final Customer customer, final MarketArea marketArea) {
final CustomerMarketArea customerMarketArea = customer.getCurrentCustomerMarketArea(marketArea.getId());
Set<CustomerWishlist> wishlistProducts = customerMarketArea.getWishlistProducts();
List<CustomerWishlistPojo> wishlists = PojoUtil.mapAll(dozerBeanMapper, wishlistProducts, CustomerWishlistPojo.class);
return wishlists;
}
public void addProductSkuToWishlist(MarketArea marketArea, Customer customer, String catalogCategoryCode, String productSkuCode) throws Exception {
customerService.addProductSkuToWishlist(marketArea, customer, catalogCategoryCode, productSkuCode);
}
}<|fim▁end|> | /**
* Most of the code in the Qalingo project is copyrighted Hoteia and licensed |
<|file_name|>issue-71611.rs<|end_file_name|><|fim▁begin|>// revisions: full min
#![cfg_attr(full, feature(adt_const_params))]
#![cfg_attr(full, allow(incomplete_features))]
fn func<A, const F: fn(inner: A)>(outer: A) {
//~^ ERROR: using function pointers as const generic parameters is forbidden
//~| ERROR: the type of const parameters must not depend on other generic parameters
F(outer);
}<|fim▁hole|><|fim▁end|> |
fn main() {} |
<|file_name|>iteratee.d.ts<|end_file_name|><|fim▁begin|>import { iteratee } from "./index";<|fim▁hole|>export = iteratee;<|fim▁end|> | |
<|file_name|>annotations.rs<|end_file_name|><|fim▁begin|>use crate::schema::*;
use diesel::sql_types::Text;
use diesel::*;
#[test]
fn association_where_struct_name_doesnt_match_table_name() {
#[derive(PartialEq, Eq, Debug, Clone, Queryable, Identifiable, Associations)]
#[belongs_to(Post)]
#[table_name = "comments"]
struct OtherComment {
id: i32,
post_id: i32,
}
let connection = connection_with_sean_and_tess_in_users_table();
let sean = find_user_by_name("Sean", &connection);
insert_into(posts::table)
.values(&sean.new_post("Hello", None))
.execute(&connection)
.unwrap();
let post = posts::table.first::<Post>(&connection).unwrap();
insert_into(comments::table)
.values(&NewComment(post.id, "comment"))
.execute(&connection)
.unwrap();
let comment_text = OtherComment::belonging_to(&post)
.select(comments::text)
.first::<String>(&connection);
assert_eq!(Ok("comment".into()), comment_text);
}
#[test]
#[cfg(not(any(feature = "sqlite", feature = "mysql")))]
fn association_where_parent_and_child_have_underscores() {
#[derive(PartialEq, Eq, Debug, Clone, Queryable, Identifiable, Associations)]
#[belongs_to(User)]
pub struct SpecialPost {
id: i32,
user_id: i32,
title: String,
}
#[derive(Insertable)]
#[table_name = "special_posts"]
struct NewSpecialPost {
user_id: i32,
title: String,
}
impl SpecialPost {
fn new(user_id: i32, title: &str) -> NewSpecialPost {
NewSpecialPost {
user_id: user_id,
title: title.to_owned(),
}
}
}
#[derive(PartialEq, Eq, Debug, Clone, Queryable, Identifiable, Associations)]
#[belongs_to(SpecialPost)]
struct SpecialComment {
id: i32,
special_post_id: i32,
}
impl SpecialComment {
fn new(special_post_id: i32) -> NewSpecialComment {
NewSpecialComment {
special_post_id: special_post_id,
}
}
}
#[derive(Insertable)]
#[table_name = "special_comments"]
struct NewSpecialComment {
special_post_id: i32,
}
let connection = connection_with_sean_and_tess_in_users_table();
let sean = find_user_by_name("Sean", &connection);
let new_post = SpecialPost::new(sean.id, "title");
let special_post: SpecialPost = insert_into(special_posts::table)
.values(&new_post)
.get_result(&connection)
.unwrap();
let new_comment = SpecialComment::new(special_post.id);
insert_into(special_comments::table)
.values(&new_comment)
.execute(&connection)
.unwrap();
let comment: SpecialComment = SpecialComment::belonging_to(&special_post)
.first(&connection)
.unwrap();
assert_eq!(special_post.id, comment.special_post_id);
}
// This module has no test functions, as it's only to test compilation.
mod associations_can_have_nullable_foreign_keys {
#![allow(dead_code)]
table! {
foos{
id -> Integer,
}
}
table! {
bars {
id -> Integer,
foo_id -> Nullable<Integer>,
}
}
// This test has no assertions, as it is for compilation purposes only.
#[derive(Identifiable)]
pub struct Foo {
id: i32,
}
#[derive(Identifiable, Associations)]
#[belongs_to(Foo)]
pub struct Bar {
id: i32,
foo_id: Option<i32>,
}
}
// This module has no test functions, as it's only to test compilation.
mod multiple_lifetimes_in_insertable_struct_definition {
#![allow(dead_code)]
use crate::schema::posts;
#[derive(Insertable)]
#[table_name = "posts"]
pub struct MyPost<'a> {
title: &'a str,
body: &'a str,
}
}
mod lifetimes_with_names_other_than_a {
#![allow(dead_code)]
use crate::schema::posts;
#[derive(Insertable)]
#[table_name = "posts"]
pub struct MyPost<'a, 'b> {
id: i32,
title: &'b str,
body: &'a str,
}
}
mod insertable_with_cow {
#![allow(dead_code)]
use crate::schema::posts;
use std::borrow::Cow;
#[derive(Insertable)]
#[table_name = "posts"]
pub struct MyPost<'a> {
id: i32,
title: Cow<'a, str>,
body: Cow<'a, str>,
}
}
mod custom_foreign_keys_are_respected_on_belongs_to {
#![allow(dead_code)]
use crate::schema::User;
table! { special_posts { id -> Integer, author_id -> Integer, } }
#[derive(Identifiable, Associations)]
#[belongs_to(User, foreign_key = "author_id")]
pub struct SpecialPost {
id: i32,
author_id: i32,
}
}
mod derive_identifiable_with_lifetime {
#![allow(dead_code)]
use crate::schema::posts;
#[derive(Identifiable)]
pub struct Post<'a> {
id: &'a i32,
}
}
#[test]
fn derive_identifiable_with_non_standard_pk() {
use diesel::associations::*;
#[derive(Identifiable)]
#[table_name = "posts"]
#[primary_key(foo_id)]
#[allow(dead_code)]
struct Foo<'a> {
id: i32,
foo_id: &'a str,
foo: i32,
}
let foo1 = Foo {
id: 1,
foo_id: "hi",
foo: 2,
};
let foo2 = Foo {
id: 2,
foo_id: "there",
foo: 3,
};
assert_eq!(&"hi", foo1.id());
assert_eq!(&"there", foo2.id());
// Fails to compile if wrong table is generated.
let _: posts::table = Foo::<'static>::table();
}
#[test]
fn derive_identifiable_with_composite_pk() {
use diesel::associations::Identifiable;
#[derive(Identifiable)]
#[primary_key(foo_id, bar_id)]
#[table_name = "posts"]
#[allow(dead_code)]
struct Foo {
id: i32,
foo_id: i32,
bar_id: i32,
foo: i32,
}
let foo1 = Foo {
id: 1,
foo_id: 2,
bar_id: 3,
foo: 4,
};
let foo2 = Foo {
id: 5,
foo_id: 6,
bar_id: 7,
foo: 8,
};
assert_eq!((&2, &3), foo1.id());
assert_eq!((&6, &7), foo2.id());
}
#[test]
fn derive_insertable_with_option_for_not_null_field_with_default() {
#[derive(Insertable)]
#[table_name = "users"]
struct NewUser {
id: Option<i32>,
name: &'static str,
}
let conn = connection();
let data = vec![
NewUser {
id: None,
name: "Jim",
},
NewUser {
id: Some(123),
name: "Bob",
},
];
assert_eq!(
Ok(2),
insert_into(users::table).values(&data).execute(&conn)
);
let users = users::table.load::<User>(&conn).unwrap();
let jim = users.iter().find(|u| u.name == "Jim");
let bob = users.iter().find(|u| u.name == "Bob");
assert!(jim.is_some());
assert_eq!(Some(&User::new(123, "Bob")), bob);
}
sql_function!(fn nextval(a: Text) -> Integer);
#[test]
#[cfg(feature = "postgres")]
fn derive_insertable_with_field_that_cannot_convert_expression_to_nullable() {
#[derive(Insertable)]
#[table_name = "users"]
struct NewUser {
id: nextval::HelperType<&'static str>,
name: &'static str,
}
let conn = connection();
let data = NewUser {
id: nextval("users_id_seq"),
name: "Jim",
};
assert_eq!(
Ok(1),
insert_into(users::table).values(&data).execute(&conn)
);
let users = users::table.load::<User>(&conn).unwrap();
let jim = users.iter().find(|u| u.name == "Jim");
<|fim▁hole|>fn nested_queryable_derives() {
#[derive(Queryable, Debug, PartialEq)]
struct UserAndPost {
user: User,
post: Post,
}
let conn = connection_with_sean_and_tess_in_users_table();
let sean = find_user_by_name("Sean", &conn);
insert_into(posts::table)
.values(&sean.new_post("Hi", None))
.execute(&conn)
.unwrap();
let post = posts::table.first(&conn).unwrap();
let expected = UserAndPost { user: sean, post };
let actual = users::table.inner_join(posts::table).get_result(&conn);
assert_eq!(Ok(expected), actual);
}<|fim▁end|> | assert!(jim.is_some());
}
#[test] |
<|file_name|>test_os_server.py<|end_file_name|><|fim▁begin|>import mock
import pytest
import yaml
import inspect
import collections
from ansible.module_utils.six import string_types
from ansible.modules.cloud.openstack import os_server
class AnsibleFail(Exception):
pass
class AnsibleExit(Exception):
pass
def params_from_doc(func):
'''This function extracts the docstring from the specified function,
parses it as a YAML document, and returns parameters for the os_server
module.'''
doc = inspect.getdoc(func)
    cfg = yaml.safe_load(doc)
for task in cfg:
for module, params in task.items():
for k, v in params.items():
if k in ['nics'] and isinstance(v, string_types):
params[k] = [v]
task[module] = collections.defaultdict(str,
params)
return cfg[0]['os_server']
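# Illustrative only: for a docstring holding the YAML document
#   - os_server:
#       nics: net-id=1234
# params_from_doc returns a defaultdict such as {'nics': ['net-id=1234']},
# where plain-string 'nics' values are wrapped in a list and any missing
# key defaults to the empty string.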
class FakeCloud (object):
ports = [
{'name': 'port1', 'id': '1234'},
{'name': 'port2', 'id': '4321'},
]
networks = [
{'name': 'network1', 'id': '5678'},
{'name': 'network2', 'id': '8765'},
]
images = [
{'name': 'cirros', 'id': '1'},
{'name': 'fedora', 'id': '2'},
]
flavors = [
{'name': 'm1.small', 'id': '1', 'flavor_ram': 1024},
{'name': 'm1.tiny', 'id': '2', 'flavor_ram': 512},
]
def _find(self, source, name):
for item in source:
if item['name'] == name or item['id'] == name:
return item
def get_image_id(self, name, exclude=None):
image = self._find(self.images, name)
if image:
return image['id']
def get_flavor(self, name):
return self._find(self.flavors, name)
def get_flavor_by_ram(self, ram, include=None):
for flavor in self.flavors:
            if flavor['flavor_ram'] >= ram and (include is None or include in
                                                flavor['name']):
return flavor
def get_port(self, name):
return self._find(self.ports, name)
def get_network(self, name):
return self._find(self.networks, name)
create_server = mock.MagicMock()
class TestNetworkArgs(object):
'''This class exercises the _network_args function of the
os_server module. For each test, we parse the YAML document
contained in the docstring to retrieve the module parameters for the
test.'''
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
def test_nics_string_net_id(self):
'''
- os_server:
nics: net-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_string_net_id_list(self):
'''
- os_server:
nics: net-id=1234,net-id=4321
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['net-id'] == '4321')
def test_nics_string_port_id(self):
'''
- os_server:
nics: port-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_string_net_name(self):
'''
- os_server:
nics: net-name=network1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '5678')
def test_nics_string_port_name(self):
'''
- os_server:
nics: port-name=port1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_structured_net_id(self):
'''
- os_server:
nics:
- net-id: '1234'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_structured_mixed(self):
'''
- os_server:
nics:
- net-id: '1234'
- port-name: port1
- 'net-name=network1,port-id=4321'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['port-id'] == '1234')
assert(args[2]['net-id'] == '5678')
assert(args[3]['port-id'] == '4321')
<|fim▁hole|> self.module.params = params_from_doc(method)
self.module.fail_json.side_effect = AnsibleFail()
self.module.exit_json.side_effect = AnsibleExit()
self.meta = mock.MagicMock()
        self.meta.get_hostvars_from_server.return_value = {
'id': '1234'
}
os_server.meta = self.meta
def test_create_server(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: network1
meta:
- key: value
'''
with pytest.raises(AnsibleExit):
os_server._create_server(self.module, self.cloud)
assert(self.cloud.create_server.call_count == 1)
assert(self.cloud.create_server.call_args[1]['image']
== self.cloud.get_image_id('cirros'))
assert(self.cloud.create_server.call_args[1]['flavor']
== self.cloud.get_flavor('m1.tiny')['id'])
assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id']
== self.cloud.get_network('network1')['id'])
def test_create_server_bad_flavor(self):
'''
- os_server:
image: cirros
flavor: missing_flavor
nics:
- net-name: network1
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_flavor' in
self.module.fail_json.call_args[1]['msg'])
def test_create_server_bad_nic(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: missing_network
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_network' in
self.module.fail_json.call_args[1]['msg'])<|fim▁end|> | class TestCreateServer(object):
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock() |
<|file_name|>set_get.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#coding=utf-8
import logging
from db_operation import *
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',datefmt='%a, %d %b %Y %H:%M:%S')
def query_conf(marathon,app):
"""
:marathon marathon name
:app_id app id
"""
db=DB()
sql="select * from app_scale_rule where marathon_name='{}' and app_id='{}'".format(marathon,app)
conn=db.connect_mysql()
result=db.select_mysql(conn,sql)<|fim▁hole|>def cpu_get(marathon,app,cpu):
"""
:marathon marathon name
:app app id
:cpu the flag that judge the cpu configuration
"""
db=DB()
conn=db.connect_mysql()
if cpu==1:
sql="select max_threshold,min_threshold from quota_info where marathon_name='{}' and app_id='{}' and rule_type='cpu'".format(marathon,app)
result=db.select_mysql(conn,sql)
db.close_mysql(conn)
return result
else:
db.close_mysql(conn)
return None
def mem_get(marathon,app,mem):
"""
:marathon marathon name
:app app id
:mem the flag that judge the cpu configuration
"""
db=DB()
conn=db.connect_mysql()
if mem==1:
sql="select max_threshold,min_threshold from quota_info where marathon_name='{}' and app_id='{}' and rule_type='memory'".format(marathon,app)
result=db.select_mysql(conn,sql)
return result
else:
return None
def thread_get(marathon,app,thread):
"""
:marathon marathon name
:app app id
:thread the flag that judge the cpu configuration
"""
db=DB()
conn=db.connect_mysql()
if thread==1:
sql="select max_threshold,min_threshold from quota_info where marathon_name='{}' and app_id='{}' and rule_type='thread'".format(marathon,app)
result=db.select_mysql(conn,sql)
return result
else:
return None
def request_queue_get(marathon,app,mem):
"""
:marathon marathon name
:app app id
:request_queue the flag that judge the cpu configuration
"""
db=DB()
conn=db.connect_mysql()
if mem==1:
sql="select max_threshold,min_threshold from quota_info where marathon_name='{}' and app_id='{}' and rule_type='request_queue'".format(marathon,app)
result=db.select_mysql(conn,sql)
return result
else:
return None
if __name__=="__main__":
#results=query_conf('marathon','test1')
#print(results)
cpu=cpu_get('marathon','test',1)
print cpu
"""
for result in results:
print(result)
for item in result:
print(item)
"""
#result=mem_get('marathon','test',0)
#print result
"""
resultl=result[0]
for item in resultl:
print(item)
"""<|fim▁end|> | logging.debug("in query_conf:{}".format(result))
return result |
<|file_name|>test.rs<|end_file_name|><|fim▁begin|>use ascii_canvas::AsciiCanvas;
use grammar::parse_tree::Span;
use message::builder::MessageBuilder;<|fim▁hole|>
use super::*;
fn install_tls() -> Tls {
Tls::test_string(
r#"foo
bar
baz
"#,
)
}
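// Illustrative note: each test below renders its message into an
// AsciiCanvas sized to msg.min_width(), so the expected strings double as
// a check that wrapping happens at the narrowest width the message allows.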
#[test]
fn hello_world() {
let _tls = install_tls();
let msg = MessageBuilder::new(Span(0, 2))
.heading()
.text("Hello, world!")
.end()
.body()
.begin_wrap()
.text(
"This is a very, very, very, very long sentence. \
OK, not THAT long!",
)
.end()
.indented_by(4)
.end()
.end();
let min_width = msg.min_width();
let mut canvas = AsciiCanvas::new(0, min_width);
msg.emit(&mut canvas);
expect_debug(
&canvas.to_strings(),
r#"
[
"tmp.txt:1:1: 1:2: Hello, world!",
"",
" This is a very, very,",
" very, very long sentence.",
" OK, not THAT long!"
]
"#
.trim(),
);
}
/// Test a case where the body in the message is longer than the
/// header (which used to mess up the `min_width` computation).
#[test]
fn long_body() {
let _tls = install_tls();
let msg = MessageBuilder::new(Span(0, 2))
.heading()
.text("Hello, world!")
.end()
.body()
.text(
"This is a very, very, very, very long sentence. \
OK, not THAT long!",
)
.end()
.end();
let min_width = msg.min_width();
let mut canvas = AsciiCanvas::new(0, min_width);
msg.emit(&mut canvas);
expect_debug(
&canvas.to_strings(),
r#"
[
"tmp.txt:1:1: 1:2: Hello, world!",
"",
" This is a very, very, very, very long sentence. OK, not THAT long!"
]
"#
.trim(),
);
}
#[test]
fn paragraphs() {
let _tls = install_tls();
let msg = MessageBuilder::new(Span(0, 2))
.heading()
.text("Hello, world!")
.end() // heading
.body()
.begin_paragraphs()
.begin_wrap()
.text(
"This is the first paragraph. It contains a lot of really interesting \
information that the reader will no doubt peruse with care.",
)
.end()
.begin_wrap()
.text(
"This is the second paragraph. It contains even more really interesting \
information that the reader will no doubt skip over with wild abandon.",
)
.end()
.begin_wrap()
.text(
"This is the final paragraph. The reader won't even spare this one \
a second glance, despite it containing just waht they need to know \
to solve their problem and to derive greater pleasure from life. \
The secret: All you need is love! Dum da da dum.",
)
.end()
.end()
.end()
.end();
let min_width = msg.min_width();
let mut canvas = AsciiCanvas::new(0, min_width);
msg.emit(&mut canvas);
expect_debug(
&canvas.to_strings(),
r#"
[
"tmp.txt:1:1: 1:2: Hello, world!",
"",
" This is the first paragraph.",
" It contains a lot of really",
" interesting information that",
" the reader will no doubt",
" peruse with care.",
"",
" This is the second paragraph.",
" It contains even more really",
" interesting information that",
" the reader will no doubt skip",
" over with wild abandon.",
"",
" This is the final paragraph.",
" The reader won't even spare",
" this one a second glance,",
" despite it containing just",
" waht they need to know to",
" solve their problem and to",
" derive greater pleasure from",
" life. The secret: All you",
" need is love! Dum da da dum."
]
"#
.trim(),
);
}<|fim▁end|> | use test_util::expect_debug;
use tls::Tls; |
<|file_name|>RustTestDescription.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.rust;
import com.facebook.buck.cxx.CxxPlatform;
import com.facebook.buck.cxx.CxxPlatforms;
import com.facebook.buck.cxx.Linker;
import com.facebook.buck.model.BuildTarget;
import com.facebook.buck.model.Flavor;
import com.facebook.buck.model.FlavorDomain;
import com.facebook.buck.model.Flavored;
import com.facebook.buck.model.InternalFlavor;
import com.facebook.buck.parser.NoSuchBuildTargetException;
import com.facebook.buck.rules.AbstractDescriptionArg;
import com.facebook.buck.rules.BinaryWrapperRule;
import com.facebook.buck.rules.BuildRule;
import com.facebook.buck.rules.BuildRuleParams;
import com.facebook.buck.rules.BuildRuleResolver;
import com.facebook.buck.rules.CellPathResolver;
import com.facebook.buck.rules.Description;<|fim▁hole|>import com.facebook.buck.rules.TargetGraph;
import com.facebook.buck.rules.Tool;
import com.facebook.buck.rules.ToolProvider;
import com.facebook.buck.versions.VersionRoot;
import com.facebook.infer.annotation.SuppressFieldNotInitialized;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Stream;
public class RustTestDescription implements
Description<RustTestDescription.Arg>,
ImplicitDepsInferringDescription<RustTestDescription.Arg>,
Flavored,
VersionRoot<RustTestDescription.Arg> {
private final RustBuckConfig rustBuckConfig;
private final FlavorDomain<CxxPlatform> cxxPlatforms;
private final CxxPlatform defaultCxxPlatform;
public RustTestDescription(
RustBuckConfig rustBuckConfig,
FlavorDomain<CxxPlatform> cxxPlatforms, CxxPlatform defaultCxxPlatform) {
this.rustBuckConfig = rustBuckConfig;
this.cxxPlatforms = cxxPlatforms;
this.defaultCxxPlatform = defaultCxxPlatform;
}
@Override
public Arg createUnpopulatedConstructorArg() {
return new Arg();
}
@Override
public <A extends Arg> BuildRule createBuildRule(
TargetGraph targetGraph,
BuildRuleParams params,
BuildRuleResolver resolver,
CellPathResolver cellRoots,
A args) throws NoSuchBuildTargetException {
final BuildTarget buildTarget = params.getBuildTarget();
BuildTarget exeTarget = params.getBuildTarget()
.withAppendedFlavors(InternalFlavor.of("unittest"));
Optional<Map.Entry<Flavor, RustBinaryDescription.Type>> type =
RustBinaryDescription.BINARY_TYPE.getFlavorAndValue(buildTarget);
boolean isCheck = type.map(t -> t.getValue().isCheck()).orElse(false);
BinaryWrapperRule testExeBuild = resolver.addToIndex(
RustCompileUtils.createBinaryBuildRule(
params.withBuildTarget(exeTarget),
resolver,
rustBuckConfig,
cxxPlatforms,
defaultCxxPlatform,
args.crate,
args.features,
Stream.of(
args.framework ? Stream.of("--test") : Stream.<String>empty(),
rustBuckConfig.getRustTestFlags().stream(),
args.rustcFlags.stream())
.flatMap(x -> x).iterator(),
args.linkerFlags.iterator(),
RustCompileUtils.getLinkStyle(params.getBuildTarget(), args.linkStyle),
args.rpath, args.srcs,
args.crateRoot,
ImmutableSet.of("lib.rs", "main.rs"),
isCheck
));
SourcePathRuleFinder ruleFinder = new SourcePathRuleFinder(resolver);
Tool testExe = testExeBuild.getExecutableCommand();
BuildRuleParams testParams = params.copyAppendingExtraDeps(
testExe.getDeps(ruleFinder));
return new RustTest(
testParams,
ruleFinder,
testExeBuild,
args.labels,
args.contacts);
}
@Override
public void findDepsForTargetFromConstructorArgs(
BuildTarget buildTarget,
CellPathResolver cellRoots,
Arg constructorArg,
ImmutableCollection.Builder<BuildTarget> extraDepsBuilder,
ImmutableCollection.Builder<BuildTarget> targetGraphOnlyDepsBuilder) {
ToolProvider compiler = rustBuckConfig.getRustCompiler();
extraDepsBuilder.addAll(compiler.getParseTimeDeps());
extraDepsBuilder.addAll(CxxPlatforms.getParseTimeDeps(cxxPlatforms.getValues()));
}
@Override
public boolean hasFlavors(ImmutableSet<Flavor> flavors) {
if (cxxPlatforms.containsAnyOf(flavors)) {
return true;
}
for (RustBinaryDescription.Type type : RustBinaryDescription.Type.values()) {
if (flavors.contains(type.getFlavor())) {
return true;
}
}
return false;
}
@Override
public Optional<ImmutableSet<FlavorDomain<?>>> flavorDomains() {
return Optional.of(ImmutableSet.of(cxxPlatforms, RustBinaryDescription.BINARY_TYPE));
}
@Override
public boolean isVersionRoot(ImmutableSet<Flavor> flavors) {
return true;
}
@SuppressFieldNotInitialized
public static class Arg extends AbstractDescriptionArg {
public ImmutableSortedSet<SourcePath> srcs = ImmutableSortedSet.of();
public ImmutableSet<String> contacts = ImmutableSet.of();
public ImmutableSortedSet<String> features = ImmutableSortedSet.of();
public ImmutableList<String> rustcFlags = ImmutableList.of();
public ImmutableList<String> linkerFlags = ImmutableList.of();
public ImmutableSortedSet<BuildTarget> deps = ImmutableSortedSet.of();
public Optional<Linker.LinkableDepType> linkStyle;
public boolean rpath = true;
public boolean framework = true;
public Optional<String> crate;
public Optional<SourcePath> crateRoot;
}
}<|fim▁end|> | import com.facebook.buck.rules.ImplicitDepsInferringDescription;
import com.facebook.buck.rules.SourcePath;
import com.facebook.buck.rules.SourcePathRuleFinder; |
<|file_name|>libdump1090.rs<|end_file_name|><|fim▁begin|>// Pitot - a customizable aviation information receiver
// Copyright (C) 2017-2018 Datong Sun ([email protected])
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use super::super::*;
use std::collections::VecDeque;
use std::os::raw::c_void;
use std::slice::from_raw_parts;
const ADDR_TYPE_ADS_B_ICAO: u8 = 1;
const ADDR_TYPE_ADS_B_OTHER: u8 = 2;
const ADDR_TYPE_ADS_R_ICAO: u8 = 3;
const ADDR_TYPE_ADS_R_OTHER: u8 = 4;
const ADDR_TYPE_TIS_B_ICAO: u8 = 5;
const ADDR_TYPE_TIS_B_OTHER: u8 = 6;
const ADDR_TYPE_UNKNOWN: u8 = 7;
const SPEED_IS_GS: u8 = 1;
const SPEED_IS_IAS: u8 = 2;
const SPEED_IS_TAS: u8 = 3;
#[derive(Debug)]
#[repr(C)]
struct TrafficT {
addr: u32,
altitude: i32,
gnss_delta: i32,
heading: u32,
speed: u32,
vs: i32,
squawk: u32,
callsign: *const u8,
category: u32,
lat: f64,
lon: f64,
nic: u32,
nacp: u32,
on_ground: u8,
addr_type: u8,
altitude_valid: u8,
altitude_is_baro: u8,
gnss_delta_valid: u8,
heading_valid: u8,
heading_is_true: u8,
speed_valid: u8,
speed_src: u8,
vs_valid: u8,
squawk_valid: u8,
callsign_valid: u8,
category_valid: u8,
pos_valid: u8,
nacp_valid: u8,
airground_valid: u8,
}
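// Illustrative note: this struct mirrors the C layout of libdump1090's
// callback argument, so the field order and #[repr(C)] must match the C
// side exactly; the *_valid flags gate which fields carry meaningful data.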
pub struct Dump1090 {
parsed: VecDeque<TrafficData>,
}
#[link(name = "dump1090")]
extern "C" {
fn dump1090_init(
cb: extern "C" fn(inst: *mut c_void, traffic: *const TrafficT),
data: *const c_void,
) -> i32;
fn dump1090_process(data: *const u8, len: usize);
}
impl Dump1090 {
pub fn new() -> Box<Self> {
// this has to be boxed to get the address of self for callback
// now
let me = Box::new(Self {
parsed: VecDeque::new(),
});
unsafe {
if dump1090_init(callback, &*me as *const _ as *const c_void) != 0 {
panic!("unable to init libdump1090");
}
}
me
}
pub fn process_data(&mut self, buf: &[u8]) {
unsafe { dump1090_process(buf.as_ptr(), buf.len()) }
}
pub fn parsed_as_mut_ref(&mut self) -> &mut VecDeque<TrafficData> {
&mut self.parsed
}
fn push_message(&mut self, msg: TrafficData) {
trace!("got a Mode S message: {:?}", msg);
self.parsed.push_back(msg);
}
}
unsafe impl Send for Dump1090 {}
extern "C" fn callback(inst: *mut c_void, traffic: *const TrafficT) {
let inst = inst as *mut Dump1090;
unsafe {
let traffic = &*traffic;
if traffic.addr == 0 {
// this happens sometimes, just ignore
return;
}
let msg = TrafficData {
addr: (
traffic.addr,
match traffic.addr_type {
ADDR_TYPE_ADS_B_ICAO => AddressType::ADSBICAO,
ADDR_TYPE_ADS_B_OTHER => AddressType::ADSBOther,
ADDR_TYPE_ADS_R_ICAO => AddressType::ADSRICAO,
ADDR_TYPE_ADS_R_OTHER => AddressType::ADSROther,
ADDR_TYPE_TIS_B_ICAO => AddressType::TISBICAO,
ADDR_TYPE_TIS_B_OTHER => AddressType::TISBOther,
ADDR_TYPE_UNKNOWN => AddressType::Unknown,
_ => unreachable!(),
},
),
altitude: match traffic.altitude_valid {
1 => Some((
traffic.altitude,
if traffic.altitude_is_baro == 1 {
AltitudeType::Baro
} else {
AltitudeType::GNSS
},
)),
_ => None,
},
gnss_delta: match traffic.gnss_delta_valid {
1 => Some(traffic.gnss_delta),
_ => None,
},
heading: match traffic.heading_valid {
1 => Some((
traffic.heading as u16,
if traffic.heading_is_true == 1 {
HeadingType::True
} else {
HeadingType::Mag
},
)),
_ => None,
},
speed: match traffic.speed_valid {
1 => Some((
traffic.speed as u16,
match traffic.speed_src {
SPEED_IS_GS => SpeedType::GS,
SPEED_IS_IAS => SpeedType::IAS,
SPEED_IS_TAS => SpeedType::TAS,
_ => unreachable!(),
},
)),
_ => None,
},<|fim▁hole|> vs: match traffic.vs_valid {
1 => Some(traffic.vs as i16),
_ => None,
},
squawk: match traffic.squawk_valid {
1 => {
let mut sq = 0_u16;
sq += (traffic.squawk as u16 >> 12) * 1000;
sq += ((traffic.squawk as u16 & 0x0F00) >> 8) * 100;
sq += ((traffic.squawk as u16 & 0x00F0) >> 4) * 10;
sq += traffic.squawk as u16 & 0x000F;
Some(sq)
}
_ => None,
},
callsign: match traffic.callsign_valid {
1 => {
let s = from_raw_parts(traffic.callsign, 8);
let mut v = Vec::with_capacity(s.len());
v.extend_from_slice(s);
String::from_utf8(v).ok().and_then(|s| {
let trimmed = s.trim();
if trimmed.len() > 0 {
Some(String::from(trimmed))
} else {
None
}
})
}
_ => None,
},
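            // ADS-B emitter categories are transmitted as (set, item)
            // nibbles, with sets starting at 0x0A ("A"); flatten them into
            // a single zero-based index, eight items per set.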
category: match traffic.category_valid {
1 => {
let mut ct = 0_u8;
ct += (((traffic.category as u8 & 0xF0) >> 4) - 0x0A) * 8;
ct += traffic.category as u8 & 0x0F;
Some(ct)
}
_ => None,
},
lat_lon: match traffic.pos_valid {
1 => Some((traffic.lat as f32, traffic.lon as f32)),
_ => None,
},
nic: match traffic.pos_valid {
1 => Some(traffic.nic as u8),
_ => None,
},
nacp: match traffic.nacp_valid {
1 => Some(traffic.nacp as u8),
_ => None,
},
on_ground: match traffic.airground_valid {
1 => Some(traffic.on_ground == 1),
_ => None,
},
source: TrafficSource::ES,
};
(*inst).push_message(msg);
}
}<|fim▁end|> | |
<|file_name|>quic_chromium_client_session.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/quic/quic_chromium_client_session.h"
#include <memory>
#include <utility>
#include "base/bind.h"
#include "base/containers/contains.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/sparse_histogram.h"
#include "base/no_destructor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/task/post_task.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/memory_usage_estimator.h"
#include "base/values.h"
#include "net/base/features.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/network_activity_monitor.h"
#include "net/base/network_isolation_key.h"
#include "net/base/privacy_mode.h"
#include "net/base/url_util.h"
#include "net/cert/signed_certificate_timestamp_and_status.h"
#include "net/http/transport_security_state.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"
#include "net/quic/address_utils.h"
#include "net/quic/crypto/proof_verifier_chromium.h"
#include "net/quic/quic_chromium_connection_helper.h"
#include "net/quic/quic_chromium_packet_writer.h"
#include "net/quic/quic_connectivity_probing_manager.h"
#include "net/quic/quic_crypto_client_stream_factory.h"
#include "net/quic/quic_server_info.h"
#include "net/quic/quic_stream_factory.h"
#include "net/socket/datagram_client_socket.h"
#include "net/spdy/spdy_http_utils.h"
#include "net/spdy/spdy_log_util.h"
#include "net/spdy/spdy_session.h"
#include "net/ssl/ssl_connection_status_flags.h"
#include "net/ssl/ssl_info.h"
#include "net/third_party/quiche/src/quic/core/http/quic_client_promised_info.h"
#include "net/third_party/quiche/src/quic/core/http/spdy_server_push_utils.h"
#include "net/third_party/quiche/src/quic/core/quic_utils.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
#include "net/traffic_annotation/network_traffic_annotation.h"
#include "third_party/boringssl/src/include/openssl/ssl.h"
#include "url/origin.h"
#include "url/scheme_host_port.h"
namespace net {
namespace {
// IPv6 packets have 20 bytes more overhead than IPv4 packets.
const size_t kAdditionalOverheadForIPv6 = 20;
// Maximum number of Readers that are created for any session due to
// connection migration. A new Reader is created every time this endpoint's
// IP address changes.
const size_t kMaxReadersPerQuicSession = 5;
// Time to wait (in seconds) when no networks are available and
// migrating sessions need to wait for a new network to connect.
const size_t kWaitTimeForNewNetworkSecs = 10;
const size_t kMinRetryTimeForDefaultNetworkSecs = 1;
// Maximum RTT assumed for this session when setting the initial timeout
// for probing a network.
const int kDefaultRTTMilliSecs = 300;
// These values are persisted to logs. Entries should not be renumbered,
// and numeric values should never be reused.
enum class AcceptChEntries {
kNoEntries = 0,
kOnlyValidEntries = 1,
kOnlyInvalidEntries = 2,
kBothValidAndInvalidEntries = 3,
kMaxValue = kBothValidAndInvalidEntries,
};
void LogAcceptChFrameReceivedHistogram(bool has_valid_entry,
bool has_invalid_entry) {
AcceptChEntries value;
if (has_valid_entry) {
if (has_invalid_entry) {
value = AcceptChEntries::kBothValidAndInvalidEntries;
} else {
value = AcceptChEntries::kOnlyValidEntries;
}
} else {
if (has_invalid_entry) {
value = AcceptChEntries::kOnlyInvalidEntries;
} else {
value = AcceptChEntries::kNoEntries;
}
}
base::UmaHistogramEnumeration("Net.QuicSession.AcceptChFrameReceivedViaAlps",
value);
}
void LogAcceptChForOriginHistogram(bool value) {
base::UmaHistogramBoolean("Net.QuicSession.AcceptChForOrigin", value);
}
void RecordConnectionCloseErrorCodeImpl(const std::string& histogram,
uint64_t error,
bool is_google_host,
bool handshake_confirmed) {
base::UmaHistogramSparse(histogram, error);
if (handshake_confirmed) {
base::UmaHistogramSparse(histogram + ".HandshakeConfirmed", error);
} else {
base::UmaHistogramSparse(histogram + ".HandshakeNotConfirmed", error);
}
if (is_google_host) {
base::UmaHistogramSparse(histogram + "Google", error);
if (handshake_confirmed) {
base::UmaHistogramSparse(histogram + "Google.HandshakeConfirmed", error);
} else {
base::UmaHistogramSparse(histogram + "Google.HandshakeNotConfirmed",
error);
}
}
}
void LogMigrateToSocketStatus(bool success) {
UMA_HISTOGRAM_BOOLEAN("Net.QuicSession.MigrateToSocketSuccess", success);
}
void RecordConnectionCloseErrorCode(const quic::QuicConnectionCloseFrame& frame,
quic::ConnectionCloseSource source,
const std::string& hostname,
bool handshake_confirmed) {
bool is_google_host = IsGoogleHost(hostname);
std::string histogram = "Net.QuicSession.ConnectionCloseErrorCode";
if (source == quic::ConnectionCloseSource::FROM_SELF) {
// When sending a CONNECTION_CLOSE frame, it is sufficient to record
// |quic_error_code|.
histogram += "Client";
RecordConnectionCloseErrorCodeImpl(histogram, frame.quic_error_code,
is_google_host, handshake_confirmed);
return;
}
histogram += "Server";
// Record |quic_error_code|. Note that when using IETF QUIC, this is
// extracted from the CONNECTION_CLOSE frame reason phrase, and might be
// QUIC_IETF_GQUIC_ERROR_MISSING.
RecordConnectionCloseErrorCodeImpl(histogram, frame.quic_error_code,
is_google_host, handshake_confirmed);
// For IETF QUIC frames, also record the error code received on the wire.
if (frame.close_type == quic::IETF_QUIC_TRANSPORT_CONNECTION_CLOSE) {
histogram += "IetfTransport";
RecordConnectionCloseErrorCodeImpl(histogram, frame.wire_error_code,
is_google_host, handshake_confirmed);
if (frame.quic_error_code == quic::QUIC_IETF_GQUIC_ERROR_MISSING) {
histogram += "GQuicErrorMissing";
RecordConnectionCloseErrorCodeImpl(histogram, frame.wire_error_code,
is_google_host, handshake_confirmed);
}
} else if (frame.close_type == quic::IETF_QUIC_APPLICATION_CONNECTION_CLOSE) {
histogram += "IetfApplication";
RecordConnectionCloseErrorCodeImpl(histogram, frame.wire_error_code,
is_google_host, handshake_confirmed);
if (frame.quic_error_code == quic::QUIC_IETF_GQUIC_ERROR_MISSING) {
histogram += "GQuicErrorMissing";
RecordConnectionCloseErrorCodeImpl(histogram, frame.wire_error_code,
is_google_host, handshake_confirmed);
}
}
}
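// Illustrative example of the suffix composition above: a server-sent IETF
// transport close on a Google host before the handshake completes is
// recorded under Net.QuicSession.ConnectionCloseErrorCodeServer, its
// .HandshakeNotConfirmed variant, the ...ServerGoogle pair, and the
// matching ...ServerIetfTransport names.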
base::Value NetLogQuicMigrationFailureParams(
quic::QuicConnectionId connection_id,
base::StringPiece reason) {
base::DictionaryValue dict;
dict.SetString("connection_id", connection_id.ToString());
dict.SetString("reason", reason);
return std::move(dict);
}
base::Value NetLogQuicMigrationSuccessParams(
quic::QuicConnectionId connection_id) {
base::DictionaryValue dict;
dict.SetString("connection_id", connection_id.ToString());
return std::move(dict);
}
base::Value NetLogProbingResultParams(
NetworkChangeNotifier::NetworkHandle network,
const quic::QuicSocketAddress* peer_address,
bool is_success) {
base::DictionaryValue dict;
dict.SetString("network", base::NumberToString(network));
dict.SetString("peer address", peer_address->ToString());
dict.SetBoolean("is_success", is_success);
return std::move(dict);
}
// Histogram for recording the different reasons that a QUIC session is unable
// to complete the handshake.
enum HandshakeFailureReason {
HANDSHAKE_FAILURE_UNKNOWN = 0,
HANDSHAKE_FAILURE_BLACK_HOLE = 1,
HANDSHAKE_FAILURE_PUBLIC_RESET = 2,
NUM_HANDSHAKE_FAILURE_REASONS = 3,
};
void RecordHandshakeFailureReason(HandshakeFailureReason reason) {
UMA_HISTOGRAM_ENUMERATION(
"Net.QuicSession.ConnectionClose.HandshakeNotConfirmed.Reason", reason,
NUM_HANDSHAKE_FAILURE_REASONS);
}
// Note: these values must be kept in sync with the corresponding values in:
// tools/metrics/histograms/histograms.xml
enum HandshakeState {
STATE_STARTED = 0,
STATE_ENCRYPTION_ESTABLISHED = 1,
STATE_HANDSHAKE_CONFIRMED = 2,
STATE_FAILED = 3,
NUM_HANDSHAKE_STATES = 4
};
enum class ZeroRttState {
kAttemptedAndSucceeded = 0,
kAttemptedAndRejected = 1,
kNotAttempted = 2,
kMaxValue = kNotAttempted,
};
void RecordHandshakeState(HandshakeState state) {
UMA_HISTOGRAM_ENUMERATION("Net.QuicHandshakeState", state,
NUM_HANDSHAKE_STATES);
}
std::string MigrationCauseToString(MigrationCause cause) {
switch (cause) {
case UNKNOWN_CAUSE:
return "Unknown";
case ON_NETWORK_CONNECTED:
return "OnNetworkConnected";
case ON_NETWORK_DISCONNECTED:
return "OnNetworkDisconnected";
case ON_WRITE_ERROR:
return "OnWriteError";
case ON_NETWORK_MADE_DEFAULT:
return "OnNetworkMadeDefault";
case ON_MIGRATE_BACK_TO_DEFAULT_NETWORK:
return "OnMigrateBackToDefaultNetwork";
case CHANGE_NETWORK_ON_PATH_DEGRADING:
return "OnPathDegrading";
case CHANGE_PORT_ON_PATH_DEGRADING:
return "ChangePortOnPathDegrading";
case NEW_NETWORK_CONNECTED_POST_PATH_DEGRADING:
return "NewNetworkConnectedPostPathDegrading";
default:
QUIC_NOTREACHED();
break;
}
return "InvalidCause";
}
base::Value NetLogQuicClientSessionParams(
const QuicSessionKey* session_key,
const quic::QuicConnectionId& connection_id,
const quic::QuicConnectionId& client_connection_id,
const quic::ParsedQuicVersionVector& supported_versions,
int cert_verify_flags,
bool require_confirmation) {
base::Value dict(base::Value::Type::DICTIONARY);
dict.SetStringKey("host", session_key->server_id().host());
dict.SetIntKey("port", session_key->server_id().port());
dict.SetStringKey("privacy_mode",
PrivacyModeToDebugString(session_key->privacy_mode()));
dict.SetStringKey("network_isolation_key",
session_key->network_isolation_key().ToDebugString());
dict.SetBoolKey("require_confirmation", require_confirmation);
dict.SetIntKey("cert_verify_flags", cert_verify_flags);
dict.SetStringKey("connection_id", connection_id.ToString());
if (!client_connection_id.IsEmpty()) {
dict.SetStringKey("client_connection_id", client_connection_id.ToString());
}
dict.SetStringKey("versions",
ParsedQuicVersionVectorToString(supported_versions));
return dict;
}
base::Value NetLogQuicPushPromiseReceivedParams(
const spdy::Http2HeaderBlock* headers,
spdy::SpdyStreamId stream_id,
spdy::SpdyStreamId promised_stream_id,
NetLogCaptureMode capture_mode) {
base::DictionaryValue dict;
dict.SetKey("headers",
ElideHttp2HeaderBlockForNetLog(*headers, capture_mode));
dict.SetInteger("id", stream_id);
dict.SetInteger("promised_stream_id", promised_stream_id);
return std::move(dict);
}
// TODO(fayang): Remove this when necessary data is collected.
void LogProbeResultToHistogram(MigrationCause cause, bool success) {
UMA_HISTOGRAM_BOOLEAN("Net.QuicSession.PathValidationSuccess", success);
const std::string histogram_name =
"Net.QuicSession.PathValidationSuccess." + MigrationCauseToString(cause);
STATIC_HISTOGRAM_POINTER_GROUP(
histogram_name, cause, MIGRATION_CAUSE_MAX, AddBoolean(success),
base::BooleanHistogram::FactoryGet(
histogram_name, base::HistogramBase::kUmaTargetedHistogramFlag));
}
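// Holds a pushed URL together with a weak pointer to its session so that the
// push can be cancelled safely even after the session has been destroyed.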
class QuicServerPushHelper : public ServerPushDelegate::ServerPushHelper {
public:
explicit QuicServerPushHelper(
base::WeakPtr<QuicChromiumClientSession> session,
const GURL& url)
: session_(session), request_url_(url) {}
void Cancel() override {
if (session_) {
session_->CancelPush(request_url_);
}
}
const GURL& GetURL() const override { return request_url_; }
NetworkIsolationKey GetNetworkIsolationKey() const override {
if (session_) {
return session_->quic_session_key().network_isolation_key();
}
return NetworkIsolationKey();
}
private:
base::WeakPtr<QuicChromiumClientSession> session_;
const GURL request_url_;
};
} // namespace
QuicChromiumClientSession::Handle::Handle(
const base::WeakPtr<QuicChromiumClientSession>& session,
url::SchemeHostPort destination)
: MultiplexedSessionHandle(session),
session_(session),
destination_(std::move(destination)),
net_log_(session_->net_log()),
was_handshake_confirmed_(session->OneRttKeysAvailable()),
net_error_(OK),
quic_error_(quic::QUIC_NO_ERROR),
port_migration_detected_(false),
server_id_(session_->server_id()),
quic_version_(session->connection()->version()),
push_handle_(nullptr),
was_ever_used_(false) {
DCHECK(session_);
session_->AddHandle(this);
}
QuicChromiumClientSession::Handle::~Handle() {
if (push_handle_) {
auto* push_handle = push_handle_;
push_handle_ = nullptr;
push_handle->Cancel();
}
if (session_)
session_->RemoveHandle(this);
}
void QuicChromiumClientSession::Handle::OnCryptoHandshakeConfirmed() {
was_handshake_confirmed_ = true;
}
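// Called by the session when it closes; snapshots the state that the handle
// must still be able to report after |session_| goes away.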
void QuicChromiumClientSession::Handle::OnSessionClosed(
quic::ParsedQuicVersion quic_version,
int net_error,
quic::QuicErrorCode quic_error,
bool port_migration_detected,
LoadTimingInfo::ConnectTiming connect_timing,
bool was_ever_used) {
session_ = nullptr;
port_migration_detected_ = port_migration_detected;
net_error_ = net_error;
quic_error_ = quic_error;
quic_version_ = quic_version;
connect_timing_ = connect_timing;
push_handle_ = nullptr;
was_ever_used_ = was_ever_used;
}
bool QuicChromiumClientSession::Handle::IsConnected() const {
return session_ != nullptr;
}
bool QuicChromiumClientSession::Handle::OneRttKeysAvailable() const {
return was_handshake_confirmed_;
}
const LoadTimingInfo::ConnectTiming&
QuicChromiumClientSession::Handle::GetConnectTiming() {
if (!session_)
return connect_timing_;
return session_->GetConnectTiming();
}
void QuicChromiumClientSession::Handle::PopulateNetErrorDetails(
NetErrorDetails* details) const {
if (session_) {
session_->PopulateNetErrorDetails(details);
} else {
details->quic_port_migration_detected = port_migration_detected_;
details->quic_connection_error = quic_error_;
}
}
quic::ParsedQuicVersion QuicChromiumClientSession::Handle::GetQuicVersion()
const {
if (!session_)
return quic_version_;
return session_->GetQuicVersion();
}
void QuicChromiumClientSession::Handle::ResetPromised(
quic::QuicStreamId id,
quic::QuicRstStreamErrorCode error_code) {
if (session_)
session_->ResetPromised(id, error_code);
}
std::unique_ptr<quic::QuicConnection::ScopedPacketFlusher>
QuicChromiumClientSession::Handle::CreatePacketBundler() {
if (!session_)
return nullptr;
return std::make_unique<quic::QuicConnection::ScopedPacketFlusher>(
session_->connection());
}
bool QuicChromiumClientSession::Handle::SharesSameSession(
const Handle& other) const {
return session_.get() == other.session_.get();
}
int QuicChromiumClientSession::Handle::RendezvousWithPromised(
const spdy::Http2HeaderBlock& headers,
CompletionOnceCallback callback) {
if (!session_)
return ERR_CONNECTION_CLOSED;
quic::QuicAsyncStatus push_status =
session_->push_promise_index()->Try(headers, this, &push_handle_);
switch (push_status) {
case quic::QUIC_FAILURE:
return ERR_FAILED;
case quic::QUIC_SUCCESS:
return OK;
case quic::QUIC_PENDING:
push_callback_ = std::move(callback);
return ERR_IO_PENDING;
}
NOTREACHED();
return ERR_UNEXPECTED;
}
int QuicChromiumClientSession::Handle::RequestStream(
bool requires_confirmation,
CompletionOnceCallback callback,
const NetworkTrafficAnnotationTag& traffic_annotation) {
DCHECK(!stream_request_);
if (!session_)
return ERR_CONNECTION_CLOSED;
requires_confirmation |= session_->gquic_zero_rtt_disabled();
// std::make_unique does not work because the StreamRequest constructor
// is private.
stream_request_ = base::WrapUnique(
new StreamRequest(this, requires_confirmation, traffic_annotation));
return stream_request_->StartRequest(std::move(callback));
}
std::unique_ptr<QuicChromiumClientStream::Handle>
QuicChromiumClientSession::Handle::ReleaseStream() {
DCHECK(stream_request_);
auto handle = stream_request_->ReleaseStream();
stream_request_.reset();
return handle;
}
std::unique_ptr<QuicChromiumClientStream::Handle>
QuicChromiumClientSession::Handle::ReleasePromisedStream() {
DCHECK(push_stream_);
return std::move(push_stream_);
}
int QuicChromiumClientSession::Handle::WaitForHandshakeConfirmation(
CompletionOnceCallback callback) {
if (!session_)
return ERR_CONNECTION_CLOSED;
return session_->WaitForHandshakeConfirmation(std::move(callback));
}
void QuicChromiumClientSession::Handle::CancelRequest(StreamRequest* request) {
if (session_)
session_->CancelRequest(request);
}
int QuicChromiumClientSession::Handle::TryCreateStream(StreamRequest* request) {
if (!session_)
return ERR_CONNECTION_CLOSED;
return session_->TryCreateStream(request);
}
quic::QuicClientPushPromiseIndex*
QuicChromiumClientSession::Handle::GetPushPromiseIndex() {
if (!session_)
return push_promise_index_;
return session_->push_promise_index();
}
int QuicChromiumClientSession::Handle::GetPeerAddress(
IPEndPoint* address) const {
if (!session_)
return ERR_CONNECTION_CLOSED;
*address = ToIPEndPoint(session_->peer_address());
return OK;
}
int QuicChromiumClientSession::Handle::GetSelfAddress(
IPEndPoint* address) const {
if (!session_)
return ERR_CONNECTION_CLOSED;
*address = ToIPEndPoint(session_->self_address());
return OK;
}
bool QuicChromiumClientSession::Handle::WasEverUsed() const {
if (!session_)
return was_ever_used_;
return session_->WasConnectionEverUsed();
}
const std::vector<std::string>&
QuicChromiumClientSession::Handle::GetDnsAliasesForSessionKey(
const QuicSessionKey& key) const {
static const base::NoDestructor<std::vector<std::string>> emptyvector_result;
return session_ ? session_->GetDnsAliasesForSessionKey(key)
: *emptyvector_result;
}
bool QuicChromiumClientSession::Handle::CheckVary(
const spdy::Http2HeaderBlock& client_request,
const spdy::Http2HeaderBlock& promise_request,
const spdy::Http2HeaderBlock& promise_response) {
HttpRequestInfo promise_request_info;
ConvertHeaderBlockToHttpRequestHeaders(promise_request,
&promise_request_info.extra_headers);
HttpRequestInfo client_request_info;
ConvertHeaderBlockToHttpRequestHeaders(client_request,
&client_request_info.extra_headers);
HttpResponseInfo promise_response_info;
if (!SpdyHeadersToHttpResponse(promise_response, &promise_response_info)) {
DLOG(WARNING) << "Invalid headers";
return false;
}
HttpVaryData vary_data;
if (!vary_data.Init(promise_request_info,
*promise_response_info.headers.get())) {
// Promise didn't contain valid vary info, so URL match was sufficient.
return true;
}
// Now compare the client request for matching.
return vary_data.MatchesRequest(client_request_info,
*promise_response_info.headers.get());
}
void QuicChromiumClientSession::Handle::OnRendezvousResult(
quic::QuicSpdyStream* stream) {
DCHECK(!push_stream_);
int rv = ERR_FAILED;
if (stream) {
rv = OK;
push_stream_ =
static_cast<QuicChromiumClientStream*>(stream)->CreateHandle();
}
if (push_callback_) {
DCHECK(push_handle_);
push_handle_ = nullptr;
std::move(push_callback_).Run(rv);
}
}
QuicChromiumClientSession::StreamRequest::StreamRequest(
QuicChromiumClientSession::Handle* session,
bool requires_confirmation,
const NetworkTrafficAnnotationTag& traffic_annotation)
: session_(session),
requires_confirmation_(requires_confirmation),
stream_(nullptr),
traffic_annotation_(traffic_annotation) {}
QuicChromiumClientSession::StreamRequest::~StreamRequest() {
if (stream_)
stream_->Reset(quic::QUIC_STREAM_CANCELLED);
if (session_)
session_->CancelRequest(this);
}
int QuicChromiumClientSession::StreamRequest::StartRequest(
CompletionOnceCallback callback) {
if (!session_->IsConnected())
return ERR_CONNECTION_CLOSED;
next_state_ = STATE_WAIT_FOR_CONFIRMATION;
int rv = DoLoop(OK);
if (rv == ERR_IO_PENDING)
callback_ = std::move(callback);
return rv;
}
std::unique_ptr<QuicChromiumClientStream::Handle>
QuicChromiumClientSession::StreamRequest::ReleaseStream() {
DCHECK(stream_);
return std::move(stream_);
}
void QuicChromiumClientSession::StreamRequest::OnRequestCompleteSuccess(
std::unique_ptr<QuicChromiumClientStream::Handle> stream) {
DCHECK_EQ(STATE_REQUEST_STREAM_COMPLETE, next_state_);
stream_ = std::move(stream);
// This method is called even when the request completes synchronously.
if (callback_)
DoCallback(OK);
}
void QuicChromiumClientSession::StreamRequest::OnRequestCompleteFailure(
int rv) {
DCHECK_EQ(STATE_REQUEST_STREAM_COMPLETE, next_state_);
// This method is called even when the request completes synchronously.
if (callback_) {
// Avoid re-entrancy if the callback calls into the session.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&QuicChromiumClientSession::StreamRequest::DoCallback,
weak_factory_.GetWeakPtr(), rv));
}
}
void QuicChromiumClientSession::StreamRequest::OnIOComplete(int rv) {
rv = DoLoop(rv);
if (rv != ERR_IO_PENDING && !callback_.is_null()) {
DoCallback(rv);
}
}
void QuicChromiumClientSession::StreamRequest::DoCallback(int rv) {
CHECK_NE(rv, ERR_IO_PENDING);
CHECK(!callback_.is_null());
// The client callback can do anything, including destroying this class,
// so any pending callback must be issued after everything else is done.
std::move(callback_).Run(rv);
}
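// Drives the stream request state machine until it finishes or blocks on I/O
// (ERR_IO_PENDING).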
int QuicChromiumClientSession::StreamRequest::DoLoop(int rv) {
do {
State state = next_state_;
next_state_ = STATE_NONE;
switch (state) {
case STATE_WAIT_FOR_CONFIRMATION:
CHECK_EQ(OK, rv);
rv = DoWaitForConfirmation();
break;
case STATE_WAIT_FOR_CONFIRMATION_COMPLETE:
rv = DoWaitForConfirmationComplete(rv);
break;
case STATE_REQUEST_STREAM:
CHECK_EQ(OK, rv);
rv = DoRequestStream();
break;
case STATE_REQUEST_STREAM_COMPLETE:
rv = DoRequestStreamComplete(rv);
break;
default:
NOTREACHED() << "next_state_: " << next_state_;
break;
}
} while (next_state_ != STATE_NONE && rv != ERR_IO_PENDING);
return rv;
}
int QuicChromiumClientSession::StreamRequest::DoWaitForConfirmation() {
next_state_ = STATE_WAIT_FOR_CONFIRMATION_COMPLETE;
if (requires_confirmation_) {
return session_->WaitForHandshakeConfirmation(
base::BindOnce(&QuicChromiumClientSession::StreamRequest::OnIOComplete,
weak_factory_.GetWeakPtr()));
}
return OK;
}
int QuicChromiumClientSession::StreamRequest::DoWaitForConfirmationComplete(
int rv) {
DCHECK_NE(ERR_IO_PENDING, rv);
if (rv < 0)
return rv;
next_state_ = STATE_REQUEST_STREAM;
return OK;
}
int QuicChromiumClientSession::StreamRequest::DoRequestStream() {
next_state_ = STATE_REQUEST_STREAM_COMPLETE;
return session_->TryCreateStream(this);
}
int QuicChromiumClientSession::StreamRequest::DoRequestStreamComplete(int rv) {
DCHECK(rv == OK || !stream_);
return rv;
}
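// Owns the socket, packet writer, and packet reader for a path that is being
// probed; they are released to the session if path validation succeeds.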
QuicChromiumClientSession::QuicChromiumPathValidationContext::
QuicChromiumPathValidationContext(
const quic::QuicSocketAddress& self_address,
const quic::QuicSocketAddress& peer_address,
NetworkChangeNotifier::NetworkHandle network,
std::unique_ptr<DatagramClientSocket> socket,
std::unique_ptr<QuicChromiumPacketWriter> writer,
std::unique_ptr<QuicChromiumPacketReader> reader)
: QuicPathValidationContext(self_address, peer_address),
network_handle_(network),
socket_(std::move(socket)),
writer_(std::move(writer)),
reader_(std::move(reader)) {}
QuicChromiumClientSession::QuicChromiumPathValidationContext::
~QuicChromiumPathValidationContext() = default;
NetworkChangeNotifier::NetworkHandle
QuicChromiumClientSession::QuicChromiumPathValidationContext::network() {
return network_handle_;
}
quic::QuicPacketWriter*
QuicChromiumClientSession::QuicChromiumPathValidationContext::WriterToUse() {
return writer_.get();
}
std::unique_ptr<QuicChromiumPacketWriter>
QuicChromiumClientSession::QuicChromiumPathValidationContext::ReleaseWriter() {
return std::move(writer_);
}
std::unique_ptr<DatagramClientSocket>
QuicChromiumClientSession::QuicChromiumPathValidationContext::ReleaseSocket() {
return std::move(socket_);
}
std::unique_ptr<QuicChromiumPacketReader>
QuicChromiumClientSession::QuicChromiumPathValidationContext::ReleaseReader() {
return std::move(reader_);
}
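// Relays path validation results to the session for probes sent as part of
// connection migration.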
QuicChromiumClientSession::ConnectionMigrationValidationResultDelegate::
ConnectionMigrationValidationResultDelegate(
QuicChromiumClientSession* session)
: session_(session) {}
void QuicChromiumClientSession::ConnectionMigrationValidationResultDelegate::
OnPathValidationSuccess(
std::unique_ptr<quic::QuicPathValidationContext> context) {
auto* chrome_context =
static_cast<QuicChromiumPathValidationContext*>(context.get());
session_->OnConnectionMigrationProbeSucceeded(
chrome_context->network(), chrome_context->peer_address(),
chrome_context->self_address(), chrome_context->ReleaseSocket(),
chrome_context->ReleaseWriter(), chrome_context->ReleaseReader());
}
void QuicChromiumClientSession::ConnectionMigrationValidationResultDelegate::
OnPathValidationFailure(
std::unique_ptr<quic::QuicPathValidationContext> context) {
session_->connection()->OnPathValidationFailureAtClient();
// Note that socket, packet writer, and packet reader in |context| will be
// discarded.
auto* chrome_context =
static_cast<QuicChromiumPathValidationContext*>(context.get());
session_->OnProbeFailed(chrome_context->network(),
chrome_context->peer_address());
}
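// Relays path validation results to the session for probes sent as part of
// port migration.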
QuicChromiumClientSession::PortMigrationValidationResultDelegate::
PortMigrationValidationResultDelegate(QuicChromiumClientSession* session)
: session_(session) {}
void QuicChromiumClientSession::PortMigrationValidationResultDelegate::
OnPathValidationSuccess(
std::unique_ptr<quic::QuicPathValidationContext> context) {
auto* chrome_context =
static_cast<QuicChromiumPathValidationContext*>(context.get());
session_->OnPortMigrationProbeSucceeded(
chrome_context->network(), chrome_context->peer_address(),
chrome_context->self_address(), chrome_context->ReleaseSocket(),
chrome_context->ReleaseWriter(), chrome_context->ReleaseReader());
}
void QuicChromiumClientSession::PortMigrationValidationResultDelegate::
OnPathValidationFailure(
std::unique_ptr<quic::QuicPathValidationContext> context) {
session_->connection()->OnPathValidationFailureAtClient();
// Note that socket, packet writer, and packet reader in |context| will be
// discarded.
auto* chrome_context =
static_cast<QuicChromiumPathValidationContext*>(context.get());
session_->OnProbeFailed(chrome_context->network(),
chrome_context->peer_address());
}
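// Observes the packet writer used for path probing and cancels the probe on
// any write error.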
QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
QuicChromiumPathValidationWriterDelegate(
QuicChromiumClientSession* session,
base::SequencedTaskRunner* task_runner)
: session_(session),
task_runner_(task_runner),
network_(NetworkChangeNotifier::kInvalidNetworkHandle) {}
QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
~QuicChromiumPathValidationWriterDelegate() = default;
int QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
HandleWriteError(
int error_code,
scoped_refptr<QuicChromiumPacketWriter::ReusableIOBuffer> last_packet) {
// Write error on the probing network is not recoverable.
DVLOG(1) << "Probing packet encounters write error " << error_code;
// Post a task to notify |session_| that this probe failed and cancel
// undergoing probing, which will delete the packet writer.
task_runner_->PostTask(
FROM_HERE,
base::BindOnce(
&QuicChromiumPathValidationWriterDelegate::NotifySessionProbeFailed,
weak_factory_.GetWeakPtr(), network_));
return error_code;
}
void QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
OnWriteError(int error_code) {
NotifySessionProbeFailed(network_);
}
void QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
OnWriteUnblocked() {}
void QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
NotifySessionProbeFailed(NetworkChangeNotifier::NetworkHandle network) {
session_->OnProbeFailed(network, peer_address_);
}
void QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
set_peer_address(const quic::QuicSocketAddress& peer_address) {
peer_address_ = peer_address;
}
void QuicChromiumClientSession::QuicChromiumPathValidationWriterDelegate::
set_network(NetworkChangeNotifier::NetworkHandle network) {
network_ = network;
}
QuicChromiumClientSession::QuicChromiumClientSession(
quic::QuicConnection* connection,
std::unique_ptr<DatagramClientSocket> socket,
QuicStreamFactory* stream_factory,
QuicCryptoClientStreamFactory* crypto_client_stream_factory,
const quic::QuicClock* clock,
TransportSecurityState* transport_security_state,
SSLConfigService* ssl_config_service,
std::unique_ptr<QuicServerInfo> server_info,
const QuicSessionKey& session_key,
bool require_confirmation,
bool migrate_session_early_v2,
bool migrate_sessions_on_network_change_v2,
NetworkChangeNotifier::NetworkHandle default_network,
quic::QuicTime::Delta retransmittable_on_wire_timeout,
bool migrate_idle_session,
bool allow_port_migration,
base::TimeDelta idle_migration_period,
base::TimeDelta max_time_on_non_default_network,
int max_migrations_to_non_default_network_on_write_error,
int max_migrations_to_non_default_network_on_path_degrading,
int yield_after_packets,
quic::QuicTime::Delta yield_after_duration,
bool go_away_on_path_degrading,
bool headers_include_h2_stream_dependency,
int cert_verify_flags,
const quic::QuicConfig& config,
std::unique_ptr<QuicCryptoClientConfigHandle> crypto_config,
const char* const connection_description,
base::TimeTicks dns_resolution_start_time,
base::TimeTicks dns_resolution_end_time,
std::unique_ptr<quic::QuicClientPushPromiseIndex> push_promise_index,
ServerPushDelegate* push_delegate,
const base::TickClock* tick_clock,
base::SequencedTaskRunner* task_runner,
std::unique_ptr<SocketPerformanceWatcher> socket_performance_watcher,
NetLog* net_log)
: quic::QuicSpdyClientSessionBase(connection,
push_promise_index.get(),
config,
connection->supported_versions()),
session_key_(session_key),
require_confirmation_(require_confirmation),
migrate_session_early_v2_(migrate_session_early_v2),
migrate_session_on_network_change_v2_(
migrate_sessions_on_network_change_v2),
migrate_idle_session_(migrate_idle_session),
allow_port_migration_(allow_port_migration),
idle_migration_period_(idle_migration_period),
max_time_on_non_default_network_(max_time_on_non_default_network),
max_migrations_to_non_default_network_on_write_error_(
max_migrations_to_non_default_network_on_write_error),
current_migrations_to_non_default_network_on_write_error_(0),
max_migrations_to_non_default_network_on_path_degrading_(
max_migrations_to_non_default_network_on_path_degrading),
current_migrations_to_non_default_network_on_path_degrading_(0),
clock_(clock),
yield_after_packets_(yield_after_packets),
yield_after_duration_(yield_after_duration),
go_away_on_path_degrading_(go_away_on_path_degrading),
most_recent_path_degrading_timestamp_(base::TimeTicks()),
most_recent_network_disconnected_timestamp_(base::TimeTicks()),
tick_clock_(tick_clock),
most_recent_stream_close_time_(tick_clock_->NowTicks()),
most_recent_write_error_(0),
most_recent_write_error_timestamp_(base::TimeTicks()),
crypto_config_(std::move(crypto_config)),
stream_factory_(stream_factory),
transport_security_state_(transport_security_state),
ssl_config_service_(ssl_config_service),
server_info_(std::move(server_info)),
pkp_bypassed_(false),
is_fatal_cert_error_(false),
num_total_streams_(0),
task_runner_(task_runner),
net_log_(NetLogWithSource::Make(net_log, NetLogSourceType::QUIC_SESSION)),
logger_(new QuicConnectionLogger(this,
connection_description,
std::move(socket_performance_watcher),
net_log_)),
http3_logger_(VersionUsesHttp3(connection->transport_version())
? new QuicHttp3Logger(net_log_)
: nullptr),
going_away_(false),
port_migration_detected_(false),
push_delegate_(push_delegate),
streams_pushed_count_(0),
streams_pushed_and_claimed_count_(0),
bytes_pushed_count_(0),
bytes_pushed_and_unclaimed_count_(0),
probing_manager_(this, task_runner_),
retry_migrate_back_count_(0),
current_migration_cause_(UNKNOWN_CAUSE),
send_packet_after_migration_(false),
wait_for_new_network_(false),
ignore_read_error_(false),
headers_include_h2_stream_dependency_(
headers_include_h2_stream_dependency),
attempted_zero_rtt_(false),
num_migrations_(0),
last_key_update_reason_(quic::KeyUpdateReason::kInvalid),
push_promise_index_(std::move(push_promise_index)),
path_validation_writer_delegate_(this, task_runner_) {
// Make sure connection migration and goaway on path degrading are not turned
// on at the same time.
DCHECK(!(migrate_session_early_v2_ && go_away_on_path_degrading_));
DCHECK(!(allow_port_migration_ && go_away_on_path_degrading_));
default_network_ = default_network;
auto* socket_raw = socket.get();
sockets_.push_back(std::move(socket));
packet_readers_.push_back(std::make_unique<QuicChromiumPacketReader>(
sockets_.back().get(), clock, this, yield_after_packets,
yield_after_duration, net_log_));
CHECK_EQ(packet_readers_.size(), sockets_.size());
crypto_stream_.reset(
crypto_client_stream_factory->CreateQuicCryptoClientStream(
session_key.server_id(), this,
std::make_unique<ProofVerifyContextChromium>(cert_verify_flags,
net_log_),
crypto_config_->GetConfig()));
if (VersionUsesHttp3(transport_version()))
set_debug_visitor(http3_logger_.get());
connection->set_debug_visitor(logger_.get());
connection->set_creator_debug_delegate(logger_.get());
migrate_back_to_default_timer_.SetTaskRunner(task_runner_);
net_log_.BeginEvent(NetLogEventType::QUIC_SESSION, [&] {
return NetLogQuicClientSessionParams(
&session_key, connection_id(), connection->client_connection_id(),
supported_versions(), cert_verify_flags, require_confirmation_);
});
IPEndPoint address;
if (socket_raw && socket_raw->GetLocalAddress(&address) == OK &&
address.GetFamily() == ADDRESS_FAMILY_IPV6) {
connection->SetMaxPacketLength(connection->max_packet_length() -
kAdditionalOverheadForIPv6);
}
connect_timing_.dns_start = dns_resolution_start_time;
connect_timing_.dns_end = dns_resolution_end_time;
if (!retransmittable_on_wire_timeout.IsZero()) {
connection->set_initial_retransmittable_on_wire_timeout(
retransmittable_on_wire_timeout);
}
}
QuicChromiumClientSession::~QuicChromiumClientSession() {
// This is referenced by the parent class's destructor, so have to delete it
// asynchronously, unfortunately. Don't use DeleteSoon, since that leaks if
// the task is not run, which is often the case in tests.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce([](std::unique_ptr<quic::QuicClientPushPromiseIndex>
push_promise_index) {},
std::move(push_promise_index_)));
DCHECK(callback_.is_null());
for (auto& observer : connectivity_observer_list_)
observer.OnSessionRemoved(this);
net_log_.EndEvent(NetLogEventType::QUIC_SESSION);
DCHECK(waiting_for_confirmation_callbacks_.empty());
DCHECK(!HasActiveRequestStreams());
DCHECK(handles_.empty());
if (!stream_requests_.empty()) {
// The session must be closed before it is destroyed.
CancelAllRequests(ERR_UNEXPECTED);
}
connection()->set_debug_visitor(nullptr);
if (connection()->connected()) {
// Ensure that the connection is closed by the time the session is
// destroyed.
connection()->CloseConnection(quic::QUIC_PEER_GOING_AWAY,
"session torn down",
quic::ConnectionCloseBehavior::SILENT_CLOSE);
}
if (IsEncryptionEstablished())
RecordHandshakeState(STATE_ENCRYPTION_ESTABLISHED);
if (OneRttKeysAvailable())
RecordHandshakeState(STATE_HANDSHAKE_CONFIRMED);
else
RecordHandshakeState(STATE_FAILED);
UMA_HISTOGRAM_COUNTS_1M("Net.QuicSession.NumTotalStreams",
num_total_streams_);
UMA_HISTOGRAM_COUNTS_1M("Net.QuicNumSentClientHellos",
crypto_stream_->num_sent_client_hellos());
UMA_HISTOGRAM_COUNTS_1M("Net.QuicSession.Pushed", streams_pushed_count_);
UMA_HISTOGRAM_COUNTS_1M("Net.QuicSession.PushedAndClaimed",
streams_pushed_and_claimed_count_);
UMA_HISTOGRAM_COUNTS_1M("Net.QuicSession.PushedBytes", bytes_pushed_count_);
DCHECK_LE(bytes_pushed_and_unclaimed_count_, bytes_pushed_count_);
UMA_HISTOGRAM_COUNTS_1M("Net.QuicSession.PushedAndUnclaimedBytes",
bytes_pushed_and_unclaimed_count_);
if (!OneRttKeysAvailable())
return;
// Sending one client_hello means we had zero handshake-round-trips.
int round_trip_handshakes = crypto_stream_->num_sent_client_hellos() - 1;
SSLInfo ssl_info;
// QUIC supports only secure URLs.
if (GetSSLInfo(&ssl_info) && ssl_info.cert.get()) {
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.QuicSession.ConnectRandomPortForHTTPS",
round_trip_handshakes, 1, 3, 4);
if (require_confirmation_) {
UMA_HISTOGRAM_CUSTOM_COUNTS(
"Net.QuicSession.ConnectRandomPortRequiringConfirmationForHTTPS",
round_trip_handshakes, 1, 3, 4);
}
}
const quic::QuicConnectionStats stats = connection()->GetStats();
// The MTU used by QUIC is limited to a fairly small set of predefined values
// (initial values and MTU discovery values), but does not fare well when
// bucketed. Because of that, a sparse histogram is used here.
base::UmaHistogramSparse("Net.QuicSession.ClientSideMtu", stats.egress_mtu);
base::UmaHistogramSparse("Net.QuicSession.ServerSideMtu", stats.ingress_mtu);
UMA_HISTOGRAM_COUNTS_1M("Net.QuicSession.MtuProbesSent",
connection()->mtu_probe_count());
if (stats.packets_sent >= 100) {
// Used to monitor for regressions that affect large uploads.
UMA_HISTOGRAM_COUNTS_1000(
"Net.QuicSession.PacketRetransmitsPerMille",
1000 * stats.packets_retransmitted / stats.packets_sent);
}
if (stats.max_sequence_reordering == 0)
return;
const base::HistogramBase::Sample kMaxReordering = 100;
base::HistogramBase::Sample reordering = kMaxReordering;
if (stats.min_rtt_us > 0) {
reordering = static_cast<base::HistogramBase::Sample>(
100 * stats.max_time_reordering_us / stats.min_rtt_us);
}
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.QuicSession.MaxReorderingTime", reordering,
1, kMaxReordering, 50);
if (stats.min_rtt_us > 100 * 1000) {
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.QuicSession.MaxReorderingTimeLongRtt",
reordering, 1, kMaxReordering, 50);
}
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.MaxReordering",
static_cast<base::HistogramBase::Sample>(stats.max_sequence_reordering));
}
void QuicChromiumClientSession::Initialize() {
set_max_inbound_header_list_size(kQuicMaxHeaderListSize);
if (config()->HasClientRequestedIndependentOption(
quic::kQLVE, quic::Perspective::IS_CLIENT)) {
connection()->EnableLegacyVersionEncapsulation(session_key_.host());
}
quic::QuicSpdyClientSessionBase::Initialize();
}
size_t QuicChromiumClientSession::WriteHeadersOnHeadersStream(
quic::QuicStreamId id,
spdy::Http2HeaderBlock headers,
bool fin,
const spdy::SpdyStreamPrecedence& precedence,
quic::QuicReferenceCountedPointer<quic::QuicAckListenerInterface>
ack_listener) {
spdy::SpdyStreamId parent_stream_id = 0;
int weight = 0;
bool exclusive = false;
if (headers_include_h2_stream_dependency_) {
priority_dependency_state_.OnStreamCreation(id, precedence.spdy3_priority(),
&parent_stream_id, &weight,
&exclusive);
} else {
weight = spdy::Spdy3PriorityToHttp2Weight(precedence.spdy3_priority());
}
return WriteHeadersOnHeadersStreamImpl(id, std::move(headers), fin,
parent_stream_id, weight, exclusive,
std::move(ack_listener));
}
void QuicChromiumClientSession::UnregisterStreamPriority(quic::QuicStreamId id,
bool is_static) {
if (headers_include_h2_stream_dependency_ && !is_static) {
priority_dependency_state_.OnStreamDestruction(id);
}
quic::QuicSpdySession::UnregisterStreamPriority(id, is_static);
}
void QuicChromiumClientSession::UpdateStreamPriority(
quic::QuicStreamId id,
const spdy::SpdyStreamPrecedence& new_precedence) {
if (headers_include_h2_stream_dependency_ ||
VersionUsesHttp3(connection()->transport_version())) {
auto updates = priority_dependency_state_.OnStreamUpdate(
id, new_precedence.spdy3_priority());
for (auto update : updates) {
if (!VersionUsesHttp3(connection()->transport_version())) {
WritePriority(update.id, update.parent_stream_id, update.weight,
update.exclusive);
}
}
}
quic::QuicSpdySession::UpdateStreamPriority(id, new_precedence);
}
void QuicChromiumClientSession::OnHttp3GoAway(uint64_t id) {
quic::QuicSpdySession::OnHttp3GoAway(id);
NotifyFactoryOfSessionGoingAway();
PerformActionOnActiveStreams([id](quic::QuicStream* stream) {
if (stream->id() >= id) {
static_cast<QuicChromiumClientStream*>(stream)->OnError(
ERR_QUIC_GOAWAY_REQUEST_CAN_BE_RETRIED);
}
return true;
});
}
void QuicChromiumClientSession::OnAcceptChFrameReceivedViaAlps(
const quic::AcceptChFrame& frame) {
bool has_valid_entry = false;
bool has_invalid_entry = false;
for (const auto& entry : frame.entries) {
// |entry.origin| must be a valid origin.
GURL url(entry.origin);
if (!url.is_valid()) {
has_invalid_entry = true;
continue;
}
url::Origin origin = url::Origin::Create(url);
std::string serialized = origin.Serialize();
if (serialized.empty() || entry.origin != serialized) {
has_invalid_entry = true;
continue;
}
has_valid_entry = true;
accept_ch_entries_received_via_alps_.insert(
std::make_pair(std::move(origin), entry.value));
}
LogAcceptChFrameReceivedHistogram(has_valid_entry, has_invalid_entry);
}
void QuicChromiumClientSession::AddHandle(Handle* handle) {
if (going_away_) {
handle->OnSessionClosed(connection()->version(), ERR_UNEXPECTED, error(),
port_migration_detected_, GetConnectTiming(),
WasConnectionEverUsed());
return;
}
DCHECK(!base::Contains(handles_, handle));
handles_.insert(handle);
}
void QuicChromiumClientSession::RemoveHandle(Handle* handle) {
DCHECK(base::Contains(handles_, handle));
handles_.erase(handle);
}
void QuicChromiumClientSession::AddConnectivityObserver(
ConnectivityObserver* observer) {
connectivity_observer_list_.AddObserver(observer);
observer->OnSessionRegistered(this, GetCurrentNetwork());
}
void QuicChromiumClientSession::RemoveConnectivityObserver(
ConnectivityObserver* observer) {
connectivity_observer_list_.RemoveObserver(observer);
}
// TODO(zhongyi): replace migration_session_* booleans with
// ConnectionMigrationMode.
ConnectionMigrationMode QuicChromiumClientSession::connection_migration_mode()
const {
if (migrate_session_early_v2_)
return ConnectionMigrationMode::FULL_MIGRATION_V2;
if (migrate_session_on_network_change_v2_)
return ConnectionMigrationMode::NO_MIGRATION_ON_PATH_DEGRADING_V2;
return ConnectionMigrationMode::NO_MIGRATION;
}
int QuicChromiumClientSession::WaitForHandshakeConfirmation(
CompletionOnceCallback callback) {
if (!connection()->connected())
return ERR_CONNECTION_CLOSED;
if (OneRttKeysAvailable())
return OK;
waiting_for_confirmation_callbacks_.push_back(std::move(callback));
return ERR_IO_PENDING;
}
int QuicChromiumClientSession::TryCreateStream(StreamRequest* request) {
if (goaway_received()) {
DVLOG(1) << "Going away.";
return ERR_CONNECTION_CLOSED;
}
if (!connection()->connected()) {
DVLOG(1) << "Already closed.";
return ERR_CONNECTION_CLOSED;
}
if (going_away_) {
return ERR_CONNECTION_CLOSED;
}
if (CanOpenNextOutgoingBidirectionalStream()) {
request->stream_ =
CreateOutgoingReliableStreamImpl(request->traffic_annotation())
->CreateHandle();
return OK;
}
request->pending_start_time_ = tick_clock_->NowTicks();
stream_requests_.push_back(request);
UMA_HISTOGRAM_COUNTS_1000("Net.QuicSession.NumPendingStreamRequests",
stream_requests_.size());
return ERR_IO_PENDING;
}
void QuicChromiumClientSession::CancelRequest(StreamRequest* request) {
// Remove |request| from the queue while preserving the order of the
// other elements.
auto it =
std::find(stream_requests_.begin(), stream_requests_.end(), request);
if (it != stream_requests_.end()) {
stream_requests_.erase(it);
}
}
bool QuicChromiumClientSession::ShouldCreateOutgoingBidirectionalStream() {
if (!crypto_stream_->encryption_established()) {
DVLOG(1) << "Encryption not active so no outgoing stream created.";
return false;
}
if (!CanOpenNextOutgoingBidirectionalStream()) {
DVLOG(1) << "Failed to create a new outgoing stream. "
<< "Already " << GetNumActiveStreams() << " open.";
return false;
}
if (goaway_received()) {
DVLOG(1) << "Failed to create a new outgoing stream. "
<< "Already received goaway.";
return false;
}
if (going_away_) {
return false;
}
return true;
}
bool QuicChromiumClientSession::ShouldCreateOutgoingUnidirectionalStream() {
NOTREACHED() << "Try to create outgoing unidirectional streams";
return false;
}
bool QuicChromiumClientSession::WasConnectionEverUsed() {
const quic::QuicConnectionStats& stats = connection()->GetStats();
return stats.bytes_sent > 0 || stats.bytes_received > 0;
}
QuicChromiumClientStream*
QuicChromiumClientSession::CreateOutgoingBidirectionalStream() {
NOTREACHED() << "CreateOutgoingReliableStreamImpl should be called directly";
return nullptr;
}
QuicChromiumClientStream*
QuicChromiumClientSession::CreateOutgoingUnidirectionalStream() {
NOTREACHED() << "Try to create outgoing unidirectional stream";
return nullptr;
}
QuicChromiumClientStream*
QuicChromiumClientSession::CreateOutgoingReliableStreamImpl(
const NetworkTrafficAnnotationTag& traffic_annotation) {
DCHECK(connection()->connected());
QuicChromiumClientStream* stream = new QuicChromiumClientStream(
GetNextOutgoingBidirectionalStreamId(), this, quic::BIDIRECTIONAL,
net_log_, traffic_annotation);
ActivateStream(base::WrapUnique(stream));
++num_total_streams_;
UMA_HISTOGRAM_COUNTS_1M("Net.QuicSession.NumOpenStreams",
GetNumActiveStreams());
// The previous histogram puts 100 in a bucket between 86 and 113, which does
// not shed light on whether Chrome ever thinks it has more than 100 streams
// open.
UMA_HISTOGRAM_BOOLEAN("Net.QuicSession.TooManyOpenStreams",
GetNumActiveStreams() > 100);
return stream;
}
quic::QuicCryptoClientStream*
QuicChromiumClientSession::GetMutableCryptoStream() {
return crypto_stream_.get();
}
const quic::QuicCryptoClientStream*
QuicChromiumClientSession::GetCryptoStream() const {
return crypto_stream_.get();
}
bool QuicChromiumClientSession::GetRemoteEndpoint(IPEndPoint* endpoint) {
*endpoint = ToIPEndPoint(peer_address());
return true;
}
// TODO(rtenneti): Add unittests for GetSSLInfo which exercise the various ways
// we learn about SSL info (sync vs async vs cached).
bool QuicChromiumClientSession::GetSSLInfo(SSLInfo* ssl_info) const {
ssl_info->Reset();
if (!cert_verify_result_) {
return false;
}
ssl_info->cert_status = cert_verify_result_->cert_status;
ssl_info->cert = cert_verify_result_->verified_cert;
ssl_info->public_key_hashes = cert_verify_result_->public_key_hashes;
ssl_info->is_issued_by_known_root =
cert_verify_result_->is_issued_by_known_root;
ssl_info->pkp_bypassed = pkp_bypassed_;
ssl_info->client_cert_sent = false;
ssl_info->handshake_type = SSLInfo::HANDSHAKE_FULL;
ssl_info->pinning_failure_log = pinning_failure_log_;
ssl_info->is_fatal_cert_error = is_fatal_cert_error_;
ssl_info->signed_certificate_timestamps = cert_verify_result_->scts;
ssl_info->ct_policy_compliance = cert_verify_result_->policy_compliance;
const auto& crypto_params = crypto_stream_->crypto_negotiated_params();
uint16_t cipher_suite;
if (connection()->version().UsesTls()) {
cipher_suite = crypto_params.cipher_suite;
} else {
// Map QUIC AEADs to the corresponding TLS 1.3 cipher. OpenSSL's cipher
// suite numbers begin with a stray 0x03, so mask them off.
quic::QuicTag aead = crypto_params.aead;
switch (aead) {
case quic::kAESG:
cipher_suite = TLS1_CK_AES_128_GCM_SHA256 & 0xffff;
break;
case quic::kCC20:
cipher_suite = TLS1_CK_CHACHA20_POLY1305_SHA256 & 0xffff;
break;
default:
NOTREACHED();
return false;
}
}
int ssl_connection_status = 0;
SSLConnectionStatusSetCipherSuite(cipher_suite, &ssl_connection_status);
SSLConnectionStatusSetVersion(SSL_CONNECTION_VERSION_QUIC,
&ssl_connection_status);
ssl_info->connection_status = ssl_connection_status;
if (connection()->version().UsesTls()) {
ssl_info->key_exchange_group = crypto_params.key_exchange_group;
ssl_info->peer_signature_algorithm = crypto_params.peer_signature_algorithm;
return true;
}
// Report the QUIC key exchange as the corresponding TLS curve.
switch (crypto_stream_->crypto_negotiated_params().key_exchange) {
case quic::kP256:
ssl_info->key_exchange_group = SSL_CURVE_SECP256R1;
break;
case quic::kC255:
ssl_info->key_exchange_group = SSL_CURVE_X25519;
break;
default:
NOTREACHED();
return false;
}
// QUIC-Crypto always uses RSA-PSS or ECDSA with SHA-256.
size_t unused;
X509Certificate::PublicKeyType key_type;
X509Certificate::GetPublicKeyInfo(ssl_info->cert->cert_buffer(), &unused,
&key_type);
switch (key_type) {
case X509Certificate::kPublicKeyTypeRSA:
ssl_info->peer_signature_algorithm = SSL_SIGN_RSA_PSS_RSAE_SHA256;
break;
case X509Certificate::kPublicKeyTypeECDSA:
ssl_info->peer_signature_algorithm = SSL_SIGN_ECDSA_SECP256R1_SHA256;
break;
default:
NOTREACHED();
return false;
}
return true;
}
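// Returns the Accept-CH value received via ALPS for |origin|, or an empty
// string piece if none was received, logging the lookup result either way.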
base::StringPiece QuicChromiumClientSession::GetAcceptChViaAlpsForOrigin(
const url::Origin& origin) const {
auto it = accept_ch_entries_received_via_alps_.find(origin);
if (it == accept_ch_entries_received_via_alps_.end()) {
LogAcceptChForOriginHistogram(false);
return {};
} else {
LogAcceptChForOriginHistogram(true);
return it->second;
}
}
int QuicChromiumClientSession::CryptoConnect(CompletionOnceCallback callback) {
connect_timing_.connect_start = tick_clock_->NowTicks();
RecordHandshakeState(STATE_STARTED);
DCHECK(flow_controller());
if (!crypto_stream_->CryptoConnect())
return ERR_QUIC_HANDSHAKE_FAILED;
if (OneRttKeysAvailable()) {
connect_timing_.connect_end = tick_clock_->NowTicks();
return OK;
}
// Unless we require handshake confirmation, activate the session if
// we have established initial encryption.
if (!require_confirmation_ && IsEncryptionEstablished())
return OK;
callback_ = std::move(callback);
return ERR_IO_PENDING;
}
int QuicChromiumClientSession::GetNumSentClientHellos() const {
return crypto_stream_->num_sent_client_hellos();
}
bool QuicChromiumClientSession::CanPool(
const std::string& hostname,
const QuicSessionKey& other_session_key) const {
DCHECK(connection()->connected());
if (!session_key_.CanUseForAliasing(other_session_key))
return false;
SSLInfo ssl_info;
if (!GetSSLInfo(&ssl_info) || !ssl_info.cert.get()) {
NOTREACHED() << "QUIC should always have certificates.";
return false;
}
return SpdySession::CanPool(transport_security_state_, ssl_info,
*ssl_config_service_, session_key_.host(),
hostname, session_key_.network_isolation_key());
}
bool QuicChromiumClientSession::ShouldCreateIncomingStream(
quic::QuicStreamId id) {
if (!connection()->connected()) {
LOG(DFATAL) << "ShouldCreateIncomingStream called when disconnected";
return false;
}
if (goaway_received()) {
DVLOG(1) << "Cannot create a new outgoing stream. "
<< "Already received goaway.";
return false;
}
if (going_away_) {
return false;
}
if (quic::QuicUtils::IsClientInitiatedStreamId(
connection()->transport_version(), id) ||
(connection()->version().HasIetfQuicFrames() &&
quic::QuicUtils::IsBidirectionalStreamId(id, connection()->version()))) {
LOG(WARNING) << "Received invalid push stream id " << id;
connection()->CloseConnection(
quic::QUIC_INVALID_STREAM_ID,
"Server created non write unidirectional stream",
quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
return false;
}
return true;
}
QuicChromiumClientStream* QuicChromiumClientSession::CreateIncomingStream(
quic::QuicStreamId id) {
if (!ShouldCreateIncomingStream(id)) {
return nullptr;
}
net::NetworkTrafficAnnotationTag traffic_annotation =
net::DefineNetworkTrafficAnnotation("quic_chromium_incoming_session", R"(
semantics {
sender: "Quic Chromium Client Session"
description:
"When a web server needs to push a response to a client, an incoming "
"stream is created to reply the client with pushed message instead "
"of a message from the network."
trigger:
"A request by a server to push a response to the client."
data: "None."
destination: OTHER
destination_other:
"This stream is not used for sending data."
}
policy {
cookies_allowed: NO
setting: "This feature cannot be disabled in settings."
policy_exception_justification:
"Essential for network access."
}
)");
return CreateIncomingReliableStreamImpl(id, traffic_annotation);
}
QuicChromiumClientStream* QuicChromiumClientSession::CreateIncomingStream(
quic::PendingStream* pending) {
net::NetworkTrafficAnnotationTag traffic_annotation =
net::DefineNetworkTrafficAnnotation(
"quic_chromium_incoming_pending_session", R"(
semantics {
sender: "Quic Chromium Client Session Pending Stream"
description:
"When a web server needs to push a response to a client, an incoming "
"stream is created to reply to the client with pushed message instead "
"of a message from the network."
trigger:
"A request by a server to push a response to the client."
data: "This stream is only used to receive data from the server."
destination: OTHER
destination_other:
"The web server pushing the response."
}
policy {
cookies_allowed: NO
setting: "This feature cannot be disabled in settings."
policy_exception_justification:
"Essential for network access."
}
)");
return CreateIncomingReliableStreamImpl(pending, traffic_annotation);
}
QuicChromiumClientStream*
QuicChromiumClientSession::CreateIncomingReliableStreamImpl(
quic::QuicStreamId id,
const NetworkTrafficAnnotationTag& traffic_annotation) {
DCHECK(connection()->connected());
QuicChromiumClientStream* stream = new QuicChromiumClientStream(
id, this, quic::READ_UNIDIRECTIONAL, net_log_, traffic_annotation);
ActivateStream(base::WrapUnique(stream));
++num_total_streams_;
return stream;
}
QuicChromiumClientStream*
QuicChromiumClientSession::CreateIncomingReliableStreamImpl(
quic::PendingStream* pending,
const NetworkTrafficAnnotationTag& traffic_annotation) {
DCHECK(connection()->connected());
QuicChromiumClientStream* stream =
new QuicChromiumClientStream(pending, this, net_log_, traffic_annotation);
ActivateStream(base::WrapUnique(stream));
++num_total_streams_;
return stream;
}
void QuicChromiumClientSession::OnStreamClosed(quic::QuicStreamId stream_id) {
most_recent_stream_close_time_ = tick_clock_->NowTicks();
quic::QuicStream* stream = GetActiveStream(stream_id);
if (stream != nullptr) {
logger_->UpdateReceivedFrameCounts(stream_id, stream->num_frames_received(),
stream->num_duplicate_frames_received());
if (quic::QuicUtils::IsServerInitiatedStreamId(
connection()->transport_version(), stream_id)) {
bytes_pushed_count_ += stream->stream_bytes_read();
}
}
quic::QuicSpdyClientSessionBase::OnStreamClosed(stream_id);
}
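// If a new outgoing bidirectional stream can now be opened, services the
// oldest pending stream request.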
void QuicChromiumClientSession::OnCanCreateNewOutgoingStream(
bool unidirectional) {
if (CanOpenNextOutgoingBidirectionalStream() && !stream_requests_.empty() &&
crypto_stream_->encryption_established() && !goaway_received() &&
!going_away_ && connection()->connected()) {
StreamRequest* request = stream_requests_.front();
// TODO(ckrasic) - analyze data and then add logic to mark QUIC
// broken if wait times are excessive.
UMA_HISTOGRAM_TIMES("Net.QuicSession.PendingStreamsWaitTime",
tick_clock_->NowTicks() - request->pending_start_time_);
stream_requests_.pop_front();
request->OnRequestCompleteSuccess(
CreateOutgoingReliableStreamImpl(request->traffic_annotation())
->CreateHandle());
}
}
void QuicChromiumClientSession::OnConfigNegotiated() {
quic::QuicSpdyClientSessionBase::OnConfigNegotiated();
if (!stream_factory_ || !stream_factory_->allow_server_migration()) {
if (connection()->connection_migration_use_new_cid()) {
if (!config()->HasReceivedPreferredAddressConnectionIdAndToken()) {
return;
}
} else {
if (!config()->HasReceivedIPv6AlternateServerAddress() &&
!config()->HasReceivedIPv4AlternateServerAddress()) {
return;
}
}
}
// Server has sent an alternate address to connect to.
IPEndPoint old_address;
GetDefaultSocket()->GetPeerAddress(&old_address);
// Migrate only if address families match.
IPEndPoint new_address;
if (old_address.GetFamily() == ADDRESS_FAMILY_IPV6) {
if (!config()->HasReceivedIPv6AlternateServerAddress()) {
return;
}
new_address = ToIPEndPoint(config()->ReceivedIPv6AlternateServerAddress());
} else if (old_address.GetFamily() == ADDRESS_FAMILY_IPV4) {
if (!config()->HasReceivedIPv4AlternateServerAddress()) {
return;
}
new_address = ToIPEndPoint(config()->ReceivedIPv4AlternateServerAddress());
}
DCHECK_EQ(new_address.GetFamily(), old_address.GetFamily());
// Specifying kInvalidNetworkHandle for the |network| parameter
// causes the session to use the default network for the new socket.
Migrate(NetworkChangeNotifier::kInvalidNetworkHandle, new_address,
/*close_session_on_error=*/true);
}
void QuicChromiumClientSession::SetDefaultEncryptionLevel(
quic::EncryptionLevel level) {
if (!callback_.is_null() &&
(!require_confirmation_ || level == quic::ENCRYPTION_FORWARD_SECURE ||
level == quic::ENCRYPTION_ZERO_RTT)) {
// Currently, callback_ can be run for any CryptoHandshakeEvent because
// there are no error events in the CryptoHandshakeEvent enum. If error
// events are added to CryptoHandshakeEvent, the following code needs to be
// changed.
std::move(callback_).Run(OK);
}
if (level == quic::ENCRYPTION_FORWARD_SECURE) {
OnCryptoHandshakeComplete();
LogZeroRttStats();
}
if (level == quic::ENCRYPTION_ZERO_RTT)
attempted_zero_rtt_ = true;
quic::QuicSpdySession::SetDefaultEncryptionLevel(level);
}
void QuicChromiumClientSession::OnTlsHandshakeComplete() {
if (!callback_.is_null()) {
// Currently, callback_ can be run for any CryptoHandshakeEvent because
// there are no error events in the CryptoHandshakeEvent enum. If error
// events are added to CryptoHandshakeEvent, the following code needs to be
// changed.
std::move(callback_).Run(OK);
}
OnCryptoHandshakeComplete();
LogZeroRttStats();
quic::QuicSpdySession::OnTlsHandshakeComplete();
}
void QuicChromiumClientSession::OnNewEncryptionKeyAvailable(
quic::EncryptionLevel level,
std::unique_ptr<quic::QuicEncrypter> encrypter) {
if (!attempted_zero_rtt_ && (level == quic::ENCRYPTION_ZERO_RTT ||
level == quic::ENCRYPTION_FORWARD_SECURE)) {
base::TimeTicks now = tick_clock_->NowTicks();
DCHECK_LE(connect_timing_.connect_start, now);
UMA_HISTOGRAM_TIMES("Net.QuicSession.EncryptionEstablishedTime",
now - connect_timing_.connect_start);
}
if (level == quic::ENCRYPTION_ZERO_RTT)
attempted_zero_rtt_ = true;
QuicSpdySession::OnNewEncryptionKeyAvailable(level, std::move(encrypter));
if (!callback_.is_null() &&
(!require_confirmation_ && level == quic::ENCRYPTION_ZERO_RTT)) {
// Currently, callback_ can be run for any CryptoHandshakeEvent because
// there are no error events in the CryptoHandshakeEvent enum. If error
// events are added to CryptoHandshakeEvent, the following code needs to be
// changed.
std::move(callback_).Run(OK);
}
}
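// Derives a ZeroRttState from BoringSSL's early data reason and records both
// to UMA, with the reason histogram split by whether the host is a Google
// host.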
void QuicChromiumClientSession::LogZeroRttStats() {
DCHECK(OneRttKeysAvailable());
ZeroRttState state;
ssl_early_data_reason_t early_data_reason = crypto_stream_->EarlyDataReason();
if (early_data_reason == ssl_early_data_accepted) {
state = ZeroRttState::kAttemptedAndSucceeded;
} else if (early_data_reason == ssl_early_data_peer_declined ||
early_data_reason == ssl_early_data_session_not_resumed ||
early_data_reason == ssl_early_data_hello_retry_request) {
state = ZeroRttState::kAttemptedAndRejected;
} else {
state = ZeroRttState::kNotAttempted;
}
UMA_HISTOGRAM_ENUMERATION("Net.QuicSession.ZeroRttState", state);
UMA_HISTOGRAM_ENUMERATION("Net.QuicSession.ZeroRttReason", early_data_reason,
ssl_early_data_reason_max_value + 1);
if (IsGoogleHost(session_key_.host())) {
UMA_HISTOGRAM_ENUMERATION("Net.QuicSession.ZeroRttReasonGoogle",
early_data_reason,
ssl_early_data_reason_max_value + 1);
} else {
UMA_HISTOGRAM_ENUMERATION("Net.QuicSession.ZeroRttReasonNonGoogle",
early_data_reason,
ssl_early_data_reason_max_value + 1);
}
}
void QuicChromiumClientSession::OnCryptoHandshakeMessageSent(
const quic::CryptoHandshakeMessage& message) {
logger_->OnCryptoHandshakeMessageSent(message);
}
void QuicChromiumClientSession::OnCryptoHandshakeMessageReceived(
const quic::CryptoHandshakeMessage& message) {
logger_->OnCryptoHandshakeMessageReceived(message);
if (message.tag() == quic::kREJ) {
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.QuicSession.RejectLength",
message.GetSerialized().length(), 1000, 10000,
50);
absl::string_view proof;
UMA_HISTOGRAM_BOOLEAN("Net.QuicSession.RejectHasProof",
message.GetStringPiece(quic::kPROF, &proof));
}
}
void QuicChromiumClientSession::OnGoAway(const quic::QuicGoAwayFrame& frame) {
quic::QuicSession::OnGoAway(frame);
NotifyFactoryOfSessionGoingAway();
port_migration_detected_ =
frame.error_code == quic::QUIC_ERROR_MIGRATING_PORT;
}
void QuicChromiumClientSession::OnConnectionClosed(
const quic::QuicConnectionCloseFrame& frame,
quic::ConnectionCloseSource source) {
DCHECK(!connection()->connected());
logger_->OnConnectionClosed(frame, source);
RecordConnectionCloseErrorCode(frame, source, session_key_.host(),
OneRttKeysAvailable());
if (OneRttKeysAvailable()) {
NetworkChangeNotifier::NetworkHandle current_network = GetCurrentNetwork();
for (auto& observer : connectivity_observer_list_)
observer.OnSessionClosedAfterHandshake(this, current_network, source,
frame.quic_error_code);
}
const quic::QuicErrorCode error = frame.quic_error_code;
const std::string& error_details = frame.error_details;
if (source == quic::ConnectionCloseSource::FROM_SELF &&
error == quic::QUIC_NETWORK_IDLE_TIMEOUT && ShouldKeepConnectionAlive()) {
quic::QuicStreamCount streams_waiting_to_write = 0;
PerformActionOnActiveStreams(
[&streams_waiting_to_write](quic::QuicStream* stream) {
if (stream->HasBufferedData())
++streams_waiting_to_write;
return true;
});
UMA_HISTOGRAM_COUNTS_100(
"Net.QuicSession.NumStreamsWaitingToWriteOnIdleTimeout",
streams_waiting_to_write);
UMA_HISTOGRAM_COUNTS_100("Net.QuicSession.NumActiveStreamsOnIdleTimeout",
GetNumActiveStreams());
}
if (source == quic::ConnectionCloseSource::FROM_PEER) {
if (error == quic::QUIC_PUBLIC_RESET) {
// is_from_google_server will be true if the received EPID is
// kEPIDGoogleFrontEnd or kEPIDGoogleFrontEnd0.
const bool is_from_google_server =
error_details.find(base::StringPrintf(
"From %s", quic::kEPIDGoogleFrontEnd)) != std::string::npos;
if (OneRttKeysAvailable()) {
UMA_HISTOGRAM_BOOLEAN(
"Net.QuicSession.ClosedByPublicReset.HandshakeConfirmed",
is_from_google_server);
} else {
UMA_HISTOGRAM_BOOLEAN("Net.QuicSession.ClosedByPublicReset",
is_from_google_server);
}
if (is_from_google_server) {
UMA_HISTOGRAM_COUNTS_100(
"Net.QuicSession.NumMigrationsExercisedBeforePublicReset",
sockets_.size() - 1);
}
base::UmaHistogramSparse(
"Net.QuicSession.LastSentPacketContentBeforePublicReset",
connection()
->sent_packet_manager()
.unacked_packets()
.GetLastPacketContent());
const quic::QuicTime last_in_flight_packet_sent_time =
connection()
->sent_packet_manager()
.unacked_packets()
.GetLastInFlightPacketSentTime();
const quic::QuicTime handshake_completion_time =
connection()->GetStats().handshake_completion_time;
if (last_in_flight_packet_sent_time.IsInitialized() &&
handshake_completion_time.IsInitialized() &&
last_in_flight_packet_sent_time >= handshake_completion_time) {
const quic::QuicTime::Delta delay =
last_in_flight_packet_sent_time - handshake_completion_time;
UMA_HISTOGRAM_LONG_TIMES_100(
"Net.QuicSession."
"LastInFlightPacketSentTimeFromHandshakeCompletionWithPublicReset",
base::Milliseconds(delay.ToMilliseconds()));
}
UMA_HISTOGRAM_LONG_TIMES_100(
"Net.QuicSession.ConnectionDurationWithPublicReset",
tick_clock_->NowTicks() - connect_timing_.connect_end);
}
if (OneRttKeysAvailable()) {
base::HistogramBase* histogram = base::SparseHistogram::FactoryGet(
"Net.QuicSession.StreamCloseErrorCodeServer.HandshakeConfirmed",
base::HistogramBase::kUmaTargetedHistogramFlag);
size_t num_streams = GetNumActiveStreams();
if (num_streams > 0)
histogram->AddCount(error, num_streams);
}
} else {
if (OneRttKeysAvailable()) {
base::HistogramBase* histogram = base::SparseHistogram::FactoryGet(
"Net.QuicSession.StreamCloseErrorCodeClient.HandshakeConfirmed",
base::HistogramBase::kUmaTargetedHistogramFlag);
size_t num_streams = GetNumActiveStreams();
if (num_streams > 0)
histogram->AddCount(error, num_streams);
} else {
if (error == quic::QUIC_HANDSHAKE_TIMEOUT) {
UMA_HISTOGRAM_BOOLEAN(
"Net.QuicSession.HandshakeTimeout.PathDegradingDetected",
connection()->IsPathDegrading());
}
}
if (error == quic::QUIC_TOO_MANY_RTOS) {
UMA_HISTOGRAM_COUNTS_1000(
"Net.QuicSession.ClosedByRtoAtClient.ReceivedPacketCount",
connection()->GetStats().packets_received);
UMA_HISTOGRAM_COUNTS_1000(
"Net.QuicSession.ClosedByRtoAtClient.SentPacketCount",
connection()->GetStats().packets_sent);
UMA_HISTOGRAM_COUNTS_100(
"Net.QuicSession."
"MaxConsecutiveRtoWithForwardProgressAndBlackholeDetected",
connection()->GetStats().max_consecutive_rto_with_forward_progress);
}
}
if (error == quic::QUIC_NETWORK_IDLE_TIMEOUT) {
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.ConnectionClose.NumOpenStreams.TimedOut",
GetNumActiveStreams());
if (OneRttKeysAvailable()) {
if (GetNumActiveStreams() > 0) {
UMA_HISTOGRAM_BOOLEAN(
"Net.QuicSession.TimedOutWithOpenStreams.HasUnackedPackets",
connection()->sent_packet_manager().HasInFlightPackets());
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.TimedOutWithOpenStreams.ConsecutiveRTOCount",
connection()->sent_packet_manager().GetConsecutiveRtoCount());
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.TimedOutWithOpenStreams.ConsecutiveTLPCount",
connection()->sent_packet_manager().GetConsecutiveTlpCount());
base::UmaHistogramSparse(
"Net.QuicSession.TimedOutWithOpenStreams.LocalPort",
connection()->self_address().port());
}
} else {
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.ConnectionClose.NumOpenStreams.HandshakeTimedOut",
GetNumActiveStreams());
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.ConnectionClose.NumTotalStreams.HandshakeTimedOut",
num_total_streams_);
}
}
if (OneRttKeysAvailable()) {
// QUIC connections should not timeout while there are open streams,
// since PING frames are sent to prevent timeouts. If, however, the
// connection timed out with open streams then QUIC traffic has become
// blackholed. Alternatively, if too many retransmission timeouts occur
// then QUIC traffic has become blackholed.
if (stream_factory_ && (error == quic::QUIC_TOO_MANY_RTOS ||
(error == quic::QUIC_NETWORK_IDLE_TIMEOUT &&
GetNumActiveStreams() > 0))) {
stream_factory_->OnBlackholeAfterHandshakeConfirmed(this);
}
UMA_HISTOGRAM_COUNTS_100(
"Net.QuicSession.CryptoRetransmitCount.HandshakeConfirmed",
connection()->GetStats().crypto_retransmit_count);
UMA_HISTOGRAM_COUNTS_100(
"Net.QuicSession.MaxConsecutiveRtoWithForwardProgress",
connection()->GetStats().max_consecutive_rto_with_forward_progress);
UMA_HISTOGRAM_COUNTS_1000("Net.QuicSession.NumPingsSent",
connection()->GetStats().ping_frames_sent);
UMA_HISTOGRAM_LONG_TIMES_100(
"Net.QuicSession.ConnectionDuration",
tick_clock_->NowTicks() - connect_timing_.connect_end);
UMA_HISTOGRAM_COUNTS_100("Net.QuicSession.NumMigrations", num_migrations_);
// These values are persisted to logs. Entries should not be renumbered
// and numeric values should never be reused.
enum class KeyUpdateSupported {
kInvalid = 0,
kUnsupported = 1,
kSupported = 2,
kSupportedLocallyOnly = 3,
kSupportedRemotelyOnly = 4,
kMaxValue = kSupportedRemotelyOnly,
};
KeyUpdateSupported key_update_supported = KeyUpdateSupported::kInvalid;
if (config()->KeyUpdateSupportedForConnection()) {
key_update_supported = KeyUpdateSupported::kSupported;
} else if (config()->KeyUpdateSupportedLocally()) {
key_update_supported = KeyUpdateSupported::kSupportedLocallyOnly;
} else if (config()->KeyUpdateSupportedRemotely()) {
key_update_supported = KeyUpdateSupported::kSupportedRemotelyOnly;
} else {
key_update_supported = KeyUpdateSupported::kUnsupported;
}
base::UmaHistogramEnumeration("Net.QuicSession.KeyUpdate.Supported",
key_update_supported);
if (config()->KeyUpdateSupportedForConnection()) {
base::UmaHistogramCounts100("Net.QuicSession.KeyUpdate.PerConnection2",
connection()->GetStats().key_update_count);
base::UmaHistogramCounts100(
"Net.QuicSession.KeyUpdate.PotentialPeerKeyUpdateAttemptCount",
connection()->PotentialPeerKeyUpdateAttemptCount());
if (last_key_update_reason_ != quic::KeyUpdateReason::kInvalid) {
std::string suffix =
last_key_update_reason_ == quic::KeyUpdateReason::kRemote ? "Remote"
: "Local";
// These values are persisted to logs. Entries should not be renumbered
// and numeric values should never be reused.
enum class KeyUpdateSuccess {
kInvalid = 0,
kSuccess = 1,
kFailedInitial = 2,
kFailedNonInitial = 3,
kMaxValue = kFailedNonInitial,
};
KeyUpdateSuccess value = KeyUpdateSuccess::kInvalid;
if (connection()->HaveSentPacketsInCurrentKeyPhaseButNoneAcked()) {
if (connection()->GetStats().key_update_count >= 2) {
value = KeyUpdateSuccess::kFailedNonInitial;
} else {
value = KeyUpdateSuccess::kFailedInitial;
}
} else {
value = KeyUpdateSuccess::kSuccess;
}
base::UmaHistogramEnumeration(
"Net.QuicSession.KeyUpdate.Success." + suffix, value);
}
}
} else {
if (error == quic::QUIC_PUBLIC_RESET) {
RecordHandshakeFailureReason(HANDSHAKE_FAILURE_PUBLIC_RESET);
} else if (connection()->GetStats().packets_received == 0) {
RecordHandshakeFailureReason(HANDSHAKE_FAILURE_BLACK_HOLE);
base::UmaHistogramSparse(
"Net.QuicSession.ConnectionClose.HandshakeFailureBlackHole.QuicError",
error);
} else {
RecordHandshakeFailureReason(HANDSHAKE_FAILURE_UNKNOWN);
base::UmaHistogramSparse(
"Net.QuicSession.ConnectionClose.HandshakeFailureUnknown.QuicError",
error);
}
UMA_HISTOGRAM_COUNTS_100(
"Net.QuicSession.CryptoRetransmitCount.HandshakeNotConfirmed",
connection()->GetStats().crypto_retransmit_count);
}
base::UmaHistogramCounts1M(
"Net.QuicSession.UndecryptablePacketsReceivedWithDecrypter",
connection()->GetStats().num_failed_authentication_packets_received);
base::UmaHistogramSparse("Net.QuicSession.QuicVersion",
connection()->transport_version());
NotifyFactoryOfSessionGoingAway();
quic::QuicSession::OnConnectionClosed(frame, source);
if (!callback_.is_null()) {
std::move(callback_).Run(ERR_QUIC_PROTOCOL_ERROR);
}
CHECK_EQ(sockets_.size(), packet_readers_.size());
for (auto& socket : sockets_) {
socket->Close();
}
DCHECK(!HasActiveRequestStreams());
CloseAllHandles(ERR_UNEXPECTED);
CancelAllRequests(ERR_CONNECTION_CLOSED);
NotifyRequestsOfConfirmation(ERR_CONNECTION_CLOSED);
NotifyFactoryOfSessionClosedLater();
}
void QuicChromiumClientSession::OnSuccessfulVersionNegotiation(
const quic::ParsedQuicVersion& version) {
logger_->OnSuccessfulVersionNegotiation(version);
quic::QuicSpdySession::OnSuccessfulVersionNegotiation(version);
}
void QuicChromiumClientSession::OnPacketReceived(
const quic::QuicSocketAddress& self_address,
const quic::QuicSocketAddress& peer_address,
bool is_connectivity_probe) {
// Notify the probing manager that a new packet is received.
probing_manager_.OnPacketReceived(self_address, peer_address,
is_connectivity_probe);
}
int QuicChromiumClientSession::HandleWriteError(
int error_code,
scoped_refptr<QuicChromiumPacketWriter::ReusableIOBuffer> packet) {
current_migration_cause_ = ON_WRITE_ERROR;
LogHandshakeStatusOnMigrationSignal();
base::UmaHistogramSparse("Net.QuicSession.WriteError", -error_code);
if (OneRttKeysAvailable()) {
base::UmaHistogramSparse("Net.QuicSession.WriteError.HandshakeConfirmed",
-error_code);
}
// For now, skip reporting if there are multiple packet writers and
// connection migration is enabled.
if (sockets_.size() == 1u || !migrate_session_early_v2_) {
NetworkChangeNotifier::NetworkHandle current_network = GetCurrentNetwork();
for (auto& observer : connectivity_observer_list_) {
observer.OnSessionEncounteringWriteError(this, current_network,
error_code);
}
}
if (error_code == ERR_MSG_TOO_BIG || stream_factory_ == nullptr ||
!migrate_session_on_network_change_v2_ || !OneRttKeysAvailable()) {
return error_code;
}
NetworkChangeNotifier::NetworkHandle current_network = GetCurrentNetwork();
net_log_.AddEventWithInt64Params(
NetLogEventType::QUIC_CONNECTION_MIGRATION_ON_WRITE_ERROR, "network",
current_network);
DCHECK(packet != nullptr);
DCHECK_NE(ERR_IO_PENDING, error_code);
DCHECK_GT(0, error_code);
DCHECK(packet_ == nullptr);
// Post a task to migrate the session onto a new network.
task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&QuicChromiumClientSession::MigrateSessionOnWriteError,
weak_factory_.GetWeakPtr(), error_code,
connection()->writer()));
// Only save packet from the old path for retransmission on the new path when
// the connection ID does not change.
if (!connection()->connection_migration_use_new_cid()) {
// Store packet in the session since the actual migration and packet rewrite
// can happen via this posted task or via an async network notification.
packet_ = std::move(packet);
}
ignore_read_error_ = true;
// Cause the packet writer to return ERR_IO_PENDING and block so
// that the actual migration happens from the message loop instead
// of under the call stack of quic::QuicConnection::WritePacket.
return ERR_IO_PENDING;
}
void QuicChromiumClientSession::MigrateSessionOnWriteError(
int error_code,
quic::QuicPacketWriter* writer) {
DCHECK(migrate_session_on_network_change_v2_);
// If |writer| is no longer actively in use, abort this migration attempt.
if (writer != connection()->writer())
return;
most_recent_write_error_timestamp_ = tick_clock_->NowTicks();
most_recent_write_error_ = error_code;
if (stream_factory_ == nullptr) {
// Close the connection if migration failed. Do not cause a
// connection close packet to be sent since socket may be borked.
connection()->CloseConnection(quic::QUIC_PACKET_WRITE_ERROR,
"Write error with nulled stream factory",
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
current_migration_cause_ = ON_WRITE_ERROR;
if (migrate_idle_session_ && CheckIdleTimeExceedsIdleMigrationPeriod())
return;
if (!migrate_idle_session_ && !HasActiveRequestStreams()) {
    // Close the connection for a non-migratable idle session. Do not cause a
    // connection close packet to be sent since socket may be borked.
connection()->CloseConnection(quic::QUIC_PACKET_WRITE_ERROR,
"Write error for non-migratable session",
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
// Do not migrate if connection migration is disabled.
if (config()->DisableConnectionMigration()) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_DISABLED_BY_CONFIG,
connection_id(),
"Migration disabled by config");
// Close the connection since migration was disabled. Do not cause a
// connection close packet to be sent since socket may be borked.
connection()->CloseConnection(quic::QUIC_PACKET_WRITE_ERROR,
"Write error for non-migratable session",
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
NetworkChangeNotifier::NetworkHandle new_network =
stream_factory_->FindAlternateNetwork(GetCurrentNetwork());
if (new_network == NetworkChangeNotifier::kInvalidNetworkHandle) {
// No alternate network found.
HistogramAndLogMigrationFailure(MIGRATION_STATUS_NO_ALTERNATE_NETWORK,
connection_id(),
"No alternate network found");
OnNoNewNetwork();
return;
}
if (GetCurrentNetwork() == default_network_ &&
current_migrations_to_non_default_network_on_write_error_ >=
max_migrations_to_non_default_network_on_write_error_) {
HistogramAndLogMigrationFailure(
MIGRATION_STATUS_ON_WRITE_ERROR_DISABLED, connection_id(),
"Exceeds maximum number of migrations on write error");
connection()->CloseConnection(
quic::QUIC_PACKET_WRITE_ERROR,
"Too many migrations for write error for the same network",
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
current_migrations_to_non_default_network_on_write_error_++;
net_log_.BeginEventWithStringParams(
NetLogEventType::QUIC_CONNECTION_MIGRATION_TRIGGERED, "trigger",
"WriteError");
MigrationResult result =
Migrate(new_network, ToIPEndPoint(connection()->peer_address()),
/*close_session_on_error=*/false);
net_log_.EndEvent(NetLogEventType::QUIC_CONNECTION_MIGRATION_TRIGGERED);
if (result == MigrationResult::FAILURE) {
// Close the connection if migration failed. Do not cause a
// connection close packet to be sent since socket may be borked.
connection()->CloseConnection(quic::QUIC_PACKET_WRITE_ERROR,
"Write and subsequent migration failed",
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
if (new_network != default_network_) {
StartMigrateBackToDefaultNetworkTimer(
base::Seconds(kMinRetryTimeForDefaultNetworkSecs));
} else {
CancelMigrateBackToDefaultNetworkTimer();
}
}
void QuicChromiumClientSession::OnNoNewNetwork() {
DCHECK(OneRttKeysAvailable());
wait_for_new_network_ = true;
DVLOG(1) << "Force blocking the packet writer";
// Force blocking the packet writer to avoid any writes since there is no
// alternate network available.
static_cast<QuicChromiumPacketWriter*>(connection()->writer())
->set_force_write_blocked(true);
// Post a task to maybe close the session if the alarm fires.
task_runner_->PostDelayedTask(
FROM_HERE,
base::BindOnce(&QuicChromiumClientSession::OnMigrationTimeout,
weak_factory_.GetWeakPtr(), sockets_.size()),
base::Seconds(kWaitTimeForNewNetworkSecs));
}
void QuicChromiumClientSession::WriteToNewSocket() {
// Set |send_packet_after_migration_| to true so that a packet will be
// sent when the writer becomes unblocked.
send_packet_after_migration_ = true;
DVLOG(1) << "Cancel force blocking the packet writer";
// Notify writer that it is no longer forced blocked, which may call
// OnWriteUnblocked() if the writer has no write in progress.
static_cast<QuicChromiumPacketWriter*>(connection()->writer())
->set_force_write_blocked(false);
}
void QuicChromiumClientSession::OnMigrationTimeout(size_t num_sockets) {
// If number of sockets has changed, this migration task is stale.
if (num_sockets != sockets_.size())
return;
int net_error = current_migration_cause_ == ON_NETWORK_DISCONNECTED
? ERR_INTERNET_DISCONNECTED
: ERR_NETWORK_CHANGED;
// |current_migration_cause_| will be reset after logging.
LogMigrationResultToHistogram(MIGRATION_STATUS_TIMEOUT);
CloseSessionOnError(net_error, quic::QUIC_CONNECTION_MIGRATION_NO_NEW_NETWORK,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
}
// TODO(renjietang): Deprecate this method once IETF QUIC supports connection
// migration.
void QuicChromiumClientSession::OnProbeSucceeded(
NetworkChangeNotifier::NetworkHandle network,
const quic::QuicSocketAddress& peer_address,
const quic::QuicSocketAddress& self_address,
std::unique_ptr<DatagramClientSocket> socket,
std::unique_ptr<QuicChromiumPacketWriter> writer,
std::unique_ptr<QuicChromiumPacketReader> reader) {
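  // Dispatch to the port-migration or connection-migration handler depending
  // on what triggered this probe.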
if (current_migration_cause_ == CHANGE_PORT_ON_PATH_DEGRADING) {
DCHECK(allow_port_migration_);
OnPortMigrationProbeSucceeded(network, peer_address, self_address,
std::move(socket), std::move(writer),
std::move(reader));
return;
}
OnConnectionMigrationProbeSucceeded(network, peer_address, self_address,
std::move(socket), std::move(writer),
std::move(reader));
}
void QuicChromiumClientSession::OnPortMigrationProbeSucceeded(
NetworkChangeNotifier::NetworkHandle network,
const quic::QuicSocketAddress& peer_address,
const quic::QuicSocketAddress& self_address,
std::unique_ptr<DatagramClientSocket> socket,
std::unique_ptr<QuicChromiumPacketWriter> writer,
std::unique_ptr<QuicChromiumPacketReader> reader) {
DCHECK(socket);
DCHECK(writer);
DCHECK(reader);
net_log_.AddEvent(NetLogEventType::QUIC_SESSION_CONNECTIVITY_PROBING_FINISHED,
[&] {
return NetLogProbingResultParams(network, &peer_address,
/*is_success=*/true);
});
LogProbeResultToHistogram(current_migration_cause_, true);
// Remove |this| as the old packet writer's delegate. Write error on old
// writers will be ignored.
// Set |this| to listen on socket write events on the packet writer
// that was used for probing.
static_cast<QuicChromiumPacketWriter*>(connection()->writer())
->set_delegate(nullptr);
writer->set_delegate(this);
if (!migrate_idle_session_ && !HasActiveRequestStreams()) {
// If idle sessions won't be migrated, close the connection.
CloseSessionOnErrorLater(
ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_NO_MIGRATABLE_STREAMS,
quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
return;
}
if (migrate_idle_session_ && CheckIdleTimeExceedsIdleMigrationPeriod())
return;
// Migrate to the probed socket immediately: socket, writer and reader will
// be acquired by connection and used as default on success.
if (!MigrateToSocket(self_address, peer_address, std::move(socket),
std::move(reader), std::move(writer))) {
LogMigrateToSocketStatus(false);
net_log_.AddEvent(
NetLogEventType::QUIC_CONNECTION_MIGRATION_FAILURE_AFTER_PROBING);
return;
}
LogMigrateToSocketStatus(true);
num_migrations_++;
HistogramAndLogMigrationSuccess(connection_id());
}
void QuicChromiumClientSession::OnConnectionMigrationProbeSucceeded(
NetworkChangeNotifier::NetworkHandle network,
const quic::QuicSocketAddress& peer_address,
const quic::QuicSocketAddress& self_address,
std::unique_ptr<DatagramClientSocket> socket,
std::unique_ptr<QuicChromiumPacketWriter> writer,
std::unique_ptr<QuicChromiumPacketReader> reader) {
DCHECK(socket);
DCHECK(writer);
DCHECK(reader);
net_log_.AddEvent(NetLogEventType::QUIC_SESSION_CONNECTIVITY_PROBING_FINISHED,
[&] {
return NetLogProbingResultParams(network, &peer_address,
/*is_success=*/true);
});
if (network == NetworkChangeNotifier::kInvalidNetworkHandle)
return;
LogProbeResultToHistogram(current_migration_cause_, true);
// Remove |this| as the old packet writer's delegate. Write error on old
// writers will be ignored.
// Set |this| to listen on socket write events on the packet writer
// that was used for probing.
static_cast<QuicChromiumPacketWriter*>(connection()->writer())
->set_delegate(nullptr);
writer->set_delegate(this);
// Close streams that are not migratable to the probed |network|.
ResetNonMigratableStreams();
if (!migrate_idle_session_ && !HasActiveRequestStreams()) {
// If idle sessions won't be migrated, close the connection.
CloseSessionOnErrorLater(
ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_NO_MIGRATABLE_STREAMS,
quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
return;
}
if (migrate_idle_session_ && CheckIdleTimeExceedsIdleMigrationPeriod())
return;
// Migrate to the probed socket immediately: socket, writer and reader will
// be acquired by connection and used as default on success.
if (!MigrateToSocket(self_address, peer_address, std::move(socket),
std::move(reader), std::move(writer))) {
LogMigrateToSocketStatus(false);
net_log_.AddEvent(
NetLogEventType::QUIC_CONNECTION_MIGRATION_FAILURE_AFTER_PROBING);
return;
}
LogMigrateToSocketStatus(true);
net_log_.AddEventWithInt64Params(
NetLogEventType::QUIC_CONNECTION_MIGRATION_SUCCESS_AFTER_PROBING,
"migrate_to_network", network);
num_migrations_++;
HistogramAndLogMigrationSuccess(connection_id());
if (network == default_network_) {
DVLOG(1) << "Client successfully migrated to default network: "
             << default_network_;
    return;
}
DVLOG(1) << "Client successfully got off default network after "
<< "successful probing network: " << network << ".";
current_migrations_to_non_default_network_on_path_degrading_++;
if (!migrate_back_to_default_timer_.IsRunning()) {
current_migration_cause_ = ON_MIGRATE_BACK_TO_DEFAULT_NETWORK;
// Session gets off the |default_network|, stay on |network| for now but
// try to migrate back to default network after 1 second.
StartMigrateBackToDefaultNetworkTimer(
base::Seconds(kMinRetryTimeForDefaultNetworkSecs));
}
}
void QuicChromiumClientSession::OnProbeFailed(
NetworkChangeNotifier::NetworkHandle network,
const quic::QuicSocketAddress& peer_address) {
net_log_.AddEvent(NetLogEventType::QUIC_SESSION_CONNECTIVITY_PROBING_FINISHED,
[&] {
return NetLogProbingResultParams(network, &peer_address,
/*is_success=*/false);
});
if (connection()->connection_migration_use_new_cid()) {
auto* context = static_cast<QuicChromiumPathValidationContext*>(
connection()->GetPathValidationContext());
if (!context)
return;
if (context->network() == network &&
context->peer_address() == peer_address) {
connection()->CancelPathValidation();
}
}
LogProbeResultToHistogram(current_migration_cause_, false);
if (network != NetworkChangeNotifier::kInvalidNetworkHandle) {
// Probing failure can be ignored.
DVLOG(1) << "Connectivity probing failed on <network: " << network
<< ", peer_address: " << peer_address.ToString() << ">.";
DVLOG_IF(1, network == default_network_ &&
GetCurrentNetwork() != default_network_)
<< "Client probing failed on the default network, still using "
"non-default network.";
}
}
bool QuicChromiumClientSession::OnSendConnectivityProbingPacket(
QuicChromiumPacketWriter* writer,
const quic::QuicSocketAddress& peer_address) {
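  // Invoked when the probing manager needs to send a connectivity probe; ask
  // the connection to send a probing packet to |peer_address| via |writer|.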
return connection()->SendConnectivityProbingPacket(writer, peer_address);
}
void QuicChromiumClientSession::OnNetworkConnected(
NetworkChangeNotifier::NetworkHandle network) {
if (connection()->IsPathDegrading()) {
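    // Record how long the path had been degrading before a new network
    // became available.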
base::TimeDelta duration =
tick_clock_->NowTicks() - most_recent_path_degrading_timestamp_;
UMA_HISTOGRAM_CUSTOM_TIMES("Net.QuicNetworkDegradingDurationTillConnected",
duration, base::Milliseconds(1),
base::Minutes(10), 50);
}
if (!migrate_session_on_network_change_v2_)
return;
net_log_.AddEventWithInt64Params(
NetLogEventType::QUIC_CONNECTION_MIGRATION_ON_NETWORK_CONNECTED,
"connected_network", network);
// If there was no migration waiting for new network and the path is not
// degrading, ignore this signal.
if (!wait_for_new_network_ && !connection()->IsPathDegrading())
return;
if (connection()->IsPathDegrading())
current_migration_cause_ = NEW_NETWORK_CONNECTED_POST_PATH_DEGRADING;
if (wait_for_new_network_) {
wait_for_new_network_ = false;
if (current_migration_cause_ == ON_WRITE_ERROR)
current_migrations_to_non_default_network_on_write_error_++;
    // |wait_for_new_network_| is true: there was no working network
    // previously. |network| is now the only possible candidate; migrate
    // immediately.
MigrateNetworkImmediately(network);
} else {
// The connection is path degrading.
DCHECK(connection()->IsPathDegrading());
MaybeMigrateToAlternateNetworkOnPathDegrading();
}
}
void QuicChromiumClientSession::OnNetworkDisconnectedV2(
NetworkChangeNotifier::NetworkHandle disconnected_network) {
LogMetricsOnNetworkDisconnected();
if (!migrate_session_on_network_change_v2_)
return;
net_log_.AddEventWithInt64Params(
NetLogEventType::QUIC_CONNECTION_MIGRATION_ON_NETWORK_DISCONNECTED,
"disconnected_network", disconnected_network);
// Stop probing the disconnected network if there is one.
if (connection()->connection_migration_use_new_cid()) {
auto* context = static_cast<QuicChromiumPathValidationContext*>(
connection()->GetPathValidationContext());
if (context && context->network() == disconnected_network &&
context->peer_address() == peer_address()) {
connection()->CancelPathValidation();
}
} else {
probing_manager_.CancelProbing(disconnected_network, peer_address());
}
if (disconnected_network == default_network_) {
DVLOG(1) << "Default network: " << default_network_ << " is disconnected.";
default_network_ = NetworkChangeNotifier::kInvalidNetworkHandle;
current_migrations_to_non_default_network_on_write_error_ = 0;
}
// Ignore the signal if the current active network is not affected.
if (GetCurrentNetwork() != disconnected_network) {
DVLOG(1) << "Client's current default network is not affected by the "
<< "disconnected one.";
return;
}
current_migration_cause_ = ON_NETWORK_DISCONNECTED;
LogHandshakeStatusOnMigrationSignal();
if (!OneRttKeysAvailable()) {
// Close the connection if handshake is not confirmed. Migration before
// handshake is not allowed.
CloseSessionOnErrorLater(
ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_HANDSHAKE_UNCONFIRMED,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
// Attempt to find alternative network.
NetworkChangeNotifier::NetworkHandle new_network =
stream_factory_->FindAlternateNetwork(disconnected_network);
if (new_network == NetworkChangeNotifier::kInvalidNetworkHandle) {
OnNoNewNetwork();
return;
}
// Current network is being disconnected, migrate immediately to the
// alternative network.
MigrateNetworkImmediately(new_network);
}
void QuicChromiumClientSession::OnNetworkMadeDefault(
NetworkChangeNotifier::NetworkHandle new_network) {
LogMetricsOnNetworkMadeDefault();
if (!migrate_session_on_network_change_v2_)
return;
DCHECK_NE(NetworkChangeNotifier::kInvalidNetworkHandle, new_network);
net_log_.AddEventWithInt64Params(
NetLogEventType::QUIC_CONNECTION_MIGRATION_ON_NETWORK_MADE_DEFAULT,
"new_default_network", new_network);
  DVLOG(1) << "Network: " << new_network
           << " becomes default, old default: " << default_network_;
  default_network_ = new_network;
current_migration_cause_ = ON_NETWORK_MADE_DEFAULT;
current_migrations_to_non_default_network_on_write_error_ = 0;
current_migrations_to_non_default_network_on_path_degrading_ = 0;
// Simply cancel the timer to migrate back to the default network if session
// is already on the default network.
if (GetCurrentNetwork() == new_network) {
CancelMigrateBackToDefaultNetworkTimer();
HistogramAndLogMigrationFailure(MIGRATION_STATUS_ALREADY_MIGRATED,
connection_id(),
"Already migrated on the new network");
return;
}
LogHandshakeStatusOnMigrationSignal();
// Stay on the current network. Try to migrate back to default network
// without any delay, which will start probing the new default network and
// migrate to the new network immediately on success.
StartMigrateBackToDefaultNetworkTimer(base::TimeDelta());
}
void QuicChromiumClientSession::MigrateNetworkImmediately(
NetworkChangeNotifier::NetworkHandle network) {
  // There is no choice but to migrate to |network|. If any error is
  // encountered, close the session. When migration succeeds:
// - if no longer on the default network, start timer to migrate back;
// - otherwise, it's brought to default network, cancel the running timer to
// migrate back.
if (!migrate_idle_session_ && !HasActiveRequestStreams()) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_NO_MIGRATABLE_STREAMS,
connection_id(), "No active streams");
CloseSessionOnErrorLater(
ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_NO_MIGRATABLE_STREAMS,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
if (migrate_idle_session_ && CheckIdleTimeExceedsIdleMigrationPeriod())
return;
// Do not migrate if connection migration is disabled.
if (config()->DisableConnectionMigration()) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_DISABLED_BY_CONFIG,
connection_id(),
"Migration disabled by config");
CloseSessionOnErrorLater(ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_DISABLED_BY_CONFIG,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return;
}
if (network == GetCurrentNetwork()) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_ALREADY_MIGRATED,
connection_id(),
"Already bound to new network");
return;
}
// Cancel probing on |network| if there is any.
if (connection()->connection_migration_use_new_cid()) {
auto* context = static_cast<QuicChromiumPathValidationContext*>(
connection()->GetPathValidationContext());
if (context && context->network() == network &&
context->peer_address() == peer_address()) {
connection()->CancelPathValidation();
}
} else {
probing_manager_.CancelProbing(network, peer_address());
}
MigrationResult result =
Migrate(network, ToIPEndPoint(connection()->peer_address()),
/*close_session_on_error=*/true);
if (result == MigrationResult::FAILURE)
return;
if (network == default_network_) {
CancelMigrateBackToDefaultNetworkTimer();
return;
}
  // TODO(zhongyi): reconsider this, maybe we just want to hear back.
  // We are forced to migrate to |network|; |default_network_| is probably
  // not working, so start migrating back to the default network after 1
  // second.
StartMigrateBackToDefaultNetworkTimer(
base::Seconds(kMinRetryTimeForDefaultNetworkSecs));
}
void QuicChromiumClientSession::OnWriteError(int error_code) {
DCHECK_NE(ERR_IO_PENDING, error_code);
DCHECK_GT(0, error_code);
connection()->OnWriteError(error_code);
}
void QuicChromiumClientSession::OnWriteUnblocked() {
DCHECK(!connection()->writer()->IsWriteBlocked());
  // A new packet will be written after migration completes; stop ignoring
  // read errors.
if (ignore_read_error_)
ignore_read_error_ = false;
if (packet_) {
DCHECK(send_packet_after_migration_);
send_packet_after_migration_ = false;
static_cast<QuicChromiumPacketWriter*>(connection()->writer())
->WritePacketToSocket(std::move(packet_));
return;
}
// Unblock the connection, which may send queued packets.
connection()->OnCanWrite();
if (send_packet_after_migration_) {
send_packet_after_migration_ = false;
if (!connection()->writer()->IsWriteBlocked()) {
connection()->SendPing();
}
}
}
void QuicChromiumClientSession::OnPathDegrading() {
if (most_recent_path_degrading_timestamp_ == base::TimeTicks())
most_recent_path_degrading_timestamp_ = tick_clock_->NowTicks();
if (go_away_on_path_degrading_ && OneRttKeysAvailable()) {
net_log_.AddEvent(
NetLogEventType::QUIC_SESSION_CLIENT_GOAWAY_ON_PATH_DEGRADING);
NotifyFactoryOfSessionGoingAway();
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.ActiveStreamsOnGoAwayAfterPathDegrading",
GetNumActiveStreams());
UMA_HISTOGRAM_COUNTS_1M(
"Net.QuicSession.DrainingStreamsOnGoAwayAfterPathDegrading",
num_outgoing_draining_streams());
return;
}
if (!go_away_on_path_degrading_) {
NetworkChangeNotifier::NetworkHandle current_network = GetCurrentNetwork();
for (auto& observer : connectivity_observer_list_)
observer.OnSessionPathDegrading(this, current_network);
}
if (!stream_factory_)
return;
if (allow_port_migration_) {
current_migration_cause_ = CHANGE_PORT_ON_PATH_DEGRADING;
MaybeMigrateToDifferentPortOnPathDegrading();
return;
}
MaybeMigrateToAlternateNetworkOnPathDegrading();
}
void QuicChromiumClientSession::OnForwardProgressMadeAfterPathDegrading() {
if (go_away_on_path_degrading_)
return;
NetworkChangeNotifier::NetworkHandle current_network = GetCurrentNetwork();
for (auto& observer : connectivity_observer_list_)
observer.OnSessionResumedPostPathDegrading(this, current_network);
}
void QuicChromiumClientSession::OnKeyUpdate(quic::KeyUpdateReason reason) {
net_log_.AddEventWithStringParams(NetLogEventType::QUIC_SESSION_KEY_UPDATE,
"reason",
quic::KeyUpdateReasonString(reason));
base::UmaHistogramEnumeration("Net.QuicSession.KeyUpdate.Reason", reason);
last_key_update_reason_ = reason;
}
void QuicChromiumClientSession::OnProofValid(
const quic::QuicCryptoClientConfig::CachedState& cached) {
DCHECK(cached.proof_valid());
if (!server_info_) {
return;
}
QuicServerInfo::State* state = server_info_->mutable_state();
state->server_config = cached.server_config();
state->source_address_token = cached.source_address_token();
state->cert_sct = cached.cert_sct();
state->chlo_hash = cached.chlo_hash();
state->server_config_sig = cached.signature();
state->certs = cached.certs();
server_info_->Persist();
}
void QuicChromiumClientSession::OnProofVerifyDetailsAvailable(
const quic::ProofVerifyDetails& verify_details) {
const ProofVerifyDetailsChromium* verify_details_chromium =
reinterpret_cast<const ProofVerifyDetailsChromium*>(&verify_details);
cert_verify_result_ = std::make_unique<CertVerifyResult>(
verify_details_chromium->cert_verify_result);
pinning_failure_log_ = verify_details_chromium->pinning_failure_log;
logger_->OnCertificateVerified(*cert_verify_result_);
pkp_bypassed_ = verify_details_chromium->pkp_bypassed;
is_fatal_cert_error_ = verify_details_chromium->is_fatal_cert_error;
}
void QuicChromiumClientSession::StartReading() {
for (auto& packet_reader : packet_readers_) {
packet_reader->StartReading();
}
}
void QuicChromiumClientSession::CloseSessionOnError(
int net_error,
quic::QuicErrorCode quic_error,
quic::ConnectionCloseBehavior behavior) {
base::UmaHistogramSparse("Net.QuicSession.CloseSessionOnError", -net_error);
if (!callback_.is_null()) {
std::move(callback_).Run(net_error);
}
NotifyAllStreamsOfError(net_error);
net_log_.AddEventWithIntParams(NetLogEventType::QUIC_SESSION_CLOSE_ON_ERROR,
"net_error", net_error);
if (connection()->connected())
connection()->CloseConnection(quic_error, "net error", behavior);
DCHECK(!connection()->connected());
CloseAllHandles(net_error);
NotifyFactoryOfSessionClosed();
}
void QuicChromiumClientSession::CloseSessionOnErrorLater(
int net_error,
quic::QuicErrorCode quic_error,
quic::ConnectionCloseBehavior behavior) {
base::UmaHistogramSparse("Net.QuicSession.CloseSessionOnError", -net_error);
if (!callback_.is_null()) {
std::move(callback_).Run(net_error);
}
NotifyAllStreamsOfError(net_error);
CloseAllHandles(net_error);
net_log_.AddEventWithIntParams(NetLogEventType::QUIC_SESSION_CLOSE_ON_ERROR,
"net_error", net_error);
if (connection()->connected())
connection()->CloseConnection(quic_error, "net error", behavior);
DCHECK(!connection()->connected());
NotifyFactoryOfSessionClosedLater();
}
void QuicChromiumClientSession::NotifyAllStreamsOfError(int net_error) {
PerformActionOnActiveStreams([net_error](quic::QuicStream* stream) {
static_cast<QuicChromiumClientStream*>(stream)->OnError(net_error);
return true;
});
}
void QuicChromiumClientSession::CloseAllHandles(int net_error) {
while (!handles_.empty()) {
Handle* handle = *handles_.begin();
handles_.erase(handle);
handle->OnSessionClosed(connection()->version(), net_error, error(),
port_migration_detected_, GetConnectTiming(),
WasConnectionEverUsed());
}
}
void QuicChromiumClientSession::CancelAllRequests(int net_error) {
UMA_HISTOGRAM_COUNTS_1000("Net.QuicSession.AbortedPendingStreamRequests",
stream_requests_.size());
while (!stream_requests_.empty()) {
StreamRequest* request = stream_requests_.front();
stream_requests_.pop_front();
request->OnRequestCompleteFailure(net_error);
}
}
void QuicChromiumClientSession::NotifyRequestsOfConfirmation(int net_error) {
// Post tasks to avoid reentrancy.
for (auto& callback : waiting_for_confirmation_callbacks_)
task_runner_->PostTask(FROM_HERE,
base::BindOnce(std::move(callback), net_error));
waiting_for_confirmation_callbacks_.clear();
}
void QuicChromiumClientSession::MaybeMigrateToDifferentPortOnPathDegrading() {
DCHECK(allow_port_migration_ && !migrate_session_early_v2_);
// Migration before handshake confirmed is not allowed.
const bool is_handshake_confirmed = version().UsesHttp3()
? connection()->IsHandshakeConfirmed()
: OneRttKeysAvailable();
if (!is_handshake_confirmed) {
HistogramAndLogMigrationFailure(
MIGRATION_STATUS_PATH_DEGRADING_BEFORE_HANDSHAKE_CONFIRMED,
connection_id(), "Path degrading before handshake confirmed");
return;
}
net_log_.BeginEvent(NetLogEventType::QUIC_PORT_MIGRATION_TRIGGERED);
if (!stream_factory_)
return;
  // Probe a different port; the session will migrate to the probed port on
  // success.
StartProbing(default_network_, peer_address());
net_log_.EndEvent(NetLogEventType::QUIC_PORT_MIGRATION_TRIGGERED);
}
void QuicChromiumClientSession::
MaybeMigrateToAlternateNetworkOnPathDegrading() {
net_log_.AddEvent(
NetLogEventType::QUIC_CONNECTION_MIGRATION_ON_PATH_DEGRADING);
current_migration_cause_ = CHANGE_NETWORK_ON_PATH_DEGRADING;
if (!migrate_session_early_v2_) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_PATH_DEGRADING_NOT_ENABLED,
connection_id(),
"Migration on path degrading not enabled");
return;
}
if (GetCurrentNetwork() == default_network_ &&
current_migrations_to_non_default_network_on_path_degrading_ >=
max_migrations_to_non_default_network_on_path_degrading_) {
HistogramAndLogMigrationFailure(
MIGRATION_STATUS_ON_PATH_DEGRADING_DISABLED, connection_id(),
"Exceeds maximum number of migrations on path degrading");
return;
}
NetworkChangeNotifier::NetworkHandle alternate_network =
stream_factory_->FindAlternateNetwork(GetCurrentNetwork());
if (alternate_network == NetworkChangeNotifier::kInvalidNetworkHandle) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_NO_ALTERNATE_NETWORK,
connection_id(),
"No alternative network on path degrading");
return;
}
LogHandshakeStatusOnMigrationSignal();
const bool is_handshake_confirmed = version().UsesHttp3()
? connection()->IsHandshakeConfirmed()
: OneRttKeysAvailable();
if (!is_handshake_confirmed) {
HistogramAndLogMigrationFailure(
MIGRATION_STATUS_PATH_DEGRADING_BEFORE_HANDSHAKE_CONFIRMED,
connection_id(), "Path degrading before handshake confirmed");
return;
}
net_log_.BeginEventWithStringParams(
NetLogEventType::QUIC_CONNECTION_MIGRATION_TRIGGERED, "trigger",
"PathDegrading");
// Probe the alternative network, session will migrate to the probed
// network and decide whether it wants to migrate back to the default
// network on success.
MaybeStartProbing(alternate_network, peer_address());
net_log_.EndEvent(NetLogEventType::QUIC_CONNECTION_MIGRATION_TRIGGERED);
}
ProbingResult QuicChromiumClientSession::MaybeStartProbing(
NetworkChangeNotifier::NetworkHandle network,
const quic::QuicSocketAddress& peer_address) {
if (!stream_factory_)
return ProbingResult::FAILURE;
CHECK_NE(NetworkChangeNotifier::kInvalidNetworkHandle, network);
if (!migrate_idle_session_ && !HasActiveRequestStreams()) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_NO_MIGRATABLE_STREAMS,
connection_id(), "No active streams");
CloseSessionOnErrorLater(
ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_NO_MIGRATABLE_STREAMS,
quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
return ProbingResult::DISABLED_WITH_IDLE_SESSION;
}
if (migrate_idle_session_ && CheckIdleTimeExceedsIdleMigrationPeriod())
return ProbingResult::DISABLED_WITH_IDLE_SESSION;
// Abort probing if connection migration is disabled by config.
if (version().HasIetfQuicFrames() &&
!connection()->connection_migration_use_new_cid()) {
DVLOG(1) << "Client IETF connection migration is not enabled.";
HistogramAndLogMigrationFailure(MIGRATION_STATUS_NOT_ENABLED,
connection_id(),
"IETF migration flag is false");
return ProbingResult::DISABLED_BY_CONFIG;
}
if (config()->DisableConnectionMigration()) {
DVLOG(1) << "Client disables probing network with connection migration "
<< "disabled by config";
HistogramAndLogMigrationFailure(MIGRATION_STATUS_DISABLED_BY_CONFIG,
connection_id(),
"Migration disabled by config");
return ProbingResult::DISABLED_BY_CONFIG;
}
return StartProbing(network, peer_address);
}
ProbingResult QuicChromiumClientSession::StartProbing(
NetworkChangeNotifier::NetworkHandle network,
const quic::QuicSocketAddress& peer_address) {
// Check if probing manager is probing the same path.
if (connection()->connection_migration_use_new_cid()) {
auto* context = static_cast<QuicChromiumPathValidationContext*>(
connection()->GetPathValidationContext());
if (context && context->network() == network &&
context->peer_address() == peer_address) {
return ProbingResult::PENDING;
}
} else if (probing_manager_.IsUnderProbing(network, peer_address)) {
return ProbingResult::PENDING;
}
// Create and configure socket on |network|.
std::unique_ptr<DatagramClientSocket> probing_socket =
stream_factory_->CreateSocket(net_log_.net_log(), net_log_.source());
if (stream_factory_->ConfigureSocket(probing_socket.get(),
ToIPEndPoint(peer_address), network,
session_key_.socket_tag()) != OK) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_INTERNAL_ERROR,
connection_id(),
"Socket configuration failed");
return ProbingResult::INTERNAL_ERROR;
}
// Create new packet writer and reader on the probing socket.
std::unique_ptr<QuicChromiumPacketWriter> probing_writer(
new QuicChromiumPacketWriter(probing_socket.get(), task_runner_));
std::unique_ptr<QuicChromiumPacketReader> probing_reader(
new QuicChromiumPacketReader(probing_socket.get(), clock_, this,
yield_after_packets_, yield_after_duration_,
net_log_));
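  // Use twice the smoothed RTT as the probing timeout, falling back to a
  // default RTT when the estimate is unavailable or implausibly large.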
int rtt_ms = connection()
->sent_packet_manager()
.GetRttStats()
->smoothed_rtt()
.ToMilliseconds();
if (rtt_ms == 0 || rtt_ms > kDefaultRTTMilliSecs)
rtt_ms = kDefaultRTTMilliSecs;
int timeout_ms = rtt_ms * 2;
if (connection()->connection_migration_use_new_cid() &&
version().HasIetfQuicFrames()) {
probing_reader->StartReading();
path_validation_writer_delegate_.set_network(network);
path_validation_writer_delegate_.set_peer_address(peer_address);
probing_writer->set_delegate(&path_validation_writer_delegate_);
IPEndPoint local_address;
probing_socket->GetLocalAddress(&local_address);
auto context = std::make_unique<QuicChromiumPathValidationContext>(
ToQuicSocketAddress(local_address), peer_address, network,
std::move(probing_socket), std::move(probing_writer),
std::move(probing_reader));
if (current_migration_cause_ != CHANGE_PORT_ON_PATH_DEGRADING) {
ValidatePath(
std::move(context),
std::make_unique<ConnectionMigrationValidationResultDelegate>(this));
return ProbingResult::PENDING;
}
ValidatePath(std::move(context),
std::make_unique<PortMigrationValidationResultDelegate>(this));
return ProbingResult::PENDING;
}
probing_manager_.StartProbing(
network, peer_address, std::move(probing_socket),
std::move(probing_writer), std::move(probing_reader),
base::Milliseconds(timeout_ms), net_log_);
return ProbingResult::PENDING;
}
void QuicChromiumClientSession::StartMigrateBackToDefaultNetworkTimer(
base::TimeDelta delay) {
if (current_migration_cause_ != ON_NETWORK_MADE_DEFAULT)
current_migration_cause_ = ON_MIGRATE_BACK_TO_DEFAULT_NETWORK;
CancelMigrateBackToDefaultNetworkTimer();
// Post a task to try migrate back to default network after |delay|.
migrate_back_to_default_timer_.Start(
FROM_HERE, delay,
base::BindOnce(
&QuicChromiumClientSession::MaybeRetryMigrateBackToDefaultNetwork,
weak_factory_.GetWeakPtr()));
}
void QuicChromiumClientSession::CancelMigrateBackToDefaultNetworkTimer() {
retry_migrate_back_count_ = 0;
migrate_back_to_default_timer_.Stop();
}
void QuicChromiumClientSession::TryMigrateBackToDefaultNetwork(
base::TimeDelta timeout) {
if (default_network_ == NetworkChangeNotifier::kInvalidNetworkHandle) {
DVLOG(1) << "Default network is not connected";
return;
}
net_log_.AddEventWithInt64Params(
NetLogEventType::QUIC_CONNECTION_MIGRATION_ON_MIGRATE_BACK, "retry_count",
retry_migrate_back_count_);
  // Start probing the default network immediately. If the manager is already
  // probing the same network, this is a no-op. Otherwise, the previous probe
  // will be cancelled and the manager starts to probe |default_network_|
  // immediately.
ProbingResult result = MaybeStartProbing(default_network_, peer_address());
if (result == ProbingResult::DISABLED_WITH_IDLE_SESSION)
return;
if (result != ProbingResult::PENDING) {
// Session is not allowed to migrate, mark session as going away, cancel
// migrate back to default timer.
NotifyFactoryOfSessionGoingAway();
CancelMigrateBackToDefaultNetworkTimer();
return;
}
retry_migrate_back_count_++;
migrate_back_to_default_timer_.Start(
FROM_HERE, timeout,
base::BindOnce(
&QuicChromiumClientSession::MaybeRetryMigrateBackToDefaultNetwork,
weak_factory_.GetWeakPtr()));
}
void QuicChromiumClientSession::MaybeRetryMigrateBackToDefaultNetwork() {
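  // Back off exponentially: the retry timeout doubles after each failed
  // attempt to migrate back (1s, 2s, 4s, ...).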
base::TimeDelta retry_migrate_back_timeout =
base::Seconds(UINT64_C(1) << retry_migrate_back_count_);
if (default_network_ == GetCurrentNetwork()) {
// If session has been back on the default already by other direct
// migration attempt, cancel migrate back now.
CancelMigrateBackToDefaultNetworkTimer();
return;
}
if (retry_migrate_back_timeout > max_time_on_non_default_network_) {
// Mark session as going away to accept no more streams.
NotifyFactoryOfSessionGoingAway();
return;
}
TryMigrateBackToDefaultNetwork(retry_migrate_back_timeout);
}
bool QuicChromiumClientSession::CheckIdleTimeExceedsIdleMigrationPeriod() {
if (!migrate_idle_session_)
return false;
if (HasActiveRequestStreams()) {
return false;
}
  // There are no active/draining streams; check the last stream's finish
  // time.
if (tick_clock_->NowTicks() - most_recent_stream_close_time_ <
idle_migration_period_) {
// Still within the idle migration period.
return false;
}
HistogramAndLogMigrationFailure(MIGRATION_STATUS_IDLE_MIGRATION_TIMEOUT,
connection_id(),
"Ilde migration period exceeded");
CloseSessionOnErrorLater(ERR_NETWORK_CHANGED, quic::QUIC_NETWORK_IDLE_TIMEOUT,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return true;
}
void QuicChromiumClientSession::ResetNonMigratableStreams() {
// TODO(zhongyi): may close non-migratable draining streams as well to avoid
// sending additional data on alternate networks.
PerformActionOnActiveStreams([](quic::QuicStream* stream) {
QuicChromiumClientStream* chrome_stream =
static_cast<QuicChromiumClientStream*>(stream);
if (!chrome_stream->can_migrate_to_cellular_network()) {
// Close the stream in both direction by resetting the stream.
// TODO(zhongyi): use a different error code to reset streams for
// connection migration.
chrome_stream->Reset(quic::QUIC_STREAM_CANCELLED);
}
return true;
});
}
void QuicChromiumClientSession::LogMetricsOnNetworkDisconnected() {
if (most_recent_path_degrading_timestamp_ != base::TimeTicks()) {
most_recent_network_disconnected_timestamp_ = tick_clock_->NowTicks();
base::TimeDelta degrading_duration =
most_recent_network_disconnected_timestamp_ -
most_recent_path_degrading_timestamp_;
UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.QuicNetworkDegradingDurationTillDisconnected", degrading_duration,
base::Milliseconds(1), base::Minutes(10), 100);
}
if (most_recent_write_error_timestamp_ != base::TimeTicks()) {
base::TimeDelta write_error_to_disconnection_gap =
most_recent_network_disconnected_timestamp_ -
most_recent_write_error_timestamp_;
UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.QuicNetworkGapBetweenWriteErrorAndDisconnection",
write_error_to_disconnection_gap, base::Milliseconds(1),
base::Minutes(10), 100);
base::UmaHistogramSparse("Net.QuicSession.WriteError.NetworkDisconnected",
-most_recent_write_error_);
most_recent_write_error_ = 0;
most_recent_write_error_timestamp_ = base::TimeTicks();
}
}
void QuicChromiumClientSession::LogMetricsOnNetworkMadeDefault() {
if (most_recent_path_degrading_timestamp_ != base::TimeTicks()) {
if (most_recent_network_disconnected_timestamp_ != base::TimeTicks()) {
      // NetworkDisconnected happens before NetworkMadeDefault: the platform
      // is dropping WiFi.
base::TimeTicks now = tick_clock_->NowTicks();
base::TimeDelta disconnection_duration =
now - most_recent_network_disconnected_timestamp_;
base::TimeDelta degrading_duration =
now - most_recent_path_degrading_timestamp_;
UMA_HISTOGRAM_CUSTOM_TIMES("Net.QuicNetworkDisconnectionDuration",
disconnection_duration, base::Milliseconds(1),
base::Minutes(10), 100);
UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.QuicNetworkDegradingDurationTillNewNetworkMadeDefault",
degrading_duration, base::Milliseconds(1), base::Minutes(10), 100);
most_recent_network_disconnected_timestamp_ = base::TimeTicks();
}
most_recent_path_degrading_timestamp_ = base::TimeTicks();
}
}
void QuicChromiumClientSession::LogMigrationResultToHistogram(
QuicConnectionMigrationStatus status) {
if (current_migration_cause_ == CHANGE_PORT_ON_PATH_DEGRADING) {
UMA_HISTOGRAM_ENUMERATION("Net.QuicSession.PortMigration", status,
MIGRATION_STATUS_MAX);
current_migration_cause_ = UNKNOWN_CAUSE;
return;
}
UMA_HISTOGRAM_ENUMERATION("Net.QuicSession.ConnectionMigration", status,
MIGRATION_STATUS_MAX);
  // Log the connection migration result to different histograms based on the
  // cause of the connection migration.
std::string histogram_name = "Net.QuicSession.ConnectionMigration." +
MigrationCauseToString(current_migration_cause_);
base::UmaHistogramEnumeration(histogram_name, status, MIGRATION_STATUS_MAX);
current_migration_cause_ = UNKNOWN_CAUSE;
}
void QuicChromiumClientSession::LogHandshakeStatusOnMigrationSignal() const {
if (current_migration_cause_ == CHANGE_PORT_ON_PATH_DEGRADING) {
UMA_HISTOGRAM_BOOLEAN("Net.QuicSession.HandshakeStatusOnPortMigration",
OneRttKeysAvailable());
return;
}
UMA_HISTOGRAM_BOOLEAN("Net.QuicSession.HandshakeStatusOnConnectionMigration",
OneRttKeysAvailable());
const std::string histogram_name =
"Net.QuicSession.HandshakeStatusOnConnectionMigration." +
MigrationCauseToString(current_migration_cause_);
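  // STATIC_HISTOGRAM_POINTER_GROUP caches one histogram pointer per
  // migration cause, since the histogram name is composed at runtime.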
STATIC_HISTOGRAM_POINTER_GROUP(
histogram_name, current_migration_cause_, MIGRATION_CAUSE_MAX,
AddBoolean(OneRttKeysAvailable()),
base::BooleanHistogram::FactoryGet(
histogram_name, base::HistogramBase::kUmaTargetedHistogramFlag));
}
void QuicChromiumClientSession::HistogramAndLogMigrationFailure(
QuicConnectionMigrationStatus status,
quic::QuicConnectionId connection_id,
const char* reason) {
NetLogEventType event_type =
current_migration_cause_ == CHANGE_PORT_ON_PATH_DEGRADING
? NetLogEventType::QUIC_PORT_MIGRATION_FAILURE
: NetLogEventType::QUIC_CONNECTION_MIGRATION_FAILURE;
net_log_.AddEvent(event_type, [&] {
return NetLogQuicMigrationFailureParams(connection_id, reason);
});
// |current_migration_cause_| will be reset afterwards.
LogMigrationResultToHistogram(status);
}
void QuicChromiumClientSession::HistogramAndLogMigrationSuccess(
quic::QuicConnectionId connection_id) {
NetLogEventType event_type =
current_migration_cause_ == CHANGE_PORT_ON_PATH_DEGRADING
? NetLogEventType::QUIC_PORT_MIGRATION_SUCCESS
: NetLogEventType::QUIC_CONNECTION_MIGRATION_SUCCESS;
net_log_.AddEvent(event_type, [&] {
return NetLogQuicMigrationSuccessParams(connection_id);
});
// |current_migration_cause_| will be reset afterwards.
LogMigrationResultToHistogram(MIGRATION_STATUS_SUCCESS);
}
base::Value QuicChromiumClientSession::GetInfoAsValue(
const std::set<HostPortPair>& aliases) {
base::DictionaryValue dict;
dict.SetString("version", ParsedQuicVersionToString(connection()->version()));
dict.SetInteger("open_streams", GetNumActiveStreams());
std::vector<base::Value> stream_list;
auto* stream_list_ptr = &stream_list;
PerformActionOnActiveStreams([stream_list_ptr](quic::QuicStream* stream) {
stream_list_ptr->emplace_back(base::NumberToString(stream->id()));
return true;
});
dict.SetKey("active_streams", base::Value(std::move(stream_list)));
dict.SetIntKey("total_streams", num_total_streams_);
dict.SetStringKey("peer_address", peer_address().ToString());
dict.SetStringKey("network_isolation_key",
session_key_.network_isolation_key().ToDebugString());
dict.SetStringKey("connection_id", connection_id().ToString());
if (!connection()->client_connection_id().IsEmpty()) {
dict.SetStringKey("client_connection_id",
connection()->client_connection_id().ToString());
}
dict.SetBoolKey("connected", connection()->connected());
const quic::QuicConnectionStats& stats = connection()->GetStats();
dict.SetIntKey("packets_sent", stats.packets_sent);
dict.SetIntKey("packets_received", stats.packets_received);
dict.SetIntKey("packets_lost", stats.packets_lost);
SSLInfo ssl_info;
std::vector<base::Value> alias_list;
for (const auto& alias : aliases) {
alias_list.emplace_back(alias.ToString());
}
dict.SetKey("aliases", base::Value(std::move(alias_list)));
return std::move(dict);
}
bool QuicChromiumClientSession::gquic_zero_rtt_disabled() const {
if (!stream_factory_)
return false;
return stream_factory_->gquic_zero_rtt_disabled();
}
std::unique_ptr<QuicChromiumClientSession::Handle>
QuicChromiumClientSession::CreateHandle(url::SchemeHostPort destination) {
return std::make_unique<QuicChromiumClientSession::Handle>(
weak_factory_.GetWeakPtr(), std::move(destination));
}
bool QuicChromiumClientSession::OnReadError(
int result,
const DatagramClientSocket* socket) {
DCHECK(socket != nullptr);
base::UmaHistogramSparse("Net.QuicSession.ReadError.AnyNetwork", -result);
if (socket != GetDefaultSocket()) {
DVLOG(1) << "Ignoring read error " << ErrorToString(result)
<< " on old socket";
base::UmaHistogramSparse("Net.QuicSession.ReadError.OtherNetworks",
-result);
    // Ignore read errors from sockets that are not affecting the current
    // network, i.e., sockets that are no longer active and the probing
    // socket.
    // TODO(jri): Maybe clean up old sockets on error.
return false;
}
if (ignore_read_error_) {
DVLOG(1) << "Ignoring read error " << ErrorToString(result)
<< " during pending migration";
    // Ignore read errors during a pending migration. The connection will be
    // closed if the pending migration fails or times out.
base::UmaHistogramSparse("Net.QuicSession.ReadError.PendingMigration",
-result);
return false;
}
base::UmaHistogramSparse("Net.QuicSession.ReadError.CurrentNetwork", -result);
if (OneRttKeysAvailable()) {
base::UmaHistogramSparse(
"Net.QuicSession.ReadError.CurrentNetwork.HandshakeConfirmed", -result);
}
DVLOG(1) << "Closing session on read error " << ErrorToString(result);
connection()->CloseConnection(quic::QUIC_PACKET_READ_ERROR,
ErrorToString(result),
quic::ConnectionCloseBehavior::SILENT_CLOSE);
return false;
}
bool QuicChromiumClientSession::OnPacket(
const quic::QuicReceivedPacket& packet,
const quic::QuicSocketAddress& local_address,
const quic::QuicSocketAddress& peer_address) {
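  // Feed the packet to the connection; return false to stop the reader if
  // processing the packet closed the connection.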
ProcessUdpPacket(local_address, peer_address, packet);
if (!connection()->connected()) {
NotifyFactoryOfSessionClosedLater();
return false;
}
return true;
}
void QuicChromiumClientSession::NotifyFactoryOfSessionGoingAway() {
going_away_ = true;
if (stream_factory_)
stream_factory_->OnSessionGoingAway(this);
}
void QuicChromiumClientSession::NotifyFactoryOfSessionClosedLater() {
going_away_ = true;
DCHECK_EQ(0u, GetNumActiveStreams());
DCHECK(!connection()->connected());
task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&QuicChromiumClientSession::NotifyFactoryOfSessionClosed,
weak_factory_.GetWeakPtr()));
}
void QuicChromiumClientSession::NotifyFactoryOfSessionClosed() {
going_away_ = true;
DCHECK_EQ(0u, GetNumActiveStreams());
// Will delete |this|.
if (stream_factory_)
stream_factory_->OnSessionClosed(this);
}
void QuicChromiumClientSession::OnCryptoHandshakeComplete() {
if (stream_factory_)
stream_factory_->set_is_quic_known_to_work_on_current_network(true);
// Update |connect_end| only when handshake is confirmed. This should also
// take care of any failed 0-RTT request.
connect_timing_.connect_end = tick_clock_->NowTicks();
DCHECK_LE(connect_timing_.connect_start, connect_timing_.connect_end);
UMA_HISTOGRAM_TIMES(
"Net.QuicSession.HandshakeConfirmedTime",
connect_timing_.connect_end - connect_timing_.connect_start);
// Track how long it has taken to finish handshake after we have finished
// DNS host resolution.
if (!connect_timing_.dns_end.is_null()) {
UMA_HISTOGRAM_TIMES("Net.QuicSession.HostResolution.HandshakeConfirmedTime",
tick_clock_->NowTicks() - connect_timing_.dns_end);
}
auto it = handles_.begin();
while (it != handles_.end()) {
Handle* handle = *it;
++it;
handle->OnCryptoHandshakeConfirmed();
}
NotifyRequestsOfConfirmation(OK);
// Attempt to migrate back to the default network after handshake has been
// confirmed if the session is not created on the default network.
if (migrate_session_on_network_change_v2_ &&
default_network_ != NetworkChangeNotifier::kInvalidNetworkHandle &&
GetCurrentNetwork() != default_network_) {
current_migration_cause_ = ON_MIGRATE_BACK_TO_DEFAULT_NETWORK;
StartMigrateBackToDefaultNetworkTimer(
base::Seconds(kMinRetryTimeForDefaultNetworkSecs));
}
}
MigrationResult QuicChromiumClientSession::Migrate(
NetworkChangeNotifier::NetworkHandle network,
IPEndPoint peer_address,
bool close_session_on_error) {
if (!stream_factory_)
return MigrationResult::FAILURE;
if (network != NetworkChangeNotifier::kInvalidNetworkHandle) {
// This is a migration attempt from connection migration.
ResetNonMigratableStreams();
if (!migrate_idle_session_ && !HasActiveRequestStreams()) {
// If idle sessions can not be migrated, close the session if needed.
if (close_session_on_error) {
CloseSessionOnErrorLater(
ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_NO_MIGRATABLE_STREAMS,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
}
return MigrationResult::FAILURE;
}
}
// Create and configure socket on |network|.
std::unique_ptr<DatagramClientSocket> socket(
stream_factory_->CreateSocket(net_log_.net_log(), net_log_.source()));
if (stream_factory_->ConfigureSocket(socket.get(), peer_address, network,
session_key_.socket_tag()) != OK) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_INTERNAL_ERROR,
connection_id(),
"Socket configuration failed");
if (close_session_on_error) {
if (migrate_session_on_network_change_v2_) {
CloseSessionOnErrorLater(ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_INTERNAL_ERROR,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
} else {
CloseSessionOnError(ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_INTERNAL_ERROR,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
}
}
return MigrationResult::FAILURE;
}
// Create new packet reader and writer on the new socket.
std::unique_ptr<QuicChromiumPacketReader> new_reader(
new QuicChromiumPacketReader(socket.get(), clock_, this,
yield_after_packets_, yield_after_duration_,
net_log_));
new_reader->StartReading();
std::unique_ptr<QuicChromiumPacketWriter> new_writer(
new QuicChromiumPacketWriter(socket.get(), task_runner_));
static_cast<QuicChromiumPacketWriter*>(connection()->writer())
->set_delegate(nullptr);
new_writer->set_delegate(this);
IPEndPoint self_address;
socket->GetLocalAddress(&self_address);
// Migrate to the new socket.
if (!MigrateToSocket(ToQuicSocketAddress(self_address),
ToQuicSocketAddress(peer_address), std::move(socket),
std::move(new_reader), std::move(new_writer))) {
if (close_session_on_error) {
if (migrate_session_on_network_change_v2_) {
CloseSessionOnErrorLater(
ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_TOO_MANY_CHANGES,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
} else {
CloseSessionOnError(ERR_NETWORK_CHANGED,
quic::QUIC_CONNECTION_MIGRATION_TOO_MANY_CHANGES,
quic::ConnectionCloseBehavior::SILENT_CLOSE);
}
}
return MigrationResult::FAILURE;
}
HistogramAndLogMigrationSuccess(connection_id());
return MigrationResult::SUCCESS;
}
bool QuicChromiumClientSession::MigrateToSocket(
const quic::QuicSocketAddress& self_address,
const quic::QuicSocketAddress& peer_address,
std::unique_ptr<DatagramClientSocket> socket,
std::unique_ptr<QuicChromiumPacketReader> reader,
std::unique_ptr<QuicChromiumPacketWriter> writer) {
CHECK_EQ(sockets_.size(), packet_readers_.size());
// TODO(zhongyi): figure out whether we want to limit the number of
// connection migrations for v2, which includes migration on platform signals,
// write error events, and path degrading on original network.
if (!migrate_session_on_network_change_v2_ &&
sockets_.size() >= kMaxReadersPerQuicSession) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_TOO_MANY_CHANGES,
connection_id(), "Too many changes");
return false;
}
packet_readers_.push_back(std::move(reader));
sockets_.push_back(std::move(socket));
  // Force the writer to be blocked to prevent it from being used until
  // WriteToNewSocket completes.
DVLOG(1) << "Force blocking the packet writer";
writer->set_force_write_blocked(true);
if (!MigratePath(self_address, peer_address, writer.release(),
/*owns_writer=*/true)) {
HistogramAndLogMigrationFailure(MIGRATION_STATUS_NO_UNUSED_CONNECTION_ID,
connection_id(),
"No unused server connection ID");
DVLOG(1) << "MigratePath fails as there is no CID available";
return false;
}
// Post task to write the pending packet or a PING packet to the new
// socket. This avoids reentrancy issues if there is a write error
// on the write to the new socket.
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&QuicChromiumClientSession::WriteToNewSocket,
weak_factory_.GetWeakPtr()));
return true;
}
void QuicChromiumClientSession::PopulateNetErrorDetails(
NetErrorDetails* details) const {
details->quic_port_migration_detected = port_migration_detected_;
details->quic_connection_error = error();
}
const DatagramClientSocket* QuicChromiumClientSession::GetDefaultSocket()
const {
DCHECK(sockets_.back().get() != nullptr);
// The most recently added socket is the currently active one.
return sockets_.back().get();
}
NetworkChangeNotifier::NetworkHandle
QuicChromiumClientSession::GetCurrentNetwork() const {
  // If connection migration is enabled, an alternate network interface may
  // be used to send packets; it is identified as the bound network of the
  // default socket. Otherwise, always use |default_network_|.
return migrate_session_on_network_change_v2_
? GetDefaultSocket()->GetBoundNetwork()
: default_network_;
}
bool QuicChromiumClientSession::IsAuthorized(const std::string& hostname) {
bool result = CanPool(hostname, session_key_);
if (result)
streams_pushed_count_++;
return result;
}
bool QuicChromiumClientSession::HandlePromised(
quic::QuicStreamId id,
quic::QuicStreamId promised_id,
const spdy::Http2HeaderBlock& headers) {
bool result =
quic::QuicSpdyClientSessionBase::HandlePromised(id, promised_id, headers);
if (result) {
// The push promise is accepted, notify the push_delegate that a push
// promise has been received.
if (push_delegate_) {
std::string pushed_url =
quic::SpdyServerPushUtils::GetPromisedUrlFromHeaders(headers);
push_delegate_->OnPush(std::make_unique<QuicServerPushHelper>(
weak_factory_.GetWeakPtr(), GURL(pushed_url)),
net_log_);
}
if (headers_include_h2_stream_dependency_ ||
VersionUsesHttp3(connection()->transport_version())) {
// Even though the promised stream will not be created until after the
// push promise headers are received, send a PRIORITY frame for the
// promised stream ID. Send |kDefaultPriority| since that will be the
// initial spdy::SpdyPriority of the push promise stream when created.
const spdy::SpdyPriority priority = quic::QuicStream::kDefaultPriority;
spdy::SpdyStreamId parent_stream_id = 0;
int weight = 0;
bool exclusive = false;
priority_dependency_state_.OnStreamCreation(
promised_id, priority, &parent_stream_id, &weight, &exclusive);
if (!VersionUsesHttp3(connection()->transport_version())) {
WritePriority(promised_id, parent_stream_id, weight, exclusive);
}
}
}
net_log_.AddEvent(NetLogEventType::QUIC_SESSION_PUSH_PROMISE_RECEIVED,
[&](NetLogCaptureMode capture_mode) {
return NetLogQuicPushPromiseReceivedParams(
&headers, id, promised_id, capture_mode);
});
return result;
}
void QuicChromiumClientSession::DeletePromised(
quic::QuicClientPromisedInfo* promised) {
if (IsOpenStream(promised->id()))
streams_pushed_and_claimed_count_++;
quic::QuicSpdyClientSessionBase::DeletePromised(promised);
}
void QuicChromiumClientSession::OnPushStreamTimedOut(
quic::QuicStreamId stream_id) {
quic::QuicSpdyStream* stream = GetPromisedStream(stream_id);
if (stream != nullptr)
bytes_pushed_and_unclaimed_count_ += stream->stream_bytes_read();
}
void QuicChromiumClientSession::CancelPush(const GURL& url) {
quic::QuicClientPromisedInfo* promised_info =
quic::QuicSpdyClientSessionBase::GetPromisedByUrl(url.spec());
if (!promised_info || promised_info->is_validating()) {
// Push stream has already been claimed or is pending matched to a request.
return;
}
quic::QuicStreamId stream_id = promised_info->id();
// Collect data on the cancelled push stream.
quic::QuicSpdyStream* stream = GetPromisedStream(stream_id);
if (stream != nullptr)
bytes_pushed_and_unclaimed_count_ += stream->stream_bytes_read();
// Send the reset and remove the promised info from the promise index.
quic::QuicSpdyClientSessionBase::ResetPromised(stream_id,
quic::QUIC_STREAM_CANCELLED);
DeletePromised(promised_info);
}
const LoadTimingInfo::ConnectTiming&
QuicChromiumClientSession::GetConnectTiming() {
connect_timing_.ssl_start = connect_timing_.connect_start;
connect_timing_.ssl_end = connect_timing_.connect_end;
return connect_timing_;
}
quic::ParsedQuicVersion QuicChromiumClientSession::GetQuicVersion() const {
return connection()->version();
}
quic::QuicClientPromisedInfo* QuicChromiumClientSession::GetPromised(
const GURL& url,
const QuicSessionKey& session_key) {
if (!session_key_.CanUseForAliasing(session_key)) {
return nullptr;
}
return push_promise_index_->GetPromised(url.spec());
}
const std::vector<std::string>&
QuicChromiumClientSession::GetDnsAliasesForSessionKey(
const QuicSessionKey& key) const {
static const base::NoDestructor<std::vector<std::string>> emptyvector_result;
return stream_factory_ ? stream_factory_->GetDnsAliasesForSessionKey(key)
: *emptyvector_result;
}
bool QuicChromiumClientSession::ValidateStatelessReset(
const quic::QuicSocketAddress& self_address,
const quic::QuicSocketAddress& peer_address) {
if (probing_manager_.ValidateStatelessReset(self_address, peer_address)) {
    // The stateless reset was received on the probing path. We shouldn't close
    // the connection, but should disable further port migration attempts.
if (allow_port_migration_)
allow_port_migration_ = false;
return false;
}
return true;
}
} // namespace net<|fim▁end|> | CancelMigrateBackToDefaultNetworkTimer(); |
<|file_name|>get_mnist_prediction.py<|end_file_name|><|fim▁begin|><|fim▁hole|> return chainer.datasets.get_mnist(withlabel=False)[0]<|fim▁end|> | import chainer
def main(): |
<|file_name|>ext.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Experimental extensions to `std` for Windows.
//!
//! For now, this module is limited to extracting handles, file
//! descriptors, and sockets, but its functionality will grow over
//! time.
#![unstable(feature = "std_misc")]
pub use sys_common::wtf8::{Wtf8Buf, EncodeWide};
use ffi::{OsStr, OsString};
use fs::{self, OpenOptions};
use libc;
use net;
use sys::os_str::Buf;
use sys_common::{AsInner, FromInner, AsInnerMut};
use old_io;
/// Raw HANDLEs.
pub type Handle = libc::HANDLE;
/// Raw SOCKETs.
pub type Socket = libc::SOCKET;
/// Extract raw handles.
pub trait AsRawHandle {
/// Extract the raw handle, without taking any ownership.
fn as_raw_handle(&self) -> Handle;
}
#[allow(deprecated)]
impl AsRawHandle for old_io::fs::File {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
impl AsRawHandle for fs::File {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle().raw()
}
}
impl AsRawHandle for old_io::pipe::PipeStream {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
#[allow(deprecated)]
impl AsRawHandle for old_io::net::pipe::UnixStream {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
#[allow(deprecated)]
impl AsRawHandle for old_io::net::pipe::UnixListener {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
#[allow(deprecated)]
impl AsRawHandle for old_io::net::pipe::UnixAcceptor {
fn as_raw_handle(&self) -> Handle {
self.as_inner().handle()
}
}
/// Extract raw sockets.
pub trait AsRawSocket {
fn as_raw_socket(&self) -> Socket;
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::tcp::TcpStream {
fn as_raw_socket(&self) -> Socket {
self.as_inner().fd()
}
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::tcp::TcpListener {
fn as_raw_socket(&self) -> Socket {
self.as_inner().socket()
}
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::tcp::TcpAcceptor {
fn as_raw_socket(&self) -> Socket {
self.as_inner().socket()
}
}
#[allow(deprecated)]
impl AsRawSocket for old_io::net::udp::UdpSocket {
fn as_raw_socket(&self) -> Socket {
self.as_inner().fd()
}
}
impl AsRawSocket for net::TcpStream {
fn as_raw_socket(&self) -> Socket { *self.as_inner().socket().as_inner() }
}
impl AsRawSocket for net::TcpListener {
fn as_raw_socket(&self) -> Socket { *self.as_inner().socket().as_inner() }
}
impl AsRawSocket for net::UdpSocket {
fn as_raw_socket(&self) -> Socket { *self.as_inner().socket().as_inner() }
}
// Windows-specific extensions to `OsString`.
pub trait OsStringExt {
/// Create an `OsString` from a potentially ill-formed UTF-16 slice of 16-bit code units.
///
/// This is lossless: calling `.encode_wide()` on the resulting string
/// will always return the original code units.
fn from_wide(wide: &[u16]) -> Self;
}
impl OsStringExt for OsString {
fn from_wide(wide: &[u16]) -> OsString {
FromInner::from_inner(Buf { inner: Wtf8Buf::from_wide(wide) })
}
}
// Windows-specific extensions to `OsStr`.
pub trait OsStrExt {
fn encode_wide(&self) -> EncodeWide;
}
impl OsStrExt for OsStr {
fn encode_wide(&self) -> EncodeWide {
self.as_inner().inner.encode_wide()
}
}
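// A hypothetical round trip, illustrating the losslessness noted above
// (the unpaired surrogate survives intact):
//
//     let s = OsString::from_wide(&[0xD800]);
//     assert_eq!(s.encode_wide().collect::<Vec<u16>>(), vec![0xD800]);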
// Windows-specific extensions to `OpenOptions`
pub trait OpenOptionsExt {
/// Override the `dwDesiredAccess` argument to the call to `CreateFile` with
/// the specified value.
fn desired_access(&mut self, access: i32) -> &mut Self;
/// Override the `dwCreationDisposition` argument to the call to
/// `CreateFile` with the specified value.
///
/// This will override any values of the standard `create` flags, for
/// example.
fn creation_disposition(&mut self, val: i32) -> &mut Self;
/// Override the `dwFlagsAndAttributes` argument to the call to
/// `CreateFile` with the specified value.
///
/// This will override any values of the standard flags on the `OpenOptions`
/// structure.
fn flags_and_attributes(&mut self, val: i32) -> &mut Self;
/// Override the `dwShareMode` argument to the call to `CreateFile` with the
/// specified value.
///
/// This will override any values of the standard flags on the `OpenOptions`
/// structure.
fn share_mode(&mut self, val: i32) -> &mut Self;
}
impl OpenOptionsExt for OpenOptions {
fn desired_access(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().desired_access(access); self
}
fn creation_disposition(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().creation_disposition(access); self
}
fn flags_and_attributes(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().flags_and_attributes(access); self
}
fn share_mode(&mut self, access: i32) -> &mut OpenOptions {
self.as_inner_mut().share_mode(access); self
}
}
<|fim▁hole|>///
/// Includes all extension traits, and some important type definitions.
pub mod prelude {
#[doc(no_inline)]
pub use super::{Socket, Handle, AsRawSocket, AsRawHandle};
#[doc(no_inline)]
pub use super::{OsStrExt, OsStringExt};
#[doc(no_inline)]
pub use super::OpenOptionsExt;
}<|fim▁end|> | /// A prelude for conveniently writing platform-specific code. |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from __future__ import unicode_literals
from datetime import datetime
import logging
from types import NoneType<|fim▁hole|>from google.appengine.ext.ndb.query import Cursor
from typing import Optional, List, Union, Tuple
from mcfw.rpc import returns, arguments
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.jobs.matching import rebuild_matches_check_current
from rogerthat.bizz.jobs.notifications import calculate_next_reminder
from rogerthat.bizz.jobs.translations import localize as localize_jobs
from rogerthat.capi.jobs import newJobs
from rogerthat.consts import JOBS_WORKER_QUEUE
from rogerthat.dal.mobile import get_mobile_key_by_account
from rogerthat.dal.profile import get_user_profile
from rogerthat.models import NdbUserProfile
from rogerthat.models.jobs import JobOffer, JobMatchingCriteria, JobMatchingCriteriaNotifications, JobMatch, \
JobMatchStatus, JobNotificationSchedule, JobOfferSourceType
from rogerthat.rpc import users
from rogerthat.rpc.models import RpcCAPICall, RpcException
from rogerthat.rpc.rpc import mapping, logError, CAPI_KEYWORD_ARG_PRIORITY, \
PRIORITY_HIGH
from rogerthat.service.api.messaging import add_chat_members
from rogerthat.to.jobs import GetJobsResponseTO, JobOfferTO, NewJobsResponseTO, \
NewJobsRequestTO, SaveJobsCriteriaResponseTO, GetJobsCriteriaResponseTO, \
JobKeyLabelTO, JobCriteriaLocationTO, JobCriteriaNotificationsTO, JobCriteriaGeoLocationTO, \
SaveJobsCriteriaRequestTO, JobOfferChatActionTO, JobOfferOpenActionTO, GetJobChatInfoResponseTO, JobChatAnonymousTO, \
CreateJobChatResponseTO, CreateJobChatRequestTO, JobsInfoTO, JobOfferProviderTO
from rogerthat.translations import localize
from rogerthat.utils import now, get_epoch_from_datetime
from rogerthat.utils.location import coordinates_to_city
from solutions.common.jobs.models import JobSolicitation
TAG_JOB_CHAT = '__rt__.jobs_chat'
CONTRACT_TYPES = [
'contract_type_001',
'contract_type_002',
'contract_type_003',
'contract_type_004',
'contract_type_005',
'contract_type_006',
'contract_type_007',
]
JOB_DOMAINS = [
'job_domain_001',
'job_domain_002',
'job_domain_003',
'job_domain_004',
'job_domain_005',
'job_domain_006',
'job_domain_007',
'job_domain_008',
'job_domain_009',
'job_domain_010',
'job_domain_011',
'job_domain_012',
'job_domain_013',
'job_domain_014',
'job_domain_015',
'job_domain_016',
'job_domain_017',
'job_domain_018',
'job_domain_019',
'job_domain_020',
'job_domain_021',
'job_domain_022',
'job_domain_023',
'job_domain_024',
]
def get_job_criteria(app_user):
# type: (users.User) -> GetJobsCriteriaResponseTO
user_profile = get_user_profile(app_user)
response = GetJobsCriteriaResponseTO()
response.location = JobCriteriaLocationTO()
response.location.address = None
response.location.geo = None
response.location.distance = 20000 # 20 Km
response.contract_types = []
response.job_domains = []
response.keywords = []
response.notifications = JobCriteriaNotificationsTO()
response.notifications.timezone = None
response.notifications.how_often = JobNotificationSchedule.NEVER
response.notifications.delivery_day = 'monday'
response.notifications.delivery_time = 64800 # 18:00
job_criteria = JobMatchingCriteria.create_key(app_user).get() # type: JobMatchingCriteria
for contract_type in CONTRACT_TYPES:
to = JobKeyLabelTO()
to.key = contract_type
to.label = localize_jobs(user_profile.language, contract_type)
to.enabled = contract_type in job_criteria.contract_types if job_criteria else False
response.contract_types.append(to)
response.contract_types.sort(key=lambda item: item.label)
for domain in JOB_DOMAINS:
to = JobKeyLabelTO()
to.key = domain
to.label = localize_jobs(user_profile.language, domain)
to.enabled = domain in job_criteria.job_domains if job_criteria else False
response.job_domains.append(to)
response.job_domains.sort(key=lambda item: item.label)
if job_criteria:
response.active = job_criteria.active
response.location = JobCriteriaLocationTO()
response.location.address = job_criteria.address
response.location.geo = JobCriteriaGeoLocationTO()
response.location.geo.latitude = job_criteria.geo_location.lat
response.location.geo.longitude = job_criteria.geo_location.lon
response.location.distance = job_criteria.distance
response.keywords = job_criteria.keywords
if job_criteria.notifications:
response.notifications.how_often = job_criteria.notifications.how_often
if job_criteria.notifications.delivery_day:
response.notifications.delivery_day = job_criteria.notifications.delivery_day
if job_criteria.notifications.delivery_time:
response.notifications.delivery_time = job_criteria.notifications.delivery_time
else:
        response.active = True  # default for a first-time user
return response
@returns(SaveJobsCriteriaResponseTO)
@arguments(app_user=users.User, request=SaveJobsCriteriaRequestTO)
def save_job_criteria(app_user, request):
# type: (users.User, SaveJobsCriteriaRequestTO) -> SaveJobsCriteriaResponseTO
job_criteria_key = JobMatchingCriteria.create_key(app_user)
job_criteria = job_criteria_key.get() # type: JobMatchingCriteria
new_job_profile = not job_criteria
if new_job_profile:
if not request.criteria:
return SaveJobsCriteriaResponseTO(active=False, new_profile=new_job_profile)
job_criteria = JobMatchingCriteria(key=job_criteria_key)
job_criteria.last_load_request = datetime.utcnow()
job_criteria.demo = get_community(get_user_profile(app_user).community_id).demo
original_job_criteria = None
else:
original_job_criteria = job_criteria.to_dict(exclude=['notifications', 'active'])
notifications = None
job_criteria.active = request.active
if request.criteria:
location = request.criteria.location
notifications = request.criteria.notifications
if location.geo:
job_criteria.geo_location = ndb.GeoPt(location.geo.latitude, location.geo.longitude)
if location.address:
job_criteria.address = location.address
else:
job_criteria.address = coordinates_to_city(job_criteria.geo_location.lat,
job_criteria.geo_location.lon)
job_criteria.distance = location.distance
job_criteria.contract_types = sorted(request.criteria.contract_types)
job_criteria.job_domains = sorted(request.criteria.job_domains)
job_criteria.keywords = sorted(request.criteria.keywords)
if not job_criteria.job_domains:
raise RpcException('at_least_one_job_domain_required', app_user)
if not job_criteria.contract_types:
raise RpcException('at_least_one_contract_type_required', app_user)
updated_criteria = job_criteria.to_dict(exclude=['notifications', 'active'])
should_build_matches = original_job_criteria != updated_criteria
should_calculate_reminder = should_build_matches
should_clear_notifications = should_build_matches
og_notifications = job_criteria.notifications and job_criteria.notifications.to_dict()
if not job_criteria.notifications:
job_criteria.notifications = JobMatchingCriteriaNotifications()
job_criteria.notifications.how_often = JobNotificationSchedule.NEVER
if notifications and notifications.timezone:
job_criteria.notifications.timezone = notifications.timezone
if job_criteria.notifications.how_often != notifications.how_often:
delayed_notification_types = (JobNotificationSchedule.AT_MOST_ONCE_A_DAY,
JobNotificationSchedule.AT_MOST_ONCE_A_WEEK)
if job_criteria.notifications.how_often in delayed_notification_types and \
notifications.how_often not in delayed_notification_types:
should_clear_notifications = True
job_criteria.notifications.how_often = notifications.how_often
job_criteria.notifications.delivery_day = notifications.delivery_day
job_criteria.notifications.delivery_time = notifications.delivery_time
if not should_calculate_reminder:
should_calculate_reminder = job_criteria.notifications.to_dict() != og_notifications
job_criteria.put()
if should_build_matches:
deferred.defer(rebuild_matches_check_current, app_user, _queue=JOBS_WORKER_QUEUE)
if should_calculate_reminder:
deferred.defer(calculate_next_reminder, app_user, should_clear_notifications, _queue=JOBS_WORKER_QUEUE)
return SaveJobsCriteriaResponseTO(active=job_criteria.active, new_profile=new_job_profile)
def get_oca_logo_url(language):
if language.startswith('nl'):
return 'https://storage.googleapis.com/oca-files/jobs/OCA-nl.png'
return 'https://storage.googleapis.com/oca-files/jobs/OCA.png'
def get_jobs_for_activity_type(app_user, activity_type, cursor, ids):
# type: (users.User, unicode, Optional[unicode], List[int]) -> GetJobsResponseTO
job_criteria_key = JobMatchingCriteria.create_key(app_user)
user_profile_key = NdbUserProfile.createKey(app_user)
keys = [job_criteria_key, user_profile_key]
job_criteria, user_profile = ndb.get_multi(keys) # type: Optional[JobMatchingCriteria], NdbUserProfile
resp = GetJobsResponseTO()
if not job_criteria or not job_criteria.active:
resp.is_profile_active = False
resp.items = []
resp.cursor = None
resp.has_more = False
else:
if cursor is None and activity_type == JobOfferTO.ACTIVITY_TYPE_NEW:
job_criteria.last_load_request = datetime.utcnow()
job_criteria.put()
resp.items, resp.cursor, resp.has_more = _get_jobs(activity_type, app_user, cursor, user_profile.language, ids)
resp.is_profile_active = True
info = JobsInfoTO()
info.title = localize(user_profile.language, 'app_jobs_title')
info.description = localize(user_profile.language, 'app_jobs_description')
info.providers = [
JobOfferProviderTO(image_url=get_oca_logo_url(user_profile.language)),
JobOfferProviderTO(image_url='https://storage.googleapis.com/oca-files/jobs/VDAB.jpg'),
]
resp.info = info
return resp
def bulk_save_jobs(app_user, job_ids, status):
# type: (users.User, List[int], int) -> List[int]
keys = [JobMatch.create_key(app_user, job_id) for job_id in job_ids]
matches = ndb.get_multi(keys) # type: List[JobMatch]
to_put = []
for match in matches:
if not match:
continue
match.status = status
to_put.append(match)
ndb.put_multi(to_put)
return [match.get_job_id() for match in to_put]
@mapping('com.mobicage.capi.jobs.new_jobs_response_handler')
@returns(NoneType)
@arguments(context=RpcCAPICall, result=NewJobsResponseTO)
def new_jobs_response_handler(context, result):
pass
def _get_jobs(activity_type, app_user, cursor, language, ids):
# type: (str, users.User, Optional[str], str, List[int]) -> Tuple[List[JobOfferTO], Optional[str], bool]
fetch_size = 20
start_cursor = Cursor.from_websafe_string(cursor) if cursor else None
if activity_type == JobOfferTO.ACTIVITY_TYPE_NEW:
qry = JobMatch.list_new_by_app_user(app_user)
elif activity_type == JobOfferTO.ACTIVITY_TYPE_HISTORY:
qry = JobMatch.list_by_app_user_and_status(app_user, JobMatchStatus.DELETED)
elif activity_type == JobOfferTO.ACTIVITY_TYPE_STARRED:
qry = JobMatch.list_by_app_user_and_status(app_user, JobMatchStatus.STARRED)
else:
raise Exception('Unknown activity type %s' % activity_type)
job_matches_keys, new_cursor, has_more = qry.fetch_page(
fetch_size, start_cursor=start_cursor, keys_only=True) # type: List[ndb.Key], Cursor, bool
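    # Explicitly requested job ids are placed first; the paged matches are
    # appended after them, skipping any key that duplicates a requested id.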
match_keys = [JobMatch.create_key(app_user, job_id) for job_id in ids if job_id] + \
[key for key in job_matches_keys if key.id() not in ids]
offer_keys = [JobOffer.create_key(match_key.id()) for match_key in match_keys]
models = ndb.get_multi(match_keys + offer_keys) # type: List[Union[JobMatch, JobOffer]]
job_matches = models[0: len(models) / 2]
job_offers = models[len(models) / 2:]
items = []
to_put = []
for match, job_offer in zip(job_matches, job_offers): # type: JobMatch, JobOffer
if not match:
# this should only happen when the job was requested using the 'ids' property
# like when the jobs activity is opened via a button on a news item
if job_offer.id not in ids:
logging.warning('Expected JobMatch to exist, creating it anyway...')
logging.debug('Creating manual JobMatch entry for job %d', job_offer.id)
match = JobMatch.manually_create(app_user, job_offer.id)
to_put.append(match)
timestamp = get_epoch_from_datetime(match.update_date)
items.append(JobOfferTO.from_job_offer(job_offer, timestamp, language,
get_job_offer_actions(job_offer, match, language)))
ndb.put_multi(to_put)
return items, new_cursor.to_websafe_string().decode('utf-8') if new_cursor else None, has_more
def get_job_offer_actions(job_offer, match, language):
# type: (JobOffer, JobMatch, str) -> List[Union[JobOfferChatActionTO, JobOfferOpenActionTO]]
actions = []
if job_offer.source.type == JobOfferSourceType.OCA:
action = JobOfferChatActionTO()
action.label = localize(language, 'open_chat')
action.chat_key = match.chat_key # possibly None
action.icon = 'fa-comment'
actions.append(action)
return actions
def send_new_jobs_for_activity_types(app_user, activity_types):
user_profile = get_user_profile(app_user)
if not user_profile.get_mobiles():
return
request = NewJobsRequestTO()
request.creation_time = now()
request.activity_types = activity_types
    mobiles = db.get([get_mobile_key_by_account(mobile_detail.account)
                      for mobile_detail in user_profile.get_mobiles().values()])
for mobile in mobiles:
ios_push_id = None
if mobile.is_ios:
ios_push_id = mobile.iOSPushId
kwargs = {}
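        # Send at high priority when the device has an iOS push token
        # (presumably so the update can surface as a push notification).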
if ios_push_id:
kwargs[CAPI_KEYWORD_ARG_PRIORITY] = PRIORITY_HIGH
newJobs(new_jobs_response_handler, logError, app_user, request=request, MOBILE_ACCOUNT=mobile, **kwargs)
def get_job_chat_info(app_user, job_id):
# type: (users.User, int) -> GetJobChatInfoResponseTO
keys = [JobOffer.create_key(job_id), JobMatch.create_key(app_user, job_id)]
job_offer, job_match = ndb.get_multi(keys) # type: JobOffer, JobMatch
job_sln_id = long(job_offer.source.id)
solicitation = JobSolicitation.list_by_job_and_user(users.User(job_offer.service_email),
job_sln_id,
app_user.email()).get() # type: Optional[JobSolicitation]
lang = get_user_profile(app_user).language
response = GetJobChatInfoResponseTO()
response.anonymous = JobChatAnonymousTO()
response.job_id = job_id
response.anonymous.enabled = True
response.anonymous.default_value = False
response.default_text = ''
response.info_text = localize(lang, 'job_info_text')
if solicitation:
# User has already applied before, but deleted the chat.
# Add him back to the chat and return the original chat key.
job_match.chat_key = solicitation.chat_key
response.chat_key = solicitation.chat_key
with users.set_user(users.User(job_offer.service_email)):
add_chat_members(solicitation.chat_key, [app_user.email()])
job_match.put()
return response
def create_job_chat(app_user, request):
# type: (users.User, CreateJobChatRequestTO) -> CreateJobChatResponseTO
keys = [JobMatch.create_key(app_user, request.job_id),
JobOffer.create_key(request.job_id)]
job_match, job_offer = ndb.get_multi(keys) # type: JobMatch, JobOffer
if not job_match.chat_key:
# If you ever want to create a separate service for jobs, you'll have to create a service api callback for this
from solutions.common.jobs.solicitations import create_job_solicitation
message_key = create_job_solicitation(app_user, job_offer, request)
job_match.chat_key = message_key
job_match.put()
response = CreateJobChatResponseTO()
response.message_key = job_match.chat_key
return response<|fim▁end|> |
from google.appengine.ext import ndb, deferred, db |
<|file_name|>populate_mini_ws.py<|end_file_name|><|fim▁begin|>from biokbase.workspace.client import Workspace
import requests
import json
import sys
from time import time
from fix_workspace_info import fix_all_workspace_info
from pprint import pprint
kb_port = 9999
mini_ws_url = f"http://localhost:{kb_port}/services/ws"
mini_auth_url = f"http://localhost:{kb_port}/services/auth/testmode"
mini_ws_admin = "wsadmin"
narrative_spec_file = '../../../narrative_object.spec'
old_narrative_spec_file = 'old_narrative_object.spec'
test_narrative_data = 'narrative_test_data.json'
test_user = "kbasetest"
####
# BEFORE YOU RUN THIS:
# 1. Spin up mini_kb with the workspace env pointed to my branch:
# that is, the "-env" line in the ws command points to
# "https://raw.githubusercontent.com/briehl/mini_kb/master/deployment/conf/workspace-minikb.ini"
#
# 2. When this starts up, the workspace will complain. Auth is in testmode, and there's no test user/token set up
# for the Shock configuration. Do the following:
# a. enter the mongo container
# > docker exec -it mini_kb_ci-mongo_1 /bin/bash
# b. start mongo (just "mongo" at the prompt)
# c. Run the following to use gridFS:
# > use workspace
# > db.settings.findAndModify({ query: {backend: "shock"}, update: { $set: {"backend": "gridFS"} } })
# d. Exit that container, and restart the workspace container
# > docker-compose restart ws
#
# With the setup done, this script should do the job of creating accounts, importing the Narrative type,
# loading test data, etc.
def create_user(user_id):
"""
Returns a token for that user.
"""
headers = {
"Content-Type": "application/json"
}
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/user', headers=headers, data=json.dumps({'user': user_id, 'display': "User {}".format(user_id)}))
if r.status_code != 200 and r.status_code != 400:
print("Can't create dummy user!")
r.raise_for_status()
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/token', headers=headers, data=json.dumps({'user': user_id, 'type': 'Login'}))
if r.status_code != 200:
print("Can't make dummy token!")
r.raise_for_status()
token = json.loads(r.text)
return token['token']
def load_narrative_type(ws):
"""
Loads the KBaseNarrative.Narrative type info into mini kb.
ws = Workspace client configured for admin
"""
ws.request_module_ownership("KBaseNarrative")
ws.administer({
'command': 'approveModRequest',
'module': 'KBaseNarrative'
})
with open(old_narrative_spec_file, "r") as f:
old_spec = f.read()
ws.register_typespec({
'spec': old_spec,
'dryrun': 0,
'new_types': [
'Narrative',
'Cell',
'Worksheet',
'Metadata'
]
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
old_ver = n.split('-')[-1]
with open(narrative_spec_file, "r") as f:
spec = f.read()
ws.register_typespec({
'spec': spec,
'dryrun': 0,
'new_types': []
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
new_ver = n.split('-')[-1]
return {
'old_ver': old_ver,
'new_ver': new_ver
}
def load_narrative_test_data(ws, vers):
"""
Loads the test data set into mini kb ws.
Returns this structure:
wsid: {
narrative_id: int
correct_ws_meta: {}
correct_ws_perms: {}
}
there's more than 1 wsid (should be ~7-10), but that's it.
"""
with open(test_narrative_data, 'r') as f:
test_data = json.loads(f.read().strip())
uploaded_data = list()
for ws_data in test_data["old"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['old_ver']))
for ws_data in test_data["new"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['new_ver']))
return uploaded_data
def _load_workspace_data(ws, ws_data, idx, narrative_ver):
"""
Loads up a single workspace with data and returns a dict about it.
Dict contains:
id = the workspace id
perms = the workspace permissions
correct_meta = the correct workspace metadata (for validation)
"""
print(ws_data.keys())
narratives = ws_data['narratives']
ws_meta = ws_data['ws_meta']
ws_info = ws.create_workspace({"workspace": "NarrativeWS-{}-{}".format(idx, int(time()*1000))})
ws_id = ws_info[0]
info = {
"ws_id": ws_id,
"ws_info": ws_info,
"nar_info": [],
"perms": ws_data["perms"],
"correct_meta": ws_data["correct_meta"],
"loaded_meta": ws_meta
}
if len(narratives):
for idx, nar in enumerate(narratives):
objects = ws.save_objects({
'id': ws_id,
'objects': [{
'type': 'KBaseNarrative.Narrative-{}'.format(narrative_ver),
'data': nar,
'name': 'Narrative-{}'.format(idx)
}]
})
info['nar_info'].append(objects[0])
if len(ws_meta):
ws.alter_workspace_metadata({
'wsi': {'id': ws_id},
'new': ws_meta<|fim▁hole|>
perms = ws_data["perms"]
if len(perms) > 1:
admin_perm = perms['wsadmin']
ws.set_permissions({
'id': ws_id,
'new_permission': admin_perm,
'users': ['wsadmin']
})
return info
def main():
admin_token = create_user(mini_ws_admin)
admin_ws = Workspace(url=mini_ws_url, token=admin_token)
versions = load_narrative_type(admin_ws)
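    # NOTE: the versions returned above are immediately overridden with fixed
    # values below -- presumably pinned to keep repeat runs deterministic.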
versions = {
'old_ver': '1.0',
'new_ver': '2.0'
}
user_token = create_user(test_user)
user_ws = Workspace(url=mini_ws_url, token=user_token)
loaded_info = load_narrative_test_data(user_ws, versions)
pprint(loaded_info)
# fix_all_workspace_info(mini_ws_url, mini_auth_url, admin_token, 100)
# for ws_data in loaded_info:
# ws_id = ws_data['ws_id']
# ws_meta = user_ws.get_workspace_info({'id': ws_id})[8]
# try:
# assert(ws_meta == ws_data['correct_meta'])
# except:
# print("WS: {}".format(ws_id))
# pprint(ws_meta)
# print("doesn't match")
# pprint(ws_data['correct_meta'])
if __name__ == '__main__':
sys.exit(main())<|fim▁end|> | }) |
<|file_name|>first.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# usage: a debugging example script
import pdb
version=2.0
def my_add(a,b):
''' This is the function for addition of numbers and strings '''
print "value of a is {}".format(a)
print "value of b is {}".format(b)
return a+b
def my_div(a,b):
''' This is the function for division '''
return a/b
def my_sub(a,b):
    ''' This is the function for subtraction '''
    if a > b:
        return a - b
    elif b > a:
        return b - a
    else:
        # a == b, so the difference is zero
        return 0
def my_mul(a,b):
''' This is the function for multiplication '''
return a * b
# Application code
if __name__ == '__main__':
print "This is a example on understading debugging"
print "Congo, i learned to write a calculator"
pdb.set_trace()
print "summation of two numbers- {}".format(my_add(1,2))
print "multiplication of two numbers- {}".format(my_mul(1,2))
print "substartion of two numbers - {}".format(my_sub(1,2))<|fim▁hole|><|fim▁end|> | print "division of two numbers - {}".format(my_div(4,2)) |
<|file_name|>budget_tests.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2018 The PIVX developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "masternode-budget.h"
#include "tinyformat.h"
#include "utilmoneystr.h"
#include "test_syndicate.h"
#include <boost/test/unit_test.hpp>
BOOST_FIXTURE_TEST_SUITE(budget_tests, TestingSetup)<|fim▁hole|>
void CheckBudgetValue(int nHeight, std::string strNetwork, CAmount nExpectedValue)
{
CBudgetManager budget;
CAmount nBudget = budget.GetTotalBudget(nHeight);
std::string strError = strprintf("Budget is not as expected for %s. Result: %s, Expected: %s", strNetwork, FormatMoney(nBudget), FormatMoney(nExpectedValue));
BOOST_CHECK_MESSAGE(nBudget == nExpectedValue, strError);
}
BOOST_AUTO_TEST_CASE(budget_value)
{
SelectParams(CBaseChainParams::TESTNET);
int nHeightTest = Params().Zerocoin_Block_V2_Start() + 1;
CheckBudgetValue(nHeightTest, "testnet", 7300*COIN);
SelectParams(CBaseChainParams::MAIN);
nHeightTest = Params().Zerocoin_Block_V2_Start() + 1;
CheckBudgetValue(nHeightTest, "mainnet", 43200*COIN);
}
BOOST_AUTO_TEST_SUITE_END()<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>"""foo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include
from django.urls import path
urlpatterns = [
path("polls/", include("polls.urls")),
path("admin/", admin.site.urls),
]<|fim▁hole|> import debug_toolbar
urlpatterns = [
path("__debug__/", include(debug_toolbar.urls)),
] + urlpatterns<|fim▁end|> |
if settings.DEBUG: |
<|file_name|>scene.rs<|end_file_name|><|fim▁begin|>use event::Event;<|fim▁hole|>use gamestate::GameState;
pub type BoxedScene = Box<Scene + 'static>;
pub trait Scene {
fn handle_event(&mut self, e: &Event, state: &mut GameState) -> Option<BoxedScene>;
}<|fim▁end|> | |
<|file_name|>HomeAutomation.py<|end_file_name|><|fim▁begin|>from org.myrobotlab.net import BareBonesBrowserLaunch
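# Each helper below flips a Z-Wave switch through what appears to be a
# Vera/MiOS-style HTTP API (port 3480); ip_address is a placeholder for the
# controller's real address.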
def outsideLights(value):
    if value == 1:
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")<|fim▁hole|>def garageLights(value):
    if value == 1:
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
    else:
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=0")
def alarmOn(value):
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")<|fim▁end|> | else
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=0")
|
<|file_name|>device.py<|end_file_name|><|fim▁begin|>from lxml import etree
from nxpy.interface import Interface
from nxpy.vlan import Vlan
from nxpy.flow import Flow
from util import tag_pattern
class Device(object):
# Singleton
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(
Device, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
self.name = ''
self.domain_name = ''
self.interfaces = []
self.vlans = []
self.routing_options = []
def export(self, netconf_config=False):
config = etree.Element("configuration")<|fim▁hole|> if self.domain_name:
etree.SubElement(device, "domain-name").text = self.domain_name
if len(device.getchildren()):
config.append(device)
interfaces = etree.Element('interfaces')
if len(self.interfaces):
for interface in self.interfaces:
if (interface):
interfaces.append(interface.export())
config.append(interfaces)
vlans = etree.Element('vlans')
if len(self.vlans):
for vlan in self.vlans:
if (vlan):
vlans.append(vlan.export())
config.append(vlans)
routing_options = etree.Element('routing-options')
if len(self.routing_options):
for ro in self.routing_options:
if (ro):
routing_options.append(ro.export())
config.append(routing_options)
if netconf_config:
conf = etree.Element("config")
conf.append(config)
config = conf
if len(config.getchildren()):
return config
else:
return False
def build(self, node):
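        # Strip the XML namespace from each child tag and dispatch on the
        # local element name.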
for child in node:
nodeName_ = tag_pattern.match(child.tag).groups()[-1]
self.buildChildren(child, nodeName_)
def buildChildren(self, child_, nodeName_, from_subclass=False):
if nodeName_ == 'interfaces':
for node in child_:
obj_ = Interface()
obj_.build(node)
self.interfaces.append(obj_)
if nodeName_ == 'vlans':
for node in child_:
obj_ = Vlan()
obj_.build(node)
self.vlans.append(obj_)
if nodeName_ == 'routing-options':
for node in child_:
childName_ = tag_pattern.match(node.tag).groups()[-1]
# *************** FLOW ****************
if childName_ == 'flow':
obj_ = Flow()
obj_.build(node)
self.routing_options.append(obj_)<|fim▁end|> | device = etree.Element('system')
if self.name:
etree.SubElement(device, "host-name").text = self.name |
<|file_name|>windows.js<|end_file_name|><|fim▁begin|>import WindowController from "@/modv/window-controller";
import getLargestWindow from "@/modv/get-largest-window";
const state = {<|fim▁hole|> windows: [],
size: { width: 0, height: 0 }
};
// We can't store Window Objects in Vuex because the Observer traversal exceeds the stack size
const externalState = [];
// getters
const getters = {
allWindows: state => state.windows,
windowReference: () => index => externalState[index],
largestWindowSize: state => state.size,
largestWindowReference() {
return () => getLargestWindow(state.windows).window || externalState[0];
},
largestWindowController() {
return () => getLargestWindow(state.windows).controller;
},
getWindowById: state => id =>
state.windows.find(windowController => windowController.id === id),
windowIds: state => state.windows.map(windowController => windowController.id)
};
// actions
const actions = {
createWindow({ commit }, { Vue }) {
const number = state.windows.length;
return new WindowController(Vue, number).then(windowController => {
const windowRef = windowController.window;
delete windowController.window;
commit("addWindow", { windowController, windowRef });
return windowController;
});
},
destroyWindow({ commit }, { windowRef }) {
commit("removeWindow", { windowRef });
},
resize({ state, commit }, { width, height, dpr }) {
state.windows.forEach(windowController => {
windowController.resize(width, height, dpr, false);
});
commit("setSize", { width, height, dpr });
}
};
// mutations
const mutations = {
addWindow(state, { windowController, windowRef }) {
const index = state.windows.length;
windowController.window = index;
state.windows.push(windowController);
externalState.push(windowRef);
getters.largestWindowReference();
},
removeWindow(state, { windowRef }) {
state.windows.splice(windowRef, 1);
externalState.splice(windowRef, 1);
getters.largestWindowReference();
},
setSize(state, { width, height, dpr }) {
state.size = {
width,
height,
dpr,
area: width * height
};
}
};
export default {
namespaced: true,
state,
getters,
actions,
mutations
};<|fim▁end|> | |
<|file_name|>test_functions.py<|end_file_name|><|fim▁begin|># c: 14.04.2008, r: 14.04.2008
import numpy as nm
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/square_unit_tri.mesh'
def get_pars(ts, coors, mode=None, region=None, ig=None, extra_arg=None):
if mode == 'special':
if extra_arg == 'hello!':
ic = 0
else:
ic = 1
return {('x_%s' % ic) : coors[:,ic]}
def get_p_edge(ts, coors, bc=None):
if bc.name == 'p_left':
return nm.sin(nm.pi * coors[:,1])
else:
return nm.cos(nm.pi * coors[:,1])
def get_circle(coors, domain=None):
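    # Select the vertices lying inside a circle of radius 0.2 centred at the
    # origin.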
r = nm.sqrt(coors[:,0]**2.0 + coors[:,1]**2.0)
return nm.where(r < 0.2)[0]
functions = {
'get_pars1' : (lambda ts, coors, mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hello!'),),
'get_p_edge' : (get_p_edge,),
'get_circle' : (get_circle,),
}
# Just another way of adding a function, besides 'functions' keyword.
function_1 = {
'name' : 'get_pars2',
'function' : lambda ts, coors,mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hi!'),
}
materials = {
'mf1' : (None, 'get_pars1'),
'mf2' : 'get_pars2',
    # Dot denotes a special value that is not propagated to all quadrature points (QP).
'mf3' : ({'a' : 10.0, 'b' : 2.0, '.c' : 'ahoj'},),
}
fields = {
'pressure' : (nm.float64, 1, 'Omega', 2),
}
variables = {
'p' : ('unknown field', 'pressure', 0),
'q' : ('test field', 'pressure', 'p'),
}
wx = 0.499
regions = {
'Omega' : ('all', {}),
'Left' : ('nodes in (x < -%.3f)' % wx, {}),
'Right' : ('nodes in (x > %.3f)' % wx, {}),
'Circle' : ('nodes by get_circle', {}),
}
<|fim▁hole|> 'p_left' : ('Left', {'p.all' : 'get_p_edge'}),
'p_right' : ('Right', {'p.all' : 'get_p_edge'}),
}
equations = {
'e1' : """dw_laplace.2.Omega( mf3.a, q, p ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
}
fe = {
'chunk_size' : 1000
}
from sfepy.base.testing import TestCommon, assert_
from sfepy.base.base import pause, debug
class Test( TestCommon ):
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf(conf)
test = Test(problem = problem, conf = conf, options = options)
return test
from_conf = staticmethod( from_conf )
def test_material_functions(self):
problem = self.problem
ts = problem.get_default_ts(step=0)
problem.materials.time_update(ts,
problem.domain,
problem.equations)
coors = problem.domain.get_mesh_coors()
mat1 = problem.materials['mf1']
assert_(nm.all(coors[:,0] == mat1.get_data(None, None, 'x_0')))
mat2 = problem.materials['mf2']
assert_(nm.all(coors[:,1] == mat2.get_data(None, None, 'x_1')))
mat3 = problem.materials['mf3']
key = mat3.get_keys(region_name='Omega')[0]
assert_(nm.all(mat3.get_data(key, 0, 'a') == 10.0))
assert_(nm.all(mat3.get_data(key, 0, 'b') == 2.0))
assert_(mat3.get_data(None, None, 'c') == 'ahoj')
return True
# mat.time_update(ts, problem)
def test_ebc_functions(self):
import os.path as op
problem = self.problem
problem.set_equations(self.conf.equations)
problem.time_update()
vec = problem.solve()
name = op.join(self.options.out_dir,
op.splitext(op.basename(__file__))[0] + '_ebc.vtk')
problem.save_state(name, vec)
ok = True
domain = problem.domain
iv = domain.regions['Left'].get_vertices(0)
coors = domain.get_mesh_coors()[iv]
ok = ok and self.compare_vectors(vec[iv], nm.sin(nm.pi * coors[:,1]),
label1='state_left', label2='bc_left')
iv = domain.regions['Right'].get_vertices(0)
coors = domain.get_mesh_coors()[iv]
ok = ok and self.compare_vectors(vec[iv], nm.cos(nm.pi * coors[:,1]),
label1='state_right', label2='bc_right')
return ok
def test_region_functions(self):
import os.path as op
problem = self.problem
name = op.join(self.options.out_dir,
op.splitext(op.basename(__file__))[0])
problem.save_regions(name, ['Circle'])
return True<|fim▁end|> | ebcs = { |
<|file_name|>89.py<|end_file_name|><|fim▁begin|>"""
89. Gray Code
https://leetcode.com/problems/gray-code/
"""
from typing import List
class Solution:
def grayCode(self, n: int) -> List[int]:
res = [0]
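        # Reflect the sequence built so far and set bit i on the mirrored
        # half; adjacent codes then differ in exactly one bit.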
for i in range(n):
res += [x + 2**i for x in reversed(res)]
return res
def main():
s = Solution()
print(s.grayCode(3))
<|fim▁hole|>
if __name__ == '__main__':
raise(SystemExit(main()))<|fim▁end|> | |
<|file_name|>FieldInfo.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package com.github.am0e.jbeans;
import java.lang.annotation.Annotation;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles.Lookup;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Modifier;
/** Represents a field that can be accessed directly if the field is public or via an associated getter or
* setter.
* The class provides a getter and a setter to set the associated field value in an object.
*
* @author Anthony (ARPT)
*/
/**
* @author anthony
*
*/
public final class FieldInfo implements BaseInfo {
/**
* Field name
*/
final String name;
/**
* Field name hashcode.
*/
final int hash;
/**
* The bean field.
*/
final Field field;
/**
* Optional setter method. If this field is public, this will contain null.
*/
final MethodInfo setter;
/**
* Optional getter method. If this field is public, this will contain null.
*/
final MethodInfo getter;
/**
* If the field is a parameterized List or Map, this field will contain the
* class type of the value stored in the list or map. in the parameter. Eg:
* List<String> it will contain String. For Map<String,Double>
* it will contain Double.
*/
final Class<?> actualType;
FieldInfo(Field field, MethodInfo getter, MethodInfo setter) {
// Get the type of the field.
//
this.actualType = BeanUtils.getActualTypeFromMethodOrField(null, field);
this.field = field;
this.setter = setter;
this.getter = getter;
this.name = field.getName().intern();
this.hash = this.name.hashCode();
}
public Field getField() {
return field;
}
public Class<?> getType() {
return field.getType();
}
public Class<?> getActualType() {
return actualType;
}
public String getName() {
return name;
}
public String toString() {
return field.getDeclaringClass().getName() + "#" + name;
}
public boolean isField() {
return field == null ? false : true;
}
/**
* Returns true if the field value can be retrieved either through the
* public field itself or through a public getter method.
*/
public final boolean isReadable() {
return (Modifier.isPublic(field.getModifiers()) || getter != null);
}
public final boolean isSettable() {
return (Modifier.isPublic(field.getModifiers()) || setter != null);
}
public final boolean isTransient() {
return (Modifier.isTransient(field.getModifiers()));
}
public final Object callGetter(Object bean) throws BeanException {
if (bean == null)
return null;
// If the field is public, get the value directly.
//
try {
if (getter != null) {
// Use the public getter. We will always attempt to use this
// FIRST!!
//
return getter.method.invoke(bean);
}
if (!Modifier.isPublic(field.getModifiers())) {
throw BeanException.fmtExcStr("Field not gettable", bean, getName(), null);
}
return field.get(bean);
<|fim▁hole|> } catch (IllegalArgumentException | IllegalAccessException e) {
throw BeanException.fmtExcStr("callGetter", bean, getName(), e);
} catch (InvocationTargetException e) {
throw BeanUtils.wrapError(e.getCause());
}
}
public final void callSetter(Object bean, Object value) throws BeanException {
value = BeanUtils.cast(value, field.getType());
try {
// Use the public setter. We will always attempt to use this FIRST!!
//
if (setter != null) {
setter.method.invoke(bean, value);
return;
}
if (!Modifier.isPublic(field.getModifiers())) {
throw BeanException.fmtExcStr("Field not settable", bean, getName(), null);
}
// If the field is public, set the value directly.
//
field.set(bean, value);
} catch (IllegalArgumentException | IllegalAccessException e) {
throw BeanException.fmtExcStr("callSetter", bean, getName(), e);
} catch (InvocationTargetException e) {
throw BeanUtils.wrapError(e.getCause());
}
}
/**
* Converts a value into a value of the bean type.
*
* @param value
* The value to convert.
* @return If the value could not be converted, the value itself is
* returned. For example: if (beanField.valueOf(strVal)==strVal)
* throw new IllegalArgumentException();
*/
public final Object valueOf(Object value) {
return BeanUtils.cast(value, actualType);
}
@Override
public <T extends Annotation> T getAnnotation(Class<T> type) {
return field.getAnnotation(type);
}
@Override
public boolean isAnnotationPresent(Class<? extends Annotation> type) {
return field.getAnnotation(type) == null ? false : true;
}
@Override
public MethodHandle getHandle(Lookup lookup, boolean setter) {
try {
if (setter)
return lookup.findSetter(field.getDeclaringClass(), name, field.getType());
else
return lookup.findGetter(field.getDeclaringClass(), name, field.getType());
} catch (IllegalAccessException | NoSuchFieldException e) {
throw new BeanException(e);
}
}
@Override
public String makeSignature(StringBuilder sb) {
sb.setLength(0);
sb.append(getType().toString());
sb.append(' ');
sb.append(getName());
return sb.toString();
}
}<|fim▁end|> | |
<|file_name|>client_legacy.py<|end_file_name|><|fim▁begin|>'''
Created on May 6, 2014
@author: cmills
The idea here is to provide client-side functions to interact with the TASR
repo. We use the requests package here. We provide both stand-alone functions
and a class with methods. The class is easier if you are using non-default
values for the host or port.
'''
import requests
import tasr.app
from tasr.registered_schema import RegisteredAvroSchema
from tasr.headers import SubjectHeaderBot, SchemaHeaderBot
from tasr.client import TASRError, reg_schema_from_url
APP = tasr.app.TASR_APP
APP.set_config_mode('local')
TASR_HOST = APP.config.host
TASR_PORT = APP.config.port
TIMEOUT = 2 # seconds
def get_active_topics(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/active_topics
Retrieves available metadata for active topics (i.e. -- groups) with
registered schemas. A dict of <topic name>:<topic metadata> is returned.
'''
url = 'http://%s:%s/tasr/active_topics' % (host, port)
resp = requests.get(url, timeout=timeout)
if resp == None:
raise TASRError('Timeout for request to %s' % url)
if not 200 == resp.status_code:
raise TASRError('Failed request to %s (status code: %s)' %
(url, resp.status_code))
topic_metas = SubjectHeaderBot.extract_metadata(resp)
return topic_metas
def get_all_topics(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic
Retrieves available metadata for all the topics (i.e. -- groups) with
registered schemas. A dict of <topic name>:<topic metadata> is returned.
'''
url = 'http://%s:%s/tasr/topic' % (host, port)
resp = requests.get(url, timeout=timeout)
if resp == None:
raise TASRError('Timeout for request to %s' % url)
if not 200 == resp.status_code:
raise TASRError('Failed request to %s (status code: %s)' %
(url, resp.status_code))
topic_metas = SubjectHeaderBot.extract_metadata(resp)
return topic_metas
def register_schema(topic_name, schema_str, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' PUT /tasr/topic/<topic name>
Register a schema string for a topic. Returns a SchemaMetadata object
with the topic-version, topic-timestamp and ID metadata.
'''
url = 'http://%s:%s/tasr/topic/%s' % (host, port, topic_name)
headers = {'content-type': 'application/json; charset=utf8', }
rs = reg_schema_from_url(url, method='PUT', data=schema_str,
headers=headers, timeout=timeout)
return rs
def get_latest_schema(topic_name, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic/<topic name>
Retrieve the latest schema registered for the given topic name. Returns a
RegisteredSchema object back.
'''
return get_schema_version(topic_name, None, host, port, timeout)
def get_schema_version(topic_name, version, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic/<topic name>/version/<version>
Retrieve a specific schema registered for the given topic name identified
by a version (a positive integer). Returns a RegisteredSchema object.
'''
url = ('http://%s:%s/tasr/topic/%s/version/%s' %
(host, port, topic_name, version))
return reg_schema_from_url(url, timeout=timeout,
err_404='No such version.')
def schema_for_id_str(id_str, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/id/<ID string>
Retrieves a schema that has been registered for at least one topic name as
identified by a hash-based ID string. The ID string is a base64 encoded
byte sequence, starting with a 1-byte ID type and followed by fingerprint
bytes for the ID type. For example, with an SHA256-based ID, a fingerprint
is 32 bytes in length, so there would be 33 ID bytes, which would produce
an ID string of length 44 once base64-encoded. The MD5-based IDs are 17
bytes (1 + 16), producing ID strings of length 24. A RegisteredSchema
object is returned.
'''<|fim▁hole|>
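# (Length arithmetic: base64 emits 4 chars per 3 input bytes, rounded up, so
# 33 ID bytes -> 44 chars and 17 bytes -> 24 chars including padding.)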
def schema_for_schema_str(schema_str, object_on_miss=False,
host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' POST /tasr/schema
In essence this is very similar to the schema_for_id_str, but with the
calculation of the ID string being moved to the server. That is, the
client POSTs the schema JSON itself, the server canonicalizes it, then
calculates the SHA256-based ID string for what was sent, then looks for
a matching schema based on that ID string. This allows clients that do not
know how to canonicalize or hash the schemas to find the metadata (is it
registered, what version does it have for a topic) with what they have.
A RegisteredSchema object is returned if the schema string POSTed has been
registered for one or more topics.
If the schema string POSTed has yet to be registered for a topic and the
object_on_miss flag is True, a RegisteredSchema calculated for the POSTed
schema string is returned (it will have no topic-versions as there are
none). This provides an easy way for a client to get the ID strings to
use for subsequent requests.
If the object_on_miss flag is False (the default), then a request for a
previously unregistered schema will raise a TASRError.
'''
url = 'http://%s:%s/tasr/schema' % (host, port)
headers = {'content-type': 'application/json; charset=utf8', }
resp = requests.post(url, data=schema_str, headers=headers,
timeout=timeout)
if resp == None:
raise TASRError('Timeout for request to %s' % url)
if 200 == resp.status_code:
# success -- return a normal reg schema
ras = RegisteredAvroSchema()
ras.schema_str = resp.context
schema_meta = SchemaHeaderBot.extract_metadata(resp)
ras.update_from_schema_metadata(schema_meta)
return ras
elif 404 == resp.status_code and object_on_miss:
ras = RegisteredAvroSchema()
ras.schema_str = schema_str
schema_meta = SchemaHeaderBot.extract_metadata(resp)
ras.update_from_schema_metadata(schema_meta)
return ras
raise TASRError('Schema not registered to any topics.')
#############################################################################
# Wrapped in a class
#############################################################################
class TASRLegacyClient(object):
'''An object means you only need to specify the host settings once.
'''
def __init__(self, host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
self.host = host
self.port = port
self.timeout = timeout
# topic calls
def get_active_topics(self):
'''Returns a dict of <topic name>:<metadata> for active topics.'''
return get_active_topics(self.host, self.port, self.timeout)
def get_all_topics(self):
'''Returns a dict of <topic name>:<metadata> for all topics.'''
return get_all_topics(self.host, self.port, self.timeout)
# schema calls
def register_schema(self, topic_name, schema_str):
'''Register a schema for a topic'''
        return register_schema(topic_name, schema_str,
                               self.host, self.port, self.timeout)
def get_latest_schema(self, topic_name):
'''Get the latest schema registered for a topic'''
return get_latest_schema(topic_name,
self.host, self.port, self.timeout)
def get_schema_version(self, topic_name, version=None):
'''Get a schema by version for the topic'''
return get_schema_version(topic_name, version,
self.host, self.port, self.timeout)
def schema_for_id_str(self, id_str):
'''Get a schema identified by an ID str.'''
return schema_for_id_str(id_str,
self.host, self.port, self.timeout)
def schema_for_schema_str(self, schema_str):
'''Get a schema object using a (non-canonical) schema string.'''
return schema_for_schema_str(schema_str,
self.host, self.port, self.timeout)<|fim▁end|> | url = 'http://%s:%s/tasr/id/%s' % (host, port, id_str)
return reg_schema_from_url(url, timeout=timeout,
err_404='No schema for id.') |
<|file_name|>dependencygraph.py<|end_file_name|><|fim▁begin|>from dirbalak import graph
from dirbalak import repomirrorcache
from dirbalak import describetime
from upseto import gitwrapper
class DependencyGraph:
_CONTINUOUS_INTEGRATION_VIOLATION_TIME = 14 * 24 * 60 * 60
def __init__(self, dependencies, getNodeAttributesCallback):
self._dependencies = dependencies
self._getNodeAttributesCallback = getNodeAttributesCallback
self._cachedGraph = None
def renderText(self):
return "\n".join([str(d) for d in self._dependencies]) + \
"\n\n" + self.makeGraph().renderAsTreeText()
def makeGraph(self):
if self._cachedGraph is None:
self._cachedGraph = self._makeGraph()
return self._cachedGraph
def _makeGraph(self):
graphInstance = graph.Graph(dict(ranksep=0.7))
for dep in self._dependencies:
self._addNodeToGraph(graphInstance, dep.gitURL)
if dep.requiringURL is not None:
if dep.requiringURLHash != 'origin/master':
continue
self._addNodeToGraph(graphInstance, dep.requiringURL)
self._addArcToGraph(graphInstance, dep)
return graphInstance
    def _lineStyleFromDependencyType(self, dependencyType):
        if dependencyType == 'upseto':
            return 'solid'
        elif dependencyType == 'solvent':
            return 'dashed'
        elif dependencyType == 'dirbalak_build_rootfs':
            return 'dotted'
        else:
            raise AssertionError("Unknown type %s" % dependencyType)
def _addArcToGraph(self, graphInstance, dep):
basename = gitwrapper.originURLBasename(dep.gitURL)
mirror = repomirrorcache.get(dep.gitURL)
distance = mirror.distanceFromMaster(dep.hash)
requiringBasename = gitwrapper.originURLBasename(dep.requiringURL)
graphInstance.addArc(<|fim▁hole|> requiringBasename, basename, style=self._lineStyleFromDependencyType(dep.type),
            **self._attributesFromDistanceFromMaster(distance))
def _addNodeToGraph(self, graphInstance, gitURL):
basename = gitwrapper.originURLBasename(gitURL)
attributes = self._getNodeAttributesCallback(gitURL)
attributes['label'] = basename
graphInstance.setNodeAttributes(basename, **attributes)
def _attributesFromDistanceFromMaster(self, distance):
if distance is None:
return {}
else:
if distance['broken']:
return dict(color="orange", label="broken")
else:
label = "behind:\\n%d commits" % distance['commits']
color = "#000000"
if 'time' in distance:
label += "\\n%s" % describetime.describeTime(distance['time'])
if distance['time'] > self._CONTINUOUS_INTEGRATION_VIOLATION_TIME:
color = "#FF0000"
else:
color = "#990000"
return dict(color=color, label=label)<|fim▁end|> | |
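
    # Illustrative outputs (assumed inputs; describeTime() text abbreviated):
    #   _attributesFromDistanceFromMaster(None)             -> {}
    #   _attributesFromDistanceFromMaster({'broken': True}) -> dict(color="orange", label="broken")
    #   _attributesFromDistanceFromMaster({'broken': False, 'commits': 2})
    #                                  -> dict(color="#000000", label="behind:\\n2 commits")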
<|file_name|>test_controller.py<|end_file_name|><|fim▁begin|>import pytest
import time
import ray
from ray import serve
def test_redeploy_start_time(serve_instance):
"""Check that redeploying a deployment doesn't reset its start time."""
controller = serve.api._global_client._controller
@serve.deployment
def test(_):
return "1"
<|fim▁hole|> test.deploy()
deployment_info_1, route_1 = ray.get(controller.get_deployment_info.remote("test"))
start_time_ms_1 = deployment_info_1.start_time_ms
time.sleep(0.1)
@serve.deployment
def test(_):
return "2"
test.deploy()
deployment_info_2, route_2 = ray.get(controller.get_deployment_info.remote("test"))
start_time_ms_2 = deployment_info_2.start_time_ms
assert start_time_ms_1 == start_time_ms_2
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))<|fim▁end|> | |
<|file_name|>splitvec.rs<|end_file_name|><|fim▁begin|>//! Test module for SplitVec
use cripes::util::splitvec::SplitVec;
<|fim▁hole|> v.push_and_copy_state();
panic_unless_eq!(&[1, 2, 3], &*v);
v.pop();
panic_unless_eq!(&[1, 2], &*v);
v.pop_state();
panic_unless_eq!(&[1, 2, 3], &*v);
}
/// Check that `pop_and_merge_states` acts as advertised -- merging the current
/// state with the first one on the stack.
#[test]
fn test_pop_and_merge_states() {
let mut v = SplitVec::from(vec![1, 2, 3]);
v.push_state();
v.extend_from_slice(&[4, 5, 6]);
panic_unless_eq!(&[4, 5, 6], &*v);
v.pop_and_merge_states();
panic_unless_eq!(&[1, 2, 3, 4, 5, 6], &*v);
}
#[test]
fn test_dedup() {
{
let mut v = SplitVec::from(vec![1, 2, 2, 3]);
v.dedup();
panic_unless_eq!(&[1, 2, 3], &*v);
}
{
let mut v = SplitVec::from(vec![1, 1, 2, 2, 3, 3]);
v.dedup();
panic_unless_eq!(&[1, 2, 3], &*v);
}
{
let mut v = SplitVec::from(vec![1, 1, 2, 2, 3, 3]);
v.push_state();
v.dedup();
panic_unless_eq!(&[], &*v);
v.extend_from_slice(&[4, 5, 5, 6]);
v.dedup();
panic_unless_eq!(&[4, 5, 6], &*v);
v.pop_state();
panic_unless_eq!(&[1, 1, 2, 2, 3, 3], &*v);
v.dedup();
panic_unless_eq!(&[1, 2, 3], &*v);
}
}
#[test]
fn test_fmt_debug() {
{
let mut v = SplitVec::from(vec![1, 2, 3]);
panic_unless_eq!("[1, 2, 3]", format!("{:?}", v));
v.push_state();
panic_unless_eq!("[1, 2, 3 | ]", format!("{:?}", v));
v.extend_from_slice(&[4, 5, 6]);
panic_unless_eq!("[1, 2, 3 | 4, 5, 6]", format!("{:?}", v));
}
}
#[test]
fn test_index() {
let mut v = SplitVec::from(&[1, 2, 3]);
panic_unless_eq!(&[1, 2, 3], &v[..]);
v.push_state();
panic_unless_eq!(&[], &v[..]);
}<|fim▁end|> |
#[test]
fn test_push_and_copy_state() {
let mut v = SplitVec::from(vec![1, 2, 3]); |
<|file_name|>upload_clinic_codes.py<|end_file_name|><|fim▁begin|>from csv import DictReader
from django.core.management.base import BaseCommand
from registrations.models import ClinicCode
class Command(BaseCommand):
help = (
"This command takes in a CSV with the columns: uid, code, facility, province,"
"and location, and creates/updates the cliniccodes in the database."
"This will only add or update, it will not remove"
)
def add_arguments(self, parser):
parser.add_argument("data_csv", type=str, help=("The CSV with the data in it"))
def normalise_location(self, location):
"""
Normalises the location from `[longitude,latitude]` to ISO6709
"""
def fractional_part(f):
if not float(f) % 1:
return ""
parts = f.split(".")
return f".{parts[1]}"
try:
longitude, latitude = location.strip("[]").split(",")
return (
f"{int(float(latitude)):+03d}{fractional_part(latitude)}"
f"{int(float(longitude)):+04d}{fractional_part(longitude)}"
"/"
)
except (AttributeError, ValueError, TypeError):
return None
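
    # Illustrative behaviour of normalise_location (hypothetical inputs):
    #   "[28.0473,-26.2041]" -> "-26.2041+028.0473/"
    #   "[18,-33]"           -> "-33+018/"
    #   None / malformed     -> None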
def handle(self, *args, **kwargs):
updated = 0
created = 0
with open(kwargs["data_csv"]) as f:
reader = DictReader(f)
for row in reader:
_, new = ClinicCode.objects.update_or_create(
uid=row["uid"].strip(),
defaults={
"code": row["code"].strip(),
"value": row["code"].strip(),
"name": row["facility"].strip(),
"province": {
"ec": "ZA-EC",
"fs": "ZA-FS",
"gp": "ZA-GT",
"kz": "ZA-NL",
"lp": "ZA-LP",
"mp": "ZA-MP",
"nc": "ZA-NC",
"nw": "ZA-NW",
"wc": "ZA-WC",
}[row["province"].strip()[:2].lower()],
"location": self.normalise_location(row["location"].strip()),
},
)
if new:
created += 1
else:
updated += 1
self.success(f"Updated {updated} and created {created} clinic codes")
<|fim▁hole|>
def success(self, msg):
self.log(self.style.SUCCESS, msg)<|fim▁end|> | def log(self, level, msg):
self.stdout.write(level(msg)) |
<|file_name|>test_utils.py<|end_file_name|><|fim▁begin|># Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally import exceptions
from rally.plugins.openstack.scenarios.cinder import utils
from tests.unit import fakes
from tests.unit import test
CINDER_UTILS = "rally.plugins.openstack.scenarios.cinder.utils"
CONF = cfg.CONF
class CinderScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(CinderScenarioTestCase, self).setUp()
self.scenario = utils.CinderScenario(self.context)
def test__list_volumes(self):
return_volumes_list = self.scenario._list_volumes()
self.assertEqual(self.clients("cinder").volumes.list.return_value,
return_volumes_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_volumes")
def test__list_snapshots(self):
return_snapshots_list = self.scenario._list_snapshots()
self.assertEqual(
self.clients("cinder").volume_snapshots.list.return_value,
return_snapshots_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_snapshots")
def test__set_metadata(self):
volume = fakes.FakeVolume()
self.scenario._set_metadata(volume, sets=2, set_size=4)
calls = self.clients("cinder").volumes.set_metadata.call_args_list
self.assertEqual(len(calls), 2)
for call in calls:
call_volume, metadata = call[0]
self.assertEqual(call_volume, volume)
self.assertEqual(len(metadata), 4)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.set_4_metadatas_2_times")
def test__delete_metadata(self):
volume = fakes.FakeVolume()
keys = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"]
self.scenario._delete_metadata(volume, keys, deletes=3, delete_size=4)
calls = self.clients("cinder").volumes.delete_metadata.call_args_list
self.assertEqual(len(calls), 3)
all_deleted = []
for call in calls:
call_volume, del_keys = call[0]
self.assertEqual(call_volume, volume)
self.assertEqual(len(del_keys), 4)
for key in del_keys:
self.assertIn(key, keys)
self.assertNotIn(key, all_deleted)
all_deleted.append(key)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_4_metadatas_3_times")
def test__delete_metadata_not_enough_keys(self):
volume = fakes.FakeVolume()
keys = ["a", "b", "c", "d", "e"]
self.assertRaises(exceptions.InvalidArgumentsException,
self.scenario._delete_metadata,
volume, keys, deletes=2, delete_size=3)
def test__create_volume(self):
return_volume = self.scenario._create_volume(1)
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").volumes.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_volume")
@mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
def test__create_volume_with_size_range(self, mock_random):
mock_random.randint.return_value = 3
return_volume = self.scenario._create_volume(
size={"min": 1, "max": 5},
display_name="TestVolume")
self.clients("cinder").volumes.create.assert_called_once_with(
3, display_name="TestVolume")
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").volumes.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_volume")
def test__update_volume(self):
fake_volume = mock.MagicMock()
volume_update_args = {"display_name": "_updated",
"display_description": "_updated"}
self.scenario.generate_random_name = mock.Mock()
self.scenario._update_volume(fake_volume, **volume_update_args)
self.clients("cinder").volumes.update.assert_called_once_with(
fake_volume,
display_name=self.scenario.generate_random_name.return_value,
display_description="_updated")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.update_volume")
def test__delete_volume(self):
cinder = mock.Mock()
self.scenario._delete_volume(cinder)
cinder.delete.assert_called_once_with()
self.mock_wait_for_status.mock.assert_called_once_with(
cinder,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_volume")
@mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
def test__extend_volume_with_size_range(self, mock_random):
volume = mock.Mock()
mock_random.randint.return_value = 3
self.clients("cinder").volumes.extend.return_value = volume<|fim▁hole|> self.scenario._extend_volume(volume, new_size={"min": 1, "max": 5})
volume.extend.assert_called_once_with(volume, 3)
self.mock_wait_for.mock.assert_called_once_with(
volume,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.extend_volume")
def test__extend_volume(self):
volume = mock.Mock()
self.clients("cinder").volumes.extend.return_value = volume
self.scenario._extend_volume(volume, 2)
self.mock_wait_for.mock.assert_called_once_with(
volume,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.extend_volume")
def test__upload_volume_to_image(self):
volume = mock.Mock()
image = {"os-volume_upload_image": {"image_id": 1}}
volume.upload_to_image.return_value = (None, image)
self.clients("cinder").images.get.return_value = image
self.scenario.generate_random_name = mock.Mock(
return_value="test_vol")
self.scenario._upload_volume_to_image(volume, False,
"container", "disk")
volume.upload_to_image.assert_called_once_with(False, "test_vol",
"container", "disk")
self.mock_wait_for.mock.assert_has_calls([
mock.call(
volume,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.
cinder_volume_create_poll_interval),
mock.call(
self.clients("glance").images.get.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
glance_image_create_poll_interval)
])
self.mock_get_from_manager.mock.assert_has_calls([mock.call(),
mock.call()])
self.mock_resource_is.mock.assert_has_calls([mock.call("available"),
mock.call("active")])
self.clients("glance").images.get.assert_called_once_with(1)
def test__create_snapshot(self):
return_snapshot = self.scenario._create_snapshot("uuid", False)
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").volume_snapshots.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_snapshot)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_snapshot")
def test__delete_snapshot(self):
snapshot = mock.Mock()
self.scenario._delete_snapshot(snapshot)
snapshot.delete.assert_called_once_with()
self.mock_wait_for_status.mock.assert_called_once_with(
snapshot,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_snapshot")
def test__create_backup(self):
return_backup = self.scenario._create_backup("uuid")
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").backups.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_backup)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_backup")
def test__delete_backup(self):
backup = mock.Mock()
self.scenario._delete_backup(backup)
backup.delete.assert_called_once_with()
self.mock_wait_for_status.mock.assert_called_once_with(
backup,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_backup")
def test__restore_backup(self):
backup = mock.Mock()
restore = mock.Mock()
self.clients("cinder").restores.restore.return_value = backup
self.clients("cinder").volumes.get.return_value = restore
return_restore = self.scenario._restore_backup(backup.id, None)
self.mock_wait_for.mock.assert_called_once_with(
restore,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_restore)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.restore_backup")
def test__list_backups(self):
return_backups_list = self.scenario._list_backups()
self.assertEqual(
self.clients("cinder").backups.list.return_value,
return_backups_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_backups")
def test__get_random_server(self):
servers = [1, 2, 3]
context = {"user": {"tenant_id": "fake"},
"users": [{"tenant_id": "fake",
"users_per_tenant": 1}],
"tenant": {"id": "fake", "servers": servers}}
self.scenario.context = context
self.scenario.clients = mock.Mock()
self.scenario.clients("nova").servers.get = mock.Mock(
side_effect=lambda arg: arg)
server_id = self.scenario.get_random_server()
self.assertIn(server_id, servers)<|fim▁end|> | |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# sammy documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sammy'
copyright = u'2014, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sammydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sammy.tex', u'sammy Documentation',
u'ChangeToMyName', 'manual'),
]<|fim▁hole|># The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sammy', u'sammy Documentation',
[u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sammy', u'sammy Documentation',
u'ChangeToMyName', 'sammy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'<|fim▁end|> | |
<|file_name|>EditorCommandHandlers-test.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2012 Adobe Systems Incorporated. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation <|fim▁hole|> * Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
/*jslint vars: true, plusplus: true, devel: true, nomen: true, indent: 4, maxerr: 50 */
/*global define, describe, it, expect, beforeEach, afterEach, waitsFor, runs, $ */
define(function (require, exports, module) {
'use strict';
var Editor = require("editor/Editor").Editor,
EditorCommandHandlers = require("editor/EditorCommandHandlers"),
Commands = require("command/Commands"),
CommandManager = require("command/CommandManager"),
SpecRunnerUtils = require("spec/SpecRunnerUtils"),
EditorUtils = require("editor/EditorUtils");
describe("EditorCommandHandlers", function () {
var defaultContent = "function foo() {\n" +
" function bar() {\n" +
" \n" +
" a();\n" +
" \n" +
" }\n" +
"\n" +
"}";
var myDocument, myEditor;
beforeEach(function () {
// create dummy Document for the Editor
myDocument = SpecRunnerUtils.createMockDocument(defaultContent);
// create Editor instance (containing a CodeMirror instance)
$("body").append("<div id='editor'/>");
myEditor = new Editor(myDocument, true, "javascript", $("#editor").get(0), {});
// Must be focused so editor commands target it
myEditor.focus();
});
afterEach(function () {
myEditor.destroy();
myEditor = null;
$("#editor").remove();
myDocument = null;
});
// Helper functions for testing cursor position / selection range
function expectCursorAt(pos) {
var selection = myEditor.getSelection();
expect(selection.start).toEqual(selection.end);
expect(selection.start).toEqual(pos);
}
function expectSelection(sel) {
expect(myEditor.getSelection()).toEqual(sel);
}
describe("Line comment/uncomment", function () {
it("should comment/uncomment a single line, cursor at start", function () {
myEditor.setCursorPos(3, 0);
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[3] = "// a();";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 3, ch: 2});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectCursorAt({line: 3, ch: 0});
});
it("should comment/uncomment a single line, cursor at end", function () {
myEditor.setCursorPos(3, 12);
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[3] = "// a();";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 3, ch: 14});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectCursorAt({line: 3, ch: 12});
});
it("should comment/uncomment first line in file", function () {
myEditor.setCursorPos(0, 0);
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[0] = "//function foo() {";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 0, ch: 2});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectCursorAt({line: 0, ch: 0});
});
it("should comment/uncomment a single partly-selected line", function () {
// select "function" on line 1
myEditor.setSelection({line: 1, ch: 4}, {line: 1, ch: 12});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[1] = "// function bar() {";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 6}, end: {line: 1, ch: 14}});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 1, ch: 4}, end: {line: 1, ch: 12}});
});
it("should comment/uncomment a single selected line", function () {
// selection covers all of line's text, but not \n at end
myEditor.setSelection({line: 1, ch: 0}, {line: 1, ch: 20});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[1] = "// function bar() {";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 0}, end: {line: 1, ch: 22}});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 1, ch: 0}, end: {line: 1, ch: 20}});
});
it("should comment/uncomment a single fully-selected line (including LF)", function () {
// selection including \n at end of line
myEditor.setSelection({line: 1, ch: 0}, {line: 2, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[1] = "// function bar() {";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 0}, end: {line: 2, ch: 0}});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 1, ch: 0}, end: {line: 2, ch: 0}});
});
it("should comment/uncomment multiple selected lines", function () {
// selection including \n at end of line
myEditor.setSelection({line: 1, ch: 0}, {line: 6, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[1] = "// function bar() {";
lines[2] = "// ";
lines[3] = "// a();";
lines[4] = "// ";
lines[5] = "// }";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 0}, end: {line: 6, ch: 0}});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 1, ch: 0}, end: {line: 6, ch: 0}});
});
it("should comment/uncomment ragged multi-line selection", function () {
myEditor.setSelection({line: 1, ch: 6}, {line: 3, ch: 9});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var lines = defaultContent.split("\n");
lines[1] = "// function bar() {";
lines[2] = "// ";
lines[3] = "// a();";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 8}, end: {line: 3, ch: 11}});
expect(myEditor.getSelectedText()).toEqual("nction bar() {\n// \n// a");
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 1, ch: 6}, end: {line: 3, ch: 9}});
});
it("should comment/uncomment after select all", function () {
myEditor.setSelection({line: 0, ch: 0}, {line: 7, ch: 1});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
var expectedText = "//function foo() {\n" +
"// function bar() {\n" +
"// \n" +
"// a();\n" +
"// \n" +
"// }\n" +
"//\n" +
"//}";
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 0, ch: 0}, end: {line: 7, ch: 3}});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 0, ch: 0}, end: {line: 7, ch: 1}});
});
it("should comment/uncomment lines that were partially commented out already, our style", function () {
// Start with line 3 commented out, with "//" at column 0
var lines = defaultContent.split("\n");
lines[3] = "// a();";
var startingContent = lines.join("\n");
myDocument.setText(startingContent);
// select lines 1-3
myEditor.setSelection({line: 1, ch: 0}, {line: 4, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
lines = defaultContent.split("\n");
lines[1] = "// function bar() {";
lines[2] = "// ";
lines[3] = "//// a();";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 0}, end: {line: 4, ch: 0}});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(startingContent);
expectSelection({start: {line: 1, ch: 0}, end: {line: 4, ch: 0}});
});
it("should comment/uncomment lines that were partially commented out already, comment closer to code", function () {
// Start with line 3 commented out, with "//" snug against the code
var lines = defaultContent.split("\n");
lines[3] = " //a();";
var startingContent = lines.join("\n");
myDocument.setText(startingContent);
// select lines 1-3
myEditor.setSelection({line: 1, ch: 0}, {line: 4, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
lines = defaultContent.split("\n");
lines[1] = "// function bar() {";
lines[2] = "// ";
lines[3] = "// //a();";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 0}, end: {line: 4, ch: 0}});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(startingContent);
expectSelection({start: {line: 1, ch: 0}, end: {line: 4, ch: 0}});
});
it("should uncomment indented, aligned comments", function () {
// Start with lines 1-5 commented out, with "//" all aligned at column 4
var lines = defaultContent.split("\n");
lines[1] = " //function bar() {";
lines[2] = " // ";
lines[3] = " // a();";
lines[4] = " // ";
lines[5] = " //}";
var startingContent = lines.join("\n");
myDocument.setText(startingContent);
// select lines 1-5
myEditor.setSelection({line: 1, ch: 0}, {line: 6, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 1, ch: 0}, end: {line: 6, ch: 0}});
});
it("should uncomment ragged partial comments", function () {
// Start with lines 1-5 commented out, with "//" snug up against each non-blank line's code
var lines = defaultContent.split("\n");
lines[1] = " //function bar() {";
lines[2] = " ";
lines[3] = " //a();";
lines[4] = " ";
lines[5] = " //}";
var startingContent = lines.join("\n");
myDocument.setText(startingContent);
// select lines 1-5
myEditor.setSelection({line: 1, ch: 0}, {line: 6, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_COMMENT, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 1, ch: 0}, end: {line: 6, ch: 0}});
});
});
describe("Duplicate", function () {
it("should duplicate whole line if no selection", function () {
// place cursor in middle of line 1
myEditor.setCursorPos(1, 10);
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines.splice(1, 0, " function bar() {");
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 2, ch: 10});
});
it("should duplicate line + \n if selected line is at end of file", function () {
var lines = defaultContent.split("\n"),
len = lines.length;
// place cursor at the beginning of the last line
myEditor.setCursorPos(len - 1, 0);
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
lines.push("}");
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: len, ch: 0});
});
it("should duplicate first line", function () {
// place cursor at start of line 0
myEditor.setCursorPos(0, 0);
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines.splice(0, 0, "function foo() {");
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 1, ch: 0});
});
it("should duplicate empty line", function () {
// place cursor on line 6
myEditor.setCursorPos(6, 0);
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines.splice(6, 0, "");
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 7, ch: 0});
});
it("should duplicate selection within a line", function () {
// select "bar" on line 1
myEditor.setSelection({line: 1, ch: 13}, {line: 1, ch: 16});
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines[1] = " function barbar() {";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 16}, end: {line: 1, ch: 19}});
});
it("should duplicate when entire line selected, excluding newline", function () {
// select all of line 1, EXcluding trailing \n
myEditor.setSelection({line: 1, ch: 0}, {line: 1, ch: 20});
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines[1] = " function bar() { function bar() {";
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 20}, end: {line: 1, ch: 40}});
});
it("should duplicate when entire line selected, including newline", function () {
// select all of line 1, INcluding trailing \n
myEditor.setSelection({line: 1, ch: 0}, {line: 2, ch: 0});
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines.splice(1, 0, " function bar() {");
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 2, ch: 0}, end: {line: 3, ch: 0}});
});
it("should duplicate when multiple lines selected", function () {
// select lines 1-3
myEditor.setSelection({line: 1, ch: 0}, {line: 4, ch: 0});
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines.splice(1, 0, " function bar() {",
" ",
" a();");
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 4, ch: 0}, end: {line: 7, ch: 0}});
});
it("should duplicate selection crossing line boundary", function () {
// select from middle of line 1 to middle of line 3
myEditor.setSelection({line: 1, ch: 13}, {line: 3, ch: 11});
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var lines = defaultContent.split("\n");
lines.splice(1, 3, " function bar() {",
" ",
" a()bar() {",
" ",
" a();");
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 3, ch: 11}, end: {line: 5, ch: 11}});
});
it("should duplicate after select all", function () {
myEditor.setSelection({line: 0, ch: 0}, {line: 7, ch: 1});
CommandManager.execute(Commands.EDIT_DUPLICATE, myEditor);
var expectedText = defaultContent + defaultContent;
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 7, ch: 1}, end: {line: 14, ch: 1}});
});
});
describe("Move Lines Up/Down", function () {
it("should move whole line up if no selection", function () {
// place cursor in middle of line 1
myEditor.setCursorPos(1, 10);
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[0];
lines[0] = lines[1];
lines[1] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 0, ch: 10});
});
it("should move whole line down if no selection", function () {
// place cursor in middle of line 1
myEditor.setCursorPos(1, 10);
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[2];
lines[2] = lines[1];
lines[1] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 2, ch: 10});
});
it("shouldn't move up first line", function () {
// place cursor at start of line 0
myEditor.setCursorPos(0, 0);
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectCursorAt({line: 0, ch: 0});
});
it("shouldn't move down last line", function () {
var lines = defaultContent.split("\n"),
len = lines.length;
// place cursor at the beginning of the last line
myEditor.setCursorPos(len - 1, 0);
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: len - 1, ch: 0});
});
it("should move up empty line", function () {
// place cursor on line 6
myEditor.setCursorPos(6, 0);
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[5];
lines[5] = lines[6];
lines[6] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 5, ch: 0});
});
it("should move down empty line", function () {
// place cursor on line 6
myEditor.setCursorPos(6, 0);
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[7];
lines[7] = lines[6];
lines[6] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 7, ch: 0});
});
it("should move up when entire line selected, excluding newline", function () {
// select all of line 1, EXcluding trailing \n
myEditor.setSelection({line: 1, ch: 0}, {line: 1, ch: 20});
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[0];
lines[0] = lines[1];
lines[1] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 0, ch: 0}, end: {line: 0, ch: 20}});
});
it("should move down when entire line selected, excluding newline", function () {
// select all of line 1, EXcluding trailing \n
myEditor.setSelection({line: 1, ch: 0}, {line: 1, ch: 20});
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[2];
lines[2] = lines[1];
lines[1] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 2, ch: 0}, end: {line: 2, ch: 20}});
});
it("should move up when entire line selected, including newline", function () {
// select all of line 1, INcluding trailing \n
myEditor.setSelection({line: 1, ch: 0}, {line: 2, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[0];
lines[0] = lines[1];
lines[1] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 0, ch: 0}, end: {line: 1, ch: 0}});
});
it("should move down when entire line selected, including newline", function () {
// select all of line 1, INcluding trailing \n
myEditor.setSelection({line: 1, ch: 0}, {line: 2, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[2];
lines[2] = lines[1];
lines[1] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 2, ch: 0}, end: {line: 3, ch: 0}});
});
it("should move up when multiple lines selected", function () {
// select lines 2-3
myEditor.setSelection({line: 2, ch: 0}, {line: 4, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[1];
lines[1] = lines[2];
lines[2] = lines[3];
lines[3] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 0}, end: {line: 3, ch: 0}});
});
it("should move down when multiple lines selected", function () {
// select lines 2-3
myEditor.setSelection({line: 2, ch: 0}, {line: 4, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[4];
lines[4] = lines[3];
lines[3] = lines[2];
lines[2] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 3, ch: 0}, end: {line: 5, ch: 0}});
});
it("should move up selection crossing line boundary", function () {
// select from middle of line 2 to middle of line 3
myEditor.setSelection({line: 2, ch: 8}, {line: 3, ch: 11});
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[1];
lines[1] = lines[2];
lines[2] = lines[3];
lines[3] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 8}, end: {line: 2, ch: 11}});
});
it("should move down selection crossing line boundary", function () {
// select from middle of line 2 to middle of line 3
myEditor.setSelection({line: 2, ch: 8}, {line: 3, ch: 11});
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[4];
lines[4] = lines[3];
lines[3] = lines[2];
lines[2] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 3, ch: 8}, end: {line: 4, ch: 11}});
});
it("should move the last line up", function () {
// place cursor in last line
myEditor.setCursorPos(7, 0);
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[6];
lines[6] = lines[7];
lines[7] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 6, ch: 0});
});
it("should move the first line down", function () {
// place cursor in first line
myEditor.setCursorPos(0, 0);
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[1];
lines[1] = lines[0];
lines[0] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectCursorAt({line: 1, ch: 0});
});
it("should move the last lines up", function () {
// select lines 6-7
myEditor.setSelection({line: 6, ch: 0}, {line: 7, ch: 1});
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[5];
lines[5] = lines[6];
lines[6] = lines[7];
lines[7] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 5, ch: 0}, end: {line: 6, ch: 1}});
});
it("should move the first lines down", function () {
// select lines 0-1
myEditor.setSelection({line: 0, ch: 0}, {line: 2, ch: 0});
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
var lines = defaultContent.split("\n");
var temp = lines[2];
lines[2] = lines[1];
lines[1] = lines[0];
lines[0] = temp;
var expectedText = lines.join("\n");
expect(myDocument.getText()).toEqual(expectedText);
expectSelection({start: {line: 1, ch: 0}, end: {line: 3, ch: 0}});
});
it("shouldn't move up after select all", function () {
myEditor.setSelection({line: 0, ch: 0}, {line: 7, ch: 1});
CommandManager.execute(Commands.EDIT_LINE_UP, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 0, ch: 0}, end: {line: 7, ch: 1}});
});
it("shouldn't move down after select all", function () {
myEditor.setSelection({line: 0, ch: 0}, {line: 7, ch: 1});
CommandManager.execute(Commands.EDIT_LINE_DOWN, myEditor);
expect(myDocument.getText()).toEqual(defaultContent);
expectSelection({start: {line: 0, ch: 0}, end: {line: 7, ch: 1}});
});
});
});
});<|fim▁end|> | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the |
<|file_name|>313_test_urllib2.py<|end_file_name|><|fim▁begin|>import unittest
from test import support
import os
import io
import socket
import urllib.request
from urllib.request import Request, OpenerDirector
# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler
class TrivialTests(unittest.TestCase):
def test_trivial(self):
# A couple trivial tests
self.assertRaises(ValueError, urllib.request.urlopen, 'bogus url')
# XXX Name hacking to get this to work on Windows.
fname = os.path.abspath(urllib.request.__file__).replace('\\', '/')
# And more hacking to get it to work on MacOS. This assumes
# urllib.pathname2url works, unfortunately...
if os.name == 'mac':
fname = '/' + fname.replace(':', '/')
if os.name == 'nt':
file_url = "file:///%s" % fname
else:
file_url = "file://%s" % fname
f = urllib.request.urlopen(file_url)
buf = f.read()
f.close()
def test_parse_http_list(self):
tests = [
('a,b,c', ['a', 'b', 'c']),
('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
('a, b, "c", "d", "e,f", g, h',
['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
('a="b\\"c", d="e\\,f", g="h\\\\i"',
['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
for string, list in tests:
self.assertEqual(urllib.request.parse_http_list(string), list)
def test_request_headers_dict():
"""
The Request.headers dictionary is not a documented interface. It should
stay that way, because the complete set of headers are only accessible
through the .get_header(), .has_header(), .header_items() interface.
However, .headers pre-dates those methods, and so real code will be using
the dictionary.
The introduction in 2.4 of those methods was a mistake for the same reason:
code that previously saw all (urllib2 user)-provided headers in .headers
now sees only a subset (and the function interface is ugly and incomplete).
A better change would have been to replace .headers dict with a dict
subclass (or UserDict.DictMixin instance?) that preserved the .headers
interface and also provided access to the "unredirected" headers. It's
probably too late to fix that, though.
Check .capitalize() case normalization:
>>> url = "http://example.com"
>>> Request(url, headers={"Spam-eggs": "blah"}).headers["Spam-eggs"]
'blah'
>>> Request(url, headers={"spam-EggS": "blah"}).headers["Spam-eggs"]
'blah'
Currently, Request(url, "Spam-eggs").headers["Spam-Eggs"] raises KeyError,
but that could be changed in future.
"""
def test_request_headers_methods():
"""
Note the case normalization of header names here, to .capitalize()-case.
This should be preserved for backwards-compatibility. (In the HTTP case,
normalization to .title()-case is done by urllib2 before sending headers to
http.client).
>>> url = "http://example.com"
>>> r = Request(url, headers={"Spam-eggs": "blah"})
>>> r.has_header("Spam-eggs")
True
>>> r.header_items()
[('Spam-eggs', 'blah')]
>>> r.add_header("Foo-Bar", "baz")
>>> items = sorted(r.header_items())
>>> items
[('Foo-bar', 'baz'), ('Spam-eggs', 'blah')]
Note that e.g. r.has_header("spam-EggS") is currently False, and
r.get_header("spam-EggS") returns None, but that could be changed in
future.
>>> r.has_header("Not-there")
False
>>> print(r.get_header("Not-there"))
None
>>> r.get_header("Not-there", "default")
'default'
"""
def test_password_manager(self):
"""
>>> mgr = urllib.request.HTTPPasswordMgr()
>>> add = mgr.add_password
>>> add("Some Realm", "http://example.com/", "joe", "password")
>>> add("Some Realm", "http://example.com/ni", "ni", "ni")
>>> add("c", "http://example.com/foo", "foo", "ni")
>>> add("c", "http://example.com/bar", "bar", "nini")
>>> add("b", "http://example.com/", "first", "blah")
>>> add("b", "http://example.com/", "second", "spam")
>>> add("a", "http://example.com", "1", "a")
>>> add("Some Realm", "http://c.example.com:3128", "3", "c")
>>> add("Some Realm", "d.example.com", "4", "d")
>>> add("Some Realm", "e.example.com:3128", "5", "e")
>>> mgr.find_user_password("Some Realm", "example.com")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/spam")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/spam/spam")
('joe', 'password')
>>> mgr.find_user_password("c", "http://example.com/foo")
('foo', 'ni')
>>> mgr.find_user_password("c", "http://example.com/bar")
('bar', 'nini')
Actually, this is really undefined ATM
## Currently, we use the highest-level path where more than one match:
## >>> mgr.find_user_password("Some Realm", "http://example.com/ni")
## ('joe', 'password')
Use latest add_password() in case of conflict:
>>> mgr.find_user_password("b", "http://example.com/")
('second', 'spam')
No special relationship between a.example.com and example.com:
>>> mgr.find_user_password("a", "http://example.com/")
('1', 'a')
>>> mgr.find_user_password("a", "http://a.example.com/")
(None, None)
Ports:
>>> mgr.find_user_password("Some Realm", "c.example.com")
(None, None)
>>> mgr.find_user_password("Some Realm", "c.example.com:3128")
('3', 'c')
>>> mgr.find_user_password("Some Realm", "http://c.example.com:3128")
('3', 'c')
>>> mgr.find_user_password("Some Realm", "d.example.com")
('4', 'd')
>>> mgr.find_user_password("Some Realm", "e.example.com:3128")
('5', 'e')
"""
pass
def test_password_manager_default_port(self):
"""
>>> mgr = urllib.request.HTTPPasswordMgr()
>>> add = mgr.add_password
The point to note here is that we can't guess the default port if there's
no scheme. This applies to both add_password and find_user_password.
>>> add("f", "http://g.example.com:80", "10", "j")
>>> add("g", "http://h.example.com", "11", "k")
>>> add("h", "i.example.com:80", "12", "l")
>>> add("i", "j.example.com", "13", "m")
>>> mgr.find_user_password("f", "g.example.com:100")
(None, None)
>>> mgr.find_user_password("f", "g.example.com:80")
('10', 'j')
>>> mgr.find_user_password("f", "g.example.com")
(None, None)
>>> mgr.find_user_password("f", "http://g.example.com:100")
(None, None)
>>> mgr.find_user_password("f", "http://g.example.com:80")
('10', 'j')
>>> mgr.find_user_password("f", "http://g.example.com")
('10', 'j')
>>> mgr.find_user_password("g", "h.example.com")
('11', 'k')
>>> mgr.find_user_password("g", "h.example.com:80")
('11', 'k')
>>> mgr.find_user_password("g", "http://h.example.com:80")
('11', 'k')
>>> mgr.find_user_password("h", "i.example.com")
(None, None)
>>> mgr.find_user_password("h", "i.example.com:80")
('12', 'l')
>>> mgr.find_user_password("h", "http://i.example.com:80")
('12', 'l')
>>> mgr.find_user_password("i", "j.example.com")
('13', 'm')
>>> mgr.find_user_password("i", "j.example.com:80")
(None, None)
>>> mgr.find_user_password("i", "http://j.example.com")
('13', 'm')
>>> mgr.find_user_password("i", "http://j.example.com:80")
(None, None)
"""
class MockOpener:
addheaders = []
def open(self, req, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.req, self.data, self.timeout = req, data, timeout
def error(self, proto, *args):
self.proto, self.args = proto, args
class MockFile:
def read(self, count=None): pass
def readline(self, count=None): pass
def close(self): pass
class MockHeaders(dict):
def getheaders(self, name):
return list(self.values())
class MockResponse(io.StringIO):
def __init__(self, code, msg, headers, data, url=None):
io.StringIO.__init__(self, data)
self.code, self.msg, self.headers, self.url = code, msg, headers, url
def info(self):
return self.headers
def geturl(self):
return self.url
class MockCookieJar:
def add_cookie_header(self, request):
self.ach_req = request
def extract_cookies(self, response, request):
self.ec_req, self.ec_r = request, response
class FakeMethod:
def __init__(self, meth_name, action, handle):
self.meth_name = meth_name
self.handle = handle
self.action = action
def __call__(self, *args):
return self.handle(self.meth_name, self.action, *args)
class MockHTTPResponse(io.IOBase):
def __init__(self, fp, msg, status, reason):
self.fp = fp
self.msg = msg
self.status = status
self.reason = reason
self.code = 200
def read(self):
return ''
def info(self):
return {}
def geturl(self):
return self.url
class MockHTTPClass:
def __init__(self):
self.level = 0
self.req_headers = []
self.data = None
self.raise_on_endheaders = False
self._tunnel_headers = {}
def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.timeout = timeout
return self
def set_debuglevel(self, level):
self.level = level
def _set_tunnel(self, host, port=None, headers=None):
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def request(self, method, url, body=None, headers=None):
self.method = method
self.selector = url
if headers is not None:
self.req_headers += headers.items()
self.req_headers.sort()
if body:
self.data = body
if self.raise_on_endheaders:
import socket
raise socket.error()
def getresponse(self):
return MockHTTPResponse(MockFile(), {}, 200, "OK")
class MockHandler:
# useful for testing handler machinery
# see add_ordered_mock_handlers() docstring
handler_order = 500
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
for spec in methods:
if len(spec) == 2: name, action = spec
else: name, action = spec, None
meth = FakeMethod(name, action, self.handle)
setattr(self.__class__, name, meth)
def handle(self, fn_name, action, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if action is None:
return None
elif action == "return self":
return self
elif action == "return response":
res = MockResponse(200, "OK", {}, "")
return res
elif action == "return request":
return Request("http://blah/")
elif action.startswith("error"):
code = action[action.rfind(" ")+1:]
try:
code = int(code)
except ValueError:
pass
res = MockResponse(200, "OK", {}, "")
return self.parent.error("http", args[0], res, code, "", {})
elif action == "raise":
raise urllib.error.URLError("blah")
assert False
def close(self): pass
def add_parent(self, parent):
self.parent = parent
self.parent.calls = []
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# No handler_order, leave in original order. Yuck.
return True
return self.handler_order < other.handler_order
def add_ordered_mock_handlers(opener, meth_spec):
"""Create MockHandlers and add them to an OpenerDirector.
meth_spec: list of lists of tuples and strings defining methods to define
on handlers. eg:
[["http_error", "ftp_open"], ["http_open"]]
defines methods .http_error() and .ftp_open() on one handler, and
.http_open() on another. These methods just record their arguments and
return None. Using a tuple instead of a string causes the method to
perform some action (see MockHandler.handle()), eg:
[["http_error"], [("http_open", "return request")]]
defines .http_error() on one handler (which simply returns None), and
.http_open() on another handler, which returns a Request object.
"""
handlers = []
count = 0
for meths in meth_spec:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
h.handler_order += count
h.add_parent(opener)
count = count + 1
handlers.append(h)
opener.add_handler(h)
return handlers
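# Illustrative usage sketch (not part of the original test suite; it only uses
# the helpers defined above):
#
#   o = OpenerDirector()
#   handlers = add_ordered_mock_handlers(
#       o, [["http_error"], [("http_open", "return request")]])
#   # handlers[0].http_error(...) records its call and returns None, while
#   # handlers[1].http_open(...) returns Request("http://blah/").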
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
class MockHTTPHandler(urllib.request.BaseHandler):
# useful for testing redirections and auth
# sends supplied headers and code as first response
# sends 200 OK as second response
def __init__(self, code, headers):
self.code = code
self.headers = headers
self.reset()
def reset(self):
self._count = 0
self.requests = []
def http_open(self, req):
import email, http.client, copy
from io import StringIO
self.requests.append(copy.deepcopy(req))
if self._count == 0:
self._count = self._count + 1
name = http.client.responses[self.code]
msg = email.message_from_string(self.headers)
return self.parent.error(
"http", req, MockFile(), self.code, name, msg)
else:
self.req = req
msg = email.message_from_string("\r\n\r\n")
return MockResponse(200, "OK", msg, "", req.get_full_url())
class MockHTTPSHandler(urllib.request.AbstractHTTPHandler):
# Useful for testing the Proxy-Authorization request by verifying the
# properties of httpcon
def __init__(self):
urllib.request.AbstractHTTPHandler.__init__(self)
self.httpconn = MockHTTPClass()
def https_open(self, req):
return self.do_open(self.httpconn, req)
class MockPasswordManager:
def add_password(self, realm, uri, user, password):
self.realm = realm
self.url = uri
self.user = user
self.password = password
def find_user_password(self, realm, authuri):
self.target_realm = realm
self.target_url = authuri
return self.user, self.password
class OpenerDirectorTests(unittest.TestCase):
def test_add_non_handler(self):
class NonHandler(object):
pass
self.assertRaises(TypeError,
OpenerDirector().add_handler, NonHandler())
def test_badly_named_methods(self):
# test work-around for three methods that accidentally follow the
# naming conventions for handler methods
# (*_open() / *_request() / *_response())
# These used to call the accidentally-named methods, causing a
# TypeError in real code; here, returning self from these mock
# methods would either cause no exception, or AttributeError.
from urllib.error import URLError
o = OpenerDirector()
meth_spec = [
[("do_open", "return self"), ("proxy_open", "return self")],
[("redirect_request", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
o.add_handler(urllib.request.UnknownHandler())
for scheme in "do", "proxy", "redirect":
self.assertRaises(URLError, o.open, scheme+"://example.com/")
def test_handled(self):
# handler returning non-None means no more handlers will be called
o = OpenerDirector()
meth_spec = [
["http_open", "ftp_open", "http_error_302"],
["ftp_open"],
[("http_open", "return self")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# Second .http_open() gets called, third doesn't, since second returned
# non-None. Handlers without .http_open() never get any methods called
# on them.
# In fact, second mock handler defining .http_open() returns self
# (instead of response), which becomes the OpenerDirector's return
# value.
self.assertEqual(r, handlers[2])
calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
for expected, got in zip(calls, o.calls):
handler, name, args, kwds = got
self.assertEqual((handler, name), expected)
self.assertEqual(args, (req,))
def test_handler_order(self):
o = OpenerDirector()
handlers = []
for meths, handler_order in [
([("http_open", "return self")], 500),
(["http_open"], 0),
]:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
h.handler_order = handler_order
handlers.append(h)
o.add_handler(h)
r = o.open("http://example.com/")
# handlers called in reverse order, thanks to their sort order
self.assertEqual(o.calls[0][0], handlers[1])
self.assertEqual(o.calls[1][0], handlers[0])
def test_raise(self):
# raising URLError stops processing of request
o = OpenerDirector()
meth_spec = [
[("http_open", "raise")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
self.assertRaises(urllib.error.URLError, o.open, req)
self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])
## def test_error(self):
## # XXX this doesn't actually seem to be used in standard library,
## # but should really be tested anyway...
def test_http_error(self):
# XXX http_error_default
# http errors are a special case
o = OpenerDirector()
meth_spec = [
[("http_open", "error 302")],
[("http_error_400", "raise"), "http_open"],
[("http_error_302", "return response"), "http_error_303",
"http_error"],
[("http_error_302")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
<|fim▁hole|> def __eq__(self, other): return True
req = Request("http://example.com/")
r = o.open(req)
assert len(o.calls) == 2
calls = [(handlers[0], "http_open", (req,)),
(handlers[2], "http_error_302",
(req, Unknown(), 302, "", {}))]
for expected, got in zip(calls, o.calls):
handler, method_name, args = expected
self.assertEqual((handler, method_name), got[:2])
self.assertEqual(args, got[2])
def test_processors(self):
# *_request / *_response methods get called appropriately
o = OpenerDirector()
meth_spec = [
[("http_request", "return request"),
("http_response", "return response")],
[("http_request", "return request"),
("http_response", "return response")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# processor methods are called on *all* handlers that define them,
# not just the first handler that handles the request
calls = [
(handlers[0], "http_request"), (handlers[1], "http_request"),
(handlers[0], "http_response"), (handlers[1], "http_response")]
for i, (handler, name, args, kwds) in enumerate(o.calls):
if i < 2:
# *_request
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 1)
self.assertTrue(isinstance(args[0], Request))
else:
# *_response
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 2)
self.assertTrue(isinstance(args[0], Request))
# response from opener.open is None, because there's no
# handler that defines http_open to handle it
self.assertTrue(args[1] is None or
isinstance(args[1], MockResponse))
def sanepathname2url(path):
urlpath = urllib.request.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
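# Example (assumed Windows behaviour): pathname2url(r"C:\tmp\x") yields
# "///C:/tmp/x", which the trim above turns into "/C:/tmp/x".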
class HandlerTests(unittest.TestCase):
def test_ftp(self):
class MockFTPWrapper:
def __init__(self, data): self.data = data
def retrfile(self, filename, filetype):
self.filename, self.filetype = filename, filetype
return io.StringIO(self.data), len(self.data)
class NullFTPHandler(urllib.request.FTPHandler):
def __init__(self, data): self.data = data
def connect_ftp(self, user, passwd, host, port, dirs,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.user, self.passwd = user, passwd
self.host, self.port = host, port
self.dirs = dirs
self.ftpwrapper = MockFTPWrapper(self.data)
return self.ftpwrapper
import ftplib
data = "rheum rhaponicum"
h = NullFTPHandler(data)
o = h.parent = MockOpener()
for url, host, port, user, passwd, type_, dirs, filename, mimetype in [
("ftp://localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%25parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%2542parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%42parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://localhost:80/foo/bar/",
"localhost", 80, "", "", "D",
["foo", "bar"], "", None),
("ftp://localhost/baz.gif;type=a",
"localhost", ftplib.FTP_PORT, "", "", "A",
[], "baz.gif", None), # XXX really this should guess image/gif
]:
req = Request(url)
req.timeout = None
r = h.ftp_open(req)
# ftp authentication not yet implemented by FTPHandler
self.assertEqual(h.user, user)
self.assertEqual(h.passwd, passwd)
self.assertEqual(h.host, socket.gethostbyname(host))
self.assertEqual(h.port, port)
self.assertEqual(h.dirs, dirs)
self.assertEqual(h.ftpwrapper.filename, filename)
self.assertEqual(h.ftpwrapper.filetype, type_)
headers = r.info()
self.assertEqual(headers.get("Content-type"), mimetype)
self.assertEqual(int(headers["Content-length"]), len(data))
def test_file(self):
import email.utils, socket
h = urllib.request.FileHandler()
o = h.parent = MockOpener()
TESTFN = support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = b"hello, world\n"
urls = [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
]
try:
localaddr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
localaddr = ''
if localaddr:
urls.append("file://%s%s" % (localaddr, urlpath))
for url in urls:
f = open(TESTFN, "wb")
try:
try:
f.write(towrite)
finally:
f.close()
r = h.file_open(Request(url))
try:
data = r.read()
headers = r.info()
respurl = r.geturl()
finally:
r.close()
stats = os.stat(TESTFN)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
finally:
os.remove(TESTFN)
self.assertEqual(data, towrite)
self.assertEqual(headers["Content-type"], "text/plain")
self.assertEqual(headers["Content-length"], "13")
self.assertEqual(headers["Last-modified"], modified)
self.assertEqual(respurl, url)
for url in [
"file://localhost:80%s" % urlpath,
"file:///file_does_not_exist.txt",
"file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
os.getcwd(), TESTFN),
"file://somerandomhost.ontheinternet.com%s/%s" %
(os.getcwd(), TESTFN),
]:
try:
f = open(TESTFN, "wb")
try:
f.write(towrite)
finally:
f.close()
self.assertRaises(urllib.error.URLError,
h.file_open, Request(url))
finally:
os.remove(TESTFN)
h = urllib.request.FileHandler()
o = h.parent = MockOpener()
# XXXX why does // mean ftp (and /// mean not ftp!), and where
# is file: scheme specified? I think this is really a bug, and
# what was intended was to distinguish between URLs like:
# file:/blah.txt (a file)
# file://localhost/blah.txt (a file)
# file:///blah.txt (a file)
# file://ftp.example.com/blah.txt (an ftp URL)
for url, ftp in [
("file://ftp.example.com//foo.txt", True),
("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
("file://ftp.example.com/foo.txt", False),
("file://somehost//foo/something.txt", True),
("file://localhost//foo/something.txt", False),
]:
req = Request(url)
try:
h.file_open(req)
# XXXX remove OSError when bug fixed
except (urllib.error.URLError, OSError):
self.assertFalse(ftp)
else:
self.assertIs(o.req, req)
self.assertEqual(req.type, "ftp")
            self.assertEqual(req.type == "ftp", ftp)
def test_http(self):
h = urllib.request.AbstractHTTPHandler()
o = h.parent = MockOpener()
url = "http://example.com/"
for method, data in [("GET", None), ("POST", "blah")]:
req = Request(url, data, {"Foo": "bar"})
req.timeout = None
req.add_unredirected_header("Spam", "eggs")
http = MockHTTPClass()
r = h.do_open(http, req)
# result attributes
r.read; r.readline # wrapped MockFile methods
r.info; r.geturl # addinfourl methods
r.code, r.msg == 200, "OK" # added from MockHTTPClass.getreply()
hdrs = r.info()
hdrs.get; hdrs.__contains__ # r.info() gives dict from .getreply()
self.assertEqual(r.geturl(), url)
self.assertEqual(http.host, "example.com")
self.assertEqual(http.level, 0)
self.assertEqual(http.method, method)
self.assertEqual(http.selector, "/")
self.assertEqual(http.req_headers,
[("Connection", "close"),
("Foo", "bar"), ("Spam", "eggs")])
self.assertEqual(http.data, data)
# check socket.error converted to URLError
http.raise_on_endheaders = True
self.assertRaises(urllib.error.URLError, h.do_open, http, req)
# check adding of standard headers
o.addheaders = [("Spam", "eggs")]
for data in "", None: # POST, GET
req = Request("http://example.com/", data)
r = MockResponse(200, "OK", {}, "")
newreq = h.do_request_(req)
if data is None: # GET
self.assertTrue("Content-length" not in req.unredirected_hdrs)
self.assertTrue("Content-type" not in req.unredirected_hdrs)
else: # POST
self.assertEqual(req.unredirected_hdrs["Content-length"], "0")
self.assertEqual(req.unredirected_hdrs["Content-type"],
"application/x-www-form-urlencoded")
# XXX the details of Host could be better tested
self.assertEqual(req.unredirected_hdrs["Host"], "example.com")
self.assertEqual(req.unredirected_hdrs["Spam"], "eggs")
# don't clobber existing headers
req.add_unredirected_header("Content-length", "foo")
req.add_unredirected_header("Content-type", "bar")
req.add_unredirected_header("Host", "baz")
req.add_unredirected_header("Spam", "foo")
newreq = h.do_request_(req)
self.assertEqual(req.unredirected_hdrs["Content-length"], "foo")
self.assertEqual(req.unredirected_hdrs["Content-type"], "bar")
self.assertEqual(req.unredirected_hdrs["Host"], "baz")
self.assertEqual(req.unredirected_hdrs["Spam"], "foo")
def test_http_doubleslash(self):
# Checks the presence of any unnecessary double slash in url does not
        # break anything. Previously, a double slash directly after the host
        # could cause incorrect parsing.
h = urllib.request.AbstractHTTPHandler()
o = h.parent = MockOpener()
data = ""
ds_urls = [
"http://example.com/foo/bar/baz.html",
"http://example.com//foo/bar/baz.html",
"http://example.com/foo//bar/baz.html",
"http://example.com/foo/bar//baz.html"
]
for ds_url in ds_urls:
ds_req = Request(ds_url, data)
# Check whether host is determined correctly if there is no proxy
np_ds_req = h.do_request_(ds_req)
self.assertEqual(np_ds_req.unredirected_hdrs["Host"],"example.com")
# Check whether host is determined correctly if there is a proxy
ds_req.set_proxy("someproxy:3128",None)
p_ds_req = h.do_request_(ds_req)
self.assertEqual(p_ds_req.unredirected_hdrs["Host"],"example.com")
def test_fixpath_in_weirdurls(self):
        # Issue 4493: urllib2 should supply a '/' for URLs whose path does not
        # start with '/'
h = urllib.request.AbstractHTTPHandler()
o = h.parent = MockOpener()
weird_url = 'http://www.python.org?getspam'
req = Request(weird_url)
newreq = h.do_request_(req)
self.assertEqual(newreq.host,'www.python.org')
self.assertEqual(newreq.selector,'/?getspam')
url_without_path = 'http://www.python.org'
req = Request(url_without_path)
newreq = h.do_request_(req)
self.assertEqual(newreq.host,'www.python.org')
self.assertEqual(newreq.selector,'')
def test_errors(self):
h = urllib.request.HTTPErrorProcessor()
o = h.parent = MockOpener()
url = "http://example.com/"
req = Request(url)
# all 2xx are passed through
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assertIs(r, newr)
self.assertFalse(hasattr(o, "proto")) # o.error not called
r = MockResponse(202, "Accepted", {}, "", url)
newr = h.http_response(req, r)
self.assertIs(r, newr)
self.assertFalse(hasattr(o, "proto")) # o.error not called
r = MockResponse(206, "Partial content", {}, "", url)
newr = h.http_response(req, r)
self.assertIs(r, newr)
self.assertFalse(hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = MockResponse(502, "Bad gateway", {}, "", url)
self.assertIsNone(h.http_response(req, r))
self.assertEqual(o.proto, "http") # o.error called
self.assertEqual(o.args, (req, r, 502, "Bad gateway", {}))
def test_cookies(self):
cj = MockCookieJar()
h = urllib.request.HTTPCookieProcessor(cj)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assertIs(cj.ach_req, req)
self.assertIs(cj.ach_req, newreq)
self.assertEqual(req.get_origin_req_host(), "example.com")
self.assertFalse(req.is_unverifiable())
newr = h.http_response(req, r)
self.assertIs(cj.ec_req, req)
self.assertIs(cj.ec_r, r)
self.assertIs(r, newr)
def test_redirect(self):
from_url = "http://example.com/a.html"
to_url = "http://example.com/b.html"
h = urllib.request.HTTPRedirectHandler()
o = h.parent = MockOpener()
# ordinary redirect behaviour
for code in 301, 302, 303, 307:
for data in None, "blah\nblah\n":
method = getattr(h, "http_error_%s" % code)
req = Request(from_url, data)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
req.add_header("Nonsense", "viking=withhold")
if data is not None:
req.add_header("Content-Length", str(len(data)))
req.add_unredirected_header("Spam", "spam")
try:
method(req, MockFile(), code, "Blah",
MockHeaders({"location": to_url}))
except urllib.error.HTTPError:
# 307 in response to POST requires user OK
self.assertTrue(code == 307 and data is not None)
self.assertEqual(o.req.get_full_url(), to_url)
try:
self.assertEqual(o.req.get_method(), "GET")
except AttributeError:
self.assertFalse(o.req.has_data())
# now it's a GET, there should not be headers regarding content
# (possibly dragged from before being a POST)
headers = [x.lower() for x in o.req.headers]
self.assertTrue("content-length" not in headers)
self.assertTrue("content-type" not in headers)
self.assertEqual(o.req.headers["Nonsense"],
"viking=withhold")
self.assertTrue("Spam" not in o.req.headers)
self.assertTrue("Spam" not in o.req.unredirected_hdrs)
# loop detection
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
def redirect(h, req, url=to_url):
h.http_error_302(req, MockFile(), 302, "Blah",
MockHeaders({"location": url}))
# Note that the *original* request shares the same record of
# redirections with the sub-requests caused by the redirections.
# detect infinite loop redirect of a URL to itself
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/")
count = count + 1
except urllib.error.HTTPError:
# don't stop until max_repeats, because cookies may introduce state
self.assertEqual(count, urllib.request.HTTPRedirectHandler.max_repeats)
# detect endless non-repeating chain of redirects
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/%d" % count)
count = count + 1
except urllib.error.HTTPError:
self.assertEqual(count,
urllib.request.HTTPRedirectHandler.max_redirections)
def test_cookie_redirect(self):
# cookies shouldn't leak into redirected requests
from http.cookiejar import CookieJar
from test.test_http_cookiejar import interact_netscape
cj = CookieJar()
interact_netscape(cj, "http://www.example.com/", "spam=eggs")
hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
hdeh = urllib.request.HTTPDefaultErrorHandler()
hrh = urllib.request.HTTPRedirectHandler()
cp = urllib.request.HTTPCookieProcessor(cj)
o = build_test_opener(hh, hdeh, hrh, cp)
o.open("http://www.example.com/")
self.assertFalse(hh.req.has_header("Cookie"))
def test_proxy(self):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://acme.example.com/")
self.assertEqual(req.get_host(), "acme.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_no_proxy(self):
os.environ['no_proxy'] = 'python.org'
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com"))
o.add_handler(ph)
req = Request("http://www.perl.org/")
self.assertEqual(req.get_host(), "www.perl.org")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com")
req = Request("http://www.python.org")
self.assertEqual(req.get_host(), "www.python.org")
r = o.open(req)
self.assertEqual(req.get_host(), "www.python.org")
del os.environ['no_proxy']
def test_proxy_https(self):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(https="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("https_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("https://www.example.com/")
self.assertEqual(req.get_host(), "www.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "https_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_https_proxy_authorization(self):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(https='proxy.example.com:3128'))
o.add_handler(ph)
https_handler = MockHTTPSHandler()
o.add_handler(https_handler)
req = Request("https://www.example.com/")
req.add_header("Proxy-Authorization","FooBar")
req.add_header("User-Agent","Grail")
self.assertEqual(req.get_host(), "www.example.com")
self.assertIsNone(req._tunnel_host)
r = o.open(req)
# Verify Proxy-Authorization gets tunneled to request.
# httpsconn req_headers do not have the Proxy-Authorization header but
# the req will have.
self.assertFalse(("Proxy-Authorization","FooBar") in
https_handler.httpconn.req_headers)
self.assertTrue(("User-Agent","Grail") in
https_handler.httpconn.req_headers)
self.assertIsNotNone(req._tunnel_host)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual(req.get_header("Proxy-authorization"),"FooBar")
def test_basic_auth(self, quote_char='"'):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
(quote_char, realm, quote_char) )
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
def test_basic_auth_with_single_quoted_realm(self):
self.test_basic_auth(quote_char="'")
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = urllib.request.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
def test_basic_and_digest_auth_handlers(self):
# HTTPDigestAuthHandler threw an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
# Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
# try digest first (since it's the strongest auth scheme), so we record
# order of calls here to check digest comes first:
class RecordingOpenerDirector(OpenerDirector):
def __init__(self):
OpenerDirector.__init__(self)
self.recorded = []
def record(self, info):
self.recorded.append(info)
class TestDigestAuthHandler(urllib.request.HTTPDigestAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("digest")
urllib.request.HTTPDigestAuthHandler.http_error_401(self,
*args, **kwds)
class TestBasicAuthHandler(urllib.request.HTTPBasicAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("basic")
urllib.request.HTTPBasicAuthHandler.http_error_401(self,
*args, **kwds)
opener = RecordingOpenerDirector()
password_manager = MockPasswordManager()
digest_handler = TestDigestAuthHandler(password_manager)
basic_handler = TestBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(basic_handler)
opener.add_handler(digest_handler)
opener.add_handler(http_handler)
# check basic auth isn't blocked by digest handler failing
self._test_basic_auth(opener, basic_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
# check digest was tried before basic (twice, because
# _test_basic_auth called .open() twice)
self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64
user, password = "wile", "coyote"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
r = opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
userpass = bytes('%s:%s' % (user, password), "ascii")
auth_hdr_value = ('Basic ' +
base64.encodebytes(userpass).strip().decode())
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header],
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = None
http_handler.reset()
r = opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
class MiscTests(unittest.TestCase):
def test_build_opener(self):
class MyHTTPHandler(urllib.request.HTTPHandler): pass
class FooHandler(urllib.request.BaseHandler):
def foo_open(self): pass
class BarHandler(urllib.request.BaseHandler):
def bar_open(self): pass
build_opener = urllib.request.build_opener
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, urllib.request.HTTPHandler)
o = build_opener(urllib.request.HTTPHandler)
self.opener_has_handler(o, urllib.request.HTTPHandler)
o = build_opener(urllib.request.HTTPHandler())
self.opener_has_handler(o, urllib.request.HTTPHandler)
# Issue2670: multiple handlers sharing the same base class
class MyOtherHTTPHandler(urllib.request.HTTPHandler): pass
o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
self.opener_has_handler(o, MyOtherHTTPHandler)
def opener_has_handler(self, opener, handler_class):
self.assertTrue(any(h.__class__ == handler_class
for h in opener.handlers))
class RequestTests(unittest.TestCase):
def setUp(self):
self.get = Request("http://www.python.org/~jeremy/")
self.post = Request("http://www.python.org/~jeremy/",
"data",
headers={"X-Test": "test"})
def test_method(self):
self.assertEqual("POST", self.post.get_method())
self.assertEqual("GET", self.get.get_method())
def test_add_data(self):
self.assertFalse(self.get.has_data())
self.assertEqual("GET", self.get.get_method())
self.get.add_data("spam")
self.assertTrue(self.get.has_data())
self.assertEqual("POST", self.get.get_method())
def test_get_full_url(self):
self.assertEqual("http://www.python.org/~jeremy/",
self.get.get_full_url())
def test_selector(self):
self.assertEqual("/~jeremy/", self.get.get_selector())
req = Request("http://www.python.org/")
self.assertEqual("/", req.get_selector())
def test_get_type(self):
self.assertEqual("http", self.get.get_type())
def test_get_host(self):
self.assertEqual("www.python.org", self.get.get_host())
def test_get_host_unquote(self):
req = Request("http://www.%70ython.org/")
self.assertEqual("www.python.org", req.get_host())
def test_proxy(self):
self.assertFalse(self.get.has_proxy())
self.get.set_proxy("www.perl.org", "http")
self.assertTrue(self.get.has_proxy())
self.assertEqual("www.python.org", self.get.get_origin_req_host())
self.assertEqual("www.perl.org", self.get.get_host())
def test_wrapped_url(self):
req = Request("<URL:http://www.python.org>")
self.assertEqual("www.python.org", req.get_host())
def test_urlwith_fragment(self):
req = Request("http://www.python.org/?qs=query#fragment=true")
self.assertEqual("/?qs=query", req.get_selector())
req = Request("http://www.python.org/#fun=true")
self.assertEqual("/", req.get_selector())
def test_main(verbose=None):
from test import test_urllib2
support.run_doctest(test_urllib2, verbose)
support.run_doctest(urllib.request, verbose)
tests = (TrivialTests,
OpenerDirectorTests,
HandlerTests,
MiscTests,
RequestTests)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main(verbose=True)<|fim▁end|> | class Unknown: |
<|file_name|>strict_https_loader.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from thumbor.loaders import http_loader
from tornado.concurrent import return_future<|fim▁hole|>
def _normalize_url(url):
url = http_loader.quote_url(unquote(url))
if url.startswith('http:'):
url = url.replace('http:', 'https:', 1)
return url if url.startswith('https://') else 'https://%s' % url
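# Behaviour sketch (illustrative inputs only):
#   _normalize_url('http://example.com/a.jpg')  -> 'https://example.com/a.jpg'
#   _normalize_url('example.com/a.jpg')         -> 'https://example.com/a.jpg'
#   _normalize_url('https://example.com/a.jpg') -> already https, returned as-is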
def validate(context, url):
if url.startswith('http://'):
return False
return http_loader.validate(context, url, normalize_url_func=_normalize_url)
def return_contents(response, url, callback, context):
return http_loader.return_contents(response, url, callback, context)
@return_future
def load(context, url, callback):
return http_loader.load_sync(context, url, callback, normalize_url_func=_normalize_url)
def encode(string):
return http_loader.encode(string)<|fim▁end|> | from urllib import unquote
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod areas;
mod area_frame_allocator;
mod paging;
mod info;
use self::paging::{PAGE_SIZE, PhysicalAddress};
use self::area_frame_allocator::{AreaFrameAllocator};
use spin::{Once, Mutex, MutexGuard};
pub use self::paging::{Page, VirtualAddress};
pub use self::paging::entry::{EntryFlags};
pub const KERNEL_OFFSET: usize = 0o177777_776_000_000_000_0000;
static MEMORY_CONTROLLER: Once<Mutex<MemoryController>> = Once::new();
pub struct MemoryController {
active_table: paging::ActivePageTable,
frame_allocator: AreaFrameAllocator,
}
impl MemoryController {
pub fn map(&mut self, page: Page, flags: EntryFlags) {
self.active_table.map(page, flags, &mut self.frame_allocator);
}
//TODO impl
}
pub fn controller<'a>() -> MutexGuard<'a, MemoryController> {
MEMORY_CONTROLLER.try().expect("Memory not yet initialized").lock()
}
//TODO Figure out if we want Copy and Clone on this
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
pub struct Frame {
number: usize,
}
impl Frame {
fn containing_address(address: PhysicalAddress) -> Frame {
Frame { number: address.get() / PAGE_SIZE }
}
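    // e.g. (assuming PAGE_SIZE == 4096): an address of 0x1008 lies in frame 1,
    // whose start_address() is 0x1000.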
fn start_address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.number * PAGE_SIZE)
}
fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter {
start: start,
end: end,
}
}
}
struct FrameIter {
start: Frame,
end: Frame,
}
impl Iterator for FrameIter {
type Item = Frame;
fn next(&mut self) -> Option<Frame> {
if self.start <= self.end {
let frame = self.start.clone();
self.start.number += 1;
Some(frame)<|fim▁hole|> None
}
}
}
pub trait FrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame>;
fn deallocate_frame(&mut self, frame: Frame);
}
/// Align downwards. Returns the greatest x with alignment `align`
/// so that x <= addr. The alignment must be a power of 2.
pub fn align_down(addr: usize, align: usize) -> usize {
if align.is_power_of_two() {
addr & !(align - 1)
} else if align == 0 {
addr
} else {
panic!("`align` must be a power of 2");
}
}
/// Align upwards. Returns the smallest x with alignment `align`
/// so that x >= addr. The alignment must be a power of 2.
pub fn align_up(addr: usize, align: usize) -> usize {
align_down(addr + align - 1, align)
}
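// Worked examples (illustrative only): with align = 0x1000,
// align_down(0x1234, 0x1000) == 0x1000 and align_up(0x1234, 0x1000) == 0x2000;
// already-aligned addresses are unchanged, e.g. align_up(0x2000, 0x1000) == 0x2000.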
pub fn init(boot_info: &::bootinfo::BootInfo) {
println!("Initializing Memory");
areas::init(boot_info);
info::init(boot_info);
let mut frame_allocator = area_frame_allocator::AreaFrameAllocator::new(PhysicalAddress::new(0), info::kernel().end_physical());
let active_table = paging::remap(&mut frame_allocator);
MEMORY_CONTROLLER.call_once(|| {
Mutex::new(MemoryController {
active_table,
frame_allocator,
})
});
}<|fim▁end|> | } else { |
<|file_name|>test_taxonomy.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
from os import path
import logging
import shutil
from sqlalchemy import create_engine
from . import config
from .config import TestBase
import taxtastic
from taxtastic.taxonomy import Taxonomy, TaxonIntegrityError
import taxtastic.ncbi
import taxtastic.utils
log = logging
datadir = config.datadir
echo = False
dbname = config.ncbi_master_db
class TestTaxonomyBase(TestBase):
def setUp(self):
self.engine = create_engine('sqlite:///' + self.dbname, echo=echo)
self.tax = Taxonomy(self.engine)
def tearDown(self):
self.engine.dispose()
class TestAddNode(TestTaxonomyBase):
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddNode, self).setUp()
def tearDown(self):
pass
def test01(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[{'tax_name': 'foo'}],
source_name='ncbi'
)
lineage = self.tax.lineage('1280_1')
self.assertEqual(lineage['tax_id'], '1280_1')
self.assertEqual(lineage['tax_name'], 'foo')
def test02(self):
new_taxid = '1279_1'
new_taxname = 'between genus and species'
children = ['1280', '1281']
self.tax.add_node(
tax_id=new_taxid,
parent_id='1279',
rank='species_group',
names=[{'tax_name': new_taxname}],
children=children,
source_name='foo'
)
lineage = self.tax.lineage(new_taxid)
self.assertTrue(lineage['tax_id'] == new_taxid)
self.assertTrue(lineage['tax_name'] == new_taxname)
for taxid in children:
lineage = self.tax.lineage(taxid)
self.assertTrue(lineage['parent_id'] == new_taxid)
def test03(self):
new_taxid = '1279_1'
new_taxname = 'between genus and species'
children = ['1280', '1281']
self.assertRaises(
TaxonIntegrityError,
self.tax.add_node,
tax_id=new_taxid,
parent_id='1279',
rank='genus',
names=[{'tax_name': new_taxname}],
children=children,
source_name='ncbi')
def test04(self):
# existing node
self.assertRaises(
ValueError,
self.tax.add_node,
tax_id='1280',
parent_id='1279',
rank='species',
names=[{'tax_name': 'I already exist'}],
source_name='ncbi'
)
def test05(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo', 'is_primary': True},
{'tax_name': 'bar'},<|fim▁hole|> ],
source_name='ncbi'
)
lineage = self.tax.lineage('1280_1')
self.assertEqual(lineage['tax_id'], '1280_1')
self.assertEqual(lineage['tax_name'], 'foo')
def test06(self):
# multiple names, none primary
self.assertRaises(
ValueError,
self.tax.add_node,
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo'},
{'tax_name': 'bar'},
],
source_name='ncbi')
def test07(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo', 'is_primary': True},
{'tax_name': 'bar'},
],
source_name='ncbi',
execute=False
)
self.assertRaises(ValueError, self.tax.lineage, '1280_1')
def test08(self):
# test has_node()
self.assertTrue(self.tax.has_node('1280'))
self.assertFalse(self.tax.has_node('foo'))
class TestAddName(TestTaxonomyBase):
"""
    test tax.add_name
"""
def count_names(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select count(*) from names where tax_id = ?', (tax_id,))
return result.fetchone()[0]
def count_primary_names(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select count(*) from names where tax_id = ? and is_primary',
(tax_id,))
return result.fetchone()[0]
def primary_name(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select tax_name from names where tax_id = ? and is_primary',
(tax_id,))
val = result.fetchone()
return val[0] if val else None
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddName, self).setUp()
def test_name01(self):
names_before = self.count_names('1280')
self.tax.add_name(tax_id='1280', tax_name='SA', source_name='ncbi')
self.assertEqual(names_before + 1, self.count_names('1280'))
def test_name02(self):
# number of primary names should remain 1
names_before = self.count_names('1280')
self.assertEqual(self.count_primary_names('1280'), 1)
self.tax.add_name(tax_id='1280', tax_name='SA', is_primary=True,
source_name='ncbi')
self.tax.add_name(tax_id='1280', tax_name='SA2', is_primary=True,
source_name='ncbi')
self.assertEqual(names_before + 2, self.count_names('1280'))
self.assertEqual(self.count_primary_names('1280'), 1)
def test_name03(self):
# insertion of duplicate row fails
self.tax.add_name(tax_id='1280', tax_name='SA', is_primary=True,
source_name='ncbi')
self.assertRaises(
ValueError, self.tax.add_name, tax_id='1280', tax_name='SA',
is_primary=True, source_name='ncbi')
self.assertEqual(self.primary_name('1280'), 'SA')
class TestGetSource(TestTaxonomyBase):
def setUp(self):
self.dbname = dbname
super(TestGetSource, self).setUp()
def test01(self):
self.assertRaises(ValueError, self.tax.get_source)
def test02(self):
self.assertRaises(ValueError, self.tax.get_source, 1, 'ncbi')
def test03(self):
result = self.tax.get_source(source_id=1)
self.assertDictEqual(result, {
'description': 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip',
'id': 1, 'name': 'ncbi'})
def test04(self):
result = self.tax.get_source(source_name='ncbi')
self.assertDictEqual(result, {
'description': 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip',
'id': 1, 'name': 'ncbi'})
def test05(self):
self.assertRaises(ValueError, self.tax.get_source, source_id=2)
class TestAddSource(TestTaxonomyBase):
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddSource, self).setUp()
def tearDown(self):
pass
def sources(self):
with self.tax.engine.connect() as con:
result = con.execute('select * from source')
return result.fetchall()
def test01(self):
self.tax.add_source('foo')
self.assertEqual(self.sources()[1], (2, 'foo', None))
def test02(self):
self.tax.add_source('ncbi')
self.assertEqual(
self.sources(),
[(1, 'ncbi', 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip')])
def test__node():
engine = create_engine(
'sqlite:///../testfiles/small_taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax._node(None) is None
assert tax._node('91061') == ('1239', 'class')
def test_sibling_of():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.sibling_of(None) is None
assert tax.sibling_of('91061') == '186801'
assert tax.sibling_of('1696') is None
def test_child_of():
engine = create_engine(
'sqlite:///../testfiles/small_taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.child_of(None) is None
assert tax.child_of('1239') == '91061'
assert tax.children_of('1239', 2) == ['91061', '186801']
def test_is_ancestor_of():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.is_ancestor_of('1280', '1239')
assert tax.is_ancestor_of(None, '1239') is False
assert tax.is_ancestor_of('1239', None) is False
def test_rank_and_parent():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.rank(None) is None
assert tax.rank('1239') == 'phylum'
assert tax.rank('1280') == 'species'
assert tax.parent_id(None) is None
assert tax.parent_id('1239') == '2'
def test_species_below():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
t = tax.species_below('1239')
parent_id, rank = tax._node(t)
for t in [None, '1239', '186801', '1117']:
s = tax.species_below(t)
assert t is None or s is None or tax.is_ancestor_of(s, t)
assert s is None or tax.rank(s) == 'species'
def test_is_below():
assert Taxonomy.is_below('species', 'family')
assert Taxonomy.is_below('family', 'kingdom')
assert not Taxonomy.is_below('kingdom', 'family')
assert Taxonomy.ranks_below('species') == []
assert Taxonomy.ranks_below('family') == ['species', 'genus']
def test_nary_subtree():
engine = create_engine(
'sqlite:///../testfiles/small_taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.nary_subtree(None) is None
t = tax.nary_subtree('1239')
assert t == ['1280', '372074', '1579', '1580',
'37734', '420335', '166485', '166486']<|fim▁end|> | |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-12 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True<|fim▁hole|> ]
operations = [
migrations.CreateModel(
name='Malware',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alert_id', models.CharField(max_length=90)),
('alert_type', models.CharField(max_length=80)),
('file_name', models.CharField(max_length=80)),
('computer', models.CharField(max_length=80)),
('contact_group', models.CharField(max_length=80)),
('virus', models.CharField(max_length=80)),
('actual_action', models.CharField(max_length=80)),
('comment', models.CharField(max_length=100)),
('numeric_ip', models.GenericIPAddressField(default='0.0.0.0', protocol='ipv4')),
],
),
]<|fim▁end|> |
dependencies = [ |
<|file_name|>memebot.py<|end_file_name|><|fim▁begin|>import praw
import json
import requests
import tweepy
import time
import os
import csv
import re
import configparser
import urllib.parse
import sys
from glob import glob
from gfycat.client import GfycatClient
from imgurpython import ImgurClient
import distutils.core
import itertools
import photohash
from PIL import Image
import urllib.request
# Location of the configuration file
CONFIG_FILE = 'config.ini'
<|fim▁hole|> else:
return title[:276] + '...'
def save_file(img_url, file_path):
resp = requests.get(img_url, stream=True)
if resp.status_code == 200:
with open(file_path, 'wb') as image_file:
for chunk in resp:
image_file.write(chunk)
# Return the path of the image, which is always the same since we just overwrite images
return file_path
else:
print('[EROR] File failed to download. Status code: ' + str(resp.status_code))
return
def get_media(img_url, post_id):
if any(s in img_url for s in ('i.redd.it', 'i.reddituploads.com')):
file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
file_extension = os.path.splitext(img_url)[-1].lower()
# Fix for issue with i.reddituploads.com links not having a file extension in the URL
if not file_extension:
file_extension += '.jpg'
file_name += '.jpg'
img_url += '.jpg'
# Grab the GIF versions of .GIFV links
# When Tweepy adds support for video uploads, we can use grab the MP4 versions
if (file_extension == '.gifv'):
file_extension = file_extension.replace('.gifv', '.gif')
file_name = file_name.replace('.gifv', '.gif')
img_url = img_url.replace('.gifv', '.gif')
# Download the file
file_path = IMAGE_DIR + '/' + file_name
print('[ OK ] Downloading file at URL ' + img_url + ' to ' + file_path + ', file type identified as ' + file_extension)
img = save_file(img_url, file_path)
return img
elif ('imgur.com' in img_url): # Imgur
try:
client = ImgurClient(IMGUR_CLIENT, IMGUR_CLIENT_SECRET)
except BaseException as e:
print ('[EROR] Error while authenticating with Imgur:', str(e))
return
# Working demo of regex: https://regex101.com/r/G29uGl/2
regex = r"(?:.*)imgur\.com(?:\/gallery\/|\/a\/|\/)(.*?)(?:\/.*|\.|$)"
m = re.search(regex, img_url, flags=0)
if m:
# Get the Imgur image/gallery ID
id = m.group(1)
if any(s in img_url for s in ('/a/', '/gallery/')): # Gallery links
images = client.get_album_images(id)
# Only the first image in a gallery is used
imgur_url = images[0].link
else: # Single image
imgur_url = client.get_image(id).link
# If the URL is a GIFV link, change it to a GIF
file_extension = os.path.splitext(imgur_url)[-1].lower()
if (file_extension == '.gifv'):
file_extension = file_extension.replace('.gifv', '.gif')
img_url = imgur_url.replace('.gifv', '.gif')
# Download the image
file_path = IMAGE_DIR + '/' + id + file_extension
print('[ OK ] Downloading Imgur image at URL ' + imgur_url + ' to ' + file_path)
imgur_file = save_file(imgur_url, file_path)
# Imgur will sometimes return a single-frame thumbnail instead of a GIF, so we need to check for this
if (file_extension == '.gif'):
# Open the file using the Pillow library
img = Image.open(imgur_file)
# Get the MIME type
mime = Image.MIME[img.format]
if (mime == 'image/gif'):
# Image is indeed a GIF, so it can be posted
img.close()
return imgur_file
else:
# Image is not actually a GIF, so don't post it
print('[EROR] Imgur has not processed a GIF version of this link, so it can not be posted')
img.close()
# Delete the image
try:
os.remove(imgur_file)
except BaseException as e:
print ('[EROR] Error while deleting media file:', str(e))
return
else:
return imgur_file
else:
print('[EROR] Could not identify Imgur image/gallery ID in this URL:', img_url)
return
elif ('gfycat.com' in img_url): # Gfycat
gfycat_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
client = GfycatClient()
gfycat_info = client.query_gfy(gfycat_name)
# Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
gfycat_url = gfycat_info['gfyItem']['max2mbGif']
file_path = IMAGE_DIR + '/' + gfycat_name + '.gif'
print('[ OK ] Downloading Gfycat at URL ' + gfycat_url + ' to ' + file_path)
gfycat_file = save_file(gfycat_url, file_path)
return gfycat_file
elif ('giphy.com' in img_url): # Giphy
# Working demo of regex: https://regex101.com/r/o8m1kA/2
regex = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"
m = re.search(regex, img_url, flags=0)
if m:
# Get the Giphy ID
id = m.group(3)
# Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
giphy_url = 'https://media.giphy.com/media/' + id + '/giphy-downsized.gif'
file_path = IMAGE_DIR + '/' + id + '-downsized.gif'
print('[ OK ] Downloading Giphy at URL ' + giphy_url + ' to ' + file_path)
giphy_file = save_file(giphy_url, file_path)
return giphy_file
else:
print('[EROR] Could not identify Giphy ID in this URL:', img_url)
return
else:
print('[WARN] Post', post_id, 'doesn\'t point to an image/GIF:', img_url)
return
def tweet_creator(subreddit_info):
post_dict = {}
print ('[ OK ] Getting posts from Reddit')
for submission in subreddit_info.hot(limit=POST_LIMIT):
        # If the OP has deleted their account, attribute the post to "a deleted user"
        if submission.author is None:
            author_name = "a deleted user"
        else:
            author_name = "/u/" + submission.author.name
if (submission.over_18 and NSFW_POSTS_ALLOWED is False):
# Skip over NSFW posts if they are disabled in the config file
print('[ OK ] Skipping', submission.id, 'because it is marked as NSFW')
continue
else:
            post_dict[strip_title(submission.title)] = [submission.id,submission.url,submission.shortlink,author_name]
return post_dict
def setup_connection_reddit(subreddit):
print ('[ OK ] Setting up connection with Reddit...')
r = praw.Reddit(
user_agent='memebot',
client_id=REDDIT_AGENT,
client_secret=REDDIT_CLIENT_SECRET)
return r.subreddit(subreddit)
def duplicate_check(id):
value = False
with open(CACHE_CSV, 'rt', newline='') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if id in row:
value = True
return value
def hash_check(hash):
if hash:
value = False
        # Only keep the most recent REPOST_LIMIT entries from the cache file
post_list = []
with open(CACHE_CSV, 'rt', newline='') as f:
for line in f:
post_list.append(line)
if len(post_list) > REPOST_LIMIT:
post_list.pop(0)
if any(hash in s for s in post_list):
value = True
else:
value = True
return value
def log_post(id, hash, tweetID):
with open(CACHE_CSV, 'a', newline='') as cache:
date = time.strftime("%d/%m/%Y") + ' ' + time.strftime("%H:%M:%S")
wr = csv.writer(cache, delimiter=',')
wr.writerow([id, date, hash, tweetID])
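# A logged cache row (hypothetical values) ends up looking like:
#   abc123,01/01/2020 12:00:00,f0e1d2c3b4a59687,https://twitter.com/user/status/123/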
def main():
# Make sure logging file and media directory exists
if not os.path.exists(CACHE_CSV):
with open(CACHE_CSV, 'w', newline='') as cache:
default = ['Post','Date and time','Image hash', 'Tweet link']
wr = csv.writer(cache)
wr.writerow(default)
print ('[ OK ] ' + CACHE_CSV + ' file not found, created a new one')
if not os.path.exists(IMAGE_DIR):
os.makedirs(IMAGE_DIR)
print ('[ OK ] ' + IMAGE_DIR + ' folder not found, created a new one')
# Continue with script
subreddit = setup_connection_reddit(SUBREDDIT_TO_MONITOR)
post_dict = tweet_creator(subreddit)
tweeter(post_dict)
def alt_tweeter(post_link, op, username, newestTweet):
try:
# Log into alternate account
auth = tweepy.OAuthHandler(ALT_CONSUMER_KEY, ALT_CONSUMER_SECRET)
auth.set_access_token(ALT_ACCESS_TOKEN, ALT_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
# Post the tweet
tweetText = '@' + username + ' Originally posted by ' + op + ' on Reddit: ' + post_link
print('[ OK ] Posting this on alt Twitter account:', tweetText)
api.update_status(tweetText, newestTweet)
except BaseException as e:
print ('[EROR] Error while posting tweet on alt account:', str(e))
return
def tweeter(post_dict):
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_secret)
api = tweepy.API(auth)
for post in post_dict:
# Grab post details from dictionary
post_id = post_dict[post][0]
if not duplicate_check(post_id): # Make sure post is not a duplicate
file_path = get_media(post_dict[post][1], post_dict[post][0])
post_link = post_dict[post][2]
post_op = post_dict[post][3]
# Make sure the post contains media (if it doesn't, then file_path would be blank)
if (file_path):
# Scan the image against previously-posted images
try:
hash = photohash.average_hash(file_path)
print ('[ OK ] Image hash check:', hash_check(hash))
except:
# Set hash to an empty string if the check failed
hash = ""
print ('[WARN] Could not check image hash, skipping.')
# Only make a tweet if the post has not already been posted (if repost protection is enabled)
                if ((REPOST_PROTECTION is False) or (hash_check(hash) is False)):
print ('[ OK ] Posting this on main twitter account:', post, file_path)
try:
# Post the tweet
api.update_with_media(filename=file_path, status=post)
# Log the tweet
username = api.me().screen_name
latestTweets = api.user_timeline(screen_name = username, count = 1, include_rts = False)
newestTweet = latestTweets[0].id_str
log_post(post_id, hash, 'https://twitter.com/' + username + '/status/' + newestTweet + '/')
# Post alt tweet
if ALT_ACCESS_TOKEN:
alt_tweeter(post_link, post_op, username, newestTweet)
else:
print('[WARN] No authentication info for alternate account in config.ini, skipping alt tweet.')
print('[ OK ] Sleeping for', DELAY_BETWEEN_TWEETS, 'seconds')
time.sleep(DELAY_BETWEEN_TWEETS)
except BaseException as e:
print ('[EROR] Error while posting tweet:', str(e))
# Log the post anyways
log_post(post_id, hash, 'Error while posting tweet: ' + str(e))
else:
print ('[ OK ] Skipping', post_id, 'because it is a repost or Memebot previously failed to post it')
log_post(post_id, hash, 'Post was already tweeted or was identified as a repost')
# Cleanup media file
try:
os.remove(file_path)
print ('[ OK ] Deleted media file at ' + file_path)
except BaseException as e:
print ('[EROR] Error while deleting media file:', str(e))
else:
print ('[ OK ] Ignoring', post_id, 'because there was not a media file downloaded')
else:
print ('[ OK ] Ignoring', post_id, 'because it was already posted')
if __name__ == '__main__':
# Check for updates
try:
with urllib.request.urlopen("https://raw.githubusercontent.com/corbindavenport/memebot/update-check/current-version.txt") as url:
s = url.read()
new_version = s.decode("utf-8").rstrip()
current_version = 3.0 # Current version of script
if (current_version < float(new_version)):
print('IMPORTANT: A new version of Memebot (' + str(new_version) + ') is available! (you have ' + str(current_version) + ')')
print ('IMPORTANT: Get the latest update from here: https://github.com/corbindavenport/memebot/releases')
else:
print('[ OK ] You have the latest version of Memebot (' + str(current_version) + ')')
except BaseException as e:
print ('[EROR] Error while checking for updates:', str(e))
# Make sure config file exists
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
except BaseException as e:
print ('[EROR] Error while reading config file:', str(e))
sys.exit()
# Create variables from config file
CACHE_CSV = config['BotSettings']['CacheFile']
IMAGE_DIR = config['BotSettings']['MediaFolder']
DELAY_BETWEEN_TWEETS = int(config['BotSettings']['DelayBetweenTweets'])
POST_LIMIT = int(config['BotSettings']['PostLimit'])
SUBREDDIT_TO_MONITOR = config['BotSettings']['SubredditToMonitor']
NSFW_POSTS_ALLOWED = bool(distutils.util.strtobool(config['BotSettings']['NSFWPostsAllowed']))
REPOST_PROTECTION = bool(distutils.util.strtobool(config['RepostSettings']['RepostProtection']))
REPOST_LIMIT = int(config['RepostSettings']['RepostLimit'])
ACCESS_TOKEN = config['PrimaryTwitterKeys']['AccessToken']
ACCESS_TOKEN_secret = config['PrimaryTwitterKeys']['AccessTokenSecret']
CONSUMER_KEY = config['PrimaryTwitterKeys']['ConsumerKey']
CONSUMER_SECRET = config['PrimaryTwitterKeys']['ConsumerSecret']
ALT_ACCESS_TOKEN = config['AltTwitterKeys']['AccessToken']
ALT_ACCESS_TOKEN_SECRET = config['AltTwitterKeys']['AccessTokenSecret']
ALT_CONSUMER_KEY = config['AltTwitterKeys']['ConsumerKey']
ALT_CONSUMER_SECRET = config['AltTwitterKeys']['ConsumerSecret']
REDDIT_AGENT = config['Reddit']['Agent']
REDDIT_CLIENT_SECRET = config['Reddit']['ClientSecret']
IMGUR_CLIENT = config['Imgur']['ClientID']
IMGUR_CLIENT_SECRET = config['Imgur']['ClientSecret']
# Set the command line window title on Windows
if os.name == 'nt':
try:
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_secret)
api = tweepy.API(auth)
username = api.me().screen_name
title = '@' + username + ' - Memebot'
except:
title = 'Memebot'
os.system('title ' + title)
# Run the main script
while True:
main()
print('[ OK ] Sleeping for', DELAY_BETWEEN_TWEETS, 'seconds')
time.sleep(DELAY_BETWEEN_TWEETS)
print('[ OK ] Restarting main()...')<|fim▁end|> | def strip_title(title):
# Shortlink is 22 characters long, plus one character for a space
    if len(title) >= 280:
        return title[:276] + '...'
return title |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Frederic Branczyk [email protected]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# <|fim▁hole|># THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE
from indicator import Indicator
from gi.repository import Gtk
def main():
xf_indicator = Indicator()
Gtk.main()<|fim▁end|> | # The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# |
<|file_name|>app.component.ts<|end_file_name|><|fim▁begin|>import { Declaration } from '@angular/compiler/src/i18n/serializers/xml_helper';
import { Component } from '@angular/core';
import {Routes, RouterModule} from '@angular/router';
<|fim▁hole|>})
export class AppComponent {
title = 'app works!';
}<|fim▁end|> | @Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css'], |
<|file_name|>findsong.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env ../jazzshell
"""
Perform song identification by loading up a corpus of harmonic analyses
and comparing parse results to all of them, according to some distance metric.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
import sys
from optparse import OptionParser
from jazzparser.data.parsing import ParseResults
from jazzparser.parsers.cky.parser import DirectedCkyParser
from jazzparser.utils.options import options_help_text, ModuleOption, ModuleOptionError
from jazzparser.data.tonalspace import TonalSpaceAnalysisSet
from jazzparser.formalisms.music_halfspan import Formalism
from jazzparser.utils.tableprint import pprint_table<|fim▁hole|> parser = OptionParser(usage=usage)
parser.add_option("--popt", "--parser-options", dest="popts", action="append", help="specify options for the parser that interprets the gold standard annotations. Type '--popt help' to get a list of options (we use a DirectedCkyParser)")
parser.add_option("-m", "--metric", dest="metric", action="store", help="semantics distance metric to use. Use '-m help' for a list of available metrics")
parser.add_option("--mopt", "--metric-options", dest="mopts", action="append", help="options to pass to the semantics metric. Use with '--mopt help' with -m to see available options")
parser.add_option("-r", "--print-results", dest="print_results", action="store", default=5, type="int", help="number of top search results to print for each query (parse result). Default: 5. Use -1 to print distances from all songs in the corpus")
parser.add_option("-g", "--gold-only", dest="gold_only", action="store_true", help="skip results that have no gold standard sequence associated with them (we can't tell which is the right answer for these)")
parser.add_option("--mc", "--metric-computation", dest="metric_computation", action="store_true", help="output the computation information for the metric between the parse result and each top search result")
options, arguments = parser.parse_args()
# For now, we always use the music_halfspan formalism with this script
# If we wanted to make it generic, we'd just load the formalism according
# to a command-line option
formalism = Formalism
# Process parser options
if options.popts is not None:
poptstr = options.popts
if "help" in [s.strip().lower() for s in poptstr]:
# Output this parser's option help
print options_help_text(DirectedCkyParser.PARSER_OPTIONS, intro="Available options for gold standard interpreter")
sys.exit(0)
poptstr = ":".join(poptstr)
else:
poptstr = ""
popts = ModuleOption.process_option_string(poptstr)
# Check that the options are valid
try:
DirectedCkyParser.check_options(popts)
except ModuleOptionError, err:
        print >>sys.stderr, "Problem with parser options (--popt): %s" % err
sys.exit(1)
# Get a distance metric
# Just check this, as it'll cause problems
if len(formalism.semantics_distance_metrics) == 0:
print "ERROR: the formalism defines no distance metrics, so this "\
"script won't work"
sys.exit(1)
# First get the metric
if options.metric == "help":
# Print out a list of metrics available
print "Available distance metrics:"
print ", ".join([metric.name for metric in \
formalism.semantics_distance_metrics])
sys.exit(0)
if options.metric is None:
# Use the first in the list as default
metric_cls = formalism.semantics_distance_metrics[0]
else:
for m in formalism.semantics_distance_metrics:
if m.name == options.metric:
metric_cls = m
break
else:
# No metric found matching this name
print "No metric '%s'" % options.metric
sys.exit(1)
print >>sys.stderr, "Using distance metric: %s" % metric_cls.name
# Now process the metric options
if options.mopts is not None:
moptstr = options.mopts
if "help" in [s.strip().lower() for s in moptstr]:
# Output this parser's option help
print options_help_text(metric_cls.OPTIONS, intro="Available options for metric '%s'" % metric_cls.name)
sys.exit(0)
moptstr = ":".join(moptstr)
else:
moptstr = ""
mopts = ModuleOption.process_option_string(moptstr)
# Instantiate the metric with these options
metric = metric_cls(options=mopts)
if len(arguments) < 2:
print >>sys.stderr, "Specify a song corpus name and one or more files to read results from"
sys.exit(1)
    # The first argument is the name of a TonalSpaceAnalysisSet
corpus_name = arguments[0]
# Load the corpus file
corpus = TonalSpaceAnalysisSet.load(corpus_name)
# The rest of the args are result files to analyze
res_files = arguments[1:]
# Work out how many results to print out
if options.print_results == -1:
print_up_to = None
else:
print_up_to = options.print_results
ranks = []
num_ranked = 0
for filename in res_files:
# Load the parse results
pres = ParseResults.from_file(filename)
if options.gold_only and pres.gold_sequence is None:
# Skip this sequence altogether if requested
continue
print "######################"
print "Read %s" % filename
# Try to get a correct answer from the PR file
if pres.gold_sequence is None:
print "No correct answer specified in input file"
correct_song = None
else:
# Process the name of the sequence in the same way that
# TonalSpaceAnalysisSet does
# Ideally, they should make a common function call, but let's be
# bad for once
correct_song = pres.gold_sequence.string_name.lower()
print "Correct answer: %s" % correct_song
# Could have an empty result list: skip if it does
if len(pres.semantics) == 0:
print "No results"
# Failed to get any result: if this is one of the sequences that
# is in the corpus, count it as a 0 result. Otherwise, skip:
# we wouldn't have counted it anyway
num_ranked += 1
ranks.append(None)
continue
result = pres.semantics[0][1]
# Compare to each of the songs
distances = []
for name,songsem in corpus:
# Get the distance from this song
dist = metric.distance(result, songsem)
distances.append((name,dist,songsem))
# Sort them to get the closest first
distances.sort(key=lambda x:x[1])
print
# Print out the top results, as many as requested
top_results = distances[:print_up_to]
table = [["","Song","Distance"]] + [
["*" if res[0] == correct_song else "",
"%s" % res[0],
"%.2f" % res[1]] for res in top_results]
pprint_table(sys.stdout, table, default_just=True)
print
if correct_song is not None:
# Look for the correct answer in the results
for rank,(name,distance,__) in enumerate(distances):
# Match up the song name to the correct one
if name == correct_song:
correct_rank = rank
break
else:
# The song name was not found in the corpus at all
correct_rank = None
if correct_rank is None:
print "Song was not found in corpus"
else:
print "Correct answer got rank %d" % correct_rank
# Record the ranks so we can compute the MRR
            ranks.append(None if correct_rank is None else correct_rank + 1)
num_ranked += 1
print
if options.metric_computation:
print "Explanation of top result:"
print metric.print_computation(result, distances[0][2])
print
if num_ranked:
print "\nGot ranks for %d sequences" % num_ranked
# Compute the mean reciprocal rank, the reciprocal of the harmonic mean
# of the ranks of the correct answers
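        # e.g. ranks [1, 2, None] give (1/1 + 1/2 + 0) / 3 = 0.5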
mrr = sum([0.0 if rank is None else 1.0/rank for rank in ranks], 0.0) \
/ len(ranks)
print "Mean reciprocal rank: %f" % mrr
if mrr > 0.0:
hmr = 1.0/mrr
print "Harmonic mean rank: %f" % hmr
succ_ranks = [rank for rank in ranks if rank is not None]
print "\nIncluding only successful parses (%d):" % len(succ_ranks)
mrr_succ = sum([1.0/rank for rank in succ_ranks], 0.0) / len(succ_ranks)
print "Mean reciprocal rank: %f" % mrr_succ
if mrr_succ > 0.0:
hmr_succ = 1.0/mrr_succ
print "Harmonic mean rank: %f" % hmr_succ
else:
print "\nNo results to analyze"
if __name__ == "__main__":
main()<|fim▁end|> |
def main():
usage = "%prog [options] <song-set> <results-file0> [<results-file1> ...]" |
<|file_name|>all_tests.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at<|fim▁hole|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
# Tests for v2 features.
import atom_tests.core_test
import atom_tests.data_test
import atom_tests.http_core_test
import atom_tests.auth_test
import atom_tests.mock_http_core_test
import atom_tests.client_test
import gdata_tests.client_test
import gdata_tests.core_test
import gdata_tests.data_test
import gdata_tests.data_smoke_test
import gdata_tests.client_smoke_test
import gdata_tests.live_client_test
import gdata_tests.gauth_test
import gdata_tests.blogger.data_test
import gdata_tests.blogger.live_client_test
import gdata_tests.maps.data_test
import gdata_tests.maps.live_client_test
import gdata_tests.spreadsheets.data_test
import gdata_tests.spreadsheets.live_client_test
import gdata_tests.projecthosting.data_test
import gdata_tests.projecthosting.live_client_test
import gdata_tests.sites.data_test
import gdata_tests.sites.live_client_test
import gdata_tests.analytics.data_test
import gdata_tests.analytics.live_client_test
import gdata_tests.contacts.live_client_test
import gdata_tests.calendar_resource.live_client_test
import gdata_tests.calendar_resource.data_test
import gdata_tests.apps.emailsettings.data_test
import gdata_tests.apps.emailsettings.live_client_test
def suite():
return unittest.TestSuite((
atom_tests.core_test.suite(),
atom_tests.data_test.suite(),
atom_tests.http_core_test.suite(),
atom_tests.auth_test.suite(),
atom_tests.mock_http_core_test.suite(),
atom_tests.client_test.suite(),
gdata_tests.client_test.suite(),
gdata_tests.core_test.suite(),
gdata_tests.data_test.suite(),
gdata_tests.data_smoke_test.suite(),
gdata_tests.client_smoke_test.suite(),
gdata_tests.live_client_test.suite(),
gdata_tests.gauth_test.suite(),
gdata_tests.blogger.data_test.suite(),
gdata_tests.blogger.live_client_test.suite(),
gdata_tests.maps.data_test.suite(),
gdata_tests.maps.live_client_test.suite(),
gdata_tests.spreadsheets.data_test.suite(),
gdata_tests.spreadsheets.live_client_test.suite(),
gdata_tests.projecthosting.data_test.suite(),
gdata_tests.projecthosting.live_client_test.suite(),
gdata_tests.sites.data_test.suite(),
gdata_tests.sites.live_client_test.suite(),
gdata_tests.analytics.data_test.suite(),
gdata_tests.analytics.live_client_test.suite(),
gdata_tests.contacts.live_client_test.suite(),
gdata_tests.calendar_resource.live_client_test.suite(),
gdata_tests.calendar_resource.data_test.suite(),
gdata_tests.apps.emailsettings.data_test.suite(),
gdata_tests.apps.emailsettings.live_client_test.suite(),))
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())<|fim▁end|> | #
# http://www.apache.org/licenses/LICENSE-2.0
# |
<|file_name|>GSnippetMetadatum.js<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CLASS({
name: 'GSnippetMetadatum',
package: 'foam.navigator.views',
documentation: function() {/*
Specify $$DOC{.view} as a view object or $$DOC{.text} (and optionally
$$DOC{.url}).
*/},
properties: [
{
name: 'text',
type: 'String'
},
{
name: 'url',
type: 'String'
},
{
name: 'view',
defaultValue: ''<|fim▁hole|> defaultValue: ''
}
]
});<|fim▁end|> | },
{
name: 'label', |
<|file_name|>AdminToolItem.java<|end_file_name|><|fim▁begin|>package net.geforcemods.securitycraft.items;<|fim▁hole|>import net.geforcemods.securitycraft.ConfigHandler;
import net.geforcemods.securitycraft.SCContent;
import net.geforcemods.securitycraft.api.IModuleInventory;
import net.geforcemods.securitycraft.api.IOwnable;
import net.geforcemods.securitycraft.api.IPasswordProtected;
import net.geforcemods.securitycraft.blockentities.SecretSignBlockEntity;
import net.geforcemods.securitycraft.misc.ModuleType;
import net.geforcemods.securitycraft.util.PlayerUtils;
import net.geforcemods.securitycraft.util.Utils;
import net.minecraft.ChatFormatting;
import net.minecraft.core.BlockPos;
import net.minecraft.network.chat.FormattedText;
import net.minecraft.network.chat.MutableComponent;
import net.minecraft.network.chat.TextComponent;
import net.minecraft.network.chat.TranslatableComponent;
import net.minecraft.world.InteractionHand;
import net.minecraft.world.InteractionResult;
import net.minecraft.world.InteractionResultHolder;
import net.minecraft.world.entity.player.Player;
import net.minecraft.world.item.Item;
import net.minecraft.world.item.ItemStack;
import net.minecraft.world.item.context.UseOnContext;
import net.minecraft.world.level.Level;
import net.minecraft.world.level.block.entity.BlockEntity;
public class AdminToolItem extends Item {
public AdminToolItem(Item.Properties properties) {
super(properties);
}
@Override
public InteractionResult onItemUseFirst(ItemStack stack, UseOnContext ctx) {
Level level = ctx.getLevel();
BlockPos pos = ctx.getClickedPos();
Player player = ctx.getPlayer();
MutableComponent adminToolName = Utils.localize(getDescriptionId());
if (ConfigHandler.SERVER.allowAdminTool.get()) {
if (!player.isCreative()) {
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.needCreative"), ChatFormatting.DARK_PURPLE);
return InteractionResult.FAIL;
}
InteractionResult briefcaseResult = handleBriefcase(player, ctx.getHand()).getResult();
if (briefcaseResult != InteractionResult.PASS)
return briefcaseResult;
BlockEntity be = level.getBlockEntity(pos);
if (be != null) {
boolean hasInfo = false;
if (be instanceof IOwnable ownable) {
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.owner.name", (ownable.getOwner().getName() == null ? "????" : ownable.getOwner().getName())), ChatFormatting.DARK_PURPLE);
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.owner.uuid", (ownable.getOwner().getUUID() == null ? "????" : ownable.getOwner().getUUID())), ChatFormatting.DARK_PURPLE);
hasInfo = true;
}
if (be instanceof IPasswordProtected passwordProtected) {
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.password", (passwordProtected.getPassword() == null ? "????" : passwordProtected.getPassword())), ChatFormatting.DARK_PURPLE);
hasInfo = true;
}
if (be instanceof IModuleInventory inv) {
List<ModuleType> modules = inv.getInsertedModules();
if (!modules.isEmpty()) {
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.equippedModules"), ChatFormatting.DARK_PURPLE);
for (ModuleType module : modules) {
PlayerUtils.sendMessageToPlayer(player, adminToolName, new TextComponent("- ").append(new TranslatableComponent(module.getTranslationKey())), ChatFormatting.DARK_PURPLE);
}
hasInfo = true;
}
}
if (be instanceof SecretSignBlockEntity signTe) {
PlayerUtils.sendMessageToPlayer(player, adminToolName, new TextComponent(""), ChatFormatting.DARK_PURPLE); //EMPTY
for (int i = 0; i < 4; i++) {
FormattedText text = signTe.getMessage(i, false);
if (text instanceof MutableComponent mutableComponent)
PlayerUtils.sendMessageToPlayer(player, adminToolName, mutableComponent, ChatFormatting.DARK_PURPLE);
}
hasInfo = true;
}
if (!hasInfo)
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.noInfo"), ChatFormatting.DARK_PURPLE);
return InteractionResult.SUCCESS;
}
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.noInfo"), ChatFormatting.DARK_PURPLE);
}
else
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.disabled"), ChatFormatting.DARK_PURPLE);
return InteractionResult.FAIL;
}
@Override
public InteractionResultHolder<ItemStack> use(Level level, Player player, InteractionHand hand) {
if (!player.isCreative()) {
PlayerUtils.sendMessageToPlayer(player, Utils.localize(getDescriptionId()), Utils.localize("messages.securitycraft:adminTool.needCreative"), ChatFormatting.DARK_PURPLE);
return InteractionResultHolder.fail(player.getItemInHand(hand));
}
else
return handleBriefcase(player, hand);
}
private InteractionResultHolder<ItemStack> handleBriefcase(Player player, InteractionHand hand) {
ItemStack adminTool = player.getItemInHand(hand);
if (hand == InteractionHand.MAIN_HAND && player.getOffhandItem().getItem() == SCContent.BRIEFCASE.get()) {
ItemStack briefcase = player.getOffhandItem();
MutableComponent adminToolName = Utils.localize(getDescriptionId());
String ownerName = BriefcaseItem.getOwnerName(briefcase);
String ownerUUID = BriefcaseItem.getOwnerUUID(briefcase);
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.owner.name", ownerName.isEmpty() ? "????" : ownerName), ChatFormatting.DARK_PURPLE);
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.owner.uuid", ownerUUID.isEmpty() ? "????" : ownerUUID), ChatFormatting.DARK_PURPLE);
PlayerUtils.sendMessageToPlayer(player, adminToolName, Utils.localize("messages.securitycraft:adminTool.password", briefcase.hasTag() ? briefcase.getTag().getString("passcode") : "????"), ChatFormatting.DARK_PURPLE);
return InteractionResultHolder.success(adminTool);
}
return InteractionResultHolder.pass(adminTool);
}
}<|fim▁end|> |
import java.util.List;
|
<|file_name|>std_dirs.rs<|end_file_name|><|fim▁begin|>// Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use crate::access_container::{self, AUTHENTICATOR_ENTRY};
use crate::client::AuthClient;
use crate::config::KEY_APPS;
use crate::{AuthError, AuthFuture};
use bincode::serialize;
use futures::{future, Future};
use safe_core::ipc::access_container_enc_key;
use safe_core::mdata_info;
use safe_core::nfs::create_dir;
use safe_core::utils::symmetric_encrypt;
use safe_core::{Client, CoreError, FutureExt, MDataInfo, DIR_TAG};
use safe_nd::{Error as SndError, MDataKind, MDataSeqValue};
use std::collections::HashMap;
/// Default directories to be created at registration.
pub static DEFAULT_PRIVATE_DIRS: [&str; 6] = [
"_documents",
"_downloads",
"_music",
"_pictures",
"_videos",
"_publicNames",
];
/// Publicly accessible default directories to be created upon registration.
pub static DEFAULT_PUBLIC_DIRS: [&str; 1] = ["_public"];
/// Create the root directories and the standard directories for the access container.
pub fn create(client: &AuthClient) -> Box<AuthFuture<()>> {
let c2 = client.clone();
let c3 = client.clone();
let c4 = client.clone();
// Initialise standard directories
let access_container = client.access_container();
let config_dir = client.config_root_dir();
// Try to get default dirs from the access container
let access_cont_fut = access_container::fetch_authenticator_entry(&c2)
.then(move |res| {
match res {
Ok((_, default_containers)) => {
// Make sure that all default dirs have been created
create_std_dirs(&c3, &default_containers)
}
Err(AuthError::CoreError(CoreError::DataError(SndError::NoSuchData))) => {
// Access container hasn't been created yet
let access_cont_value = fry!(random_std_dirs())
.into_iter()
.map(|(name, md_info)| (String::from(name), md_info))
.collect();
let std_dirs_fut = create_std_dirs(&c3, &access_cont_value);
let access_cont_fut =
create_access_container(&c3, &access_container, &access_cont_value);
future::join_all(vec![std_dirs_fut, access_cont_fut])
.map(|_| ())
.into_box()
}
Err(e) => err!(e),
}
})
.into_box();
future::join_all(vec![access_cont_fut, create_config_dir(&c2, &config_dir)])
.map_err(From::from)
.and_then(move |_| {
// Update account packet - root directories have been created successfully
// (so we don't have to recover them after login).
c4.set_std_dirs_created(true);
c4.update_account_packet().map_err(From::from).into_box()
})
.into_box()
}
fn create_config_dir(client: &AuthClient, config_dir: &MDataInfo) -> Box<AuthFuture<()>> {
let config_dir_entries =
btree_map![KEY_APPS.to_vec() => MDataSeqValue { data: Vec::new(), version: 0 }];
let config_dir_entries = fry!(mdata_info::encrypt_entries(config_dir, &config_dir_entries));
create_dir(client, config_dir, config_dir_entries, btree_map![])
.map_err(From::from)
.into_box()
}
fn create_access_container(
client: &AuthClient,
access_container: &MDataInfo,
default_entries: &HashMap<String, MDataInfo>,
) -> Box<AuthFuture<()>> {
let enc_key = client.secret_symmetric_key();
// Create access container
let authenticator_key = fry!(access_container_enc_key(
AUTHENTICATOR_ENTRY,
&enc_key,
fry!(access_container.nonce().ok_or_else(|| AuthError::from(
"Expected to have nonce on access container MDataInfo"
))),
)
.map_err(AuthError::from));
let access_cont_value = fry!(symmetric_encrypt(
&fry!(serialize(default_entries)),
&enc_key,
None,
));
create_dir(
client,
access_container,
btree_map![
authenticator_key => MDataSeqValue { version: 0, data: access_cont_value }
],
btree_map![],
)
.map_err(From::from)
.into_box()
}
/// Generates a list of `MDataInfo` for standard dirs.
/// Returns a collection of standard dirs along with respective `MDataInfo`s.
/// Doesn't actually put data onto the network.
pub fn random_std_dirs() -> Result<Vec<(&'static str, MDataInfo)>, CoreError> {
let pub_dirs = DEFAULT_PUBLIC_DIRS
.iter()
.map(|name| MDataInfo::random_public(MDataKind::Seq, DIR_TAG).map(|dir| (*name, dir)));
let priv_dirs = DEFAULT_PRIVATE_DIRS
.iter()
.map(|name| MDataInfo::random_private(MDataKind::Seq, DIR_TAG).map(|dir| (*name, dir)));<|fim▁hole|>
/// A registration helper function to create the set of default dirs in the users root directory.
pub fn create_std_dirs(
client: &AuthClient,
md_infos: &HashMap<String, MDataInfo>,
) -> Box<AuthFuture<()>> {
let client = client.clone();
let creations: Vec<_> = md_infos
.iter()
.map(|(_, md_info)| {
create_dir(&client, md_info, btree_map![], btree_map![]).map_err(AuthError::from)
})
.collect();
future::join_all(creations).map(|_| ()).into_box()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::run;
use crate::test_utils::create_account_and_login;
use futures::Future;
// Test creation of default dirs.
#[test]
fn creates_default_dirs() {
let auth = create_account_and_login();
unwrap!(run(&auth, |client| {
let client = client.clone();
create_std_dirs(
&client,
&unwrap!(random_std_dirs())
.into_iter()
.map(|(k, v)| (k.to_owned(), v))
.collect(),
)
.then(move |res| {
assert!(res.is_ok());
access_container::fetch_authenticator_entry(&client)
})
.then(move |res| {
let (_, mdata_entries) = unwrap!(res);
assert_eq!(
mdata_entries.len(),
DEFAULT_PUBLIC_DIRS.len() + DEFAULT_PRIVATE_DIRS.len()
);
for key in DEFAULT_PUBLIC_DIRS
.iter()
.chain(DEFAULT_PRIVATE_DIRS.iter())
{
// let's check whether all our entries have been created properly
assert!(mdata_entries.contains_key(*key));
}
Ok(())
})
}));
}
}<|fim▁end|> | priv_dirs.chain(pub_dirs).collect()
} |
<|file_name|>primitive.rs<|end_file_name|><|fim▁begin|>use value::{Value};
// Primitive views are how Eve programs access built-in functions
#[derive(Clone, Debug, Copy)]
pub enum Primitive {
LT,
LTE,
NEQ,
Add,
Subtract,
Multiply,
Divide,
Remainder,
Round,
Split,
Concat,
AsNumber,
AsText,
Count,
Contains,
Sum,
Mean,
StandardDeviation,
}
impl Primitive {
pub fn eval<'a>(&self, input_bindings: &[(usize, usize)], inputs: &[Value], source: &str, errors: &mut Vec<Vec<Value>>) -> Vec<Vec<Value>> {
use primitive::Primitive::*;
use value::Value::*;
let values = input_bindings.iter().enumerate().map(|(ix, &(field_ix, variable_ix))| {
assert_eq!(ix, field_ix);
&inputs[variable_ix]
}).collect::<Vec<_>>();
let mut type_error = || {
errors.push(vec![
String(source.to_owned()),
string!("Type error while calling: {:?} {:?}", self, &values)
]);
vec![]
};
match (*self, &values[..]) {
// NOTE be aware that arguments will be in alphabetical order by field id
(LT, [ref a, ref b]) => if a < b {vec![vec![]]} else {vec![]},
(LTE, [ref a, ref b]) => if a <= b {vec![vec![]]} else {vec![]},
(NEQ, [ref a, ref b]) => if a != b {vec![vec![]]} else {vec![]},
(Add, [ref a, ref b]) => {
match (a.parse_as_f64(), b.parse_as_f64()) {
(Some(a), Some(b)) => vec![vec![Float(a+b)]],
_ => type_error(),
}
}
(Subtract, [ref a, ref b]) => {
match (a.parse_as_f64(), b.parse_as_f64()) {
(Some(a), Some(b)) => vec![vec![Float(a-b)]],
_ => type_error(),
}
}
(Multiply, [ref a, ref b]) => {
match (a.parse_as_f64(), b.parse_as_f64()) {
(Some(a), Some(b)) => vec![vec![Float(a*b)]],
_ => type_error(),
}
}
(Divide, [ref a, ref b]) => {
match (a.parse_as_f64(), b.parse_as_f64()) {
(Some(_), Some(0f64)) => type_error(),
(Some(a), Some(b)) => vec![vec![Float(a/b)]],
_ => type_error(),
}
}
(Remainder, [ref a, ref b]) => {
match (a.parse_as_f64(), b.parse_as_f64()) {
(Some(a), Some(b)) => vec![vec![Float(a%b)]],
_ => type_error(),
}
}
(Round, [ref a, ref b]) => {
match (a.parse_as_f64(), b.parse_as_f64()) {
(Some(a), Some(b)) => vec![vec![Float((a*10f64.powf(b)).round()/10f64.powf(b))]],
_ => type_error(),
}
}
(Contains, [ref inner, ref outer]) => {
let inner_lower = format!("{}", inner).to_lowercase();
let outer_lower = format!("{}", outer).to_lowercase();
vec![vec![Bool(outer_lower.contains(&inner_lower))]]
},
(Split, [ref delimiter, ref text]) => {
format!("{}", text).split(&format!("{}", delimiter)).enumerate().map(|(ix, segment)|
vec![Float((ix + 1) as f64), String(segment.to_owned())]
).collect()
},
(Concat, [ref a, ref b]) => vec![vec![string!("{}{}", a, b)]],
(AsNumber, [ref a]) => {
match a.parse_as_f64() {
Some(a) => vec![vec![Float(a)]],
None => type_error(),
}
}
(AsText, [ref a]) => vec![vec![string!("{}", a)]],
(Count, [&Column(ref column)]) => vec![vec![Float(column.len() as f64)]],
(Sum, [ref a]) => {
match a.parse_as_f64_vec() {
Some(a) => {
if a.len() == 0 {
vec![vec![Float(0f64)]]
} else {
let sum = a.iter().fold(0f64, |acc, value| { acc + value });
vec![vec![Float(sum)]]
}
}
None => type_error(),
}
}
(Mean, [ref a]) => {
match a.parse_as_f64_vec() {
Some(a) => {
if a.len() == 0 {
vec![vec![Float(0f64)]]
} else {
let sum = a.iter().fold(0f64, |acc, value| { acc + value });
let mean = sum / (a.len() as f64);
vec![vec![Float(mean)]]
}
}
None => type_error(),
}
}
(StandardDeviation, [ref a]) => {
match a.parse_as_f64_vec() {
Some(a) => {
if a.len() == 0 {
vec![vec![Float(0f64)]]
} else {
let sum = a.iter().fold(0f64, |acc, value| { acc + value });
let mean = sum / (a.len() as f64);
let sum_squares = a.iter().fold(0f64, |acc, value| { acc + value.powi(2) });
let standard_deviation = ((sum_squares / (a.len() as f64)) - mean.powi(2)).sqrt();
vec![vec![Float(standard_deviation)]]
}
}
None => type_error(),
}
}
_ => type_error(),
}
}
pub fn from_str(string: &str) -> Self {
match string {
"<" => Primitive::LT,
"<=" => Primitive::LTE,
"!=" => Primitive::NEQ,
"+" => Primitive::Add,
"-" => Primitive::Subtract,
"*" => Primitive::Multiply,
"/" => Primitive::Divide,
"remainder" => Primitive::Remainder,
"round" => Primitive::Round,
"contains" => Primitive::Contains,
"split" => Primitive::Split,
"concat" => Primitive::Concat,
"as number" => Primitive::AsNumber,
"as text" => Primitive::AsText,
"count" => Primitive::Count,
"sum" => Primitive::Sum,
"mean" => Primitive::Mean,
"standard deviation" => Primitive::StandardDeviation,
_ => panic!("Unknown primitive: {:?}", string),
}
}
}
// List of (view_id, scalar_input_field_ids, vector_input_field_ids, output_field_ids, description)
pub fn primitives() -> Vec<(&'static str, Vec<&'static str>, Vec<&'static str>, Vec<&'static str>, &'static str)> {
vec![
("<", vec!["A", "B"], vec![], vec![], "Is A less than B?"),
("<=", vec!["A", "B"], vec![], vec![], "Is A less than or equal to B?"),
("!=", vec!["A", "B"], vec![], vec![], "Is A not equal to B?"),
("+", vec!["A", "B"], vec![], vec!["result"], "A plus B."),
("-", vec!["A", "B"], vec![], vec!["result"], "A minus B."),
("*", vec!["A", "B"], vec![], vec!["result"], "A times B."),
("/", vec!["A", "B"], vec![], vec!["result"], "A divided by B."),
("remainder", vec!["A", "B"], vec![], vec!["result"], "The remainder of A after dividing by B."),
("round", vec!["A", "B"], vec![], vec!["result"], "Round A to B decimal places."),
("contains", vec!["inner", "outer"], vec![], vec!["result"], "Does the outer text contain the inner text?"),
("split", vec!["delimiter", "text"], vec![], vec!["ix", "segment"], "Split the text into a new segment at each occurence of the delimiter."),
("concat", vec!["A", "B"], vec![], vec!["result"], "Join the texts A and B together."),
("as number", vec!["A"], vec![], vec!["result"], "Store A internally as a number."),
("as text", vec!["A"], vec![], vec!["result"], "Store A internally as text."),
("count", vec![], vec!["A"], vec!["result"], "Count the number of elements in A."),
("sum", vec![], vec!["A"], vec!["result"], "Sum together the elements of A."),
("mean", vec![], vec!["A"], vec!["result"], "Take the mean of the elements of A."),
("standard deviation", vec![], vec!["A"], vec!["result"], "Take the standard deviation of the elements of A."),
]<|fim▁hole|><|fim▁end|> | } |
<|file_name|>getblocktemplate_proposals.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core Developers
# Copyright (c) 2015 Solarminx
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework import SolariTestFramework
from solarirpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
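# e.g. encodeUNum(300) == b'\x02\x2c\x01': a length byte followed by 300 in little-endian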
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
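# e.g. varlenEncode(300) == b'\xfd\x2c\x01': the 0xfd marker plus a little-endian uint16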
def dblsha(b):
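    # Double SHA-256, as used for Bitcoin transaction and block hashing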
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:<|fim▁hole|> for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
def template_to_bytes(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return blk
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytes(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(SolariTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def run_test(self):
node = self.nodes[0]
node.setgenerate(True, 1) # Mine a block to leave initial block download
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = b'\xff'
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytes(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()<|fim▁end|> | n = []
if len(cur) & 1:
cur.append(cur[-1]) |
<|file_name|>growl.ngfactory.ts<|end_file_name|><|fim▁begin|>/**
* This file is generated by the Angular 2 template compiler.
* Do not edit.
*/
/* tslint:disable */
import * as import0 from '@angular/core/src/linker/ng_module_factory';
import * as import1 from '../../../components/growl/growl';
import * as import2 from '@angular/common/src/common_module';
import * as import3 from '@angular/common/src/localization';
import * as import4 from '@angular/core/src/di/injector';
import * as import5 from '@angular/core/src/i18n/tokens';
import * as import6 from '@angular/core/src/render/api';
import * as import7 from '@angular/core/src/linker/view';
import * as import8 from '@angular/core/src/linker/element';
import * as import9 from '../../../components/dom/domhandler';
import * as import10 from '@angular/core/src/linker/view_utils';
import * as import11 from '@angular/core/src/linker/view_type';
import * as import12 from '@angular/core/src/change_detection/change_detection';
import * as import13 from '@angular/core/src/linker/element_ref';
import * as import14 from '@angular/core/src/change_detection/differs/iterable_differs';
import * as import15 from '@angular/core/src/metadata/view';
import * as import16 from '@angular/core/src/linker/component_factory';
import * as import17 from '@angular/common/src/directives/ng_for';
import * as import18 from '@angular/core/src/linker/template_ref';
import * as import19 from '@angular/core/src/security';
import * as import20 from '@angular/common/src/directives/ng_class';
import * as import21 from '@angular/core/src/change_detection/differs/keyvalue_differs';
class GrowlModuleInjector extends import0.NgModuleInjector<import1.GrowlModule> {
_CommonModule_0:import2.CommonModule;
_GrowlModule_1:import1.GrowlModule;
__LOCALE_ID_2:any;
__NgLocalization_3:import3.NgLocaleLocalization;
__TRANSLATIONS_FORMAT_4:any;
constructor(parent:import4.Injector) {
super(parent,[],[]);
}
get _LOCALE_ID_2():any {
if ((this.__LOCALE_ID_2 == (null as any))) { (this.__LOCALE_ID_2 = (null as any)); }
return this.__LOCALE_ID_2;
}
get _NgLocalization_3():import3.NgLocaleLocalization {
if ((this.__NgLocalization_3 == (null as any))) { (this.__NgLocalization_3 = new import3.NgLocaleLocalization(this._LOCALE_ID_2)); }
return this.__NgLocalization_3;
}
get _TRANSLATIONS_FORMAT_4():any {
if ((this.__TRANSLATIONS_FORMAT_4 == (null as any))) { (this.__TRANSLATIONS_FORMAT_4 = (null as any)); }
return this.__TRANSLATIONS_FORMAT_4;
}
createInternal():import1.GrowlModule {
this._CommonModule_0 = new import2.CommonModule();
this._GrowlModule_1 = new import1.GrowlModule();
return this._GrowlModule_1;
}
getInternal(token:any,notFoundResult:any):any {
if ((token === import2.CommonModule)) { return this._CommonModule_0; }
if ((token === import1.GrowlModule)) { return this._GrowlModule_1; }
if ((token === import5.LOCALE_ID)) { return this._LOCALE_ID_2; }
if ((token === import3.NgLocalization)) { return this._NgLocalization_3; }
if ((token === import5.TRANSLATIONS_FORMAT)) { return this._TRANSLATIONS_FORMAT_4; }
return notFoundResult;
}
destroyInternal():void {
}
}
export const GrowlModuleNgFactory:import0.NgModuleFactory<import1.GrowlModule> = new import0.NgModuleFactory(GrowlModuleInjector,import1.GrowlModule);
var renderType_Growl_Host:import6.RenderComponentType = (null as any);
class _View_Growl_Host0 extends import7.AppView<any> {
_el_0:any;
/*private*/ _appEl_0:import8.AppElement;
_DomHandler_0_4:import9.DomHandler;
_Growl_0_5:import1.Growl;
constructor(viewUtils:import10.ViewUtils,parentInjector:import4.Injector,declarationEl:import8.AppElement) {
super(_View_Growl_Host0,renderType_Growl_Host,import11.ViewType.HOST,viewUtils,parentInjector,declarationEl,import12.ChangeDetectorStatus.CheckAlways);
}
createInternal(rootSelector:string):import8.AppElement {
this._el_0 = this.selectOrCreateHostElement('p-growl',rootSelector,(null as any));
this._appEl_0 = new import8.AppElement(0,(null as any),this,this._el_0);
var compView_0:any = viewFactory_Growl0(this.viewUtils,this.injector(0),this._appEl_0);
this._DomHandler_0_4 = new import9.DomHandler();
this._Growl_0_5 = new import1.Growl(new import13.ElementRef(this._el_0),this._DomHandler_0_4,this.parentInjector.get(import14.IterableDiffers));
this._appEl_0.initComponent(this._Growl_0_5,[],compView_0);
compView_0.create(this._Growl_0_5,this.projectableNodes,(null as any));
this.init([].concat([this._el_0]),[this._el_0],[],[]);
return this._appEl_0;
}
injectorGetInternal(token:any,requestNodeIndex:number,notFoundResult:any):any {
if (((token === import9.DomHandler) && (0 === requestNodeIndex))) { return this._DomHandler_0_4; }
if (((token === import1.Growl) && (0 === requestNodeIndex))) { return this._Growl_0_5; }
return notFoundResult;
}
detectChangesInternal(throwOnChange:boolean):void {
if (!throwOnChange) { this._Growl_0_5.ngDoCheck(); }
this.detectContentChildrenChanges(throwOnChange);
this.detectViewChildrenChanges(throwOnChange);
if (!throwOnChange) { if ((this.numberOfChecks === 0)) { this._Growl_0_5.ngAfterViewInit(); } }
}
destroyInternal():void {
this._Growl_0_5.ngOnDestroy();
}
}
function viewFactory_Growl_Host0(viewUtils:import10.ViewUtils,parentInjector:import4.Injector,declarationEl:import8.AppElement):import7.AppView<any> {
if ((renderType_Growl_Host === (null as any))) { (renderType_Growl_Host = viewUtils.createRenderComponentType('',0,import15.ViewEncapsulation.None,[],{})); }
return new _View_Growl_Host0(viewUtils,parentInjector,declarationEl);
}
export const GrowlNgFactory:import16.ComponentFactory<import1.Growl> = new import16.ComponentFactory<import1.Growl>('p-growl',viewFactory_Growl_Host0,import1.Growl);
const styles_Growl:any[] = [];
var renderType_Growl:import6.RenderComponentType = (null as any);
class _View_Growl0 extends import7.AppView<import1.Growl> {
_text_0:any;
_el_1:any;
_text_2:any;
_anchor_3:any;
/*private*/ _appEl_3:import8.AppElement;
_TemplateRef_3_5:any;
_NgFor_3_6:import17.NgFor;
_text_4:any;
_text_5:any;
/*private*/ _expr_0:any;
/*private*/ _expr_1:any;
constructor(viewUtils:import10.ViewUtils,parentInjector:import4.Injector,declarationEl:import8.AppElement) {
super(_View_Growl0,renderType_Growl,import11.ViewType.COMPONENT,viewUtils,parentInjector,declarationEl,import12.ChangeDetectorStatus.CheckAlways);
}
createInternal(rootSelector:string):import8.AppElement {
const parentRenderNode:any = this.renderer.createViewRoot(this.declarationAppElement.nativeElement);
this._text_0 = this.renderer.createText(parentRenderNode,'\n ',(null as any));
this._el_1 = this.renderer.createElement(parentRenderNode,'div',(null as any));
this.renderer.setElementAttribute(this._el_1,'class','ui-growl ui-widget');
this._text_2 = this.renderer.createText(this._el_1,'\n ',(null as any));
this._anchor_3 = this.renderer.createTemplateAnchor(this._el_1,(null as any));
this._appEl_3 = new import8.AppElement(3,1,this,this._anchor_3);
this._TemplateRef_3_5 = new import18.TemplateRef_(this._appEl_3,viewFactory_Growl1);
this._NgFor_3_6 = new import17.NgFor(this._appEl_3.vcRef,this._TemplateRef_3_5,this.parentInjector.get(import14.IterableDiffers),this.ref);
this._text_4 = this.renderer.createText(this._el_1,'\n ',(null as any));
this._text_5 = this.renderer.createText(parentRenderNode,'\n ',(null as any));
this._expr_0 = import12.UNINITIALIZED;
this._expr_1 = import12.UNINITIALIZED;
this.init([],[
this._text_0,
this._el_1,
this._text_2,
this._anchor_3,
this._text_4,
this._text_5
]
,[],[]);
return (null as any);
}
injectorGetInternal(token:any,requestNodeIndex:number,notFoundResult:any):any {
if (((token === import18.TemplateRef) && (3 === requestNodeIndex))) { return this._TemplateRef_3_5; }
if (((token === import17.NgFor) && (3 === requestNodeIndex))) { return this._NgFor_3_6; }
return notFoundResult;
}
detectChangesInternal(throwOnChange:boolean):void {
var changes:{[key: string]:import12.SimpleChange} = (null as any);
changes = (null as any);
const currVal_1:any = this.context.value;
if (import10.checkBinding(throwOnChange,this._expr_1,currVal_1)) {
this._NgFor_3_6.ngForOf = currVal_1;
if ((changes === (null as any))) { (changes = {}); }
changes['ngForOf'] = new import12.SimpleChange(this._expr_1,currVal_1);
this._expr_1 = currVal_1;
}
if ((changes !== (null as any))) { this._NgFor_3_6.ngOnChanges(changes); }
if (!throwOnChange) { this._NgFor_3_6.ngDoCheck(); }
this.detectContentChildrenChanges(throwOnChange);
const currVal_0:any = this.context.zIndex;
if (import10.checkBinding(throwOnChange,this._expr_0,currVal_0)) {
this.renderer.setElementStyle(this._el_1,'zIndex',((this.viewUtils.sanitizer.sanitize(import19.SecurityContext.STYLE,currVal_0) == (null as any))? (null as any): this.viewUtils.sanitizer.sanitize(import19.SecurityContext.STYLE,currVal_0).toString()));
this._expr_0 = currVal_0;
}
this.detectViewChildrenChanges(throwOnChange);
}
}
export function viewFactory_Growl0(viewUtils:import10.ViewUtils,parentInjector:import4.Injector,declarationEl:import8.AppElement):import7.AppView<import1.Growl> {
if ((renderType_Growl === (null as any))) { (renderType_Growl = viewUtils.createRenderComponentType('c:/Users/eirwn/Documents/primeng-min/components/growl/growl.ts class Growl - inline template',0,import15.ViewEncapsulation.None,styles_Growl,{})); }
return new _View_Growl0(viewUtils,parentInjector,declarationEl);
}
class _View_Growl1 extends import7.AppView<any> {
_el_0:any;
_NgClass_0_3:import20.NgClass;
_text_1:any;
_el_2:any;
_text_3:any;
_el_4:any;
_text_5:any;
_el_6:any;
_NgClass_6_3:import20.NgClass;
_text_7:any;
_el_8:any;
_text_9:any;
_el_10:any;
_text_11:any;
_text_12:any;
_el_13:any;<|fim▁hole|> _text_14:any;
_text_15:any;
_text_16:any;
_el_17:any;
_text_18:any;
_text_19:any;
/*private*/ _expr_0:any;
_map_0:any;
/*private*/ _expr_1:any;
/*private*/ _expr_3:any;
_map_1:any;
/*private*/ _expr_4:any;
/*private*/ _expr_5:any;
/*private*/ _expr_6:any;
constructor(viewUtils:import10.ViewUtils,parentInjector:import4.Injector,declarationEl:import8.AppElement) {
super(_View_Growl1,renderType_Growl,import11.ViewType.EMBEDDED,viewUtils,parentInjector,declarationEl,import12.ChangeDetectorStatus.CheckAlways);
}
createInternal(rootSelector:string):import8.AppElement {
this._el_0 = this.renderer.createElement((null as any),'div',(null as any));
this.renderer.setElementAttribute(this._el_0,'aria-live','polite');
this.renderer.setElementAttribute(this._el_0,'class','ui-growl-item-container ui-state-highlight ui-corner-all ui-shadow');
this._NgClass_0_3 = new import20.NgClass(this.parent.parentInjector.get(import14.IterableDiffers),this.parent.parentInjector.get(import21.KeyValueDiffers),new import13.ElementRef(this._el_0),this.renderer);
this._text_1 = this.renderer.createText(this._el_0,'\n ',(null as any));
this._el_2 = this.renderer.createElement(this._el_0,'div',(null as any));
this.renderer.setElementAttribute(this._el_2,'class','ui-growl-item');
this._text_3 = this.renderer.createText(this._el_2,'\n ',(null as any));
this._el_4 = this.renderer.createElement(this._el_2,'div',(null as any));
this.renderer.setElementAttribute(this._el_4,'class','ui-growl-icon-close fa fa-close');
this._text_5 = this.renderer.createText(this._el_2,'\n ',(null as any));
this._el_6 = this.renderer.createElement(this._el_2,'span',(null as any));
this.renderer.setElementAttribute(this._el_6,'class','ui-growl-image fa fa-2x ui-growl-image-info');
this._NgClass_6_3 = new import20.NgClass(this.parent.parentInjector.get(import14.IterableDiffers),this.parent.parentInjector.get(import21.KeyValueDiffers),new import13.ElementRef(this._el_6),this.renderer);
this._text_7 = this.renderer.createText(this._el_2,'\n ',(null as any));
this._el_8 = this.renderer.createElement(this._el_2,'div',(null as any));
this.renderer.setElementAttribute(this._el_8,'class','ui-growl-message');
this._text_9 = this.renderer.createText(this._el_8,'\n ',(null as any));
this._el_10 = this.renderer.createElement(this._el_8,'span',(null as any));
this.renderer.setElementAttribute(this._el_10,'class','ui-growl-title');
this._text_11 = this.renderer.createText(this._el_10,'',(null as any));
this._text_12 = this.renderer.createText(this._el_8,'\n ',(null as any));
this._el_13 = this.renderer.createElement(this._el_8,'p',(null as any));
this._text_14 = this.renderer.createText(this._el_13,'',(null as any));
this._text_15 = this.renderer.createText(this._el_8,'\n ',(null as any));
this._text_16 = this.renderer.createText(this._el_2,'\n ',(null as any));
this._el_17 = this.renderer.createElement(this._el_2,'div',(null as any));
this.renderer.setElementAttribute(this._el_17,'style','clear: both;');
this._text_18 = this.renderer.createText(this._el_2,'\n ',(null as any));
this._text_19 = this.renderer.createText(this._el_0,'\n ',(null as any));
this._expr_0 = import12.UNINITIALIZED;
this._map_0 = import10.pureProxy4((p0:any,p1:any,p2:any,p3:any):{[key: string]:any} => {
return {
'ui-growl-message-info': p0,
'ui-growl-message-warn': p1,
'ui-growl-message-error': p2,
'ui-growl-message-success': p3
}
;
});
this._expr_1 = import12.UNINITIALIZED;
var disposable_0:Function = this.renderer.listen(this._el_4,'click',this.eventHandler(this._handle_click_4_0.bind(this)));
this._expr_3 = import12.UNINITIALIZED;
this._map_1 = import10.pureProxy4((p0:any,p1:any,p2:any,p3:any):{[key: string]:any} => {
return {
'fa-info-circle': p0,
'fa-warning': p1,
'fa-close': p2,
'fa-check': p3
}
;
});
this._expr_4 = import12.UNINITIALIZED;
this._expr_5 = import12.UNINITIALIZED;
this._expr_6 = import12.UNINITIALIZED;
this.init([].concat([this._el_0]),[
this._el_0,
this._text_1,
this._el_2,
this._text_3,
this._el_4,
this._text_5,
this._el_6,
this._text_7,
this._el_8,
this._text_9,
this._el_10,
this._text_11,
this._text_12,
this._el_13,
this._text_14,
this._text_15,
this._text_16,
this._el_17,
this._text_18,
this._text_19
]
,[disposable_0],[]);
return (null as any);
}
injectorGetInternal(token:any,requestNodeIndex:number,notFoundResult:any):any {
if (((token === import20.NgClass) && (6 === requestNodeIndex))) { return this._NgClass_6_3; }
if (((token === import20.NgClass) && ((0 <= requestNodeIndex) && (requestNodeIndex <= 19)))) { return this._NgClass_0_3; }
return notFoundResult;
}
detectChangesInternal(throwOnChange:boolean):void {
const currVal_0:any = 'ui-growl-item-container ui-state-highlight ui-corner-all ui-shadow';
if (import10.checkBinding(throwOnChange,this._expr_0,currVal_0)) {
this._NgClass_0_3.klass = currVal_0;
this._expr_0 = currVal_0;
}
const currVal_1:any = this._map_0((this.context.$implicit.severity == 'info'),(this.context.$implicit.severity == 'warn'),(this.context.$implicit.severity == 'error'),(this.context.$implicit.severity == 'success'));
if (import10.checkBinding(throwOnChange,this._expr_1,currVal_1)) {
this._NgClass_0_3.ngClass = currVal_1;
this._expr_1 = currVal_1;
}
if (!throwOnChange) { this._NgClass_0_3.ngDoCheck(); }
const currVal_3:any = 'ui-growl-image fa fa-2x ui-growl-image-info';
if (import10.checkBinding(throwOnChange,this._expr_3,currVal_3)) {
this._NgClass_6_3.klass = currVal_3;
this._expr_3 = currVal_3;
}
const currVal_4:any = this._map_1((this.context.$implicit.severity == 'info'),(this.context.$implicit.severity == 'warn'),(this.context.$implicit.severity == 'error'),(this.context.$implicit.severity == 'success'));
if (import10.checkBinding(throwOnChange,this._expr_4,currVal_4)) {
this._NgClass_6_3.ngClass = currVal_4;
this._expr_4 = currVal_4;
}
if (!throwOnChange) { this._NgClass_6_3.ngDoCheck(); }
this.detectContentChildrenChanges(throwOnChange);
const currVal_5:any = import10.interpolate(1,'',this.context.$implicit.summary,'');
if (import10.checkBinding(throwOnChange,this._expr_5,currVal_5)) {
this.renderer.setText(this._text_11,currVal_5);
this._expr_5 = currVal_5;
}
const currVal_6:any = import10.interpolate(1,'',this.context.$implicit.detail,'');
if (import10.checkBinding(throwOnChange,this._expr_6,currVal_6)) {
this.renderer.setText(this._text_14,currVal_6);
this._expr_6 = currVal_6;
}
this.detectViewChildrenChanges(throwOnChange);
}
private _handle_click_4_0($event:any):boolean {
this.markPathToRootAsCheckOnce();
const pd_0:any = ((<any>this.parent.context.remove(this.context.$implicit,this._el_0)) !== false);
return (true && pd_0);
}
}
function viewFactory_Growl1(viewUtils:import10.ViewUtils,parentInjector:import4.Injector,declarationEl:import8.AppElement):import7.AppView<any> {
return new _View_Growl1(viewUtils,parentInjector,declarationEl);
}<|fim▁end|> | |
<|file_name|>2_4_1_a.rs<|end_file_name|><|fim▁begin|>/*
2.4.6:
a) S -> + S S | - S S | a
$> rustc -o parser 2_4_1_a.rs
$> ./parser
*/
static CODE: &'static str = "-+aa-aa";
pub fn sa(mut head: i32) -> i32 {
    match CODE.chars().nth(head as usize) {
None => {
panic!("missing required element!");
},
Some('a') => {
head += 1;
},
Some('+') | Some('-') => {
head += 1;
head = sa(head);
head = sa(head);
},<|fim▁hole|> _ => { panic!("undefind element!"); }
}
head
}
fn main() {
let head = sa(0);
    if head as usize != CODE.len() {
panic!("parsed {} chars, but totally {} chars", head, CODE.len());
}
}<|fim▁end|> | |
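# Illustrative Python sketch of the same recursive-descent grammar
# (S -> + S S | - S S | a) used in the Rust sample above; an assumed
# re-implementation for clarity, not part of the original source.
def parse_s(code, head=0):
    """Return the index just past the S derived at position `head`."""
    if head >= len(code):
        raise ValueError("missing required element!")
    if code[head] == 'a':
        return head + 1
    if code[head] in '+-':
        head = parse_s(code, head + 1)  # first operand
        return parse_s(code, head)      # second operand
    raise ValueError("undefined element!")

assert parse_s("-+aa-aa") == len("-+aa-aa")  # the whole input parses as one S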
<|file_name|>test_tasks.py<|end_file_name|><|fim▁begin|>from unittest import mock
import os
import pytest
from django.conf import settings
from waffle.testutils import override_switch
from olympia import amo
from olympia.addons.tasks import (
recreate_theme_previews,
update_addon_average_daily_users,
update_addon_hotness,
update_addon_weekly_downloads,
)
from olympia.amo.tests import addon_factory, root_storage
from olympia.versions.models import VersionPreview
@pytest.mark.django_db
def test_recreate_theme_previews():
xpi_path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/mozilla_static_theme.zip'
)
addon_without_previews = addon_factory(type=amo.ADDON_STATICTHEME)
root_storage.copy_stored_file(
xpi_path, addon_without_previews.current_version.file.file_path
)
addon_with_previews = addon_factory(type=amo.ADDON_STATICTHEME)
root_storage.copy_stored_file(
xpi_path, addon_with_previews.current_version.file.file_path
)
VersionPreview.objects.create(
version=addon_with_previews.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]},
)
assert addon_without_previews.current_previews.count() == 0
assert addon_with_previews.current_previews.count() == 1
recreate_theme_previews([addon_without_previews.id, addon_with_previews.id])
assert addon_without_previews.reload().current_previews.count() == 2
assert addon_with_previews.reload().current_previews.count() == 2
sizes = addon_without_previews.current_previews.values_list('sizes', flat=True)
renderings = amo.THEME_PREVIEW_RENDERINGS
assert list(sizes) == [
{
'image': list(renderings['firefox']['full']),
'thumbnail': list(renderings['firefox']['thumbnail']),
'image_format': renderings['firefox']['image_format'],
'thumbnail_format': renderings['firefox']['thumbnail_format'],<|fim▁hole|> 'image_format': renderings['amo']['image_format'],
'thumbnail_format': renderings['amo']['thumbnail_format'],
},
]
PATCH_PATH = 'olympia.addons.tasks'
@pytest.mark.django_db
@mock.patch(f'{PATCH_PATH}.parse_addon')
def test_create_missing_theme_previews(parse_addon_mock):
parse_addon_mock.return_value = {}
theme = addon_factory(type=amo.ADDON_STATICTHEME)
amo_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={
'image': amo.THEME_PREVIEW_RENDERINGS['amo']['full'],
'thumbnail': amo.THEME_PREVIEW_RENDERINGS['amo']['thumbnail'],
'thumbnail_format': amo.THEME_PREVIEW_RENDERINGS['amo']['thumbnail_format'],
'image_format': amo.THEME_PREVIEW_RENDERINGS['amo']['image_format'],
},
)
firefox_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={
'image': amo.THEME_PREVIEW_RENDERINGS['firefox']['full'],
'thumbnail': amo.THEME_PREVIEW_RENDERINGS['firefox']['thumbnail'],
},
)
# add another extra preview size that should be ignored
extra_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]},
)
# addon has all the complete previews already so skip when only_missing=True
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 0
assert resize.call_count == 0
recreate_theme_previews([theme.id], only_missing=False)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# If the add-on is missing a preview, we call generate_static_theme_preview
VersionPreview.objects.get(id=amo_preview.id).delete()
firefox_preview.save()
extra_preview.save()
assert VersionPreview.objects.count() == 2
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 1
assert resize.call_count == 0
    # Preview has the correct dimensions but the wrong format, so call
    # generate_static_theme_preview
amo_preview.sizes['image_format'] = 'foo'
amo_preview.save()
firefox_preview.save()
extra_preview.save()
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# But we don't do the full regeneration to just get new thumbnail sizes or formats
amo_preview.sizes['thumbnail'] = [666, 444]
amo_preview.sizes['image_format'] = 'svg'
amo_preview.save()
assert amo_preview.thumbnail_dimensions == [666, 444]
firefox_preview.sizes['thumbnail_format'] = 'gif'
firefox_preview.save()
assert firefox_preview.get_format('thumbnail') == 'gif'
extra_preview.save()
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 0 # not called
assert resize.call_count == 2
amo_preview.reload()
assert amo_preview.thumbnail_dimensions == [720, 92]
firefox_preview.reload()
assert firefox_preview.get_format('thumbnail') == 'png'
assert VersionPreview.objects.count() == 3
@pytest.mark.django_db
def test_update_addon_average_daily_users():
addon = addon_factory(average_daily_users=0)
count = 123
data = [(addon.guid, count)]
assert addon.average_daily_users == 0
update_addon_average_daily_users(data)
addon.refresh_from_db()
assert addon.average_daily_users == count
@pytest.mark.django_db
@override_switch('local-statistics-processing', active=True)
def test_update_deleted_addon_average_daily_users():
addon = addon_factory(average_daily_users=0)
addon.delete()
count = 123
data = [(addon.guid, count)]
assert addon.average_daily_users == 0
update_addon_average_daily_users(data)
addon.refresh_from_db()
assert addon.average_daily_users == count
@pytest.mark.django_db
def test_update_addon_hotness():
addon1 = addon_factory(hotness=0, status=amo.STATUS_APPROVED)
addon2 = addon_factory(hotness=123, status=amo.STATUS_APPROVED)
addon3 = addon_factory(hotness=123, status=amo.STATUS_AWAITING_REVIEW)
averages = {
addon1.guid: {'avg_this_week': 213467, 'avg_three_weeks_before': 123467},
addon2.guid: {
'avg_this_week': 1,
'avg_three_weeks_before': 1,
},
addon3.guid: {'avg_this_week': 213467, 'avg_three_weeks_before': 123467},
}
update_addon_hotness(averages=averages.items())
addon1.refresh_from_db()
addon2.refresh_from_db()
addon3.refresh_from_db()
assert addon1.hotness > 0
# Too low averages so we set the hotness to 0.
assert addon2.hotness == 0
# We shouldn't have processed this add-on.
assert addon3.hotness == 123
@pytest.mark.django_db
def test_update_addon_weekly_downloads():
addon = addon_factory(weekly_downloads=0)
count = 123
data = [(addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
@pytest.mark.django_db
def test_update_addon_weekly_downloads_ignores_deleted_addons():
guid = 'some@guid'
deleted_addon = addon_factory(guid=guid)
deleted_addon.delete()
deleted_addon.update(guid=None)
addon = addon_factory(guid=guid, weekly_downloads=0)
count = 123
data = [(addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
@pytest.mark.django_db
def test_update_addon_weekly_downloads_skips_non_existent_addons():
addon = addon_factory(weekly_downloads=0)
count = 123
invalid_hashed_guid = 'does.not@exist'
data = [(invalid_hashed_guid, 0), (addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count<|fim▁end|> | },
{
'image': list(renderings['amo']['full']),
'thumbnail': list(renderings['amo']['thumbnail']), |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Bootstrap installation of setuptools
from ez_setup import use_setuptools
use_setuptools()
import os
import sys
from fnmatch import fnmatchcase
from distutils.util import convert_path
from propane_distribution import cmdclassdict
from setuptools import setup, find_packages
from engineer import version
PROJECT = 'engineer'
################################################################################
# find_package_data written by Ian Bicking.
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:<|fim▁hole|> for pattern in exclude:
if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
################################################################################
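# Illustrative use of find_package_data (an assumed example, not from the
# original file): for a tree like engineer/templates/base.html, the call
#   find_package_data(PROJECT, package=PROJECT, only_in_packages=False)
# returns something like {'engineer': ['templates/base.html', ...]}, which is
# exactly the shape setup() expects for its package_data argument (see the
# setup() call at the bottom of this file).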
# noinspection PyShadowingBuiltins
def get_install_requirements(requirements_file='requirements.txt'):
requirements = []
with open(requirements_file) as file:
temp = file.readlines()
temp = [i[:-1] for i in temp]
for line in temp:
if line is None or line == '' or line.startswith(('#', '-e', '-r')):
continue
else:
requirements.append(line)
return requirements
# noinspection PyShadowingBuiltins
def get_readme():
with open('README.md') as file:
return file.read()
setup(
name=PROJECT,
version=version.string,
author='Tyler Butler',
author_email='[email protected]',
platforms='any',
packages=find_packages(),
entry_points={
'console_scripts': [
'engineer=engineer.engine:cmdline',
'engineer_dev=engineer.devtools:main [dev]'
],
},
url='http://github.com/tylerbutler/engineer',
license='MIT',
description='A static website generator.',
long_description=get_readme(),
install_requires=get_install_requirements(),
tests_require=get_install_requirements('requirements_tests.txt'),
extras_require={
'dev': ['argh', 'clint']
},
cmdclass=cmdclassdict,
include_package_data=True,
package_data=find_package_data(PROJECT,
package=PROJECT,
only_in_packages=False),
# Setting to False doesn't create an egg - easier to debug and hack on
zip_safe=True,
)<|fim▁end|> | stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False |
<|file_name|>unit.py<|end_file_name|><|fim▁begin|>"""
Header Unit Class
"""
### INCLUDES ###
import logging
from gate.conversions import round_int
from variable import HeaderVariable
from common import MIN_ALARM, MAX_ALARM
### CONSTANTS ###
## Logger ##
LOGGER = logging.getLogger(__name__)
# LOGGER.setLevel(logging.DEBUG)
### CLASSES ###
class HeaderUnit(HeaderVariable):
""" ADC Unit Class"""
def __init__(self, formula, **kwargs):
"""
Initializes header unit, done as part of Header initialization by using provided unit dictionary.
:param formula: formula to calculate this variable. You can use any internal constant names or
internal variable names in this formula that have been declared earlier.
:param measuring_units: Official unit name that will be displayed to user via web interface.
:param min_value: Minimum constant value or a formula to calculate it. Used for validation.
:param max_value: Maximum constant value or a formula to calculate it. Used for validation.
:param str_format: Specify string formatting. Used for display and logs.
:return: Header Unit instance
"""
defaults = {
# Local Must Haves
'formula': formula,
# Internal
'_external': True,
# Defaults
'measuring_units': '',
'min_value': 0,
'max_value': 100,
# Min Alarm
'min_alarm_message': MIN_ALARM,
# Max Alarm
'max_alarm_message': MAX_ALARM,
'step': 0.01,
'str_format': '{0:.2f}'
}
defaults.update(kwargs)
super(HeaderUnit, self).__init__(**defaults)
# Fetch Value Methods
def get_min(self, provider):
"""
Get minimum value for the selected unit. Either fetch static value or calculate using internal formula.
:param provider: data provider we are working with
:return: minimum value for the selected unit.
"""
return self._get_min_max('min', provider)
def get_max(self, provider):
"""
Get maximum value for the selected unit. Either fetch static value or calculate using internal formula.
:param provider: data provider we are working with
:return: maximum value for the selected unit.
"""
return self._get_min_max('max', provider)
def _get_min_max(self, selector, provider):
"""
Internal shortcut for min/max value fetch/calculation
:param selector: ``min`` or ``max``
:param provider: data provider we are working with
:return: min/max value
"""
output = None
if selector in ('min', 'max'):
if self.enables(provider, 'const_set'):
_selector_value = self[selector + '_value']
if type(_selector_value) in (int, float):
# We have constant value!
output = _selector_value
else:
# We have another constant or variable!
for node_field in ('constants', 'data_out'):
if _selector_value in provider[node_field][self['data_field']]:
output = provider[node_field][self['data_field']][_selector_value]
break
if output is not None:
_rounding_scheme = {'min': 'floor', 'max': 'ceil'}
output = round_int(output, _rounding_scheme[selector], 0)
return output
def get_float(self, provider, data_in=None):
"""
Fetches current value for the selected units if log data is not provided.
Otherwise, applies formulas using provided data_in and fetches results dictionary.
:param provider: data provider that we are working with
:return: current value dictionary/calculated value dictionary using log data
"""
output = None
# if data_in is None:
# header_enable = self.enables(provider, 'live_enables')
# header_enable |= self.enables(provider, 'diag_enables')
# else:
# header_enable = self.enables(provider, 'log_enables')
header_enable = self.enables(provider, 'const_set')
<|fim▁hole|> if header_enable:
data_out = {}
if data_in is None:
data_out = provider['data_out'][self['data_field']]
elif self['data_field'] in data_in:
data_out = data_in[self['data_field']]
if self['internal_name'] in data_out:
output = data_out[self['internal_name']]
return output
def get_string(self, provider, data_in=None):
"""
Fetches current value for the selected units if log data is not provided.
Otherwise, applies formulas using provided data_in and fetches results dictionary.
:param provider: data provider that we are working with
:return: current value dictionary/calculated value dictionary using log data
"""
output = self.get_float(provider, data_in)
if output is not None:
if type(self['str_format']) in (str, unicode):
output = self['str_format'].format(output)
else:
output = self['str_format'](output)
return output<|fim▁end|> | |
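# Illustrative sketch (an assumption, not part of the module above): a
# HeaderUnit 'str_format' may be either a format string or a callable, which
# is why get_string branches on its type.
format_as_string = '{0:.2f}'
def format_as_callable(value):
    return '%.2f V' % value
print(format_as_string.format(3.14159))  # -> 3.14
print(format_as_callable(3.14159))       # -> 3.14 V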
<|file_name|>menu.js<|end_file_name|><|fim▁begin|>import collapsibleFactory from '../common/collapsible';
import collapsibleGroupFactory from '../common/collapsible-group';
const PLUGIN_KEY = 'menu';
/*
* Manage the behaviour of a menu
* @param {jQuery} $menu<|fim▁hole|> constructor($menu) {
this.$menu = $menu;
this.$body = $('body');
this.hasMaxMenuDisplayDepth = this.$body.find('.navPages-list').hasClass('navPages-list-depth-max');
// Init collapsible
this.collapsibles = collapsibleFactory('[data-collapsible]', { $context: this.$menu });
this.collapsibleGroups = collapsibleGroupFactory($menu);
// Auto-bind
this.onMenuClick = this.onMenuClick.bind(this);
this.onDocumentClick = this.onDocumentClick.bind(this);
// Listen
this.bindEvents();
}
collapseAll() {
this.collapsibles.forEach(collapsible => collapsible.close());
this.collapsibleGroups.forEach(group => group.close());
}
collapseNeighbors($neighbors) {
const $collapsibles = collapsibleFactory('[data-collapsible]', { $context: $neighbors });
$collapsibles.forEach($collapsible => $collapsible.close());
}
bindEvents() {
this.$menu.on('click', this.onMenuClick);
this.$body.on('click', this.onDocumentClick);
}
unbindEvents() {
this.$menu.off('click', this.onMenuClick);
this.$body.off('click', this.onDocumentClick);
}
onMenuClick(event) {
event.stopPropagation();
if (this.hasMaxMenuDisplayDepth) {
const $neighbors = $(event.target).parent().siblings();
this.collapseNeighbors($neighbors);
}
}
onDocumentClick() {
this.collapseAll();
}
}
/*
* Create a new Menu instance
* @param {string} [selector]
* @return {Menu}
*/
export default function menuFactory(selector = `[data-${PLUGIN_KEY}]`) {
const $menu = $(selector).eq(0);
const instanceKey = `${PLUGIN_KEY}Instance`;
const cachedMenu = $menu.data(instanceKey);
if (cachedMenu instanceof Menu) {
return cachedMenu;
}
const menu = new Menu($menu);
$menu.data(instanceKey, menu);
return menu;
}<|fim▁end|> | */
class Menu { |
<|file_name|>parser.py<|end_file_name|><|fim▁begin|>"""ApacheParser is a member object of the ApacheConfigurator class."""
import copy
import fnmatch
import logging
import os
import re
import subprocess
import sys
import six
from certbot import errors
from certbot_apache import constants
logger = logging.getLogger(__name__)
class ApacheParser(object):
"""Class handles the fine details of parsing the Apache Configuration.
.. todo:: Make parsing general... remove sites-available etc...
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar set modules: All module names that are currently enabled.
:ivar dict loc: Location to place directives, root - configuration origin,
default - user config file, name - NameVirtualHost,
"""
arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
fnmatch_chars = set(["*", "?", "\\", "[", "]"])
def __init__(self, aug, root, vhostroot=None, version=(2, 4),
configurator=None):
# Note: Order is important here.
# Needed for calling save() with reverter functionality that resides in
# AugeasConfigurator superclass of ApacheConfigurator. This resolves
# issues with aug.load() after adding new files / defines to parse tree
self.configurator = configurator
# This uses the binary, so it can be done first.
# https://httpd.apache.org/docs/2.4/mod/core.html#define
# https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
# This only handles invocation parameters and Define directives!
self.parser_paths = {}
self.variables = {}
if version >= (2, 4):
self.update_runtime_variables()
self.aug = aug
# Find configuration root and make sure augeas can parse it.
self.root = os.path.abspath(root)
self.loc = {"root": self._find_config_root()}
self.parse_file(self.loc["root"])
# This problem has been fixed in Augeas 1.0
self.standardize_excl()
# Temporarily set modules to be empty, so that find_dirs can work
# https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
# This needs to come before locations are set.
self.modules = set()
self.init_modules()
# Set up rest of locations
self.loc.update(self._set_locations())
self.existing_paths = copy.deepcopy(self.parser_paths)
# Must also attempt to parse additional virtual host root
if vhostroot:
self.parse_file(os.path.abspath(vhostroot) + "/" +
constants.os_constant("vhost_files"))
# check to see if there were unparsed define statements
if version < (2, 4):
if self.find_dir("Define", exclude=False):
raise errors.PluginError("Error parsing runtime variables")
def add_include(self, main_config, inc_path):
"""Add Include for a new configuration file if one does not exist
:param str main_config: file path to main Apache config file
:param str inc_path: path of file to include
"""
if len(self.find_dir(case_i("Include"), inc_path)) == 0:
logger.debug("Adding Include %s to %s",
inc_path, get_aug_path(main_config))
self.add_dir(
get_aug_path(main_config),
"Include", inc_path)
# Add new path to parser paths
new_dir = os.path.dirname(inc_path)
new_file = os.path.basename(inc_path)
if new_dir in self.existing_paths.keys():
# Add to existing path
self.existing_paths[new_dir].append(new_file)
else:
# Create a new path
self.existing_paths[new_dir] = [new_file]
def init_modules(self):
"""Iterates on the configuration until no new modules are loaded.
        .. todo:: This should be attempted with a binary to avoid
the iteration issue. Else... parse and enable mods at same time.
"""
# Since modules are being initiated... clear existing set.
self.modules = set()
matches = self.find_dir("LoadModule")
iterator = iter(matches)
# Make sure prev_size != cur_size for do: while: iteration
prev_size = -1
while len(self.modules) != prev_size:
prev_size = len(self.modules)
for match_name, match_filename in six.moves.zip(
iterator, iterator):
mod_name = self.get_arg(match_name)
mod_filename = self.get_arg(match_filename)
if mod_name and mod_filename:
self.modules.add(mod_name)
self.modules.add(os.path.basename(mod_filename)[:-2] + "c")
else:
logger.debug("Could not read LoadModule directive from " +
"Augeas path: {0}".format(match_name[6:]))
def update_runtime_variables(self):
""""
.. note:: Compile time variables (apache2ctl -V) are not used within
the dynamic configuration files. These should not be parsed or
interpreted.
.. todo:: Create separate compile time variables...
simply for arg_get()
"""
stdout = self._get_runtime_cfg()
variables = dict()
matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
try:
matches.remove("DUMP_RUN_CFG")
except ValueError:
return
for match in matches:
if match.count("=") > 1:
logger.error("Unexpected number of equal signs in "
"runtime config dump.")
raise errors.PluginError(
"Error parsing Apache runtime variables")
parts = match.partition("=")
variables[parts[0]] = parts[2]
self.variables = variables
def _get_runtime_cfg(self): # pylint: disable=no-self-use
"""Get runtime configuration info.
:returns: stdout from DUMP_RUN_CFG
"""
try:
proc = subprocess.Popen(
constants.os_constant("define_cmd"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.error(
"Error running command %s for runtime parameters!%s",
constants.os_constant("define_cmd"), os.linesep)
raise errors.MisconfigurationError(
"Error accessing loaded Apache parameters: %s",
constants.os_constant("define_cmd"))
# Small errors that do not impede
if proc.returncode != 0:
logger.warning("Error in checking parameter list: %s", stderr)
raise errors.MisconfigurationError(
"Apache is unable to check whether or not the module is "
"loaded because Apache is misconfigured.")
return stdout
def filter_args_num(self, matches, args): # pylint: disable=no-self-use
"""Filter out directives with specific number of arguments.
This function makes the assumption that all related arguments are given
in order. Thus /files/apache/directive[5]/arg[2] must come immediately
after /files/apache/directive[5]/arg[1]. Runs in 1 linear pass.
:param string matches: Matches of all directives with arg nodes
:param int args: Number of args you would like to filter
:returns: List of directives that contain # of arguments.
(arg is stripped off)
"""
filtered = []
if args == 1:
for i in range(len(matches)):
if matches[i].endswith("/arg"):
filtered.append(matches[i][:-4])
else:
for i in range(len(matches)):
if matches[i].endswith("/arg[%d]" % args):
# Make sure we don't cause an IndexError (end of list)
# Check to make sure arg + 1 doesn't exist
if (i == (len(matches) - 1) or
not matches[i + 1].endswith("/arg[%d]" %
(args + 1))):
filtered.append(matches[i][:-len("/arg[%d]" % args)])
return filtered
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
"""Adds directive and value to IfMod ssl block.
Adds given directive and value along configuration path within
an IfMod mod_ssl.c block. If the IfMod block does not exist in
the file, it is created.
:param str aug_conf_path: Desired Augeas config path to add directive
:param str directive: Directive you would like to add, e.g. Listen
:param args: Values of the directive; str "443" or list of str
:type args: list
"""
# TODO: Add error checking code... does the path given even exist?
# Does it throw exceptions?
if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
# IfModule can have only one valid argument, so append after
self.aug.insert(if_mod_path + "arg", "directive", False)
nvh_path = if_mod_path + "directive[1]"
self.aug.set(nvh_path, directive)
if len(args) == 1:
self.aug.set(nvh_path + "/arg", args[0])
else:
for i, arg in enumerate(args):
self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)
def _get_ifmod(self, aug_conf_path, mod):
"""Returns the path to <IfMod mod> and creates one if it doesn't exist.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
"""
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
if len(if_mods) == 0:
self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
# Strip off "arg" at end of first ifmod path
return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, args):
"""Appends directive to the end fo the file given by aug_conf_path.
.. note:: Not added to AugeasConfigurator because it may depend
on the lens
:param str aug_conf_path: Augeas configuration path to add directive
:param str directive: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(
"%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
else:
self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
def find_dir(self, directive, arg=None, start=None, exclude=True):
"""Finds directive in the configuration.
Recursively searches through config files to find directives
Directives should be in the form of a case insensitive regex currently
.. todo:: arg should probably be a list
.. todo:: arg search currently only supports direct matching. It does
not handle the case of variables or quoted arguments. This should
be adapted to use a generic search for the directive and then do a
case-insensitive self.get_arg filter
Note: Augeas is inherently case sensitive while Apache is case
insensitive. Augeas 1.0 allows case insensitive regexes like
regexp(/Listen/, "i"), however the version currently supported
by Ubuntu 0.10 does not. Thus I have included my own case insensitive
transformation by calling case_i() on everything to maintain
compatibility.
:param str directive: Directive to look for
:param arg: Specific value directive must have, None if all should
be considered
:type arg: str or None
:param str start: Beginning Augeas path to begin looking
:param bool exclude: Whether or not to exclude directives based on
variables and enabled modules
"""
# Cannot place member variable in the definition of the function so...
if not start:
start = get_aug_path(self.loc["root"])
# No regexp code
# if arg is None:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive + "']/arg")
# else:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive +
# "']/* [self::arg='" + arg + "']")
# includes = self.aug.match(start +
# "//* [self::directive='Include']/* [label()='arg']")
regex = "(%s)|(%s)|(%s)" % (case_i(directive),
case_i("Include"),
case_i("IncludeOptional"))
matches = self.aug.match(
"%s//*[self::directive=~regexp('%s')]" % (start, regex))
if exclude:
matches = self._exclude_dirs(matches)
if arg is None:
arg_suffix = "/arg"
else:
arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)
ordered_matches = []
# TODO: Wildcards should be included in alphabetical order
# https://httpd.apache.org/docs/2.4/mod/core.html#include
for match in matches:
dir_ = self.aug.get(match).lower()
if dir_ == "include" or dir_ == "includeoptional":
ordered_matches.extend(self.find_dir(
directive, arg,
self._get_include_path(self.get_arg(match + "/arg")),
exclude))
# This additionally allows Include
if dir_ == directive.lower():
ordered_matches.extend(self.aug.match(match + arg_suffix))
return ordered_matches
def get_arg(self, match):
"""Uses augeas.get to get argument value and interprets result.
This also converts all variables and parameters appropriately.
"""
value = self.aug.get(match)
# No need to strip quotes for variables, as apache2ctl already does
# this, but we do need to strip quotes for all normal arguments.
# Note: normal argument may be a quoted variable
# e.g. strip now, not later
if not value:
return None
else:
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
def _exclude_dirs(self, matches):
"""Exclude directives that are not loaded into the configuration."""
filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
else:
valid_matches.append(match)
return valid_matches
def _pass_filter(self, match, filter_):
"""Determine if directive passes a filter.
:param str match: Augeas path
:param list filter: list of tuples of form
[("lowercase if directive", set of relevant parameters)]
"""
match_l = match.lower()
last_match_idx = match_l.find(filter_[0])
while last_match_idx != -1:
# Check args
end_of_if = match_l.find("/", last_match_idx)
# This should be aug.get (vars are not used e.g. parser.aug_get)
expression = self.aug.get(match[:end_of_if] + "/arg")
if expression.startswith("!"):
# Strip off "!"
if expression[1:] in filter_[1]:
return False
else:
if expression not in filter_[1]:
return False
last_match_idx = match_l.find(filter_[0], end_of_if)
return True
def _get_include_path(self, arg):
"""Converts an Apache Include directive into Augeas path.
Converts an Apache Include directive argument into an Augeas
searchable path
.. todo:: convert to use os.path.join()
:param str arg: Argument of Include directive
:returns: Augeas path string
:rtype: str
"""
# Check to make sure only expected characters are used <- maybe remove
# validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
# matchObj = validChars.match(arg)
# if matchObj.group() != arg:
# logger.error("Error: Invalid regexp characters in %s", arg)
# return []
# Remove beginning and ending quotes
arg = arg.strip("'\"")
# Standardize the include argument based on server root
if not arg.startswith("/"):
# Normpath will condense ../
arg = os.path.normpath(os.path.join(self.root, arg))
else:
arg = os.path.normpath(arg)
# Attempts to add a transform to the file if one does not already exist
if os.path.isdir(arg):
self.parse_file(os.path.join(arg, "*"))
else:
self.parse_file(arg)
# Argument represents an fnmatch regular expression, convert it
# Split up the path and convert each into an Augeas accepted regex
# then reassemble
split_arg = arg.split("/")
for idx, split in enumerate(split_arg):
if any(char in ApacheParser.fnmatch_chars for char in split):
# Turn it into a augeas regex
# TODO: Can this instead be an augeas glob instead of regex
split_arg[idx] = ("* [label()=~regexp('%s')]" %
self.fnmatch_to_re(split))
# Reassemble the argument
# Note: This also normalizes the argument /serverroot/ -> /serverroot
arg = "/".join(split_arg)
return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match): # pylint: disable=no-self-use
"""Method converts Apache's basic fnmatch to regular expression.
Assumption - Configs are assumed to be well-formed and only writable by
privileged users.
https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html
:param str clean_fn_match: Apache style filename match, like globs
:returns: regex suitable for augeas
:rtype: str
"""
if sys.version_info < (3, 6):
# This strips off final /Z(?ms)
return fnmatch.translate(clean_fn_match)[:-7]
else: # pragma: no cover
# Since Python 3.6, it returns a different pattern like (?s:.*\.load)\Z
return fnmatch.translate(clean_fn_match)[4:-3]
def parse_file(self, filepath):
"""Parse file with Augeas
Checks to see if file_path is parsed by Augeas
If filepath isn't parsed, the file is added and Augeas is reloaded
:param str filepath: Apache config file path
"""
use_new, remove_old = self._check_path_actions(filepath)
# Ensure that we have the latest Augeas DOM state on disk before
# calling aug.load() which reloads the state from disk
if self.configurator:
self.configurator.ensure_augeas_state()
# Test if augeas included file for Httpd.lens
# Note: This works for augeas globs, ie. *.conf
if use_new:
inc_test = self.aug.match(
"/augeas/load/Httpd['%s' =~ glob(incl)]" % filepath)
if not inc_test:
# Load up files
# This doesn't seem to work on TravisCI
# self.aug.add_transform("Httpd.lns", [filepath])
if remove_old:
self._remove_httpd_transform(filepath)
self._add_httpd_transform(filepath)
self.aug.load()
def parsed_in_current(self, filep):
"""Checks if the file path is parsed by current Augeas parser config
ie. returns True if the file is found on a path that's found in live
Augeas configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.parser_paths)
def parsed_in_original(self, filep):
"""Checks if the file path is parsed by existing Apache config.
ie. returns True if the file is found on a path that matches Include or
IncludeOptional statement in the Apache configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.existing_paths)
def _parsed_by_parser_paths(self, filep, paths):
"""Helper function that searches through provided paths and returns
True if file path is found in the set"""
for directory in paths.keys():
for filename in paths[directory]:
if fnmatch.fnmatch(filep, os.path.join(directory, filename)):
return True
return False
def _check_path_actions(self, filepath):
"""Determine actions to take with a new augeas path
This helper function will return a tuple that defines
if we should try to append the new filepath to augeas
parser paths, and / or remove the old one with more
narrow matching.
:param str filepath: filepath to check the actions for
"""
try:
new_file_match = os.path.basename(filepath)
existing_matches = self.parser_paths[os.path.dirname(filepath)]
if "*" in existing_matches:
use_new = False
else:
use_new = True
if new_file_match == "*":
remove_old = True
else:
remove_old = False
except KeyError:
use_new = True
remove_old = False
return use_new, remove_old
def _remove_httpd_transform(self, filepath):
"""Remove path from Augeas transform
:param str filepath: filepath to remove
"""
remove_basenames = self.parser_paths[os.path.dirname(filepath)]
remove_dirname = os.path.dirname(filepath)
for name in remove_basenames:
remove_path = remove_dirname + "/" + name
remove_inc = self.aug.match(
"/augeas/load/Httpd/incl [. ='%s']" % remove_path)
self.aug.remove(remove_inc[0])
self.parser_paths.pop(remove_dirname)
def _add_httpd_transform(self, incl):
"""Add a transform to Augeas.
This function will correctly add a transform to augeas
The existing augeas.add_transform in python doesn't seem to work for
Travis CI as it loads in libaugeas.so.0.10.0
:param str incl: filepath to include for transform
"""
last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
if last_include:
# Insert a new node immediately after the last incl
self.aug.insert(last_include[0], "incl", False)
self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
# On first use... must load lens and add file to incl
else:
# Augeas uses base 1 indexing... insert at beginning...
self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
self.aug.set("/augeas/load/Httpd/incl", incl)
# Add included path to paths dictionary
try:
self.parser_paths[os.path.dirname(incl)].append(
os.path.basename(incl))
except KeyError:<|fim▁hole|> def standardize_excl(self):
"""Standardize the excl arguments for the Httpd lens in Augeas.
Note: Hack!
Standardize the excl arguments for the Httpd lens in Augeas
Servers sometimes give incorrect defaults
Note: This problem should be fixed in Augeas 1.0. Unfortunately,
Augeas 0.10 appears to be the most popular version currently.
"""
# attempt to protect against augeas error in 0.10.0 - ubuntu
# *.augsave -> /*.augsave upon augeas.load()
# Try to avoid bad httpd files
# There has to be a better way... but after a day and a half of testing
# I had no luck
# This is a hack... work around... submit to augeas if still not fixed
excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
"*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
"*~",
self.root + "/*.augsave",
self.root + "/*~",
self.root + "/*/*augsave",
self.root + "/*/*~",
self.root + "/*/*/*.augsave",
self.root + "/*/*/*~"]
for i, excluded in enumerate(excl, 1):
self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)
self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self.loc["root"]
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf", "conf/httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def case_i(string):
"""Returns case insensitive regex.
Returns a sloppy, but necessary version of a case insensitive regex.
Any string should be able to be submitted and the string is
escaped and then made case insensitive.
May be replaced by a more proper /i once augeas 1.0 is widely
supported.
:param str string: string to make case i regex
"""
return "".join(["[" + c.upper() + c.lower() + "]"
if c.isalpha() else c for c in re.escape(string)])
def get_aug_path(file_path):
"""Return augeas path for full filepath.
:param str file_path: Full filepath
"""
return "/files%s" % file_path<|fim▁end|> | self.parser_paths[os.path.dirname(incl)] = [
os.path.basename(incl)]
|
<|file_name|>effective_route.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveRoute(Model):
"""Effective Route.
:param name: The name of the user defined route. This is optional.
:type name: str
:param source: Who created the route. Possible values are: 'Unknown',
'User', 'VirtualNetworkGateway', and 'Default'. Possible values include:
'Unknown', 'User', 'VirtualNetworkGateway', 'Default'
:type source: str or
~azure.mgmt.network.v2016_09_01.models.EffectiveRouteSource
:param state: The value of effective route. Possible values are: 'Active'
and 'Invalid'. Possible values include: 'Active', 'Invalid'
:type state: str or
~azure.mgmt.network.v2016_09_01.models.EffectiveRouteState
:param address_prefix: The address prefixes of the effective routes in<|fim▁hole|> :param next_hop_ip_address: The IP address of the next hop of the
effective route.
:type next_hop_ip_address: list[str]
:param next_hop_type: The type of Azure hop the packet should be sent to.
Possible values are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet',
'VirtualAppliance', and 'None'. Possible values include:
'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance',
'None'
:type next_hop_type: str or
~azure.mgmt.network.v2016_09_01.models.RouteNextHopType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'address_prefix': {'key': 'addressPrefix', 'type': '[str]'},
'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': '[str]'},
'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EffectiveRoute, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.source = kwargs.get('source', None)
self.state = kwargs.get('state', None)
self.address_prefix = kwargs.get('address_prefix', None)
self.next_hop_ip_address = kwargs.get('next_hop_ip_address', None)
self.next_hop_type = kwargs.get('next_hop_type', None)<|fim▁end|> | CIDR notation.
:type address_prefix: list[str] |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::io;
use std::cmp::min;
type Matrix = Vec<Vec<u64>>;
fn readline () -> String {
let mut input_str = String::new();
io::stdin().read_line(&mut input_str).ok().expect("read error");
input_str
}
fn read_matrix (n: usize) -> Matrix {
(0..n).map(|_| must_parse_arr(&readline())).collect()
}
fn must_parse<T> (num_str: &str) -> T where T: std::str::FromStr {
num_str.trim().parse().ok().expect("parse error")
}
fn must_parse_arr<T> (s: &str) -> Vec<T> where T: std::str::FromStr {
s.split_whitespace().map(|item| must_parse(item)).collect()
}
fn num_arr_to_string (n: &Vec<u64>) -> String {
n.iter().map(|d| d.to_string()).collect::<Vec<String>>().join(" ")
}
struct CircleIter {
counter: usize,
circle_size: usize,
circle_pos: usize,
circle_rows: usize,
circle_cols: usize,
first_row: usize,
last_row: usize,
first_col: usize,
last_col: usize,
}
type Pos = (usize, usize, usize);
impl Iterator for CircleIter {
type Item = Pos;
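    // Walks the ring counter-clockwise in four segments: down the left
    // column, right along the bottom row, up the right column, and finally
    // left along the top row back towards the starting corner.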
fn next (&mut self) -> Option<Pos> {
if self.counter == self.circle_size {
return None;
}
let pos = if self.counter < self.circle_rows - 1 {
(self.circle_pos + self.counter, self.first_col, self.counter)
} else if self.counter < self.circle_rows - 1 + self.circle_cols - 1 {
(self.last_row, self.first_col + self.counter + 1 - self.circle_rows, self.counter)
} else if self.counter < 2 * self.circle_rows - 2 + self.circle_cols - 1 {
let temp = self.counter + 2 - self.circle_rows - self.circle_cols;
(self.last_row - temp, self.last_col, self.counter)
} else {
let temp = self.counter + 3 - 2 * self.circle_rows - self.circle_cols;
(self.first_row, self.last_col - temp, self.counter)
};
self.counter += 1;
Some(pos)
}
}
<|fim▁hole|>fn iter_circle (rows: usize, cols: usize, circle_pos: usize) -> CircleIter {
CircleIter {
counter: 0,
circle_size: calc_circle_size(rows, cols, circle_pos),
circle_pos: circle_pos,
circle_rows: rows - 2 * circle_pos,
circle_cols: cols - 2 * circle_pos,
first_row: circle_pos,
first_col: circle_pos,
last_col: cols - circle_pos - 1,
last_row: rows - circle_pos - 1,
}
}
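// Cell count of the ring at depth `circle_pos`: the ring spans
// (rows - 2p) x (cols - 2p), so its perimeter is 2*R + 2*C - 4.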
fn calc_circle_size (rows: usize, cols: usize, circle_pos: usize) -> usize {
2 * rows - 4 * circle_pos + 2 * cols - 4 * circle_pos - 4
}
fn read_circle(m: &Matrix, rows: usize, cols: usize, circle_pos: usize) -> Vec<u64> {
let mut circle: Vec<u64> = Vec::with_capacity(calc_circle_size(rows, cols, circle_pos));
for (row, col, _) in iter_circle(rows, cols, circle_pos) {
circle.push(m[row][col]);
}
circle
}
fn main() {
let params: Vec<u64> = must_parse_arr(&readline());
let rows = params[0] as usize;
let cols = params[1] as usize;
let rotations = params[2];
let matrix = &read_matrix(rows);
let mut result: Matrix = (0..rows).map(|_| vec![0; cols]).collect();
for circle_pos in 0..(min(rows, cols) / 2) {
let circle = read_circle(matrix, rows, cols, circle_pos);
let r = (rotations % circle.len() as u64) as usize;
for (row, col, pos) in iter_circle(rows, cols, circle_pos) {
let val_pos = if pos < r { pos + circle.len() - r } else { pos - r};
result[row][col] = circle[val_pos];
}
}
for vec in result {
println!("{}", num_arr_to_string(&vec));
}
}<|fim▁end|> | |
<|file_name|>SynchronouslyLoadedUserBinaryDictionary.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.androidtweak.inputmethod.myanmar;
import android.content.Context;
import com.androidtweak.inputmethod.keyboard.ProximityInfo;
public class SynchronouslyLoadedUserBinaryDictionary extends UserBinaryDictionary {
public SynchronouslyLoadedUserBinaryDictionary(final Context context, final String locale) {
this(context, locale, false);
}
public SynchronouslyLoadedUserBinaryDictionary(final Context context, final String locale,
final boolean alsoUseMoreRestrictiveLocales) {
super(context, locale, alsoUseMoreRestrictiveLocales);
}
@Override
public synchronized void getWords(final WordComposer codes,
final CharSequence prevWordForBigrams, final WordCallback callback,
final ProximityInfo proximityInfo) {<|fim▁hole|> @Override
public synchronized boolean isValidWord(CharSequence word) {
syncReloadDictionaryIfRequired();
return isValidWordInner(word);
}
}<|fim▁end|> | syncReloadDictionaryIfRequired();
getWordsInner(codes, prevWordForBigrams, callback, proximityInfo);
}
|
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for @webpack-blocks/assets 2.0
// Project: https://github.com/andywer/webpack-blocks/tree/master/packages/assets
// Definitions by: Max Boguslavskiy <https://github.com/maxbogus>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 3.7
import { Block } from 'webpack-blocks';
export namespace css {
type UrlFilter = (url: string, resourcePath: string) => boolean;
type ImportFilter = (parseImport: ParseImportOptions, resourcePath: string) => boolean;
type GetLocalIdent = (context: any, localIdentName: any, localName: any, options: any) => string;
type NameFunction = (file: string) => any;
type PathFunction = (url: string, resourcePath: string, context: string) => any;
interface ParseImportOptions {
url: string;
media: string;
}
interface ModuleOptions {
mode?: string | undefined;
localIdentName?: string | undefined;
context?: string | undefined;
hashPrefix?: string | undefined;
getLocalIdent?: GetLocalIdent | undefined;
localIdentRegExp?: string | RegExp | undefined;
/**
* 0 => no loaders (default);
* 1 => postcss-loader;
* 2 => postcss-loader, sass-loader
*/
importLoaders?: 0 | 1 | 2 | undefined;
localsConvention?: 'asIs' | 'camelCase' | 'camelCaseOnly' | 'dashes' | 'dashesOnly' | undefined;
onlyLocals?: boolean | undefined;
}
interface CssOptions {
url?: boolean | UrlFilter | undefined;
import?: boolean | ImportFilter | undefined;
modules?: boolean | string | ModuleOptions | undefined;
sourceMap?: boolean | undefined;
}
interface FileOptions {
name?: string | NameFunction | undefined;
outputPath?: string | PathFunction | undefined;
publicPath?: string | PathFunction | undefined;
postTransformPublicPath?: ((p: string) => string) | undefined;
context?: string | undefined;
emitFile?: boolean | undefined;
regExp?: RegExp | undefined;
}
interface UrlOptions {
fallback?: string | undefined;
limit?: number | boolean | string | undefined;<|fim▁hole|>
function modules(options?: ModuleOptions): any;
}
export function css(options?: css.CssOptions): Block;
export function file(options?: css.FileOptions): Block;
export function url(options?: css.UrlOptions): Block;<|fim▁end|> | mimetype?: string | undefined;
} |
<|file_name|>regions-infer-paramd-indirect.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that we correctly infer that b and c must be region
// parameterized because they reference a which requires a region.
type a<'a> = &'a int;
type b<'a> = Box<a<'a>>;
struct c<'a> {
f: Box<b<'a>>
}
trait set_f<'a> {
fn set_f_ok(&mut self, b: Box<b<'a>>);
fn set_f_bad(&mut self, b: Box<b>);
}
impl<'a> set_f<'a> for c<'a> {
fn set_f_ok(&mut self, b: Box<b<'a>>) {
self.f = b;
}
fn set_f_bad(&mut self, b: Box<b>) {
self.f = b; //~ ERROR mismatched types: expected `Box<Box<&'a int>>`, found `Box<Box<&int>>`
}<|fim▁hole|>fn main() {}<|fim▁end|> | }
|
<|file_name|>train_ptb_dep_sa_albert_topk.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-03-07 23:48
from hanlp.metrics.parsing import conllx_eval
from hanlp.datasets.parsing.ptb import PTB_SD330_DEV, PTB_SD330_TRAIN, PTB_SD330_TEST, PTB_TOKEN_MAPPING
from hanlp.components.parsers.biaffine_parser_tf import BiaffineTransformerDependencyParserTF, \
StructuralAttentionDependencyParserTF
from hanlp.pretrained.glove import GLOVE_840B_300D
from tests import cdroot
cdroot()
save_dir = 'data/model/dep/ptb_sa_topk'
parser = StructuralAttentionDependencyParserTF()
parser.fit(PTB_SD330_TRAIN, PTB_SD330_DEV, save_dir, 'bert-base-uncased',
batch_size=3000,
warmup_steps_ratio=.1,
token_mapping=PTB_TOKEN_MAPPING,
samples_per_batch=150,
transformer_dropout=.33,
masked_lm_dropout=.33,
learning_rate=2e-3,<|fim▁hole|> # early_stopping_patience=10,
# num_decoder_layers=2,
)
parser.load(save_dir)
# output = f'{save_dir}/test.predict.conll'
parser.evaluate(PTB_SD330_TEST, save_dir, warm_up=False)
# uas, las = conllx_eval.evaluate(PTB_SD330_TEST, output)
# print(f'Official UAS: {uas:.4f} LAS: {las:.4f}')
print(f'Model saved in {save_dir}')<|fim▁end|> | learning_rate_transformer=1e-5,
# alpha=1, |
<|file_name|>text.py<|end_file_name|><|fim▁begin|>import time
from torba.server import util
def sessions_lines(data):
"""A generator returning lines for a list of sessions.
data is the return value of rpc_sessions()."""
fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} '
'{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
yield fmt.format('ID', 'Flags', 'Client', 'Proto',
'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer')
for (id_, flags, peer, client, proto, reqs, txs_sent, subs,<|fim▁hole|> '{:,d}'.format(txs_sent),
'{:,d}'.format(subs),
'{:,d}'.format(recv_count),
'{:,d}'.format(recv_size // 1024),
'{:,d}'.format(send_count),
'{:,d}'.format(send_size // 1024),
util.formatted_time(time, sep=''), peer)
def groups_lines(data):
"""A generator returning lines for a list of groups.
data is the return value of rpc_groups()."""
fmt = ('{:<6} {:>9} {:>9} {:>6} {:>6} {:>8}'
'{:>7} {:>9} {:>7} {:>9}')
yield fmt.format('ID', 'Sessions', 'Bwidth KB', 'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB')
for (id_, session_count, bandwidth, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size) in data:
yield fmt.format(id_,
'{:,d}'.format(session_count),
'{:,d}'.format(bandwidth // 1024),
'{:,d}'.format(reqs),
'{:,d}'.format(txs_sent),
'{:,d}'.format(subs),
'{:,d}'.format(recv_count),
'{:,d}'.format(recv_size // 1024),
'{:,d}'.format(send_count),
'{:,d}'.format(send_size // 1024))
def peers_lines(data):
"""A generator returning lines for a list of peers.
data is the return value of rpc_peers()."""
def time_fmt(t):
if not t:
return 'Never'
return util.formatted_time(now - t)
now = time.time()
fmt = ('{:<30} {:<6} {:>5} {:>5} {:<17} {:>4} '
'{:>4} {:>8} {:>11} {:>11} {:>5} {:>20} {:<15}')
yield fmt.format('Host', 'Status', 'TCP', 'SSL', 'Server', 'Min',
'Max', 'Pruning', 'Last Good', 'Last Try',
'Tries', 'Source', 'IP Address')
for item in data:
features = item['features']
hostname = item['host']
host = features['hosts'][hostname]
yield fmt.format(hostname[:30],
item['status'],
host.get('tcp_port') or '',
host.get('ssl_port') or '',
features['server_version'] or 'unknown',
features['protocol_min'],
features['protocol_max'],
features['pruning'] or '',
time_fmt(item['last_good']),
time_fmt(item['last_try']),
item['try_count'],
item['source'][:20],
item['ip_addr'] or '')<|fim▁end|> | recv_count, recv_size, send_count, send_size, time) in data:
yield fmt.format(id_, flags, client, proto,
'{:,d}'.format(reqs), |
<|file_name|>LiveDebugVariables.cpp<|end_file_name|><|fim▁begin|>//===- LiveDebugVariables.cpp - Tracking debug info variables -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveDebugVariables analysis.
//
// Remove all DBG_VALUE instructions referencing virtual registers and replace
// them with a data structure tracking where live user variables are kept - in a
// virtual register or in a stack slot.
//
// Allow the data structure to be updated during register allocation when values
// are moved between registers and stack slots. Finally emit new DBG_VALUE
// instructions after register allocation is complete.
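//
// For example, a DBG_VALUE naming %vreg1 that is collected here may be
// re-emitted after allocation as a DBG_VALUE naming the assigned physical
// register, or a frame index if the value was spilled.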
//
//===----------------------------------------------------------------------===//
#include "LiveDebugVariables.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <memory>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "livedebug"
static cl::opt<bool>
EnableLDV("live-debug-variables", cl::init(true),
cl::desc("Enable the live debug variables pass"), cl::Hidden);
STATISTIC(NumInsertedDebugValues, "Number of DBG_VALUEs inserted");
char LiveDebugVariables::ID = 0;
INITIALIZE_PASS_BEGIN(LiveDebugVariables, "livedebugvars",
"Debug Variable Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(LiveDebugVariables, "livedebugvars",
"Debug Variable Analysis", false, false)
void LiveDebugVariables::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineDominatorTree>();
AU.addRequiredTransitive<LiveIntervals>();
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(nullptr) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
}
/// LocMap - Map of where a user value is live, and its location.
typedef IntervalMap<SlotIndex, unsigned, 4> LocMap;
namespace {
/// UserValueScopes - Keeps track of lexical scopes associated with a
/// user value's source location.
class UserValueScopes {
DebugLoc DL;
LexicalScopes &LS;
SmallPtrSet<const MachineBasicBlock *, 4> LBlocks;
public:
UserValueScopes(DebugLoc D, LexicalScopes &L) : DL(std::move(D)), LS(L) {}
/// dominates - Return true if current scope dominates at least one machine
/// instruction in a given machine basic block.
bool dominates(MachineBasicBlock *MBB) {
if (LBlocks.empty())
LS.getMachineBasicBlocks(DL, LBlocks);
return LBlocks.count(MBB) != 0 || LS.dominates(DL, MBB);
}
};
} // end anonymous namespace
/// UserValue - A user value is a part of a debug info user variable.
///
/// A DBG_VALUE instruction notes that (a sub-register of) a virtual register
/// holds part of a user variable. The part is identified by a byte offset.
///
/// UserValues are grouped into equivalence classes for easier searching. Two
/// user values are related if they refer to the same variable, or if they are
/// held by the same virtual register. The equivalence class is the transitive
/// closure of that relation.
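///
/// For example, if two DBG_VALUEs for the same variable name %vreg1 and
/// %vreg2, both user values and both registers end up in one class, so a
/// lookup through either register reaches every user value that may need
/// updating when that register is split or rewritten.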
namespace {
class LDVImpl;
class UserValue {
const MDNode *Variable; ///< The debug info variable we are part of.
const MDNode *Expression; ///< Any complex address expression.
unsigned offset; ///< Byte offset into variable.
bool IsIndirect; ///< true if this is a register-indirect+offset value.
DebugLoc dl; ///< The debug location for the variable. This is
///< used by dwarf writer to find lexical scope.
UserValue *leader; ///< Equivalence class leader.
UserValue *next; ///< Next value in equivalence class, or null.
/// Numbered locations referenced by locmap.
SmallVector<MachineOperand, 4> locations;
/// Map of slot indices where this value is live.
LocMap locInts;
/// coalesceLocation - After LocNo was changed, check if it has become
/// identical to another location, and coalesce them. This may cause LocNo or
/// a later location to be erased, but no earlier location will be erased.
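/// For example, if Loc2 becomes identical to Loc0, Loc2 is erased, interval
/// entries using Loc2 are redirected to Loc0, and location numbers above 2
/// shift down by one.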
void coalesceLocation(unsigned LocNo);
/// insertDebugValue - Insert a DBG_VALUE into MBB at Idx for LocNo.
void insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx, unsigned LocNo,
LiveIntervals &LIS, const TargetInstrInfo &TII);
/// splitLocation - Replace OldLocNo ranges with NewRegs ranges where NewRegs
/// is live. Returns true if any changes were made.
bool splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
LiveIntervals &LIS);
public:
/// UserValue - Create a new UserValue.
UserValue(const MDNode *var, const MDNode *expr, unsigned o, bool i,
DebugLoc L, LocMap::Allocator &alloc)
: Variable(var), Expression(expr), offset(o), IsIndirect(i),
dl(std::move(L)), leader(this), next(nullptr), locInts(alloc) {}
/// getLeader - Get the leader of this value's equivalence class.
UserValue *getLeader() {
UserValue *l = leader;
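// Walk the chain to its root, then point our own link at the root so the
// next lookup takes a single hop (simple path compression).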
while (l != l->leader)
l = l->leader;
return leader = l;
}
/// getNext - Return the next UserValue in the equivalence class.
UserValue *getNext() const { return next; }
/// match - Does this UserValue match the parameters?
bool match(const MDNode *Var, const MDNode *Expr, const DILocation *IA,
unsigned Offset, bool indirect) const {
return Var == Variable && Expr == Expression && dl->getInlinedAt() == IA &&
Offset == offset && indirect == IsIndirect;
}
/// merge - Merge equivalence classes.
static UserValue *merge(UserValue *L1, UserValue *L2) {
L2 = L2->getLeader();
if (!L1)
return L2;
L1 = L1->getLeader();
if (L1 == L2)
return L1;
// Splice L2 before L1's members.
UserValue *End = L2;
while (End->next) {
End->leader = L1;
End = End->next;
}
End->leader = L1;
End->next = L1->next;
L1->next = L2;
return L1;
}
/// getLocationNo - Return the location number that matches Loc.
unsigned getLocationNo(const MachineOperand &LocMO) {
if (LocMO.isReg()) {
if (LocMO.getReg() == 0)
return ~0u;
// For register locations we don't care about use/def and other flags.
for (unsigned i = 0, e = locations.size(); i != e; ++i)
if (locations[i].isReg() &&
locations[i].getReg() == LocMO.getReg() &&
locations[i].getSubReg() == LocMO.getSubReg())
return i;
} else
for (unsigned i = 0, e = locations.size(); i != e; ++i)
if (LocMO.isIdenticalTo(locations[i]))
return i;
locations.push_back(LocMO);
// We are storing a MachineOperand outside a MachineInstr.
locations.back().clearParent();
// Don't store def operands.
if (locations.back().isReg())
locations.back().setIsUse();
return locations.size() - 1;
}
/// mapVirtRegs - Ensure that all virtual register locations are mapped.
void mapVirtRegs(LDVImpl *LDV);
/// addDef - Add a definition point to this value.
void addDef(SlotIndex Idx, const MachineOperand &LocMO) {
// Add a singular (Idx,Idx) -> Loc mapping.
LocMap::iterator I = locInts.find(Idx);
if (!I.valid() || I.start() != Idx)
I.insert(Idx, Idx.getNextSlot(), getLocationNo(LocMO));
else
// A later DBG_VALUE at the same SlotIndex overrides the old location.
I.setValue(getLocationNo(LocMO));
}
/// extendDef - Extend the current definition as far as possible down the
/// dominator tree. Stop when meeting an existing def or when leaving the live
/// range of VNI.
/// End points where VNI is no longer live are added to Kills.
/// @param Idx Starting point for the definition.
/// @param LocNo Location number to propagate.
/// @param LR Restrict liveness to where LR has the value VNI. May be null.
/// @param VNI When LR is not null, this is the value to restrict to.
/// @param Kills Append end points of VNI's live range to Kills.
/// @param LIS Live intervals analysis.
/// @param MDT Dominator tree.
void extendDef(SlotIndex Idx, unsigned LocNo,
LiveRange *LR, const VNInfo *VNI,
SmallVectorImpl<SlotIndex> *Kills,
LiveIntervals &LIS, MachineDominatorTree &MDT,
UserValueScopes &UVS);
/// addDefsFromCopies - The value in LI/LocNo may be copied to other
/// registers. Determine if any of the copies are available at the kill
/// points, and add defs if possible.
/// @param LI Scan for copies of the value in LI->reg.
/// @param LocNo Location number of LI->reg.
/// @param Kills Points where the range of LocNo could be extended.
/// @param NewDefs Append (Idx, LocNo) of inserted defs here.
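/// For example, when the tracked register dies at a COPY into another
/// virtual register, the user value can keep following the copied value
/// instead of going dark at the kill point.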
void addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
const SmallVectorImpl<SlotIndex> &Kills,
SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs,
MachineRegisterInfo &MRI,
LiveIntervals &LIS);
/// computeIntervals - Compute the live intervals of all locations after
/// collecting all their def points.
void computeIntervals(MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
LiveIntervals &LIS, MachineDominatorTree &MDT,
UserValueScopes &UVS);
/// splitRegister - Replace OldReg ranges with NewRegs ranges where NewRegs is
/// live. Returns true if any changes were made.
bool splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
LiveIntervals &LIS);
/// rewriteLocations - Rewrite virtual register locations according to the
/// provided virtual register map.
void rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI);
/// emitDebugValues - Recreate DBG_VALUE instruction from data structures.
void emitDebugValues(VirtRegMap *VRM,
LiveIntervals &LIS, const TargetInstrInfo &TRI);
/// getDebugLoc - Return DebugLoc of this UserValue.
DebugLoc getDebugLoc() { return dl; }
void print(raw_ostream &, const TargetRegisterInfo *);
};
} // namespace
/// LDVImpl - Implementation of the LiveDebugVariables pass.
namespace {
class LDVImpl {
LiveDebugVariables &pass;
LocMap::Allocator allocator;
MachineFunction *MF;
LiveIntervals *LIS;
LexicalScopes LS;
MachineDominatorTree *MDT;
const TargetRegisterInfo *TRI;
/// Whether emitDebugValues is called.
bool EmitDone;
/// Whether the machine function is modified during the pass.
bool ModifiedMF;
/// userValues - All allocated UserValue instances.
SmallVector<std::unique_ptr<UserValue>, 8> userValues;
/// Map virtual register to eq class leader.
typedef DenseMap<unsigned, UserValue*> VRMap;
VRMap virtRegToEqClass;
/// Map user variable to eq class leader.
typedef DenseMap<const MDNode *, UserValue*> UVMap;
UVMap userVarMap;
/// getUserValue - Find or create a UserValue.
UserValue *getUserValue(const MDNode *Var, const MDNode *Expr,
unsigned Offset, bool IsIndirect, const DebugLoc &DL);
/// lookupVirtReg - Find the EC leader for VirtReg or null.
UserValue *lookupVirtReg(unsigned VirtReg);
/// handleDebugValue - Add DBG_VALUE instruction to our maps.
/// @param MI DBG_VALUE instruction
/// @param Idx Last valid SlotIndex before instruction.
/// @return True if the DBG_VALUE instruction should be deleted.
bool handleDebugValue(MachineInstr &MI, SlotIndex Idx);
/// collectDebugValues - Collect and erase all DBG_VALUE instructions, adding
/// a UserValue def for each instruction.
/// @param mf MachineFunction to be scanned.
/// @return True if any debug values were found.
bool collectDebugValues(MachineFunction &mf);
/// computeIntervals - Compute the live intervals of all user values after
/// collecting all their def points.
void computeIntervals();
public:
LDVImpl(LiveDebugVariables *ps)
: pass(*ps), MF(nullptr), EmitDone(false), ModifiedMF(false) {}
bool runOnMachineFunction(MachineFunction &mf);
/// clear - Release all memory.
void clear() {
MF = nullptr;
userValues.clear();
virtRegToEqClass.clear();
userVarMap.clear();
// Make sure we call emitDebugValues if the machine function was modified.
assert((!ModifiedMF || EmitDone) &&
"Dbg values are not emitted in LDV");
EmitDone = false;
ModifiedMF = false;
LS.reset();
}
/// mapVirtReg - Map virtual register to an equivalence class.
void mapVirtReg(unsigned VirtReg, UserValue *EC);
/// splitRegister - Replace all references to OldReg with NewRegs.
void splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs);
/// emitDebugValues - Recreate DBG_VALUE instruction from data structures.
void emitDebugValues(VirtRegMap *VRM);
void print(raw_ostream&);
};
} // namespace
static void printDebugLoc(const DebugLoc &DL, raw_ostream &CommentOS,
const LLVMContext &Ctx) {
if (!DL)
return;
auto *Scope = cast<DIScope>(DL.getScope());
// Omit the directory, because it's likely to be long and uninteresting.
CommentOS << Scope->getFilename();
CommentOS << ':' << DL.getLine();
if (DL.getCol() != 0)
CommentOS << ':' << DL.getCol();
DebugLoc InlinedAtDL = DL.getInlinedAt();
if (!InlinedAtDL)
return;
CommentOS << " @[ ";
printDebugLoc(InlinedAtDL, CommentOS, Ctx);
CommentOS << " ]";
}
static void printExtendedName(raw_ostream &OS, const DILocalVariable *V,
const DILocation *DL) {
const LLVMContext &Ctx = V->getContext();
StringRef Res = V->getName();
if (!Res.empty())
OS << Res << "," << V->getLine();
if (auto *InlinedAt = DL->getInlinedAt()) {
if (DebugLoc InlinedAtDL = InlinedAt) {
OS << " @[";
printDebugLoc(InlinedAtDL, OS, Ctx);
OS << "]";
}
}
}
void UserValue::print(raw_ostream &OS, const TargetRegisterInfo *TRI) {
auto *DV = cast<DILocalVariable>(Variable);
OS << "!\"";
printExtendedName(OS, DV, dl);
OS << "\"\t";
if (offset)
OS << '+' << offset;
for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I) {
OS << " [" << I.start() << ';' << I.stop() << "):";
if (I.value() == ~0u)
OS << "undef";
else
OS << I.value();
}
for (unsigned i = 0, e = locations.size(); i != e; ++i) {
OS << " Loc" << i << '=';
locations[i].print(OS, TRI);
}
OS << '\n';
}
void LDVImpl::print(raw_ostream &OS) {
OS << "********** DEBUG VARIABLES **********\n";
for (unsigned i = 0, e = userValues.size(); i != e; ++i)
userValues[i]->print(OS, TRI);
}
void UserValue::coalesceLocation(unsigned LocNo) {
unsigned KeepLoc = 0;
for (unsigned e = locations.size(); KeepLoc != e; ++KeepLoc) {
if (KeepLoc == LocNo)
continue;
if (locations[KeepLoc].isIdenticalTo(locations[LocNo]))
break;
}
// No matches.
if (KeepLoc == locations.size())
return;
// Keep the smaller location, erase the larger one.
unsigned EraseLoc = LocNo;
if (KeepLoc > EraseLoc)
std::swap(KeepLoc, EraseLoc);
locations.erase(locations.begin() + EraseLoc);
// Rewrite values.
for (LocMap::iterator I = locInts.begin(); I.valid(); ++I) {
unsigned v = I.value();
if (v == EraseLoc)
I.setValue(KeepLoc); // Coalesce when possible.
else if (v > EraseLoc)
I.setValueUnchecked(v-1); // Avoid coalescing with untransformed values.
}
}
void UserValue::mapVirtRegs(LDVImpl *LDV) {
for (unsigned i = 0, e = locations.size(); i != e; ++i)
if (locations[i].isReg() &&
TargetRegisterInfo::isVirtualRegister(locations[i].getReg()))
LDV->mapVirtReg(locations[i].getReg(), this);
}
UserValue *LDVImpl::getUserValue(const MDNode *Var, const MDNode *Expr,
unsigned Offset, bool IsIndirect,
const DebugLoc &DL) {
UserValue *&Leader = userVarMap[Var];
if (Leader) {
UserValue *UV = Leader->getLeader();
Leader = UV;
for (; UV; UV = UV->getNext())
if (UV->match(Var, Expr, DL->getInlinedAt(), Offset, IsIndirect))
return UV;
}
userValues.push_back(
make_unique<UserValue>(Var, Expr, Offset, IsIndirect, DL, allocator));
UserValue *UV = userValues.back().get();
Leader = UserValue::merge(Leader, UV);
return UV;
}
void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Only map VirtRegs");
UserValue *&Leader = virtRegToEqClass[VirtReg];
Leader = UserValue::merge(Leader, EC);
}
UserValue *LDVImpl::lookupVirtReg(unsigned VirtReg) {
if (UserValue *UV = virtRegToEqClass.lookup(VirtReg))
return UV->getLeader();
return nullptr;
}
bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) {
// DBG_VALUE loc, offset, variable
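// Schematically: DBG_VALUE <loc>, <offset>, <variable md>, <expression md>,
// i.e. four operands in total.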
if (MI.getNumOperands() != 4 ||
!(MI.getOperand(1).isReg() || MI.getOperand(1).isImm()) ||
!MI.getOperand(2).isMetadata()) {
DEBUG(dbgs() << "Can't handle " << MI);
return false;
}
// Get or create the UserValue for (variable,offset).
bool IsIndirect = MI.isIndirectDebugValue();
unsigned Offset = IsIndirect ? MI.getOperand(1).getImm() : 0;
const MDNode *Var = MI.getDebugVariable();
const MDNode *Expr = MI.getDebugExpression();
UserValue *UV = getUserValue(Var, Expr, Offset, IsIndirect, MI.getDebugLoc());
UV->addDef(Idx, MI.getOperand(0));
return true;
}
bool LDVImpl::collectDebugValues(MachineFunction &mf) {
bool Changed = false;
for (MachineFunction::iterator MFI = mf.begin(), MFE = mf.end(); MFI != MFE;
++MFI) {
MachineBasicBlock *MBB = &*MFI;
for (MachineBasicBlock::iterator MBBI = MBB->begin(), MBBE = MBB->end();
MBBI != MBBE;) {
if (!MBBI->isDebugValue()) {
++MBBI;
continue;
}
// DBG_VALUE has no slot index, use the previous instruction instead.
SlotIndex Idx =
MBBI == MBB->begin()
? LIS->getMBBStartIdx(MBB)
: LIS->getInstructionIndex(*std::prev(MBBI)).getRegSlot();
// Handle consecutive DBG_VALUE instructions with the same slot index.
do {
if (handleDebugValue(*MBBI, Idx)) {
MBBI = MBB->erase(MBBI);
Changed = true;
} else
++MBBI;
} while (MBBI != MBBE && MBBI->isDebugValue());
}
}
return Changed;
}
/// We only propagate DBG_VALUEs locally here. LiveDebugValues performs a
/// data-flow analysis to propagate them beyond basic block boundaries.
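/// For example, a location that is live out of its block is only extended to
/// the end of that block here; whether it survives into successors is decided
/// by the later data-flow pass.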
void UserValue::extendDef(SlotIndex Idx, unsigned LocNo, LiveRange *LR,
const VNInfo *VNI, SmallVectorImpl<SlotIndex> *Kills,
LiveIntervals &LIS, MachineDominatorTree &MDT,
UserValueScopes &UVS) {
SlotIndex Start = Idx;
MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start);
SlotIndex Stop = LIS.getMBBEndIdx(MBB);
LocMap::iterator I = locInts.find(Start);
// Limit to VNI's live range.
bool ToEnd = true;
if (LR && VNI) {
LiveInterval::Segment *Segment = LR->getSegmentContaining(Start);
if (!Segment || Segment->valno != VNI) {
if (Kills)
Kills->push_back(Start);
return;
}<|fim▁hole|> Stop = Segment->end;
ToEnd = false;
}
}
// There could already be a short def at Start.
if (I.valid() && I.start() <= Start) {
// Stop when meeting a different location or an already extended interval.
Start = Start.getNextSlot();
if (I.value() != LocNo || I.stop() != Start)
return;
// This is a one-slot placeholder. Just skip it.
++I;
}
// Limited by the next def.
if (I.valid() && I.start() < Stop) {
Stop = I.start();
ToEnd = false;
}
// Limited by VNI's live range.
else if (!ToEnd && Kills)
Kills->push_back(Stop);
if (Start < Stop)
I.insert(Start, Stop, LocNo);
}
void
UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
const SmallVectorImpl<SlotIndex> &Kills,
SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs,
MachineRegisterInfo &MRI, LiveIntervals &LIS) {
if (Kills.empty())
return;
// Don't track copies from physregs, there are too many uses.
if (!TargetRegisterInfo::isVirtualRegister(LI->reg))
return;
// Collect all the (vreg, valno) pairs that are copies of LI.
SmallVector<std::pair<LiveInterval*, const VNInfo*>, 8> CopyValues;
for (MachineOperand &MO : MRI.use_nodbg_operands(LI->reg)) {
MachineInstr *MI = MO.getParent();
// Copies of the full value.
if (MO.getSubReg() || !MI->isCopy())
continue;
unsigned DstReg = MI->getOperand(0).getReg();
// Don't follow copies to physregs. These are usually setting up call
// arguments, and the argument registers are always call clobbered. We are
// better off in the source register which could be a callee-saved register,
// or it could be spilled.
if (!TargetRegisterInfo::isVirtualRegister(DstReg))
continue;
// Is LocNo extended to reach this copy? If not, another def may be blocking
// it, or we are looking at a wrong value of LI.
SlotIndex Idx = LIS.getInstructionIndex(*MI);
LocMap::iterator I = locInts.find(Idx.getRegSlot(true));
if (!I.valid() || I.value() != LocNo)
continue;
if (!LIS.hasInterval(DstReg))
continue;
LiveInterval *DstLI = &LIS.getInterval(DstReg);
const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getRegSlot());
assert(DstVNI && DstVNI->def == Idx.getRegSlot() && "Bad copy value");
CopyValues.push_back(std::make_pair(DstLI, DstVNI));
}
if (CopyValues.empty())
return;
DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI << '\n');
// Try to add defs of the copied values for each kill point.
for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
SlotIndex Idx = Kills[i];
for (unsigned j = 0, e = CopyValues.size(); j != e; ++j) {
LiveInterval *DstLI = CopyValues[j].first;
const VNInfo *DstVNI = CopyValues[j].second;
if (DstLI->getVNInfoAt(Idx) != DstVNI)
continue;
// Check that there isn't already a def at Idx
LocMap::iterator I = locInts.find(Idx);
if (I.valid() && I.start() <= Idx)
continue;
DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #"
<< DstVNI->id << " in " << *DstLI << '\n');
MachineInstr *CopyMI = LIS.getInstructionFromIndex(DstVNI->def);
assert(CopyMI && CopyMI->isCopy() && "Bad copy value");
unsigned LocNo = getLocationNo(CopyMI->getOperand(0));
I.insert(Idx, Idx.getNextSlot(), LocNo);
NewDefs.push_back(std::make_pair(Idx, LocNo));
break;
}
}
}
void
UserValue::computeIntervals(MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI,
LiveIntervals &LIS,
MachineDominatorTree &MDT,
UserValueScopes &UVS) {
SmallVector<std::pair<SlotIndex, unsigned>, 16> Defs;
// Collect all defs to be extended (skipping undefs).
for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I)
if (I.value() != ~0u)
Defs.push_back(std::make_pair(I.start(), I.value()));
// Extend all defs, and possibly add new ones along the way.
for (unsigned i = 0; i != Defs.size(); ++i) {
SlotIndex Idx = Defs[i].first;
unsigned LocNo = Defs[i].second;
const MachineOperand &Loc = locations[LocNo];
if (!Loc.isReg()) {
extendDef(Idx, LocNo, nullptr, nullptr, nullptr, LIS, MDT, UVS);
continue;
}
// Register locations are constrained to where the register value is live.
if (TargetRegisterInfo::isVirtualRegister(Loc.getReg())) {
LiveInterval *LI = nullptr;
const VNInfo *VNI = nullptr;
if (LIS.hasInterval(Loc.getReg())) {
LI = &LIS.getInterval(Loc.getReg());
VNI = LI->getVNInfoAt(Idx);
}
SmallVector<SlotIndex, 16> Kills;
extendDef(Idx, LocNo, LI, VNI, &Kills, LIS, MDT, UVS);
if (LI)
addDefsFromCopies(LI, LocNo, Kills, Defs, MRI, LIS);
continue;
}
// For physregs, use the live range of the first regunit as a guide.
unsigned Unit = *MCRegUnitIterator(Loc.getReg(), &TRI);
LiveRange *LR = &LIS.getRegUnit(Unit);
const VNInfo *VNI = LR->getVNInfoAt(Idx);
// Don't track copies from physregs, it is too expensive.
extendDef(Idx, LocNo, LR, VNI, nullptr, LIS, MDT, UVS);
}
// Finally, erase all the undefs.
for (LocMap::iterator I = locInts.begin(); I.valid();)
if (I.value() == ~0u)
I.erase();
else
++I;
}
void LDVImpl::computeIntervals() {
for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
UserValueScopes UVS(userValues[i]->getDebugLoc(), LS);
userValues[i]->computeIntervals(MF->getRegInfo(), *TRI, *LIS, *MDT, UVS);
userValues[i]->mapVirtRegs(this);
}
}
bool LDVImpl::runOnMachineFunction(MachineFunction &mf) {
clear();
MF = &mf;
LIS = &pass.getAnalysis<LiveIntervals>();
MDT = &pass.getAnalysis<MachineDominatorTree>();
TRI = mf.getSubtarget().getRegisterInfo();
LS.initialize(mf);
DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
<< mf.getName() << " **********\n");
bool Changed = collectDebugValues(mf);
computeIntervals();
DEBUG(print(dbgs()));
ModifiedMF = Changed;
return Changed;
}
static void removeDebugValues(MachineFunction &mf) {
for (MachineBasicBlock &MBB : mf) {
for (auto MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ) {
if (!MBBI->isDebugValue()) {
++MBBI;
continue;
}
MBBI = MBB.erase(MBBI);
}
}
}
bool LiveDebugVariables::runOnMachineFunction(MachineFunction &mf) {
if (!EnableLDV)
return false;
if (!mf.getFunction()->getSubprogram()) {
removeDebugValues(mf);
return false;
}
if (!pImpl)
pImpl = new LDVImpl(this);
return static_cast<LDVImpl*>(pImpl)->runOnMachineFunction(mf);
}
void LiveDebugVariables::releaseMemory() {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->clear();
}
LiveDebugVariables::~LiveDebugVariables() {
if (pImpl)
delete static_cast<LDVImpl*>(pImpl);
}
//===----------------------------------------------------------------------===//
// Live Range Splitting
//===----------------------------------------------------------------------===//
bool
UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
LiveIntervals& LIS) {
DEBUG({
dbgs() << "Splitting Loc" << OldLocNo << '\t';
print(dbgs(), nullptr);
});
bool DidChange = false;
LocMap::iterator LocMapI;
LocMapI.setMap(locInts);
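// Walk each new register's live segments in lock step with the location
// map, retargeting every overlap that still refers to OldLocNo.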
for (unsigned i = 0; i != NewRegs.size(); ++i) {
LiveInterval *LI = &LIS.getInterval(NewRegs[i]);
if (LI->empty())
continue;
// Don't allocate the new LocNo until it is needed.
unsigned NewLocNo = ~0u;
// Iterate over the overlaps between locInts and LI.
LocMapI.find(LI->beginIndex());
if (!LocMapI.valid())
continue;
LiveInterval::iterator LII = LI->advanceTo(LI->begin(), LocMapI.start());
LiveInterval::iterator LIE = LI->end();
while (LocMapI.valid() && LII != LIE) {
// At this point, we know that LocMapI.stop() > LII->start.
LII = LI->advanceTo(LII, LocMapI.start());
if (LII == LIE)
break;
// Now LII->end > LocMapI.start(). Do we have an overlap?
if (LocMapI.value() == OldLocNo && LII->start < LocMapI.stop()) {
// Overlapping correct location. Allocate NewLocNo now.
if (NewLocNo == ~0u) {
MachineOperand MO = MachineOperand::CreateReg(LI->reg, false);
MO.setSubReg(locations[OldLocNo].getSubReg());
NewLocNo = getLocationNo(MO);
DidChange = true;
}
SlotIndex LStart = LocMapI.start();
SlotIndex LStop = LocMapI.stop();
// Trim LocMapI down to the LII overlap.
if (LStart < LII->start)
LocMapI.setStartUnchecked(LII->start);
if (LStop > LII->end)
LocMapI.setStopUnchecked(LII->end);
// Change the value in the overlap. This may trigger coalescing.
LocMapI.setValue(NewLocNo);
// Re-insert any removed OldLocNo ranges.
if (LStart < LocMapI.start()) {
LocMapI.insert(LStart, LocMapI.start(), OldLocNo);
++LocMapI;
assert(LocMapI.valid() && "Unexpected coalescing");
}
if (LStop > LocMapI.stop()) {
++LocMapI;
LocMapI.insert(LII->end, LStop, OldLocNo);
--LocMapI;
}
}
// Advance to the next overlap.
if (LII->end < LocMapI.stop()) {
if (++LII == LIE)
break;
LocMapI.advanceTo(LII->start);
} else {
++LocMapI;
if (!LocMapI.valid())
break;
LII = LI->advanceTo(LII, LocMapI.start());
}
}
}
// Finally, remove any remaining OldLocNo intervals and OldLocNo itself.
locations.erase(locations.begin() + OldLocNo);
LocMapI.goToBegin();
while (LocMapI.valid()) {
unsigned v = LocMapI.value();
if (v == OldLocNo) {
DEBUG(dbgs() << "Erasing [" << LocMapI.start() << ';'
<< LocMapI.stop() << ")\n");
LocMapI.erase();
} else {
if (v > OldLocNo)
LocMapI.setValueUnchecked(v-1);
++LocMapI;
}
}
DEBUG({dbgs() << "Split result: \t"; print(dbgs(), nullptr);});
return DidChange;
}
bool
UserValue::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
LiveIntervals &LIS) {
bool DidChange = false;
// Split locations referring to OldReg. Iterate backwards so splitLocation can
// safely erase unused locations.
for (unsigned i = locations.size(); i ; --i) {
unsigned LocNo = i-1;
const MachineOperand *Loc = &locations[LocNo];
if (!Loc->isReg() || Loc->getReg() != OldReg)
continue;
DidChange |= splitLocation(LocNo, NewRegs, LIS);
}
return DidChange;
}
void LDVImpl::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs) {
bool DidChange = false;
for (UserValue *UV = lookupVirtReg(OldReg); UV; UV = UV->getNext())
DidChange |= UV->splitRegister(OldReg, NewRegs, *LIS);
if (!DidChange)
return;
// Map all of the new virtual registers.
UserValue *UV = lookupVirtReg(OldReg);
for (unsigned i = 0; i != NewRegs.size(); ++i)
mapVirtReg(NewRegs[i], UV);
}
void LiveDebugVariables::
splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs, LiveIntervals &LIS) {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->splitRegister(OldReg, NewRegs);
}
void
UserValue::rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI) {
// Iterating over locations in reverse makes it easier to handle coalescing.
for (unsigned i = locations.size(); i ; --i) {
unsigned LocNo = i-1;
MachineOperand &Loc = locations[LocNo];
// Only virtual registers are rewritten.
if (!Loc.isReg() || !Loc.getReg() ||
!TargetRegisterInfo::isVirtualRegister(Loc.getReg()))
continue;
unsigned VirtReg = Loc.getReg();
if (VRM.isAssignedReg(VirtReg) &&
TargetRegisterInfo::isPhysicalRegister(VRM.getPhys(VirtReg))) {
// This can create a %noreg operand in rare cases when the sub-register
// index is no longer available. That means the user value is in a
// non-existent sub-register, and %noreg is exactly what we want.
Loc.substPhysReg(VRM.getPhys(VirtReg), TRI);
} else if (VRM.getStackSlot(VirtReg) != VirtRegMap::NO_STACK_SLOT) {
// FIXME: Translate SubIdx to a stackslot offset.
Loc = MachineOperand::CreateFI(VRM.getStackSlot(VirtReg));
} else {
Loc.setReg(0);
Loc.setSubReg(0);
}
coalesceLocation(LocNo);
}
}
/// findInsertLocation - Find an iterator for inserting a DBG_VALUE
/// instruction.
static MachineBasicBlock::iterator
findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
LiveIntervals &LIS) {
SlotIndex Start = LIS.getMBBStartIdx(MBB);
Idx = Idx.getBaseIndex();
// Try to find an insert location by going backwards from Idx.
MachineInstr *MI;
while (!(MI = LIS.getInstructionFromIndex(Idx))) {
// We've reached the beginning of MBB.
if (Idx == Start) {
MachineBasicBlock::iterator I = MBB->SkipPHIsAndLabels(MBB->begin());
return I;
}
Idx = Idx.getPrevIndex();
}
// Don't insert anything after the first terminator, though.
return MI->isTerminator() ? MBB->getFirstTerminator() :
std::next(MachineBasicBlock::iterator(MI));
}
void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx,
unsigned LocNo,
LiveIntervals &LIS,
const TargetInstrInfo &TII) {
MachineBasicBlock::iterator I = findInsertLocation(MBB, Idx, LIS);
MachineOperand &Loc = locations[LocNo];
++NumInsertedDebugValues;
assert(cast<DILocalVariable>(Variable)
->isValidLocationForIntrinsic(getDebugLoc()) &&
"Expected inlined-at fields to agree");
if (Loc.isReg())
BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE),
IsIndirect, Loc.getReg(), offset, Variable, Expression);
else
BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE))
.addOperand(Loc)
.addImm(offset)
.addMetadata(Variable)
.addMetadata(Expression);
}
void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
const TargetInstrInfo &TII) {
MachineFunction::iterator MFEnd = VRM->getMachineFunction().end();
for (LocMap::const_iterator I = locInts.begin(); I.valid();) {
SlotIndex Start = I.start();
SlotIndex Stop = I.stop();
unsigned LocNo = I.value();
DEBUG(dbgs() << "\t[" << Start << ';' << Stop << "):" << LocNo);
MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start)->getIterator();
SlotIndex MBBEnd = LIS.getMBBEndIdx(&*MBB);
DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
insertDebugValue(&*MBB, Start, LocNo, LIS, TII);
// This interval may span multiple basic blocks.
// Insert a DBG_VALUE into each one.
while (Stop > MBBEnd) {
// Move to the next block.
Start = MBBEnd;
if (++MBB == MFEnd)
break;
MBBEnd = LIS.getMBBEndIdx(&*MBB);
DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
insertDebugValue(&*MBB, Start, LocNo, LIS, TII);
}
DEBUG(dbgs() << '\n');
if (MBB == MFEnd)
break;
++I;
}
}
void LDVImpl::emitDebugValues(VirtRegMap *VRM) {
DEBUG(dbgs() << "********** EMITTING LIVE DEBUG VARIABLES **********\n");
if (!MF)
return;
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
DEBUG(userValues[i]->print(dbgs(), TRI));
userValues[i]->rewriteLocations(*VRM, *TRI);
userValues[i]->emitDebugValues(VRM, *LIS, *TII);
}
EmitDone = true;
}
void LiveDebugVariables::emitDebugValues(VirtRegMap *VRM) {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->emitDebugValues(VRM);
}
bool LiveDebugVariables::doInitialization(Module &M) {
return Pass::doInitialization(M);
}
#ifndef NDEBUG
LLVM_DUMP_METHOD void LiveDebugVariables::dump() {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->print(dbgs());
}
#endif<|fim▁end|> | if (Segment->end < Stop) { |