# config_tags.py
from django import template
from system.models import Configuration
register = template.Library()
@register.assignment_tag
def get_config(conf_name=None):
    if conf_name is None:
        raise Exception("Invalid config name")
    c = Configuration.get_by_name_all_fields(conf_name)
    if not c:
        return None
    return {
        "name": c.name,
        "value": c.value,
        "description": c.description,
        "hash": c.hash
    }
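
# Usage sketch for the tag above (hypothetical template and config name;
# assumes Django settings are configured and this tag library is installed):
#
#   {% load config_tags %}
#   {% get_config "site_title" as cfg %}
#   {{ cfg.value }}
#
# `assignment_tag` binds the returned dict to the `cfg` template variable.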

# wtforms_utils.py
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Extended WTForms field
Classes TimeField, DatePickerWidget, DateTimePickerWidget and TimePickerWidget
are taken from `flask-admin` extension.
:copyright: (c) 2011 by wilsaj.
:license: BSD, see LICENSE for more details.
:source: https://raw.github.com/wilsaj/flask-admin/master/flask_admin/wtforms.py
"""
import datetime
import time
from flask import session
from wtforms import Form as WTForm
from wtforms.widgets import TextInput, HTMLString, html_params
from wtforms.fields import Field, TextField, HiddenField, FileField
from flask.ext.wtf import Form
from wtforms.ext.csrf.session import SessionSecureForm
from wtforms.compat import text_type
from invenio.config import CFG_SITE_SECRET_KEY
class RowWidget(object):
"""
    Renders a list of fields inline as a single row of inputs; hidden
    subfields are emitted just before the next visible field.
"""
def __init__(self):
pass
def __call__(self, field, **kwargs):
html = []
hidden = ''
for subfield in field:
if subfield.type == 'HiddenField':
hidden += text_type(subfield)
else:
html.append('%s%s' % (hidden, text_type(subfield(class_="span1", placeholder=subfield.label.text))))
hidden = ''
if hidden:
html.append(hidden)
return HTMLString(''.join(html))
class TimeField(Field):
"""A text field which stores a `time.time` matching a format."""
widget = TextInput()
def __init__(self, label=None, validators=None,
format='%H:%M:%S', **kwargs):
super(TimeField, self).__init__(label, validators, **kwargs)
self.format = format
def _value(self):
if self.raw_data:
return u' '.join(self.raw_data)
else:
return self.data and self.data.strftime(self.format) or u''
def process_formdata(self, valuelist):
if valuelist:
time_str = u' '.join(valuelist)
try:
timetuple = time.strptime(time_str, self.format)
self.data = datetime.time(*timetuple[3:6])
except ValueError:
self.data = None
raise
class DatePickerWidget(TextInput):
"""
TextInput widget that adds a 'datepicker' class to the html input
element; this makes it easy to write a jQuery selector that adds a
UI widget for date picking.
"""
def __call__(self, field, **kwargs):
c = kwargs.pop('class', '') or kwargs.pop('class_', '')
kwargs['class'] = u'datepicker %s' % c
return super(DatePickerWidget, self).__call__(field, **kwargs)
class DateTimePickerWidget(TextInput):
"""TextInput widget that adds a 'datetimepicker' class to the html
adds a UI widget for datetime picking.
"""
def __call__(self, field, **kwargs):
c = kwargs.pop('class', '') or kwargs.pop('class_', '')
kwargs['class'] = u'datetimepicker %s' % c
return super(DateTimePickerWidget, self).__call__(field, **kwargs)
class TimePickerWidget(TextInput):
"""TextInput widget that adds a 'timepicker' class to the html
input element; this makes it easy to write a jQuery selector that
adds a UI widget for time picking.
"""
def __call__(self, field, **kwargs):
c = kwargs.pop('class', '') or kwargs.pop('class_', '')
kwargs['class'] = u'timepicker %s' % c
        return super(TimePickerWidget, self).__call__(field, **kwargs)

class AutocompleteField(TextField):
    def __init__(self, label=None, validators=None, data_provide="typeahead", data_source=None, **kwargs):
super(AutocompleteField, self).__init__(label, validators, **kwargs)
if data_source:
self.widget = TypeheadWidget(data_source, data_provide)
class TypeheadWidget(object):
def __init__(self, autocomplete_list, data_provide):
if callable(autocomplete_list):
self.autocomplete_list = autocomplete_list()
else:
self.autocomplete_list = '["{}"]'.format('","'.join(autocomplete_list))
self.data_provide = data_provide
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
kwargs.setdefault('type', 'text')
kwargs.setdefault('data-provide', self.data_provide)
kwargs.setdefault('data-source', self.autocomplete_list)
if 'value' not in kwargs:
kwargs['value'] = field._value()
return HTMLString(u'<input %s />' % html_params(name=field.name, **kwargs))
def has_file_field(form):
"""Test whether or not a form has a FileField in it. This is used
to know whether or not we need to set enctype to
multipart/form-data.
"""
for field in form:
if isinstance(field, FileField):
return True
return False
class FilterTextField(TextField):
alias = None
def __init__(self, *args, **kwargs):
self.alias = kwargs.get('alias')
if 'alias' in kwargs:
del kwargs['alias']
super(TextField, self).__init__(*args, **kwargs)
if not self.raw_data:
self.raw_data = []
def _value(self):
if self.raw_data:
return self.raw_data.pop()
return u''
class InvenioForm(WTForm):
@property
def required_field_names(self):
return [field.name for field in self if hasattr(field, 'required')]
class InvenioBaseForm(Form, SessionSecureForm):
SECRET_KEY = CFG_SITE_SECRET_KEY
TIME_LIMIT = datetime.timedelta(minutes=20)
def __init__(self, *args, **kwargs):
super(InvenioBaseForm, self).__init__(*args, csrf_context=session, **kwargs)
def add_fields(self, name, field):
self.__setattr__(name, field)
class FilterForm(InvenioBaseForm):
"""
Filter forms contains hidden fields to keep sorting.
"""
sort_by = HiddenField()
    order = HiddenField()
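
# Minimal sketch of AutocompleteField above (hypothetical form; assumes this
# module is importable). A static list is serialized into the widget's
# data-source attribute; a callable would be invoked for its value instead:
#
#   class CitySearchForm(InvenioForm):
#       city = AutocompleteField('City', data_source=['Geneva', 'Meyrin'])
#
# Rendering `city` emits an <input> with data-provide="typeahead" and
# data-source='["Geneva","Meyrin"]'.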

#!env/bin/python
# run.py
from app import app
import sys
port = 5000
debug = True
if len(sys.argv) == 3:
    debug = sys.argv[1] == 'debug'
    port = int(sys.argv[2])
app.run(debug=debug, port=port)
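
# Invocation sketch (assumed shell usage): `python run.py debug 8080` enables
# debug mode on port 8080; with any other argument count the defaults above
# (debug=True, port=5000) are kept.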

# provider.py
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class ShopifyAccount(ProviderAccount):
    pass

class ShopifyProvider(OAuth2Provider):
    id = 'shopify'
name = 'Shopify'
account_class = ShopifyAccount
def get_auth_params(self, request, action):
ret = super(ShopifyProvider, self).get_auth_params(request, action)
shop = request.GET.get('shop', None)
if shop:
ret.update({'shop': shop})
return ret
def get_default_scope(self):
return ['read_orders', 'read_products']
def extract_uid(self, data):
return str(data['shop']['id'])
def extract_common_fields(self, data):
# See: https://docs.shopify.com/api/shop
# User is only available with Shopify Plus, email is the only
# common field
return dict(email=data['shop']['email'])
providers.registry.register(ShopifyProvider)
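
# Illustrative /shop payload shape implied by the extractors above (values are
# made up; see the Shopify docs link in the source):
#
#   data = {"shop": {"id": 1, "email": "owner@example.com"}}
#   extract_uid(data)            # -> "1"
#   extract_common_fields(data)  # -> {"email": "owner@example.com"}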

// input_manager.cpp
/*****************************************************************************
 * input_manager.cpp : Manage an input slider
*****************************************************************************
* Copyright (C) 2000-2005 the VideoLAN team
* $Id: input_manager.cpp 14556 2006-03-01 19:56:34Z fkuehne $
*
* Authors: Gildas Bazin <[email protected]>
* Clément Stenac <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#include "input_manager.hpp"
#include "interface.hpp"<|fim▁hole|>/* include the toolbar graphics */
#include "bitmaps/prev.xpm"
#include "bitmaps/next.xpm"
#include "bitmaps/playlist.xpm"
/* IDs for the controls */
enum
{
SliderScroll_Event = wxID_HIGHEST,
DiscMenu_Event,
DiscPrev_Event,
DiscNext_Event
};
BEGIN_EVENT_TABLE(InputManager, wxPanel)
/* Slider events */
EVT_COMMAND_SCROLL(SliderScroll_Event, InputManager::OnSliderUpdate)
/* Disc Buttons events */
EVT_BUTTON(DiscMenu_Event, InputManager::OnDiscMenu)
EVT_BUTTON(DiscPrev_Event, InputManager::OnDiscPrev)
EVT_BUTTON(DiscNext_Event, InputManager::OnDiscNext)
END_EVENT_TABLE()
#define STATUS_STOP 0
#define STATUS_PLAYING 1
#define STATUS_PAUSE 2
/*****************************************************************************
* Constructor.
*****************************************************************************/
InputManager::InputManager( intf_thread_t *_p_intf, Interface *_p_main_intf,
wxWindow *p_parent )
: wxPanel( p_parent )
{
p_intf = _p_intf;
p_main_intf = _p_main_intf;
p_input = NULL;
i_old_playing_status = STATUS_STOP;
i_old_rate = INPUT_RATE_DEFAULT;
b_slider_free = VLC_TRUE;
i_input_hide_delay = 0;
/* Create slider */
slider = new wxSlider( this, SliderScroll_Event, 0, 0, SLIDER_MAX_POS );
/* Create disc buttons */
disc_frame = new wxPanel( this );
disc_menu_button = new wxBitmapButton( disc_frame, DiscMenu_Event,
wxBitmap( playlist_xpm ) );
disc_prev_button = new wxBitmapButton( disc_frame, DiscPrev_Event,
wxBitmap( prev_xpm ) );
disc_next_button = new wxBitmapButton( disc_frame, DiscNext_Event,
wxBitmap( next_xpm ) );
disc_sizer = new wxBoxSizer( wxHORIZONTAL );
disc_sizer->Add( disc_menu_button, 1, wxEXPAND | wxLEFT | wxRIGHT, 1 );
disc_sizer->Add( disc_prev_button, 1, wxEXPAND | wxLEFT | wxRIGHT, 1 );
disc_sizer->Add( disc_next_button, 1, wxEXPAND | wxLEFT | wxRIGHT, 1 );
disc_frame->SetSizer( disc_sizer );
disc_sizer->Layout();
/* Add everything to the panel */
sizer = new wxBoxSizer( wxHORIZONTAL );
SetSizer( sizer );
sizer->Add( slider, 1, wxEXPAND | wxALL, 5 );
sizer->Add( disc_frame, 0, wxALL, 2 );
/* Hide by default */
sizer->Hide( disc_frame );
sizer->Hide( slider );
sizer->Layout();
Fit();
}
InputManager::~InputManager()
{
vlc_mutex_lock( &p_intf->change_lock );
if( p_intf->p_sys->p_input ) vlc_object_release( p_intf->p_sys->p_input );
p_intf->p_sys->p_input = NULL;
vlc_mutex_unlock( &p_intf->change_lock );
}
/*****************************************************************************
* Public methods.
*****************************************************************************/
vlc_bool_t InputManager::IsPlaying()
{
return (p_input && !p_input->b_die);
}
/*****************************************************************************
* Private methods.
*****************************************************************************/
void InputManager::UpdateInput()
{
playlist_t *p_playlist =
(playlist_t *)vlc_object_find( p_intf, VLC_OBJECT_PLAYLIST,
FIND_ANYWHERE );
if( p_playlist != NULL )
{
LockPlaylist( p_intf->p_sys, p_playlist );
p_input = p_intf->p_sys->p_input = p_playlist->p_input;
if( p_intf->p_sys->p_input )
vlc_object_yield( p_intf->p_sys->p_input );
UnlockPlaylist( p_intf->p_sys, p_playlist );
vlc_object_release( p_playlist );
}
}
void InputManager::UpdateNowPlaying()
{
char *psz_now_playing = vlc_input_item_GetInfo( p_input->input.p_item,
_(VLC_META_INFO_CAT), _(VLC_META_NOW_PLAYING) );
if( psz_now_playing && *psz_now_playing )
{
p_main_intf->statusbar->SetStatusText(
wxString(wxU(psz_now_playing)) + wxT( " - " ) +
wxU(p_input->input.p_item->psz_name), 2 );
}
else
{
p_main_intf->statusbar->SetStatusText(
wxU(p_input->input.p_item->psz_name), 2 );
}
free( psz_now_playing );
}
void InputManager::UpdateButtons( vlc_bool_t b_play )
{
if( !b_play )
{
if( i_old_playing_status == STATUS_STOP ) return;
i_old_playing_status = STATUS_STOP;
p_main_intf->TogglePlayButton( PAUSE_S );
p_main_intf->statusbar->SetStatusText( wxT(""), 0 );
p_main_intf->statusbar->SetStatusText( wxT(""), 2 );
/* wxCocoa pretends to support this, but at least 2.6.x doesn't */
#ifndef __APPLE__
#ifdef wxHAS_TASK_BAR_ICON
if( p_main_intf->p_systray )
{
p_main_intf->p_systray->UpdateTooltip(
wxString(wxT("VLC media player - ")) + wxU(_("Stopped")) );
}
#endif
#endif
return;
}
/* Manage Playing status */
vlc_value_t val;
var_Get( p_input, "state", &val );
val.i_int = val.i_int == PAUSE_S ? STATUS_PAUSE : STATUS_PLAYING;
if( i_old_playing_status != val.i_int )
{
i_old_playing_status = val.i_int;
p_main_intf->TogglePlayButton( val.i_int == STATUS_PAUSE ?
PAUSE_S : PLAYING_S );
/* wxCocoa pretends to support this, but at least 2.6.x doesn't */
#ifndef __APPLE__
#ifdef wxHAS_TASK_BAR_ICON
if( p_main_intf->p_systray )
{
p_main_intf->p_systray->UpdateTooltip(
wxU(p_input->input.p_item->psz_name) + wxString(wxT(" - ")) +
(val.i_int == PAUSE_S ? wxU(_("Paused")) : wxU(_("Playing"))));
}
#endif
#endif
}
}
void InputManager::UpdateDiscButtons()
{
vlc_value_t val;
var_Change( p_input, "title", VLC_VAR_CHOICESCOUNT, &val, NULL );
if( val.i_int > 0 && !disc_frame->IsShown() )
{
vlc_value_t val;
#define HELP_MENU N_("Menu")
#define HELP_PCH N_("Previous chapter")
#define HELP_NCH N_("Next chapter")
#define HELP_PTR N_("Previous track")
#define HELP_NTR N_("Next track")
var_Change( p_input, "chapter", VLC_VAR_CHOICESCOUNT, &val, NULL );
if( val.i_int > 0 )
{
disc_menu_button->Show();
disc_sizer->Show( disc_menu_button );
disc_sizer->Layout();
disc_sizer->Fit( disc_frame );
disc_menu_button->SetToolTip( wxU(_( HELP_MENU ) ) );
disc_prev_button->SetToolTip( wxU(_( HELP_PCH ) ) );
disc_next_button->SetToolTip( wxU(_( HELP_NCH ) ) );
}
else
{
disc_menu_button->Hide();
disc_sizer->Hide( disc_menu_button );
disc_prev_button->SetToolTip( wxU(_( HELP_PTR ) ) );
disc_next_button->SetToolTip( wxU(_( HELP_NTR ) ) );
}
ShowDiscFrame();
}
else if( val.i_int == 0 && disc_frame->IsShown() )
{
HideDiscFrame();
}
}
void InputManager::HideSlider()
{
ShowSlider( false );
}
void InputManager::HideDiscFrame()
{
ShowDiscFrame( false );
}
void InputManager::UpdateTime()
{
char psz_time[ MSTRTIME_MAX_SIZE ], psz_total[ MSTRTIME_MAX_SIZE ];
mtime_t i_seconds;
i_seconds = var_GetTime( p_intf->p_sys->p_input, "length" ) / 1000000;
secstotimestr( psz_total, i_seconds );
i_seconds = var_GetTime( p_intf->p_sys->p_input, "time" ) / 1000000;
secstotimestr( psz_time, i_seconds );
p_main_intf->statusbar->SetStatusText(
wxU(psz_time) + wxString(wxT(" / ")) +wxU(psz_total), 0 );
}
void InputManager::Update()
{
/* Update the input */
if( p_input == NULL )
{
UpdateInput();
if( p_input )
{
slider->SetValue( 0 );
}
else if( !i_input_hide_delay )
{
i_input_hide_delay = mdate() + 200000;
}
else if( i_input_hide_delay < mdate() )
{
if( disc_frame->IsShown() ) HideDiscFrame();
if( slider->IsShown() ) HideSlider();
i_input_hide_delay = 0;
}
}
else if( p_input->b_dead )
{
UpdateButtons( VLC_FALSE );
vlc_object_release( p_input );
p_input = NULL;
}
else
{
i_input_hide_delay = 0;
}
if( p_input && !p_input->b_die )
{
vlc_value_t pos, len;
UpdateTime();
UpdateButtons( VLC_TRUE );
UpdateNowPlaying();
UpdateDiscButtons();
/* Really manage the slider */
var_Get( p_input, "position", &pos );
var_Get( p_input, "length", &len );
if( pos.f_float > 0 && !slider->IsShown() ) ShowSlider();
else if( pos.f_float <= 0 && slider->IsShown() ) HideSlider();
/* Update the slider if the user isn't dragging it. */
if( slider->IsShown() && b_slider_free )
{
i_slider_pos = (int)(SLIDER_MAX_POS * pos.f_float);
slider->SetValue( i_slider_pos );
}
/* Manage Speed status */
vlc_value_t val;
var_Get( p_input, "rate", &val );
if( i_old_rate != val.i_int )
{
p_main_intf->statusbar->SetStatusText(
wxString::Format(wxT("x%.2f"),
(float)INPUT_RATE_DEFAULT / val.i_int ), 1 );
i_old_rate = val.i_int;
}
}
}
/*****************************************************************************
* Event Handlers.
*****************************************************************************/
void InputManager::OnDiscMenu( wxCommandEvent& WXUNUSED(event) )
{
input_thread_t *p_input =
(input_thread_t *)vlc_object_find( p_intf, VLC_OBJECT_INPUT,
FIND_ANYWHERE );
if( p_input )
{
vlc_value_t val; val.i_int = 2;
var_Set( p_input, "title 0", val);
vlc_object_release( p_input );
}
}
void InputManager::OnDiscPrev( wxCommandEvent& WXUNUSED(event) )
{
input_thread_t *p_input =
(input_thread_t *)vlc_object_find( p_intf, VLC_OBJECT_INPUT,
FIND_ANYWHERE );
if( p_input )
{
int i_type = var_Type( p_input, "prev-chapter" );
vlc_value_t val; val.b_bool = VLC_TRUE;
var_Set( p_input, ( i_type & VLC_VAR_TYPE ) != 0 ?
"prev-chapter" : "prev-title", val );
vlc_object_release( p_input );
}
}
void InputManager::OnDiscNext( wxCommandEvent& WXUNUSED(event) )
{
input_thread_t *p_input =
(input_thread_t *)vlc_object_find( p_intf, VLC_OBJECT_INPUT,
FIND_ANYWHERE );
if( p_input )
{
int i_type = var_Type( p_input, "next-chapter" );
vlc_value_t val; val.b_bool = VLC_TRUE;
var_Set( p_input, ( i_type & VLC_VAR_TYPE ) != 0 ?
"next-chapter" : "next-title", val );
vlc_object_release( p_input );
}
}
void InputManager::OnSliderUpdate( wxScrollEvent& event )
{
vlc_mutex_lock( &p_intf->change_lock );
#ifdef WIN32
if( event.GetEventType() == wxEVT_SCROLL_THUMBRELEASE
|| event.GetEventType() == wxEVT_SCROLL_ENDSCROLL )
{
#endif
if( i_slider_pos != event.GetPosition() && p_intf->p_sys->p_input )
{
vlc_value_t pos;
pos.f_float = (float)event.GetPosition() / (float)SLIDER_MAX_POS;
var_Set( p_intf->p_sys->p_input, "position", pos );
}
#ifdef WIN32
b_slider_free = VLC_TRUE;
}
else
{
b_slider_free = VLC_FALSE;
if( p_intf->p_sys->p_input ) UpdateTime();
}
#endif
#undef WIN32
vlc_mutex_unlock( &p_intf->change_lock );
}
void InputManager::ShowSlider( bool show )
{
if( !!show == !!slider->IsShown() ) return;
UpdateVideoWindow( p_intf, p_main_intf->video_window );
sizer->Show( slider, show );
sizer->Layout();
wxCommandEvent intf_event( wxEVT_INTF, 0 );
p_main_intf->AddPendingEvent( intf_event );
}
void InputManager::ShowDiscFrame( bool show )
{
if( !!show == !!disc_frame->IsShown() ) return;
UpdateVideoWindow( p_intf, p_main_intf->video_window );
sizer->Show( disc_frame, show );
sizer->Layout();
wxCommandEvent intf_event( wxEVT_INTF, 0 );
p_main_intf->AddPendingEvent( intf_event );
}

// loadsettings.hh
// -*- mode: c++; tab-width: 4; indent-tabs-mode: t; eval: (progn (c-set-style "stroustrup") (c-set-offset 'innamespace 0)); -*-
// vi:set ts=4 sts=4 sw=4 noet :
//
// Copyright 2010, 2012 wkhtmltopdf authors
//
// This file is part of wkhtmltopdf.
//
// wkhtmltopdf is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// wkhtmltopdf is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with wkhtmltopdf. If not, see <http://www.gnu.org/licenses/>.
#ifndef __LOADSETTINGS_HH__
#define __LOADSETTINGS_HH__
#ifdef __WKHTMLTOX_UNDEF_QT_DLL__
#ifdef QT_DLL
#undef QT_DLL
#endif
#endif
#include <QNetworkProxy>
#include <QString>
#include <wkhtmltox/dllbegin.inc>
namespace wkhtmltopdf {
namespace settings {
/*! \brief Settings considering proxy */
struct DLL_PUBLIC Proxy {
Proxy();
//! Type of proxy to use
QNetworkProxy::ProxyType type;
//! The port of the proxy to use
int port;
//! The host name of the proxy to use or NULL
QString host;
//! Username for the said proxy or NULL
QString user;
//! Password for the said proxy or NULL
QString password;
};
struct DLL_PUBLIC PostItem {
QString name;
QString value;
bool file;
};
struct DLL_PUBLIC LoadGlobal {
LoadGlobal();
//! Path of the cookie jar file
QString cookieJar;
};
struct DLL_PUBLIC LoadPage {
LoadPage();
enum LoadErrorHandling {
abort,
skip,
ignore
};
//! Username used for http auth login
QString username;
//! Password used for http auth login
QString password;
	//! How many milliseconds should we wait for a Javascript redirect
	int jsdelay;
//! What window.status value should we wait for
QString windowStatus;
//! Dump rendered HTML to file
QString dumpHtml;
//! What zoom factor should we apply when printing
// TODO MOVE
float zoomFactor;
//! Map of custom header variables
QList< QPair<QString, QString> > customHeaders;
//! Set if the custom header should be repeated for each resource request
bool repeatCustomHeaders;
//! Map of cookies
QList< QPair<QString, QString> > cookies;
QList< PostItem > post;
//! Block access to local files for the given page
bool blockLocalFileAccess;
//! If access to local files is not allowed in general, allow it for these files
QList< QString > allowed;
//! Stop Javascript from running too long
bool stopSlowScripts;
//! Output Javascript debug messages
bool debugJavascript;
//! What should we do about load errors
LoadErrorHandling loadErrorHandling;
LoadErrorHandling mediaLoadErrorHandling;
//! Proxy related settings
Proxy proxy;
//! Additional javascript to run on a page once it has loaded
QList< QString > runScript;
QString checkboxSvg;
QString checkboxCheckedSvg;
QString radiobuttonSvg;
QString radiobuttonCheckedSvg;
QString cacheDir;
static QList<QString> mediaFilesExtensions;
};
DLL_PUBLIC LoadPage::LoadErrorHandling strToLoadErrorHandling(const char * s, bool * ok=0);
DLL_PUBLIC QString loadErrorHandlingToStr(LoadPage::LoadErrorHandling leh);
DLL_PUBLIC Proxy strToProxy(const char * s, bool * ok=0);
DLL_PUBLIC QString proxyToStr(const Proxy & proxy);
}
}
#include <wkhtmltox/dllend.inc>
#endif //__LOADSETTINGS_HH__

// issue.go
// Copyright 2016 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package gitea
import (
"bytes"
"encoding/json"
"fmt"
"time"
)
// StateType issue state type
type StateType string
const (
	// StateOpen pr is opened
StateOpen StateType = "open"
// StateClosed pr is closed
StateClosed StateType = "closed"
)
// PullRequestMeta PR info if an issue is a PR
type PullRequestMeta struct {
HasMerged bool `json:"merged"`
Merged *time.Time `json:"merged_at"`
}
// Issue represents an issue in a repository
// swagger:model
type Issue struct {
ID int64 `json:"id"`
URL string `json:"url"`
Index int64 `json:"number"`
Poster *User `json:"user"`
Title string `json:"title"`
Body string `json:"body"`
Labels []*Label `json:"labels"`
Milestone *Milestone `json:"milestone"`
Assignee *User `json:"assignee"`
Assignees []*User `json:"assignees"`
// Whether the issue is open or closed
//
// type: string
// enum: open,closed
State StateType `json:"state"`
Comments int `json:"comments"`
// swagger:strfmt date-time
Created time.Time `json:"created_at"`
// swagger:strfmt date-time
Updated time.Time `json:"updated_at"`
// swagger:strfmt date-time
Closed *time.Time `json:"closed_at"`
// swagger:strfmt date-time
Deadline *time.Time `json:"due_date"`
PullRequest *PullRequestMeta `json:"pull_request"`
}
// ListIssueOption list issue options
type ListIssueOption struct {
Page int
State string
}
// ListIssues returns all issues assigned to the authenticated user
func (c *Client) ListIssues(opt ListIssueOption) ([]*Issue, error) {
issues := make([]*Issue, 0, 10)
return issues, c.getParsedResponse("GET", fmt.Sprintf("/issues?page=%d", opt.Page), nil, nil, &issues)
}
// ListUserIssues returns all issues assigned to the authenticated user
func (c *Client) ListUserIssues(opt ListIssueOption) ([]*Issue, error) {
issues := make([]*Issue, 0, 10)
return issues, c.getParsedResponse("GET", fmt.Sprintf("/user/issues?page=%d", opt.Page), nil, nil, &issues)
}
// ListRepoIssues returns all issues for a given repository
func (c *Client) ListRepoIssues(owner, repo string, opt ListIssueOption) ([]*Issue, error) {
issues := make([]*Issue, 0, 10)
return issues, c.getParsedResponse("GET", fmt.Sprintf("/repos/%s/%s/issues?page=%d", owner, repo, opt.Page), nil, nil, &issues)
}
// GetIssue returns a single issue for a given repository
func (c *Client) GetIssue(owner, repo string, index int64) (*Issue, error) {
issue := new(Issue)
return issue, c.getParsedResponse("GET", fmt.Sprintf("/repos/%s/%s/issues/%d", owner, repo, index), nil, nil, issue)
}
// CreateIssueOption options to create one issue
type CreateIssueOption struct {
// required:true
Title string `json:"title" binding:"Required"`
Body string `json:"body"`
// username of assignee
Assignee string `json:"assignee"`
Assignees []string `json:"assignees"`
// swagger:strfmt date-time
Deadline *time.Time `json:"due_date"`
	// milestone id
	Milestone int64 `json:"milestone"`
// list of label ids
Labels []int64 `json:"labels"`
Closed bool `json:"closed"`
}
// CreateIssue create a new issue for a given repository
func (c *Client) CreateIssue(owner, repo string, opt CreateIssueOption) (*Issue, error) {
body, err := json.Marshal(&opt)
if err != nil {
return nil, err
}
issue := new(Issue)
return issue, c.getParsedResponse("POST", fmt.Sprintf("/repos/%s/%s/issues", owner, repo),
jsonHeader, bytes.NewReader(body), issue)
}
// EditIssueOption options for editing an issue
type EditIssueOption struct {
Title string `json:"title"`
Body *string `json:"body"`
Assignee *string `json:"assignee"`
Assignees []string `json:"assignees"`
Milestone *int64 `json:"milestone"`
State *string `json:"state"`
// swagger:strfmt date-time
Deadline *time.Time `json:"due_date"`
}
// EditIssue modify an existing issue for a given repository
func (c *Client) EditIssue(owner, repo string, index int64, opt EditIssueOption) (*Issue, error) {
body, err := json.Marshal(&opt)
if err != nil {
return nil, err
}
issue := new(Issue)
return issue, c.getParsedResponse("PATCH", fmt.Sprintf("/repos/%s/%s/issues/%d", owner, repo, index),
jsonHeader, bytes.NewReader(body), issue)
}
// EditDeadlineOption options for creating a deadline
type EditDeadlineOption struct {
// required:true
// swagger:strfmt date-time
Deadline *time.Time `json:"due_date"`
}
// IssueDeadline represents an issue deadline
// swagger:model
type IssueDeadline struct {
// swagger:strfmt date-time
Deadline *time.Time `json:"due_date"`
}
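
# Hedged Python sketch of the same REST calls the Go client above issues
# (endpoint paths are taken from the fmt.Sprintf calls; base URL, repo and
# auth are assumptions):
#
#   import requests
#
#   BASE = "https://gitea.example.com/api/v1"
#   issues = requests.get(BASE + "/repos/owner/repo/issues",
#                         params={"page": 1}).json()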

// direct_logging.cpp
// This program sends logging records directly to the server, rather
// than going through the client logging daemon.
#include "ace/SOCK_Connector.h"<|fim▁hole|>#include "ace/OS_NS_time.h"
#include "ace/OS_NS_stdlib.h"
#include "ace/OS_NS_unistd.h"
#include "ace/CDR_Stream.h"
static u_short LOGGER_PORT = ACE_DEFAULT_SERVER_PORT;
static const ACE_TCHAR *const LOGGER_HOST = ACE_DEFAULT_SERVER_HOST;
static const ACE_TCHAR *const DATA = ACE_TEXT ("hello world\n");
int
ACE_TMAIN (int argc, ACE_TCHAR *argv[])
{
u_short logger_port = argc > 1 ? ACE_OS::atoi (argv[1]) : LOGGER_PORT;
const ACE_TCHAR *logger_host = argc > 2 ? argv[2] : LOGGER_HOST;
ACE_SOCK_Stream logger;
ACE_SOCK_Connector connector;
ACE_INET_Addr addr (logger_port, logger_host);
ACE_Log_Record log_record (LM_DEBUG,
ACE_OS::time ((time_t *) 0),
ACE_OS::getpid ());
if (connector.connect (logger, addr) == -1)
ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
log_record.msg_data (DATA);
const size_t max_payload_size =
4 // type()
+ 8 // timestamp
+ 4 // process id
+ 4 // data length
+ ACE_Log_Record::MAXLOGMSGLEN // data
+ ACE_CDR::MAX_ALIGNMENT; // padding;
// Insert contents of <log_record> into payload stream.
ACE_OutputCDR payload (max_payload_size);
payload << log_record;
// Get the number of bytes used by the CDR stream.
ACE_CDR::ULong length = payload.total_length ();
// Send a header so the receiver can determine the byte order and
// size of the incoming CDR stream.
ACE_OutputCDR header (ACE_CDR::MAX_ALIGNMENT + 8);
header << ACE_OutputCDR::from_boolean (ACE_CDR_BYTE_ORDER);
// Store the size of the payload that follows
header << ACE_CDR::ULong (length);
// Use an iovec to send both buffer and payload simultaneously.
iovec iov[2];
iov[0].iov_base = header.begin ()->rd_ptr ();
iov[0].iov_len = 8;
iov[1].iov_base = payload.begin ()->rd_ptr ();
iov[1].iov_len = length;
if (logger.sendv_n (iov, 2) == -1)
ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1);
else if (logger.close () == -1)
ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "close"), -1);
#if defined (ACE_WIN32)
// !!Important, Winsock is broken in that if you don't close
// down the connection before exiting main, you'll lose data.
// More over, your server might get "Access Violation" from
// within Winsock functions.
// Here we close down the connection to Logger by redirecting
// the logging destination back to stderr.
ACE_LOG_MSG->open (0, ACE_Log_Msg::STDERR, 0);
#endif /* ACE_WIN32 */
return 0;
}

// types.rs
use std::cell::RefCell;
use std::fmt;
use std::rc::Rc;
use syntax::abi;
pub use self::Global::*;
pub use self::Type::*;
pub use self::IKind::*;
pub use self::FKind::*;
#[derive(Clone)]
pub enum Global {
GType(Rc<RefCell<TypeInfo>>),
GComp(Rc<RefCell<CompInfo>>),
GCompDecl(Rc<RefCell<CompInfo>>),
GEnum(Rc<RefCell<EnumInfo>>),
GEnumDecl(Rc<RefCell<EnumInfo>>),
GVar(Rc<RefCell<VarInfo>>),
GFunc(Rc<RefCell<VarInfo>>),
GOther
}
impl Global {
pub fn compinfo(&self) -> Rc<RefCell<CompInfo>> {
match *self {
GComp(ref i) => return i.clone(),
GCompDecl(ref i) => return i.clone(),
_ => panic!("global_compinfo".to_string())
}
}
pub fn enuminfo(&self) -> Rc<RefCell<EnumInfo>> {
match *self {
GEnum(ref i) => return i.clone(),
GEnumDecl(ref i) => return i.clone(),
_ => panic!("global_enuminfo".to_string())
}
}
pub fn typeinfo(&self) -> Rc<RefCell<TypeInfo>> {
match *self {
GType(ref i) => return i.clone(),
_ => panic!("global_typeinfo".to_string())
}
}
pub fn varinfo(&self) -> Rc<RefCell<VarInfo>> {
match *self {
GVar(ref i) => i.clone(),
GFunc(ref i) => i.clone(),
_ => panic!("global_varinfo".to_string())
}
}
}
impl fmt::Debug for Global {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
GType(ref ti) => ti.borrow().fmt(f),
GComp(ref ci) => ci.borrow().fmt(f),
GCompDecl(ref ci) => ci.borrow().fmt(f),
GEnum(ref ei) => ei.borrow().fmt(f),
GEnumDecl(ref ei) => ei.borrow().fmt(f),
GVar(ref vi) => vi.borrow().fmt(f),
GFunc(ref vi) => vi.borrow().fmt(f),
GOther => "*".fmt(f),
}
}
}
#[derive(Clone, PartialEq)]
pub struct FuncSig {
pub ret_ty: Box<Type>,
pub args: Vec<(String, Type)>,
pub is_variadic: bool,
pub abi: abi::Abi,
}
#[derive(Clone, PartialEq)]
pub enum Type {
TVoid,
TInt(IKind, Layout),
TFloat(FKind, Layout),
TPtr(Box<Type>, bool, Layout),
TArray(Box<Type>, usize, Layout),
TFuncProto(FuncSig),
TFuncPtr(FuncSig),
TNamed(Rc<RefCell<TypeInfo>>),
TComp(Rc<RefCell<CompInfo>>),
TEnum(Rc<RefCell<EnumInfo>>)
}
impl Type {
pub fn size(&self) -> usize {
match self {
&TInt(_, l) => l.size,
&TFloat(_, l) => l.size,
&TPtr(_, _, l) => l.size,
&TArray(_, _, l) => l.size,
&TNamed(ref ti) => ti.borrow().ty.size(),
&TComp(ref ci) => ci.borrow().layout.size,
&TEnum(ref ei) => ei.borrow().layout.size,
&TVoid => 0,
&TFuncProto(..) => 0,
&TFuncPtr(..) => 0,
}
}
#[allow(dead_code)]
pub fn align(&self) -> usize {
match self {
&TInt(_, l) => l.align,
&TFloat(_, l) => l.align,
&TPtr(_, _, l) => l.align,
&TArray(_, _, l) => l.align,
&TNamed(ref ti) => ti.borrow().ty.align(),
&TComp(ref ci) => ci.borrow().layout.align,
&TEnum(ref ei) => ei.borrow().layout.align,
&TVoid => 0,
&TFuncProto(..) => 0,
&TFuncPtr(..) => 0,
}
}
}
#[derive(Copy, Clone, PartialEq)]
pub struct Layout {
pub size: usize,
pub align: usize,
}
impl Layout {
pub fn new(size: usize, align: usize) -> Layout {
Layout { size: size, align: align }
}
pub fn zero() -> Layout {
Layout { size: 0, align: 0 }
}
}
#[derive(Copy, Clone, PartialEq)]
pub enum IKind {
IBool,
ISChar,
IUChar,
IShort,
IUShort,
IInt,
IUInt,
ILong,
IULong,
ILongLong,
IULongLong
}
#[derive(Copy, Clone, PartialEq)]
pub enum FKind {
FFloat,
FDouble
}
#[derive(Clone, PartialEq)]
pub enum CompMember {
Field(FieldInfo),
Comp(Rc<RefCell<CompInfo>>),
CompField(Rc<RefCell<CompInfo>>, FieldInfo),
}
#[derive(Copy, Clone, PartialEq)]
pub enum CompKind {
Struct,
Union,
}
#[derive(Clone, PartialEq)]
pub struct CompInfo {
pub kind: CompKind,
pub name: String,
pub members: Vec<CompMember>,
pub layout: Layout,
}
impl CompInfo {
pub fn new(name: String, kind: CompKind, members: Vec<CompMember>, layout: Layout) -> CompInfo {
CompInfo {
kind: kind,
name: name,
members: members,
layout: layout,
}
}
}
impl fmt::Debug for CompInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.name.fmt(f)
}
}
#[derive(Clone, PartialEq)]
pub struct FieldInfo {
pub name: String,
pub ty: Type,
pub bitfields: Option<Vec<(String, u32)>>,
}
impl FieldInfo {
pub fn new(name: String, ty: Type, bitfields: Option<Vec<(String, u32)>>) -> FieldInfo {
FieldInfo {
name: name,
ty: ty,
bitfields: bitfields,
}
}
}
#[derive(Clone, PartialEq)]
pub struct EnumInfo {
pub name: String,
pub items: Vec<EnumItem>,
pub kind: IKind,
pub layout: Layout,
}
impl EnumInfo {
pub fn new(name: String, kind: IKind, items: Vec<EnumItem>, layout: Layout) -> EnumInfo {
EnumInfo {
name: name,
items: items,
kind: kind,
layout: layout,
}
}
}
impl fmt::Debug for EnumInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.name.fmt(f)
}
}
#[derive(Clone, PartialEq)]
pub struct EnumItem {
pub name: String,
pub val: i64
}
impl EnumItem {
pub fn new(name: String, val: i64) -> EnumItem {
EnumItem {
name: name,
val: val
}
}
}

#[derive(Clone, PartialEq)]
pub struct TypeInfo {
    pub name: String,
    pub ty: Type
}
impl TypeInfo {
pub fn new(name: String, ty: Type) -> TypeInfo {
TypeInfo {
name: name,
ty: ty
}
}
}
impl fmt::Debug for TypeInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.name.fmt(f)
}
}
#[derive(Clone)]
pub struct VarInfo {
pub name: String,
pub ty: Type,
//TODO: support non-integer constants
pub val: Option<i64>,
pub is_const: bool
}
impl VarInfo {
pub fn new(name: String, ty: Type) -> VarInfo {
VarInfo {
name: name,
ty: ty,
val: None,
is_const: false
}
}
}
impl fmt::Debug for VarInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.name.fmt(f)
}
}

// ExampleConfigCase.java
/*
* Copyright 2015 Adaptris Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.adaptris.core;

import com.adaptris.annotation.Removal;
import com.adaptris.core.stubs.UpgradedToJunit4;
import com.adaptris.interlok.junit.scaffolding.ExampleConfigGenerator;
@Deprecated
@Removal(version = "4.0.0",
message = "moved to com.adaptris.interlok.junit.scaffolding")
public abstract class ExampleConfigCase extends ExampleConfigGenerator implements UpgradedToJunit4 {
}

// trivial-bounds-lint.rs
#![feature(trivial_bounds)]
#![allow(unused)]
#![deny(trivial_bounds)]
struct A where i32: Copy; //~ ERROR
trait X<T: Copy> {}
trait Y<T>: Copy {}
trait Z {
type S: Copy;
}
// Check only the bound the user writes trigger the lint
fn trivial_elaboration<T>() where T: X<i32> + Z<S = i32>, i32: Y<T> {} // OK
fn global_param() where i32: X<()> {} //~ ERROR
// Should only error on the trait bound, not the implicit
// projection bound <i32 as Z>::S == i32.
fn global_projection() where i32: Z<S = i32> {} //~ ERROR
impl A {
fn new() -> A { A }
}
// Lifetime bounds should be linted as well
fn global_lifetimes() where i32: 'static, &'static str: 'static {}
//~^ ERROR
//~| ERROR
fn local_lifetimes<'a>() where i32: 'a, &'a str: 'a {} // OK
fn global_outlives() where 'static: 'static {} //~ ERROR
// Check that each bound is checked individually
fn mixed_bounds<T: Copy>() where i32: X<T> + Copy {} //~ ERROR
fn main() {}

// index.js
import FixtureSet from '../../FixtureSet';
import TestCase from '../../TestCase';
import PasswordTestCase from './PasswordTestCase';
const React = window.React;
function NumberInputs() {
return (
<FixtureSet title="Password inputs">
<TestCase
title="The show password icon"
description={`
Some browsers have an unmask password icon that React accidentally
prevents the display of.
`}
affectedBrowsers="IE Edge, IE 11">
<TestCase.Steps>
<li>Type any string (not an actual password)</li>
</TestCase.Steps>
<TestCase.ExpectedResult>
          The field should include the "unmasking password" icon.
<PasswordTestCase />
</TestCase>
</FixtureSet>
);
}
export default NumberInputs;

#!/usr/bin/python
# pn_port_cos_bw.py
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_port_cos_bw
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to modify port-cos-bw
description:
- This module can be used to update bw settings for CoS queues.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(update) to modify the port-cos-bw.
required: True
type: str
choices: ['update']
pn_max_bw_limit:
description:
- Maximum b/w in percentage.
required: False
type: str
pn_cos:
description:
- CoS priority.
required: False
type: str
pn_port:
description:
- physical port number.
required: False
type: str
pn_weight:
description:
- Scheduling weight (1 to 127) after b/w guarantee met.
required: False
type: str
choices: ['priority', 'no-priority']
pn_min_bw_guarantee:
description:
      - Minimum b/w in percentage.
required: False
type: str
"""
EXAMPLES = """
- name: port cos bw modify
pn_port_cos_bw:
pn_cliswitch: "sw01"
state: "update"
pn_port: "1"
pn_cos: "0"
pn_min_bw_guarantee: "60"
- name: port cos bw modify
pn_port_cos_bw:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_cos: "0"
pn_weight: "priority"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the port-cos-bw command.
returned: always
type: list
stderr:
description: set of error responses from the port-cos-bw command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
def main():
""" This section is for arguments parsing """
state_map = dict(
update='port-cos-bw-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_max_bw_limit=dict(required=False, type='str'),
pn_cos=dict(required=False, type='str'),
pn_port=dict(required=False, type='str'),
pn_weight=dict(required=False, type='str',
choices=['priority', 'no-priority']),
pn_min_bw_guarantee=dict(required=False, type='str'),
),
required_if=(
['state', 'update', ['pn_cos', 'pn_port']],
),
required_one_of=[['pn_max_bw_limit', 'pn_min_bw_guarantee', 'pn_weight']],
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
max_bw_limit = module.params['pn_max_bw_limit']
cos = module.params['pn_cos']
port = module.params['pn_port']
weight = module.params['pn_weight']
min_bw_guarantee = module.params['pn_min_bw_guarantee']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
if command == 'port-cos-bw-modify':
cli += ' %s ' % command
if max_bw_limit:
cli += ' max-bw-limit ' + max_bw_limit
if cos:
cli += ' cos ' + cos
if port:
cli += ' port ' + port
if weight:
cli += ' weight ' + weight
if min_bw_guarantee:
cli += ' min-bw-guarantee ' + min_bw_guarantee
run_cli(module, cli, state_map)
if __name__ == '__main__':
    main()
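
# Trace sketch for the first EXAMPLES task above: with pn_port="1", pn_cos="0"
# and pn_min_bw_guarantee="60", the string building in main() produces a CLI
# ending in
#   port-cos-bw-modify  cos 0 port 1 min-bw-guarantee 60
# (the pn_cli() prefix depends on the target switch and is omitted here).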

# service_check.py
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
class MahoutServiceCheck(Script):
def service_check(self, env):
import params
env.set_params(params)
mahout_command = format("mahout seqdirectory --input /user/{smokeuser}/mahoutsmokeinput/sample-mahout-test.txt "
"--output /user/{smokeuser}/mahoutsmokeoutput/ --charset utf-8")
test_command = format("fs -test -e /user/{smokeuser}/mahoutsmokeoutput/_SUCCESS")
File( format("{tmp_dir}/sample-mahout-test.txt"),
content = "Test text which will be converted to sequence file.",
mode = 0755
)
params.HdfsResource(format("/user/{smokeuser}"),
type="directory",
action="create_on_execute",
owner=params.smokeuser,
mode=params.smoke_hdfs_user_mode,
)
params.HdfsResource(format("/user/{smokeuser}/mahoutsmokeoutput"),
action="delete_on_execute",
type="directory",
)
params.HdfsResource(format("/user/{smokeuser}/mahoutsmokeinput"),
action="create_on_execute",
type="directory",
owner=params.smokeuser,
)
params.HdfsResource(format("/user/{smokeuser}/mahoutsmokeinput/sample-mahout-test.txt"),
action="create_on_execute",
type="file",
owner=params.smokeuser,
source=format("{tmp_dir}/sample-mahout-test.txt")
)
params.HdfsResource(None, action="execute")
if params.security_enabled:
kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
Execute(kinit_cmd,
user=params.smokeuser)
Execute( mahout_command,
tries = 3,
try_sleep = 5,
environment={'MAHOUT_HOME': params.mahout_home,'JAVA_HOME': params.java64_home},
path = format('/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
user = params.smokeuser
)
ExecuteHadoop( test_command,
                   tries = 10,
                   try_sleep = 6,
user = params.smokeuser,
conf_dir = params.hadoop_conf_dir,
bin_dir = params.hadoop_bin_dir
)
if __name__ == "__main__":
  MahoutServiceCheck().execute()

// ColorCodingModifier.cpp
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (2013) Alexander Stukowski
//
// This file is part of OVITO (Open Visualization Tool).
//
// OVITO is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// OVITO is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
///////////////////////////////////////////////////////////////////////////////
#include <plugins/particles/Particles.h>
#include <core/viewport/Viewport.h>
#include <core/scene/pipeline/PipelineObject.h>
#include <core/animation/controller/Controller.h>
#include <core/reference/CloneHelper.h>
#include <core/gui/mainwin/MainWindow.h>
#include <core/gui/dialogs/LoadImageFileDialog.h>
#include <core/gui/properties/FloatParameterUI.h>
#include <core/gui/properties/Vector3ParameterUI.h>
#include <core/gui/properties/ColorParameterUI.h>
#include <core/gui/properties/BooleanParameterUI.h>
#include <core/gui/properties/CustomParameterUI.h>
#include <core/plugins/PluginManager.h>
#include <core/gui/dialogs/SaveImageFileDialog.h>
#include <core/rendering/SceneRenderer.h>
#include <core/viewport/ViewportConfiguration.h>
#include <plugins/particles/util/ParticlePropertyParameterUI.h>
#include "ColorCodingModifier.h"
namespace Ovito { namespace Particles { OVITO_BEGIN_INLINE_NAMESPACE(Modifiers) OVITO_BEGIN_INLINE_NAMESPACE(Coloring)
IMPLEMENT_SERIALIZABLE_OVITO_OBJECT(Particles, ColorCodingModifier, ParticleModifier);
SET_OVITO_OBJECT_EDITOR(ColorCodingModifier, ColorCodingModifierEditor);
DEFINE_REFERENCE_FIELD(ColorCodingModifier, _startValueCtrl, "StartValue", Controller);
DEFINE_REFERENCE_FIELD(ColorCodingModifier, _endValueCtrl, "EndValue", Controller);
DEFINE_REFERENCE_FIELD(ColorCodingModifier, _colorGradient, "ColorGradient", ColorCodingGradient);
DEFINE_PROPERTY_FIELD(ColorCodingModifier, _colorOnlySelected, "SelectedOnly");
DEFINE_PROPERTY_FIELD(ColorCodingModifier, _keepSelection, "KeepSelection");
DEFINE_PROPERTY_FIELD(ColorCodingModifier, _sourceProperty, "SourceProperty");
SET_PROPERTY_FIELD_LABEL(ColorCodingModifier, _startValueCtrl, "Start value");
SET_PROPERTY_FIELD_LABEL(ColorCodingModifier, _endValueCtrl, "End value");
SET_PROPERTY_FIELD_LABEL(ColorCodingModifier, _colorGradient, "Color gradient");
SET_PROPERTY_FIELD_LABEL(ColorCodingModifier, _colorOnlySelected, "Color only selected particles");
SET_PROPERTY_FIELD_LABEL(ColorCodingModifier, _keepSelection, "Keep particles selected");
SET_PROPERTY_FIELD_LABEL(ColorCodingModifier, _sourceProperty, "Source property");
IMPLEMENT_SERIALIZABLE_OVITO_OBJECT(Particles, ColorCodingGradient, RefTarget);
IMPLEMENT_SERIALIZABLE_OVITO_OBJECT(Particles, ColorCodingHSVGradient, ColorCodingGradient);
IMPLEMENT_SERIALIZABLE_OVITO_OBJECT(Particles, ColorCodingGrayscaleGradient, ColorCodingGradient);
IMPLEMENT_SERIALIZABLE_OVITO_OBJECT(Particles, ColorCodingHotGradient, ColorCodingGradient);
IMPLEMENT_SERIALIZABLE_OVITO_OBJECT(Particles, ColorCodingJetGradient, ColorCodingGradient);
IMPLEMENT_SERIALIZABLE_OVITO_OBJECT(Particles, ColorCodingImageGradient, ColorCodingGradient);
DEFINE_PROPERTY_FIELD(ColorCodingImageGradient, _image, "Image");
OVITO_BEGIN_INLINE_NAMESPACE(Internal)
IMPLEMENT_OVITO_OBJECT(Particles, ColorCodingModifierEditor, ParticleModifierEditor);
OVITO_END_INLINE_NAMESPACE
/******************************************************************************
* Constructs the modifier object.
******************************************************************************/
ColorCodingModifier::ColorCodingModifier(DataSet* dataset) : ParticleModifier(dataset),
_colorOnlySelected(false), _keepSelection(false)
{
INIT_PROPERTY_FIELD(ColorCodingModifier::_startValueCtrl);
INIT_PROPERTY_FIELD(ColorCodingModifier::_endValueCtrl);
INIT_PROPERTY_FIELD(ColorCodingModifier::_colorGradient);
INIT_PROPERTY_FIELD(ColorCodingModifier::_colorOnlySelected);
INIT_PROPERTY_FIELD(ColorCodingModifier::_keepSelection);
INIT_PROPERTY_FIELD(ColorCodingModifier::_sourceProperty);
_colorGradient = new ColorCodingHSVGradient(dataset);
_startValueCtrl = ControllerManager::instance().createFloatController(dataset);
_endValueCtrl = ControllerManager::instance().createFloatController(dataset);
}
/******************************************************************************
* Loads the user-defined default values of this object's parameter fields from the
* application's settings store.
******************************************************************************/
void ColorCodingModifier::loadUserDefaults()
{
ParticleModifier::loadUserDefaults();
// Load the default gradient type set by the user.
QSettings settings;
settings.beginGroup(ColorCodingModifier::OOType.plugin()->pluginId());
settings.beginGroup(ColorCodingModifier::OOType.name());
QString typeString = settings.value(PROPERTY_FIELD(ColorCodingModifier::_colorGradient).identifier()).toString();
if(!typeString.isEmpty()) {
try {
OvitoObjectType* gradientType = OvitoObjectType::decodeFromString(typeString);
if(!colorGradient() || colorGradient()->getOOType() != *gradientType) {
OORef<ColorCodingGradient> gradient = dynamic_object_cast<ColorCodingGradient>(gradientType->createInstance(dataset()));
if(gradient) setColorGradient(gradient);
}
}
catch(...) {}
}
}
/******************************************************************************
* Asks the modifier for its validity interval at the given time.
******************************************************************************/
TimeInterval ColorCodingModifier::modifierValidity(TimePoint time)
{
TimeInterval interval = ParticleModifier::modifierValidity(time);
if(_startValueCtrl) interval.intersect(_startValueCtrl->validityInterval(time));
if(_endValueCtrl) interval.intersect(_endValueCtrl->validityInterval(time));
return interval;
}
/******************************************************************************
* This method is called by the system when the modifier has been inserted
* into a pipeline.
******************************************************************************/
void ColorCodingModifier::initializeModifier(PipelineObject* pipeline, ModifierApplication* modApp)
{
ParticleModifier::initializeModifier(pipeline, modApp);
if(sourceProperty().isNull()) {
// Select the first available particle property from the input.
PipelineFlowState input = pipeline->evaluatePipeline(dataset()->animationSettings()->time(), modApp, false);
ParticlePropertyReference bestProperty;
for(DataObject* o : input.objects()) {
ParticlePropertyObject* property = dynamic_object_cast<ParticlePropertyObject>(o);
if(property && (property->dataType() == qMetaTypeId<int>() || property->dataType() == qMetaTypeId<FloatType>())) {
bestProperty = ParticlePropertyReference(property, (property->componentCount() > 1) ? 0 : -1);
}
}
if(!bestProperty.isNull())
setSourceProperty(bestProperty);
}
// Automatically adjust value range.
if(startValue() == 0 && endValue() == 0)
adjustRange();
}
/******************************************************************************
* This modifies the input object.
******************************************************************************/
PipelineStatus ColorCodingModifier::modifyParticles(TimePoint time, TimeInterval& validityInterval)
{
// Get the source property.
if(sourceProperty().isNull())
throw Exception(tr("Select a particle property first."));
ParticlePropertyObject* property = sourceProperty().findInState(input());
if(!property)
throw Exception(tr("The particle property with the name '%1' does not exist.").arg(sourceProperty().name()));
if(sourceProperty().vectorComponent() >= (int)property->componentCount())
throw Exception(tr("The vector component is out of range. The particle property '%1' contains only %2 values per particle.").arg(sourceProperty().name()).arg(property->componentCount()));
int vecComponent = std::max(0, sourceProperty().vectorComponent());
int stride = property->stride() / property->dataTypeSize();
if(!_colorGradient)
throw Exception(tr("No color gradient has been selected."));
// Get modifier's parameter values.
FloatType startValue = 0, endValue = 0;
if(_startValueCtrl) startValue = _startValueCtrl->getFloatValue(time, validityInterval);
if(_endValueCtrl) endValue = _endValueCtrl->getFloatValue(time, validityInterval);
// Get the particle selection property if enabled by the user.
ParticlePropertyObject* selProperty = nullptr;
const int* sel = nullptr;
std::vector<Color> existingColors;
if(colorOnlySelected()) {
selProperty = inputStandardProperty(ParticleProperty::SelectionProperty);
if(selProperty) {
sel = selProperty->constDataInt();
existingColors = inputParticleColors(time, validityInterval);
}
}
// Create the color output property.
ParticlePropertyObject* colorProperty = outputStandardProperty(ParticleProperty::ColorProperty);
OVITO_ASSERT(colorProperty->size() == property->size());
Color* c_begin = colorProperty->dataColor();
Color* c_end = c_begin + colorProperty->size();
Color* c = c_begin;
if(property->dataType() == qMetaTypeId<FloatType>()) {
const FloatType* v = property->constDataFloat() + vecComponent;
for(; c != c_end; ++c, v += stride) {
// If the "only selected" option is enabled, and the particle is not selected, use the existing particle color.
if(sel && !(*sel++)) {
*c = existingColors[c - c_begin];
continue;
}
// Compute linear interpolation.
FloatType t;
if(startValue == endValue) {
if((*v) == startValue) t = 0.5;
else if((*v) > startValue) t = 1.0;
else t = 0.0;
}
else t = ((*v) - startValue) / (endValue - startValue);
// Clamp values.
if(t < 0) t = 0;
else if(t > 1) t = 1;
*c = _colorGradient->valueToColor(t);
}
}
else if(property->dataType() == qMetaTypeId<int>()) {
const int* v = property->constDataInt() + vecComponent;
for(; c != c_end; ++c, v += stride) {
// If the "only selected" option is enabled, and the particle is not selected, use the existing particle color.
if(sel && !(*sel++)) {
*c = existingColors[c - c_begin];
continue;
}
// Compute linear interpolation.
FloatType t;
if(startValue == endValue) {
if((*v) == startValue) t = 0.5;
else if((*v) > startValue) t = 1.0;
else t = 0.0;
}
else t = ((*v) - startValue) / (endValue - startValue);
// Clamp values.
if(t < 0) t = 0;
else if(t > 1) t = 1;
*c = _colorGradient->valueToColor(t);
}
}
else
throw Exception(tr("The particle property '%1' has an invalid or non-numeric data type.").arg(property->name()));
// Clear particle selection if requested.
if(selProperty && !keepSelection())
output().removeObject(selProperty);
colorProperty->changed();
return PipelineStatus::Success;
}
/******************************************************************************
* Sets the start and end value to the minimum and maximum value
* in the selected particle property.
******************************************************************************/
bool ColorCodingModifier::adjustRange()
{
// Determine the minimum and maximum values of the selected particle property.
// Get the value data channel from the input object.
PipelineFlowState inputState = getModifierInput();
ParticlePropertyObject* property = sourceProperty().findInState(inputState);
if(!property)
return false;
if(sourceProperty().vectorComponent() >= (int)property->componentCount())
return false;
int vecComponent = std::max(0, sourceProperty().vectorComponent());
int stride = property->stride() / property->dataTypeSize();
// Iterate over all atoms.
FloatType maxValue = FLOATTYPE_MIN;
FloatType minValue = FLOATTYPE_MAX;
if(property->dataType() == qMetaTypeId<FloatType>()) {
const FloatType* v = property->constDataFloat() + vecComponent;
const FloatType* vend = v + (property->size() * stride);
for(; v != vend; v += stride) {
if(*v > maxValue) maxValue = *v;
if(*v < minValue) minValue = *v;
}
}
else if(property->dataType() == qMetaTypeId<int>()) {
const int* v = property->constDataInt() + vecComponent;
const int* vend = v + (property->size() * stride);
for(; v != vend; v += stride) {
if(*v > maxValue) maxValue = *v;
if(*v < minValue) minValue = *v;
}
}
if(minValue == +FLOATTYPE_MAX)
return false;
if(startValueController())
startValueController()->setCurrentFloatValue(minValue);
if(endValueController())
endValueController()->setCurrentFloatValue(maxValue);
return true;
}
/******************************************************************************
* Saves the class' contents to the given stream.
******************************************************************************/
void ColorCodingModifier::saveToStream(ObjectSaveStream& stream)
{
ParticleModifier::saveToStream(stream);
stream.beginChunk(0x02);
stream.endChunk();
}
/******************************************************************************
* Loads the class' contents from the given stream.
******************************************************************************/
void ColorCodingModifier::loadFromStream(ObjectLoadStream& stream)
{
ParticleModifier::loadFromStream(stream);
int version = stream.expectChunkRange(0, 0x02);
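	// Legacy chunk version 0x01 stored the source property reference
	// explicitly in the stream; version 0x02 writes an empty chunk.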
if(version == 0x01) {
ParticlePropertyReference pref;
stream >> pref;
setSourceProperty(pref);
}
stream.closeChunk();
}
/******************************************************************************
* Loads the given image file from disk.
******************************************************************************/
void ColorCodingImageGradient::loadImage(const QString& filename)
{
QImage image(filename);
if(image.isNull())
throw Exception(tr("Could not load image file '%1'.").arg(filename));
setImage(image);
}
/******************************************************************************
* Converts a scalar value to a color value.
******************************************************************************/
Color ColorCodingImageGradient::valueToColor(FloatType t)
{
if(image().isNull()) return Color(0,0,0);
QPoint p;
if(image().width() > image().height())
p = QPoint(std::min((int)(t * image().width()), image().width()-1), 0);
else
p = QPoint(0, std::min((int)(t * image().height()), image().height()-1));
return Color(image().pixel(p));
}
OVITO_BEGIN_INLINE_NAMESPACE(Internal)
/******************************************************************************
* Sets up the UI widgets of the editor.
******************************************************************************/
void ColorCodingModifierEditor::createUI(const RolloutInsertionParameters& rolloutParams)
{
// Create a rollout.
QWidget* rollout = createRollout(tr("Color coding"), rolloutParams, "particles.modifiers.color_coding.html");
// Create the rollout contents.
QVBoxLayout* layout1 = new QVBoxLayout(rollout);
layout1->setContentsMargins(4,4,4,4);
layout1->setSpacing(2);
ParticlePropertyParameterUI* sourcePropertyUI = new ParticlePropertyParameterUI(this, PROPERTY_FIELD(ColorCodingModifier::_sourceProperty));
layout1->addWidget(new QLabel(tr("Property:"), rollout));
layout1->addWidget(sourcePropertyUI->comboBox());
colorGradientList = new QComboBox(rollout);
layout1->addWidget(new QLabel(tr("Color gradient:"), rollout));
layout1->addWidget(colorGradientList);
colorGradientList->setIconSize(QSize(48,16));
connect(colorGradientList, (void (QComboBox::*)(int))&QComboBox::activated, this, &ColorCodingModifierEditor::onColorGradientSelected);
for(const OvitoObjectType* clazz : PluginManager::instance().listClasses(ColorCodingGradient::OOType)) {
if(clazz == &ColorCodingImageGradient::OOType)
continue;
colorGradientList->addItem(iconFromColorMapClass(clazz), clazz->displayName(), QVariant::fromValue(clazz));
}
colorGradientList->insertSeparator(colorGradientList->count());
colorGradientList->addItem(tr("Load custom color map..."));
_gradientListContainCustomItem = false;
// Update color legend if another modifier has been loaded into the editor.
connect(this, &ColorCodingModifierEditor::contentsReplaced, this, &ColorCodingModifierEditor::updateColorGradient);
layout1->addSpacing(10);
QGridLayout* layout2 = new QGridLayout();
layout2->setContentsMargins(0,0,0,0);
layout2->setColumnStretch(1, 1);
layout1->addLayout(layout2);
// End value parameter.
FloatParameterUI* endValuePUI = new FloatParameterUI(this, PROPERTY_FIELD(ColorCodingModifier::_endValueCtrl));
layout2->addWidget(endValuePUI->label(), 0, 0);
layout2->addLayout(endValuePUI->createFieldLayout(), 0, 1);
// Insert color legend display.
colorLegendLabel = new QLabel(rollout);
colorLegendLabel->setScaledContents(true);
layout2->addWidget(colorLegendLabel, 1, 1);
// Start value parameter.
FloatParameterUI* startValuePUI = new FloatParameterUI(this, PROPERTY_FIELD(ColorCodingModifier::_startValueCtrl));
layout2->addWidget(startValuePUI->label(), 2, 0);
layout2->addLayout(startValuePUI->createFieldLayout(), 2, 1);
// Export color scale button.
QToolButton* exportBtn = new QToolButton(rollout);
exportBtn->setIcon(QIcon(":/particles/icons/export_color_scale.png"));
	exportBtn->setToolTip(tr("Export color map to image file"));
exportBtn->setAutoRaise(true);
exportBtn->setIconSize(QSize(42,22));
connect(exportBtn, &QPushButton::clicked, this, &ColorCodingModifierEditor::onExportColorScale);
layout2->addWidget(exportBtn, 1, 0, Qt::AlignCenter);
layout1->addSpacing(8);
QPushButton* adjustBtn = new QPushButton(tr("Adjust range"), rollout);
connect(adjustBtn, &QPushButton::clicked, this, &ColorCodingModifierEditor::onAdjustRange);
layout1->addWidget(adjustBtn);
layout1->addSpacing(4);
QPushButton* reverseBtn = new QPushButton(tr("Reverse range"), rollout);
connect(reverseBtn, &QPushButton::clicked, this, &ColorCodingModifierEditor::onReverseRange);
layout1->addWidget(reverseBtn);
layout1->addSpacing(8);
// Only selected particles.
BooleanParameterUI* onlySelectedPUI = new BooleanParameterUI(this, PROPERTY_FIELD(ColorCodingModifier::_colorOnlySelected));
layout1->addWidget(onlySelectedPUI->checkBox());
// Keep selection
BooleanParameterUI* keepSelectionPUI = new BooleanParameterUI(this, PROPERTY_FIELD(ColorCodingModifier::_keepSelection));
layout1->addWidget(keepSelectionPUI->checkBox());
connect(onlySelectedPUI->checkBox(), &QCheckBox::toggled, keepSelectionPUI, &BooleanParameterUI::setEnabled);
keepSelectionPUI->setEnabled(false);
}
/******************************************************************************
* Updates the display for the color gradient.
******************************************************************************/
void ColorCodingModifierEditor::updateColorGradient()
{
ColorCodingModifier* mod = static_object_cast<ColorCodingModifier>(editObject());
if(!mod) return;
// Create the color legend image.
int legendHeight = 128;
QImage image(1, legendHeight, QImage::Format_RGB32);
for(int y = 0; y < legendHeight; y++) {
FloatType t = (FloatType)y / (legendHeight - 1);
Color color = mod->colorGradient()->valueToColor(1.0 - t);
image.setPixel(0, y, QColor(color).rgb());
}
colorLegendLabel->setPixmap(QPixmap::fromImage(image));
// Select the right entry in the color gradient selector.
bool isCustomMap = false;
if(mod->colorGradient()) {
int index = colorGradientList->findData(QVariant::fromValue(&mod->colorGradient()->getOOType()));
if(index >= 0)
colorGradientList->setCurrentIndex(index);
else
isCustomMap = true;
}
else colorGradientList->setCurrentIndex(-1);
if(isCustomMap) {
if(!_gradientListContainCustomItem) {
_gradientListContainCustomItem = true;
colorGradientList->insertItem(colorGradientList->count() - 2, iconFromColorMap(mod->colorGradient()), tr("Custom color map"));
colorGradientList->insertSeparator(colorGradientList->count() - 3);
}
else {
colorGradientList->setItemIcon(colorGradientList->count() - 3, iconFromColorMap(mod->colorGradient()));
}
colorGradientList->setCurrentIndex(colorGradientList->count() - 3);
}
else if(_gradientListContainCustomItem) {
_gradientListContainCustomItem = false;
colorGradientList->removeItem(colorGradientList->count() - 3);
colorGradientList->removeItem(colorGradientList->count() - 3);
}
}
/******************************************************************************
* This method is called when a reference target changes.
******************************************************************************/
bool ColorCodingModifierEditor::referenceEvent(RefTarget* source, ReferenceEvent* event)
{
if(source == editObject() && event->type() == ReferenceEvent::ReferenceChanged &&
static_cast<ReferenceFieldEvent*>(event)->field() == PROPERTY_FIELD(ColorCodingModifier::_colorGradient)) {
updateColorGradient();
}
return ParticleModifierEditor::referenceEvent(source, event);
}
/******************************************************************************
* Is called when the user selects a color gradient in the list box.
******************************************************************************/
void ColorCodingModifierEditor::onColorGradientSelected(int index)
{
if(index < 0) return;
ColorCodingModifier* mod = static_object_cast<ColorCodingModifier>(editObject());
OVITO_CHECK_OBJECT_POINTER(mod);
const OvitoObjectType* descriptor = colorGradientList->itemData(index).value<const OvitoObjectType*>();
if(descriptor) {
undoableTransaction(tr("Change color gradient"), [descriptor, mod]() {
OORef<ColorCodingGradient> gradient = static_object_cast<ColorCodingGradient>(descriptor->createInstance(mod->dataset()));
if(gradient) {
mod->setColorGradient(gradient);
QSettings settings;
settings.beginGroup(ColorCodingModifier::OOType.plugin()->pluginId());
settings.beginGroup(ColorCodingModifier::OOType.name());
settings.setValue(PROPERTY_FIELD(ColorCodingModifier::_colorGradient).identifier(),
QVariant::fromValue(OvitoObjectType::encodeAsString(descriptor)));
}
});
}
else if(index == colorGradientList->count() - 1) {
undoableTransaction(tr("Change color gradient"), [this, mod]() {
LoadImageFileDialog fileDialog(container(), tr("Pick color map image"));
if(fileDialog.exec()) {
OORef<ColorCodingImageGradient> gradient(new ColorCodingImageGradient(mod->dataset()));
gradient->loadImage(fileDialog.imageInfo().filename());
mod->setColorGradient(gradient);
}
});
}
}
/******************************************************************************
* Is called when the user presses the "Adjust Range" button.
******************************************************************************/
void ColorCodingModifierEditor::onAdjustRange()
{
ColorCodingModifier* mod = static_object_cast<ColorCodingModifier>(editObject());
OVITO_CHECK_OBJECT_POINTER(mod);
undoableTransaction(tr("Adjust range"), [mod]() {
mod->adjustRange();
});
}
/******************************************************************************
* Is called when the user presses the "Reverse Range" button.
******************************************************************************/
void ColorCodingModifierEditor::onReverseRange()
{
ColorCodingModifier* mod = static_object_cast<ColorCodingModifier>(editObject());
if(mod->startValueController() && mod->endValueController()) {
undoableTransaction(tr("Reverse range"), [mod]() {
// Swap controllers for start and end value.
OORef<Controller> oldStartValue = mod->startValueController();
mod->setStartValueController(mod->endValueController());
mod->setEndValueController(oldStartValue);
});
}
}
/******************************************************************************
* Is called when the user presses the "Export color scale" button.
******************************************************************************/
void ColorCodingModifierEditor::onExportColorScale()
{
ColorCodingModifier* mod = static_object_cast<ColorCodingModifier>(editObject());
if(!mod || !mod->colorGradient()) return;
SaveImageFileDialog fileDialog(colorLegendLabel, tr("Save color map"));
if(fileDialog.exec()) {
// Create the color legend image.
int legendWidth = 32;
int legendHeight = 256;
QImage image(1, legendHeight, QImage::Format_RGB32);
for(int y = 0; y < legendHeight; y++) {
FloatType t = (FloatType)y / (FloatType)(legendHeight - 1);
Color color = mod->colorGradient()->valueToColor(1.0 - t);
image.setPixel(0, y, QColor(color).rgb());
}
QString imageFilename = fileDialog.imageInfo().filename();<|fim▁hole|> Exception ex(tr("Failed to save image to file '%1'.").arg(imageFilename));
ex.showError();
}
}
}
/******************************************************************************
* Returns an icon representing the given color map class.
******************************************************************************/
QIcon ColorCodingModifierEditor::iconFromColorMapClass(const OvitoObjectType* clazz)
{
	// Cache icons for color map types.
static std::map<const OvitoObjectType*, QIcon> iconCache;
auto entry = iconCache.find(clazz);
if(entry != iconCache.end())
return entry->second;
DataSet* dataset = mainWindow()->datasetContainer().currentSet();
OVITO_ASSERT(dataset);
if(dataset) {
try {
// Create a temporary instance of the color map class.
OORef<ColorCodingGradient> map = static_object_cast<ColorCodingGradient>(clazz->createInstance(dataset));
if(map) {
QIcon icon = iconFromColorMap(map);
iconCache.insert(std::make_pair(clazz, icon));
return icon;
}
}
catch(...) {}
}
return QIcon();
}
/******************************************************************************
* Returns an icon representing the given color map.
******************************************************************************/
QIcon ColorCodingModifierEditor::iconFromColorMap(ColorCodingGradient* map)
{
const int sizex = 48;
const int sizey = 16;
QImage image(sizex, sizey, QImage::Format_RGB32);
for(int x = 0; x < sizex; x++) {
FloatType t = (FloatType)x / (sizex - 1);
uint c = QColor(map->valueToColor(t)).rgb();
for(int y = 0; y < sizey; y++)
image.setPixel(x, y, c);
}
return QIcon(QPixmap::fromImage(image));
}
OVITO_END_INLINE_NAMESPACE
OVITO_END_INLINE_NAMESPACE
OVITO_END_INLINE_NAMESPACE
} // End of namespace
} // End of namespace<|fim▁end|> | if(!image.scaled(legendWidth, legendHeight, Qt::IgnoreAspectRatio, Qt::FastTransformation).save(imageFilename, fileDialog.imageInfo().format())) { |
<|file_name|>PancakeNode.hpp<|end_file_name|><|fim▁begin|>/**
* \file PancakeNode.hpp<|fim▁hole|> *
*
* \author eaburns
* \date 18-01-2010
*/
#include "search/Node.hpp"
#include "pancake/PancakeTypes.hpp"
#include "pancake/PancakeState.hpp"
#if !defined(_PANCAKE_NODE_H_)
#define _PANCAKE_NODE_H_
typedef Node<PancakeState14, PancakeCost> PancakeNode14;
#endif // !_PANCAKE_NODE_H_<|fim▁end|> | * |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(plugin_registrar)]
#![feature(slice_patterns, box_syntax, rustc_private)]
#[macro_use(walk_list)]
extern crate syntax;
extern crate rustc_serialize;<|fim▁hole|>
// Load rustc as a plugin to get macros.
#[macro_use]
extern crate rustc;
extern crate rustc_plugin;
#[macro_use]
extern crate log;
mod kythe;
mod pass;
mod visitor;
use kythe::writer::JsonEntryWriter;
use rustc_plugin::Registry;
use rustc::lint::LateLintPassObject;
// Informs the compiler of the existence and implementation of our plugin.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
let pass = box pass::KytheLintPass::new(box JsonEntryWriter);
reg.register_late_lint_pass(pass as LateLintPassObject);
}<|fim▁end|> | |
<|file_name|>Reset.tsx<|end_file_name|><|fim▁begin|>// part:@sanity/base/reset-icon
import React from 'react'
const strokeStyle = {
stroke: 'currentColor',
strokeWidth: 1.2,
}
const ResetIcon = () => (
<svg
data-sanity-icon
viewBox="0 0 25 25"
fill="none"
xmlns="http://www.w3.org/2000/svg"
preserveAspectRatio="xMidYMid"
width="1em"
height="1em"
>
<path d="M9.50001 15.5L15.5 9.49999" style={strokeStyle} />
<path d="M9.5 9.5L15.5 15.5" style={strokeStyle} />
<path
d="M4.56189 13.5C4.52104 13.1724 4.5 12.8387 4.5 12.5C4.5 8.08172 8.08172 4.5 12.5 4.5C16.9183 4.5 20.5 8.08172 20.5 12.5C20.5 16.9183 16.9183 20.5 12.5 20.5C9.75033 20.5 7.32466 19.1128 5.88468 17"<|fim▁hole|> <path d="M7 11L4.56189 13.5L2 11" style={strokeStyle} />
</svg>
)
export default ResetIcon<|fim▁end|> | style={strokeStyle}
/> |
<|file_name|>JsonOutputStreamSerializer.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2011-2015 The Cryptonote developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "JsonOutputStreamSerializer.h"
#include <cassert>
#include <stdexcept>
#include "Common/StringTools.h"
using Common::JsonValue;
using namespace CryptoNote;
namespace CryptoNote {
std::ostream& operator<<(std::ostream& out, const JsonOutputStreamSerializer& enumerator) {
out << enumerator.root;
return out;
}
}
namespace {
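// Helper: append the value if the enclosing JSON container is an array,
// otherwise insert it into the object under the given key.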
template <typename T>
void insertOrPush(JsonValue& js, Common::StringView name, const T& value) {
if (js.isArray()) {
js.pushBack(JsonValue(value));
} else {
js.insert(std::string(name), JsonValue(value));
}
}
}
JsonOutputStreamSerializer::JsonOutputStreamSerializer() : root(JsonValue::OBJECT) {
chain.push_back(&root);
}
JsonOutputStreamSerializer::~JsonOutputStreamSerializer() {
}
ISerializer::SerializerType JsonOutputStreamSerializer::type() const {
return ISerializer::OUTPUT;
}
bool JsonOutputStreamSerializer::beginObject(Common::StringView name) {
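  // 'chain' is a stack of open JSON scopes; the new object becomes the
  // innermost scope until the matching endObject() pops it.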
JsonValue& parent = *chain.back();
JsonValue obj(JsonValue::OBJECT);
if (parent.isObject()) {
chain.push_back(&parent.insert(std::string(name), obj));
} else {
chain.push_back(&parent.pushBack(obj));
}
return true;
}
void JsonOutputStreamSerializer::endObject() {
assert(!chain.empty());
chain.pop_back();
}
bool JsonOutputStreamSerializer::beginArray(size_t& size, Common::StringView name) {
JsonValue val(JsonValue::ARRAY);
JsonValue& res = chain.back()->insert(std::string(name), val);
chain.push_back(&res);
return true;
}
void JsonOutputStreamSerializer::endArray() {
assert(!chain.empty());
chain.pop_back();
}
bool JsonOutputStreamSerializer::operator()(uint64_t& value, Common::StringView name) {
int64_t v = static_cast<int64_t>(value);
return operator()(v, name);
}
bool JsonOutputStreamSerializer::operator()(uint16_t& value, Common::StringView name) {
uint64_t v = static_cast<uint64_t>(value);
return operator()(v, name);
}
bool JsonOutputStreamSerializer::operator()(int16_t& value, Common::StringView name) {
int64_t v = static_cast<int64_t>(value);
return operator()(v, name);
}
bool JsonOutputStreamSerializer::operator()(uint32_t& value, Common::StringView name) {
uint64_t v = static_cast<uint64_t>(value);
return operator()(v, name);
}
bool JsonOutputStreamSerializer::operator()(int32_t& value, Common::StringView name) {
int64_t v = static_cast<int64_t>(value);
return operator()(v, name);
}
bool JsonOutputStreamSerializer::operator()(int64_t& value, Common::StringView name) {
insertOrPush(*chain.back(), name, value);
return true;
}
bool JsonOutputStreamSerializer::operator()(double& value, Common::StringView name) {
insertOrPush(*chain.back(), name, value);
return true;
}
bool JsonOutputStreamSerializer::operator()(std::string& value, Common::StringView name) {
insertOrPush(*chain.back(), name, value);
return true;
}
bool JsonOutputStreamSerializer::operator()(uint8_t& value, Common::StringView name) {
insertOrPush(*chain.back(), name, static_cast<int64_t>(value));
return true;
}
bool JsonOutputStreamSerializer::operator()(bool& value, Common::StringView name) {
insertOrPush(*chain.back(), name, value);
return true;
}
bool JsonOutputStreamSerializer::binary(void* value, size_t size, Common::StringView name) {
std::string hex = Common::toHex(value, size);<|fim▁hole|>bool JsonOutputStreamSerializer::binary(std::string& value, Common::StringView name) {
return binary(const_cast<char*>(value.data()), value.size(), name);
}<|fim▁end|> | return (*this)(hex, name);
}
|
<|file_name|>add.py<|end_file_name|><|fim▁begin|>import forge
from forge.models.groups import Group
class Add(object):
def __init__(self,json_args,session):<|fim▁hole|> raise forge.ArgumentError()
self.name = json_args['name']
self.distribution = json_args['distribution']
self.session = session
def call(self):
group = Group(self.name,self.distribution)
self.session.add(group)
self.session.commit()
return {'name':self.name, 'distribution':self.distribution}<|fim▁end|> | if type(json_args) != type({}):
raise TypeError("JSON Arg must be dict type")
        if 'name' not in json_args or 'distribution' not in json_args:
<|file_name|>interfaces.ts<|end_file_name|><|fim▁begin|>export interface JCDecauxParams {
contractName?: string;
urlApi?: string;
timeout?: number;
}
export interface RequestParams {
apiKey?: string;
contract?: string;
}
export interface Position {
lat: number;
lng: number;
}
export interface Contract {
name: string;
commercial_name: string;
country_code: string;
cities: Array<string>;
}
export interface Station {
number: number,
name: string,<|fim▁hole|> banking: boolean,
bonus: boolean,
status: string,
contract_name: string,
bike_stands: number,
available_bike_stands: number,
available_bikes: number,
last_update: number
}<|fim▁end|> | address: string,
position: Position, |
<|file_name|>utilities.py<|end_file_name|><|fim▁begin|># Copyright (c) 2020, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/spherical_functions/blob/master/LICENSE>
### NOTE: The functions in this file are intended purely for inclusion in the Grid class. In
### particular, they assume that the first argument, `self` is an instance of Grid. They should
### probably not be used outside of that class.
def modes(self, ell_max=None, **kwargs):
"""Return mode weights of function decomposed into SWSHs
<|fim▁hole|>
The output array has one less dimension than this object; rather than the last two axes giving
the values on the two-dimensional grid, the last axis gives the mode weights.
Parameters
==========
ell_max: None or int [defaults to None]
Maximum ell value in the output. If None, the result will have enough ell values to express
the data on the grid without aliasing: (max(n_phi, n_theta) - 1) // 2.
**kwargs: any types
Additional keyword arguments are passed through to the Modes constructor on output
"""
import copy
import numpy as np
import spinsfast
from .. import Modes
    ell_max = ell_max or (max(self.n_phi, self.n_theta) - 1) // 2
    # Carry the grid's metadata through to the Modes constructor, letting
    # explicit keyword arguments override it (this assumes the Grid object
    # keeps its extra constructor arguments in `self._metadata`).
    metadata = copy.copy(self._metadata)
    metadata.pop('spin_weight', None)
    metadata.pop('ell_min', None)
    metadata.pop('ell_max', None)
    metadata.update(**kwargs)
    return Modes(spinsfast.map2salm(self.view(np.ndarray), self.s, ell_max),
                 spin_weight=self.s, ell_min=0, ell_max=ell_max, **metadata)
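# Usage sketch (names are illustrative): if `g` is a Grid holding values of
# spin weight s on an equiangular grid, `g.modes(ell_max=8)` returns a Modes
# object whose last axis holds the SWSH mode weights up to ell = 8.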
def _check_broadcasting(self, array, reverse=False):
"""Test whether or not the given array can broadcast against this object"""
import numpy as np
if isinstance(array, type(self)):
try:
if reverse:
np.broadcast(array, self)
else:
np.broadcast(self, array)
except ValueError:
return False
else:
return True
else:
if np.ndim(array) > np.ndim(self)-2:
raise ValueError(f"Cannot broadcast array of {np.ndim(array)} dimensions against {type(self).__name__} "
f"object of fewer ({np.ndim(self)-2}) non-grid dimensions.\n"
"This is to ensure that scalars do not operate on individual "
"grid values; they must operate on all simultaneously.\n"
"If that is the case and you still want to broadcast, add more "
"dimensions before this object's first dimension.")
try:
if reverse:
np.broadcast(array, self[..., 0, 0])
else:
np.broadcast(self[..., 0, 0], array)
except ValueError:
return False
else:
return True<|fim▁end|> | This method uses `spinsfast` to convert values on an equiangular grid to mode weights. |
<|file_name|>NGAboutBox.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2005–2012 Goethe Center for Scientific Computing - Simulation and Modelling (G-CSC Frankfurt)
* Copyright (c) 2012-2015 Goethe Center for Scientific Computing - Computational Neuroscience (G-CSC Frankfurt)
*
* This file is part of NeuGen.
*
* NeuGen is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License version 3
* as published by the Free Software Foundation.
*
* see: http://opensource.org/licenses/LGPL-3.0
* file://path/to/NeuGen/LICENSE
*
* NeuGen is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* This version of NeuGen includes copyright notice and attribution requirements.
* According to the LGPL this information must be displayed even if you modify<|fim▁hole|> * Attribution Requirements:
*
* If you create derived work you must do the following regarding copyright
* notice and author attribution.
*
* Add an additional notice, stating that you modified NeuGen. In addition
* you must cite the publications listed below. A suitable notice might read
* "NeuGen source code modified by YourName 2012".
*
* Note, that these requirements are in full accordance with the LGPL v3
* (see 7. Additional Terms, b).
*
* Publications:
*
* S. Wolf, S. Grein, G. Queisser. NeuGen 2.0 -
* Employing NeuGen 2.0 to automatically generate realistic
* morphologies of hippocapal neurons and neural networks in 3D.
* Neuroinformatics, 2013, 11(2), pp. 137-148, doi: 10.1007/s12021-012-9170-1
*
*
* J. P. Eberhard, A. Wanner, G. Wittum. NeuGen -
* A tool for the generation of realistic morphology
* of cortical neurons and neural networks in 3D.
* Neurocomputing, 70(1-3), pp. 327-343, doi: 10.1016/j.neucom.2006.01.028
*
*/
package org.neugen.gui;
import org.jdesktop.application.Action;
/**
* @author Sergei Wolf
*/
public final class NGAboutBox extends javax.swing.JDialog {
private static final long serialVersionUID = 1L;
public NGAboutBox(java.awt.Frame parent) {
super(parent);
initComponents();
getRootPane().setDefaultButton(closeButton);
}
@Action
public void closeAboutBox() {
dispose();
}
/**
* This method is called from within the constructor to
* initialize the form.
* WARNING: Do NOT modify this code. The content of this method is
* always regenerated by the Form Editor.
*/
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
closeButton = new javax.swing.JButton();
javax.swing.JLabel appTitleLabel = new javax.swing.JLabel();
javax.swing.JLabel versionLabel = new javax.swing.JLabel();
javax.swing.JLabel appVersionLabel = new javax.swing.JLabel();
javax.swing.JLabel vendorLabel = new javax.swing.JLabel();
javax.swing.JLabel appVendorLabel = new javax.swing.JLabel();
javax.swing.JLabel homepageLabel = new javax.swing.JLabel();
javax.swing.JLabel appHomepageLabel = new javax.swing.JLabel();
javax.swing.JLabel appDescLabel = new javax.swing.JLabel();
setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE);
org.jdesktop.application.ResourceMap resourceMap = org.jdesktop.application.Application.getInstance(org.neugen.gui.NeuGenApp.class).getContext().getResourceMap(NGAboutBox.class);
setTitle(resourceMap.getString("title")); // NOI18N
setModal(true);
setName("aboutBox"); // NOI18N
setResizable(false);
javax.swing.ActionMap actionMap = org.jdesktop.application.Application.getInstance(org.neugen.gui.NeuGenApp.class).getContext().getActionMap(NGAboutBox.class, this);
closeButton.setAction(actionMap.get("closeAboutBox")); // NOI18N
closeButton.setName("closeButton"); // NOI18N
appTitleLabel.setFont(appTitleLabel.getFont().deriveFont(appTitleLabel.getFont().getStyle() | java.awt.Font.BOLD, appTitleLabel.getFont().getSize()+4));
appTitleLabel.setText(resourceMap.getString("Application.title")); // NOI18N
appTitleLabel.setName("appTitleLabel"); // NOI18N
versionLabel.setFont(versionLabel.getFont().deriveFont(versionLabel.getFont().getStyle() | java.awt.Font.BOLD));
versionLabel.setText(resourceMap.getString("versionLabel.text")); // NOI18N
versionLabel.setName("versionLabel"); // NOI18N
appVersionLabel.setText(resourceMap.getString("Application.version")); // NOI18N
appVersionLabel.setName("appVersionLabel"); // NOI18N
vendorLabel.setFont(vendorLabel.getFont().deriveFont(vendorLabel.getFont().getStyle() | java.awt.Font.BOLD));
vendorLabel.setText(resourceMap.getString("vendorLabel.text")); // NOI18N
vendorLabel.setName("vendorLabel"); // NOI18N
appVendorLabel.setText(resourceMap.getString("Application.vendor")); // NOI18N
appVendorLabel.setName("appVendorLabel"); // NOI18N
homepageLabel.setFont(homepageLabel.getFont().deriveFont(homepageLabel.getFont().getStyle() | java.awt.Font.BOLD));
homepageLabel.setText(resourceMap.getString("homepageLabel.text")); // NOI18N
homepageLabel.setName("homepageLabel"); // NOI18N
appHomepageLabel.setText(resourceMap.getString("Application.homepage")); // NOI18N
appHomepageLabel.setName("appHomepageLabel"); // NOI18N
appDescLabel.setText(resourceMap.getString("appDescLabel.text")); // NOI18N
appDescLabel.setName("appDescLabel"); // NOI18N
org.jdesktop.layout.GroupLayout layout = new org.jdesktop.layout.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(layout.createSequentialGroup()
.addContainerGap()
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(vendorLabel)
.add(appTitleLabel)
.add(appDescLabel, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, 269, Short.MAX_VALUE)
.add(layout.createSequentialGroup()
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(versionLabel)
.add(homepageLabel))
.addPreferredGap(org.jdesktop.layout.LayoutStyle.RELATED)
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(appVendorLabel)
.add(appVersionLabel)
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.TRAILING)
.add(closeButton)
.add(appHomepageLabel)))))
.addContainerGap())
);
layout.setVerticalGroup(
layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(org.jdesktop.layout.GroupLayout.TRAILING, layout.createSequentialGroup()
.addContainerGap()
.add(appTitleLabel)
.addPreferredGap(org.jdesktop.layout.LayoutStyle.RELATED)
.add(appDescLabel, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(org.jdesktop.layout.LayoutStyle.UNRELATED)
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.BASELINE)
.add(versionLabel)
.add(appVersionLabel))
.addPreferredGap(org.jdesktop.layout.LayoutStyle.RELATED)
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.TRAILING)
.add(layout.createSequentialGroup()
.add(appVendorLabel)
.addPreferredGap(org.jdesktop.layout.LayoutStyle.UNRELATED)
.add(appHomepageLabel))
.add(homepageLabel))
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(layout.createSequentialGroup()
.addPreferredGap(org.jdesktop.layout.LayoutStyle.RELATED, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.add(vendorLabel)
.add(49, 49, 49))
.add(layout.createSequentialGroup()
.add(18, 18, 18)
.add(closeButton, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 23, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)
.addContainerGap())))
);
pack();
}// </editor-fold>//GEN-END:initComponents
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JButton closeButton;
// End of variables declaration//GEN-END:variables
}<|fim▁end|> | * the source code of NeuGen. The copyright statement/attribution may not be removed.
* |
<|file_name|>TextFiles.java<|end_file_name|><|fim▁begin|>/*----------------------------------------------------------------------------*
* This file is part of Pitaya. *
* Copyright (C) 2012-2016 Osman KOCAK <[email protected]> *
* *
* This program is free software: you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or (at your *
* option) any later version. *
* This program is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public *
* License for more details. *
* You should have received a copy of the GNU Lesser General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*----------------------------------------------------------------------------*/
package org.kocakosm.pitaya.io;
import org.kocakosm.pitaya.charset.Charsets;
import org.kocakosm.pitaya.util.Parameters;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
/**
* Text files utilities.
*
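 * <p>A minimal usage sketch (the file name is illustrative):</p>
 * <pre>
 * List&lt;String&gt; first = TextFiles.head(new File("app.log"), 5);
 * List&lt;String&gt; last  = TextFiles.tail(new File("app.log"), 5);
 * </pre>
 *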
* @see XFiles
*
* @author Osman KOCAK
*/
public final class TextFiles
{
/**
* Returns the first (up to 10) lines of the given {@code File} using
* the system's default charset. Named after the Unix command of the
* same name.
*
* @param f the {@code File} to read.
*
* @return the first lines of the given {@code File}.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> head(File f) throws IOException
{
return head(f, Charsets.DEFAULT);
}
/**
* Returns the first (up to 10) lines of the given {@code File} using
* the specified charset. Named after the Unix command of the same name.
*
* @param f the {@code File} to read.
* @param charset the charset to use.
*
* @return the first lines of the given {@code File}.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> head(File f, Charset charset) throws IOException
{
return head(f, 10, charset);
}
/**
* Returns the first (up to {@code n}) lines of the given {@code File}
* using the system's default charset. Named after the Unix command of
* the same name.
*
* @param f the {@code File} to read.
* @param n the maximum number of lines to read.
*
* @return the first lines of the given {@code File}.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws IllegalArgumentException if {@code n} is negative.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> head(File f, int n) throws IOException
{
return head(f, n, Charsets.DEFAULT);
}
/**
* Returns the first (up to {@code n}) lines of the given {@code File}
* using the specified charset. Named after the Unix command of the same
* name.
*
* @param f the {@code File} to read.
* @param n the maximum number of lines to read.
* @param charset the charset to use.
*
* @return the first lines of the given {@code File}.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws IllegalArgumentException if {@code n} is negative.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> head(File f, int n, Charset charset)
throws IOException
{
Parameters.checkCondition(n >= 0);
List<String> lines = new ArrayList<String>();
BufferedReader reader = newReader(f, charset);
try {
String line = reader.readLine();
while (line != null && lines.size() < n) {
lines.add(line);
line = reader.readLine();
}
} finally {
IO.close(reader);
}
return Collections.unmodifiableList(lines);
}
/**
* Returns the last (up to 10) lines of the given {@code File} using the
* system's default charset. Named after the Unix command of the same
* name.
*
* @param f the {@code File} to read.
*
* @return the last lines of the given {@code File}.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> tail(File f) throws IOException
{
return tail(f, Charsets.DEFAULT);
}
/**
* Returns the last (up to 10) lines of the given {@code File} using the
* specified charset. Named after the Unix command of the same name.
*
* @param f the {@code File} to read.
* @param charset the charset to use.
*
* @return the last lines of the given {@code File}.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> tail(File f, Charset charset) throws IOException
{
return tail(f, 10, charset);
}
/**
* Returns the last (up to n) lines of the given {@code File} using the
* system's default charset. Named after the Unix command of the same
* name.
*
* @param f the {@code File} to read.
* @param n the maximum number of lines to read.
*
* @return the last lines of the given {@code File}.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws IllegalArgumentException if {@code n} is negative.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> tail(File f, int n) throws IOException
{
return tail(f, n, Charsets.DEFAULT);
}
/**
* Returns the last (up to n) lines of the given {@code File} using the
* specified charset. Named after the Unix command of the same name.
*
* @param f the {@code File} to read.
* @param n the maximum number of lines to read.
* @param charset the charset to use.
*
* @return the last lines of the given {@code File}.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws IllegalArgumentException if {@code n} is negative.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> tail(File f, int n, Charset charset)
throws IOException
{
Parameters.checkCondition(n >= 0);
if (n == 0) {
return Collections.emptyList();
}
List<String> lines = new LinkedList<String>();
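		// Single pass with a sliding window: keep at most the last n lines,
		// evicting from the front as new lines arrive.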
BufferedReader reader = newReader(f, charset);
try {
String line = reader.readLine();
while (line != null) {
lines.add(line);
if (lines.size() > n) {
lines.remove(0);
}
line = reader.readLine();
}
} finally {
IO.close(reader);
}
return Collections.unmodifiableList(lines);
}
/**
* Returns a new {@code BufferedReader} to read the given {@code File}
* using the system's default charset.
*
* @param f the file to read from.
*
* @return a {@code BufferedReader} to read the given {@code File}.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws FileNotFoundException if {@code f} doesn't exist, or if it is
* a directory rather than a regular file, or if it can't be opened
* for reading.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static BufferedReader newReader(File f) throws FileNotFoundException
{
return newReader(f, Charsets.DEFAULT);
}
/**
* Returns a new {@code BufferedReader} to read the given {@code File}
* using the specified charset.
*
* @param f the file to read from.
* @param charset the charset to use.
*
* @return a {@code BufferedReader} to read the given {@code File}.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws FileNotFoundException if {@code f} doesn't exist, or if it is
* a directory rather than a regular file, or if it can't be opened
* for reading.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static BufferedReader newReader(File f, Charset charset)
throws FileNotFoundException
{
InputStream in = new FileInputStream(f);
return new BufferedReader(new InputStreamReader(in, charset));
}
/**
* Returns a new {@code BufferedWriter} to write to the given<|fim▁hole|> *
* @param f the file to write to.
* @param options the write options.
*
* @return a {@code BufferedWriter} to write to the given {@code File}.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws IllegalArgumentException if incompatible options are given.
* @throws FileNotFoundException if {@code f} exists but is a directory
* rather than a regular file, or if it does not exist but cannot
* be created, or if it cannot be opened for any other reason.
* @throws IOException if the {@link WriteOption#CREATE} option is given
* and the specified file already exists.
* @throws SecurityException if a security manager exists and denies
* write access to {@code f}.
*/
public static BufferedWriter newWriter(File f, WriteOption... options)
throws IOException
{
return newWriter(f, Charsets.DEFAULT, options);
}
/**
* Returns a new {@code BufferedWriter} to write to the given
* {@code File} using the specified charset.
*
* @param f the file to write to.
* @param charset the charset to use.
* @param options the write options.
*
* @return a {@code BufferedWriter} to write to the given {@code File}.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws IllegalArgumentException if incompatible options are given.
* @throws FileNotFoundException if {@code f} exists but is a directory
* rather than a regular file, or if it does not exist but cannot
* be created, or if it cannot be opened for any other reason.
* @throws IOException if the {@link WriteOption#CREATE} option is given
* and the specified file already exists.
* @throws SecurityException if a security manager exists and denies
* write access to {@code f}.
*/
public static BufferedWriter newWriter(File f, Charset charset,
WriteOption... options) throws IOException
{
OutputStream out = XFiles.newOutputStream(f, options);
return new BufferedWriter(new OutputStreamWriter(out, charset));
}
/**
* Reads the whole content of the given {@code File} as a {@code String}
* using the system's default charset.
*
* @param f the file to read.
*
* @return the file's content as a {@code String}.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static String read(File f) throws IOException
{
return read(f, Charsets.DEFAULT);
}
/**
* Reads the whole content of the given {@code File} as a {@code String}
* using the specified charset.
*
* @param f the file to read.
* @param charset the charset to use.
*
* @return the file's content as a {@code String}.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static String read(File f, Charset charset) throws IOException
{
BufferedReader in = newReader(f, charset);
try {
return CharStreams.read(in);
} finally {
IO.close(in);
}
}
/**
* Reads all the lines from the given {@code File} using the system's
* default charset. Note that the returned {@code List} is immutable.
*
* @param f the file to read.
*
* @return the file's lines.
*
* @throws NullPointerException if {@code f} is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> readLines(File f) throws IOException
{
return readLines(f, Charsets.DEFAULT);
}
/**
* Reads all the lines from the given {@code File} using the specified
* charset. Note that the returned {@code List} is immutable.
*
* @param f the file to read.
* @param charset the charset to use.
*
* @return the file's lines.
*
* @throws NullPointerException if one of the arguments is {@code null}.
* @throws IOException if {@code f} does not exist, or if it is a
* directory rather than a regular file, or if it can't be read.
* @throws SecurityException if a security manager exists and denies
* read access to {@code f}.
*/
public static List<String> readLines(File f, Charset charset)
throws IOException
{
BufferedReader in = newReader(f, charset);
try {
return CharStreams.readLines(in);
} finally {
IO.close(in);
}
}
private TextFiles()
{
/* ... */
}
}<|fim▁end|> | * {@code File} using the system's default charset. |
<|file_name|>snowball.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Natural Language Toolkit: Snowball Stemmer
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Peter Michael Stahl <[email protected]>
# Peter Ljunglof <[email protected]> (revisions)
# Algorithms: Dr Martin Porter <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Snowball stemmers
This module provides a port of the Snowball stemmers
developed by Martin Porter.
There is also a demo function: `snowball.demo()`.
"""
from __future__ import unicode_literals, print_function
from nltk import compat
from nltk.corpus import stopwords
from nltk.stem import porter
from nltk.stem.util import suffix_replace
from nltk.stem.api import StemmerI
class SnowballStemmer(StemmerI):
"""
Snowball Stemmer
The following languages are supported:
Danish, Dutch, English, Finnish, French, German,
Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
Spanish and Swedish.
The algorithm for English is documented here:
Porter, M. \"An algorithm for suffix stripping.\"
Program 14.3 (1980): 130-137.
The algorithms have been developed by Martin Porter.
These stemmers are called Snowball, because Porter created
a programming language with this name for creating
new stemming algorithms. There is more information available
at http://snowball.tartarus.org/
The stemmer is invoked as shown below:
>>> from nltk.stem import SnowballStemmer
>>> print(" ".join(SnowballStemmer.languages)) # See which languages are supported
danish dutch english finnish french german hungarian
italian norwegian porter portuguese romanian russian
spanish swedish
>>> stemmer = SnowballStemmer("german") # Choose a language
>>> stemmer.stem("Autobahnen") # Stem a word
'autobahn'
Invoking the stemmers that way is useful if you do not know the
language to be stemmed at runtime. Alternatively, if you already know
the language, then you can invoke the language specific stemmer directly:
>>> from nltk.stem.snowball import GermanStemmer
>>> stemmer = GermanStemmer()
>>> stemmer.stem("Autobahnen")
'autobahn'
:param language: The language whose subclass is instantiated.
:type language: str or unicode
:param ignore_stopwords: If set to True, stopwords are
not stemmed and returned unchanged.
Set to False by default.
:type ignore_stopwords: bool
:raise ValueError: If there is no stemmer for the specified
language, a ValueError is raised.
"""
languages = ("danish", "dutch", "english", "finnish", "french", "german",
"hungarian", "italian", "norwegian", "porter", "portuguese",
"romanian", "russian", "spanish", "swedish")
def __init__(self, language, ignore_stopwords=False):
if language not in self.languages:
raise ValueError("The language '%s' is not supported." % language)
stemmerclass = globals()[language.capitalize() + "Stemmer"]
self.stemmer = stemmerclass(ignore_stopwords)
self.stem = self.stemmer.stem
self.stopwords = self.stemmer.stopwords
@compat.python_2_unicode_compatible
class _LanguageSpecificStemmer(StemmerI):
"""
This helper subclass offers the possibility
to invoke a specific stemmer directly.
This is useful if you already know the language to be stemmed at runtime.
Create an instance of the Snowball stemmer.
:param ignore_stopwords: If set to True, stopwords are
not stemmed and returned unchanged.
Set to False by default.
:type ignore_stopwords: bool
"""
def __init__(self, ignore_stopwords=False):
# The language is the name of the class, minus the final "Stemmer".
language = type(self).__name__.lower()
if language.endswith("stemmer"):
language = language[:-7]
self.stopwords = set()
if ignore_stopwords:
try:
for word in stopwords.words(language):
self.stopwords.add(word)
except IOError:
raise ValueError("%r has no list of stopwords. Please set"
" 'ignore_stopwords' to 'False'." % self)
def __repr__(self):
"""
Print out the string representation of the respective class.
"""
return "<%s>" % type(self).__name__
class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer):
"""
A word stemmer based on the original Porter stemming algorithm.
Porter, M. \"An algorithm for suffix stripping.\"
Program 14.3 (1980): 130-137.
A few minor modifications have been made to Porter's basic
algorithm. See the source code of the module
nltk.stem.porter for more information.
"""
def __init__(self, ignore_stopwords=False):
_LanguageSpecificStemmer.__init__(self, ignore_stopwords)
porter.PorterStemmer.__init__(self)
class _ScandinavianStemmer(_LanguageSpecificStemmer):
"""
This subclass encapsulates a method for defining the string region R1.
It is used by the Danish, Norwegian, and Swedish stemmer.
"""
def _r1_scandinavian(self, word, vowels):
"""
Return the region R1 that is used by the Scandinavian stemmers.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel. But then R1 is adjusted so that the region
before it contains at least three letters.
:param word: The word whose region R1 is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region R1.
:type vowels: unicode
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses DanishStemmer, NorwegianStemmer, and
SwedishStemmer. It is not to be invoked directly!
"""
r1 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) >= 3:
r1 = word[i+1:]
else:
return word
break
return r1
class _StandardStemmer(_LanguageSpecificStemmer):
"""
This subclass encapsulates two methods for defining the standard versions
of the string regions R1, R2, and RV.
"""
def _r1r2_standard(self, word, vowels):
"""
Return the standard interpretations of the string regions R1 and R2.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel.
R2 is the region after the first non-vowel following a vowel
in R1, or is the null region at the end of the word if there
is no such non-vowel.
:param word: The word whose regions R1 and R2 are determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the regions R1 and R2.
:type vowels: unicode
:return: (r1,r2), the regions R1 and R2 for the respective word.
:rtype: tuple
:note: This helper method is invoked by the respective stem method of
the subclasses DutchStemmer, FinnishStemmer,
FrenchStemmer, GermanStemmer, ItalianStemmer,
PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
It is not to be invoked directly!
:note: A detailed description of how to define R1 and R2
can be found at http://snowball.tartarus.org/texts/r1r2.html
"""
r1 = ""
r2 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
r1 = word[i+1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i-1] in vowels:
r2 = r1[i+1:]
break
return (r1, r2)
def _rv_standard(self, word, vowels):
"""
Return the standard interpretation of the string region RV.
If the second letter is a consonant, RV is the region after the
next following vowel. If the first two letters are vowels, RV is
the region after the next following consonant. Otherwise, RV is
the region after the third letter.
:param word: The word whose region RV is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region RV.
:type vowels: unicode
:return: the region RV for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses ItalianStemmer, PortugueseStemmer,
RomanianStemmer, and SpanishStemmer. It is not to be
invoked directly!
"""
rv = ""
if len(word) >= 2:
if word[1] not in vowels:
for i in range(2, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
elif word[0] in vowels and word[1] in vowels:
for i in range(2, len(word)):
if word[i] not in vowels:
rv = word[i+1:]
break
else:
rv = word[3:]
return rv
class DanishStemmer(_ScandinavianStemmer):
"""
The Danish Snowball stemmer.
:cvar __vowels: The Danish vowels.
:type __vowels: unicode
:cvar __consonants: The Danish consonants.
:type __consonants: unicode
:cvar __double_consonants: The Danish double consonants.
:type __double_consonants: tuple
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Danish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/danish/stemmer.html
"""
# The language's vowels and other important characters are defined.
__vowels = "aeiouy\xE6\xE5\xF8"
__consonants = "bcdfghjklmnpqrstvwxz"
__double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
"kk", "ll", "mm", "nn", "pp", "qq", "rr",
"ss", "tt", "vv", "ww", "xx", "zz")
__s_ending = "abcdfghjklmnoprtvyz\xE5"
# The different suffixes, divided into the algorithm's steps
# and organized by length, are listed in tuples.
__step1_suffixes = ("erendes", "erende", "hedens", "ethed",
"erede", "heden", "heder", "endes",
"ernes", "erens", "erets", "ered",
"ende", "erne", "eren", "erer", "heds",
"enes", "eres", "eret", "hed", "ene", "ere",
"ens", "ers", "ets", "en", "er", "es", "et",
"e", "s")
__step2_suffixes = ("gd", "dt", "gt", "kt")
__step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig")
def stem(self, word):
"""
Stem a Danish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
# Every word is put into lower case for normalization.
word = word.lower()
if word in self.stopwords:
return word
# After this, the required regions are generated
# by the respective helper method.
r1 = self._r1_scandinavian(word, self.__vowels)
# Then the actual stemming process starts.
# Every new step is explicitly indicated
# according to the descriptions on the Snowball website.
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
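                # A final "s" is deleted only when preceded by a valid
                # s-ending letter; all other suffixes are removed outright.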
if suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 3
if r1.endswith("igst"):
word = word[:-2]
r1 = r1[:-2]
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix == "l\xF8st":
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
if r1.endswith(self.__step2_suffixes):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 4: Undouble
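        # e.g. "bakke" has already been reduced to "bakk" in step 1;
        # the final double consonant is undoubled here, giving "bak".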
for double_cons in self.__double_consonants:
if word.endswith(double_cons) and len(word) > 3:
word = word[:-1]
break
return word
class DutchStemmer(_StandardStemmer):
"""
The Dutch Snowball stemmer.
:cvar __vowels: The Dutch vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
:type __step3b_suffixes: tuple
:note: A detailed description of the Dutch
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/dutch/stemmer.html
"""
__vowels = "aeiouy\xE8"
__step1_suffixes = ("heden", "ene", "en", "se", "s")
__step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig")
def stem(self, word):
"""
Stem a Dutch word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step2_success = False
# Vowel accents are removed.
word = (word.replace("\xE4", "a").replace("\xE1", "a")
.replace("\xEB", "e").replace("\xE9", "e")
.replace("\xED", "i").replace("\xEF", "i")
.replace("\xF6", "o").replace("\xF3", "o")
.replace("\xFC", "u").replace("\xFA", "u"))
        # An initial 'y', a 'y' after a vowel,
        # and an 'i' between vowels are put into upper case.
        # From now on these are treated as consonants.
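        # e.g. "draaien" becomes "draaIen".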
if word.startswith("y"):
word = "".join(("Y", word[1:]))
for i in range(1, len(word)):
if word[i-1] in self.__vowels and word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
for i in range(1, len(word)-1):
if (word[i-1] in self.__vowels and word[i] == "i" and
word[i+1] in self.__vowels):
word = "".join((word[:i], "I", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
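        # e.g. for "open" the standard R1 would be "en";
        # it is adjusted to "n".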
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i-1] in self.__vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) == 0:
return word
break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "heden":
word = suffix_replace(word, suffix, "heid")
r1 = suffix_replace(r1, suffix, "heid")
if r2.endswith("heden"):
r2 = suffix_replace(r2, suffix, "heid")
elif (suffix in ("ene", "en") and
not word.endswith("heden") and
word[-len(suffix)-1] not in self.__vowels and
word[-len(suffix)-3:-len(suffix)] != "gem"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif (suffix in ("se", "s") and
word[-len(suffix)-1] not in self.__vowels and
word[-len(suffix)-1] != "j"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
if r1.endswith("e") and word[-2] not in self.__vowels:
step2_success = True
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
# STEP 3a
if r2.endswith("heid") and word[-5] != "c":
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
if (r1.endswith("en") and word[-3] not in self.__vowels and
word[-5:-2] != "gem"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
# STEP 3b: Derivational suffixes
for suffix in self.__step3b_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ing"):
word = word[:-3]
r2 = r2[:-3]
if r2.endswith("ig") and word[-3] != "e":
word = word[:-2]
else:
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
elif suffix == "ig" and word[-3] != "e":
word = word[:-2]
elif suffix == "lijk":
word = word[:-4]
r1 = r1[:-4]
if r1.endswith("e") and word[-2] not in self.__vowels:
word = word[:-1]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
elif suffix == "baar":
word = word[:-4]
elif suffix == "bar" and step2_success:
word = word[:-3]
break
# STEP 4: Undouble vowel
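        # e.g. "boom" becomes "bom": a double vowel between consonants
        # at the end of the word is shortened.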
if len(word) >= 4:
if word[-1] not in self.__vowels and word[-1] != "I":
if word[-3:-1] in ("aa", "ee", "oo", "uu"):
if word[-4] not in self.__vowels:
word = "".join((word[:-3], word[-3], word[-1]))
# All occurrences of 'I' and 'Y' are put back into lower case.
word = word.replace("I", "i").replace("Y", "y")
return word
class EnglishStemmer(_StandardStemmer):
"""
The English Snowball stemmer.
:cvar __vowels: The English vowels.
:type __vowels: unicode
:cvar __double_consonants: The English double consonants.
:type __double_consonants: tuple
:cvar __li_ending: Letters that may directly appear before a word final 'li'.
:type __li_ending: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm.
:type __step1a_suffixes: tuple
:cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm.
:type __step1b_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
:type __step5_suffixes: tuple
:cvar __special_words: A dictionary containing words
which have to be stemmed specially.
:type __special_words: dict
:note: A detailed description of the English
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/english/stemmer.html
"""
__vowels = "aeiouy"
__double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn",
"pp", "rr", "tt")
__li_ending = "cdeghkmnrt"
__step0_suffixes = ("'s'", "'s", "'")
__step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s")
__step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed")
__step2_suffixes = ('ization', 'ational', 'fulness', 'ousness',
'iveness', 'tional', 'biliti', 'lessli',
'entli', 'ation', 'alism', 'aliti', 'ousli',
'iviti', 'fulli', 'enci', 'anci', 'abli',
'izer', 'ator', 'alli', 'bli', 'ogi', 'li')
__step3_suffixes = ('ational', 'tional', 'alize', 'icate', 'iciti',
'ative', 'ical', 'ness', 'ful')
__step4_suffixes = ('ement', 'ance', 'ence', 'able', 'ible', 'ment',
'ant', 'ent', 'ism', 'ate', 'iti', 'ous',
'ive', 'ize', 'ion', 'al', 'er', 'ic')
__step5_suffixes = ("e", "l")
__special_words = {"skis" : "ski",
"skies" : "sky",
"dying" : "die",
"lying" : "lie",
"tying" : "tie",
"idly" : "idl",
"gently" : "gentl",
"ugly" : "ugli",
"early" : "earli",
"only" : "onli",
"singly" : "singl",
"sky" : "sky",
"news" : "news",
"howe" : "howe",
"atlas" : "atlas",
"cosmos" : "cosmos",
"bias" : "bias",
"andes" : "andes",
"inning" : "inning",
"innings" : "inning",
"outing" : "outing",
"outings" : "outing",
"canning" : "canning",
"cannings" : "canning",
"herring" : "herring",
"herrings" : "herring",
"earring" : "earring",
"earrings" : "earring",
"proceed" : "proceed",
"proceeds" : "proceed",
"proceeded" : "proceed",
"proceeding" : "proceed",
"exceed" : "exceed",
"exceeds" : "exceed",
"exceeded" : "exceed",
"exceeding" : "exceed",
"succeed" : "succeed",
"succeeds" : "succeed",
"succeeded" : "succeed",
"succeeding" : "succeed"}
def stem(self, word):
"""
Stem an English word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords or len(word) <= 2:
return word
elif word in self.__special_words:
return self.__special_words[word]
# Map the different apostrophe characters to a single consistent one
word = (word.replace("\u2019", "\x27")
.replace("\u2018", "\x27")
.replace("\u201B", "\x27"))
if word.startswith("\x27"):
word = word[1:]
if word.startswith("y"):
word = "".join(("Y", word[1:]))
for i in range(1, len(word)):
if word[i-1] in self.__vowels and word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
step1a_vowel_found = False
step1b_vowel_found = False
r1 = ""
r2 = ""
if word.startswith(("gener", "commun", "arsen")):
if word.startswith(("gener", "arsen")):
r1 = word[5:]
else:
r1 = word[6:]
for i in range(1, len(r1)):
if r1[i] not in self.__vowels and r1[i-1] in self.__vowels:
r2 = r1[i+1:]
break
else:
r1, r2 = self._r1r2_standard(word, self.__vowels)
# STEP 0
for suffix in self.__step0_suffixes:
if word.endswith(suffix):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 1a
for suffix in self.__step1a_suffixes:
if word.endswith(suffix):
if suffix == "sses":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix in ("ied", "ies"):
if len(word[:-len(suffix)]) > 1:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif suffix == "s":
for letter in word[:-2]:
if letter in self.__vowels:
step1a_vowel_found = True
break
if step1a_vowel_found:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
break
# STEP 1b
for suffix in self.__step1b_suffixes:
if word.endswith(suffix):
if suffix in ("eed", "eedly"):
if r1.endswith(suffix):
word = suffix_replace(word, suffix, "ee")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ee")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ee")
else:
r2 = ""
else:
for letter in word[:-len(suffix)]:
if letter in self.__vowels:
step1b_vowel_found = True
break
if step1b_vowel_found:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
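                        # After the deletion, an 'e' is restored if the
                        # stem now ends in 'at', 'bl' or 'iz'
                        # (e.g. "conflated" -> "conflat" -> "conflate").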
if word.endswith(("at", "bl", "iz")):
word = "".join((word, "e"))
r1 = "".join((r1, "e"))
                            if len(word) > 5 or len(r1) >= 3:
r2 = "".join((r2, "e"))
elif word.endswith(self.__double_consonants):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif ((r1 == "" and len(word) >= 3 and
word[-1] not in self.__vowels and
word[-1] not in "wxY" and
word[-2] in self.__vowels and
word[-3] not in self.__vowels)
or
(r1 == "" and len(word) == 2 and
word[0] in self.__vowels and
word[1] not in self.__vowels)):
word = "".join((word, "e"))
if len(r1) > 0:
r1 = "".join((r1, "e"))
if len(r2) > 0:
r2 = "".join((r2, "e"))
break
# STEP 1c
if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels:
word = "".join((word[:-1], "i"))
if len(r1) >= 1:
r1 = "".join((r1[:-1], "i"))
else:
r1 = ""
if len(r2) >= 1:
r2 = "".join((r2[:-1], "i"))
else:
r2 = ""
# STEP 2
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "tional":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix in ("enci", "anci", "abli"):
word = "".join((word[:-1], "e"))
if len(r1) >= 1:
r1 = "".join((r1[:-1], "e"))
else:
r1 = ""
if len(r2) >= 1:
r2 = "".join((r2[:-1], "e"))
else:
r2 = ""
elif suffix == "entli":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix in ("izer", "ization"):
word = suffix_replace(word, suffix, "ize")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ize")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ize")
else:
r2 = ""
elif suffix in ("ational", "ation", "ator"):
word = suffix_replace(word, suffix, "ate")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ate")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ate")
else:
r2 = "e"
elif suffix in ("alism", "aliti", "alli"):
word = suffix_replace(word, suffix, "al")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "al")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "al")
else:
r2 = ""
elif suffix == "fulness":
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
elif suffix in ("ousli", "ousness"):
word = suffix_replace(word, suffix, "ous")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ous")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ous")
else:
r2 = ""
elif suffix in ("iveness", "iviti"):
word = suffix_replace(word, suffix, "ive")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ive")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ive")
else:
r2 = "e"
elif suffix in ("biliti", "bli"):
word = suffix_replace(word, suffix, "ble")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ble")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ble")
else:
r2 = ""
elif suffix == "ogi" and word[-4] == "l":
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif suffix in ("fulli", "lessli"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "li" and word[-3] in self.__li_ending:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
break
# STEP 3
for suffix in self.__step3_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "tional":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "ational":
word = suffix_replace(word, suffix, "ate")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ate")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ate")
else:
r2 = ""
elif suffix == "alize":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
elif suffix in ("icate", "iciti", "ical"):
word = suffix_replace(word, suffix, "ic")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ic")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ic")
else:
r2 = ""
elif suffix in ("ful", "ness"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
elif suffix == "ative" and r2.endswith(suffix):
word = word[:-5]
r1 = r1[:-5]
r2 = r2[:-5]
break
# STEP 4
for suffix in self.__step4_suffixes:
if word.endswith(suffix):
if r2.endswith(suffix):
if suffix == "ion":
if word[-4] in "st":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 5
if r2.endswith("l") and word[-2] == "l":
word = word[:-1]
elif r2.endswith("e"):
word = word[:-1]
elif r1.endswith("e"):
if len(word) >= 4 and (word[-2] in self.__vowels or
word[-2] in "wxY" or
word[-3] not in self.__vowels or
word[-4] in self.__vowels):
word = word[:-1]
word = word.replace("Y", "y")
return word
class FinnishStemmer(_StandardStemmer):
"""
The Finnish Snowball stemmer.
:cvar __vowels: The Finnish vowels.
:type __vowels: unicode
:cvar __restricted_vowels: A subset of the Finnish vowels.
:type __restricted_vowels: unicode
:cvar __long_vowels: The Finnish vowels in their long forms.
:type __long_vowels: tuple
:cvar __consonants: The Finnish consonants.
:type __consonants: unicode
:cvar __double_consonants: The Finnish double consonants.
:type __double_consonants: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the Finnish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/finnish/stemmer.html
"""
__vowels = "aeiouy\xE4\xF6"
__restricted_vowels = "aeiou\xE4\xF6"
__long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4",
"\xF6\xF6")
__consonants = "bcdfghjklmnpqrstvwxz"
__double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
"kk", "ll", "mm", "nn", "pp", "qq", "rr",
"ss", "tt", "vv", "ww", "xx", "zz")
__step1_suffixes = ('kaan', 'k\xE4\xE4n', 'sti', 'kin', 'han',
'h\xE4n', 'ko', 'k\xF6', 'pa', 'p\xE4')
__step2_suffixes = ('nsa', 'ns\xE4', 'mme', 'nne', 'si', 'ni',
'an', '\xE4n', 'en')
__step3_suffixes = ('siin', 'tten', 'seen', 'han', 'hen', 'hin',
'hon', 'h\xE4n', 'h\xF6n', 'den', 'tta',
'tt\xE4', 'ssa', 'ss\xE4', 'sta',
'st\xE4', 'lla', 'll\xE4', 'lta',
'lt\xE4', 'lle', 'ksi', 'ine', 'ta',
't\xE4', 'na', 'n\xE4', 'a', '\xE4',
'n')
__step4_suffixes = ('impi', 'impa', 'imp\xE4', 'immi', 'imma',
'imm\xE4', 'mpi', 'mpa', 'mp\xE4', 'mmi',
'mma', 'mm\xE4', 'eja', 'ej\xE4')
def stem(self, word):
"""
Stem a Finnish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step3_success = False
r1, r2 = self._r1r2_standard(word, self.__vowels)
# STEP 1: Particles etc.
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "sti":
if suffix in r2:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
if word[-len(suffix)-1] in "ntaeiouy\xE4\xF6":
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2: Possessives
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "si":
if word[-3] != "k":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "ni":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
if word.endswith("kse"):
word = suffix_replace(word, "kse", "ksi")
if r1.endswith("kse"):
r1 = suffix_replace(r1, "kse", "ksi")
if r2.endswith("kse"):
r2 = suffix_replace(r2, "kse", "ksi")
elif suffix == "an":
if (word[-4:-2] in ("ta", "na") or
word[-5:-2] in ("ssa", "sta", "lla", "lta")):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "\xE4n":
if (word[-4:-2] in ("t\xE4", "n\xE4") or
word[-5:-2] in ("ss\xE4", "st\xE4",
"ll\xE4", "lt\xE4")):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "en":
if word[-5:-2] in ("lle", "ine"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
break
# STEP 3: Cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix in ("han", "hen", "hin", "hon", "h\xE4n",
"h\xF6n"):
if ((suffix == "han" and word[-4] == "a") or
(suffix == "hen" and word[-4] == "e") or
(suffix == "hin" and word[-4] == "i") or
(suffix == "hon" and word[-4] == "o") or
(suffix == "h\xE4n" and word[-4] == "\xE4") or
(suffix == "h\xF6n" and word[-4] == "\xF6")):
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix in ("siin", "den", "tten"):
if (word[-len(suffix)-1] == "i" and
word[-len(suffix)-2] in self.__restricted_vowels):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
else:
continue
elif suffix == "seen":
if word[-6:-4] in self.__long_vowels:
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
step3_success = True
else:
continue
elif suffix in ("a", "\xE4"):
if word[-2] in self.__vowels and word[-3] in self.__consonants:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
elif suffix in ("tta", "tt\xE4"):
if word[-4] == "e":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix == "n":
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
if word[-2:] == "ie" or word[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
break
# STEP 4: Other endings
for suffix in self.__step4_suffixes:
if r2.endswith(suffix):
if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma",
"mm\xE4"):
if word[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 5: Plurals
if step3_success and len(r1) >= 1 and r1[-1] in "ij":
word = word[:-1]
r1 = r1[:-1]
elif (not step3_success and len(r1) >= 2 and
r1[-1] == "t" and r1[-2] in self.__vowels):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
if r2.endswith("imma"):
word = word[:-4]
r1 = r1[:-4]
elif r2.endswith("mma") and r2[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
# STEP 6: Tidying up
if r1[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
if (len(r1) >= 2 and r1[-2] in self.__consonants and
r1[-1] in "a\xE4ei"):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith(("oj", "uj")):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith("jo"):
word = word[:-1]
r1 = r1[:-1]
# If the word ends with a double consonant
# followed by zero or more vowels, the last consonant is removed.
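        # e.g. "takk" becomes "tak" and "takka" becomes "taka".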
for i in range(1, len(word)):
if word[-i] in self.__vowels:
continue
else:
if i == 1:
if word[-i-1:] in self.__double_consonants:
word = word[:-1]
else:
if word[-i-1:-i+1] in self.__double_consonants:
word = "".join((word[:-i], word[-i+1:]))
break
return word
class FrenchStemmer(_StandardStemmer):
"""
The French Snowball stemmer.
:cvar __vowels: The French vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
:type __step2a_suffixes: tuple
:cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
:type __step2b_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the French
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/french/stemmer.html
"""
__vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9"
__step1_suffixes = ('issements', 'issement', 'atrices', 'atrice',
'ateurs', 'ations', 'logies', 'usions',
'utions', 'ements', 'amment', 'emment',
'ances', 'iqUes', 'ismes', 'ables', 'istes',
'ateur', 'ation', 'logie', 'usion', 'ution',
'ences', 'ement', 'euses', 'ments', 'ance',
'iqUe', 'isme', 'able', 'iste', 'ence',
'it\xE9s', 'ives', 'eaux', 'euse', 'ment',
'eux', 'it\xE9', 'ive', 'ifs', 'aux', 'if')
__step2a_suffixes = ('issaIent', 'issantes', 'iraIent', 'issante',
'issants', 'issions', 'irions', 'issais',
'issait', 'issant', 'issent', 'issiez', 'issons',
'irais', 'irait', 'irent', 'iriez', 'irons',
'iront', 'isses', 'issez', '\xEEmes',
'\xEEtes', 'irai', 'iras', 'irez', 'isse',
'ies', 'ira', '\xEEt', 'ie', 'ir', 'is',
'it', 'i')
__step2b_suffixes = ('eraIent', 'assions', 'erions', 'assent',
'assiez', '\xE8rent', 'erais', 'erait',
'eriez', 'erons', 'eront', 'aIent', 'antes',
'asses', 'ions', 'erai', 'eras', 'erez',
'\xE2mes', '\xE2tes', 'ante', 'ants',
'asse', '\xE9es', 'era', 'iez', 'ais',
'ait', 'ant', '\xE9e', '\xE9s', 'er',
'ez', '\xE2t', 'ai', 'as', '\xE9', 'a')
__step4_suffixes = ('i\xE8re', 'I\xE8re', 'ion', 'ier', 'Ier',
'e', '\xEB')
def stem(self, word):
"""
Stem a French word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
rv_ending_found = False
step2a_success = False
step2b_success = False
# Every occurrence of 'u' after 'q' is put into upper case.
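        # e.g. "quand" becomes "qUand", so that this 'u' is treated as
        # a consonant in the following steps.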
for i in range(1, len(word)):
if word[i-1] == "q" and word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
# Every occurrence of 'u' and 'i'
# between vowels is put into upper case.
# Every occurrence of 'y' preceded or
# followed by a vowel is also put into upper case.
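        # e.g. "payer" becomes "paYer".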
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "i":
word = "".join((word[:i], "I", word[i+1:]))
if word[i-1] in self.__vowels or word[i+1] in self.__vowels:
if word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self.__rv_french(word, self.__vowels)
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "eaux":
word = word[:-1]
step1_success = True
elif suffix in ("euse", "euses"):
if suffix in r2:
word = word[:-len(suffix)]
step1_success = True
elif suffix in r1:
word = suffix_replace(word, suffix, "eux")
step1_success = True
elif suffix in ("ement", "ements") and suffix in rv:
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "iv" and "iv" in r2:
word = word[:-2]
if word[-2:] == "at" and "at" in r2:
word = word[:-2]
elif word[-3:] == "eus":
if "eus" in r2:
word = word[:-3]
elif "eus" in r1:
word = "".join((word[:-1], "x"))
elif word[-3:] in ("abl", "iqU"):
if "abl" in r2 or "iqU" in r2:
word = word[:-3]
elif word[-3:] in ("i\xE8r", "I\xE8r"):
if "i\xE8r" in rv or "I\xE8r" in rv:
word = "".join((word[:-3], "i"))
elif suffix == "amment" and suffix in rv:
word = suffix_replace(word, "amment", "ant")
rv = suffix_replace(rv, "amment", "ant")
rv_ending_found = True
elif suffix == "emment" and suffix in rv:
word = suffix_replace(word, "emment", "ent")
rv_ending_found = True
elif (suffix in ("ment", "ments") and suffix in rv and
not rv.startswith(suffix) and
rv[rv.rindex(suffix)-1] in self.__vowels):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
rv_ending_found = True
elif suffix == "aux" and suffix in r1:
word = "".join((word[:-2], "l"))
step1_success = True
elif (suffix in ("issement", "issements") and suffix in r1
and word[-len(suffix)-1] not in self.__vowels):
word = word[:-len(suffix)]
step1_success = True
elif suffix in ("ance", "iqUe", "isme", "able", "iste",
"eux", "ances", "iqUes", "ismes",
"ables", "istes") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
elif suffix in ("atrice", "ateur", "ation", "atrices",
"ateurs", "ations") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
elif suffix in ("logie", "logies") and suffix in r2:
word = suffix_replace(word, suffix, "log")
step1_success = True
elif (suffix in ("usion", "ution", "usions", "utions") and
suffix in r2):
word = suffix_replace(word, suffix, "u")
step1_success = True
elif suffix in ("ence", "ences") and suffix in r2:
word = suffix_replace(word, suffix, "ent")
step1_success = True
elif suffix in ("it\xE9", "it\xE9s") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
if word[-4:] == "abil":
if "abil" in r2:
word = word[:-4]
else:
word = "".join((word[:-2], "l"))
elif word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
elif word[-2:] == "iv":
if "iv" in r2:
word = word[:-2]
elif (suffix in ("if", "ive", "ifs", "ives") and
suffix in r2):
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "at" and "at" in r2:
word = word[:-2]
if word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
break
# STEP 2a: Verb suffixes beginning 'i'
if not step1_success or rv_ending_found:
for suffix in self.__step2a_suffixes:
if word.endswith(suffix):
if (suffix in rv and len(rv) > len(suffix) and
rv[rv.rindex(suffix)-1] not in self.__vowels):
word = word[:-len(suffix)]
step2a_success = True
break
# STEP 2b: Other verb suffixes
if not step2a_success:
for suffix in self.__step2b_suffixes:
if rv.endswith(suffix):
if suffix == "ions" and "ions" in r2:
word = word[:-4]
step2b_success = True
elif suffix in ('eraIent', 'erions', '\xE8rent',
'erais', 'erait', 'eriez',
'erons', 'eront', 'erai', 'eras',
'erez', '\xE9es', 'era', 'iez',
'\xE9e', '\xE9s', 'er', 'ez',
'\xE9'):
word = word[:-len(suffix)]
step2b_success = True
elif suffix in ('assions', 'assent', 'assiez',
'aIent', 'antes', 'asses',
'\xE2mes', '\xE2tes', 'ante',
'ants', 'asse', 'ais', 'ait',
'ant', '\xE2t', 'ai', 'as',
'a'):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
step2b_success = True
if rv.endswith("e"):
word = word[:-1]
break
# STEP 3
if step1_success or step2a_success or step2b_success:
if word[-1] == "Y":
word = "".join((word[:-1], "i"))
elif word[-1] == "\xE7":
word = "".join((word[:-1], "c"))
# STEP 4: Residual suffixes
else:
if (len(word) >= 2 and word[-1] == "s" and
word[-2] not in "aiou\xE8s"):
word = word[:-1]
for suffix in self.__step4_suffixes:
if word.endswith(suffix):
if suffix in rv:
if (suffix == "ion" and suffix in r2 and
rv[-4] in "st"):
word = word[:-3]
elif suffix in ("ier", "i\xE8re", "Ier",
"I\xE8re"):
word = suffix_replace(word, suffix, "i")
elif suffix == "e":
word = word[:-1]
elif suffix == "\xEB" and word[-3:-1] == "gu":
word = word[:-1]
break
# STEP 5: Undouble
if word.endswith(("enn", "onn", "ett", "ell", "eill")):
word = word[:-1]
# STEP 6: Un-accent
for i in range(1, len(word)):
if word[-i] not in self.__vowels:
                continue
else:
if i != 1 and word[-i] in ("\xE9", "\xE8"):
word = "".join((word[:-i], "e", word[-i+1:]))
break
word = (word.replace("I", "i")
.replace("U", "u")
.replace("Y", "y"))
return word
def __rv_french(self, word, vowels):
"""
Return the region RV that is used by the French stemmer.
If the word begins with two vowels, RV is the region after
the third letter. Otherwise, it is the region after the first
vowel not at the beginning of the word, or the end of the word
        if these positions cannot be found. (Exceptionally, a word
        beginning with u'par', u'col' or u'tap' is also taken to
        define RV as the region after these three letters.)
:param word: The French word whose region RV is determined.
:type word: str or unicode
:param vowels: The French vowels that are used to determine
the region RV.
:type vowels: unicode
:return: the region RV for the respective French word.
:rtype: unicode
:note: This helper method is invoked by the stem method of
the subclass FrenchStemmer. It is not to be invoked directly!
"""
rv = ""
if len(word) >= 2:
if (word.startswith(("par", "col", "tap")) or
(word[0] in vowels and word[1] in vowels)):
rv = word[3:]
else:
for i in range(1, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
return rv
class GermanStemmer(_StandardStemmer):
"""
The German Snowball stemmer.
:cvar __vowels: The German vowels.
:type __vowels: unicode
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __st_ending: Letter that may directly appear before a word final 'st'.
:type __st_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the German
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/german/stemmer.html
"""
__vowels = "aeiouy\xE4\xF6\xFC"
__s_ending = "bdfghklmnrt"
__st_ending = "bdfghklmnt"
__step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s")
__step2_suffixes = ("est", "en", "er", "st")
__step3_suffixes = ("isch", "lich", "heit", "keit",
"end", "ung", "ig", "ik")
def stem(self, word):
"""
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
word = word.replace("\xDF", "ss")
# Every occurrence of 'u' and 'y'
# between vowels is put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
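        # e.g. for "oben" the standard R1 would be "en";
        # it is adjusted to "n".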
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i-1] in self.__vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) == 0:
return word
break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if (suffix in ("en", "es", "e") and
word[-len(suffix)-4:-len(suffix)] == "niss"):
word = word[:-len(suffix)-1]
r1 = r1[:-len(suffix)-1]
r2 = r2[:-len(suffix)-1]
elif suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "st":
if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 3: Derivational suffixes
for suffix in self.__step3_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ung"):
if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
"e" not in r2[-len(suffix)-3:-len(suffix)-2]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif (suffix in ("ig", "ik", "isch") and
"e" not in r2[-len(suffix)-1:-len(suffix)]):
word = word[:-len(suffix)]
elif suffix in ("lich", "heit"):
if ("er" in r1[-len(suffix)-2:-len(suffix)] or
"en" in r1[-len(suffix)-2:-len(suffix)]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif suffix == "keit":
if "lich" in r2[-len(suffix)-4:-len(suffix)]:
word = word[:-len(suffix)-4]
elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
break
# Umlaut accents are removed and
# 'u' and 'y' are put back into lower case.
word = (word.replace("\xE4", "a").replace("\xF6", "o")
.replace("\xFC", "u").replace("U", "u")
.replace("Y", "y"))
return word
class HungarianStemmer(_LanguageSpecificStemmer):
"""
The Hungarian Snowball stemmer.
:cvar __vowels: The Hungarian vowels.
:type __vowels: unicode
:cvar __digraphs: The Hungarian digraphs.
:type __digraphs: tuple
:cvar __double_consonants: The Hungarian double consonants.
:type __double_consonants: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
:type __step5_suffixes: tuple
:cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm.
:type __step6_suffixes: tuple
:cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm.
:type __step7_suffixes: tuple
:cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm.
:type __step8_suffixes: tuple
:cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm.
:type __step9_suffixes: tuple
:note: A detailed description of the Hungarian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/hungarian/stemmer.html
"""
__vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB"
__digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs")
__double_consonants = ("bb", "cc", "ccs", "dd", "ff", "gg",
"ggy", "jj", "kk", "ll", "lly", "mm",
"nn", "nny", "pp", "rr", "ss", "ssz",
"tt", "tty", "vv", "zz", "zzs")
__step1_suffixes = ("al", "el")
__step2_suffixes = ('k\xE9ppen', 'onk\xE9nt', 'enk\xE9nt',
'ank\xE9nt', 'k\xE9pp', 'k\xE9nt', 'ban',
'ben', 'nak', 'nek', 'val', 'vel', 't\xF3l',
't\xF5l', 'r\xF3l', 'r\xF5l', 'b\xF3l',
'b\xF5l', 'hoz', 'hez', 'h\xF6z',
'n\xE1l', 'n\xE9l', '\xE9rt', 'kor',
'ba', 'be', 'ra', 're', 'ig', 'at', 'et',
'ot', '\xF6t', 'ul', '\xFCl', 'v\xE1',
'v\xE9', 'en', 'on', 'an', '\xF6n',
'n', 't')
__step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n")
__step4_suffixes = ('astul', 'est\xFCl', '\xE1stul',
'\xE9st\xFCl', 'stul', 'st\xFCl')
__step5_suffixes = ("\xE1", "\xE9")
__step6_suffixes = ('ok\xE9', '\xF6k\xE9', 'ak\xE9',
'ek\xE9', '\xE1k\xE9', '\xE1\xE9i',
'\xE9k\xE9', '\xE9\xE9i', 'k\xE9',
'\xE9i', '\xE9\xE9', '\xE9')
__step7_suffixes = ('\xE1juk', '\xE9j\xFCk', '\xFCnk',
'unk', 'juk', 'j\xFCk', '\xE1nk',
'\xE9nk', 'nk', 'uk', '\xFCk', 'em',
'om', 'am', 'od', 'ed', 'ad', '\xF6d',
'ja', 'je', '\xE1m', '\xE1d', '\xE9m',
'\xE9d', 'm', 'd', 'a', 'e', 'o',
'\xE1', '\xE9')
__step8_suffixes = ('jaitok', 'jeitek', 'jaink', 'jeink', 'aitok',
'eitek', '\xE1itok', '\xE9itek', 'jaim',
'jeim', 'jaid', 'jeid', 'eink', 'aink',
'itek', 'jeik', 'jaik', '\xE1ink',
'\xE9ink', 'aim', 'eim', 'aid', 'eid',
'jai', 'jei', 'ink', 'aik', 'eik',
'\xE1im', '\xE1id', '\xE1ik', '\xE9im',
'\xE9id', '\xE9ik', 'im', 'id', 'ai',
'ei', 'ik', '\xE1i', '\xE9i', 'i')
__step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok",
"ek", "ak", "k")
def stem(self, word):
"""
        Stem a Hungarian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)
# STEP 1: Remove instrumental case
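        # The suffix 'al'/'el' is removed only when it is preceded by a
        # double consonant, which is undoubled at the same time
        # (e.g. "fallal" -> "fal").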
if r1.endswith(self.__step1_suffixes):
for double_cons in self.__double_consonants:
if word[-2-len(double_cons):-2] == double_cons:
word = "".join((word[:-4], word[-3]))
if r1[-2-len(double_cons):-2] == double_cons:
r1 = "".join((r1[:-4], r1[-3]))
break
# STEP 2: Remove frequent cases
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
if r1.endswith("\xE1"):
word = "".join((word[:-1], "a"))
r1 = suffix_replace(r1, "\xE1", "a")
elif r1.endswith("\xE9"):
word = "".join((word[:-1], "e"))
r1 = suffix_replace(r1, "\xE9", "e")
break
# STEP 3: Remove special cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix == "\xE9n":
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
break
# STEP 4: Remove other cases
for suffix in self.__step4_suffixes:
if r1.endswith(suffix):
if suffix == "\xE1stul":
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix == "\xE9st\xFCl":
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 5: Remove factive case
for suffix in self.__step5_suffixes:
if r1.endswith(suffix):
for double_cons in self.__double_consonants:
if word[-1-len(double_cons):-1] == double_cons:
word = "".join((word[:-3], word[-2]))
if r1[-1-len(double_cons):-1] == double_cons:
r1 = "".join((r1[:-3], r1[-2]))
break
# STEP 6: Remove owned
for suffix in self.__step6_suffixes:
if r1.endswith(suffix):
if suffix in ("\xE1k\xE9", "\xE1\xE9i"):
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix in ("\xE9k\xE9", "\xE9\xE9i",
"\xE9\xE9"):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 7: Remove singular owner suffixes
for suffix in self.__step7_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1nk", "\xE1juk", "\xE1m",
"\xE1d", "\xE1"):
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix in ("\xE9nk", "\xE9j\xFCk",
"\xE9m", "\xE9d", "\xE9"):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 8: Remove plural owner suffixes
for suffix in self.__step8_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1im", "\xE1id", "\xE1i",
"\xE1ink", "\xE1itok", "\xE1ik"):
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix in ("\xE9im", "\xE9id", "\xE9i",
"\xE9ink", "\xE9itek", "\xE9ik"):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 9: Remove plural suffixes
for suffix in self.__step9_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "\xE1k":
word = suffix_replace(word, suffix, "a")
elif suffix == "\xE9k":
word = suffix_replace(word, suffix, "e")
else:
word = word[:-len(suffix)]
break
return word
def __r1_hungarian(self, word, vowels, digraphs):
"""
Return the region R1 that is used by the Hungarian stemmer.
        If the word begins with a vowel, R1 is defined as the region
        after the first consonant or digraph (i.e. two letters standing
        for one phoneme) in the word. If the word begins with a
        consonant, it is defined as the region after the first vowel in
        the word. If the word does not contain both a vowel and a
        consonant, R1 is the null region at the end of the word.
:param word: The Hungarian word whose region R1 is determined.
:type word: str or unicode
:param vowels: The Hungarian vowels that are used to determine
the region R1.
:type vowels: unicode
:param digraphs: The digraphs that are used to determine the
region R1.
:type digraphs: tuple
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
HungarianStemmer. It is not to be invoked directly!
"""
r1 = ""
if word[0] in vowels:
for digraph in digraphs:
if digraph in word[1:]:
r1 = word[word.index(digraph[-1])+1:]
return r1
for i in range(1, len(word)):
if word[i] not in vowels:
r1 = word[i+1:]
break
else:
for i in range(1, len(word)):
if word[i] in vowels:
r1 = word[i+1:]
break
return r1
class ItalianStemmer(_StandardStemmer):
"""
The Italian Snowball stemmer.
:cvar __vowels: The Italian vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:note: A detailed description of the Italian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/italian/stemmer.html
"""
__vowels = "aeiou\xE0\xE8\xEC\xF2\xF9"
__step0_suffixes = ('gliela', 'gliele', 'glieli', 'glielo',
'gliene', 'sene', 'mela', 'mele', 'meli',
'melo', 'mene', 'tela', 'tele', 'teli',
'telo', 'tene', 'cela', 'cele', 'celi',
'celo', 'cene', 'vela', 'vele', 'veli',
'velo', 'vene', 'gli', 'ci', 'la', 'le',
'li', 'lo', 'mi', 'ne', 'si', 'ti', 'vi')
__step1_suffixes = ('atrice', 'atrici', 'azione', 'azioni',
'uzione', 'uzioni', 'usione', 'usioni',
'amento', 'amenti', 'imento', 'imenti',
'amente', 'abile', 'abili', 'ibile', 'ibili',
'mente', 'atore', 'atori', 'logia', 'logie',
'anza', 'anze', 'iche', 'ichi', 'ismo',
'ismi', 'ista', 'iste', 'isti', 'ist\xE0',
'ist\xE8', 'ist\xEC', 'ante', 'anti',
'enza', 'enze', 'ico', 'ici', 'ica', 'ice',
'oso', 'osi', 'osa', 'ose', 'it\xE0',
'ivo', 'ivi', 'iva', 'ive')
__step2_suffixes = ('erebbero', 'irebbero', 'assero', 'assimo',
'eranno', 'erebbe', 'eremmo', 'ereste',
'eresti', 'essero', 'iranno', 'irebbe',
'iremmo', 'ireste', 'iresti', 'iscano',
'iscono', 'issero', 'arono', 'avamo', 'avano',
'avate', 'eremo', 'erete', 'erono', 'evamo',
'evano', 'evate', 'iremo', 'irete', 'irono',
'ivamo', 'ivano', 'ivate', 'ammo', 'ando',
'asse', 'assi', 'emmo', 'enda', 'ende',
'endi', 'endo', 'erai', 'erei', 'Yamo',
'iamo', 'immo', 'irai', 'irei', 'isca',
'isce', 'isci', 'isco', 'ano', 'are', 'ata',
'ate', 'ati', 'ato', 'ava', 'avi', 'avo',
'er\xE0', 'ere', 'er\xF2', 'ete', 'eva',
'evi', 'evo', 'ir\xE0', 'ire', 'ir\xF2',
'ita', 'ite', 'iti', 'ito', 'iva', 'ivi',
'ivo', 'ono', 'uta', 'ute', 'uti', 'uto',
'ar', 'ir')
def stem(self, word):
"""
Stem an Italian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
# All acute accents are replaced by grave accents.
word = (word.replace("\xE1", "\xE0")
.replace("\xE9", "\xE8")
.replace("\xED", "\xEC")
.replace("\xF3", "\xF2")
.replace("\xFA", "\xF9"))
# Every occurrence of 'u' after 'q'
# is put into upper case.
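        # e.g. "quadro" becomes "qUadro".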
for i in range(1, len(word)):
if word[i-1] == "q" and word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
# Every occurrence of 'u' and 'i'
# between vowels is put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "i":
word = "".join((word[:i], "I", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Attached pronoun
for suffix in self.__step0_suffixes:
if rv.endswith(suffix):
if rv[-len(suffix)-4:-len(suffix)] in ("ando", "endo"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
elif (rv[-len(suffix)-2:-len(suffix)] in
("ar", "er", "ir")):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
r2 = suffix_replace(r2, suffix, "e")
rv = suffix_replace(rv, suffix, "e")
break
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic")):
word = word[:-2]
rv = rv[:-2]
                    elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif (suffix in ("amento", "amenti",
"imento", "imenti") and
rv.endswith(suffix)):
step1_success = True
word = word[:-6]
rv = rv[:-6]
elif r2.endswith(suffix):
step1_success = True
if suffix in ("azione", "azioni", "atore", "atori"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
elif suffix in ("logia", "logie"):
word = word[:-2]
                        rv = rv[:-2]
elif suffix in ("uzione", "uzioni",
"usione", "usioni"):
word = word[:-5]
rv = rv[:-5]
elif suffix in ("enza", "enze"):
word = suffix_replace(word, suffix, "te")
rv = suffix_replace(rv, suffix, "te")
elif suffix == "it\xE0":
word = word[:-3]
r2 = r2[:-3]
rv = rv[:-3]
if r2.endswith(("ic", "iv")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("ivo", "ivi", "iva", "ive"):
word = word[:-3]
r2 = r2[:-3]
rv = rv[:-3]
if r2.endswith("at"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2: Verb suffixes
if not step1_success:
for suffix in self.__step2_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 3a
if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8",
"\xEC", "\xF2")):
word = word[:-1]
rv = rv[:-1]
if rv.endswith("i"):
word = word[:-1]
rv = rv[:-1]
# STEP 3b
if rv.endswith(("ch", "gh")):
word = word[:-1]
word = word.replace("I", "i").replace("U", "u")
return word
class NorwegianStemmer(_ScandinavianStemmer):
"""
The Norwegian Snowball stemmer.
:cvar __vowels: The Norwegian vowels.
:type __vowels: unicode
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Norwegian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/norwegian/stemmer.html
    """
__vowels = "aeiouy\xE6\xE5\xF8"
__s_ending = "bcdfghjlmnoprtvyz"
__step1_suffixes = ("hetenes", "hetene", "hetens", "heter",
"heten", "endes", "ande", "ende", "edes",
"enes", "erte", "ede", "ane", "ene", "ens",
"ers", "ets", "het", "ast", "ert", "en",
"ar", "er", "as", "es", "et", "a", "e", "s")
__step2_suffixes = ("dt", "vt")
__step3_suffixes = ("hetslov", "eleg", "elig", "elov", "slov",
"leg", "eig", "lig", "els", "lov", "ig")
def stem(self, word):
"""
Stem a Norwegian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
r1 = self._r1_scandinavian(word, self.__vowels)
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix in ("erte", "ert"):
word = suffix_replace(word, suffix, "er")
r1 = suffix_replace(r1, suffix, "er")
elif suffix == "s":
if (word[-2] in self.__s_ending or
(word[-2] == "k" and word[-3] not in self.__vowels)):
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 3
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
word = word[:-len(suffix)]
break
return word
class PortugueseStemmer(_StandardStemmer):
"""
The Portuguese Snowball stemmer.
:cvar __vowels: The Portuguese vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the Portuguese
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/portuguese/stemmer.html
"""
__vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4"
__step1_suffixes = ('amentos', 'imentos', 'uço~es', 'amento',
'imento', 'adoras', 'adores', 'a\xE7o~es',
'logias', '\xEAncias', 'amente',
'idades', 'an\xE7as', 'ismos', 'istas', 'adora',
'a\xE7a~o', 'antes', '\xE2ncia',
'logia', 'uça~o', '\xEAncia',
'mente', 'idade', 'an\xE7a', 'ezas', 'icos', 'icas',
'ismo', '\xE1vel', '\xEDvel', 'ista',
'osos', 'osas', 'ador', 'ante', 'ivas',
'ivos', 'iras', 'eza', 'ico', 'ica',
'oso', 'osa', 'iva', 'ivo', 'ira')
__step2_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
'\xE1ssemos', '\xEAssemos', '\xEDssemos',
'ar\xEDeis', 'er\xEDeis', 'ir\xEDeis',
'\xE1sseis', '\xE9sseis', '\xEDsseis',
'\xE1ramos', '\xE9ramos', '\xEDramos',
'\xE1vamos', 'aremos', 'eremos', 'iremos',
'ariam', 'eriam', 'iriam', 'assem', 'essem',
'issem', 'ara~o', 'era~o', 'ira~o', 'arias',
'erias', 'irias', 'ardes', 'erdes', 'irdes',
'asses', 'esses', 'isses', 'astes', 'estes',
'istes', '\xE1reis', 'areis', '\xE9reis',
'ereis', '\xEDreis', 'ireis', '\xE1veis',
'\xEDamos', 'armos', 'ermos', 'irmos',
'aria', 'eria', 'iria', 'asse', 'esse',
'isse', 'aste', 'este', 'iste', 'arei',
'erei', 'irei', 'aram', 'eram', 'iram',
'avam', 'arem', 'erem', 'irem',
'ando', 'endo', 'indo', 'adas', 'idas',
'ar\xE1s', 'aras', 'er\xE1s', 'eras',
'ir\xE1s', 'avas', 'ares', 'eres', 'ires',
'\xEDeis', 'ados', 'idos', '\xE1mos',
'amos', 'emos', 'imos', 'iras', 'ada', 'ida',
'ar\xE1', 'ara', 'er\xE1', 'era',
'ir\xE1', 'ava', 'iam', 'ado', 'ido',
'ias', 'ais', 'eis', 'ira', 'ia', 'ei', 'am',
'em', 'ar', 'er', 'ir', 'as',
'es', 'is', 'eu', 'iu', 'ou')
__step4_suffixes = ("os", "a", "i", "o", "\xE1",
"\xED", "\xF3")
def stem(self, word):
"""
Stem a Portuguese word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
step2_success = False
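        # The nasalised vowels are temporarily written as 'a~' and 'o~'
        # so that the tilde can be handled like an ordinary letter;
        # they are restored at the end of the stemming process.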
word = (word.replace("\xE3", "a~")
.replace("\xF5", "o~")
.replace("q\xFC", "qu")
.replace("g\xFC", "gu"))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic", "ad")):
word = word[:-2]
rv = rv[:-2]
elif (suffix in ("ira", "iras") and rv.endswith(suffix) and
word[-len(suffix)-1:-len(suffix)] == "e"):
step1_success = True
word = suffix_replace(word, suffix, "ir")
rv = suffix_replace(rv, suffix, "ir")
elif r2.endswith(suffix):
step1_success = True
if suffix in ("logia", "logias"):
word = suffix_replace(word, suffix, "log")
rv = suffix_replace(rv, suffix, "log")
elif suffix in ("uça~o", "uço~es"):
word = suffix_replace(word, suffix, "u")
rv = suffix_replace(rv, suffix, "u")
elif suffix in ("\xEAncia", "\xEAncias"):
word = suffix_replace(word, suffix, "ente")
rv = suffix_replace(rv, suffix, "ente")
elif suffix == "mente":
word = word[:-5]
r2 = r2[:-5]
rv = rv[:-5]
if r2.endswith(("ante", "avel", "ivel")):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("idade", "idades"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith(("ic", "iv")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("iva", "ivo", "ivas", "ivos"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2: Verb suffixes
if not step1_success:
for suffix in self.__step2_suffixes:
if rv.endswith(suffix):
step2_success = True
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 3
if step1_success or step2_success:
if rv.endswith("i") and word[-2] == "c":
word = word[:-1]
rv = rv[:-1]
        # STEP 4: Residual suffix
if not step1_success and not step2_success:
for suffix in self.__step4_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 5
if rv.endswith(("e", "\xE9", "\xEA")):
word = word[:-1]
rv = rv[:-1]
if ((word.endswith("gu") and rv.endswith("u")) or
(word.endswith("ci") and rv.endswith("i"))):
word = word[:-1]
elif word.endswith("\xE7"):
word = suffix_replace(word, "\xE7", "c")
word = word.replace("a~", "\xE3").replace("o~", "\xF5")
return word
class RomanianStemmer(_StandardStemmer):
"""
The Romanian Snowball stemmer.
:cvar __vowels: The Romanian vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Romanian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/romanian/stemmer.html
"""
__vowels = "aeiou\u0103\xE2\xEE"
__step0_suffixes = ('iilor', 'ului', 'elor', 'iile', 'ilor',
'atei', 'a\u0163ie', 'a\u0163ia', 'aua',
'ele', 'iua', 'iei', 'ile', 'ul', 'ea',
'ii')
__step1_suffixes = ('abilitate', 'abilitati', 'abilit\u0103\u0163i',
'ibilitate', 'abilit\u0103i', 'ivitate',
'ivitati', 'ivit\u0103\u0163i', 'icitate',
'icitati', 'icit\u0103\u0163i', 'icatori',
'ivit\u0103i', 'icit\u0103i', 'icator',
'a\u0163iune', 'atoare', '\u0103toare',
'i\u0163iune', 'itoare', 'iciva', 'icive',
'icivi', 'iciv\u0103', 'icala', 'icale',
'icali', 'ical\u0103', 'ativa', 'ative',
'ativi', 'ativ\u0103', 'atori', '\u0103tori',
'itiva', 'itive', 'itivi', 'itiv\u0103',
'itori', 'iciv', 'ical', 'ativ', 'ator',
'\u0103tor', 'itiv', 'itor')
__step2_suffixes = ('abila', 'abile', 'abili', 'abil\u0103',
'ibila', 'ibile', 'ibili', 'ibil\u0103',
'atori', 'itate', 'itati', 'it\u0103\u0163i',
'abil', 'ibil', 'oasa', 'oas\u0103', 'oase',
'anta', 'ante', 'anti', 'ant\u0103', 'ator',
'it\u0103i', 'iune', 'iuni', 'isme', 'ista',
'iste', 'isti', 'ist\u0103', 'i\u015Fti',
'ata', 'at\u0103', 'ati', 'ate', 'uta',
'ut\u0103', 'uti', 'ute', 'ita', 'it\u0103',
'iti', 'ite', 'ica', 'ice', 'ici', 'ic\u0103',
'osi', 'o\u015Fi', 'ant', 'iva', 'ive', 'ivi',
'iv\u0103', 'ism', 'ist', 'at', 'ut', 'it',
'ic', 'os', 'iv')
__step3_suffixes = ('seser\u0103\u0163i', 'aser\u0103\u0163i',
'iser\u0103\u0163i', '\xE2ser\u0103\u0163i',
'user\u0103\u0163i', 'seser\u0103m',
'aser\u0103m', 'iser\u0103m', '\xE2ser\u0103m',
'user\u0103m', 'ser\u0103\u0163i', 'sese\u015Fi',
'seser\u0103', 'easc\u0103', 'ar\u0103\u0163i',
'ur\u0103\u0163i', 'ir\u0103\u0163i',
'\xE2r\u0103\u0163i', 'ase\u015Fi',
'aser\u0103', 'ise\u015Fi', 'iser\u0103',
'\xe2se\u015Fi', '\xE2ser\u0103',
'use\u015Fi', 'user\u0103', 'ser\u0103m',
'sesem', 'indu', '\xE2ndu', 'eaz\u0103',
'e\u015Fti', 'e\u015Fte', '\u0103\u015Fti',
'\u0103\u015Fte', 'ea\u0163i', 'ia\u0163i',
'ar\u0103m', 'ur\u0103m', 'ir\u0103m',
'\xE2r\u0103m', 'asem', 'isem',
'\xE2sem', 'usem', 'se\u015Fi', 'ser\u0103',
'sese', 'are', 'ere', 'ire', '\xE2re',
'ind', '\xE2nd', 'eze', 'ezi', 'esc',
'\u0103sc', 'eam', 'eai', 'eau', 'iam',
'iai', 'iau', 'a\u015Fi', 'ar\u0103',
'u\u015Fi', 'ur\u0103', 'i\u015Fi', 'ir\u0103',
'\xE2\u015Fi', '\xe2r\u0103', 'ase',
'ise', '\xE2se', 'use', 'a\u0163i',
'e\u0163i', 'i\u0163i', '\xe2\u0163i', 'sei',
'ez', 'am', 'ai', 'au', 'ea', 'ia', 'ui',
'\xE2i', '\u0103m', 'em', 'im', '\xE2m',
'se')
def stem(self, word):
"""
Stem a Romanian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
step2_success = False
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "i":
word = "".join((word[:i], "I", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Removal of plurals and other simplifications
for suffix in self.__step0_suffixes:
if word.endswith(suffix):
if suffix in r1:
if suffix in ("ul", "ului"):
word = word[:-len(suffix)]
if suffix in rv:
rv = rv[:-len(suffix)]
else:
rv = ""
elif (suffix == "aua" or suffix == "atei" or
(suffix == "ile" and word[-5:-3] != "ab")):
word = word[:-2]
elif suffix in ("ea", "ele", "elor"):
word = suffix_replace(word, suffix, "e")
if suffix in rv:
rv = suffix_replace(rv, suffix, "e")
else:
rv = ""
elif suffix in ("ii", "iua", "iei",
"iile", "iilor", "ilor"):
word = suffix_replace(word, suffix, "i")
if suffix in rv:
rv = suffix_replace(rv, suffix, "i")
else:
rv = ""
elif suffix in ("a\u0163ie", "a\u0163ia"):
word = word[:-1]
break
# STEP 1: Reduction of combining suffixes
while True:
replacement_done = False
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix in r1:
step1_success = True
replacement_done = True
if suffix in ("abilitate", "abilitati",
"abilit\u0103i",
"abilit\u0103\u0163i"):
word = suffix_replace(word, suffix, "abil")
elif suffix == "ibilitate":
word = word[:-5]
elif suffix in ("ivitate", "ivitati",
"ivit\u0103i",
"ivit\u0103\u0163i"):
word = suffix_replace(word, suffix, "iv")
elif suffix in ("icitate", "icitati", "icit\u0103i",
"icit\u0103\u0163i", "icator",
"icatori", "iciv", "iciva",
"icive", "icivi", "iciv\u0103",
"ical", "icala", "icale", "icali",
"ical\u0103"):
word = suffix_replace(word, suffix, "ic")
elif suffix in ("ativ", "ativa", "ative", "ativi",
"ativ\u0103", "a\u0163iune",
"atoare", "ator", "atori",
"\u0103toare",
"\u0103tor", "\u0103tori"):
word = suffix_replace(word, suffix, "at")
if suffix in r2:
r2 = suffix_replace(r2, suffix, "at")
elif suffix in ("itiv", "itiva", "itive", "itivi",
"itiv\u0103", "i\u0163iune",
"itoare", "itor", "itori"):
word = suffix_replace(word, suffix, "it")
if suffix in r2:
r2 = suffix_replace(r2, suffix, "it")
else:
step1_success = False
break
if not replacement_done:
break
# STEP 2: Removal of standard suffixes
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if suffix in r2:
step2_success = True
if suffix in ("iune", "iuni"):
if word[-5] == "\u0163":
word = "".join((word[:-5], "t"))
elif suffix in ("ism", "isme", "ist", "ista", "iste",
"isti", "ist\u0103", "i\u015Fti"):
word = suffix_replace(word, suffix, "ist")
else:
word = word[:-len(suffix)]
break
# STEP 3: Removal of verb suffixes
if not step1_success and not step2_success:
for suffix in self.__step3_suffixes:
if word.endswith(suffix):
if suffix in rv:
if suffix in ('seser\u0103\u0163i', 'seser\u0103m',
'ser\u0103\u0163i', 'sese\u015Fi',
'seser\u0103', 'ser\u0103m', 'sesem',
'se\u015Fi', 'ser\u0103', 'sese',
'a\u0163i', 'e\u0163i', 'i\u0163i',
'\xE2\u0163i', 'sei', '\u0103m',
'em', 'im', '\xE2m', 'se'):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
else:
if (not rv.startswith(suffix) and
rv[rv.index(suffix)-1] not in
"aeio\u0103\xE2\xEE"):
word = word[:-len(suffix)]
break
# STEP 4: Removal of final vowel
for suffix in ("ie", "a", "e", "i", "\u0103"):
if word.endswith(suffix):
if suffix in rv:
word = word[:-len(suffix)]
break
word = word.replace("I", "i").replace("U", "u")
return word
class RussianStemmer(_LanguageSpecificStemmer):
"""
The Russian Snowball stemmer.
:cvar __perfective_gerund_suffixes: Suffixes to be deleted.
:type __perfective_gerund_suffixes: tuple
:cvar __adjectival_suffixes: Suffixes to be deleted.
:type __adjectival_suffixes: tuple
:cvar __reflexive_suffixes: Suffixes to be deleted.
:type __reflexive_suffixes: tuple
:cvar __verb_suffixes: Suffixes to be deleted.
:type __verb_suffixes: tuple
:cvar __noun_suffixes: Suffixes to be deleted.
:type __noun_suffixes: tuple
:cvar __superlative_suffixes: Suffixes to be deleted.
:type __superlative_suffixes: tuple
:cvar __derivational_suffixes: Suffixes to be deleted.
:type __derivational_suffixes: tuple
:note: A detailed description of the Russian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/russian/stemmer.html
"""
__perfective_gerund_suffixes = ("ivshis'", "yvshis'", "vshis'",
"ivshi", "yvshi", "vshi", "iv",
"yv", "v")
__adjectival_suffixes = ('ui^ushchi^ui^u', 'ui^ushchi^ai^a',
'ui^ushchimi', 'ui^ushchymi', 'ui^ushchego',
'ui^ushchogo', 'ui^ushchemu', 'ui^ushchomu',
'ui^ushchikh', 'ui^ushchykh',
'ui^ushchui^u', 'ui^ushchaia',
'ui^ushchoi^u', 'ui^ushchei^u',
'i^ushchi^ui^u', 'i^ushchi^ai^a',
'ui^ushchee', 'ui^ushchie',
'ui^ushchye', 'ui^ushchoe', 'ui^ushchei`',
'ui^ushchii`', 'ui^ushchyi`',
'ui^ushchoi`', 'ui^ushchem', 'ui^ushchim',
'ui^ushchym', 'ui^ushchom', 'i^ushchimi',
'i^ushchymi', 'i^ushchego', 'i^ushchogo',
'i^ushchemu', 'i^ushchomu', 'i^ushchikh',
'i^ushchykh', 'i^ushchui^u', 'i^ushchai^a',
'i^ushchoi^u', 'i^ushchei^u', 'i^ushchee',
'i^ushchie', 'i^ushchye', 'i^ushchoe',
'i^ushchei`', 'i^ushchii`',
'i^ushchyi`', 'i^ushchoi`', 'i^ushchem',
'i^ushchim', 'i^ushchym', 'i^ushchom',
'shchi^ui^u', 'shchi^ai^a', 'ivshi^ui^u',
'ivshi^ai^a', 'yvshi^ui^u', 'yvshi^ai^a',
'shchimi', 'shchymi', 'shchego', 'shchogo',
'shchemu', 'shchomu', 'shchikh', 'shchykh',
'shchui^u', 'shchai^a', 'shchoi^u',
'shchei^u', 'ivshimi', 'ivshymi',
'ivshego', 'ivshogo', 'ivshemu', 'ivshomu',
'ivshikh', 'ivshykh', 'ivshui^u',
'ivshai^a', 'ivshoi^u', 'ivshei^u',
'yvshimi', 'yvshymi', 'yvshego', 'yvshogo',
'yvshemu', 'yvshomu', 'yvshikh', 'yvshykh',
'yvshui^u', 'yvshai^a', 'yvshoi^u',
'yvshei^u', 'vshi^ui^u', 'vshi^ai^a',
'shchee', 'shchie', 'shchye', 'shchoe',
'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
'shchem', 'shchim', 'shchym', 'shchom',
'ivshee', 'ivshie', 'ivshye', 'ivshoe',
'ivshei`', 'ivshii`', 'ivshyi`',
'ivshoi`', 'ivshem', 'ivshim', 'ivshym',
'ivshom', 'yvshee', 'yvshie', 'yvshye',
'yvshoe', 'yvshei`', 'yvshii`',
'yvshyi`', 'yvshoi`', 'yvshem',
'yvshim', 'yvshym', 'yvshom', 'vshimi',
'vshymi', 'vshego', 'vshogo', 'vshemu',
'vshomu', 'vshikh', 'vshykh', 'vshui^u',
'vshai^a', 'vshoi^u', 'vshei^u',
'emi^ui^u', 'emi^ai^a', 'nni^ui^u',
'nni^ai^a', 'vshee',
'vshie', 'vshye', 'vshoe', 'vshei`',
'vshii`', 'vshyi`', 'vshoi`',
'vshem', 'vshim', 'vshym', 'vshom',
'emimi', 'emymi', 'emego', 'emogo',
'ememu', 'emomu', 'emikh', 'emykh',
'emui^u', 'emai^a', 'emoi^u', 'emei^u',
'nnimi', 'nnymi', 'nnego', 'nnogo',
'nnemu', 'nnomu', 'nnikh', 'nnykh',
'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
'emee', 'emie', 'emye', 'emoe',
'emei`', 'emii`', 'emyi`',
'emoi`', 'emem', 'emim', 'emym',
'emom', 'nnee', 'nnie', 'nnye', 'nnoe',
'nnei`', 'nnii`', 'nnyi`',
'nnoi`', 'nnem', 'nnim', 'nnym',
'nnom', 'i^ui^u', 'i^ai^a', 'imi', 'ymi',
'ego', 'ogo', 'emu', 'omu', 'ikh',
'ykh', 'ui^u', 'ai^a', 'oi^u', 'ei^u',
'ee', 'ie', 'ye', 'oe', 'ei`',
'ii`', 'yi`', 'oi`', 'em',
'im', 'ym', 'om')
__reflexive_suffixes = ("si^a", "s'")
__verb_suffixes = ("esh'", 'ei`te', 'ui`te', 'ui^ut',
"ish'", 'ete', 'i`te', 'i^ut', 'nno',
'ila', 'yla', 'ena', 'ite', 'ili', 'yli',
'ilo', 'ylo', 'eno', 'i^at', 'uet', 'eny',
"it'", "yt'", 'ui^u', 'la', 'na', 'li',
'em', 'lo', 'no', 'et', 'ny', "t'",
'ei`', 'ui`', 'il', 'yl', 'im',
'ym', 'en', 'it', 'yt', 'i^u', 'i`',
'l', 'n')
__noun_suffixes = ('ii^ami', 'ii^akh', 'i^ami', 'ii^am', 'i^akh',
'ami', 'iei`', 'i^am', 'iem', 'akh',
'ii^u', "'i^u", 'ii^a', "'i^a", 'ev', 'ov',
'ie', "'e", 'ei', 'ii', 'ei`',
'oi`', 'ii`', 'em', 'am', 'om',
'i^u', 'i^a', 'a', 'e', 'i', 'i`',
'o', 'u', 'y', "'")
__superlative_suffixes = ("ei`she", "ei`sh")
__derivational_suffixes = ("ost'", "ost")
def stem(self, word):
"""
Stem a Russian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
if word in self.stopwords:
return word
chr_exceeded = False
for i in range(len(word)):
if ord(word[i]) > 255:
chr_exceeded = True
break
if chr_exceeded:
word = self.__cyrillic_to_roman(word)
step1_success = False
adjectival_removed = False
verb_removed = False
undouble_success = False
superlative_removed = False
rv, r2 = self.__regions_russian(word)
# Step 1
for suffix in self.__perfective_gerund_suffixes:
if rv.endswith(suffix):
if suffix in ("v", "vshi", "vshis'"):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
step1_success = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
step1_success = True
break
if not step1_success:
for suffix in self.__reflexive_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
break
for suffix in self.__adjectival_suffixes:
if rv.endswith(suffix):
if suffix in ('i^ushchi^ui^u', 'i^ushchi^ai^a',
'i^ushchui^u', 'i^ushchai^a', 'i^ushchoi^u',
'i^ushchei^u', 'i^ushchimi', 'i^ushchymi',
'i^ushchego', 'i^ushchogo', 'i^ushchemu',
'i^ushchomu', 'i^ushchikh', 'i^ushchykh',
'shchi^ui^u', 'shchi^ai^a', 'i^ushchee',
'i^ushchie', 'i^ushchye', 'i^ushchoe',
'i^ushchei`', 'i^ushchii`', 'i^ushchyi`',
'i^ushchoi`', 'i^ushchem', 'i^ushchim',
'i^ushchym', 'i^ushchom', 'vshi^ui^u',
'vshi^ai^a', 'shchui^u', 'shchai^a',
'shchoi^u', 'shchei^u', 'emi^ui^u',
'emi^ai^a', 'nni^ui^u', 'nni^ai^a',
'shchimi', 'shchymi', 'shchego', 'shchogo',
'shchemu', 'shchomu', 'shchikh', 'shchykh',
'vshui^u', 'vshai^a', 'vshoi^u', 'vshei^u',
'shchee', 'shchie', 'shchye', 'shchoe',
'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
'shchem', 'shchim', 'shchym', 'shchom',
'vshimi', 'vshymi', 'vshego', 'vshogo',
'vshemu', 'vshomu', 'vshikh', 'vshykh',
'emui^u', 'emai^a', 'emoi^u', 'emei^u',
'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
'vshee', 'vshie', 'vshye', 'vshoe',
'vshei`', 'vshii`', 'vshyi`', 'vshoi`',
'vshem', 'vshim', 'vshym', 'vshom',
'emimi', 'emymi', 'emego', 'emogo',
'ememu', 'emomu', 'emikh', 'emykh',
'nnimi', 'nnymi', 'nnego', 'nnogo',
'nnemu', 'nnomu', 'nnikh', 'nnykh',
'emee', 'emie', 'emye', 'emoe', 'emei`',
'emii`', 'emyi`', 'emoi`', 'emem', 'emim',
'emym', 'emom', 'nnee', 'nnie', 'nnye',
'nnoe', 'nnei`', 'nnii`', 'nnyi`', 'nnoi`',
'nnem', 'nnim', 'nnym', 'nnom'):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
adjectival_removed = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
adjectival_removed = True
break
if not adjectival_removed:
for suffix in self.__verb_suffixes:
if rv.endswith(suffix):
if suffix in ("la", "na", "ete", "i`te", "li",
"i`", "l", "em", "n", "lo", "no",
"et", "i^ut", "ny", "t'", "esh'",
"nno"):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
verb_removed = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
verb_removed = True
break
if not adjectival_removed and not verb_removed:
for suffix in self.__noun_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# Step 2
if rv.endswith("i"):
word = word[:-1]
r2 = r2[:-1]
# Step 3
for suffix in self.__derivational_suffixes:
if r2.endswith(suffix):
word = word[:-len(suffix)]
break
# Step 4
if word.endswith("nn"):
word = word[:-1]
undouble_success = True
if not undouble_success:
for suffix in self.__superlative_suffixes:
if word.endswith(suffix):
word = word[:-len(suffix)]
superlative_removed = True
break
if word.endswith("nn"):
word = word[:-1]
if not undouble_success and not superlative_removed:
if word.endswith("'"):
word = word[:-1]
if chr_exceeded:
word = self.__roman_to_cyrillic(word)
return word
def __regions_russian(self, word):
"""
Return the regions RV and R2 which are used by the Russian stemmer.
In any word, RV is the region after the first vowel,
or the end of the word if it contains no vowel.
R1 is the region after the first non-vowel following a vowel,
or the end of the word if there is no such non-vowel.
R2 is the region after the first non-vowel following
a vowel in R1, or the end of the word if there is no such non-vowel.
:param word: The Russian word whose regions RV and R2 are determined.
:type word: str or unicode
:return: the regions RV and R2 for the respective Russian word.
:rtype: tuple
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
r1 = ""
r2 = ""
rv = ""
vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
word = (word.replace("i^a", "A")
.replace("i^u", "U")
.replace("e`", "E"))
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
r1 = word[i+1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i-1] in vowels:
r2 = r1[i+1:]
break
for i in range(len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
r2 = (r2.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
rv = (rv.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
return (rv, r2)
def __cyrillic_to_roman(self, word):
"""
Transliterate a Russian word into the Roman alphabet.
A Russian word written in the Cyrillic alphabet is
transliterated into the Roman alphabet in order to
ease the forthcoming stemming process.
:param word: The word that is transliterated.
:type word: unicode
:return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("\u0410", "a").replace("\u0430", "a")
.replace("\u0411", "b").replace("\u0431", "b")
.replace("\u0412", "v").replace("\u0432", "v")
.replace("\u0413", "g").replace("\u0433", "g")
.replace("\u0414", "d").replace("\u0434", "d")
.replace("\u0415", "e").replace("\u0435", "e")
.replace("\u0401", "e").replace("\u0451", "e")
.replace("\u0416", "zh").replace("\u0436", "zh")
.replace("\u0417", "z").replace("\u0437", "z")
.replace("\u0418", "i").replace("\u0438", "i")
.replace("\u0419", "i`").replace("\u0439", "i`")
.replace("\u041A", "k").replace("\u043A", "k")
.replace("\u041B", "l").replace("\u043B", "l")
.replace("\u041C", "m").replace("\u043C", "m")
.replace("\u041D", "n").replace("\u043D", "n")
.replace("\u041E", "o").replace("\u043E", "o")
.replace("\u041F", "p").replace("\u043F", "p")
.replace("\u0420", "r").replace("\u0440", "r")
.replace("\u0421", "s").replace("\u0441", "s")
.replace("\u0422", "t").replace("\u0442", "t")
.replace("\u0423", "u").replace("\u0443", "u")
.replace("\u0424", "f").replace("\u0444", "f")
.replace("\u0425", "kh").replace("\u0445", "kh")
.replace("\u0426", "t^s").replace("\u0446", "t^s")
.replace("\u0427", "ch").replace("\u0447", "ch")
.replace("\u0428", "sh").replace("\u0448", "sh")
.replace("\u0429", "shch").replace("\u0449", "shch")
.replace("\u042A", "''").replace("\u044A", "''")
.replace("\u042B", "y").replace("\u044B", "y")
.replace("\u042C", "'").replace("\u044C", "'")
.replace("\u042D", "e`").replace("\u044D", "e`")
.replace("\u042E", "i^u").replace("\u044E", "i^u")
.replace("\u042F", "i^a").replace("\u044F", "i^a"))
return word
def __roman_to_cyrillic(self, word):
"""
Transliterate a Russian word back into the Cyrillic alphabet.
A Russian word that was transliterated into the Roman
alphabet to ease the stemming process is transliterated
back into the Cyrillic alphabet, its original form.
:param word: The word that is transliterated.
:type word: str or unicode
:return: word, the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("i^u", "\u044E").replace("i^a", "\u044F")
.replace("shch", "\u0449").replace("kh", "\u0445")
.replace("t^s", "\u0446").replace("ch", "\u0447")
.replace("e`", "\u044D").replace("i`", "\u0439")
.replace("sh", "\u0448").replace("k", "\u043A")
.replace("e", "\u0435").replace("zh", "\u0436")
.replace("a", "\u0430").replace("b", "\u0431")
.replace("v", "\u0432").replace("g", "\u0433")
.replace("d", "\u0434").replace("e", "\u0435")
.replace("z", "\u0437").replace("i", "\u0438")
.replace("l", "\u043B").replace("m", "\u043C")
.replace("n", "\u043D").replace("o", "\u043E")
.replace("p", "\u043F").replace("r", "\u0440")
.replace("s", "\u0441").replace("t", "\u0442")
.replace("u", "\u0443").replace("f", "\u0444")
.replace("''", "\u044A").replace("y", "\u044B")
.replace("'", "\u044C"))
return word
class SpanishStemmer(_StandardStemmer):
"""
The Spanish Snowball stemmer.
:cvar __vowels: The Spanish vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
:type __step2a_suffixes: tuple
:cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
:type __step2b_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Spanish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/spanish/stemmer.html
"""
__vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC"
__step0_suffixes = ("selas", "selos", "sela", "selo", "las",
"les", "los", "nos", "me", "se", "la", "le",
"lo")
__step1_suffixes = ('amientos', 'imientos', 'amiento', 'imiento',
'aciones', 'uciones', 'adoras', 'adores',
'ancias', 'log\xEDas', 'encias', 'amente',
'idades', 'anzas', 'ismos', 'ables', 'ibles',
'istas', 'adora', 'aci\xF3n', 'antes',
'ancia', 'log\xEDa', 'uci\xf3n', 'encia',
'mente', 'anza', 'icos', 'icas', 'ismo',
'able', 'ible', 'ista', 'osos', 'osas',
'ador', 'ante', 'idad', 'ivas', 'ivos',
'ico',
'ica', 'oso', 'osa', 'iva', 'ivo')
__step2a_suffixes = ('yeron', 'yendo', 'yamos', 'yais', 'yan',
'yen', 'yas', 'yes', 'ya', 'ye', 'yo',
'y\xF3')
__step2b_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
'i\xE9ramos', 'i\xE9semos', 'ar\xEDais',
'aremos', 'er\xEDais', 'eremos',
'ir\xEDais', 'iremos', 'ierais', 'ieseis',
'asteis', 'isteis', '\xE1bamos',
'\xE1ramos', '\xE1semos', 'ar\xEDan',
'ar\xEDas', 'ar\xE9is', 'er\xEDan',
'er\xEDas', 'er\xE9is', 'ir\xEDan',
'ir\xEDas', 'ir\xE9is',
'ieran', 'iesen', 'ieron', 'iendo', 'ieras',
'ieses', 'abais', 'arais', 'aseis',
'\xE9amos', 'ar\xE1n', 'ar\xE1s',
'ar\xEDa', 'er\xE1n', 'er\xE1s',
'er\xEDa', 'ir\xE1n', 'ir\xE1s',
'ir\xEDa', 'iera', 'iese', 'aste', 'iste',
'aban', 'aran', 'asen', 'aron', 'ando',
'abas', 'adas', 'idas', 'aras', 'ases',
'\xEDais', 'ados', 'idos', 'amos', 'imos',
'emos', 'ar\xE1', 'ar\xE9', 'er\xE1',
'er\xE9', 'ir\xE1', 'ir\xE9', 'aba',
'ada', 'ida', 'ara', 'ase', '\xEDan',
'ado', 'ido', '\xEDas', '\xE1is',
'\xE9is', '\xEDa', 'ad', 'ed', 'id',
'an', 'i\xF3', 'ar', 'er', 'ir', 'as',
'\xEDs', 'en', 'es')
__step3_suffixes = ("os", "a", "e", "o", "\xE1",
"\xE9", "\xED", "\xF3")
def stem(self, word):
"""
Stem a Spanish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Attached pronoun
for suffix in self.__step0_suffixes:
if not (word.endswith(suffix) and rv.endswith(suffix)):
continue
if ((rv[:-len(suffix)].endswith(("ando", "\xE1ndo",
"ar", "\xE1r",
"er", "\xE9r",
"iendo", "i\xE9ndo",
"ir", "\xEDr"))) or
(rv[:-len(suffix)].endswith("yendo") and
word[:-len(suffix)].endswith("uyendo"))):
word = self.__replace_accented(word[:-len(suffix)])
r1 = self.__replace_accented(r1[:-len(suffix)])
r2 = self.__replace_accented(r2[:-len(suffix)])
rv = self.__replace_accented(rv[:-len(suffix)])
break
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if not word.endswith(suffix):
continue
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic", "ad")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(suffix):
step1_success = True
if suffix in ("adora", "ador", "aci\xF3n", "adoras",
"adores", "aciones", "ante", "antes",
"ancia", "ancias"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
elif suffix in ("log\xEDa", "log\xEDas"):
word = suffix_replace(word, suffix, "log")
rv = suffix_replace(rv, suffix, "log")
elif suffix in ("uci\xF3n", "uciones"):
word = suffix_replace(word, suffix, "u")
rv = suffix_replace(rv, suffix, "u")
elif suffix in ("encia", "encias"):
word = suffix_replace(word, suffix, "ente")
rv = suffix_replace(rv, suffix, "ente")
elif suffix == "mente":
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith(("ante", "able", "ible")):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("idad", "idades"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
for pre_suff in ("abil", "ic", "iv"):
if r2.endswith(pre_suff):
word = word[:-len(pre_suff)]
rv = rv[:-len(pre_suff)]
elif suffix in ("ivo", "iva", "ivos", "ivas"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2a: Verb suffixes beginning 'y'
if not step1_success:
for suffix in self.__step2a_suffixes:
if (rv.endswith(suffix) and
word[-len(suffix)-1:-len(suffix)] == "u"):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2b: Other verb suffixes
for suffix in self.__step2b_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
if suffix in ("en", "es", "\xE9is", "emos"):
if word.endswith("gu"):
word = word[:-1]
if rv.endswith("gu"):
rv = rv[:-1]
break
# STEP 3: Residual suffix
for suffix in self.__step3_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
if suffix in ("e", "\xE9"):
rv = rv[:-len(suffix)]
if word[-2:] == "gu" and rv.endswith("u"):
word = word[:-1]
break
word = self.__replace_accented(word)
return word
def __replace_accented(self, word):
"""
Replace all accented letters in a word with their non-accented
counterparts.
:param word: A Spanish word, with or without accents
:type word: str or unicode
:return: a word with the accented letters (á, é, í, ó, ú) replaced with
their non-accented counterparts (a, e, i, o, u)
:rtype: str or unicode
"""
return (word.replace("\xE1", "a")
.replace("\xE9", "e")
.replace("\xED", "i")
.replace("\xF3", "o")
.replace("\xFA", "u"))
class SwedishStemmer(_ScandinavianStemmer):
"""
The Swedish Snowball stemmer.
:cvar __vowels: The Swedish vowels.
:type __vowels: unicode
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Swedish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/swedish/stemmer.html
"""
__vowels = "aeiouy\xE4\xE5\xF6"
__s_ending = "bcdfghjklmnoprtvy"
__step1_suffixes = ("heterna", "hetens", "heter", "heten",
"anden", "arnas", "ernas", "ornas", "andes",
"andet", "arens", "arna", "erna", "orna",
"ande", "arne", "aste", "aren", "ades",
"erns", "ade", "are", "ern", "ens", "het",
"ast", "ad", "en", "ar", "er", "or", "as",
"es", "at", "a", "e", "s")
__step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt")
__step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig")
def stem(self, word):
"""
Stem a Swedish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
r1 = self._r1_scandinavian(word, self.__vowels)
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 3
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix in ("els", "lig", "ig"):
word = word[:-len(suffix)]
elif suffix in ("fullt", "l\xF6st"):
word = word[:-1]
break
return word
def demo():
"""
This function provides a demonstration of the Snowball stemmers.
Once the function is invoked and a language has been specified,
it stems an excerpt of the Universal Declaration of Human Rights
(which is part of the NLTK corpus collection) and then prints
both the original and the stemmed text.
"""
import re
from nltk.corpus import udhr
udhr_corpus = {"danish": "Danish_Dansk-Latin1",
"dutch": "Dutch_Nederlands-Latin1",
"english": "English-Latin1",
"finnish": "Finnish_Suomi-Latin1",
"french": "French_Francais-Latin1",
"german": "German_Deutsch-Latin1",
"hungarian": "Hungarian_Magyar-UTF8",
"italian": "Italian_Italiano-Latin1",
"norwegian": "Norwegian-Latin1",
"porter": "English-Latin1",
"portuguese": "Portuguese_Portugues-Latin1",
"romanian": "Romanian_Romana-Latin2",
"russian": "Russian-UTF8",
"spanish": "Spanish-Latin1",
"swedish": "Swedish_Svenska-Latin1",
}
print("\n")
print("******************************")
print("Demo for the Snowball stemmers")
print("******************************")
while True:
language = compat.raw_input("Please enter the name of the language " +
"to be demonstrated\n" +
"/".join(SnowballStemmer.languages) +
"\n" +
"(enter 'exit' in order to leave): ")
if language == "exit":
break
if language not in SnowballStemmer.languages:
print(("\nOops, there is no stemmer for this language. " +
"Please try again.\n"))
continue
stemmer = SnowballStemmer(language)
excerpt = udhr.words(udhr_corpus[language])[:300]
stemmed = " ".join(stemmer.stem(word) for word in excerpt)
stemmed = re.sub(r"(.{,70})\s", r'\1\n', stemmed+' ').rstrip()
excerpt = " ".join(excerpt)
excerpt = re.sub(r"(.{,70})\s", r'\1\n', excerpt+' ').rstrip()
print("\n")
print('-' * 70)
print('ORIGINAL'.center(70))
print(excerpt)
print("\n\n")
print('STEMMED RESULTS'.center(70))
print(stemmed)
print('-' * 70)
print("\n")<|fim▁end|> | |
<|file_name|>check-api-contrail-9081.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import urllib2
import sys
import simplejson as json
import ConfigParser
import signal
import time
CONF_FILE = '/etc/check_api.conf'
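# Expected layout of CONF_FILE (a sketch inferred from the options read
# below; values are placeholders and real deployments may define more):
#   [api]
#   user = <keystone user>
#   password = <keystone password>
#   tenant = <tenant name>
#   keystone_endpoints = http://127.0.0.1:5000/v2.0
#   keystone_timeout = 1
#   contrail_timeout = 1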
plugin_name = "check-api-contrail-9081"
plugin_instance = "lma-contrail-extension"
plugin_interval = 90
plugin_type = 'gauge'
plugin_request = 'active'
url = "http://127.0.0.1:9081"
class OSAPI(object):
def __init__(self, config):
self.config = config
self.username = self.config.get('api', 'user')
self.password = self.config.get('api', 'password')
self.tenant_name = self.config.get('api', 'tenant')
self.endpoint_keystone = self.config.get('api',
'keystone_endpoints'
).split(',')
self.token = None
self.tenant_id = None
self.get_token()
def get_timeout(self, service):
try:
return int(self.config.get('api', '%s_timeout' % service))
except ConfigParser.NoOptionError:
return 1
def get_token(self):
data = json.dumps({
"auth":
{
'tenantName': self.tenant_name,
'passwordCredentials':
{
'username': self.username,
'password': self.password
}
}
})
for keystone in self.endpoint_keystone:
try:
request = urllib2.Request(
'%s/tokens' % keystone,
data=data,
headers={
'Content-type': 'application/json'
})
# Parse into a fresh name: 'data' still holds the JSON payload used
# to build the request above.
response = json.loads(
urllib2.urlopen(
request, timeout=self.get_timeout('keystone')).read())
self.token = response['access']['token']['id']
self.tenant_id = response['access']['token']['tenant']['id']
return
except Exception as e:
print("Got exception '%s'" % e)
sys.exit(1)
def check_api(self, url, service):
try:
request = urllib2.Request(
url,
headers={
'X-Auth-Token': self.token,
})
start_time = time.time()
p = urllib2.urlopen(request, timeout=self.get_timeout(service))
end_time = time.time()
except urllib2.HTTPError:
# An HTTP error status still means the endpoint answered; skip the sample.
return
except Exception as e:
print(e)
sys.exit(1)
return "%.3f" % (end_time - start_time)
def configure_callback(conf):
for node in conf.children:
val = str(node.values[0])  # read but currently unused; the config hook is a stub
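# Note (added): collectd's Python plugin alters SIGCHLD handling, which can
# break child-process reaping; registering restore_sigchld() as the init
# callback below restores the default handler, as the collectd-python
# documentation recommends.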
def restore_sigchld():
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def log_verbose(msg):
collectd.info('%s plugin [verbose]: %s' % (plugin_name, msg))
def payload():
config = ConfigParser.RawConfigParser()
config.read(CONF_FILE)
API = OSAPI(config)
payload = API.check_api(url, "contrail")
return payload
def payload_callback():
log_verbose('Read callback called')
value = payload()
# log_verbose(
# 'Sending value: %s.%s=%s' % (plugin_name, '-'.join([val.plugin, val.type]), value))
val = collectd.Values(
plugin=plugin_name, # metric source
plugin_instance=plugin_instance,
type=plugin_type,
type_instance=plugin_name,
interval=plugin_interval,
meta={'0': True},
values=[value]
)
<|fim▁hole|> if len(sys.argv) > 1:
url = sys.argv[1]
else:
print "Please provide URL"
sys.exit(1)
print "Plugin: " + plugin_name
result = payload()  # avoid shadowing the payload() function
print("%s" % (result))
sys.exit(0)
else:
import collectd
collectd.register_init(restore_sigchld)
collectd.register_config(configure_callback)
collectd.register_read(payload_callback, plugin_interval)<|fim▁end|> | val.dispatch()
if __name__ == '__main__': |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for cv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see<|fim▁hole|>"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cv.settings")
application = get_wsgi_application()<|fim▁end|> | https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ |
<|file_name|>group.go<|end_file_name|><|fim▁begin|>package apimanagement
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// GroupClient is the apiManagement Client
type GroupClient struct {
BaseClient
}
// NewGroupClient creates an instance of the GroupClient client.
func NewGroupClient(subscriptionID string) GroupClient {
return NewGroupClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewGroupClientWithBaseURI creates an instance of the GroupClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewGroupClientWithBaseURI(baseURI string, subscriptionID string) GroupClient {
return GroupClient{NewWithBaseURI(baseURI, subscriptionID)}
}
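// Usage sketch (added for illustration, not generated code; the authorizer
// wiring is an assumption that depends on the caller's auth setup):
//	client := NewGroupClient("<subscription id>")
//	client.Authorizer = authorizer // e.g. obtained from autorest/azure/auth
//	page, err := client.ListByService(ctx, "my-rg", "my-service", "", nil, nil)
//	for err == nil && page.NotDone() {
//		for _, g := range page.Values() {
//			fmt.Println(*g.Name)
//		}
//		err = page.NextWithContext(ctx)
//	}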
// CreateOrUpdate creates or updates a group.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceName - the name of the API Management service.
// groupID - group identifier. Must be unique in the current API Management service instance.
// parameters - create parameters.
// ifMatch - eTag of the Entity. Not required when creating an entity, but required when updating an entity.
func (client GroupClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, ifMatch string) (result GroupContract, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GroupClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
{TargetValue: groupID,
Constraints: []validation.Constraint{{Target: "groupID", Name: validation.MaxLength, Rule: 256, Chain: nil},
{Target: "groupID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.GroupCreateParametersProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.GroupCreateParametersProperties.DisplayName", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.GroupCreateParametersProperties.DisplayName", Name: validation.MaxLength, Rule: 300, Chain: nil},
{Target: "parameters.GroupCreateParametersProperties.DisplayName", Name: validation.MinLength, Rule: 1, Chain: nil},
}},
}}}}}); err != nil {
return result, validation.NewError("apimanagement.GroupClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, serviceName, groupID, parameters, ifMatch)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "CreateOrUpdate", resp, "Failure responding to request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client GroupClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, ifMatch string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"groupId": autorest.Encode("path", groupID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceName": autorest.Encode("path", serviceName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2021-01-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
if len(ifMatch) > 0 {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithHeader("If-Match", autorest.String(ifMatch)))
}
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client GroupClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client GroupClient) CreateOrUpdateResponder(resp *http.Response) (result GroupContract, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
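// Concurrency sketch (added, not generated code): ifMatch carries the ETag
// used for optimistic concurrency. A typical read-modify-write, with the
// ETag taken from the GET response header, looks like:
//	g, _ := client.Get(ctx, "my-rg", "my-service", "my-group")
//	etag := g.Header.Get("ETag")
//	_, err := client.Update(ctx, "my-rg", "my-service", "my-group", params, etag)
// Passing "*" as ifMatch performs an unconditional update.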
// Delete deletes a specific group of the API Management service instance.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceName - the name of the API Management service.
// groupID - group identifier. Must be unique in the current API Management service instance.
// ifMatch - eTag of the Entity. ETag should match the current entity state from the header response of the GET
// request or it should be * for unconditional update.
func (client GroupClient) Delete(ctx context.Context, resourceGroupName string, serviceName string, groupID string, ifMatch string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GroupClient.Delete")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode<|fim▁hole|> }()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
{TargetValue: groupID,
Constraints: []validation.Constraint{{Target: "groupID", Name: validation.MaxLength, Rule: 256, Chain: nil},
{Target: "groupID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
return result, validation.NewError("apimanagement.GroupClient", "Delete", err.Error())
}
req, err := client.DeletePreparer(ctx, resourceGroupName, serviceName, groupID, ifMatch)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Delete", resp, "Failure responding to request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client GroupClient) DeletePreparer(ctx context.Context, resourceGroupName string, serviceName string, groupID string, ifMatch string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"groupId": autorest.Encode("path", groupID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceName": autorest.Encode("path", serviceName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2021-01-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}", pathParameters),
autorest.WithQueryParameters(queryParameters),
autorest.WithHeader("If-Match", autorest.String(ifMatch)))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client GroupClient) DeleteSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client GroupClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the details of the group specified by its identifier.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceName - the name of the API Management service.
// groupID - group identifier. Must be unique in the current API Management service instance.
func (client GroupClient) Get(ctx context.Context, resourceGroupName string, serviceName string, groupID string) (result GroupContract, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GroupClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
{TargetValue: groupID,
Constraints: []validation.Constraint{{Target: "groupID", Name: validation.MaxLength, Rule: 256, Chain: nil},
{Target: "groupID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
return result, validation.NewError("apimanagement.GroupClient", "Get", err.Error())
}
req, err := client.GetPreparer(ctx, resourceGroupName, serviceName, groupID)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client GroupClient) GetPreparer(ctx context.Context, resourceGroupName string, serviceName string, groupID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"groupId": autorest.Encode("path", groupID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceName": autorest.Encode("path", serviceName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2021-01-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client GroupClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client GroupClient) GetResponder(resp *http.Response) (result GroupContract, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetEntityTag gets the entity state (Etag) version of the group specified by its identifier.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceName - the name of the API Management service.
// groupID - group identifier. Must be unique in the current API Management service instance.
func (client GroupClient) GetEntityTag(ctx context.Context, resourceGroupName string, serviceName string, groupID string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GroupClient.GetEntityTag")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
{TargetValue: groupID,
Constraints: []validation.Constraint{{Target: "groupID", Name: validation.MaxLength, Rule: 256, Chain: nil},
{Target: "groupID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
return result, validation.NewError("apimanagement.GroupClient", "GetEntityTag", err.Error())
}
req, err := client.GetEntityTagPreparer(ctx, resourceGroupName, serviceName, groupID)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "GetEntityTag", nil, "Failure preparing request")
return
}
resp, err := client.GetEntityTagSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "GetEntityTag", resp, "Failure sending request")
return
}
result, err = client.GetEntityTagResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "GetEntityTag", resp, "Failure responding to request")
return
}
return
}
// GetEntityTagPreparer prepares the GetEntityTag request.
func (client GroupClient) GetEntityTagPreparer(ctx context.Context, resourceGroupName string, serviceName string, groupID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"groupId": autorest.Encode("path", groupID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceName": autorest.Encode("path", serviceName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2021-01-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsHead(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetEntityTagSender sends the GetEntityTag request. The method will close the
// http.Response Body if it receives an error.
func (client GroupClient) GetEntityTagSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetEntityTagResponder handles the response to the GetEntityTag request. The method always
// closes the http.Response Body.
func (client GroupClient) GetEntityTagResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// ListByService lists a collection of groups defined within a service instance.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceName - the name of the API Management service.
// filter - | Field       | Usage  | Supported operators    | Supported functions                         |
//          |-------------|--------|------------------------|---------------------------------------------|
//          | name        | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |
//          | displayName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |
//          | description | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |
//          | externalId  | filter | eq                     |                                             |
// top - number of records to return.
// skip - number of records to skip.
func (client GroupClient) ListByService(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result GroupCollectionPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GroupClient.ListByService")
defer func() {
sc := -1
if result.gc.Response.Response != nil {
sc = result.gc.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
{TargetValue: top,
Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}},
{TargetValue: skip,
Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}}}}); err != nil {
return result, validation.NewError("apimanagement.GroupClient", "ListByService", err.Error())
}
result.fn = client.listByServiceNextResults
req, err := client.ListByServicePreparer(ctx, resourceGroupName, serviceName, filter, top, skip)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "ListByService", nil, "Failure preparing request")
return
}
resp, err := client.ListByServiceSender(req)
if err != nil {
result.gc.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "ListByService", resp, "Failure sending request")
return
}
result.gc, err = client.ListByServiceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "ListByService", resp, "Failure responding to request")
return
}
if result.gc.hasNextLink() && result.gc.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListByServicePreparer prepares the ListByService request.
func (client GroupClient) ListByServicePreparer(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceName": autorest.Encode("path", serviceName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2021-01-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(filter) > 0 {
queryParameters["$filter"] = autorest.Encode("query", filter)
}
if top != nil {
queryParameters["$top"] = autorest.Encode("query", *top)
}
if skip != nil {
queryParameters["$skip"] = autorest.Encode("query", *skip)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByServiceSender sends the ListByService request. The method will close the
// http.Response Body if it receives an error.
func (client GroupClient) ListByServiceSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByServiceResponder handles the response to the ListByService request. The method always
// closes the http.Response Body.
func (client GroupClient) ListByServiceResponder(resp *http.Response) (result GroupCollection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByServiceNextResults retrieves the next set of results, if any.
func (client GroupClient) listByServiceNextResults(ctx context.Context, lastResults GroupCollection) (result GroupCollection, err error) {
req, err := lastResults.groupCollectionPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "apimanagement.GroupClient", "listByServiceNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByServiceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "apimanagement.GroupClient", "listByServiceNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByServiceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "listByServiceNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByServiceComplete enumerates all values, automatically crossing page boundaries as required.
func (client GroupClient) ListByServiceComplete(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result GroupCollectionIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GroupClient.ListByService")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByService(ctx, resourceGroupName, serviceName, filter, top, skip)
return
}
// Update updates the details of the group specified by its identifier.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceName - the name of the API Management service.
// groupID - group identifier. Must be unique in the current API Management service instance.
// parameters - update parameters.
// ifMatch - eTag of the Entity. ETag should match the current entity state from the header response of the GET
// request or it should be * for unconditional update.
func (client GroupClient) Update(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupUpdateParameters, ifMatch string) (result GroupContract, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/GroupClient.Update")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
{TargetValue: groupID,
Constraints: []validation.Constraint{{Target: "groupID", Name: validation.MaxLength, Rule: 256, Chain: nil},
{Target: "groupID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
return result, validation.NewError("apimanagement.GroupClient", "Update", err.Error())
}
req, err := client.UpdatePreparer(ctx, resourceGroupName, serviceName, groupID, parameters, ifMatch)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.GroupClient", "Update", resp, "Failure responding to request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client GroupClient) UpdatePreparer(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupUpdateParameters, ifMatch string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"groupId": autorest.Encode("path", groupID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"serviceName": autorest.Encode("path", serviceName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2021-01-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters),
autorest.WithHeader("If-Match", autorest.String(ifMatch)))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
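// Illustrative request shape produced by UpdatePreparer (assuming the default
// Azure public cloud BaseURI): a PATCH to
// https://management.azure.com/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.ApiManagement/service/{svc}/groups/{gid}?api-version=2021-01-01-preview
// carrying a JSON-encoded GroupUpdateParameters body and the caller's ETag in the If-Match header.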
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client GroupClient) UpdateSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client GroupClient) UpdateResponder(resp *http.Response) (result GroupContract, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}<|fim▁end|> | }
tracing.EndSpan(ctx, sc, err) |
<|file_name|>SundayQuickSearcher.java<|end_file_name|><|fim▁begin|>/*
* Copyright Matt Palmer 2011-2013, All rights reserved.
*
* This code is licensed under a standard 3-clause BSD license:
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * The names of its contributors may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package net.byteseek.searcher.sequence.sunday;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import net.byteseek.io.reader.Window;
import net.byteseek.io.reader.WindowReader;
import net.byteseek.matcher.bytes.ByteMatcher;
import net.byteseek.matcher.sequence.SequenceMatcher;
import net.byteseek.object.factory.DoubleCheckImmutableLazyObject;
import net.byteseek.object.factory.LazyObject;
import net.byteseek.object.factory.ObjectFactory;
import net.byteseek.searcher.SearchResult;
import net.byteseek.searcher.SearchUtils;
import net.byteseek.searcher.sequence.AbstractSequenceSearcher;
/**
*
* @author Matt Palmer
*/
public final class SundayQuickSearcher extends AbstractSequenceSearcher {
private final LazyObject<int[]> forwardInfo;
private final LazyObject<int[]> backwardInfo;
/**
* Constructs a Sunday Quick searcher given a {@link SequenceMatcher}
* to search for.
*
* @param sequence The sequence to search for.
*/
public SundayQuickSearcher(final SequenceMatcher sequence) {
super(sequence);
forwardInfo = new DoubleCheckImmutableLazyObject<int[]>(new ForwardInfoFactory());
backwardInfo = new DoubleCheckImmutableLazyObject<int[]>(new BackwardInfoFactory());
}
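    // Usage sketch (hypothetical; assumes a concrete SequenceMatcher such as
    // byteseek's ByteSequenceMatcher is available):
    //   SundayQuickSearcher searcher = new SundayQuickSearcher(new ByteSequenceMatcher("needle".getBytes()));
    //   List<SearchResult<SequenceMatcher>> hits = searcher.searchForwards(haystack, 0, haystack.length - 1);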
/**
* {@inheritDoc}
*/
@Override
public List<SearchResult<SequenceMatcher>> searchForwards(final byte[] bytes, final int fromPosition, final int toPosition) {
// Get the objects needed to search:
final int[] safeShifts = forwardInfo.get();
final SequenceMatcher sequence = getMatcher();
// Calculate safe bounds for the search:
final int length = sequence.length();
final int finalPosition = bytes.length - length;
final int lastLoopPosition = finalPosition - 1;
final int lastPosition = toPosition < lastLoopPosition?
toPosition : lastLoopPosition;
int searchPosition = fromPosition > 0?
fromPosition : 0;
// Search forwards. The loop does not check for the final
// position, as we shift on the byte after the sequence.
while (searchPosition <= lastPosition) {
if (sequence.matchesNoBoundsCheck(bytes, searchPosition)) {
return SearchUtils.singleResult(searchPosition, sequence);
}
searchPosition += safeShifts[bytes[searchPosition + length] & 0xFF];
}
// Check the final position if necessary:
if (searchPosition == finalPosition &&
toPosition >= finalPosition &&
sequence.matches(bytes, finalPosition)) {
return SearchUtils.singleResult(finalPosition, sequence);
}
return SearchUtils.noResults();
}
/**
* {@inheritDoc}
*/
@Override
public List<SearchResult<SequenceMatcher>> doSearchForwards(final WindowReader reader,
final long fromPosition, final long toPosition ) throws IOException {
// Initialise
final int[] safeShifts = forwardInfo.get();
final SequenceMatcher sequence = getMatcher();
final int length = sequence.length();
long searchPosition = fromPosition;
// While there is a window to search in...
// If there is no window immediately after the sequence,
// then there is no match, since this is only invoked if the
// sequence is already crossing into another window.
Window window;
while (searchPosition <= toPosition &&
(window = reader.getWindow(searchPosition + length)) != null) {
// Initialise array search:
final byte[] array = window.getArray();
final int arrayStartPosition = reader.getWindowOffset(searchPosition + length);
final int arrayEndPosition = window.length() - 1;
final long distanceToEnd = toPosition - window.getWindowPosition() + length ;
final int finalPosition = distanceToEnd < arrayEndPosition?
(int) distanceToEnd : arrayEndPosition;
int arraySearchPosition = arrayStartPosition;
            // Search forwards in the array using the reader interface to match.
// The loop does not check the final position, as we shift on the byte
// after the sequence (so would get an IndexOutOfBoundsException in the final position).
while (arraySearchPosition < finalPosition) {
if (sequence.matches(reader, searchPosition)) {
return SearchUtils.singleResult(searchPosition, sequence);
}
final int shift = safeShifts[array[arraySearchPosition] & 0xFF];
searchPosition += shift;
arraySearchPosition += shift;
}
// Check final position if necessary:
if (arraySearchPosition == finalPosition ||
searchPosition == toPosition) {
if (sequence.matches(reader, searchPosition)) {
return SearchUtils.singleResult(searchPosition, sequence);
}
searchPosition += safeShifts[array[arraySearchPosition] & 0xFF];
}
}
return SearchUtils.noResults();
}
/**
* {@inheritDoc}
*/
@Override
public List<SearchResult<SequenceMatcher>> searchBackwards(final byte[] bytes, final int fromPosition, final int toPosition) {<|fim▁hole|> final SequenceMatcher sequence = getMatcher();
// Calculate safe bounds for the search:
final int lastLoopPosition = toPosition > 1?
toPosition : 1;
final int firstPossiblePosition = bytes.length - sequence.length();
int searchPosition = fromPosition < firstPossiblePosition ?
fromPosition : firstPossiblePosition;
// Search backwards. The loop does not check the
// first position in the array, because we shift on the byte
// immediately before the current search position.
while (searchPosition >= lastLoopPosition) {
if (sequence.matchesNoBoundsCheck(bytes, searchPosition)) {
return SearchUtils.singleResult(searchPosition, sequence);
}
searchPosition -= safeShifts[bytes[searchPosition - 1] & 0xFF];
}
// Check for first position if necessary:
if (searchPosition == 0 &&
toPosition < 1 &&
sequence.matches(bytes, 0)) {
return SearchUtils.singleResult(0, sequence);
}
return SearchUtils.noResults();
}
/**
* {@inheritDoc}
*/
@Override
public List<SearchResult<SequenceMatcher>> doSearchBackwards(final WindowReader reader,
final long fromPosition, final long toPosition ) throws IOException {
// Initialise
        final int[] safeShifts = backwardInfo.get();
final SequenceMatcher sequence = getMatcher();
long searchPosition = fromPosition;
// While there is a window to search in...
// If there is no window immediately before the sequence,
// then there is no match, since this is only invoked if the
// sequence is already crossing into another window.
Window window;
while (searchPosition >= toPosition &&
(window = reader.getWindow(searchPosition - 1)) != null) {
// Initialise array search:
final byte[] array = window.getArray();
final int arrayStartPosition = reader.getWindowOffset(searchPosition - 1);
// Search to the beginning of the array, or the final search position,
            // whichever comes first.
final long endRelativeToWindow = toPosition - window.getWindowPosition();
final int arrayEndSearchPosition = endRelativeToWindow > 0?
(int) endRelativeToWindow : 0;
int arraySearchPosition = arrayStartPosition;
// Search backwards in the array using the reader interface to match.
// The loop does not check the final position, as we shift on the byte
// before it.
while (arraySearchPosition > arrayEndSearchPosition) {
if (sequence.matches(reader, searchPosition)) {
return SearchUtils.singleResult(searchPosition, sequence);
}
final int shift = safeShifts[array[arraySearchPosition] & 0xFF];
searchPosition -= shift;
arraySearchPosition -= shift;
}
// Check final position if necessary:
if (arraySearchPosition == arrayEndSearchPosition ||
searchPosition == toPosition) {
if (sequence.matches(reader, searchPosition)) {
return SearchUtils.singleResult(searchPosition, sequence);
}
searchPosition -= safeShifts[array[arraySearchPosition] & 0xFF];
}
}
return SearchUtils.noResults();
}
/**
* {@inheritDoc}
*/
@Override
public void prepareForwards() {
forwardInfo.get();
}
/**
* {@inheritDoc}
*/
@Override
public void prepareBackwards() {
backwardInfo.get();
}
@Override
public String toString() {
return getClass().getSimpleName() + "[sequence:" + matcher + ']';
}
private final class ForwardInfoFactory implements ObjectFactory<int[]> {
private ForwardInfoFactory() {
}
/**
* Calculates the safe shifts to use if searching forwards.
* A safe shift is either the length of the sequence plus one, if the
* byte does not appear in the {@link SequenceMatcher}, or
* the shortest distance it appears from the end of the matcher.
*/
@Override
public int[] create() {
// First set the default shift to the length of the sequence plus one.
final int[] shifts = new int[256];
final SequenceMatcher sequence = getMatcher();
final int numBytes = sequence.length();
Arrays.fill(shifts, numBytes + 1);
// Now set specific byte shifts for the bytes actually in
// the sequence itself. The shift is the distance of each character
// from the end of the sequence, where the last position equals 1.
// Each position can match more than one byte (e.g. if a byte class appears).
for (int sequenceByteIndex = 0; sequenceByteIndex < numBytes; sequenceByteIndex++) {
final ByteMatcher aMatcher = sequence.getMatcherForPosition(sequenceByteIndex);
final byte[] matchingBytes = aMatcher.getMatchingBytes();
final int distanceFromEnd = numBytes - sequenceByteIndex;
for (final byte b : matchingBytes) {
shifts[b & 0xFF] = distanceFromEnd;
}
}
return shifts;
}
}
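    // Worked example (illustrative, not from the original source): for the
    // three-byte pattern {'a','b','c'}, the table above yields shifts['a']=3,
    // shifts['b']=2, shifts['c']=1, and 4 (length + 1) for every other byte,
    // so a mismatch on a byte absent from the pattern skips a full pattern
    // length plus one position.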
private final class BackwardInfoFactory implements ObjectFactory<int[]> {
private BackwardInfoFactory() {
}
/**
* Calculates the safe shifts to use if searching backwards.
* A safe shift is either the length of the sequence plus one, if the
* byte does not appear in the {@link SequenceMatcher}, or
* the shortest distance it appears from the beginning of the matcher.
*/
@Override
public int[] create() {
            // First set the default shift to the length of the sequence plus one,
            // the safe shift for any byte that does not appear in the sequence.
final int[] shifts = new int[256];
final SequenceMatcher sequence = getMatcher();
final int numBytes = sequence.length();
Arrays.fill(shifts, numBytes + 1);
// Now set specific byte shifts for the bytes actually in
// the sequence itself. The shift is the distance of each character
// from the start of the sequence, where the first position equals 1.
// Each position can match more than one byte (e.g. if a byte class appears).
for (int sequenceByteIndex = numBytes - 1; sequenceByteIndex >= 0; sequenceByteIndex--) {
final ByteMatcher aMatcher = sequence.getMatcherForPosition(sequenceByteIndex);
final byte[] matchingBytes = aMatcher.getMatchingBytes();
final int distanceFromStart = sequenceByteIndex + 1;
for (final byte b : matchingBytes) {
shifts[b & 0xFF] = distanceFromStart;
}
}
return shifts;
}
}
}<|fim▁end|> |
// Get objects needed to search:
final int[] safeShifts = backwardInfo.get(); |
<|file_name|>ua_program_state_machine_type.ts<|end_file_name|><|fim▁begin|>/**
* @module node-opcua-address-space
*/
// tslint:disable:no-empty-interface
import { InstantiateObjectOptions, UAMethod, UAObject, UAObjectType } from "node-opcua-address-space-base";<|fim▁hole|>export interface UAProgramStateMachineEx extends UAStateMachineEx {}
export interface UAProgramStateMachineType extends UAProgramStateMachine_Base, UAObjectType {
instantiate(options: InstantiateObjectOptions): UAProgramStateMachineEx;
}<|fim▁end|> | import { UAFiniteStateMachine, UAProgramStateMachine, UAProgramStateMachine_Base } from "node-opcua-nodeset-ua";
import { UAStateMachineEx, UAStateMachineType } from "./ua_state_machine_type";
|
<|file_name|>tp.py<|end_file_name|><|fim▁begin|>from pylab import *
from math import exp, sqrt
from image import *
from filters import *
from nibabel import load
import numpy
import image_processing
import image as img
import os
# This module generates the plots and result images used in the report.
class C:
    # a Config class, just a collection of constants
output_dir = '../report/img/results/'
input_dir = '../report/img/input/'
output_dir_plots = '../report/img/plots/'
# algorithm parameters to generate Sim and Reg plots
noise_levels = [0, 0.25, 0.5,1,2,3,5] # noise levels to distort the images
gaussian_sigmas = [0.5,1,2]
bilateral_sigmaDs = [0.5,1,2]
bilateral_sigmaRs = [2,20]
# plot configuration variables
column_names=['sim','reg','e','noise']
colors=['g','r','c','m','y','b'] # for different sigmaD
markers=['<','>','v','^'] # for different sigmaR
    lines=['-',''] # for different types of algorithms
# algorithm parameters to generate result images
default_noise_level = 1.5
default_noise_level_mri = 1.5
default_gaussian_sigma = 1
default_gaussian_sigma_noise = 1.5
default_bilateral_sigma = (1, 7)
default_bilateral_sigma_noise = (1.5, 7)
default_number_of_bins = 256
# generate the plot for filtering algorithms
def generate_plot_filtering(results,name,column_y):
xlabel('Noise ($\sigma$)')
for sigma in C.gaussian_sigmas:
gaussian = results['gaussian']
gaussian = gaussian[gaussian[:, 4] == sigma]
label = 'Gaussian, $\sigma=%.2f$' % sigma
style='o'+C.colors[C.gaussian_sigmas.index(sigma)]+'--'
plot(gaussian[:, 3], gaussian[:, column_y], style,label=label)
legend(loc=2)
for sigmaD in C.bilateral_sigmaDs:
for sigmaR in C.bilateral_sigmaRs:
bilateral= results['bilateral']
bilateral = bilateral[bilateral[:, 4] == sigmaD]
bilateral = bilateral[bilateral[:, 5] == sigmaR]
label = 'Bilateral, $\sigma_d=%.2f$, $\sigma_r=%.2f$' % (sigmaD,sigmaR)
style=C.markers[C.bilateral_sigmaRs.index(sigmaR)]+C.colors[C.bilateral_sigmaDs.index(sigmaD)]+'-'
plot(bilateral[:, 3], bilateral[:, column_y],style,label=label )
legend(loc=2)
savepngfig(C.output_dir_plots+name+'_filtering_'+C.column_names[column_y])
# generate the plot for otsu's algorithm, with and without noise and different
# types of filters
def generate_plot_otsu(results,name,column_y):
xlabel('Noise ($\sigma$)')
otsu = results['otsu']
plot(otsu[:, 3], otsu[:, column_y],'-.', label='otsu')
legend(loc=2)
for sigma in C.gaussian_sigmas:
otsu = results['otsu_gaussian']
otsu = otsu[otsu[:, 4] == sigma]
label = 'Otsu with gaussian, $\sigma=%.2f$' % sigma
style='o'+C.colors[C.gaussian_sigmas.index(sigma)]+'--'
plot(otsu[:, 3], otsu[:, column_y], style,label=label)
legend(loc=1)
for sigmaD in C.bilateral_sigmaDs:
for sigmaR in C.bilateral_sigmaRs:
otsu = results['otsu_bilateral']
otsu = otsu[otsu[:, 4] == sigmaD]
otsu = otsu[otsu[:, 5] == sigmaR]
label = 'Otsu with bilateral, $\sigma_d=%.2f$, $\sigma_r=%.2f$' % (sigmaD,sigmaR)
style=C.markers[C.bilateral_sigmaRs.index(sigmaR)]+C.colors[C.bilateral_sigmaDs.index(sigmaD)]+'-'
plot(otsu[:, 3], otsu[:, column_y],style, label=label)
legend(loc=1)
savepngfig(C.output_dir_plots+name+'_otsu_'+C.column_names[column_y])
# Generate all the plot images according to the results dictionary
# for image with given name
def generate_plot_images(results, name):
for k in results:
results[k] = array(results[k])
functions=[generate_plot_otsu,generate_plot_filtering]
labels=[(0,'$Sim(I,J)$'),(1,'$Reg(J)$')]
for f in functions:
for (column_y,label) in labels:
figure()
ylabel(label)
f(results,name,column_y)
xlim(0,C.noise_levels[-1]*1.5)
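# Usage sketch (hypothetical): `results` maps each method name to rows of
# [sim, reg, e, noise, (sigmas...)] as built by generate_plots below; calling
#   generate_plot_images(results, 'borders')
# then writes one PNG per (function, metric) pair into C.output_dir_plots.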
# generate a dictionary with Sim, Reg and E values for every combination of the
# algorithm parameters in class C, for a given image with a certain name
def generate_plots(image, name):
results = {}
results['otsu'] = []
results['otsu_bilateral'] = []
results['otsu_gaussian'] = []
results['bilateral'] = []
results['gaussian'] = []
otsu, t = image_processing.threshold_otsu(image, C.default_number_of_bins)
for noise in C.noise_levels:
print 'Image %s, Noise %.2f ' % (name, noise)
image_with_noise = add_gaussian_noise(image, noise)
print 'Image %s, otsu ' % (name)
otsu_noise, t = image_processing.threshold_otsu(image_with_noise, C.default_number_of_bins)
s, r, e = transformation_energy(otsu, otsu_noise)
results['otsu'].append([s, r, e, noise])
for sigma in C.gaussian_sigmas:
print 'Image %s, gaussian s=%.2f ' % (name, sigma)
gaussian = gaussian_filter(image_with_noise, sigma)
s, r, e = transformation_energy(image, gaussian)
results['gaussian'].append([s, r, e, noise, sigma])
if (sigma<2):
otsu_gaussian, t = image_processing.threshold_otsu(gaussian, C.default_number_of_bins)
s, r, e = transformation_energy(otsu, otsu_gaussian)
results['otsu_gaussian'].append([s, r, e, noise, sigma])
for sigmaD in C.bilateral_sigmaDs:
for sigmaR in C.bilateral_sigmaRs:
print 'Image %s, bilateral sd=%.2f, sr=%.2f ' % (name, sigmaD,sigmaR)
bilateral = bilateral_filter(image_with_noise, sigmaD, sigmaR)
s, r, e = transformation_energy(image, bilateral)
results['bilateral'].append([s, r, e, noise, sigmaD, sigmaR])
otsu_bilateral, t = image_processing.threshold_otsu(bilateral, C.default_number_of_bins)
s, r, e = transformation_energy(otsu, otsu_bilateral)
results['otsu_bilateral'].append([s, r, e, noise, sigmaD, sigmaR])
print 'Generating plot images...'
generate_plot_images(results, name)
# Generate the images that will be visually inspected
# For the given image, calculate:
# 1) Bilateral, gaussian and otsu's without noise
# 2) Bilateral, gaussian and otsu's with noise
# 3) Otsu's with noise, but after applying Bilateral, gaussian filtering
# Result images are saved with the given name as a prefix
def generate_result_images(image, name):
image = add_gaussian_noise(image, 0)
print 'Processing image %s' % name
save_image_png(image, C.output_dir + name)
if (name.startswith('mri')):
noise = C.default_noise_level_mri
else:
noise = C.default_noise_level
image_with_default_noise = add_gaussian_noise(image, noise)
save_image_png(image_with_default_noise, C.output_dir + name + '_noise')
print 'Image %s: bilateral' % name
(sigmaD, sigmaR) = C.default_bilateral_sigma
bilateral = bilateral_filter(image, sigmaD, sigmaR)
save_image_png(bilateral, C.output_dir + name + '_bilateral')
## for sigmaR in [1,2,3,4,5,7,8,9,10,11,12,13,14,15,17,18]:
## bilateral = bilateral_filter(image, sigmaD, sigmaR)
## if (sigmaR<10):
## n='0'+str(sigmaR)
## else:
## n=str(sigmaR)
## save_image_png(bilateral, C.output_dir + name + '_bilateral_'+n)
print 'Image %s: bilateral noise' % name
(sigmaD, sigmaR) = C.default_bilateral_sigma_noise
bilateral_noise = bilateral_filter(image_with_default_noise, sigmaD, sigmaR)
save_image_png(bilateral_noise, C.output_dir + name + '_noise_bilateral')
print 'Image %s: gaussian' % name
sigma = C.default_gaussian_sigma
gaussian = gaussian_filter(image, sigma)
save_image_png(gaussian, C.output_dir + name + '_gaussian')
print 'Image %s: gaussian noise' % name
sigma = C.default_gaussian_sigma_noise
gaussian_noise = gaussian_filter(image_with_default_noise, sigma)
save_image_png(gaussian_noise, C.output_dir + name + '_noise_gaussian')
print 'Image %s: Otsu' % name
otsu, t = image_processing.threshold_otsu(image, C.default_number_of_bins)
save_image_png(otsu, C.output_dir + name + '_otsu')
print 'Image %s: Otsu noise' % name
otsu_noise, t = image_processing.threshold_otsu(image_with_default_noise, C.default_number_of_bins)
save_image_png(otsu_noise, C.output_dir + name + '_otsu_noise')
print 'Image %s: Otsu noise bilateral' % name
otsu_bilateral_noise, t = image_processing.threshold_otsu(bilateral_noise, C.default_number_of_bins)
save_image_png(otsu_bilateral_noise, C.output_dir + name + '_otsu_noise_bilateral')
print 'Image %s: Otsu noise gaussian' % name
otsu_gaussian_noise, t = image_processing.threshold_otsu(gaussian_noise, C.default_number_of_bins)
save_image_png(otsu_gaussian_noise, C.output_dir + name + '_otsu_noise_gaussian')
# reads an image with a given extension from C.input_dir<|fim▁hole|> data = load(C.input_dir+ filename + '.'+extension)
image = data.get_data()
else:
image = imread(C.input_dir + filename + '.'+extension)
s = image.shape
if len(s) > 2 and s[2] in [3, 4]: # "detect" rgb images
image = img.rgb2gray(image)
#image= img.rgb2gray_color_preserving(image)
image=rescale_grayscale_image(image)
return image
def main():
set_printoptions(precision=4, linewidth=150, suppress=True) # print values with less precision
params = {'legend.fontsize': 8,
'legend.linewidth': 1,
'legend.labelspacing':0.2,
'legend.loc':2}
rcParams.update(params) # change global plotting parameters
if not os.path.exists(C.output_dir_plots):# generate output_dir_plots
os.makedirs(C.output_dir_plots)
if not os.path.exists(C.output_dir):# generate output_dir
os.makedirs(C.output_dir)
#image sets
synthetic_images=[('borders','png'),
('borders_contrast','png'),
('gradient1','png'),
('gradient2','png'),]
mri_images=[('mri1','png'),
('mri2','png'),
('mri3','png')]
mri_image=[('T1w_acpc_dc_restore_1.25.nii','gz')]
# select image set
#images=synthetic_images+mri_images
#images = mri_image
images=synthetic_images
# for each image, read it, and generate the resulting plots
# for each image, read it, and generate the resulting images
for filename,extension in images:
image = read_image(filename,extension)
#generate_result_images(image,filename)
generate_plots(image, filename)
if __name__ == '__main__':
main()<|fim▁end|> | def read_image(filename,extension):
if (extension=='gz'): |
<|file_name|>factory.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.<|fim▁hole|>
// This file was automatically generated by informer-gen with arguments: --input-dirs=[k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/apis/apiregistration,k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/apis/apiregistration/v1alpha1] --internal-clientset-package=k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/clientset_generated/internalclientset --listers-package=k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/listers --output-package=k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/informers --versioned-clientset-package=k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/clientset_generated/clientset
package informers
import (
clientset "k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/clientset_generated/clientset"
internalclientset "k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/clientset_generated/internalclientset"
apiregistration "k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/informers/apiregistration"
internalinterfaces "k8s.io/kubernetes/cmd/kubernetes-discovery/pkg/client/informers/internalinterfaces"
cache "k8s.io/kubernetes/pkg/client/cache"
runtime "k8s.io/kubernetes/pkg/runtime"
reflect "reflect"
sync "sync"
time "time"
)
type sharedInformerFactory struct {
internalClient internalclientset.Interface
versionedClient clientset.Interface
lock sync.Mutex
defaultResync time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
func NewSharedInformerFactory(internalClient internalclientset.Interface, versionedClient clientset.Interface, defaultResync time.Duration) SharedInformerFactory {
return &sharedInformerFactory{
internalClient: internalClient,
versionedClient: versionedClient,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
}
}
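// Usage sketch (illustrative; clientset construction elided): build a factory,
// request informers through the generated group interface, then start them:
//
//	factory := NewSharedInformerFactory(internalClient, versionedClient, 10*time.Minute)
//	_ = factory.Apiregistration() // obtain group informers before Start
//	factory.Start(stopCh)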
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// InternalInformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InternalInformerFor(obj runtime.Object, newFunc internalinterfaces.NewInternalInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = newFunc(f.internalClient, f.defaultResync)
f.informers[informerType] = informer
return informer
}
// VersionedInformerFor returns the SharedIndexInformer for obj using a
// versioned client.
func (f *sharedInformerFactory) VersionedInformerFor(obj runtime.Object, newFunc internalinterfaces.NewVersionedInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = newFunc(f.versionedClient, f.defaultResync)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
Apiregistration() apiregistration.Interface
}
func (f *sharedInformerFactory) Apiregistration() apiregistration.Interface {
return apiregistration.New(f)
}<|fim▁end|> | */ |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Contact: Jacob Schreiber ( [email protected] )
"""
For detailed documentation and examples, see the README.
"""
# Make our dependencies explicit so compiled Cython code won't segfault trying
# to load them.
import networkx, matplotlib.pyplot, scipy
import numpy as np
import os
import pyximport
# Adapted from Cython docs https://github.com/cython/cython/wiki/
# InstallingOnWindows#mingw--numpy--pyximport-at-runtime
if os.name == 'nt':
if 'CPATH' in os.environ:
os.environ['CPATH'] = os.environ['CPATH'] + np.get_include()
else:
os.environ['CPATH'] = np.get_include()
# XXX: we're assuming that MinGW is installed in C:\MinGW (default)
if 'PATH' in os.environ:
        os.environ['PATH'] = os.environ['PATH'] + r';C:\MinGW\bin'
else:
        os.environ['PATH'] = r'C:\MinGW\bin'
mingw_setup_args = { 'options': { 'build_ext': { 'compiler': 'mingw32' } } }
pyximport.install(setup_args=mingw_setup_args)
elif os.name == 'posix':
if 'CFLAGS' in os.environ:
os.environ['CFLAGS'] = os.environ['CFLAGS'] + ' -I' + np.get_include()
else:
os.environ['CFLAGS'] = ' -I' + np.get_include()
pyximport.install()
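# With the include/compiler flags exported above, pyximport compiles Cython
# modules transparently on first import; e.g. (illustrative) the wildcard
# import below triggers a one-time build of the yabn extension module.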
from yabn import *
__version__ = '0.1.0'<|fim▁end|> | # __init__.py: Yet Another Bayes Net library |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | default_app_config = 'users.apps.UserConfig' |
<|file_name|>LoginPage.js<|end_file_name|><|fim▁begin|>/**
* Created by uzysjung on 2016. 10. 21..
*/
import React, { PropTypes,Component } from 'react';
import Box from '../../components/widget/Box'
import { Link, browserHistory } from 'react-router'
import superagent from 'superagent';
import { Form , FormGroup, Col, Button, FormControl, Checkbox, ControlLabel , PageHeader, Alert } from 'react-bootstrap'
const styleLogin = {
panel : {
maxWidth : 600,
position : 'absolute',
top : '50%',
left : '50%',
transform : 'translate(-50%,-50%)'
},
header : {
maxHeight : 40,
        marginBottom : 100,
borderBottom : '1px solid #bababa'
}
};
class LoginPage extends React.Component {
constructor(props) {
super(props);
this.state = {
email : '',
password : ''
}
}
componentWillMount() {
const { authenticated, replace, redirect } = this.props;
if (authenticated) {
replace(redirect)
}
}
componentDidMount() {
}
handleFormSubmit = (e) => {
e.preventDefault();
const { email, password } = this.state;
setTimeout(() => this.setState({error: false}), 3000);
if ( !email || email.length < 1) {
this.setState({error: 'Insert Email address'});
return;
}
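        // Format check below is intentionally simple (illustrative): it accepts
        // "[email protected]" but rejects addresses without a TLD and TLDs longer
        // than 4 characters, a known limitation of this pattern.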
if (!/^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}$/i.test(email)) {
this.setState({error: 'Please check whether this email is valid'});
return;
}
if (!password) {
this.setState({error: 'Insert Password'});
return;
}
if ( password && password.length < 5 ) {
this.setState({error: 'Password must be longer than 5 characters'});
return;
}
superagent.post('/api/login').send({login_email: email, login_pw: password}).end((err, result) => {
if (!err) {
localStorage.setItem('jwt', result.body.token);
browserHistory.push('/');
} else {
this.setState({error: 'Login email/password incorrect :('});
}
});
};
handleForChange = (e) => {
console.log('e.target.id',e.target.id);
switch(e.target.id) {
case 'formHorizontalEmail' :
this.setState( { email : e.target.value } );
break;
case 'formHorizontalPassword' :
this.setState( { password : e.target.value } );
break;<|fim▁hole|>
renderAlert() {
if (this.state.error) {
return (
<Alert bsStyle="danger">
{this.state.error}
</Alert>
)
}
return null;
}
render() {
return (
<div style={styleLogin.panel}>
<PageHeader style={styleLogin.header}>Weapon Management System</PageHeader>
<Box
title="Login"
status="info"
solid
>
<Form onSubmit={this.handleFormSubmit} horizontal>
<FormGroup controlId="formHorizontalEmail">
<Col componentClass={ControlLabel} sm={2}>
Email
</Col>
<Col sm={10}>
<FormControl type="email" placeholder="Email" value={this.state.email} onChange={this.handleForChange} />
</Col>
</FormGroup>
<FormGroup controlId="formHorizontalPassword">
<Col componentClass={ControlLabel} sm={2}>
Password
</Col>
<Col sm={10}>
<FormControl type="password" placeholder="Password" value={this.state.password} onChange={this.handleForChange} />
</Col>
</FormGroup>
<FormGroup>
<Col smOffset={2} sm={10}>
<Checkbox>Remember me</Checkbox>
</Col>
</FormGroup>
{this.renderAlert()}
<FormGroup>
<Col smOffset={2} sm={10}>
<Button className="btn btn-success" type="submit">
Sign in
</Button>
</Col>
</FormGroup>
</Form>
</Box>
</div>
);
}
}
export default LoginPage;<|fim▁end|> | }
}; |
<|file_name|>file.go<|end_file_name|><|fim▁begin|>package command<|fim▁hole|> "strings"
"github.com/bitrise-io/go-utils/pathutil"
)
// CopyFile ...
func CopyFile(src, dst string) error {
// replace with a pure Go implementation?
// Golang proposal was: https://go-review.googlesource.com/#/c/1591/5/src/io/ioutil/ioutil.go
isDir, err := pathutil.IsDirExists(src)
if err != nil {
return err
}
if isDir {
return errors.New("Source is a directory: " + src)
}
args := []string{src, dst}
return RunCommand("rsync", args...)
}
// CopyDir ...
func CopyDir(src, dst string, isOnlyContent bool) error {
if isOnlyContent && !strings.HasSuffix(src, "/") {
src = src + "/"
}
args := []string{"-ar", src, dst}
return RunCommand("rsync", args...)
}
// RemoveDir ...
func RemoveDir(dirPth string) error {
if exist, err := pathutil.IsPathExists(dirPth); err != nil {
return err
} else if exist {
if err := os.RemoveAll(dirPth); err != nil {
return err
}
}
return nil
}
// RemoveFile ...
func RemoveFile(pth string) error {
if exist, err := pathutil.IsPathExists(pth); err != nil {
return err
} else if exist {
if err := os.Remove(pth); err != nil {
return err
}
}
return nil
}<|fim▁end|> |
import (
"errors"
"os" |
<|file_name|>genomemapper_wrapper.py<|end_file_name|><|fim▁begin|>#! /usr/bin/python
"""
Runs GenomeMapper on single-end or paired-end data.<|fim▁hole|>import optparse, os, sys, tempfile
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option('', '--threads', dest='threads', help='The number of threads to run')
parser.add_option('', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format')
parser.add_option('', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format')
parser.add_option('', '--output', dest='output', help='The output file')
parser.add_option('', '--paired', dest='paired', help='Whether the data is single- or paired-end')
parser.add_option('', '--genomeSource', dest='genomeSource', help='The type of reference provided')
parser.add_option('', '--ref', dest='ref', help='The reference genome to use or index')
parser.add_option('', '--indexSettings', dest='index_settings', help='Whether or not indexing options are to be set')
parser.add_option('', '--params', dest='params', help='Whether to use default or specified parameters')
parser.add_option('', '--seedlength', dest='seedlength', help='GenomeMapper Index Seed Length')
parser.add_option('', '--alignseedlength', dest='alignseedlength', help='GenomeMapper Alignment Seed Length')
parser.add_option('', '--format', dest='format', help='Output format (bed or shore)')
parser.add_option('', '--maxmismatches', dest='maxmismatches', help='Maximal number of mismatches')
parser.add_option('', '--maxgaps', dest='maxgaps', help='Maximal number of gaps')
parser.add_option('', '--maxedits', dest='maxedits', help='Maximal number of edit operations')
parser.add_option('', '--reportall', dest='reportall', help='Report all hits')
(options, args) = parser.parse_args()
# index if necessary
if options.genomeSource == 'history':
# set up commands
if options.index_settings =='index_pre_set':
indexing_cmds = ''
else:
try:
indexing_cmds = '%s ' % \
(('','-s %s'%options.seedlength)[options.seedlength!='None' and options.seedlength>=1])
except ValueError:
indexing_cmds = ''
# make temp directory for placement of indices and copy reference file there
tmp_dir = tempfile.gettempdir()
try:
os.system('cp %s %s' % (options.ref, tmp_dir))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.ref = os.path.join(tmp_dir, os.path.split(options.ref)[1])
cmd1 = 'gmindex -v -i %s %s' % (options.ref, indexing_cmds)
try:
os.system(cmd1)
except Exception, erf:
stop_err('Error indexing reference sequence\n' + str(erf))
if options.params == 'pre_set':
aligning_cmds = '-v '
else:
try:
print options
aligning_cmds = '%s %s %s %s %s %s -v ' % \
(('','-f %s' % options.format)[options.format!='None'],
('','-a')[options.reportall!='None'],
('','-M %s' % options.maxmismatches)[options.maxmismatches!='None'],
('','-G %s' % options.maxgaps)[options.maxgaps!='None'],
('','-E %s' % options.maxedits)[options.maxedits!='None'],
('','-l %s' % options.alignseedlength)[options.alignseedlength!='None'])
except ValueError, erf:
stop_err('Something is wrong with the alignment parameters and the alignment could not be run\n' + str(erf))
# prepare actual aligning commands
if options.paired == 'paired':
print "Sorry, paired end alignments are not implemented yet"
return
#cmd2 = 'genomemapper %s %s -1 %s -2 %s > %s ' % (options.ref, options.input1, options.input2, options.output)
else:
cmd2 = 'genomemapper %s -i %s -q %s -o %s ' % (aligning_cmds, options.ref, options.input1, options.output)
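        # e.g. (illustrative values): genomemapper -f bed -M 4 -G 1 -v -i ref.fa -q reads.fastq -o out.bed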
# align
try:
print cmd2
os.system(cmd2)
except Exception, erf:
stop_err("Error aligning sequence\n" + str(erf))
if __name__=="__main__": __main__()<|fim▁end|> | """
|
<|file_name|>Cogs.py<|end_file_name|><|fim▁begin|>import os
from lib.base_plugin import BasePlugin
from lib.paths import SteamCloudPath, SteamGamesPath
class CogsPlugin(BasePlugin):
Name = "Cogs"
support_os = ["Windows"]<|fim▁hole|> def backup(self, _):
_.add_folder('Data', os.path.join(SteamCloudPath, '26500'), 'remote')
def restore(self, _):
_.restore_folder('Data', os.path.join(SteamCloudPath, '26500'), 'remote')
def detect(self):
if os.path.isdir(os.path.join(SteamGamesPath, 'cogs')):
return True
return False<|fim▁end|> | |
<|file_name|>server.rs<|end_file_name|><|fim▁begin|>#![allow(unstable)]
extern crate hyper;
extern crate test;
use test::Bencher;
use std::old_io::net::ip::Ipv4Addr;
use hyper::method::Method::Get;
use hyper::server::{Request, Response};
<|fim▁hole|>fn request(url: hyper::Url) {
let req = hyper::client::Request::new(Get, url).unwrap();
req.start().unwrap().send().unwrap().read_to_string().unwrap();
}
fn hyper_handle(_: Request, res: Response) {
let mut res = res.start().unwrap();
res.write_all(PHRASE).unwrap();
res.end().unwrap();
}
#[bench]
fn bench_hyper(b: &mut Bencher) {
let server = hyper::Server::http(Ipv4Addr(127, 0, 0, 1), 0);
let mut listener = server.listen(hyper_handle).unwrap();
let url = hyper::Url::parse(format!("http://{}", listener.socket).as_slice()).unwrap();
b.iter(|| request(url.clone()));
listener.close().unwrap();
}<|fim▁end|> | static PHRASE: &'static [u8] = b"Benchmarking hyper vs others!";
|
<|file_name|>SAPI_Locations.java<|end_file_name|><|fim▁begin|>/*
* This file is part of NucleusFramework for Bukkit, licensed under the MIT License (MIT).
*
* Copyright (c) JCThePants (www.jcwhatever.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.jcwhatever.nucleus.internal.managed.scripting.api;
import com.jcwhatever.nucleus.Nucleus;
import com.jcwhatever.nucleus.mixins.IDisposable;
import com.jcwhatever.nucleus.utils.PreCon;
import com.jcwhatever.nucleus.utils.coords.NamedLocation;
import org.bukkit.Location;
import java.util.Collection;
import javax.annotation.Nullable;
/**
* Sub script API for named locations that can be retrieved by scripts.
*/
public class SAPI_Locations implements IDisposable {
private boolean _isDisposed;
@Override
public boolean isDisposed() {
return _isDisposed;
}
@Override
public void dispose() {
_isDisposed = true;
}
/**
* Get a quest script location by name.
*
* @param name The name of the location.
*/
@Nullable
public Location get(String name) {
PreCon.notNullOrEmpty(name);
NamedLocation result = Nucleus.getScriptManager().getLocations().get(name);
if (result == null)
return null;
return result.toLocation();
}
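    // Script-side usage sketch (hypothetical binding name): a script evaluator
    // exposing this API as `locations` could call locations.get("spawn") and
    // receive a Bukkit Location, or null if no such named location exists.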
/**
* Get all script location objects.
*/
public Collection<NamedLocation> getScriptLocations() {
return Nucleus.getScriptManager().getLocations().getAll();
}
/**
* Get all script location objects.
*
* @param output The output collection to put results into.
*
* @return The output collection.
*/<|fim▁hole|> }
}<|fim▁end|> | public <T extends Collection<NamedLocation>> T getScriptLocations(T output) {
PreCon.notNull(output);
return Nucleus.getScriptManager().getLocations().getAll(output); |
<|file_name|>token.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::BinOpToken::*;
pub use self::Nonterminal::*;
pub use self::DelimToken::*;
pub use self::IdentStyle::*;
pub use self::Lit::*;
pub use self::Token::*;
use ast;
use ext::mtwt;
use ptr::P;
use util::interner::{RcStr, StrInterner};
use util::interner;
use serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fmt;
use std::mem;
use std::path::BytesContainer;
use std::rc::Rc;
#[allow(non_camel_case_types)]
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum BinOpToken {
Plus,
Minus,
Star,
Slash,
Percent,
Caret,
And,
Or,
Shl,
Shr,
}
impl Copy for BinOpToken {}
/// A delimeter token
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum DelimToken {
/// A round parenthesis: `(` or `)`
Paren,
/// A square bracket: `[` or `]`
Bracket,
/// A curly brace: `{` or `}`
Brace,
}
impl Copy for DelimToken {}
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum IdentStyle {
/// `::` follows the identifier with no whitespace in-between.
ModName,
Plain,
}
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum Lit {
Byte(ast::Name),
Char(ast::Name),
Integer(ast::Name),
Float(ast::Name),
Str_(ast::Name),
StrRaw(ast::Name, uint), /* raw str delimited by n hash symbols */
Binary(ast::Name),
BinaryRaw(ast::Name, uint), /* raw binary str delimited by n hash symbols */
}
impl Lit {
pub fn short_name(&self) -> &'static str {
match *self {
Byte(_) => "byte",
Char(_) => "char",
Integer(_) => "integer",
Float(_) => "float",
Str_(_) | StrRaw(..) => "str",
Binary(_) | BinaryRaw(..) => "binary str"
}
}
}
impl Copy for Lit {}
impl Copy for IdentStyle {}
#[allow(non_camel_case_types)]
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum Token {
/* Expression-operator symbols. */
Eq,
Lt,
Le,
EqEq,
Ne,
Ge,
Gt,
AndAnd,
OrOr,
Not,
Tilde,
BinOp(BinOpToken),
BinOpEq(BinOpToken),
/* Structural symbols */
At,
Dot,
DotDot,
DotDotDot,
Comma,
Semi,
Colon,
ModSep,
RArrow,
LArrow,
FatArrow,
Pound,
Dollar,
Question,
/// An opening delimeter, eg. `{`
OpenDelim(DelimToken),
/// A closing delimeter, eg. `}`
CloseDelim(DelimToken),
/* Literals */
Literal(Lit, Option<ast::Name>),
/* Name components */
Ident(ast::Ident, IdentStyle),
Underscore,
Lifetime(ast::Ident),
/* For interpolation */
Interpolated(Nonterminal),
// Can be expanded into several tokens.
/// Doc comment
DocComment(ast::Name),
// In left-hand-sides of MBE macros:
/// Parse a nonterminal (name to bind, name of NT, styles of their idents)
MatchNt(ast::Ident, ast::Ident, IdentStyle, IdentStyle),
// In right-hand-sides of MBE macros:
/// A syntactic variable that will be filled in by macro expansion.
SubstNt(ast::Ident, IdentStyle),
// Junk. These carry no data because we don't really care about the data
// they *would* carry, and don't really want to allocate a new ident for
// them. Instead, users could extract that from the associated span.
/// Whitespace
Whitespace,
/// Comment
Comment,
Shebang(ast::Name),
Eof,
}
impl Token {
/// Returns `true` if the token can appear at the start of an expression.
pub fn can_begin_expr(&self) -> bool {
match *self {
OpenDelim(_) => true,
Ident(_, _) => true,
Underscore => true,
Tilde => true,
Literal(_, _) => true,
Pound => true,
At => true,
Not => true,
BinOp(Minus) => true,
BinOp(Star) => true,
BinOp(And) => true,
BinOp(Or) => true, // in lambda syntax
OrOr => true, // in lambda syntax
ModSep => true,
Interpolated(NtExpr(..)) => true,
Interpolated(NtIdent(..)) => true,
Interpolated(NtBlock(..)) => true,
Interpolated(NtPath(..)) => true,
_ => false,
}
}
/// Returns `true` if the token is any literal
pub fn is_lit(&self) -> bool {
match *self {
Literal(_, _) => true,
_ => false,
}
}
/// Returns `true` if the token is an identifier.
pub fn is_ident(&self) -> bool {
match *self {
Ident(_, _) => true,
_ => false,
}
}
/// Returns `true` if the token is an interpolated path.
pub fn is_path(&self) -> bool {
match *self {
Interpolated(NtPath(..)) => true,
_ => false,
}
}
/// Returns `true` if the token is a path that is not followed by a `::`
/// token.
#[allow(non_upper_case_globals)]
pub fn is_plain_ident(&self) -> bool {
match *self {
Ident(_, Plain) => true,
_ => false,
}
}
/// Returns `true` if the token is a lifetime.
pub fn is_lifetime(&self) -> bool {
match *self {
Lifetime(..) => true,
_ => false,
}
}
/// Returns `true` if the token is either the `mut` or `const` keyword.
pub fn is_mutability(&self) -> bool {
self.is_keyword(keywords::Mut) ||
self.is_keyword(keywords::Const)
}
/// Maps a token to its corresponding binary operator.
pub fn to_binop(&self) -> Option<ast::BinOp> {
match *self {
BinOp(Star) => Some(ast::BiMul),
BinOp(Slash) => Some(ast::BiDiv),
BinOp(Percent) => Some(ast::BiRem),
BinOp(Plus) => Some(ast::BiAdd),
BinOp(Minus) => Some(ast::BiSub),
BinOp(Shl) => Some(ast::BiShl),
BinOp(Shr) => Some(ast::BiShr),
BinOp(And) => Some(ast::BiBitAnd),
BinOp(Caret) => Some(ast::BiBitXor),
BinOp(Or) => Some(ast::BiBitOr),
Lt => Some(ast::BiLt),
Le => Some(ast::BiLe),
Ge => Some(ast::BiGe),
Gt => Some(ast::BiGt),
EqEq => Some(ast::BiEq),
Ne => Some(ast::BiNe),
AndAnd => Some(ast::BiAnd),
OrOr => Some(ast::BiOr),
_ => None,
}
}
/// Returns `true` if the token is a given keyword, `kw`.
#[allow(non_upper_case_globals)]
pub fn is_keyword(&self, kw: keywords::Keyword) -> bool {
match *self {
Ident(sid, Plain) => kw.to_name() == sid.name,
_ => false,
}
}
/// Returns `true` if the token is either a special identifier, or a strict
/// or reserved keyword.
#[allow(non_upper_case_globals)]
pub fn is_any_keyword(&self) -> bool {
match *self {
Ident(sid, Plain) => {
let n = sid.name;
n == SELF_KEYWORD_NAME
|| n == STATIC_KEYWORD_NAME
|| n == SUPER_KEYWORD_NAME
|| STRICT_KEYWORD_START <= n
&& n <= RESERVED_KEYWORD_FINAL
},
_ => false
}
}
/// Returns `true` if the token may not appear as an identifier.
#[allow(non_upper_case_globals)]
pub fn is_strict_keyword(&self) -> bool {
match *self {
Ident(sid, Plain) => {
let n = sid.name;
n == SELF_KEYWORD_NAME
|| n == STATIC_KEYWORD_NAME
|| n == SUPER_KEYWORD_NAME
|| STRICT_KEYWORD_START <= n
&& n <= STRICT_KEYWORD_FINAL
},
Ident(sid, ModName) => {
let n = sid.name;
n != SELF_KEYWORD_NAME
&& n != SUPER_KEYWORD_NAME
&& STRICT_KEYWORD_START <= n
&& n <= STRICT_KEYWORD_FINAL
}
_ => false,
}
}
/// Returns `true` if the token is a keyword that has been reserved for
/// possible future use.
#[allow(non_upper_case_globals)]
pub fn is_reserved_keyword(&self) -> bool {
match *self {
Ident(sid, Plain) => {
let n = sid.name;
RESERVED_KEYWORD_START <= n
&& n <= RESERVED_KEYWORD_FINAL
},
_ => false,
}
}
/// Hygienic identifier equality comparison.
///
/// See `styntax::ext::mtwt`.
pub fn mtwt_eq(&self, other : &Token) -> bool {
match (self, other) {
(&Ident(id1,_), &Ident(id2,_)) | (&Lifetime(id1), &Lifetime(id2)) =>
mtwt::resolve(id1) == mtwt::resolve(id2),
_ => *self == *other
}
}
}
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash)]
/// For interpolation during macro expansion.
pub enum Nonterminal {
NtItem(P<ast::Item>),
NtBlock(P<ast::Block>),
NtStmt(P<ast::Stmt>),
NtPat(P<ast::Pat>),
NtExpr(P<ast::Expr>),
NtTy(P<ast::Ty>),
NtIdent(Box<ast::Ident>, IdentStyle),
/// Stuff inside brackets for attributes
NtMeta(P<ast::MetaItem>),
NtPath(Box<ast::Path>),
NtTT(P<ast::TokenTree>), // needs P'ed to break a circularity
}
impl fmt::Show for Nonterminal {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
NtItem(..) => f.pad("NtItem(..)"),
NtBlock(..) => f.pad("NtBlock(..)"),
NtStmt(..) => f.pad("NtStmt(..)"),
NtPat(..) => f.pad("NtPat(..)"),
NtExpr(..) => f.pad("NtExpr(..)"),
NtTy(..) => f.pad("NtTy(..)"),
NtIdent(..) => f.pad("NtIdent(..)"),
NtMeta(..) => f.pad("NtMeta(..)"),
NtPath(..) => f.pad("NtPath(..)"),
NtTT(..) => f.pad("NtTT(..)"),
}
}
}
// Get the first "argument"
macro_rules! first {
( $first:expr, $( $remainder:expr, )* ) => ( $first )
}
// Get the last "argument" (has to be done recursively to avoid phoney local ambiguity error)
macro_rules! last {
( $first:expr, $( $remainder:expr, )+ ) => ( last!( $( $remainder, )+ ) );
( $first:expr, ) => ( $first )
}
// In this macro, there is the requirement that the name (the number) must be monotonically
// increasing by one in the special identifiers, starting at 0; the same holds for the keywords,
// except starting from the next number instead of zero, and with the additional exception that
// special identifiers are *also* allowed (they are deduplicated in the important place, the
// interner), an exception which is demonstrated by "static" and "self".
macro_rules! declare_special_idents_and_keywords {(
// So now, in these rules, why is each definition parenthesised?
// Answer: otherwise we get a spurious local ambiguity bug on the "}"
pub mod special_idents {
$( ($si_name:expr, $si_static:ident, $si_str:expr); )*
}
pub mod keywords {
'strict:
$( ($sk_name:expr, $sk_variant:ident, $sk_str:expr); )*
'reserved:
$( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )*
}
) => {
static STRICT_KEYWORD_START: ast::Name = first!($( ast::Name($sk_name), )*);
static STRICT_KEYWORD_FINAL: ast::Name = last!($( ast::Name($sk_name), )*);
static RESERVED_KEYWORD_START: ast::Name = first!($( ast::Name($rk_name), )*);
static RESERVED_KEYWORD_FINAL: ast::Name = last!($( ast::Name($rk_name), )*);
pub mod special_idents {
use ast;
$(
#[allow(non_upper_case_globals)]
pub const $si_static: ast::Ident = ast::Ident {
name: ast::Name($si_name),
ctxt: 0,
};
)*
}
pub mod special_names {
use ast;
$(
#[allow(non_upper_case_globals)]
pub const $si_static: ast::Name = ast::Name($si_name);
)*
}
/// All the valid words that have meaning in the Rust language.
///
/// Rust keywords are either 'strict' or 'reserved'. Strict keywords may not
/// appear as identifiers at all. Reserved keywords are not used anywhere in
/// the language and may not appear as identifiers.
pub mod keywords {
pub use self::Keyword::*;
use ast;
pub enum Keyword {
$( $sk_variant, )*
$( $rk_variant, )*
}
impl Copy for Keyword {}
impl Keyword {
pub fn to_name(&self) -> ast::Name {
match *self {
$( $sk_variant => ast::Name($sk_name), )*
$( $rk_variant => ast::Name($rk_name), )*
}
}
}
}
fn mk_fresh_ident_interner() -> IdentInterner {
// The indices here must correspond to the numbers in
// special_idents, in Keyword to_name(), and in static
// constants below.
let mut init_vec = Vec::new();
$(init_vec.push($si_str);)*
$(init_vec.push($sk_str);)*
$(init_vec.push($rk_str);)*
interner::StrInterner::prefill(init_vec.as_slice())
}
}}
// If the special idents get renumbered, remember to modify these two as appropriate
pub const SELF_KEYWORD_NAME: ast::Name = ast::Name(SELF_KEYWORD_NAME_NUM);
const STATIC_KEYWORD_NAME: ast::Name = ast::Name(STATIC_KEYWORD_NAME_NUM);
const SUPER_KEYWORD_NAME: ast::Name = ast::Name(SUPER_KEYWORD_NAME_NUM);
pub const SELF_KEYWORD_NAME_NUM: u32 = 1;
const STATIC_KEYWORD_NAME_NUM: u32 = 2;
const SUPER_KEYWORD_NAME_NUM: u32 = 3;
// NB: leaving holes in the ident table is bad! a different ident will get
// interned with the id from the hole, but it will be between the min and max
// of the reserved words, and thus tagged as "reserved".
declare_special_idents_and_keywords! {
pub mod special_idents {
// These ones are statics
(0, invalid, "");
(super::SELF_KEYWORD_NAME_NUM, self_, "self");
(super::STATIC_KEYWORD_NAME_NUM, statik, "static");
(super::SUPER_KEYWORD_NAME_NUM, super_, "super");
(4, static_lifetime, "'static");
// for matcher NTs
(5, tt, "tt");
(6, matchers, "matchers");
// outside of libsyntax
(7, clownshoe_abi, "__rust_abi");
(8, opaque, "<opaque>");
(9, unnamed_field, "<unnamed_field>");
(10, type_self, "Self");
(11, prelude_import, "prelude_import");
}
pub mod keywords {
// These ones are variants of the Keyword enum
'strict:
(12, As, "as");
(13, Break, "break");
(14, Crate, "crate");
(15, Else, "else");
(16, Enum, "enum");
(17, Extern, "extern");
(18, False, "false");
(19, Fn, "fn");
(20, For, "for");
(21, If, "if");
(22, Impl, "impl");
(23, In, "in");
(24, Let, "let");
(25, Loop, "loop");
(26, Match, "match");
(27, Mod, "mod");
(28, Move, "move");
(29, Mut, "mut");
(30, Pub, "pub");
(31, Ref, "ref");
(32, Return, "return");
// Static and Self are also special idents (prefill de-dupes)
(super::STATIC_KEYWORD_NAME_NUM, Static, "static");
(super::SELF_KEYWORD_NAME_NUM, Self, "self");
(33, Struct, "struct");
(super::SUPER_KEYWORD_NAME_NUM, Super, "super");
(34, True, "true");
(35, Trait, "trait");
(36, Type, "type");
(37, Unsafe, "unsafe");
(38, Use, "use");
(39, Virtual, "virtual");
(40, While, "while");
(41, Continue, "continue");
(42, Proc, "proc");
(43, Box, "box");
(44, Const, "const");
(45, Where, "where");
'reserved:
(46, Alignof, "alignof");
(47, Be, "be");
(48, Offsetof, "offsetof");
(49, Priv, "priv");
(50, Pure, "pure");
(51, Sizeof, "sizeof");
(52, Typeof, "typeof");
(53, Unsized, "unsized");
(54, Yield, "yield");
(55, Do, "do");
(56, Abstract, "abstract");
(57, Final, "final");
(58, Override, "override");
}
}
// looks like we can get rid of this completely...
pub type IdentInterner = StrInterner;
// if an interner exists in TLS, return it. Otherwise, prepare a
// fresh one.
// FIXME(eddyb) #8726 This should probably use a task-local reference.
pub fn get_ident_interner() -> Rc<IdentInterner> {
thread_local!(static KEY: Rc<::parse::token::IdentInterner> = {
Rc::new(mk_fresh_ident_interner())
})
KEY.with(|k| k.clone())
}
/// Reset the ident interner to its initial state.
pub fn reset_ident_interner() {
let interner = get_ident_interner();
interner.reset(mk_fresh_ident_interner());
}
/// Represents a string stored in the task-local interner. Because the
/// interner lives for the life of the task, this can be safely treated as an
/// immortal string, as long as it never crosses between tasks.
///
/// FIXME(pcwalton): You must be careful about what you do in the destructors
/// of objects stored in TLS, because they may run after the interner is
/// destroyed. In particular, they must not access string contents. This can
/// be fixed in the future by just leaking all strings until task death
/// somehow.
#[deriving(Clone, PartialEq, Hash, PartialOrd, Eq, Ord)]<|fim▁hole|>
impl InternedString {
#[inline]
pub fn new(string: &'static str) -> InternedString {
InternedString {
string: RcStr::new(string),
}
}
#[inline]
fn new_from_rc_str(string: RcStr) -> InternedString {
InternedString {
string: string,
}
}
#[inline]
pub fn get<'a>(&'a self) -> &'a str {
self.string.as_slice()
}
}
impl BytesContainer for InternedString {
fn container_as_bytes<'a>(&'a self) -> &'a [u8] {
// FIXME #12938: This is a workaround for the incorrect signature
// of `BytesContainer`, which is itself a workaround for the lack of
// DST.
unsafe {
let this = self.get();
mem::transmute::<&[u8],&[u8]>(this.container_as_bytes())
}
}
}
impl fmt::Show for InternedString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.string.as_slice())
}
}
#[allow(deprecated)]
impl<'a> Equiv<&'a str> for InternedString {
fn equiv(&self, other: & &'a str) -> bool {
(*other) == self.string.as_slice()
}
}
impl<'a> PartialEq<&'a str> for InternedString {
#[inline(always)]
fn eq(&self, other: & &'a str) -> bool {
PartialEq::eq(self.string.as_slice(), *other)
}
#[inline(always)]
fn ne(&self, other: & &'a str) -> bool {
PartialEq::ne(self.string.as_slice(), *other)
}
}
impl<'a> PartialEq<InternedString > for &'a str {
#[inline(always)]
fn eq(&self, other: &InternedString) -> bool {
PartialEq::eq(*self, other.string.as_slice())
}
#[inline(always)]
fn ne(&self, other: &InternedString) -> bool {
PartialEq::ne(*self, other.string.as_slice())
}
}
impl<D:Decoder<E>, E> Decodable<D, E> for InternedString {
fn decode(d: &mut D) -> Result<InternedString, E> {
Ok(get_name(get_ident_interner().intern(
try!(d.read_str()).as_slice())))
}
}
impl<S:Encoder<E>, E> Encodable<S, E> for InternedString {
fn encode(&self, s: &mut S) -> Result<(), E> {
s.emit_str(self.string.as_slice())
}
}
/// Returns the string contents of a name, using the task-local interner.
#[inline]
pub fn get_name(name: ast::Name) -> InternedString {
let interner = get_ident_interner();
InternedString::new_from_rc_str(interner.get(name))
}
/// Returns the string contents of an identifier, using the task-local
/// interner.
#[inline]
pub fn get_ident(ident: ast::Ident) -> InternedString {
get_name(ident.name)
}
/// Interns and returns the string contents of an identifier, using the
/// task-local interner.
#[inline]
pub fn intern_and_get_ident(s: &str) -> InternedString {
get_name(intern(s))
}
/// Maps a string to its interned representation.
#[inline]
pub fn intern(s: &str) -> ast::Name {
get_ident_interner().intern(s)
}
/// gensym's a new uint, using the current interner.
#[inline]
pub fn gensym(s: &str) -> ast::Name {
get_ident_interner().gensym(s)
}
/// Maps a string to an identifier with an empty syntax context.
#[inline]
pub fn str_to_ident(s: &str) -> ast::Ident {
ast::Ident::new(intern(s))
}
/// Maps a string to a gensym'ed identifier.
#[inline]
pub fn gensym_ident(s: &str) -> ast::Ident {
ast::Ident::new(gensym(s))
}
// create a fresh name that maps to the same string as the old one.
// note that this guarantees that str_ptr_eq(ident_to_string(src),interner_get(fresh_name(src)));
// that is, that the new name and the old one are connected to ptr_eq strings.
pub fn fresh_name(src: &ast::Ident) -> ast::Name {
let interner = get_ident_interner();
interner.gensym_copy(src.name)
// following: debug version. Could work in final except that it's incompatible with
// good error messages and uses of struct names in ambiguous could-be-binding
// locations. Also definitely destroys the guarantee given above about ptr_eq.
/*let num = rand::task_rng().gen_uint_range(0,0xffff);
gensym(format!("{}_{}",ident_to_string(src),num))*/
}
// create a fresh mark.
pub fn fresh_mark() -> ast::Mrk {
gensym("mark").uint() as u32
}
#[cfg(test)]
mod test {
use super::*;
use ast;
use ext::mtwt;
fn mark_ident(id : ast::Ident, m : ast::Mrk) -> ast::Ident {
ast::Ident { name: id.name, ctxt:mtwt::apply_mark(m, id.ctxt) }
}
#[test] fn mtwt_token_eq_test() {
assert!(Gt.mtwt_eq(&Gt));
let a = str_to_ident("bac");
let a1 = mark_ident(a,92);
assert!(Ident(a, ModName).mtwt_eq(&Ident(a1, Plain)));
}
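// Illustrative example (a sketch added for clarity, not in the original
// source): interning the same string twice yields the same Name, while
// gensym always mints a distinct Name for the same string.
#[test] fn intern_and_gensym_example() {
let a = intern("foo");
let b = intern("foo");
assert_eq!(a, b);
assert!(gensym("foo") != a);
}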
}<|fim▁end|> | pub struct InternedString {
string: RcStr,
} |
<|file_name|>textdecoder.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::TextDecoderBinding;
use dom::bindings::codegen::Bindings::TextDecoderBinding::TextDecoderMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::str::USVString;
use dom::bindings::trace::JSTraceable;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::Encoding;
use encoding::label::encoding_from_whatwg_label;
use encoding::types::{EncodingRef, DecoderTrap};
use js::jsapi::JS_GetObjectAsArrayBufferView;
use js::jsapi::{JSContext, JSObject};
use std::borrow::ToOwned;
use std::ptr;
use std::slice;
#[dom_struct]
pub struct TextDecoder {
reflector_: Reflector,
#[ignore_heap_size_of = "Defined in rust-encoding"]
encoding: EncodingRef,
fatal: bool,
}
impl TextDecoder {
fn new_inherited(encoding: EncodingRef, fatal: bool) -> TextDecoder {
TextDecoder {
reflector_: Reflector::new(),
encoding: encoding,
fatal: fatal,
}
}
fn make_range_error() -> Fallible<Root<TextDecoder>> {
Err(Error::Range("The given encoding is not supported.".to_owned()))
}
pub fn new(global: GlobalRef, encoding: EncodingRef, fatal: bool) -> Root<TextDecoder> {
reflect_dom_object(box TextDecoder::new_inherited(encoding, fatal),
global,
TextDecoderBinding::Wrap)
}
/// https://encoding.spec.whatwg.org/#dom-textdecoder
pub fn Constructor(global: GlobalRef,
label: DOMString,
options: &TextDecoderBinding::TextDecoderOptions)
-> Fallible<Root<TextDecoder>> {
let encoding = match encoding_from_whatwg_label(&label) {
None => return TextDecoder::make_range_error(),
Some(enc) => enc
};
// The rust-encoding crate has WHATWG compatibility, so we are
// guaranteed to have a whatwg_name because we successfully got
// the encoding from encoding_from_whatwg_label.
// Use match + panic! instead of unwrap for better error message
match encoding.whatwg_name() {
None => panic!("Label {} fits valid encoding without valid name", label),
Some("replacement") => return TextDecoder::make_range_error(),
_ => ()
};
Ok(TextDecoder::new(global, encoding, options.fatal))
}
}
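// Illustrative flow (added note, not in the original file): from script,
// `new TextDecoder("utf-8")` reaches Constructor with the label "utf-8";
// encoding_from_whatwg_label resolves the label, and Decode() then drives
// the rust-encoding decoder with DecoderTrap::Strict when `fatal` is set
// and DecoderTrap::Replace otherwise.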
impl TextDecoderMethods for TextDecoder {
// https://encoding.spec.whatwg.org/#dom-textdecoder-encoding
fn Encoding(&self) -> DOMString {
self.encoding.whatwg_name().unwrap().to_owned()
}
// https://encoding.spec.whatwg.org/#dom-textdecoder-fatal
fn Fatal(&self) -> bool {
self.fatal
}
#[allow(unsafe_code)]
// https://encoding.spec.whatwg.org/#dom-textdecoder-decode
fn Decode(&self, _cx: *mut JSContext, input: Option<*mut JSObject>)
-> Fallible<USVString> {
let input = match input {
Some(input) => input,
None => return Ok(USVString("".to_owned())),
};
let mut length = 0;
let mut data = ptr::null_mut();
if unsafe { JS_GetObjectAsArrayBufferView(input, &mut length, &mut data).is_null() } {
return Err(Error::Type("Argument to TextDecoder.decode is not an ArrayBufferView".to_owned()));
}
let buffer = unsafe {
slice::from_raw_parts(data as *const _, length as usize)
};
let trap = if self.fatal {<|fim▁hole|> DecoderTrap::Replace
};
match self.encoding.decode(buffer, trap) {
Ok(s) => Ok(USVString(s)),
Err(_) => Err(Error::Type("Decoding failed".to_owned())),
}
}
}<|fim▁end|> | DecoderTrap::Strict
} else { |
<|file_name|>mutability-inherits-through-fixed-length-vec.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass<|fim▁hole|> let mut ints = [0; 32];
ints[0] += 1;
assert_eq!(ints[0], 1);
}
fn test2() {
let mut ints = [0; 32];
for i in &mut ints { *i += 22; }
for i in &ints { assert_eq!(*i, 22); }
}
pub fn main() {
test1();
test2();
}<|fim▁end|> |
fn test1() { |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an<|fim▁hole|>// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Contains record-based API for reading Parquet files.
mod api;
pub mod reader;
mod triplet;
pub use self::api::{
List, ListAccessor, Map, MapAccessor, Row, RowAccessor, RowFormatter,
};<|fim▁end|> | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
<|file_name|>example.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>pub fn after(start: DateTime) -> DateTime {
start + Duration::seconds(1_000_000_000)
}<|fim▁end|> | use time::{Duration, PrimitiveDateTime as DateTime};
|
<|file_name|>htmldialogelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLDialogElementBinding;
use dom::bindings::codegen::Bindings::HTMLDialogElementBinding::HTMLDialogElementMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::Element;
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::node::{Node, window_from_node};
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLDialogElement {
htmlelement: HTMLElement,
return_value: DOMRefCell<DOMString>,
}
impl HTMLDialogElement {
fn new_inherited(local_name: LocalName,
prefix: Option<Prefix>,
document: &Document) -> HTMLDialogElement {
HTMLDialogElement {
htmlelement:
HTMLElement::new_inherited(local_name, prefix, document),
return_value: DOMRefCell::new(DOMString::new()),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<Prefix>,
document: &Document) -> Root<HTMLDialogElement> {
Node::reflect_node(box HTMLDialogElement::new_inherited(local_name, prefix, document),
document,
HTMLDialogElementBinding::Wrap)
}
}
impl HTMLDialogElementMethods for HTMLDialogElement {
// https://html.spec.whatwg.org/multipage/#dom-dialog-open
make_bool_getter!(Open, "open");
<|fim▁hole|>
// https://html.spec.whatwg.org/multipage/#dom-dialog-returnvalue
fn ReturnValue(&self) -> DOMString {
let return_value = self.return_value.borrow();
return_value.clone()
}
// https://html.spec.whatwg.org/multipage/#dom-dialog-returnvalue
fn SetReturnValue(&self, return_value: DOMString) {
*self.return_value.borrow_mut() = return_value;
}
// https://html.spec.whatwg.org/multipage/#dom-dialog-close
fn Close(&self, return_value: Option<DOMString>) {
let element = self.upcast::<Element>();
let target = self.upcast::<EventTarget>();
let win = window_from_node(self);
// Step 1 & 2
if element.remove_attribute(&ns!(), &local_name!("open")).is_none() {
return;
}
// Step 3
if let Some(new_value) = return_value {
*self.return_value.borrow_mut() = new_value;
}
// TODO: Step 4 implement pending dialog stack removal
// Step 5
win.dom_manipulation_task_source().queue_simple_event(target, atom!("close"), &win);
}
}<|fim▁end|> | // https://html.spec.whatwg.org/multipage/#dom-dialog-open
make_bool_setter!(SetOpen, "open"); |
<|file_name|>material.rs<|end_file_name|><|fim▁begin|>//many more properties
//http://en.wikipedia.org/wiki/List_of_materials_properties
pub struct Material{
opacity:u8,<|fim▁hole|> density:u8,//specific weight
specularity:u8,
refraction:u8,
hardness:u8,
}
impl Material{
pub fn new(opacity:u8, specularity:u8, density:u8, refraction:u8, hardness:u8)->Material{
Material{opacity:opacity, density:density, specularity:specularity, refraction:refraction, hardness:hardness}
}
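// Usage sketch (illustrative): let glass = Material::new(10, 200, 60, 150, 120);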
}<|fim▁end|> | |
<|file_name|>else-if-brace-style-closing-next-line.rs<|end_file_name|><|fim▁begin|>// rustfmt-single_line_if_else_max_width: 0
// rustfmt-else_if_brace_style: ClosingNextLine
fn main() {
if false
{
();
();
}
if false // lone if comment
{
();
();
}
let a =
if 0 > 1 {
unreachable!()
}
else<|fim▁hole|>
if true
{
();
} else if false {
();
();
}
else {
();
();
();
}
if true // else-if-chain if comment
{
();
}
else if false // else-if-chain else-if comment
{
();
();
} else // else-if-chain else comment
{
();
();
();
}
}<|fim▁end|> | {
0x0
}; |
<|file_name|>toString.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>import { isString, isNumber } from '../is';
/**
* @param {any} value
* @returns {string};
*/
export const toString = (value: any): string => {
switch (true) {
case isString(value):
return value;
case isNumber(value):
return String(value);
}
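// No case matched: values that are neither strings nor numbers
// (e.g. null, undefined, objects) normalize to the empty string below.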
return '';
};<|fim▁end|> | |
<|file_name|>script.js<|end_file_name|><|fim▁begin|>$(document).ready(function () {
$(window).on('load', function() {
let pathname = window.location.pathname.split('/')
// console.log(pathname)
if(pathname[1]==='operador') {
$('#home').addClass('active')
let action = pathname[2].split('-')
if (action[0]==='update' || action[0]==='perfil') {
$('#home').removeClass('active');
}
// console.log(action[0], action[1])
if(action[1] ==='paciente' || action[1] ==='pacientes') {
$('#home').removeClass('active')
$('#pacientes').removeClass('collapsed');
$('#pacientes').addClass('active');
$('#subPaciente').addClass('in');
if (action[0] ==='cadastrar') {
$('#menu_cadastrar_paciente').addClass('active')
}
if (action[0] ==='pesquisar') {
$('#menu_pesquisar_paciente').addClass('active')
}
if (action[0] ==='alterar') {
$('#menu_alterar_paciente').removeClass('hidden')
$('#menu_alterar_paciente').addClass('active')
}
} else {
if (action[1] ==='consulta' || action[1] ==='consultas') {
$('#home').removeClass('active')
$('#consultas').removeClass('collapsed');
$('#consultas').addClass('active');
$('#subConsulta').addClass('in');
if (action[0] ==='agendar') {
$('#menu_agendar_consulta').removeClass('hidden');
$('#menu_agendar_consulta').addClass('active');
}
if (action[0] ==='buscar') {
$('#menu_pesquisar_consulta').addClass('active');
}
if (action[0] ==='listagem') {
$('#menu_lista_consulta').addClass('active');
}
} else {
if (action[1] ==='sucesso') {
$('#home').removeClass('active')
$('#consultas').removeClass('collapsed');
$('#consultas').addClass('active');
$('#subConsulta').addClass('in');
}
}
}
} else {
if (pathname[1]==='administrador') {
// $('#home').addClass('active')
// $('#home').removeClass('active')
switch (pathname[2]) {
case 'atendentes':
$('#home').removeClass('active')
$('#atendentes').removeClass('collapsed')
$('#atendentes').addClass('active')
break;
case 'operadores':
$('#home').removeClass('active')
$('#operadores').removeClass('collapsed')
$('#operadores').addClass('active')
break;
case 'administradores':
$('#home').removeClass('active')
$('#administradores').removeClass('collapsed')
$('#administradores').addClass('active')
break;
case 'medicos':
$('#home').removeClass('active')
$('#medicos').removeClass('collapsed')
$('#medicos').addClass('active')
break;
case 'especialidades':
$('#home').removeClass('active')
$('#especialidades').removeClass('collapsed')
$('#especialidades').addClass('active')
break;
case 'relatorio-diario':
$('#home').removeClass('active')
$('#relatorios').removeClass('collapsed')
$('#relatorios').addClass('active')
$('#subRelatios').addClass('in')
break;
case 'relatorio-mensais':
$('#home').removeClass('active')
$('#relatorios').removeClass('collapsed')
$('#relatorios').addClass('active')
$('#subRelatios').addClass('in');
break;
}
}<|fim▁hole|>
$('#form-change-password').submit(function() {
$('.loading').fadeOut(700).removeClass('hidden');
});
$('#form_login').submit(function() {
$('.loading').fadeOut(700).removeClass('hidden');
});
$('#cancel').click(function() {
$('.loading').fadeOut(700).removeClass('hidden');
});
// function to fetch the select options for doctors (medicos) by specialty:
$('#especialidade').change(function () {
var idEspecialidade = $(this).val();
$.get('/operador/get-medicos/' + idEspecialidade, function (medicos) {
$('#medico').empty();
$('#medico').append('<option value="" disabled selected>Selecione...</option>');
$.each(medicos, function (key, medico) {
$('#medico').append('<option value="'+medico.id_medico+'">'+medico.nome_medico+'</option>');
});
$('#medico').prop("disabled", false);
});
});
// function to fetch the select options for dates by doctor and specialty:
$('#medico').change(function () {
var idEspecialidade = $('#especialidade').val();
var idMedico = $(this).val();
$.get('/operador/especialidade/'+idEspecialidade+'/medico/'+idMedico, function (calendarios) {
$('#periodo').empty();
$('#vagas').attr('value', '');
$('#local_id').attr('value', '');
$('#local_nome_fantasia').attr('value', '');
$('#data_consulta').empty();
$('#data_consulta').append('<option value="" disabled selected>Selecione...</option>');
$.each(calendarios, function (key, calendario) {
//$('#data_consulta').append('<option value="'+calendario.id+'"> <?php date("d/m/Y", strtotime('+calendario.data+')) ?> </option>');
var data = moment(calendario.data).format('DD/MM/YYYY');
$('#data_consulta').append('<option value="'+calendario.id_calendario+'"><time>'+data+'</time></option>');
});
$('#data_consulta').prop("disabled", false);
});
});
$('#data_consulta').change(function () {
var idCaleandario = $(this).val();
$.get('/operador/periodos/'+idCaleandario, function (periodos) {
$('#periodo').empty();
$('#vagas').attr('value', '');
$('#local_id').attr('value', '');
$('#local_nome_fantasia').attr('value', '');
$('#periodo').append('<option value="" disabled selected>Selecione...</option>');
$.each(periodos, function (key, periodo) {
$('#periodo').append('<option value="'+periodo.id_periodo+'">'+periodo.nome+'</option>');
});
$('#periodo').prop("disabled", false);
});
});
$('#periodo').change(function () {
var idPeriodo = $(this).val();
$.get('/operador/vagas-disponiveis/'+idPeriodo, function (vagas) {
$('#vagas').attr('value', vagas.vagas_disponiveis);
});
$.get('/operador/local/'+idPeriodo, function (local) {
$('#local_id').attr('value', local.id_local);
$('#local_nome_fantasia').attr('value', local.nome_fantasia);
});
});
// loading DIV on page load:
$('.loading').fadeOut(700).addClass('hidden');
$('#search_type').change(function () {
var option = $(this).val();
if (option == 1) {
$('.fields_filtrar').val('');
$('#div_number_cpf').addClass('hidden');
$('#div_date_nasc').addClass('hidden');
$('#div_number_cns').removeClass('hidden');
} else {
if (option == 2) {
$('.fields_filtrar').val('');
$('#div_number_cns').addClass('hidden');
$('#div_date_nasc').addClass('hidden');
$('#div_number_cpf').removeClass('hidden');
} else {
$('.fields_filtrar').val('');
$('#div_number_cns').addClass('hidden');
$('#div_number_cpf').addClass('hidden');
$('#div_date_nasc').removeClass('hidden');
}
}
});
$('#form_filtro-paciente').submit(function () {
$('#numero_cns').unmask();
$('#cpf').unmask();
});
});<|fim▁end|> | }
}) |
<|file_name|>submaker_supersu.py<|end_file_name|><|fim▁begin|>from .submaker import Submaker
import zipfile
import os
import shutil
import logging
logger = logging.getLogger(__name__)
class SuperSuSubmaker(Submaker):
def make(self, workDir):
supersuZipProp = self.getTargetConfigProperty("root.methods.supersu.path")
assert supersuZipProp.getValue(), "Must set %s to the supersu zip file" % supersuZipProp.getKey()
includeApk = self.getTargetConfigValue("root.methods.supersu.include_apk", True)
includeArchs = set(self.getTargetConfigValue("root.methods.supersu.include_archs", []))
superSuTargetRelativePath = "supersu"
supersuTargetPath = os.path.join(workDir, superSuTargetRelativePath)
postinstFilePath = os.path.join(supersuTargetPath, "supersu_installer_includer")
supersuOriginalUpdatescriptPath = os.path.join(supersuTargetPath, "supersu_installer.sh")
newSuperSuZipPath = os.path.join(supersuTargetPath, "supersu.zip")
superSuZipTmpExtract = "/tmp/supersu.zip"
superSuUpdatescriptTmpExtract = "/tmp/supersu_installer.sh"
superuserApkPath = os.path.join("common", "Superuser.apk")
with self.newtmpWorkDir() as tmpDir:
with zipfile.ZipFile(supersuZipProp.resolveAsRelativePath(), "r") as z:
z.extractall(tmpDir)
os.mkdir(os.path.join(workDir, "supersu"))
archs = set(
[f for f in os.listdir(tmpDir) if not f in ("common", "META-INF")]
)
unsupportedArchs = includeArchs.difference(archs)
if len(unsupportedArchs):
unsupportedArchs = list(unsupportedArchs)
raise ValueError("Can't find archs: [%s] in supersu" % (", ".join(unsupportedArchs)))
targetArchs = includeArchs if len(includeArchs) else archs
newSuperSuZip = zipfile.ZipFile(newSuperSuZipPath, "w")
<|fim▁hole|> os.remove(os.path.join(tmpDir, superuserApkPath))
self.__addDirToZip(newSuperSuZip, os.path.join(tmpDir, "common"), "common")
if self.getMaker().getConfig().isMakeable("update.busybox"):
#process file, with busybox onboard in assumption
with open(os.path.join(tmpDir, "META-INF/com/google/android/update-binary"), "r") as f:
with open(supersuOriginalUpdatescriptPath, "w") as targetF:
for l in f.readlines():
if l.startswith("#!"):
targetF.write("#!" + self.getTargetConfigValue("root.methods.supersu.sh", "/system/bin/sh") + "\n")
else:
targetF.write(l)
else:
shutil.copy(os.path.join(tmpDir, "META-INF/com/google/android/update-binary"), supersuOriginalUpdatescriptPath)
postInstscript = "ui_print(\"Installing SuperSU..\");\n"
postInstscript += "run_program(\"%s\", \"1\", \"stdout\", \"%s\");" % (superSuUpdatescriptTmpExtract, superSuZipTmpExtract)
with open(postinstFilePath, "w") as postinstFile:
postinstFile.write(postInstscript)
superSuConfig = supersuZipProp.getConfig()
currPostInst = superSuConfig.get("script.post", [], directOnly=True)
currPostInst.append(postinstFilePath)
superSuConfig.set("update.script.post", currPostInst)
self.setValue("update.files.add." + newSuperSuZipPath.replace(workDir, "").replace(".", "\.") , {
"destination": superSuZipTmpExtract
})
self.setValue("update.files.add." + supersuOriginalUpdatescriptPath.replace(workDir, "").replace(".", "\."), {
"destination": superSuUpdatescriptTmpExtract,
"mode": "0755",
"uid": "0",
"gid": "0"
})
def __addDirToZip(self, zipFile, dirPath, zipRoot):
zipFile.write(dirPath, zipRoot)
for f in os.listdir(dirPath):
src = os.path.join(dirPath, f)
dest = os.path.join(zipRoot, f)
if os.path.isdir(src):
self.__addDirToZip(zipFile, src, dest)
else:
zipFile.write(src, dest)<|fim▁end|> | for arch in targetArchs:
self.__addDirToZip(newSuperSuZip, os.path.join(tmpDir, arch), arch)
if not includeApk: |
<|file_name|>saxutils.py<|end_file_name|><|fim▁begin|>"""
A library of useful helper classes to the saxlib classes, for the
convenience of application and driver writers.
$Id: saxutils.py,v 1.19 2001/03/20 07:19:46 loewis Exp $
"""
import types, sys, urllib, urlparse, os, string
import handler, _exceptions, xmlreader
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError: # 1.5 compatibility:UnicodeType not defined
_StringTypes = [types.StringType]
<|fim▁hole|>
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = string.replace(data, "&", "&")
data = string.replace(data, "<", "<")
data = string.replace(data, ">", ">")
for chars, entity in entities.items():
data = string.replace(data, chars, entity)
return data
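# Example (illustrative, added for clarity):
# escape("a < b & c") returns "a &lt; b &amp; c"
# escape("a\nb", {"\n": "&#10;"}) additionally maps the newline entity.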
# --- DefaultHandler
class DefaultHandler(handler.EntityResolver, handler.DTDHandler,
handler.ContentHandler, handler.ErrorHandler):
"""Default base class for SAX2 event handlers. Implements empty
methods for all callback methods, which can be overridden by
application implementors. Replaces the deprecated SAX1 HandlerBase
class."""
# --- Location
class Location:
"""Represents a location in an XML entity. Initialized by being passed
a locator, from which it reads off the current location, which is then
stored internally."""
def __init__(self, locator):
self.__col = locator.getColumnNumber()
self.__line = locator.getLineNumber()
self.__pubid = locator.getPublicId()
self.__sysid = locator.getSystemId()
def getColumnNumber(self):
return self.__col
def getLineNumber(self):
return self.__line
def getPublicId(self):
return self.__pubid
def getSystemId(self):
return self.__sysid
# --- ErrorPrinter
class ErrorPrinter:
"A simple class that just prints error messages to standard out."
def __init__(self, level=0, outfile=sys.stderr):
self._level = level
self._outfile = outfile
def warning(self, exception):
if self._level <= 0:
self._outfile.write("WARNING in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def error(self, exception):
if self._level <= 1:
self._outfile.write("ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def fatalError(self, exception):
if self._level <= 2:
self._outfile.write("FATAL ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def __getpos(self, exception):
if isinstance(exception, _exceptions.SAXParseException):
return "%s:%s:%s" % (exception.getSystemId(),
exception.getLineNumber(),
exception.getColumnNumber())
else:
return "<unknown>"
# --- ErrorRaiser
class ErrorRaiser:
"A simple class that just raises the exceptions it is passed."
def __init__(self, level = 0):
self._level = level
def error(self, exception):
if self._level <= 1:
raise exception
def fatalError(self, exception):
if self._level <= 2:
raise exception
def warning(self, exception):
if self._level <= 0:
raise exception
# --- AttributesImpl now lives in xmlreader
from xmlreader import AttributesImpl
# --- XMLGenerator is the SAX2 ContentHandler for writing back XML
try:
import codecs
def _outputwrapper(stream,encoding):
writerclass = codecs.lookup(encoding)[3]
return writerclass(stream)
except ImportError: # 1.5 compatibility: fall back to do-nothing
def _outputwrapper(stream,encoding):
return stream
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1"):
if out is None:
import sys
out = sys.stdout
handler.ContentHandler.__init__(self)
self._out = _outputwrapper(out,encoding)
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
# ContentHandler methods
def startDocument(self):
self._out.write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._out.write('<' + name)
for (name, value) in attrs.items():
self._out.write(' %s="%s"' % (name, escape(value)))
self._out.write('>')
def endElement(self, name):
self._out.write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
if name[0] is None:
name = name[1]
elif self._current_context[name[0]] is None:
# default namespace
name = name[1]
else:
name = self._current_context[name[0]] + ":" + name[1]
self._out.write('<' + name)
for k,v in self._undeclared_ns_maps:
if k is None:
self._out.write(' xmlns="%s"' % v)
else:
self._out.write(' xmlns:%s="%s"' % (k,v))
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
name = self._current_context[name[0]] + ":" + name[1]
self._out.write(' %s="%s"' % (name, escape(value)))
self._out.write('>')
def endElementNS(self, name, qname):
# XXX: if qname is not None, we better use it.
# Python 2.0b2 requires us to use the recorded prefix for
# name[0], though
if name[0] is None:
qname = name[1]
elif self._current_context[name[0]] is None:
qname = name[1]
else:
qname = self._current_context[name[0]] + ":" + name[1]
self._out.write('</%s>' % qname)
def characters(self, content):
self._out.write(escape(content))
def ignorableWhitespace(self, content):
self._out.write(content)
def processingInstruction(self, target, data):
self._out.write('<?%s %s?>' % (target, data))
# --- ContentGenerator is the SAX1 DocumentHandler for writing back XML
class ContentGenerator(XMLGenerator):
def characters(self, str, start, end):
# In SAX1, characters receives start and end; in SAX2, it receives
# a string. For plain strings, we may want to use a buffer object.
return XMLGenerator.characters(self, str[start:start+end])
# --- XMLFilterImpl
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# FIXME: remove this backward compatibility hack when not needed anymore
XMLFilterImpl = XMLFilterBase
# --- BaseIncrementalParser
class BaseIncrementalParser(xmlreader.IncrementalParser):
"""This class implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def parse(self, source):
source = prepare_input_source(source)
self.prepareParser(source)
self._cont_handler.startDocument()
# FIXME: what about char-stream?
inf = source.getByteStream()
buffer = inf.read(16384)
while buffer != "":
self.feed(buffer)
buffer = inf.read(16384)
self.close()
self.reset()
self._cont_handler.endDocument()
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
# --- Utility functions
def prepare_input_source(source, base = ""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if type(source) in _StringTypes:
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
if os.path.isfile(sysid):
basehead = os.path.split(os.path.normpath(base))[0]
source.setSystemId(os.path.join(basehead, sysid))
f = open(sysid, "rb")
else:
source.setSystemId(urlparse.urljoin(base, sysid))
f = urllib.urlopen(source.getSystemId())
source.setByteStream(f)
return source
# ===========================================================================
#
# DEPRECATED SAX 1.0 CLASSES
#
# ===========================================================================
# --- AttributeMap
class AttributeMap:
"""An implementation of AttributeList that takes an (attr,val) hash
and uses it to implement the AttributeList interface."""
def __init__(self, map):
self.map=map
def getLength(self):
return len(self.map.keys())
def getName(self, i):
try:
return self.map.keys()[i]
except IndexError,e:
return None
def getType(self, i):
return "CDATA"
def getValue(self, i):
try:
if type(i)==types.IntType:
return self.map[self.getName(i)]
else:
return self.map[i]
except KeyError,e:
return None
def __len__(self):
return len(self.map)
def __getitem__(self, key):
if type(key)==types.IntType:
return self.map.keys()[key]
else:
return self.map[key]
def items(self):
return self.map.items()
def keys(self):
return self.map.keys()
def has_key(self,key):
return self.map.has_key(key)
def get(self, key, alternative=None):
return self.map.get(key, alternative)
def copy(self):
return AttributeMap(self.map.copy())
def values(self):
return self.map.values()
# --- Event broadcasting object
class EventBroadcaster:
"""Takes a list of objects and forwards any method calls received
to all objects in the list. The attribute list holds the list and
can freely be modified by clients."""
class Event:
"Helper objects that represent event methods."
def __init__(self,list,name):
self.list=list
self.name=name
def __call__(self,*rest):
for obj in self.list:
apply(getattr(obj,self.name), rest)
def __init__(self,list):
self.list=list
def __getattr__(self,name):
return self.Event(self.list,name)
def __repr__(self):
return "<EventBroadcaster instance at %d>" % id(self)
# --- ESIS document handler
import saxlib
class ESISDocHandler(saxlib.HandlerBase):
"A SAX document handler that produces naive ESIS output."
def __init__(self,writer=sys.stdout):
self.writer=writer
def processingInstruction (self,target, remainder):
"""Receive an event signalling that a processing instruction
has been found."""
self.writer.write("?"+target+" "+remainder+"\n")
def startElement(self,name,amap):
"Receive an event signalling the start of an element."
self.writer.write("("+name+"\n")
for a_name in amap.keys():
self.writer.write("A"+a_name+" "+amap[a_name]+"\n")
def endElement(self,name):
"Receive an event signalling the end of an element."
self.writer.write(")"+name+"\n")
def characters(self,data,start_ix,length):
"Receive an event signalling that character data has been found."
self.writer.write("-"+data[start_ix:start_ix+length]+"\n")
# --- XML canonizer
class Canonizer(saxlib.HandlerBase):
"A SAX document handler that produces canonized XML output."
def __init__(self,writer=sys.stdout):
self.elem_level=0
self.writer=writer
def processingInstruction (self,target, remainder):
if not target=="xml":
self.writer.write("<?"+target+" "+remainder+"?>")
def startElement(self,name,amap):
self.writer.write("<"+name)
a_names=amap.keys()
a_names.sort()
for a_name in a_names:
self.writer.write(" "+a_name+"=\"")
self.write_data(amap[a_name])
self.writer.write("\"")
self.writer.write(">")
self.elem_level=self.elem_level+1
def endElement(self,name):
self.writer.write("</"+name+">")
self.elem_level=self.elem_level-1
def ignorableWhitespace(self,data,start_ix,length):
self.characters(data,start_ix,length)
def characters(self,data,start_ix,length):
if self.elem_level>0:
self.write_data(data[start_ix:start_ix+length])
def write_data(self,data):
"Writes datachars to writer."
data=string.replace(data,"&","&")
data=string.replace(data,"<","<")
data=string.replace(data,"\"",""")
data=string.replace(data,">",">")
data=string.replace(data,chr(9),"	")
data=string.replace(data,chr(10)," ")
data=string.replace(data,chr(13)," ")
self.writer.write(data)
# --- mllib
class mllib:
"""A re-implementation of the htmllib, sgmllib and xmllib interfaces as a
SAX DocumentHandler."""
# Unsupported:
# - setnomoretags
# - setliteral
# - translate_references
# - handle_xml
# - handle_doctype
# - handle_charref
# - handle_entityref
# - handle_comment
# - handle_cdata
# - tag_attributes
def __init__(self):
self.reset()
def reset(self):
import saxexts # only used here
self.parser=saxexts.XMLParserFactory.make_parser()
self.handler=mllib.Handler(self.parser,self)
self.handler.reset()
def feed(self,data):
self.parser.feed(data)
def close(self):
self.parser.close()
def get_stack(self):
return self.handler.get_stack()
# --- Handler methods (to be overridden)
def handle_starttag(self,name,method,atts):
method(atts)
def handle_endtag(self,name,method):
method()
def handle_data(self,data):
pass
def handle_proc(self,target,data):
pass
def unknown_starttag(self,name,atts):
pass
def unknown_endtag(self,name):
pass
def syntax_error(self,message):
pass
# --- The internal handler class
class Handler(saxlib.DocumentHandler,saxlib.ErrorHandler):
"""An internal class to handle SAX events and translate them to mllib
events."""
def __init__(self,driver,handler):
self.driver=driver
self.driver.setDocumentHandler(self)
self.driver.setErrorHandler(self)
self.handler=handler
self.reset()
def get_stack(self):
return self.stack
def reset(self):
self.stack=[]
# --- DocumentHandler methods
def characters(self, ch, start, length):
self.handler.handle_data(ch[start:start+length])
def endElement(self, name):
if hasattr(self.handler,"end_"+name):
self.handler.handle_endtag(name,
getattr(self.handler,"end_"+name))
else:
self.handler.unknown_endtag(name)
del self.stack[-1]
def ignorableWhitespace(self, ch, start, length):
self.handler.handle_data(ch[start:start+length])
def processingInstruction(self, target, data):
self.handler.handle_proc(target,data)
def startElement(self, name, atts):
self.stack.append(name)
if hasattr(self.handler,"start_"+name):
self.handler.handle_starttag(name,
getattr(self.handler,
"start_"+name),
atts)
else:
self.handler.unknown_starttag(name,atts)
# --- ErrorHandler methods
def error(self, exception):
self.handler.syntax_error(str(exception))
def fatalError(self, exception):
raise RuntimeError(str(exception))<|fim▁end|> | def escape(data, entities={}):
"""Escape &, <, and > in a string of data. |
<|file_name|>partial-relation.rs<|end_file_name|><|fim▁begin|>// edition:2018
// run-pass
async fn lotsa_lifetimes<'a, 'b, 'c>(a: &'a u32, b: &'b u32, c: &'c u32) -> (&'a u32, &'b u32)
where 'b: 'a
{
drop((a, c));
(b, b)<|fim▁hole|>}
fn main() {
let _ = lotsa_lifetimes(&22, &44, &66);
}<|fim▁end|> | |
<|file_name|>directives.js<|end_file_name|><|fim▁begin|>'use strict';
angular.module('BattleShipApp.directives', []).<|fim▁hole|> elm.text(version);
};
}]);<|fim▁end|> | directive('appVersion', ['version', function (version)
{
return function (scope, elm, attrs)
{ |
<|file_name|>wayknew.py<|end_file_name|><|fim▁begin|>from json import dumps
import requests
d = {'yolar': 535, 'galaxy': 536, 'solar': 551, 'swordkeeper': 552, 'civilization': 553, 'CurvatureDrive': 554,
'singer': 555, 'hdfragments': 556, 'evans': 557, 'dagon': 558, 'di-foil': 559, 'dimension': 560, 'farmer': 562,
'hibernation': 564, 'huformation': 565, 'mentalseal': 566, 'midas': 567, 'momentum': 568, 'owl': 569,
'shooter': 570, 'sophon': 571, 'bye': 573, 'cms': 575, 'nsk': 576, 'painter': 577, 'redcoast': 578,
'scientificboundary': 579, 'wall-breaker': 580}
head = {
'Accept': 'application/json',
'Authorization': 'eyJhbGciOiJIUzI1NiIsImlhdCI6MTQ5NzU4MDY5NywiZXhwIjoxNTEzMTMyNjk3fQ.eyJpZCI6MjB9.uo8JdyzBBQ-oGxzMyoiFDlycWk-fqagZLVgwrwqTSBM',
'Content-Type': 'application/json',
'Host': 'api.wayknew.com',
'Referer': 'https://wayknew.com/articles/535/edit',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}
<|fim▁hole|> exit(-1)
title = repo_name + ' Online API Documentation'
url = 'https://api.wayknew.com/api/articles/' + str(d[repo_name])
html_content = '<p>' + str_doc + '</p>'
request_data = {'title': title, 'content': str_doc, 'html_content': html_content}
rsp = requests.patch(url, dumps(request_data), headers=head)
if rsp.status_code != 200:
print(rsp.text)
exit(rsp.status_code)
print(repo_name + ' api write to wayknew success')<|fim▁end|> | def write_to_wayknew(repo_name, str_doc):
if repo_name not in d.keys():
print('article not in wayknew, please create a null article in wayknew!') |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | '''Contains AJAX apps of mother app''' |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from distutils.core import setup
setup(
name='py-viitenumero',
version='1.0',
description='Python module for generating Finnish national payment reference number',
author='Mohanjith Sudirikku Hannadige',
author_email='[email protected]',
url='http://www.codemaster.fi/python/maksu/',
download_url = 'https://github.com/codemasteroy/py-viitenumero/tarball/1.0',
packages=[ 'maksu' ],
keywords=[ 'payments', 'creditor reference', 'finland', 'suomi' ]<|fim▁hole|><|fim▁end|> | ) |
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | require('./loader.jsx'); |
<|file_name|>plotfixEND1.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os,sys<|fim▁hole|> print(s)
retval = os.system(s)
return retval
def main():
alphas = range(-8,9)
orders = [1]
machs = [0.55,0.65,0.75,0.85,0.95,1.05]
#now, we need to recursively move everybody back
for order in orders:
for mach in machs:
for alpha in alphas:
result = '/home/vbetro/online_edu/images/order%d/mach%0.2f/alpha%+03d/naca0012_%d_%0.2f_%+03d_02.png'%(order,mach,alpha,order,mach,alpha);
if not os.path.exists(result):
mysystem('cp /home/vbetro/online_edu/images/order%d/mach%0.2f/alpha%+03d/naca0012_%d_%0.2f_%+03d_01.png /home/vbetro/online_edu/images/order%d/mach%0.2f/alpha%+03d/naca0012_%d_%0.2f_%+03d_02.png'%(order,mach,alpha,order,mach,alpha,order,mach,alpha,order,mach,alpha));
if __name__ == "__main__":
main()<|fim▁end|> | import glob
def mysystem(s): |
<|file_name|>node-cfg.js<|end_file_name|><|fim▁begin|>'use strict';
var $ = require('jquery');
require('./github-node-cfg.js');
var AddonHelper = require('js/addonHelper');
<|fim▁hole|><|fim▁end|> | $(window.contextVars.githubSettingsSelector).on('submit', AddonHelper.onSubmitSettings); |
<|file_name|>fmStats.cpp<|end_file_name|><|fim▁begin|>/***********************************************************************************
Copyright (C) 2016 Mohammad D. Mottaghi
Under the terms of the MIT license, permission is granted to anyone to use, copy,
modify, publish, redistribute, and/or sell copies of this source code for any
commercial and non-commercial purposes, subject to the following restrictions:
1. The above copyright notice and this permission notice shall not be removed
from any source distribution.
2. The origin of this file shall not be misrepresented; The name of the original
author shall be cited in any copy, or modified version of this source code.
3. If used in a product, acknowledgment in the product documentation would be
appreciated, but is not required.
4. Modified versions must be plainly marked as such, and must not be
misrepresented as being the original source code.
This source code is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability, fitness
for a particular purpose and noninfringement. In no event shall the author
or copyright holders be liable for any claim, damages or other liability,
whether in an action of contract, tort or otherwise, arising from, out of or
in connection with this source code or the use or other dealings in it.
Mohammad Mottaghi Dastjerdi (mamad[a~t]cs.duke.edu)
Sep. 1, 2016
***********************************************************************************/
#include <vcl.h>
#pragma hdrstop
#include "fmStats.h"
//---------------------------------------------------------------------------
#pragma package(smart_init)
#pragma resource "*.dfm"
TfrmStats *frmStats;
//---------------------------------------------------------------------------
__fastcall TfrmStats::TfrmStats(TComponent* Owner)
: TForm(Owner), total_html_tags(CJerdyParser::HtmlTags().CountLeft()), tag_descriptors(CJerdyParser::HtmlTags().Current())
{
}
//---------------------------------------------------------------------------
void __fastcall TfrmStats::CountTags(const CjHtmlElement* node)
{
tag_frequency[node->Meta()->id]++;
for (long i=0, cnt = node->ChildrenCount() ; i < cnt ; i++)
CountTags(node->Child(i));
}
//---------------------------------------------------------------------------
void __fastcall TfrmStats::FormShow(TObject *Sender)
{
AnsiString title;
title.cat_printf("Frequency Of Tags Under '%s' At Level %ld", subtree_root->TagName(), subtree_root->Level());
Caption = title;
freqChart->Title->Text->Clear();
freqChart->Title->Text->Add(title);<|fim▁hole|> CountTags(subtree_root);
for (long index_max, max, i=0 ; i < total_html_tags ; i++)
{
index_max=tiNil;
max=-1;
for (long j=0 ; j < total_html_tags ; j++)
if (max < tag_frequency[j])
{
max = tag_frequency[j];
index_max = j;
}
if (index_max > tiRoot && tag_frequency[index_max]>0)
Series1->Add(tag_frequency[index_max], tag_descriptors[index_max].meta.name);
tag_frequency[index_max] = -1;
}
delete []tag_frequency;
}
//---------------------------------------------------------------------------
void __fastcall TfrmStats::FormKeyPress(TObject *Sender, char &Key)
{
if (Key==27)
Close();
}
//---------------------------------------------------------------------------<|fim▁end|> | Series1->Clear();
tag_frequency = new long[total_html_tags+1];
memset(tag_frequency, 0, (total_html_tags+1) * sizeof(long)); |
<|file_name|>iclimate_functions.go<|end_file_name|><|fim▁begin|>package ig
import "fmt"
// SetTempTarget will set the temperature that the room should be kept to
func (ic *IntelliClimate) SetTempTarget(target float64) error {
return fmt.Errorf("not implemented")<|fim▁hole|>func (ic *IntelliClimate) SetCO2Target(target float64) error {
return fmt.Errorf("not implemented")
}
// SetRHTarget will set the RH target that the room should be kept to
func (ic *IntelliClimate) SetRHTarget(target float64) error {
return fmt.Errorf("not implemented")
}
// EnableCO2Dosing will enable the CO2 dosing
func (ic *IntelliClimate) EnableCO2Dosing() error {
return fmt.Errorf("not implemented")
}
// DisableCO2Dosing will disable the CO2 dosing
func (ic *IntelliClimate) DisableCO2Dosing() error {
return fmt.Errorf("not implemented")
}<|fim▁end|> | }
// SetCO2Target will set the CO2 levels in PPM that the room should be kept to |
<|file_name|>parse.rs<|end_file_name|><|fim▁begin|>//! Utilities for defining a JSON parser.
use util::Id;
use std::cell::RefCell;
use std::collections::{ HashMap, HashSet };
use std::error::Error as StdError;
use std::fmt::{ Display, Debug, Error as FmtError, Formatter };
use std::hash::Hash;
use std::rc::Rc;
use std::sync::Arc;
use serde_json::error;
use serde::ser::{ Serialize, Serializer };
use serde_json;
pub use serde_json::value::Value as JSON;
use serde::de::Error;
/// Utility function: Make sure that we have consumed all the fields of an object.
pub fn check_fields(path: Path, json: &JSON) -> Result<(), ParseError> {
if let JSON::Object(ref obj) = *json {
if obj.is_empty() {
Ok(())
} else {
Err(ParseError::unknown_fields(obj.keys().cloned().collect(), &path))
}
} else {
Ok(())
}
}
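// For example (illustrative): once a parser has consumed the fields it
// understands, `check_fields(path, &leftover)` returns Ok(()) only if the
// leftover object is empty; any remaining keys become an UnknownFields error.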
/// A path in the JSON tree. Used for displaying error messages.
#[derive(Clone, Debug)]
pub struct Path {
buf: Rc<RefCell<String>>,
len: usize,
}
impl Path {
/// Create an empty Path.
pub fn new() -> Self {
Path {
buf: Rc::new(RefCell::new(String::new())),
len: 0,
}
}
/// Push a suffix after a path.
pub fn push_str<F, T>(&self, suffix: &str, cb: F) -> T
where F: FnOnce(Path) -> T
{
let buf = self.buf.clone();
let len;
{
let mut str = buf.borrow_mut();
str.push_str(suffix);
len = str.len();
}
let path = Path {
buf: buf,
len: len,
};
let result = cb(path);
{
let mut str = self.buf.borrow_mut();
str.truncate(self.len)
}
result
}
pub fn push_index<F, T>(&self, index: usize, cb: F) -> T
where F: FnOnce(Path) -> T
{
self.push_str(&format!("[{}]", index), cb)
}
pub fn push<F, T>(&self, suffix: &str, cb: F) -> T
where F: FnOnce(Path) -> T
{
self.push_str(&format!(".{}", suffix), cb)
}
pub fn to_string(&self) -> String {
let mut str = self.buf.borrow().clone();
str.truncate(self.len);
str
}
}
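// Usage sketch (illustrative, not part of the original file): the shared
// buffer grows inside the callback and is truncated again afterwards, so
// sibling fields never see each other's suffixes.
//
//     let path = Path::new();
//     path.push("foo", |p| {
//         p.push_index(3, |p| assert_eq!(p.to_string(), ".foo[3]"));
//     });
//     assert_eq!(path.to_string(), "");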
/// An error during parsing.
#[derive(Debug)]
pub enum ParseError {
JSON(JSONError),
MissingField {
name: String,
at: String,
},
UnknownFields {
names: Vec<String>,
at: String,
},
TypeError {
name: String,
at: String,
expected: String,
},
EmptyObject {
at: String
},
UnknownConstant {
at: String,
constant: String,
}
}
impl Display for ParseError {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), FmtError> {
(self as &Debug).fmt(formatter)
}
}
impl StdError for ParseError {
fn description(&self) -> &str {
"Error while parsing to JSON"
}
}
impl ParseError {
pub fn missing_field(name: &str, at: &Path) -> Self {
ParseError::MissingField {
name: name.to_owned(),
at: at.to_string(),
}
}
pub fn type_error(name: &str, at: &Path, expected: &str) -> Self {
ParseError::TypeError {
name: name.to_owned(),
at: at.to_string(),
expected: expected.to_owned()
}
}
pub fn unknown_fields(names: Vec<String>, at: &Path) -> Self {
ParseError::UnknownFields {
names: names,
at: at.to_string()
}
}
pub fn unknown_constant(value: &str, at: &Path) -> Self {
ParseError::UnknownConstant {
constant: value.to_owned(),
at: at.to_string()
}
}
pub fn empty_object(at: &Path) -> Self {
ParseError::EmptyObject {
at: at.to_string(),
}
}
pub fn json(error: error::Error) -> Self {
ParseError::JSON(JSONError(error))
}
}
#[derive(Debug)]
pub struct JSONError(error::Error);
impl Serialize for JSONError {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: Serializer
{
serializer.serialize_str(&format!("{:?}", self))
}
}
/// An object that knows how to parse values from JSON into type T.
///
/// The JSON object is expected to be consumed along the way. A successful parse will
/// typically leave an empty JSON object.
pub trait Parser<T: Sized> {
fn description() -> String;
fn from_str(source: &str) -> Result<T, ParseError> {
Self::from_str_at(Path::new(), source)
}
fn from_str_at(path: Path, source: &str) -> Result<T, ParseError> {
match serde_json::from_str(source) {
Err(err) => Err(ParseError::json(err)),
Ok(mut json) => Self::parse(path, &mut json)
}
}
/// Parse a single value from JSON, consuming as much as necessary from JSON.
fn parse(path: Path, source: &mut JSON) -> Result<T, ParseError>;
/// Parse a field from JSON, consuming it.
fn take(path: Path, source: &mut JSON, field_name: &str) -> Result<T, ParseError> {
match Self::take_opt(path.clone(), source, field_name) {
Some(result) => result,
None => Err(ParseError::missing_field(field_name, &path))
}
}
/// Parse a field from JSON, consuming it.
fn take_opt(path: Path, source: &mut JSON, field_name: &str) -> Option<Result<T, ParseError>> {
if let JSON::Object(ref mut obj) = *source {
if let Some(mut v) = obj.remove(field_name) {
Some(Self::parse(path, &mut v))
} else {
None
}
} else {
Some(Err(ParseError::type_error(field_name, &path, "object")))
}
}
/// Parse a field containing an array from JSON, consuming the field.
fn take_vec_opt(path: Path, source: &mut JSON, field_name: &str) -> Option<Result<Vec<T>, ParseError>>
{
if let JSON::Object(ref mut obj) = *source {
if let Some(ref mut json) = obj.remove(field_name) {
if let JSON::Array(ref mut vec) = *json {
let mut result = Vec::with_capacity(vec.len());
for (json, i) in vec.iter_mut().zip(0..) {
match path.push_index(i, |path| Self::parse(path, json)) {
Err(error) => return Some(Err(error)),
Ok(parsed) => result.push(parsed)
}
}
Some(Ok(result))
} else {
Some(Err(ParseError::type_error(field_name, &path, "array")))
}
} else {
None
}
} else {
Some(Err(ParseError::missing_field(field_name, &path)))
}
}
fn take_vec(path: Path, source: &mut JSON, field_name: &str) -> Result<Vec<T>, ParseError> {
match Self::take_vec_opt(path.clone(), source, field_name) {
Some(result) => result,
None => Err(ParseError::missing_field(field_name, &path))
}
}
}
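// Usage sketch (illustrative, not part of the original file):
//
//     let nums = <Vec<f64> as Parser<Vec<f64>>>::from_str("[1, 2.5, 3]").unwrap();
//     assert_eq!(nums, vec![1.0, 2.5, 3.0]);
//
// A lone scalar such as "1.5" also parses, because Vec's parser promotes a
// single value to a one-element array (see the impl below).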
impl Parser<f64> for f64 {
fn description() -> String {
"Number".to_owned()
}
fn parse(path: Path, source: &mut JSON) -> Result<Self, ParseError> {
match *source {
JSON::I64(val) => Ok(val as f64),
JSON::F64(val) => Ok(val),
JSON::U64(val) => Ok(val as f64),
_ => Err(ParseError::type_error("as float", &path, "number"))
}
}
}
impl Parser<bool> for bool {
fn description() -> String {
"bool".to_owned()
}
fn parse(path: Path, source: &mut JSON) -> Result<Self, ParseError> {
match *source {
JSON::Bool(ref b) => Ok(*b),
JSON::U64(0) | JSON::I64(0) => Ok(false),
JSON::U64(1) | JSON::I64(1) => Ok(true),
JSON::String(ref str) if str == "true" => Ok(true),
JSON::String(ref str) if str == "false" => Ok(false),
_ => Err(ParseError::type_error("as bool", &path, "boolean"))
}
}
}
impl Parser<u8> for u8 {
fn description() -> String {
"byte".to_owned()
}
fn parse(path: Path, source: &mut JSON) -> Result<Self, ParseError> {
match source.as_u64() {
None => Err(ParseError::type_error("as byte", &path, "positive integer")),
Some(ref val) if *val > u8::max_value() as u64 =>
Err(ParseError::type_error("as byte", &path, "positive integer")),
Some(ref val) => Ok(*val as u8)
}
}
}
impl<T> Parser<Vec<T>> for Vec<T> where T: Parser<T> {
fn description() -> String {<|fim▁hole|> match *source {
JSON::Array(ref mut array) => {
let mut result = Vec::with_capacity(array.len());
for (source, i) in array.iter_mut ().zip(0..) {
let value = try!(path.push_index(i, |path| T::parse(path, source)));
result.push(value)
}
Ok(result)
}
JSON::Null => {
// Accept `null` as an empty array.
Ok(vec![])
}
_ => {
// Attempt to promote the value to an array.
let single = try!(path.push_str("", |path| T::parse(path, source)));
Ok(vec![single])
}
}
}
}
/*
impl<T, U> Parser<(T, U)> for (T, U) where T: Parser<T>, U: Parser<U> {
fn description() -> String {
format!("({}, {})", T::description(), U::description())
}
fn parse(path: Path, source: &mut JSON) -> Result<Self, ParseError> {
match *source {
JSON::Array(ref mut array) if array.len() == 2 => {
let mut right = array.pop().unwrap(); // We just checked that length == 2
let mut left = array.pop().unwrap(); // We just checked that length == 2
let left_parsed = try!(path.push(&T::description() as &str, |path| {T::parse(path, &mut left)}));
let right_parsed = try!(path.push(&U::description() as &str, |path| {U::parse(path, &mut right)}));
Ok((left_parsed, right_parsed))
}
_ => Err(ParseError::type_error("pair of values", &path, "array"))
}
}
}
*/
impl Parser<String> for String {
fn description() -> String {
"String".to_owned()
}
fn parse(path: Path, source: &mut JSON) -> Result<Self, ParseError> {
match source.as_string() {
Some(str) => Ok(str.to_owned()),
None => Err(ParseError::type_error("string", &path, "string"))
}
}
}
impl<T> Parser<Arc<T>> for Arc<T> where T: Parser<T> {
fn description() -> String {
T::description()
}
fn parse(path: Path, source: &mut JSON) -> Result<Self, ParseError> {
Ok(Arc::new(try!(T::parse(path, source))))
}
}
pub trait ToJSON {
fn to_json(&self) -> JSON;
}
impl ToJSON for String {
fn to_json(&self) -> JSON {
JSON::String(self.clone())
}
}
impl ToJSON for bool {
fn to_json(&self) -> JSON {
JSON::Bool(*self)
}
}
impl ToJSON for f64 {
fn to_json(&self) -> JSON {
JSON::F64(*self)
}
}
impl ToJSON for usize {
fn to_json(&self) -> JSON {
JSON::U64(*self as u64)
}
}
impl ToJSON for JSON {
fn to_json(&self) -> JSON {
self.clone()
}
}
impl<T> ToJSON for HashSet<T> where T: ToJSON + Eq + Hash {
fn to_json(&self) -> JSON {
JSON::Array((*self).iter().map(T::to_json).collect())
}
}
impl<T> ToJSON for HashMap<String, T> where T: ToJSON {
fn to_json(&self) -> JSON {
JSON::Object(self.iter().map(|(k, v)| (k.clone(), T::to_json(v))).collect())
}
}
impl<T> ToJSON for Vec<T> where T: ToJSON {
fn to_json(&self) -> JSON {
JSON::Array(self.iter().map(|x| x.to_json()).collect())
}
}
impl<'a, T> ToJSON for Vec<(&'a str, T)> where T: ToJSON {
fn to_json(&self) -> JSON {
JSON::Object(self.iter().map(|&(ref k, ref v)| {
((*k).to_owned(), v.to_json())
}).collect())
}
}
impl <'a> ToJSON for &'a str {
fn to_json(&self) -> JSON {
JSON::String((*self).to_owned())
}
}
impl<'a, T> ToJSON for &'a T where T: ToJSON {
fn to_json(&self) -> JSON {
(**self).to_json()
}
}
impl<K, T, V> ToJSON for HashMap<Id<K>, Result<T, V>> where T: ToJSON, V: ToJSON {
fn to_json(&self) -> JSON {
JSON::Object(self.iter().map(|(k, result)| {
let k = k.to_string();
let result = match *result {
Ok(ref ok) => ok.to_json(),
Err(ref err) => vec![("Error", err)].to_json()
};
(k, result)
}).collect())
}
}
impl<T> ToJSON for Option<T> where T: ToJSON {
fn to_json(&self) -> JSON {
match *self {
None => JSON::Null,
Some(ref result) => result.to_json()
}
}
}
impl ToJSON for () {
fn to_json(&self) -> JSON {
JSON::Null
}
}
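// Illustrative sketch: the `Vec<(&str, T)>` impl above makes ad-hoc JSON
// objects cheap to build (values shown are placeholders):
//
//     let obj = vec![("id", 1f64), ("score", 0.5f64)].to_json();
//     // -> JSON::Object({"id": 1.0, "score": 0.5})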
/*
impl<T> Parser<T> for T where T: Deserialize {
fn parse(_: Path, source: &mut JSON) -> Result<T, ParseError> {
use serde_json;
serde_json::from_value(source.clone()).map_err(ParseError::json)
}
}
*/<|fim▁end|> | format!("Array<{}>", T::description())
}
fn parse(path: Path, source: &mut JSON) -> Result<Self, ParseError> {
// Otherwise, parse as an actual array.
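// A few illustrative behaviours of the parsers above (a sketch; `path` stands
// for a root `Path` value obtained elsewhere):
//
//     Vec::<u8>::parse(path, &mut JSON::Null)    // Ok(vec![])  - null is accepted as empty
//     Vec::<u8>::parse(path, &mut JSON::U64(7))  // Ok(vec![7]) - a single value is promoted
//     u8::parse(path, &mut JSON::U64(300))       // Err(..)     - 300 exceeds u8::max_value()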
<|file_name|>rules.js<|end_file_name|><|fim▁begin|>'use strict';
/* jshint -W098 */
angular.module('mean.rules').controller('RulesController', ['$scope', '$stateParams', '$location', '$http','Global', 'Rules', 'MeanUser','Circles','Groups',
function($scope, $stateParams, $location, $http, Global, Rules, MeanUser,Circles,Groups) {
$scope.global = Global;
$scope.rules = {};
$scope.rule = {};
$scope.groups={};
$scope.sortType = 'name'; // set the default sort type
$scope.sortReverse = false; // set the default sort order
$scope.searchFish = ''; // set the default search/filter term
$scope.hasAuthorization = function(rule) {
if (!rule || !rule.user) return false;
return MeanUser.isAdmin || rule.user._id === MeanUser.user._id;
};
$scope.popup1 = {
opened: false
};
$scope.testdataerror=false;
$scope.popup2 = {
opened: false
};
$scope.testdataresult='';
$scope.openpicker1 = function() {
$scope.popup1.opened = true;
};
$scope.openpicker2 = function() {
$scope.popup2.opened = true;
};
$scope.availableCircles = [];
Circles.mine(function(acl) {
$scope.availableCircles = acl.allowed;
$scope.allDescendants = acl.descendants;
});
$scope.showDescendants = function(permission) {
var temp = $('.ui-select-container .btn-primary').text().split(' ');
temp.shift(); //remove close icon
var selected = temp.join(' ');
$scope.descendants = $scope.allDescendants[selected];
};
$scope.selectPermission = function() {
$scope.descendants = [];
};
$scope.create = function(isValid) {
if (isValid) {
// $scope.article.permissions.push('test test');
var rule = new Rules($scope.rule);
rule.$save(function(response) {
$location.path('rules/' + response._id);
});
$scope.rules = {};
<|fim▁hole|> $scope.submitted = true;
}
};
$scope.remove = function(rule) {
if (rule) {
rule.$remove(function(response) {
for (var i in $scope.rules) {
if ($scope.rules[i] === rule) {
$scope.rules.splice(i, 1);
}
}
$location.path('rules');
});
} else {
$scope.rules.$remove(function(response) {
$location.path('rules');
});
}
};
$scope.update = function(isValid) {
if (isValid) {
var rule = $scope.rule;
if (!rule.updated) {
rule.updated = [];
}
rule.updated.push(new Date().getTime());
rule.$update(function() {
$location.path('rules/' + rule._id);
});
} else {
$scope.submitted = true;
}
};
$scope.findGroups = function() {
Groups.query(function(groups) {
$scope.groups = groups;
});
};
$scope.find = function() {
Rules.query(function(rules) {
$scope.rules = rules;
});
};
$scope.findOne = function() {
Rules.get({
ruleId: $stateParams.ruleId
}, function(rule) {
$scope.rule = rule;
});
};
$scope.documentupdate = function(testdata) {
$scope.testdataerror=false;
try{
testdata = JSON.parse(testdata);
} catch (ex) {
$scope.testdataerror=true;
}
};
$scope.cmdtestdata = function (testdata,execIf,execThen,execElse) {
var td={};
$scope.testdataerror=false;
try{
td = JSON.parse(testdata);
} catch (ex) {
$scope.testdataerror=true;
return;
}
$scope.testdataresult = '';
$http({
method: 'PUT',
url: '/api/rulesprocessor/testdata' ,
headers: {
'Content-Type': 'application/json'
},
data: {
"document": td,
"execIf":execIf,
"execThen":execThen,
"execElse":execElse
}
}).then(function successCallback(response) {
if (response.data === undefined) {
$scope.testdataresult = '';
} else {
$scope.testdataresult = '' +
'IF() evaluated to: ' + response.data.resExecIf.var0 +
'\nThen() evaluated to: ' + JSON.stringify(response.data.resExecThen) +
'\nElse() evaluated to: ' + JSON.stringify(response.data.resExecElse);
}
}, function errorCallback(response) {
$scope.testdataresult = 'Error: (HTTP ' + response.status + ') ' + response.data.error;
});
};
}
]);<|fim▁end|> | } else { |
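// Illustrative request body for the /api/rulesprocessor/testdata call above
// (field values are placeholders; the expression syntax depends on the
// server-side rules processor):
//
// {
//   "document": { "temperature": 42 },
//   "execIf":   "<if-expression>",
//   "execThen": "<then-expression>",
//   "execElse": "<else-expression>"
// }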
<|file_name|>move.js<|end_file_name|><|fim▁begin|><|fim▁hole|> return {
restrict: 'E',
scope: {
selection: '=',
type: '@',
ngConfirmMessage: '@',
ngConfirm: '&'
},
link: function(scope, element, attrs) {
scope.myFunction = function(){
var spanDropdown = document.querySelectorAll('span.btn-group')[0];
spanDropdown.classList.add("open");
};
scope.icon = 'glyphicon-list';
if (attrs.type === 'move_to_package') scope.button = 'Move to Package';
var vods_array = [];
$http.get('../api/vodpackages?package_type_id=3&package_type_id=4').then(function(response) {
var data = response.data;
for(var i=0;i<data.length;i++){
vods_array.push({name:data[i].package_name,id:data[i].id})
}
});
scope.list_of_vods = vods_array;
var newarray = [];
scope.moveto = function () {
var spanDropdown = document.querySelectorAll('span.btn-group')[0];
spanDropdown.classList.remove("open");
var array_of_selection_vod = scope.selection;
scope.change = function(name,id){
scope.button = name;
var id_of_selected_package = id;
for(var j=0;j<array_of_selection_vod.length;j++){
newarray.push({package_id:id_of_selected_package,vod_id:array_of_selection_vod[j].values.id})
}
if(newarray.length === 0) {
notification.log('Sorry, you have not selected any Vod item.', { addnCls: 'humane-flatty-error' });
} else {
$http.post("../api/package_vod", newarray).then(function (response,data, status, headers, config,file) {
notification.log('Vod successfully added', { addnCls: 'humane-flatty-success' });
window.location.replace("#/vodPackages/edit/"+id_of_selected_package);
},function (data, status, headers, config) {
notification.log('Something went wrong', { addnCls: 'humane-flatty-error' });
}).catch(function(error){
// $http promises expose no `.on`, and winston is server-side; log browser-side failures here instead.
console.error('The error during post request is ', error);
});
}
};
}
},
template: '<div class="btn-group" uib-dropdown is-open="status.isopen"> ' +
'<button ng-click="myFunction()" id="single-button" type="button" class="btn btn-default" uib-dropdown-toggle ng-disabled="disabled">' +
'<span class="glyphicon {{icon}}"></span> {{button}} <span class="caret"></span>' +
'</button>' +
'<ul class="dropdown-menu" uib-dropdown-menu role="menu" aria-labelledby="single-button">' +
'<li role="menuitem" ng-click="change(choice.name,choice.id)" ng-repeat="choice in list_of_vods">' +
'<p id="paragraph_vod" ng-click="moveto()">{{choice.name}}</p>' +
'</li>' +
'</ul>' +
'</div>'
};
}
move.$inject = ['Restangular', '$uibModal', '$q', 'notification', '$state','$http'];
export default move;<|fim▁end|> | function move(Restangular, $uibModal, $q, notification, $state,$http) {
'use strict';
|
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for retry 0.10
// Project: https://github.com/tim-kos/node-retry
// Definitions by: Stan Goldmann <https://github.com/krenor>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.3
export interface RetryOperation {
/**
* Defines the function that is to be retried and executes it for the first time right away.
*
* @param callback The function that is to be retried
* @param callback.current Number of attempts callback has been executed so far.
* @param [options.timeout] A timeout in milliseconds.
* @param [options.callback] Callback to execute when the operation takes longer than the timeout.
*
*/
attempt(callback: (current: number) => void, options?: AttemptOptions): void;
/**
* Returns false when no error value is given, or the maximum amount of retries has been reached.
* Otherwise it returns true, and retries the operation after the timeout for the current attempt number.
*
*
*/
retry(err?: Error): boolean;
/**
* The number of attempts it took to call the retrying function before it was successful.
*
*/
attempts(): number;
/**
* A reference to the error object that occurred most frequently.
* Errors are compared using the error.message property.
*
* @return If no errors occurred so far the value will be null.
*/
mainError(): Error | null;
/**
* Returns an array of all errors that have been passed to RetryOperation.retry() so far.
*
*/
errors(): Error[];
/**
* Stops the operation being retried. Useful for aborting the operation on a fatal error etc.
*/
stop(): void;
}
export interface AttemptOptions {
timeout?: number;
callback?(): void;
}
/**
* Create a new RetryOperation object.
*
* @param [options.retries=10] The maximum amount of times to retry the operation.
* @param [options.factor=2] The exponential factor to use.
* @param [options.minTimeout=1000] The number of milliseconds before starting the first retry.
* @param [options.maxTimeout=Infinity] The maximum number of milliseconds between two retries.
* @param [options.randomize=false] Randomizes the timeouts by multiplying a factor between 1-2.
* @param [options.forever=false] Whether to retry forever.
* @param [options.unref=false] Whether to unref the setTimeout's.
*
*/
export function operation(options?: OperationOptions): RetryOperation;
export interface OperationOptions {
/**
* The maximum amount of times to retry the operation.
* @default 10
*/
retries?: number;
/**
* The exponential factor to use.
* @default 2
*/
factor?: number;
/**
* The number of milliseconds before starting the first retry.
* @default 1000
*/
minTimeout?: number;
/**
* The maximum number of milliseconds between two retries.
* @default Infinity
*/
maxTimeout?: number;
/**
* Randomizes the timeouts by multiplying a factor between 1-2.
* @default false
*/
randomize?: boolean;
forever?: boolean;
unref?: boolean;
}
/** Get an array with timeouts and their return values in milliseconds. */
export function timeouts(options?: TimeoutsOptions): number[];
export interface TimeoutsOptions {
retries?: number;
factor?: number;
minTimeout?: number;
maxTimeout?: number;
randomize?: boolean;
}
/**
* Create a new timeout (in milliseconds) based on the given parameters.
*
* @param attempt The attempt number for which the timeout should be calculated.
* @return timeout
*/
export function createTimeout(attempt: number, options?: CreateTimeoutOptions): number;
export interface CreateTimeoutOptions {
/**
* The exponential factor to use.
* @default 2
*/
factor?: number;
/**
* The number of milliseconds before starting the first retry.
* @default 1000
*/
minTimeout?: number;
/**
* The maximum number of milliseconds between two retries.
* @default Infinity
*/
maxTimeout?: number;
/**
* Randomizes the timeouts by multiplying a factor between 1-2.
* @default false
*/
randomize?: boolean;
}
/**
* Wrap all functions of the object with retry.
*
* @param object The object to be wrapped
* @param methods Methods which need to be wrapped
*
*/
export function wrap(object: object, options?: WrapOptions, methods?: string[]): void;
export interface WrapOptions {
/**
* The maximum amount of times to retry the operation.
* @default 10
*/
retries?: number;
/**
* The exponential factor to use.
* @default 2
*/
factor?: number;
/**
* The number of milliseconds before starting the first retry.
* @default 1000
*/
minTimeout?: number;
/**
* The maximum number of milliseconds between two retries.
* @default Infinity
*/
maxTimeout?: number;
/**
* Randomizes the timeouts by multiplying a factor between 1-2.
* @default false
*/
randomize?: boolean;
/**
* Whether to retry forever.
* @default false
*/
forever?: boolean;
/**
* Whether to unref the setTimeout's.
* @default false<|fim▁hole|>}<|fim▁end|> | */
unref?: boolean; |
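// Illustrative usage of the API declared above (a sketch; `connect` and `done`
// are placeholder functions, not part of this package):
//
// import * as retry from "retry";
//
// const operation = retry.operation({ retries: 5, factor: 2, minTimeout: 1000 });
// operation.attempt((currentAttempt) => {
//     connect((err: Error | undefined, result?: unknown) => {
//         if (operation.retry(err)) {
//             return; // another attempt is scheduled with exponential backoff
//         }
//         done(operation.mainError(), result);
//     });
// });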
<|file_name|>Statistics.cpp<|end_file_name|><|fim▁begin|>#include "Statistics.hpp"
#include "CaptureInstance.hpp"
#include "MainWindow.hpp"
#include "ui_Statistics.h"
using namespace EPL_Viz;
using namespace EPL_DataCollect;
Statistics::Statistics(MainWindow *parent) : QDialog(parent), ui(new Ui::Statistics), mw(parent) {
ui->setupUi(this);
refresh();
}
Statistics::~Statistics() { delete ui; }
void Statistics::refresh() {
CaptureInstance *ci = mw->getCaptureInstance();
auto stats = ci->getCycleBuilder()->getStats();
auto ihStats = ci->getInputHandler()->getStats();
ui->List->clear();
// Make sure not to divide by 0
if (stats.cycleCount == 0)
stats.cycleCount = 1;
if (ihStats.packetsParsed == 0)
ihStats.packetsParsed = 1;
if (ihStats.cyclesParsed == 0)
ihStats.cyclesParsed = 1;
QList<QTreeWidgetItem *> items;
QTreeWidgetItem * pItem = nullptr;
items.append(new QTreeWidgetItem({"Cycle count", std::to_string(stats.cycleCount).c_str()}));
items.append(new QTreeWidgetItem({"Cycle count", std::to_string(stats.cycleCount).c_str()}));
items.append(new QTreeWidgetItem({"Packet count", std::to_string(stats.packetCount).c_str()}));
items.append(new QTreeWidgetItem({"Events count", std::to_string(stats.eventsCount).c_str()}));
items.append(new QTreeWidgetItem({"Packet parsed", std::to_string(ihStats.packetsParsed).c_str()}));
items.append(new QTreeWidgetItem({"Cycles parsed", std::to_string(ihStats.cyclesParsed).c_str()}));
items.append(new QTreeWidgetItem({"Cycle Processing time", ""}));
pItem = items.back();
items.append(new QTreeWidgetItem(pItem, {"Total", (std::to_string(stats.totalTime.count()) + " ns").c_str()}));
items.append(new QTreeWidgetItem(
pItem, {"Average", (std::to_string(stats.totalTime.count() / stats.cycleCount) + " ns").c_str()}));
items.append(new QTreeWidgetItem({"Time waited for packets to be parsed", ""}));
pItem = items.back();
items.append(
new QTreeWidgetItem(pItem, {"Total", (std::to_string(stats.waitForPacketsTime.count()) + " ns").c_str()}));
items.append(new QTreeWidgetItem(<|fim▁hole|> pItem, {"Average", (std::to_string(stats.waitForPacketsTime.count() / stats.cycleCount) + " ns").c_str()}));
items.append(new QTreeWidgetItem({"Packets parsing time", ""}));
pItem = items.back();
items.append(
new QTreeWidgetItem(pItem, {"Total", (std::to_string(ihStats.timePacketsParsed.count()) + " ns").c_str()}));
items.append(new QTreeWidgetItem(
pItem,
{"Average", (std::to_string(ihStats.timePacketsParsed.count() / ihStats.packetsParsed) + " ns").c_str()}));
items.append(new QTreeWidgetItem({"Packets of a Cycle parsing time", ""}));
pItem = items.back();
items.append(
new QTreeWidgetItem(pItem, {"Total", (std::to_string(ihStats.timeCyclesParsed.count()) + " ns").c_str()}));
items.append(new QTreeWidgetItem(
pItem, {"Average", (std::to_string(ihStats.timeCyclesParsed.count() / ihStats.cyclesParsed) + " ns").c_str()}));
ui->List->addTopLevelItems(items);
ui->List->expandAll();
ui->List->header()->resizeSection(0, 275);
}<|fim▁end|> | |
<|file_name|>server.go<|end_file_name|><|fim▁begin|>/*
* Glue - Robust Go and Javascript Socket Library
* Copyright (C) 2015 Roland Singer <roland.singer[at]desertbit.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Package glue - Robust Go and Javascript Socket Library.
// This library is thread-safe.
package glue
import (
"fmt"
"net"
"net/http"
"sync"
"time"
"github.com/desertbit/glue/backend"
)
//####################//
//### Public Types ###//
//####################//
// OnNewSocketFunc is an event function.
type OnNewSocketFunc func(s *Socket)
//###################//
//### Server Type ###//
//###################//
// A Server represents a glue server which handles incoming socket connections.
type Server struct {
bs *backend.Server
options *Options
block bool
onNewSocket OnNewSocketFunc
sockets map[string]*Socket // A map holding all currently active sockets.
socketsMutex sync.Mutex
}
// NewServer creates a new glue server instance.
// One variadic arguments specifies the server options.
func NewServer(o ...Options) *Server {
// Get or create the options.
var options *Options
if len(o) > 0 {
options = &o[0]
} else {
options = &Options{}
}
// Set the default option values for unset values.
options.SetDefaults()
// Create a new backend server.
bs := backend.NewServer(len(options.HTTPHandleURL), options.EnableCORS, options.CheckOrigin)
// Create a new server value.
s := &Server{
bs: bs,
options: options,
onNewSocket: func(*Socket) {}, // Initialize with dummy function to remove nil check.
sockets: make(map[string]*Socket),
}
// Set the backend server event function.
bs.OnNewSocketConnection(s.handleOnNewSocketConnection)
return s
}
// Block new incoming connections.
func (s *Server) Block(b bool) {
s.block = b
}
// OnNewSocket sets the event function which is
// triggered if a new socket connection was made.
// The event function must not block! As soon as the event function
// returns, the socket is added to the active sockets map.
func (s *Server) OnNewSocket(f OnNewSocketFunc) {
s.onNewSocket = f
}
// GetSocket obtains a socket by its ID.
// Returns nil if not found.
func (s *Server) GetSocket(id string) *Socket {
// Lock the mutex.
s.socketsMutex.Lock()
defer s.socketsMutex.Unlock()
// Obtain the socket.
socket, ok := s.sockets[id]
if !ok {
return nil
}
return socket
}
// Sockets returns a list of all currently connected sockets.
// Hint: Sockets are added to the active sockets list before the OnNewSocket
// event function is called.
// Use the IsInitialized flag to determine if a socket is not ready yet...
func (s *Server) Sockets() []*Socket {
// Lock the mutex.
s.socketsMutex.Lock()
defer s.socketsMutex.Unlock()
// Create the slice.
list := make([]*Socket, len(s.sockets))
// Add all sockets from the map.
i := 0
for _, s := range s.sockets {
list[i] = s
i++
}
return list
}
// Release this package. This will block all new incoming socket connections
// and close all currently connected sockets.
func (s *Server) Release() {
// Block all new incoming socket connections.
s.Block(true)
// Wait for a little moment, so new incoming sockets are added
// to the active sockets list.
time.Sleep(200 * time.Millisecond)
// Close all currently connected sockets.
sockets := s.Sockets()
for _, s := range sockets {
s.Close()<|fim▁hole|>// This is a blocking method.
func (s *Server) Run() error {
// Skip if set to none.
if s.options.HTTPSocketType != HTTPSocketTypeNone {
// Set the base glue HTTP handler.
http.Handle(s.options.HTTPHandleURL, s)
// Start the http server.
if s.options.HTTPSocketType == HTTPSocketTypeUnix {
// Listen on the unix socket.
l, err := net.Listen("unix", s.options.HTTPListenAddress)
if err != nil {
return fmt.Errorf("Listen: %v", err)
}
// Start the http server.
err = http.Serve(l, nil)
if err != nil {
return fmt.Errorf("Serve: %v", err)
}
} else if s.options.HTTPSocketType == HTTPSocketTypeTCP {
// Start the http server.
err := http.ListenAndServe(s.options.HTTPListenAddress, nil)
if err != nil {
return fmt.Errorf("ListenAndServe: %v", err)
}
} else {
return fmt.Errorf("invalid socket options type: %v", s.options.HTTPSocketType)
}
} else {
// HINT: This is only a placeholder until the internal glue TCP server is implemented.
w := make(chan struct{})
<-w
}
return nil
}
// ServeHTTP implements the HTTP Handler interface of the http package.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
s.bs.ServeHTTP(w, r)
}
//########################//
//### Server - Private ###//
//########################//
func (s *Server) handleOnNewSocketConnection(bs backend.BackendSocket) {
// Close the socket if incoming connections should be blocked.
if s.block {
bs.Close()
return
}
// Create a new socket value.
// The goroutines are started automatically.
newSocket(s, bs)
}<|fim▁end|> | }
}
// Run starts the server and listens for incoming socket connections. |
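// Illustrative usage of the server above (a sketch; the listen address and the
// handler body are placeholders):
//
//	s := glue.NewServer(glue.Options{
//		HTTPListenAddress: ":8080",
//	})
//	s.OnNewSocket(func(sock *glue.Socket) {
//		// Set up the socket here; this event function must not block.
//	})
//	log.Fatal(s.Run())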
<|file_name|>files_2.js<|end_file_name|><|fim▁begin|>var searchData=
[
['checksyscall_2eh',['CheckSysCall.h',['../db/d19/_check_sys_call_8h.html',1,'']]],<|fim▁hole|><|fim▁end|> | ['classid_2eh',['ClassID.h',['../dc/d14/_class_i_d_8h.html',1,'']]],
['comparator_2eh',['Comparator.h',['../d7/d0c/_comparator_8h.html',1,'']]]
]; |
<|file_name|>EditHero.js<|end_file_name|><|fim▁begin|>import React from 'react';
const EditHero = props => {
if (props.selectedHero) {
return (
<div>
<div className="editfields">
<div>
<label>id: </label>
{props.addingHero
? <input
type="number"
name="id"
placeholder="id"
value={props.selectedHero.id}
onChange={props.onChange}
/>
: <label className="value">
{props.selectedHero.id}
</label>}
</div>
<div>
<label>name: </label>
<input
name="name"
value={props.selectedHero.name}
placeholder="name"
onChange={props.onChange}
/>
</div>
<div>
<label>saying: </label>
<input
name="saying"
value={props.selectedHero.saying}
placeholder="saying"
onChange={props.onChange}
/>
</div>
</div>
<button onClick={props.onCancel}>Cancel</button>
<button onClick={props.onSave}>Save</button>
</div>
);<|fim▁hole|> } else {
return <div />;
}
};
export default EditHero;<|fim▁end|> | |
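// Illustrative usage from a parent component (handler names are placeholders;
// the props mirror the ones read above):
//
// <EditHero
//   selectedHero={this.state.selectedHero}
//   addingHero={this.state.addingHero}
//   onChange={this.handleChange}
//   onCancel={this.handleCancel}
//   onSave={this.handleSave}
// />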
<|file_name|>views.py<|end_file_name|><|fim▁begin|># Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.views import generic
from polls.models import Choice,Poll
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_poll_list'
def get_queryset(self):
"""
Return the last five published polls (not including those set to be
published in the future).
"""
return Poll.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Poll
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Poll
template_name = 'polls/results.html'
def vote(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# display voting form<|fim▁hole|> 'poll':p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results',args=(p.id,)))<|fim▁end|> | return render(request, 'polls/detail.html', { |
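# Illustrative urls.py wiring for the views above (a sketch; the exact import
# style depends on the Django version in use):
#
# urlpatterns = [
#     url(r'^$', views.IndexView.as_view(), name='index'),
#     url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
#     url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
#     url(r'^(?P<poll_id>\d+)/vote/$', views.vote, name='vote'),
# ]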
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>fn main() {
// ANCHOR: here
enum IpAddrKind {
V4,
V6,
}
struct IpAddr {
kind: IpAddrKind,
address: String,
}
<|fim▁hole|> address: String::from("127.0.0.1"),
};
let loopback = IpAddr {
kind: IpAddrKind::V6,
address: String::from("::1"),
};
// ANCHOR_END: here
}<|fim▁end|> | let home = IpAddr {
kind: IpAddrKind::V4, |
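// A more concise variant (sketch): the same data can live directly in the enum
// variants, which makes the separate struct unnecessary:
//
// enum IpAddr {
//     V4(String),
//     V6(String),
// }
//
// let home = IpAddr::V4(String::from("127.0.0.1"));
// let loopback = IpAddr::V6(String::from("::1"));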
<|file_name|>DbPBS.py<|end_file_name|><|fim▁begin|>from __future__ import print_function
import os
import abc
import shelve
from pprint import pprint
from cloudmesh_base.tables import dict_printer
from cloudmesh_base.Shell import Shell
from cloudmesh_base.util import banner
from cloudmesh_base.util import path_expand
from cloudmesh_pbs.OpenPBS import OpenPBS
class pbs_db_interface(object):
__metaclass__ = abc.ABCMeta
db = None
def data(self):
return dict(self.db)
def __getitem__(self, index):
return self.db[index]
def __setitem__(self, index, value):
self.db[index] = value
@abc.abstractmethod
def load(self, filename):
"""loads the saved databsa from the file"""
@abc.abstractmethod
def get(self, id):
"""get the object with the id"""
@abc.abstractmethod
def set(self, id, value):
"""set the objet with the id to value"""
def set_filename(self, filename):
"""set the objet with the id to value"""
self.filename = filename
def remove(self):
try:
os.remove(self.filename)
except:
pass
@abc.abstractmethod
def save(self):
"""save the cloudmesh_job"""
@abc.abstractmethod
def update(self):
"""load the cloudmesh_job"""
class DbPBS(pbs_db_interface):
def __init__(self, filename=None):
self.pbs = OpenPBS(deploy=True)
self.open()
def open(self, filename=None):
if filename is not None:
self.filename = filename
else:
self.filename = path_expand(self.pbs.database_filename())
path = os.path.dirname(self.filename)
Shell.mkdir(path)
self.load()
def clear(self):
for id in self.db:
del self.db[id]
self.save()
def load(self):
"""load the cloudmesh_job"""
print('loading', self.filename)
# remove db ending so that shelve automatically adds it
self.filename = self.filename.replace(".db", "")
self.db = shelve.open(self.filename, writeback=True)
def save(self):
self.db.sync()
def get(self, id):
return self.db[id]
def status(self, id):
return self.get(id)["job_state"]
def set(self, id, value):
self.db[id] = value
self.save()
def keys(self):
return self.data().keys()
def delete(self, id):
del self.db[id]
def close(self):
self.db.close()
def update(self, host=None, user=True):
if host is None:
print("host is none is not supported yet")
raise
print("QSTAT")
r = dict(self.pbs.qstat(host, user=user, format='dict'))
pprint(r)
if r is not {}:
for jobid in r:
self.db[jobid] = r[jobid]
self.save()
else:
print("no jobs found after query")
print("update completed")
def info(self):
print("Filename:", self.filename)
def list(self, attributes=None, output="table"):
if self.db is None or len(self.db) == 0:
print("No jobs found")
return None
columns = attributes
if columns is None:
columns = ["cm_jobid", "cm_host", "cm_user", "Job_Name", "job_state", "exit_status"]
# prepare the dict
d = {}
for jobid in self.db:
content = {}
for attribute in columns:
try:
content[attribute] = self.db[jobid][attribute]
except:
content[attribute] = "None"
d[jobid] = content
# print the dict
if output in ["csv", "table", "dict", "yaml"]:
return dict_printer(d, order=columns, output=output)
return None
def qsub(self, name, host, script, template=None, kind="dict"):
r = self.pbs.qsub(name, host, script, template=template, kind=kind)
pprint(r)
return dict(r)
if __name__ == "__main__":<|fim▁hole|>
db = DbPBS()
db.clear()
db.info()
db.update(host="india", user=False)
print(db.list(output="table"))
print(db.list(output="csv"))
print(db.list(output="dict"))
print(db.list(output="yaml"))
banner("user")
db.clear()
db.update(host="india")
print(db.list(output="table"))
if qsub:
banner('qsub')
pbs = OpenPBS()
jobname = "job-" + pbs.jobid + ".pbs"
host = "india"
script_template = pbs.read_script("etc/job.pbs")
print(script_template)
r = db.qsub(jobname, host, 'echo "Hello"', template=script_template)
pprint(r)
banner('variable list')
pprint(OpenPBS.variable_list(r))<|fim▁end|> |
qsub = False |
<|file_name|>TransitionHLAPI.java<|end_file_name|><|fim▁begin|>/**
* Copyright 2009-2016 Université Paris Ouest and Sorbonne Universités,
Univ. Paris 06 - CNRS UMR 7606 (LIP6)
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Project leader / Initial Contributor:
* Lom Messan Hillah - <[email protected]>
*
* Contributors:
* ${ocontributors} - <$oemails}>
*
* Mailing list:
* [email protected]
*/
/**
* (C) Sorbonne Universités, UPMC Univ Paris 06, UMR CNRS 7606 (LIP6)
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Lom HILLAH (LIP6) - Initial models and implementation
* Rachid Alahyane (UPMC) - Infrastructure and continuous integration
* Bastien Bouzerau (UPMC) - Architecture
* Guillaume Giffo (UPMC) - Code generation refactoring, High-level API
*
* $Id ggiffo, Wed Feb 10 15:00:49 CET 2016$
*/
package fr.lip6.move.pnml.ptnet.hlapi;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;
import org.apache.axiom.om.OMElement;
import org.eclipse.emf.common.util.DiagnosticChain;
import fr.lip6.move.pnml.framework.hlapi.HLAPIClass;
import fr.lip6.move.pnml.framework.utils.IdRefLinker;
import fr.lip6.move.pnml.framework.utils.ModelRepository;
import fr.lip6.move.pnml.framework.utils.exception.InnerBuildException;
import fr.lip6.move.pnml.framework.utils.exception.InvalidIDException;
import fr.lip6.move.pnml.framework.utils.exception.OtherException;
import fr.lip6.move.pnml.framework.utils.exception.VoidRepositoryException;
import fr.lip6.move.pnml.ptnet.Arc;
import fr.lip6.move.pnml.ptnet.Name;
import fr.lip6.move.pnml.ptnet.NodeGraphics;
import fr.lip6.move.pnml.ptnet.Page;
import fr.lip6.move.pnml.ptnet.PtnetFactory;
import fr.lip6.move.pnml.ptnet.RefTransition;
import fr.lip6.move.pnml.ptnet.ToolInfo;
import fr.lip6.move.pnml.ptnet.Transition;
import fr.lip6.move.pnml.ptnet.impl.PtnetFactoryImpl;
public class TransitionHLAPI implements HLAPIClass,PnObjectHLAPI,NodeHLAPI,TransitionNodeHLAPI{
/**
* The contained LLAPI element.
*/
private Transition item;
/**
* this constructor allows you to set all 'settable' values
* excepted container.
*/
public TransitionHLAPI(
java.lang.String id
, NameHLAPI name
, NodeGraphicsHLAPI nodegraphics
) throws InvalidIDException ,VoidRepositoryException {//BEGIN CONSTRUCTOR BODY
PtnetFactory fact = PtnetFactoryImpl.eINSTANCE;
synchronized(fact){item = fact.createTransition();}
if(id!=null){
item.setId(ModelRepository.getInstance().getCurrentIdRepository().checkId(id, this));
}
if(name!=null)
item.setName((Name)name.getContainedItem());
if(nodegraphics!=null)
item.setNodegraphics((NodeGraphics)nodegraphics.getContainedItem());
}
/**
* this constructor allows you to set all 'settable' values, including container if any.
*/
public TransitionHLAPI(
java.lang.String id
, NameHLAPI name
, NodeGraphicsHLAPI nodegraphics
, PageHLAPI containerPage
) throws InvalidIDException ,VoidRepositoryException {//BEGIN CONSTRUCTOR BODY
PtnetFactory fact = PtnetFactoryImpl.eINSTANCE;
synchronized(fact){item = fact.createTransition();}
if(id!=null){
item.setId(ModelRepository.getInstance().getCurrentIdRepository().checkId(id, this));
}
if(name!=null)
item.setName((Name)name.getContainedItem());
if(nodegraphics!=null)
item.setNodegraphics((NodeGraphics)nodegraphics.getContainedItem());
if(containerPage!=null)
item.setContainerPage((Page)containerPage.getContainedItem());
}
/**
* This constructor gives access to required stuff only (not container if any)
*/
public TransitionHLAPI(
java.lang.String id
) throws InvalidIDException ,VoidRepositoryException {//BEGIN CONSTRUCTOR BODY
PtnetFactory fact = PtnetFactoryImpl.eINSTANCE;
synchronized(fact){item = fact.createTransition();}
if(id!=null){
item.setId(ModelRepository.getInstance().getCurrentIdRepository().checkId(id, this));
}
}
/**
* This constructor gives access to required stuff only (and container)
*/
public TransitionHLAPI(
java.lang.String id
, PageHLAPI containerPage
) throws InvalidIDException ,VoidRepositoryException {//BEGIN CONSTRUCTOR BODY
PtnetFactory fact = PtnetFactoryImpl.eINSTANCE;
synchronized(fact){item = fact.createTransition();}
if(id!=null){
item.setId(ModelRepository.getInstance().getCurrentIdRepository().checkId(id, this));
}
if(containerPage!=null)
item.setContainerPage((Page)containerPage.getContainedItem());
}
/**
* This constructor encapsulates a low level API object in HLAPI.
*/
public TransitionHLAPI(Transition lowLevelAPI){
item = lowLevelAPI;
}
// access to low level API
/**
* Returns the encapsulated object.
*/
public Transition getContainedItem(){
return item;
}
//getters giving LLAPI object
/**
* Returns the encapsulated Low Level API object.
*/
public String getId(){
return item.getId();
}
/**
* Returns the encapsulated Low Level API object.
*/
public Name getName(){
return item.getName();
}
/**
* Returns the encapsulated Low Level API object.
*/
public List<ToolInfo> getToolspecifics(){
return item.getToolspecifics();
}
/**
* Returns the encapsulated Low Level API object.
*/
public Page getContainerPage(){
return item.getContainerPage();
}
/**
* Returns the encapsulated Low Level API object.
*/
public List<Arc> getInArcs(){
return item.getInArcs();
}
/**
* Returns the encapsulated Low Level API object.
*/
public List<Arc> getOutArcs(){
return item.getOutArcs();
}
/**
* Returns the encapsulated Low Level API object.
*/
public NodeGraphics getNodegraphics(){
return item.getNodegraphics();
}
/**
* Returns the encapsulated Low Level API object.
*/
public List<RefTransition> getReferencingTransitions(){
return item.getReferencingTransitions();
}
//getters giving HLAPI object
/**
* This accessor automatically encapsulates an element of the current object.
* WARNING : this creates a new object in memory.
* @return : null if the element is null
*/
public NameHLAPI getNameHLAPI(){
if(item.getName() == null) return null;
return new NameHLAPI(item.getName());
}
/**
* This accessor automatically encapsulates all elements of the selected sublist.
* WARNING : this can create a lot of new objects in memory.
*/
public java.util.List<ToolInfoHLAPI> getToolspecificsHLAPI(){
java.util.List<ToolInfoHLAPI> retour = new ArrayList<ToolInfoHLAPI>();
for (ToolInfo elemnt : getToolspecifics()) {
retour.add(new ToolInfoHLAPI(elemnt));
}
return retour;
}
/**
* This accessor automatically encapsulates an element of the current object.
* WARNING : this creates a new object in memory.
* @return : null if the element is null
*/
public PageHLAPI getContainerPageHLAPI(){
if(item.getContainerPage() == null) return null;
return new PageHLAPI(item.getContainerPage());
}
/**
* This accessor automatically encapsulates all elements of the selected sublist.
* WARNING : this can create a lot of new objects in memory.
*/
public java.util.List<ArcHLAPI> getInArcsHLAPI(){
java.util.List<ArcHLAPI> retour = new ArrayList<ArcHLAPI>();
for (Arc elemnt : getInArcs()) {
retour.add(new ArcHLAPI(elemnt));
}
return retour;
}
/**
* This accessor automatically encapsulates all elements of the selected sublist.
* WARNING : this can create a lot of new objects in memory.
*/
public java.util.List<ArcHLAPI> getOutArcsHLAPI(){
java.util.List<ArcHLAPI> retour = new ArrayList<ArcHLAPI>();
for (Arc elemnt : getOutArcs()) {
retour.add(new ArcHLAPI(elemnt));
}
return retour;
}
/**
* This accessor automatically encapsulates an element of the current object.
* WARNING : this creates a new object in memory.
* @return : null if the element is null
*/
public NodeGraphicsHLAPI getNodegraphicsHLAPI(){<|fim▁hole|> if(item.getNodegraphics() == null) return null;
return new NodeGraphicsHLAPI(item.getNodegraphics());
}
/**
* This accessor automatically encapsulates all elements of the selected sublist.
* WARNING : this can create a lot of new objects in memory.
*/
public java.util.List<RefTransitionHLAPI> getReferencingTransitionsHLAPI(){
java.util.List<RefTransitionHLAPI> retour = new ArrayList<RefTransitionHLAPI>();
for (RefTransition elemnt : getReferencingTransitions()) {
retour.add(new RefTransitionHLAPI(elemnt));
}
return retour;
}
//Special getter for lists of generic objects, returns only one object type.
//setters (including container setter if available)
/**
* set Id
*/
public void setIdHLAPI(
java.lang.String elem) throws InvalidIDException ,VoidRepositoryException {
if(elem!=null){
try{
item.setId(ModelRepository.getInstance().getCurrentIdRepository().changeId(this, elem));
}catch (OtherException e){
ModelRepository.getInstance().getCurrentIdRepository().checkId(elem, this);
}
}
}
/**
* set Name
*/
public void setNameHLAPI(
NameHLAPI elem){
if(elem!=null)
item.setName((Name)elem.getContainedItem());
}
/**
* set Nodegraphics
*/
public void setNodegraphicsHLAPI(
NodeGraphicsHLAPI elem){
if(elem!=null)
item.setNodegraphics((NodeGraphics)elem.getContainedItem());
}
/**
* set ContainerPage
*/
public void setContainerPageHLAPI(
PageHLAPI elem){
if(elem!=null)
item.setContainerPage((Page)elem.getContainedItem());
}
//setters/remover for lists.
public void addToolspecificsHLAPI(ToolInfoHLAPI unit){
item.getToolspecifics().add((ToolInfo)unit.getContainedItem());
}
public void removeToolspecificsHLAPI(ToolInfoHLAPI unit){
item.getToolspecifics().remove((ToolInfo)unit.getContainedItem());
}
//equals method
public boolean equals(TransitionHLAPI item){
return item.getContainedItem().equals(getContainedItem());
}
//PNML
/**
* Returns the PNML xml tree for this object.
*/
public String toPNML(){
return item.toPNML();
}
/**
* Writes the PNML XML tree of this object into file channel.
*/
public void toPNML(FileChannel fc){
item.toPNML(fc);
}
/**
* creates an object from the XML nodes (symmetric to toPNML)
*/
public void fromPNML(OMElement subRoot,IdRefLinker idr) throws InnerBuildException, InvalidIDException, VoidRepositoryException{
item.fromPNML(subRoot,idr);
}
public boolean validateOCL(DiagnosticChain diagnostics){
return item.validateOCL(diagnostics);
}
}<|fim▁end|> | |
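// Illustrative construction sketch (the id and `page` value are placeholders;
// per the framework's conventions a model repository must be active first):
//
// PageHLAPI page = ...; // an existing page
// TransitionHLAPI t1 = new TransitionHLAPI("t1", page);
// t1.addToolspecificsHLAPI(toolInfo); // `toolInfo` is a hypothetical ToolInfoHLAPI value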
<|file_name|>041First Missing Positive.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Given an unsorted integer array, find the first missing positive integer.
#
# For example,
# Given [1,2,0] return 3,
# and [3,4,-1,1] return 2.
# [1,3,4,2,5,8,9,7]
# Your algorithm should run in O(n) time and uses constant space.
# From the problem statement we want the first (smallest) missing positive integer, so counting always starts at 1.
# The key to this problem is making clever use of the indices.
# First pass: put each number at its matching index, 1 at index 0, 2 at index 1, ...
# Second pass: the first index whose value does not match is the first missing integer.
class Solution():
def firstMissingPositive(self, A):
i = 0<|fim▁hole|> while i < len(A):
# The swap and its guard conditions are the key
if A[i] > 0 and A[i] - 1 < len(A) and A[i] != A[A[i] - 1]:
A[A[i] - 1], A[i] = A[i], A[A[i] - 1]
else:
i += 1
for i, integer in enumerate(A):
if integer != i + 1:
return i + 1
return len(A) + 1
print Solution().firstMissingPositive([4,5,8,9,7])<|fim▁end|> | |
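# Worked trace (illustrative) for A = [3, 4, -1, 1]:
#   i=0: A[0]=3  -> swap with A[2]: [-1, 4, 3, 1]
#   i=0: A[0]=-1 is not positive   -> i=1
#   i=1: A[1]=4  -> swap with A[3]: [-1, 1, 3, 4]
#   i=1: A[1]=1  -> swap with A[0]: [1, -1, 3, 4]
#   i=1: A[1]=-1 is not positive   -> i=2; 3 and 4 already sit at their slots
#   second pass: index 1 holds -1 instead of 2, so the answer is 2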
<|file_name|>_blob_containers_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._blob_containers_operations import build_clear_legal_hold_request, build_create_or_update_immutability_policy_request, build_create_request, build_delete_immutability_policy_request, build_delete_request, build_extend_immutability_policy_request, build_get_immutability_policy_request, build_get_request, build_lease_request, build_list_request, build_lock_immutability_policy_request, build_set_legal_hold_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BlobContainersOperations:
"""BlobContainersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
maxpagesize: Optional[str] = None,
filter: Optional[str] = None,
include: Optional[Union[str, "_models.ListContainersInclude"]] = None,
**kwargs: Any
) -> AsyncIterable["_models.ListContainerItems"]:
"""Lists all containers and does not support a prefix like data plane. Also SRP today does not
return continuation token.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param maxpagesize: Optional. Specified maximum number of containers that can be included in
the list.
:type maxpagesize: str
:param filter: Optional. When specified, only container names starting with the filter will be
listed.
:type filter: str
:param include: Optional, used to include the properties for soft deleted blob containers.
:type include: str or ~azure.mgmt.storage.v2019_06_01.models.ListContainersInclude
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListContainerItems or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2019_06_01.models.ListContainerItems]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainerItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxpagesize=maxpagesize,
filter=filter,
include=include,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxpagesize=maxpagesize,
filter=filter,
include=include,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListContainerItems", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
<|fim▁hole|> return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers'} # type: ignore
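    # Illustrative call of the pager above (a sketch; client construction is
    # elided and the resource names are placeholders):
    #
    #     async for container in client.blob_containers.list("my-rg", "mystorageacct"):
    #         print(container.name)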
@distributed_trace_async
async def create(
self,
resource_group_name: str,
account_name: str,
container_name: str,
blob_container: "_models.BlobContainer",
**kwargs: Any
) -> "_models.BlobContainer":
"""Creates a new container under the specified account as described by request body. The container
resource includes metadata and properties for that container. It does not include a list of the
blobs contained by the container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties of the blob container to create.
:type blob_container: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(blob_container, 'BlobContainer')
request = build_create_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BlobContainer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
account_name: str,
container_name: str,
blob_container: "_models.BlobContainer",
**kwargs: Any
) -> "_models.BlobContainer":
"""Updates container properties as specified in request body. Properties not mentioned in the
request will be unchanged. Update fails if the specified container doesn't already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties to update for the blob container.
:type blob_container: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(blob_container, 'BlobContainer')
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
container_name: str,
**kwargs: Any
) -> "_models.BlobContainer":
"""Gets properties of a specified container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
account_name: str,
container_name: str,
**kwargs: Any
) -> None:
"""Deletes specified container under its account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
@distributed_trace_async
async def set_legal_hold(
self,
resource_group_name: str,
account_name: str,
container_name: str,
legal_hold: "_models.LegalHold",
**kwargs: Any
) -> "_models.LegalHold":
"""Sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold
follows an append pattern and does not clear out the existing tags that are not specified in
the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be set to a blob container.
:type legal_hold: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(legal_hold, 'LegalHold')
request = build_set_legal_hold_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.set_legal_hold.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold'} # type: ignore
@distributed_trace_async
async def clear_legal_hold(
self,
resource_group_name: str,
account_name: str,
container_name: str,
legal_hold: "_models.LegalHold",
**kwargs: Any
) -> "_models.LegalHold":
"""Clears legal hold tags. Clearing the same or non-existent tag results in an idempotent
operation. ClearLegalHold clears out only the specified tags in the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be clear from a blob container.
:type legal_hold: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(legal_hold, 'LegalHold')
request = build_clear_legal_hold_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.clear_legal_hold.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clear_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold'} # type: ignore
@distributed_trace_async
async def create_or_update_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: Optional[str] = None,
parameters: Optional["_models.ImmutabilityPolicy"] = None,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Creates or updates an unlocked immutability policy. ETag in If-Match is honored if given but
not required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be created or updated to a blob
container.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
_json = None
request = build_create_or_update_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self.create_or_update_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
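    # Usage sketch (not part of the generated client; assumes `client` as
    # above and ImmutabilityPolicy imported from
    # azure.mgmt.storage.v2019_06_01.models; the 7-day period is illustrative):
    #
    #     policy = await client.blob_containers.create_or_update_immutability_policy(
    #         resource_group_name="my-rg",
    #         account_name="mystorageaccount",
    #         container_name="my-container",
    #         parameters=ImmutabilityPolicy(immutability_period_since_creation_in_days=7),
    #     )
    #     etag = policy.etag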
@distributed_trace_async
async def get_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: Optional[str] = None,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Gets the existing immutability policy along with the corresponding ETag in response headers and
body.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
template_url=self.get_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
@distributed_trace_async
async def delete_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Aborts an unlocked immutability policy. The response of delete has
immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this
operation. Deleting a locked immutability policy is not allowed, the only way is to delete the
container after deleting all expired blobs inside the policy locked container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update.
         Required for this operation; a value of "*" can be used to apply the operation only if the
         immutability policy already exists.
        :type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
template_url=self.delete_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
delete_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
@distributed_trace_async
async def lock_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is
ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update.
         Required for this operation; a value of "*" can be used to apply the operation only if the
         immutability policy already exists.
        :type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_lock_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
template_url=self.lock_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
lock_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock'} # type: ignore
@distributed_trace_async
async def extend_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
parameters: Optional["_models.ImmutabilityPolicy"] = None,
**kwargs: Any
) -> "_models.ImmutabilityPolicy":
"""Extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only
action allowed on a Locked policy will be this action. ETag in If-Match is required for this
operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update.
         Required for this operation; a value of "*" can be used to apply the operation only if the
         immutability policy already exists.
        :type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be extended for a blob
container.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
_json = None
request = build_extend_immutability_policy_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
if_match=if_match,
json=_json,
template_url=self.extend_immutability_policy.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
extend_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend'} # type: ignore
@distributed_trace_async
async def lease(
self,
resource_group_name: str,
account_name: str,
container_name: str,
parameters: Optional["_models.LeaseContainerRequest"] = None,
**kwargs: Any
) -> "_models.LeaseContainerResponse":
"""The Lease Container operation establishes and manages a lock on a container for delete
operations. The lock duration can be 15 to 60 seconds, or can be infinite.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param parameters: Lease Container request body.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.LeaseContainerRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LeaseContainerResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LeaseContainerResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LeaseContainerResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'LeaseContainerRequest')
else:
_json = None
request = build_lease_request(
resource_group_name=resource_group_name,
account_name=account_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.lease.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LeaseContainerResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease'} # type: ignore<|fim▁end|> | |
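    # Usage sketch (not part of the generated client; assumes `client` as
    # above and LeaseContainerRequest from the same models module; the
    # acquire action with infinite duration (-1) is illustrative):
    #
    #     lease = await client.blob_containers.lease(
    #         resource_group_name="my-rg",
    #         account_name="mystorageaccount",
    #         container_name="my-container",
    #         parameters=LeaseContainerRequest(action="Acquire", lease_duration=-1),
    #     )
    #     lease_id = lease.lease_id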
<|file_name|>symmetric-difference.py<|end_file_name|><|fim▁begin|><|fim▁hole|>[_, ms, _, ns] = list(sys.stdin)
ms = set(int(m) for m in ms.split(' '))
ns = set(int(n) for n in ns.split(' '))
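# Symmetric difference: values present in exactly one of the two input sets,
# printed in ascending order, one per line.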
print(sep='\n', *sorted(ms.difference(ns).union(ns.difference(ms))))<|fim▁end|> | import sys
|
<|file_name|>hello_canvas.rs<|end_file_name|><|fim▁begin|>//! Basic hello world example, drawing
//! to a canvas.
use ggez::event;
use ggez::graphics::{self, Color};
use ggez::{Context, GameResult};
use glam::*;
use std::env;
use std::path;
struct MainState {
text: graphics::Text,
canvas: graphics::Canvas,
frames: usize,
draw_with_canvas: bool,
}
impl MainState {
fn new(ctx: &mut Context) -> GameResult<MainState> {
// The ttf file will be in your resources directory. Later, we
// will mount that directory so we can omit it in the path here.
let font = graphics::Font::new(ctx, "/LiberationMono-Regular.ttf")?;
let text = graphics::Text::new(("Hello world!", font, 48.0));
let canvas = graphics::Canvas::with_window_size(ctx)?;
let s = MainState {
text,
canvas,
draw_with_canvas: false,
frames: 0,
};
Ok(s)
}
}
impl event::EventHandler<ggez::GameError> for MainState {
fn update(&mut self, _ctx: &mut Context) -> GameResult {
Ok(())
}
fn draw(&mut self, ctx: &mut Context) -> GameResult {
let dest_point = Vec2::new(10.0, 10.0);
if self.draw_with_canvas {
println!("Drawing with canvas");
graphics::clear(ctx, graphics::Color::from((64, 0, 0, 0)));
graphics::set_canvas(ctx, Some(&self.canvas));
graphics::clear(ctx, graphics::Color::from((255, 255, 255, 128)));
graphics::draw(
ctx,
&self.text,
graphics::DrawParam::new()
.dest(dest_point)
.color(Color::from((0, 0, 0, 255))),
)?;
graphics::set_canvas(ctx, None);
graphics::draw(
ctx,
&self.canvas,
graphics::DrawParam::new().color(Color::from((255, 255, 255, 128))),
)?;
} else {
println!("Drawing without canvas");
graphics::set_canvas(ctx, None);
graphics::clear(ctx, [0.25, 0.0, 0.0, 1.0].into());
graphics::draw(
ctx,
&self.text,
graphics::DrawParam::new()
.dest(dest_point)
.color(Color::from((192, 128, 64, 255))),
)?;
}
graphics::present(ctx)?;
self.frames += 1;
if (self.frames % 100) == 0 {
println!("FPS: {}", ggez::timer::fps(ctx));
}
<|fim▁hole|> Ok(())
}
fn key_down_event(
&mut self,
_ctx: &mut Context,
_keycode: ggez::event::KeyCode,
_keymod: ggez::event::KeyMods,
repeat: bool,
) {
if !repeat {
self.draw_with_canvas = !self.draw_with_canvas;
println!("Canvas on: {}", self.draw_with_canvas);
}
}
}
pub fn main() -> GameResult {
let resource_dir = if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") {
let mut path = path::PathBuf::from(manifest_dir);
path.push("resources");
path
} else {
path::PathBuf::from("./resources")
};
let cb = ggez::ContextBuilder::new("hello_canvas", "ggez").add_resource_path(resource_dir);
let (mut ctx, event_loop) = cb.build()?;
let state = MainState::new(&mut ctx)?;
event::run(ctx, event_loop, state)
}<|fim▁end|> | |
<|file_name|>ZadatProblemAction.java<|end_file_name|><|fim▁begin|>/**
*
*/
package cz.geokuk.core.napoveda;
import java.awt.event.ActionEvent;
import java.awt.event.KeyEvent;
import java.net.MalformedURLException;
import java.net.URL;
import cz.geokuk.core.program.FConst;
import cz.geokuk.framework.Action0;
import cz.geokuk.util.process.BrowserOpener;
/**
* @author Martin Veverka
*
*/
public class ZadatProblemAction extends Action0 {
private static final long serialVersionUID = -2882817111560336824L;
	/**
	 * Creates the action and configures its label, description and mnemonic.
	 */
public ZadatProblemAction() {
super("Zadat problém ...");
putValue(SHORT_DESCRIPTION, "Zobrazí stránku na code.google.com, která umožní zadat chybu v Geokuku nebo požadavek na novou funkcionalitu.");
putValue(MNEMONIC_KEY, KeyEvent.VK_P);
}
/*
* (non-Javadoc)
*
* @see java.awt.event.ActionListener#actionPerformed(java.awt.event.ActionEvent)
*/
@Override
public void actionPerformed(final ActionEvent aE) {
try {
BrowserOpener.displayURL(new URL(FConst.POST_PROBLEM_URL));
} catch (final MalformedURLException e) {
throw new RuntimeException(e);
}
}<|fim▁hole|><|fim▁end|> |
} |
<|file_name|>Main.js<|end_file_name|><|fim▁begin|>Ext.define('TrackApp.view.main.Main', {
extend: 'Ext.panel.Panel',
requires: [
'Ext.resizer.Splitter'<|fim▁hole|>
xtype: 'app-main',
controller: 'main',
viewModel: {
type: 'main'
},
title: 'Oslo-Bergen til fots',
header: {
titlePosition: 0,
defaults: {
xtype: 'button',
toggleGroup: 'menu'
},
items: [{
text: 'Bilder',
id: 'instagram'
},{
text: 'Høydeprofil',
id: 'profile'
},{
text: 'Posisjon',
id: 'positions'
},{
text: 'Facebook',
id: 'facebookUrl',
reference: 'facebookBtn'
},{
text: 'Instagram',
id: 'instagramUrl',
reference: 'instagramBtn'
}]
},
layout: {
type: 'vbox',
pack: 'start',
align: 'stretch'
},
items: [{
reference: 'map',
xtype: 'map',
flex: 3
}, {
reference: 'bottom',
xtype: 'panel',
flex: 2,
//split: true,
hidden: true,
layout: {
type: 'fit'
},
defaults: {
hidden: true
},
items: [{
reference: 'profile',
xtype: 'profile'
},{
reference: 'positions',
xtype: 'positions'
}]
}]
});<|fim▁end|> | ], |
<|file_name|>soap.go<|end_file_name|><|fim▁begin|>package soap
import (
"bytes"
"crypto/tls"
"fmt"
"io/ioutil"<|fim▁hole|>)
//BasicAuthGet adds basic auth and performs a GET request to path
func BasicAuthGet(path, username, password string) ([]byte, int, error) {
request, err := BasicAuthRequest(path, username, password)
if err != nil {
return nil, 0, err
}
return GetRequest(request, true)
}
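// Example (sketch; the URL and credentials are illustrative placeholders):
//
//	body, status, err := BasicAuthGet("https://example.com/api/v1/items", "user", "secret")
//	if err != nil || status != http.StatusOK {
//		// handle the error
//	}
//	_ = body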
//BasicAuthRequest creates a "GET" request with basic auth and returns it
func BasicAuthRequest(path, username, password string) (*http.Request, error) {
request, err := http.NewRequest("GET", path, nil)
if err != nil {
return nil, err
}
request.SetBasicAuth(username, password)
return request, nil
}
//Get makes a GET request to path
func Get(path string) ([]byte, int, error) {
request, err := http.NewRequest("GET", path, nil)
if err != nil {
return nil, 0, err
}
return GetRequest(request, true)
}
//GetRequest executes a request and returns an []byte response, response code, and error
func GetRequest(request *http.Request, insecure bool) ([]byte, int, error) {
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
}
response, err := client.Do(request)
if err != nil {
return nil, 0, err
}
return ResponseCheck(response)
}
//XMLBasicAuthGet adds basic auth, requests XML, and performs a GET request to path
func XMLBasicAuthGet(path, username, password string) ([]byte, int, error) {
request, err := BasicAuthRequest(path, username, password)
if err != nil {
return nil, 0, err
}
request.Header.Set("accept", "application/xml")
return GetRequest(request, true)
}
//XMLADXBasicAuthPost performs a SOAP POST with basic auth, sending the given method name and XML payload
func XMLADXBasicAuthPost(path, method, payload, username, password string) ([]byte, int, error) {
req, err := http.NewRequest("POST", path, bytes.NewBufferString(payload))
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
req.SetBasicAuth(username, password)
req.Header.Set("Content-Type", "text/xml; charset=utf-8")
req.Header.Set("Accept", "text/xml")
req.Header.Set("SOAPAction", fmt.Sprintf("\"urn:webservicesapi#%v\"", method))
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
defer resp.Body.Close()
return ResponseCheck(resp)
}
//XMLGet performs a "GET" request to the path with the application/xml accept header set
func XMLGet(path string) ([]byte, int, error) {
request, err := http.NewRequest("GET", path, nil)
if err != nil {
return nil, 0, err
}
request.Header.Set("accept", "application/xml")
	return GetRequest(request, true)
}
//XMLGetRequest performs a get request and sets the application/xml header to accept
func XMLGetRequest(request *http.Request) ([]byte, int, error) {
request.Header.Set("accept", "application/xml")
return GetRequest(request, true)
}
//ResponseCheck reads the response and returns the body, status code, and any error encountered.
func ResponseCheck(response *http.Response) ([]byte, int, error) {
defer response.Body.Close()
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, response.StatusCode, err
}
return body, response.StatusCode, nil
}
//Post will perform a POST request with the data provided.
func Post(payload []byte, path string, headers map[string]string, insecure bool) ([]byte, int, error) {
req, err := http.NewRequest("POST", path, bytes.NewBuffer(payload))
for k, v := range headers {
req.Header.Set(k, v)
}
client := &http.Client{Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
}
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
defer resp.Body.Close()
return ResponseCheck(resp)
}
//XMLPost will perform a POST request with the data provided; it adds an XML Content-Type header to the header map.
func XMLPost(payload []byte, path string, headers map[string]string, insecure bool) ([]byte, int, error) {
if headers == nil {
headers = make(map[string]string)
}
headers["Content-Type"] = "application/xml"
return Post(payload, path, headers, insecure)
}
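// Example (sketch; the endpoint and payload are illustrative placeholders):
//
//	payload := []byte(`<request><id>42</id></request>`)
//	body, status, err := XMLPost(payload, "https://example.com/api", nil, true)
//	_, _, _ = body, status, err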
// Response is a generic response map for APIs.
type Response map[string]interface{}
//XML returns the response marshaled as an XML string.
func (r Response) XML() string {
b, err := xml.MarshalIndent(r, "", " ")
if err != nil {
return ""
}
return strings.Replace(string(b), "%", "%%", -1)
}
// XMLMarshalHead takes a structure and XML-marshals it with the
// <?xml version="1.0" encoding="UTF-8"?> header prepended to the
// XML output.
func XMLMarshalHead(r interface{}) string {
rv := []byte(xml.Header)
b, err := xml.MarshalIndent(r, "", " ")
if err != nil {
return ""
}
rv = append(rv, b...)
return strings.Replace(string(rv), "\\\"", "\"", -1)
}
//String returns the response marshaled as a JSON string.
func (r Response) String() string {
b, err := json.Marshal(r)
if err != nil {
return ""
}
return strings.Replace(string(b), "%", "%%", -1)
}
//XMLErrHandler writes the resp map as an XML string with the error code provided
func XMLErrHandler(w http.ResponseWriter, r *http.Request, resp Response, code int) {
w.Header().Set("Content-Type", "application/xml")
http.Error(w, resp.XML(), code)
return
}
//XMLResHandler writes the resp map as an XML string with a 200 OK
func XMLResHandler(w http.ResponseWriter, r *http.Request, resp Response) {
w.Header().Set("Content-Type", "application/xml")
fmt.Fprintf(w, resp.XML())
return
}<|fim▁end|> | "net/http"
"encoding/xml"
"strings"
"encoding/json" |
<|file_name|>validators.py<|end_file_name|><|fim▁begin|>"""Additional form validators
"""
# future imports
from __future__ import absolute_import
# stdlib import
import re
from StringIO import StringIO
# third-party imports
from PIL import Image
from wtforms import ValidationError
from wtforms import validators
# Pulled from http://www.regular-expressions.info/email.html
email_re = re.compile(
r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
r"@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?",
re.IGNORECASE
)
def validate_email_address(form, field):
"""Validate a string email address against the email regex
"""
if (not isinstance(field.data, basestring) or
not email_re.search(field.data)):
raise ValidationError('Not a valid email address.')
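# Usage sketch (illustrative; assumes a WTForms form with a text field):
#
#     class SignupForm(Form):
#         email = TextField('Email', validators=[validate_email_address])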
def validate_image_format(form, field):
"""Use PIL to inspect an image, to see its format type.
"""
valid_formats = ['JPG', 'JPEG', 'PNG']
if len(field.raw_data):
if hasattr(field.raw_data[0], 'filename'):
try:
i = Image.open(StringIO(field.raw_data[0].value))
if i.format not in valid_formats:
raise ValidationError('Invalid image provided.')
except IOError:
raise ValidationError('Invalid image format found.')
def validate_image_size(width=None, height=None):
def _validate_image_size(form, field):
if len(field.raw_data):
if hasattr(field.raw_data[0], 'filename'):
try:
i = Image.open(StringIO(field.raw_data[0].value))
if (width and height) and ((width, height) != i.size):
raise ValidationError(
'Image must be {}x{}, found {}x{}.'.format(
width,
height,
i.size[0],
i.size[1]
)<|fim▁hole|> raise ValidationError(
'Image must be {}px in width, found {}px.'.format(
width,
i.size[0]
)
)
elif height and height != i.size[1]:
raise ValidationError(
'Image must be {}px in height, found {}px.'.format(
height,
i.size[1]
)
)
except IOError:
raise ValidationError('Invalid image format found.')
return _validate_image_size
class RequiredIf(validators.Required):
"""A validator which makes a field required if
another field is set and has a truthy value.
"""
other_field_name = None
    extra_validators = []
def __init__(self, other_field_name, *args, **kwargs):
self.other_field_name = other_field_name
        self.extra_validators = args
super(RequiredIf, self).__init__(*args, **kwargs)
def __call__(self, form, field):
other_field = form._fields.get(self.other_field_name)
if other_field is None:
raise Exception(
'no field named "%s" in form' % self.other_field_name)
if bool(other_field.data):
super(RequiredIf, self).__call__(form, field)
            for val in self.extra_validators:
val.__call__(form, field)<|fim▁end|> | )
elif width and width != i.size[0]: |
<|file_name|>routes.js<|end_file_name|><|fim▁begin|>import React from 'react'
import { Router, Route, hashHistory } from 'react-router'
import Home from './components/ui/Home'
import About from './components/ui/About'
import MemberList from './components/ui/MemberList'
import { Left, Right, Whoops404 } from './components'
<|fim▁hole|> <Router history={hashHistory}>
<Route path="/" component={Home} />
<Route path="/" component={Left}>
<Route path="about" component={About} />
<Route path="members" component={MemberList} />
</Route>
<Route path="*" component={Whoops404} />
</Router>
)
export default routes<|fim▁end|> | const routes = ( |
<|file_name|>ipv4-drb.cc<|end_file_name|><|fim▁begin|>#include "ns3/log.h"
#include "ipv4-drb.h"
namespace ns3 {
NS_LOG_COMPONENT_DEFINE("Ipv4Drb");
NS_OBJECT_ENSURE_REGISTERED (Ipv4Drb);
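// Usage sketch (illustrative addresses): DRB round-robins each flow across
// the registered core switch addresses.
//
//   Ptr<Ipv4Drb> drb = CreateObject<Ipv4Drb> ();
//   drb->AddCoreSwitchAddress (Ipv4Address ("10.1.1.1"));
//   drb->AddCoreSwitchAddress (Ipv4Address ("10.1.2.1"));
//   Ipv4Address nextHop = drb->GetCoreSwitchAddress (flowId);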
TypeId
Ipv4Drb::GetTypeId (void)<|fim▁hole|> .SetParent<Object>()
.SetGroupName ("Internet")
.AddConstructor<Ipv4Drb> ();
return tid;
}
Ipv4Drb::Ipv4Drb ()
{
NS_LOG_FUNCTION (this);
}
Ipv4Drb::~Ipv4Drb ()
{
NS_LOG_FUNCTION (this);
}
Ipv4Address
Ipv4Drb::GetCoreSwitchAddress (uint32_t flowId)
{
NS_LOG_FUNCTION (this);
uint32_t listSize = m_coreSwitchAddressList.size();
if (listSize == 0)
{
return Ipv4Address ();
}
uint32_t index = rand () % listSize;
std::map<uint32_t, uint32_t>::iterator itr = m_indexMap.find (flowId);
if (itr != m_indexMap.end ())
{
index = itr->second;
}
m_indexMap[flowId] = ((index + 1) % listSize);
Ipv4Address addr = m_coreSwitchAddressList[index];
NS_LOG_DEBUG (this << " The index for flow: " << flowId << " is : " << index);
return addr;
}
void
Ipv4Drb::AddCoreSwitchAddress (Ipv4Address addr)
{
NS_LOG_FUNCTION (this << addr);
m_coreSwitchAddressList.push_back (addr);
}
void
Ipv4Drb::AddCoreSwitchAddress (uint32_t k, Ipv4Address addr)
{
for (uint32_t i = 0; i < k; i++)
{
Ipv4Drb::AddCoreSwitchAddress(addr);
}
}
}<|fim▁end|> | {
static TypeId tid = TypeId("ns3::Ipv4Drb") |
<|file_name|>theoretical.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Define the Expansion Valve component.
"""
from scr.logic.components.component import Component as Cmp
from scr.logic.components.component import ComponentInfo as CmpInfo
from scr.logic.components.component import component, fundamental_equation
def update_saved_data_to_last_version(orig_data, orig_version):
return orig_data
@component('theoretical_expansion_valve', CmpInfo.EXPANSION_VALVE, 1, update_saved_data_to_last_version)
class Theoretical(Cmp):
def __init__(self, id_, inlet_nodes_id, outlet_nodes_id, component_data):
super().__init__(id_, inlet_nodes_id, outlet_nodes_id, component_data)
""" Fundamental properties equations """
@fundamental_equation()
# function name can be arbitrary. Return a single vector with each side of the equation evaluated.
def _eval_intrinsic_equations(self):
id_inlet_node = self.get_id_inlet_nodes()[0]
inlet_node = self.get_inlet_node(id_inlet_node)
id_outlet_node = self.get_id_outlet_nodes()[0]<|fim▁hole|>
h_in = inlet_node.enthalpy()
h_out = outlet_node.enthalpy()
return [h_in / 1000.0, h_out / 1000.0]<|fim▁end|> | outlet_node = self.get_outlet_node(id_outlet_node) |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for made_with_twd_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one<|fim▁hole|>middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "made_with_twd_project.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "made_with_twd_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)<|fim▁end|> | that later delegates to the Django one. For example, you could introduce WSGI |
<|file_name|>0020_rename_index_relationship.py<|end_file_name|><|fim▁begin|>from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('form_processor', '0019_allow_closed_by_null'),
]
<|fim▁hole|> model_name='commcarecaseindexsql',
old_name='relationship',
new_name='relationship_id',
),
]<|fim▁end|> | operations = [
migrations.RenameField( |
<|file_name|>AnyOf_test.cpp<|end_file_name|><|fim▁begin|>/*
* AnyOf_test.cpp<|fim▁hole|> * Author: ljeff
*/
#include "AnyOf.h"
namespace algorithm {
} /* namespace algorithm */<|fim▁end|> | *
* Created on: Jan 30, 2016 |
<|file_name|>kegg_database.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Tue May 31 10:57:02 2016
@author: noore
"""
import re
import bioservices.kegg
import pandas as pd
import settings  # assumed: local module providing KEGG2CHEBI_FNAME
kegg = bioservices.kegg.KEGG()
cid2name = kegg.list('cpd')
cid2name = filter(lambda x: len(x) == 2, map(lambda l : l.split('\t'), cid2name.split('\n')))
cid_df = pd.DataFrame(cid2name, columns=['cpd', 'names'])
cid_df['cpd'] = cid_df['cpd'].apply(lambda x: x[4:])
cid_df['name'] = cid_df['names'].apply(lambda s: s.split(';')[0])
cid_df.set_index('cpd', inplace=True)
cid_df['inchi'] = None
# cid2chebi collects the ChEBI id found for each compound; it was referenced
# below but never defined in the original script (assumed intent).
cid2chebi = pd.DataFrame(index=cid_df.index, columns=['ChEBI'])
for cid in cid_df.index[0:10]:
ChEBI = re.findall('ChEBI: ([\d\s]+)\n', kegg.get(cid))
if len(ChEBI) == 0:
print 'Cannot find a ChEBI for %s' % cid
elif len(ChEBI) > 1:
print 'Error parsing compound %s' % cid<|fim▁hole|>
cid2chebi.to_csv(settings.KEGG2CHEBI_FNAME)<|fim▁end|> | else:
cid2chebi.at[cid, 'ChEBI'] = ChEBI[0] |
<|file_name|>Student.java<|end_file_name|><|fim▁begin|>public class Student {
private String namn;<|fim▁hole|> private int födelseår, status, id;
public Student(){}
public String getNamn() {
return namn;
}
public void setNamn(String nyNamn) {
namn=nyNamn;
}
public int getId() {
return id;
}
public void setId(int nyId) {
id=nyId;
}
public int getStatus() {
return status;
}
public void setStatus(int nyStatus) {
status=nyStatus;
}
public int getFödelseår() {
return födelseår;
}
public void setFödelseår(int nyFödelseår) {
födelseår=nyFödelseår;
}
public String print() {
return namn+"\t"+id+"\t"+födelseår;
}
}<|fim▁end|> | |
<|file_name|>vpcmpgtw.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
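// Encoding tests for VPCMPGTW (compare packed signed words for greater-than),
// covering the VEX xmm/ymm forms and the EVEX forms with a mask-register
// destination.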
fn vpcmpgtw_1() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(XMM4)), operand2: Some(Direct(XMM5)), operand3: Some(Direct(XMM5)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 209, 101, 229], OperandSize::Dword)
}
fn vpcmpgtw_2() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM1)), operand3: Some(Indirect(EDI, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 241, 101, 7], OperandSize::Dword)
}
<|fim▁hole|> run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM1)), operand3: Some(Direct(XMM2)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 241, 101, 250], OperandSize::Qword)
}
fn vpcmpgtw_4() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM2)), operand3: Some(IndirectScaledIndexedDisplaced(RDX, RDX, Two, 1340486030, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 233, 101, 132, 82, 142, 49, 230, 79], OperandSize::Qword)
}
fn vpcmpgtw_5() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM4)), operand3: Some(Direct(YMM4)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 221, 101, 244], OperandSize::Dword)
}
fn vpcmpgtw_6() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(YMM2)), operand2: Some(Direct(YMM4)), operand3: Some(IndirectScaledDisplaced(EDI, Four, 789118305, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 221, 101, 20, 189, 97, 253, 8, 47], OperandSize::Dword)
}
fn vpcmpgtw_7() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(YMM4)), operand2: Some(Direct(YMM5)), operand3: Some(Direct(YMM1)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 213, 101, 225], OperandSize::Qword)
}
fn vpcmpgtw_8() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(YMM7)), operand2: Some(Direct(YMM4)), operand3: Some(IndirectDisplaced(RBX, 1439538764, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 221, 101, 187, 76, 158, 205, 85], OperandSize::Qword)
}
fn vpcmpgtw_9() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K1)), operand2: Some(Direct(XMM4)), operand3: Some(Direct(XMM2)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K7), broadcast: None }, &[98, 241, 93, 15, 101, 202], OperandSize::Dword)
}
fn vpcmpgtw_10() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K5)), operand2: Some(Direct(XMM2)), operand3: Some(IndirectScaledDisplaced(EDI, Four, 1686663324, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K7), broadcast: None }, &[98, 241, 109, 15, 101, 44, 189, 156, 112, 136, 100], OperandSize::Dword)
}
fn vpcmpgtw_11() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K2)), operand2: Some(Direct(XMM12)), operand3: Some(Direct(XMM7)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 241, 29, 11, 101, 215], OperandSize::Qword)
}
fn vpcmpgtw_12() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K5)), operand2: Some(Direct(XMM17)), operand3: Some(IndirectScaledIndexed(RSI, RCX, Four, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K1), broadcast: None }, &[98, 241, 117, 1, 101, 44, 142], OperandSize::Qword)
}
fn vpcmpgtw_13() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K3)), operand2: Some(Direct(YMM5)), operand3: Some(Direct(YMM2)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 241, 85, 45, 101, 218], OperandSize::Dword)
}
fn vpcmpgtw_14() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K1)), operand2: Some(Direct(YMM2)), operand3: Some(IndirectScaledIndexedDisplaced(ESI, EDI, Two, 995193770, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K4), broadcast: None }, &[98, 241, 109, 44, 101, 140, 126, 170, 115, 81, 59], OperandSize::Dword)
}
fn vpcmpgtw_15() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K1)), operand2: Some(Direct(YMM29)), operand3: Some(Direct(YMM28)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 145, 21, 35, 101, 204], OperandSize::Qword)
}
fn vpcmpgtw_16() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K5)), operand2: Some(Direct(YMM28)), operand3: Some(IndirectScaledIndexedDisplaced(RCX, RCX, Four, 466836431, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 241, 29, 34, 101, 172, 137, 207, 91, 211, 27], OperandSize::Qword)
}
fn vpcmpgtw_17() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K6)), operand2: Some(Direct(ZMM6)), operand3: Some(Direct(ZMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 241, 77, 75, 101, 240], OperandSize::Dword)
}
fn vpcmpgtw_18() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K1)), operand2: Some(Direct(ZMM5)), operand3: Some(Indirect(EAX, Some(OperandSize::Zmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 241, 85, 78, 101, 8], OperandSize::Dword)
}
fn vpcmpgtw_19() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K7)), operand2: Some(Direct(ZMM3)), operand3: Some(Direct(ZMM30)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K4), broadcast: None }, &[98, 145, 101, 76, 101, 254], OperandSize::Qword)
}
fn vpcmpgtw_20() {
run_test(&Instruction { mnemonic: Mnemonic::VPCMPGTW, operand1: Some(Direct(K4)), operand2: Some(Direct(ZMM11)), operand3: Some(IndirectDisplaced(RDX, 298168796, Some(OperandSize::Zmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 241, 37, 75, 101, 162, 220, 177, 197, 17], OperandSize::Qword)
}<|fim▁end|> | fn vpcmpgtw_3() { |
<|file_name|>tools.py<|end_file_name|><|fim▁begin|>"""Non-application-specific convenience methods for GPkit"""
import numpy as np
def te_exp_minus1(posy, nterm):
"""Taylor expansion of e^{posy} - 1
Arguments
---------
posy : gpkit.Posynomial
Variable or expression to exponentiate
nterm : int
Number of non-constant terms in resulting Taylor expansion<|fim▁hole|>
Returns
-------
gpkit.Posynomial
Taylor expansion of e^{posy} - 1, carried to nterm terms
"""
res = 0
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= i
res += posy**i / factorial_denom
return res
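# Usage sketch (assumes a gpkit Variable named x; with nterm=4 the result is
# the posynomial x + x**2/2 + x**3/6 + x**4/24):
#
#     from gpkit import Variable
#     x = Variable("x")
#     approx = te_exp_minus1(x, 4)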
def te_secant(var, nterm):
"""Taylor expansion of secant(var).
Arguments
---------
var : gpkit.monomial
Variable or expression argument
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of secant(x), carried to nterm terms
"""
# The first 12 Euler Numbers
E2n = np.asarray([1.0,
5,
61,
1385,
50521,
2702765,
199360981,
19391512145,
2404879675441,
370371188237525,
69348874393137901,
15514534163557086905])
if nterm > 12:
n_extend = np.asarray(range(13, nterm+1))
E2n_add = (8 * np.sqrt(n_extend/np.pi)
* (4*n_extend/(np.pi * np.exp(1)))**(2*n_extend))
E2n = np.append(E2n, E2n_add)
res = 1
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= ((2*i)*(2*i-1))
res = res + var**(2*i) * E2n[i-1] / factorial_denom
return res
def te_tangent(var, nterm):
"""Taylor expansion of tangent(var).
Arguments
---------
var : gpkit.monomial
Variable or expression argument
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of tangent(x), carried to nterm terms
"""
if nterm > 15:
raise NotImplementedError("Tangent expansion not implemented above"
" 15 terms")
# The first 15 Bernoulli Numbers
B2n = np.asarray([1/6,
-1/30,
1/42,
-1/30,
5/66,
-691/2730,
7/6,
-3617/510,
43867/798,
-174611/330,
854513/138,
-236364091/2730,
8553103/6,
-23749461029/870,
8615841276005/14322])
res = 0
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= ((2*i)*(2*i-1))
res += ((-1)**(i-1) * 2**(2*i) * (2**(2*i) - 1) *
B2n[i-1] / factorial_denom * var**(2*i-1))
return res<|fim▁end|> | |
<|file_name|>query.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#! /usr/bin/env python
import os
import sys
from IPython.terminal.embed import InteractiveShellEmbed
from mongoalchemy.session import Session
HERE = os.path.abspath(os.path.dirname(__file__))
ROOT = os.path.join(HERE, '..')
sys.path.append(ROOT)
from server.model.user import User # noqa
from server.model.notification import Notification # noqa
from server.settings import config # noqa
config.configure()
session = Session.connect(config.get("mongo_database_name"))
ipshell = InteractiveShellEmbed()
banner = "[*] Import the model you want to query: from server.model.{model_name} import {Model}" # noqa
ipshell(banner)<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! Provide methods to generate, read, write or validate keysets.
mod binary_io;
pub use binary_io::*;
mod handle;
pub use handle::*;
#[cfg(feature = "json")]
#[cfg_attr(docsrs, doc(cfg(feature = "json")))]
mod json_io;
#[cfg(feature = "json")]<|fim▁hole|>mod mem_io;
pub use mem_io::*;
mod reader;
pub use reader::*;
mod validation;
pub use validation::*;
mod writer;
pub use writer::*;
#[cfg(feature = "insecure")]
#[cfg_attr(docsrs, doc(cfg(feature = "insecure")))]
pub mod insecure;<|fim▁end|> | pub use json_io::*;
mod manager;
pub use manager::*; |
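// Usage sketch (assumes the tink-aead crate with tink_aead::init() called to
// register key managers; the AES-256-GCM template choice is illustrative):
//
//     let kt = tink_aead::aes256_gcm_key_template();
//     let handle = tink_core::keyset::Handle::new(&kt)?;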
<|file_name|>vue-resource.common.js<|end_file_name|><|fim▁begin|>/*!
* vue-resource v1.5.3
* https://github.com/pagekit/vue-resource
* Released under the MIT License.
*/
'use strict';
/**
* Promises/A+ polyfill v1.1.4 (https://github.com/bramstein/promis)
*/
var RESOLVED = 0;
var REJECTED = 1;
var PENDING = 2;
function Promise$1(executor) {
this.state = PENDING;
this.value = undefined;
this.deferred = [];
var promise = this;
try {
executor(function (x) {
promise.resolve(x);
}, function (r) {
promise.reject(r);
});
} catch (e) {
promise.reject(e);
}
}
Promise$1.reject = function (r) {
return new Promise$1(function (resolve, reject) {
reject(r);
});
};
Promise$1.resolve = function (x) {
return new Promise$1(function (resolve, reject) {
resolve(x);
});
};
Promise$1.all = function all(iterable) {
return new Promise$1(function (resolve, reject) {
var count = 0,
result = [];
if (iterable.length === 0) {
resolve(result);
}
function resolver(i) {
return function (x) {
result[i] = x;
count += 1;
if (count === iterable.length) {
resolve(result);
}
};
}
for (var i = 0; i < iterable.length; i += 1) {
Promise$1.resolve(iterable[i]).then(resolver(i), reject);
}
});
};
Promise$1.race = function race(iterable) {
return new Promise$1(function (resolve, reject) {
for (var i = 0; i < iterable.length; i += 1) {
Promise$1.resolve(iterable[i]).then(resolve, reject);
}
});
};
var p = Promise$1.prototype;
p.resolve = function resolve(x) {
var promise = this;
if (promise.state === PENDING) {
if (x === promise) {
throw new TypeError('Promise settled with itself.');
}
var called = false;
try {
var then = x && x['then'];
if (x !== null && typeof x === 'object' && typeof then === 'function') {
then.call(x, function (x) {
if (!called) {
promise.resolve(x);
}
called = true;
}, function (r) {
if (!called) {
promise.reject(r);
}
called = true;
});
return;
}
} catch (e) {
if (!called) {
promise.reject(e);
}
return;
}
promise.state = RESOLVED;
promise.value = x;
promise.notify();
}
};
p.reject = function reject(reason) {
var promise = this;
if (promise.state === PENDING) {
if (reason === promise) {
throw new TypeError('Promise settled with itself.');
}
promise.state = REJECTED;
promise.value = reason;
promise.notify();
}
};
p.notify = function notify() {
var promise = this;
nextTick(function () {
if (promise.state !== PENDING) {
while (promise.deferred.length) {
var deferred = promise.deferred.shift(),
onResolved = deferred[0],
onRejected = deferred[1],
resolve = deferred[2],
reject = deferred[3];
try {
if (promise.state === RESOLVED) {
if (typeof onResolved === 'function') {
resolve(onResolved.call(undefined, promise.value));
} else {
resolve(promise.value);
}
} else if (promise.state === REJECTED) {
if (typeof onRejected === 'function') {
resolve(onRejected.call(undefined, promise.value));
} else {
reject(promise.value);
}
}
} catch (e) {
reject(e);
}
}
}
});
};
p.then = function then(onResolved, onRejected) {
var promise = this;
return new Promise$1(function (resolve, reject) {
promise.deferred.push([onResolved, onRejected, resolve, reject]);
promise.notify();
});
};
p["catch"] = function (onRejected) {
return this.then(undefined, onRejected);
};
/**
* Promise adapter.
*/
if (typeof Promise === 'undefined') {
window.Promise = Promise$1;
}
function PromiseObj(executor, context) {
if (executor instanceof Promise) {
this.promise = executor;
} else {
this.promise = new Promise(executor.bind(context));
}
this.context = context;
}
PromiseObj.all = function (iterable, context) {
return new PromiseObj(Promise.all(iterable), context);
};
PromiseObj.resolve = function (value, context) {
return new PromiseObj(Promise.resolve(value), context);
};
PromiseObj.reject = function (reason, context) {
return new PromiseObj(Promise.reject(reason), context);
};
PromiseObj.race = function (iterable, context) {
return new PromiseObj(Promise.race(iterable), context);
};
var p$1 = PromiseObj.prototype;
p$1.bind = function (context) {
this.context = context;
return this;
};
p$1.then = function (fulfilled, rejected) {
if (fulfilled && fulfilled.bind && this.context) {
fulfilled = fulfilled.bind(this.context);
}
if (rejected && rejected.bind && this.context) {
rejected = rejected.bind(this.context);
}
return new PromiseObj(this.promise.then(fulfilled, rejected), this.context);
};
p$1["catch"] = function (rejected) {
if (rejected && rejected.bind && this.context) {
rejected = rejected.bind(this.context);
}
return new PromiseObj(this.promise["catch"](rejected), this.context);
};
p$1["finally"] = function (callback) {
return this.then(function (value) {
callback.call(this);
return value;
}, function (reason) {
callback.call(this);
return Promise.reject(reason);
});
};
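// Illustrative (not in the source): the optional `context` becomes `this`
// inside chained handlers, which is how vue-resource ties promises to a
// Vue instance:
//
//     new PromiseObj(function (resolve) { resolve(42); }, vm)
//         .then(function (v) { /* here `this` === vm */ });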
/**
* Utility functions.
*/
var _ref = {},
hasOwnProperty = _ref.hasOwnProperty,
slice = [].slice,
debug = false,
ntick;
var inBrowser = typeof window !== 'undefined';
function Util (_ref2) {
var config = _ref2.config,
nextTick = _ref2.nextTick;
ntick = nextTick;
debug = config.debug || !config.silent;
}
function warn(msg) {
if (typeof console !== 'undefined' && debug) {
console.warn('[VueResource warn]: ' + msg);
}
}
function error(msg) {
if (typeof console !== 'undefined') {
console.error(msg);
}
}
function nextTick(cb, ctx) {
return ntick(cb, ctx);
}
function trim(str) {
return str ? str.replace(/^\s*|\s*$/g, '') : '';
}
function trimEnd(str, chars) {
if (str && chars === undefined) {
return str.replace(/\s+$/, '');
}
if (!str || !chars) {
return str;
}
return str.replace(new RegExp("[" + chars + "]+$"), '');
}
function toLower(str) {
return str ? str.toLowerCase() : '';
}
function toUpper(str) {
return str ? str.toUpperCase() : '';
}
var isArray = Array.isArray;
function isString(val) {
return typeof val === 'string';
}
function isFunction(val) {
return typeof val === 'function';
}
function isObject(obj) {
return obj !== null && typeof obj === 'object';
}
function isPlainObject(obj) {
return isObject(obj) && Object.getPrototypeOf(obj) == Object.prototype;
}
function isBlob(obj) {
return typeof Blob !== 'undefined' && obj instanceof Blob;
}
function isFormData(obj) {
return typeof FormData !== 'undefined' && obj instanceof FormData;
}
function when(value, fulfilled, rejected) {
var promise = PromiseObj.resolve(value);
if (arguments.length < 2) {
return promise;
}
return promise.then(fulfilled, rejected);
}
function options(fn, obj, opts) {
opts = opts || {};
if (isFunction(opts)) {
opts = opts.call(obj);
}
return merge(fn.bind({
$vm: obj,
$options: opts
}), fn, {
$options: opts
});
}
function each(obj, iterator) {
var i, key;
if (isArray(obj)) {
for (i = 0; i < obj.length; i++) {
iterator.call(obj[i], obj[i], i);
}
} else if (isObject(obj)) {
for (key in obj) {
if (hasOwnProperty.call(obj, key)) {
iterator.call(obj[key], obj[key], key);
}
}
}
return obj;
}
var assign = Object.assign || _assign;
function merge(target) {
var args = slice.call(arguments, 1);
args.forEach(function (source) {
_merge(target, source, true);
});
return target;
}
function defaults(target) {
var args = slice.call(arguments, 1);
args.forEach(function (source) {
for (var key in source) {
if (target[key] === undefined) {
target[key] = source[key];
}
}
});
return target;
}
function _assign(target) {
var args = slice.call(arguments, 1);
args.forEach(function (source) {
_merge(target, source);
});
return target;
}
function _merge(target, source, deep) {
for (var key in source) {
if (deep && (isPlainObject(source[key]) || isArray(source[key]))) {
if (isPlainObject(source[key]) && !isPlainObject(target[key])) {
target[key] = {};
}
if (isArray(source[key]) && !isArray(target[key])) {
target[key] = [];
}
_merge(target[key], source[key], deep);
} else if (source[key] !== undefined) {
target[key] = source[key];
}
}
}
/**
* Root Prefix Transform.
*/
function root (options$$1, next) {
var url = next(options$$1);
if (isString(options$$1.root) && !/^(https?:)?\//.test(url)) {
url = trimEnd(options$$1.root, '/') + '/' + url;
}
return url;
}
/**
* Query Parameter Transform.
*/
function query (options$$1, next) {
var urlParams = Object.keys(Url.options.params),
query = {},
url = next(options$$1);
each(options$$1.params, function (value, key) {
if (urlParams.indexOf(key) === -1) {
query[key] = value;
}
});
query = Url.params(query);
if (query) {
url += (url.indexOf('?') == -1 ? '?' : '&') + query;
}
return url;
}
/**
* URL Template v2.0.6 (https://github.com/bramstein/url-template)
*/
function expand(url, params, variables) {
var tmpl = parse(url),
expanded = tmpl.expand(params);
if (variables) {
variables.push.apply(variables, tmpl.vars);
}
return expanded;
}
function parse(template) {
var operators = ['+', '#', '.', '/', ';', '?', '&'],
variables = [];
return {
vars: variables,
expand: function expand(context) {
return template.replace(/\{([^{}]+)\}|([^{}]+)/g, function (_, expression, literal) {
if (expression) {
var operator = null,
values = [];
if (operators.indexOf(expression.charAt(0)) !== -1) {
operator = expression.charAt(0);
expression = expression.substr(1);
}
expression.split(/,/g).forEach(function (variable) {
var tmp = /([^:*]*)(?::(\d+)|(\*))?/.exec(variable);
values.push.apply(values, getValues(context, operator, tmp[1], tmp[2] || tmp[3]));
variables.push(tmp[1]);
});
if (operator && operator !== '+') {
var separator = ',';
if (operator === '?') {
separator = '&';
} else if (operator !== '#') {
separator = operator;
}
return (values.length !== 0 ? operator : '') + values.join(separator);
} else {
return values.join(',');
}
} else {
return encodeReserved(literal);
}
});
}
};
}
function getValues(context, operator, key, modifier) {
var value = context[key],
result = [];
if (isDefined(value) && value !== '') {
if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {
value = value.toString();
if (modifier && modifier !== '*') {
value = value.substring(0, parseInt(modifier, 10));
}
result.push(encodeValue(operator, value, isKeyOperator(operator) ? key : null));
} else {
if (modifier === '*') {
if (Array.isArray(value)) {
value.filter(isDefined).forEach(function (value) {
result.push(encodeValue(operator, value, isKeyOperator(operator) ? key : null));
});
} else {
Object.keys(value).forEach(function (k) {
if (isDefined(value[k])) {
result.push(encodeValue(operator, value[k], k));
}
});
}
} else {
var tmp = [];
if (Array.isArray(value)) {
value.filter(isDefined).forEach(function (value) {
tmp.push(encodeValue(operator, value));
});
} else {
Object.keys(value).forEach(function (k) {
if (isDefined(value[k])) {
tmp.push(encodeURIComponent(k));
tmp.push(encodeValue(operator, value[k].toString()));
}
});
}
if (isKeyOperator(operator)) {
result.push(encodeURIComponent(key) + '=' + tmp.join(','));
} else if (tmp.length !== 0) {
result.push(tmp.join(','));
}
}
}
} else {
if (operator === ';') {
result.push(encodeURIComponent(key));
} else if (value === '' && (operator === '&' || operator === '?')) {
result.push(encodeURIComponent(key) + '=');
} else if (value === '') {
result.push('');
}
}
return result;
}
function isDefined(value) {
return value !== undefined && value !== null;
}
function isKeyOperator(operator) {
return operator === ';' || operator === '&' || operator === '?';
}
function encodeValue(operator, value, key) {
value = operator === '+' || operator === '#' ? encodeReserved(value) : encodeURIComponent(value);
if (key) {
return encodeURIComponent(key) + '=' + value;
} else {
return value;
}
}
function encodeReserved(str) {
return str.split(/(%[0-9A-Fa-f]{2})/g).map(function (part) {
if (!/%[0-9A-Fa-f]/.test(part)) {
part = encodeURI(part);
}
return part;
}).join('');
}
/**
* URL Template (RFC 6570) Transform.
*/
function template (options) {
var variables = [],
url = expand(options.url, options.params, variables);
variables.forEach(function (key) {
delete options.params[key];
});
return url;
}
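// Illustrative expansion (RFC 6570 as implemented above):
//
//     expand('/users/{id}{?q}', {id: 42, q: 'vue'});  // -> '/users/42?q=vue'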
/**
* Service for URL templating.
*/
function Url(url, params) {
var self = this || {},
options$$1 = url,
transform;
if (isString(url)) {
options$$1 = {
url: url,
params: params
};
}
options$$1 = merge({}, Url.options, self.$options, options$$1);
Url.transforms.forEach(function (handler) {
if (isString(handler)) {
handler = Url.transform[handler];
}
if (isFunction(handler)) {
transform = factory(handler, transform, self.$vm);
}
});
return transform(options$$1);
}
/**
* Url options.
*/
Url.options = {
url: '',
root: null,
params: {}
};
/**
* Url transforms.
*/
Url.transform = {
template: template,
query: query,
root: root
};
Url.transforms = ['template', 'query', 'root'];
/**
* Encodes a Url parameter string.
*
* @param {Object} obj
*/
Url.params = function (obj) {
var params = [],
escape = encodeURIComponent;
params.add = function (key, value) {
if (isFunction(value)) {
value = value();
}
if (value === null) {
value = '';
}
this.push(escape(key) + '=' + escape(value));
};
serialize(params, obj);
return params.join('&').replace(/%20/g, '+');
};
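// Illustrative (not in the source):
//
//     Url.params({q: 'vue resource', page: 2});  // -> 'q=vue+resource&page=2'
//     Url.params({filter: {type: 'a'}});         // -> 'filter%5Btype%5D=a'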
/**
* Parse a URL and return its components.
*
* @param {String} url
*/
Url.parse = function (url) {
var el = document.createElement('a');
if (document.documentMode) {
el.href = url;
url = el.href;
}
el.href = url;
return {
href: el.href,
protocol: el.protocol ? el.protocol.replace(/:$/, '') : '',
port: el.port,
host: el.host,
hostname: el.hostname,
pathname: el.pathname.charAt(0) === '/' ? el.pathname : '/' + el.pathname,
search: el.search ? el.search.replace(/^\?/, '') : '',
hash: el.hash ? el.hash.replace(/^#/, '') : ''
};
};
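// Illustrative (browser-only, since parsing relies on an <a> element):
//
//     Url.parse('http://example.com:8080/a/b?x=1#top').pathname;  // -> '/a/b'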
function factory(handler, next, vm) {
return function (options$$1) {
return handler.call(vm, options$$1, next);
};
}
function serialize(params, obj, scope) {
var array = isArray(obj),
plain = isPlainObject(obj),
hash;
each(obj, function (value, key) {
hash = isObject(value) || isArray(value);
if (scope) {
key = scope + '[' + (plain || hash ? key : '') + ']';
}
if (!scope && array) {
params.add(value.name, value.value);
} else if (hash) {
serialize(params, value, key);
} else {
params.add(key, value);
}
});
}
/**
* XDomain client (Internet Explorer).
*/
function xdrClient (request) {
return new PromiseObj(function (resolve) {
var xdr = new XDomainRequest(),
handler = function handler(_ref) {
var type = _ref.type;
var status = 0;
if (type === 'load') {
status = 200;
} else if (type === 'error') {
status = 500;
}
resolve(request.respondWith(xdr.responseText, {
status: status
}));
};
request.abort = function () {
return xdr.abort();
};
xdr.open(request.method, request.getUrl());
if (request.timeout) {
xdr.timeout = request.timeout;
}
xdr.onload = handler;
xdr.onabort = handler;
xdr.onerror = handler;
xdr.ontimeout = handler;
xdr.onprogress = function () {};
xdr.send(request.getBody());
});
}
/**
* CORS Interceptor.
*/
var SUPPORTS_CORS = inBrowser && 'withCredentials' in new XMLHttpRequest();
function cors (request) {
if (inBrowser) {
var orgUrl = Url.parse(location.href);
var reqUrl = Url.parse(request.getUrl());
if (reqUrl.protocol !== orgUrl.protocol || reqUrl.host !== orgUrl.host) {
request.crossOrigin = true;
request.emulateHTTP = false;
if (!SUPPORTS_CORS) {
request.client = xdrClient;
}
}
}
}
/**
* Form data Interceptor.
*/
function form (request) {
if (isFormData(request.body)) {
request.headers["delete"]('Content-Type');
} else if (isObject(request.body) && request.emulateJSON) {
request.body = Url.params(request.body);
request.headers.set('Content-Type', 'application/x-www-form-urlencoded');
}
}
/**
* JSON Interceptor.
*/
function json (request) {
var type = request.headers.get('Content-Type') || '';
if (isObject(request.body) && type.indexOf('application/json') === 0) {
request.body = JSON.stringify(request.body);
}
return function (response) {
return response.bodyText ? when(response.text(), function (text) {
var type = response.headers.get('Content-Type') || '';
if (type.indexOf('application/json') === 0 || isJson(text)) {
try {
response.body = JSON.parse(text);
} catch (e) {
response.body = null;
}
} else {
response.body = text;
}
return response;
}) : response;
};
}
function isJson(str) {
var start = str.match(/^\s*(\[|\{)/);
var end = {
'[': /]\s*$/,
'{': /}\s*$/
};
return start && end[start[1]].test(str);
}
/**
* JSONP client (Browser).
*/
function jsonpClient (request) {
return new PromiseObj(function (resolve) {
var name = request.jsonp || 'callback',
callback = request.jsonpCallback || '_jsonp' + Math.random().toString(36).substr(2),
body = null,
handler,
script;
handler = function handler(_ref) {
var type = _ref.type;
var status = 0;
if (type === 'load' && body !== null) {
status = 200;
} else if (type === 'error') {
status = 500;
}
if (status && window[callback]) {
delete window[callback];
document.body.removeChild(script);
}
resolve(request.respondWith(body, {
status: status
}));
};
window[callback] = function (result) {
body = JSON.stringify(result);
};
request.abort = function () {
handler({<|fim▁hole|> request.params[name] = callback;
if (request.timeout) {
setTimeout(request.abort, request.timeout);
}
script = document.createElement('script');
script.src = request.getUrl();
script.type = 'text/javascript';
script.async = true;
script.onload = handler;
script.onerror = handler;
document.body.appendChild(script);
});
}
/**
* JSONP Interceptor.
*/
function jsonp (request) {
if (request.method == 'JSONP') {
request.client = jsonpClient;
}
}
/**
* Before Interceptor.
*/
function before (request) {
if (isFunction(request.before)) {
request.before.call(this, request);
}
}
/**
* HTTP method override Interceptor.
*/
function method (request) {
if (request.emulateHTTP && /^(PUT|PATCH|DELETE)$/i.test(request.method)) {
request.headers.set('X-HTTP-Method-Override', request.method);
request.method = 'POST';
}
}
/**
* Header Interceptor.
*/
function header (request) {
var headers = assign({}, Http.headers.common, !request.crossOrigin ? Http.headers.custom : {}, Http.headers[toLower(request.method)]);
each(headers, function (value, name) {
if (!request.headers.has(name)) {
request.headers.set(name, value);
}
});
}
/**
* XMLHttp client (Browser).
*/
function xhrClient (request) {
return new PromiseObj(function (resolve) {
var xhr = new XMLHttpRequest(),
handler = function handler(event) {
var response = request.respondWith('response' in xhr ? xhr.response : xhr.responseText, {
status: xhr.status === 1223 ? 204 : xhr.status,
// IE9 status bug
statusText: xhr.status === 1223 ? 'No Content' : trim(xhr.statusText)
});
each(trim(xhr.getAllResponseHeaders()).split('\n'), function (row) {
response.headers.append(row.slice(0, row.indexOf(':')), row.slice(row.indexOf(':') + 1));
});
resolve(response);
};
request.abort = function () {
return xhr.abort();
};
xhr.open(request.method, request.getUrl(), true);
if (request.timeout) {
xhr.timeout = request.timeout;
}
if (request.responseType && 'responseType' in xhr) {
xhr.responseType = request.responseType;
}
if (request.withCredentials || request.credentials) {
xhr.withCredentials = true;
}
if (!request.crossOrigin) {
request.headers.set('X-Requested-With', 'XMLHttpRequest');
} // deprecated use downloadProgress
if (isFunction(request.progress) && request.method === 'GET') {
xhr.addEventListener('progress', request.progress);
}
if (isFunction(request.downloadProgress)) {
xhr.addEventListener('progress', request.downloadProgress);
} // deprecated use uploadProgress
if (isFunction(request.progress) && /^(POST|PUT)$/i.test(request.method)) {
xhr.upload.addEventListener('progress', request.progress);
}
if (isFunction(request.uploadProgress) && xhr.upload) {
xhr.upload.addEventListener('progress', request.uploadProgress);
}
request.headers.forEach(function (value, name) {
xhr.setRequestHeader(name, value);
});
xhr.onload = handler;
xhr.onabort = handler;
xhr.onerror = handler;
xhr.ontimeout = handler;
xhr.send(request.getBody());
});
}
/**
* Http client (Node).
*/
function nodeClient (request) {
var client = require('got');
return new PromiseObj(function (resolve) {
var url = request.getUrl();
var body = request.getBody();
var method = request.method;
var headers = {},
handler;
request.headers.forEach(function (value, name) {
headers[name] = value;
});
client(url, {
body: body,
method: method,
headers: headers
}).then(handler = function handler(resp) {
var response = request.respondWith(resp.body, {
status: resp.statusCode,
statusText: trim(resp.statusMessage)
});
each(resp.headers, function (value, name) {
response.headers.set(name, value);
});
resolve(response);
}, function (error$$1) {
return handler(error$$1.response);
});
});
}
/**
* Base client.
*/
function Client (context) {
var reqHandlers = [sendRequest],
resHandlers = [];
if (!isObject(context)) {
context = null;
}
function Client(request) {
while (reqHandlers.length) {
var handler = reqHandlers.pop();
if (isFunction(handler)) {
var _ret = function () {
var response = void 0,
next = void 0;
response = handler.call(context, request, function (val) {
return next = val;
}) || next;
if (isObject(response)) {
return {
v: new PromiseObj(function (resolve, reject) {
resHandlers.forEach(function (handler) {
response = when(response, function (response) {
return handler.call(context, response) || response;
}, reject);
});
when(response, resolve, reject);
}, context)
};
}
if (isFunction(response)) {
resHandlers.unshift(response);
}
}();
if (typeof _ret === "object") return _ret.v;
} else {
warn("Invalid interceptor of type " + typeof handler + ", must be a function");
}
}
}
Client.use = function (handler) {
reqHandlers.push(handler);
};
return Client;
}
function sendRequest(request) {
var client = request.client || (inBrowser ? xhrClient : nodeClient);
return client(request);
}
/**
* HTTP Headers.
*/
var Headers = /*#__PURE__*/function () {
function Headers(headers) {
var _this = this;
this.map = {};
each(headers, function (value, name) {
return _this.append(name, value);
});
}
var _proto = Headers.prototype;
_proto.has = function has(name) {
return getName(this.map, name) !== null;
};
_proto.get = function get(name) {
var list = this.map[getName(this.map, name)];
return list ? list.join() : null;
};
_proto.getAll = function getAll(name) {
return this.map[getName(this.map, name)] || [];
};
_proto.set = function set(name, value) {
this.map[normalizeName(getName(this.map, name) || name)] = [trim(value)];
};
_proto.append = function append(name, value) {
var list = this.map[getName(this.map, name)];
if (list) {
list.push(trim(value));
} else {
this.set(name, value);
}
};
_proto["delete"] = function _delete(name) {
delete this.map[getName(this.map, name)];
};
_proto.deleteAll = function deleteAll() {
this.map = {};
};
_proto.forEach = function forEach(callback, thisArg) {
var _this2 = this;
each(this.map, function (list, name) {
each(list, function (value) {
return callback.call(thisArg, value, name, _this2);
});
});
};
return Headers;
}();
function getName(map, name) {
return Object.keys(map).reduce(function (prev, curr) {
return toLower(name) === toLower(curr) ? curr : prev;
}, null);
}
function normalizeName(name) {
if (/[^a-z0-9\-#$%&'*+.^_`|~]/i.test(name)) {
throw new TypeError('Invalid character in header field name');
}
return trim(name);
}
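// Illustrative (not in the source): getName() makes lookups case-insensitive,
// so a header set under one casing can be read back under another:
//
//     var h = new Headers({'Content-Type': 'text/html'});
//     h.get('content-type');  // -> 'text/html'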
/**
* HTTP Response.
*/
var Response = /*#__PURE__*/function () {
function Response(body, _ref) {
var url = _ref.url,
headers = _ref.headers,
status = _ref.status,
statusText = _ref.statusText;
this.url = url;
this.ok = status >= 200 && status < 300;
this.status = status || 0;
this.statusText = statusText || '';
this.headers = new Headers(headers);
this.body = body;
if (isString(body)) {
this.bodyText = body;
} else if (isBlob(body)) {
this.bodyBlob = body;
if (isBlobText(body)) {
this.bodyText = blobText(body);
}
}
}
var _proto = Response.prototype;
_proto.blob = function blob() {
return when(this.bodyBlob);
};
_proto.text = function text() {
return when(this.bodyText);
};
_proto.json = function json() {
return when(this.text(), function (text) {
return JSON.parse(text);
});
};
return Response;
}();
Object.defineProperty(Response.prototype, 'data', {
get: function get() {
return this.body;
},
set: function set(body) {
this.body = body;
}
});
function blobText(body) {
return new PromiseObj(function (resolve) {
var reader = new FileReader();
reader.readAsText(body);
reader.onload = function () {
resolve(reader.result);
};
});
}
function isBlobText(body) {
return body.type.indexOf('text') === 0 || body.type.indexOf('json') !== -1;
}
/**
* HTTP Request.
*/
var Request = /*#__PURE__*/function () {
function Request(options$$1) {
this.body = null;
this.params = {};
assign(this, options$$1, {
method: toUpper(options$$1.method || 'GET')
});
if (!(this.headers instanceof Headers)) {
this.headers = new Headers(this.headers);
}
}
var _proto = Request.prototype;
_proto.getUrl = function getUrl() {
return Url(this);
};
_proto.getBody = function getBody() {
return this.body;
};
_proto.respondWith = function respondWith(body, options$$1) {
return new Response(body, assign(options$$1 || {}, {
url: this.getUrl()
}));
};
return Request;
}();
/**
* Service for sending network requests.
*/
var COMMON_HEADERS = {
'Accept': 'application/json, text/plain, */*'
};
var JSON_CONTENT_TYPE = {
'Content-Type': 'application/json;charset=utf-8'
};
function Http(options$$1) {
var self = this || {},
client = Client(self.$vm);
defaults(options$$1 || {}, self.$options, Http.options);
Http.interceptors.forEach(function (handler) {
if (isString(handler)) {
handler = Http.interceptor[handler];
}
if (isFunction(handler)) {
client.use(handler);
}
});
return client(new Request(options$$1)).then(function (response) {
return response.ok ? response : PromiseObj.reject(response);
}, function (response) {
if (response instanceof Error) {
error(response);
}
return PromiseObj.reject(response);
});
}
Http.options = {};
Http.headers = {
put: JSON_CONTENT_TYPE,
post: JSON_CONTENT_TYPE,
patch: JSON_CONTENT_TYPE,
"delete": JSON_CONTENT_TYPE,
common: COMMON_HEADERS,
custom: {}
};
Http.interceptor = {
before: before,
method: method,
jsonp: jsonp,
json: json,
form: form,
header: header,
cors: cors
};
Http.interceptors = ['before', 'method', 'jsonp', 'json', 'form', 'header', 'cors'];
['get', 'delete', 'head', 'jsonp'].forEach(function (method$$1) {
Http[method$$1] = function (url, options$$1) {
return this(assign(options$$1 || {}, {
url: url,
method: method$$1
}));
};
});
['post', 'put', 'patch'].forEach(function (method$$1) {
Http[method$$1] = function (url, body, options$$1) {
return this(assign(options$$1 || {}, {
url: url,
method: method$$1,
body: body
}));
};
});
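// Illustrative usage of the shortcuts generated above:
//
//     Http.get('/users', {params: {page: 1}}).then(function (res) { /* ... */ });
//     Http.post('/users', {name: 'Ada'});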
/**
* Service for interacting with RESTful services.
*/
function Resource(url, params, actions, options$$1) {
var self = this || {},
resource = {};
actions = assign({}, Resource.actions, actions);
each(actions, function (action, name) {
action = merge({
url: url,
params: assign({}, params)
}, options$$1, action);
resource[name] = function () {
return (self.$http || Http)(opts(action, arguments));
};
});
return resource;
}
function opts(action, args) {
var options$$1 = assign({}, action),
params = {},
body;
switch (args.length) {
case 2:
params = args[0];
body = args[1];
break;
case 1:
if (/^(POST|PUT|PATCH)$/i.test(options$$1.method)) {
body = args[0];
} else {
params = args[0];
}
break;
case 0:
break;
default:
throw 'Expected up to 2 arguments [params, body], got ' + args.length + ' arguments';
}
options$$1.body = body;
options$$1.params = assign({}, options$$1.params, params);
return options$$1;
}
Resource.actions = {
get: {
method: 'GET'
},
save: {
method: 'POST'
},
query: {
method: 'GET'
},
update: {
method: 'PUT'
},
remove: {
method: 'DELETE'
},
"delete": {
method: 'DELETE'
}
};
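// Illustrative (not in the source): each action above becomes a method on the
// returned object, with the URL treated as an RFC 6570 template.
//
//     var users = Resource('users{/id}');
//     users.get({id: 1});             // GET users/1
//     users.save({}, {name: 'Ada'});  // POST users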
/**
* Install plugin.
*/
function plugin(Vue) {
if (plugin.installed) {
return;
}
Util(Vue);
Vue.url = Url;
Vue.http = Http;
Vue.resource = Resource;
Vue.Promise = PromiseObj;
Object.defineProperties(Vue.prototype, {
$url: {
get: function get() {
return options(Vue.url, this, this.$options.url);
}
},
$http: {
get: function get() {
return options(Vue.http, this, this.$options.http);
}
},
$resource: {
get: function get() {
return Vue.resource.bind(this);
}
},
$promise: {
get: function get() {
var _this = this;
return function (executor) {
return new Vue.Promise(executor, _this);
};
}
}
});
}
if (typeof window !== 'undefined' && window.Vue && !window.Vue.resource) {
window.Vue.use(plugin);
}
module.exports = plugin;<|fim▁end|> | type: 'abort'
});
};
|
<|file_name|>test_gzip_cache.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''Core plugins unit tests'''
import os
import tempfile
import unittest
import time
from contextlib import contextmanager
from tempfile import mkdtemp
from shutil import rmtree
from hashlib import md5
import gzip_cache
@contextmanager
def temporary_folder():
"""creates a temporary folder, return it and delete it afterwards.
This allows to do something like this in tests:
>>> with temporary_folder() as d:
# do whatever you want
"""
tempdir = mkdtemp()
try:
yield tempdir
finally:
rmtree(tempdir)
class TestGzipCache(unittest.TestCase):
def test_should_compress(self):
user_exclude_types = ()
# Some filetypes should compress and others shouldn't.
self.assertTrue(gzip_cache.should_compress('foo.html', user_exclude_types))
self.assertTrue(gzip_cache.should_compress('bar.css', user_exclude_types))
self.assertTrue(gzip_cache.should_compress('baz.js', user_exclude_types))
self.assertTrue(gzip_cache.should_compress('foo.txt', user_exclude_types))
self.assertFalse(gzip_cache.should_compress('foo.gz', user_exclude_types))
self.assertFalse(gzip_cache.should_compress('bar.png', user_exclude_types))
self.assertFalse(gzip_cache.should_compress('baz.mp3', user_exclude_types))
self.assertFalse(gzip_cache.should_compress('foo.mov', user_exclude_types))
user_exclude_types = ('.html', '.xyz')
self.assertFalse(gzip_cache.should_compress('foo.html', user_exclude_types))
self.assertFalse(gzip_cache.should_compress('bar.xyz', user_exclude_types))
self.assertFalse(gzip_cache.should_compress('foo.gz', user_exclude_types))
self.assertTrue(gzip_cache.should_compress('baz.js', user_exclude_types))
def test_should_overwrite(self):
# Default to false if GZIP_CACHE_OVERWRITE is not set
settings = { }
self.assertFalse(gzip_cache.should_overwrite(settings))
settings = { 'GZIP_CACHE_OVERWRITE': False }
self.assertFalse(gzip_cache.should_overwrite(settings))
settings = { 'GZIP_CACHE_OVERWRITE': True }
self.assertTrue(gzip_cache.should_overwrite(settings))
def test_creates_gzip_file(self):
# A file matching the input filename with a .gz extension is created.
# The plugin walks over the output content after the finalized signal
# so it is safe to assume that the file exists (otherwise walk would
# not report it). Therefore, create a dummy file to use.
with temporary_folder() as tempdir:
_, a_html_filename = tempfile.mkstemp(suffix='.html', dir=tempdir)
with open(a_html_filename, 'w') as f:
f.write('A' * 24) # under this length, compressing is useless and create_gzip_file will not create any file
gzip_cache.create_gzip_file(a_html_filename, False)
self.assertTrue(os.path.exists(a_html_filename + '.gz'))
def test_creates_same_gzip_file(self):
# Should create the same gzip file from the same contents.
# gzip will create a slightly different file because it includes
# a timestamp in the compressed file by default. This can cause
# problems for some caching strategies.
with temporary_folder() as tempdir:
_, a_html_filename = tempfile.mkstemp(suffix='.html', dir=tempdir)
with open(a_html_filename, 'w') as f:<|fim▁hole|> a_gz_filename = a_html_filename + '.gz'
gzip_cache.create_gzip_file(a_html_filename, False)
gzip_hash = get_md5(a_gz_filename)
time.sleep(1)
gzip_cache.create_gzip_file(a_html_filename, False)
self.assertEqual(gzip_hash, get_md5(a_gz_filename))
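        # Hedged note (not in the original test): the determinism asserted
        # above presumably relies on gzip_cache pinning the gzip header's
        # mtime field, e.g. gzip.GzipFile(..., mtime=0); the exact mechanism
        # is an assumption, not verified here.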
def test_overwrites_gzip_file(self):
# A file matching the input filename with a .gz extension is not created.
# The plugin walks over the output content after the finalized signal
# so it is safe to assume that the file exists (otherwise walk would
# not report it). Therefore, create a dummy file to use.
with temporary_folder() as tempdir:
_, a_html_filename = tempfile.mkstemp(suffix='.html', dir=tempdir)
gzip_cache.create_gzip_file(a_html_filename, True)
self.assertFalse(os.path.exists(a_html_filename + '.gz'))
def get_md5(filepath):
with open(filepath, 'rb') as fh:
return md5(fh.read()).hexdigest()<|fim▁end|> | f.write('A' * 24) # under this length, compressing is useless and create_gzip_file will not create any file |
<|file_name|>change_version_number.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
## \file change_version_number.py
# \brief Python script for updating the version number of the SU2 suite.
# \author A. Aranake
# \version 5.0.0 "Raven"<|fim▁hole|>#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
# Prof. Edwin van der Weide's group at the University of Twente.
# Prof. Vincent Terrapon's group at the University of Liege.
#
# Copyright (C) 2012-2017 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# Run the script from the base directory (ie $SU2HOME). Grep will search directories recursively for matches in version number
import os,sys
oldvers = '4.3.0 "Cardinal"'
newvers = '5.0.0 "Raven"'
os.system('rm -rf version.txt')
# Grep flag cheatsheet:
# -I : Ignore binary files
# -F : Match exact pattern (instead of regular expressions)
# -w : Match whole word
# -r : search directory recursively
# -v : Omit search string (.svn omitted, line containing ISC is CGNS related)
os.system("grep -IFwr '%s' *|grep -vF '.svn' |grep -v ISC > version.txt"%oldvers)
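# A matched line in version.txt would look like this (hypothetical path):
#   SU2_CFD/src/SU2_CFD.cpp: * \version 4.3.0 "Cardinal"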
# Create a list of files to adjust
filelist = []
f = open('version.txt','r')
for line in f.readlines():
candidate = line.split(':')[0]
if not candidate in filelist and candidate.find(sys.argv[0])<0:
filelist.append(candidate)
f.close()
print filelist
# Prompt user before continuing
yorn = ''
while(not yorn.lower()=='y'):
yorn = raw_input('Replace %s with %s in the listed files? [Y/N]: '%(oldvers,newvers))
if yorn.lower()=='n':
print 'The file version.txt contains matches of oldvers'
sys.exit()
# Loop through and correct all files
for fname in filelist:
s = open(fname,'r').read()
s_new = s.replace(oldvers,newvers)
f = open(fname,'w')
f.write(s_new)
f.close()
os.system('rm -rf version.txt')<|fim▁end|> | #
# SU2 Original Developers: Dr. Francisco D. Palacios.
# Dr. Thomas D. Economon. |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/**
* Created by fengyuanzemin on 17/2/15.
*/
import Vue from 'vue';
import Vuex from 'vuex';
import * as actions from './actions';
import * as mutations from './mutations';
Vue.use(Vuex);
const state = {
isShow: false,
msg: '出错了',
isBig: true,
token: localStorage.getItem('f-token'),<|fim▁hole|> init: false
};
export default new Vuex.Store({
state,
actions,
mutations
});<|fim▁end|> | |
<|file_name|>LatexLintBear.py<|end_file_name|><|fim▁begin|>from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
@linter(executable='chktex',
output_format='regex',
output_regex=r'(?P<severity>Error|Warning) \d+ in .+ line '
r'(?P<line>\d+): (?P<message>.*)')
class LatexLintBear:
"""
Checks the code with ``chktex``.
"""
LANGUAGES = {'Tex'}
REQUIREMENTS = {DistributionRequirement('chktex', zypper='texlive-chktex')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Formatting'}
@staticmethod
def create_arguments(filename, file, config_file):
return (
'--format',<|fim▁hole|> filename,
)<|fim▁end|> | '%k %n in {0} line %l: %m!n'.format(filename), |
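# Illustrative chktex output line that the regex above is meant to match
# (hypothetical file and message):
#   Warning 13 in foo.tex line 4: Intersentence spacing ('\@') should perhaps be used.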
<|file_name|>init.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// import zone.js from npm here because integration test will load zone.js
// from built npm_package instead of source
import 'zone.js/node';
import 'zone.js/testing';
// Only needed to satisfy the check in core/src/util/decorators.ts<|fim▁hole|><|fim▁end|> | // TODO(alexeagle): maybe remove that check?
require('reflect-metadata'); |