prompt | completion
---|---|
<|file_name|>Link.java<|end_file_name|><|fim▁begin|>package au.com.mountainpass.hyperstate.core;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import org.eclipse.jdt.annotation.Nullable;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.http.MediaType;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonUnwrapped;
import au.com.mountainpass.hyperstate.core.entities.CreatedEntity;
import au.com.mountainpass.hyperstate.core.entities.DeletedEntity;
import au.com.mountainpass.hyperstate.core.entities.EntityWrapper;
import au.com.mountainpass.hyperstate.core.entities.UpdatedEntity;
@JsonInclude(JsonInclude.Include.NON_DEFAULT)
public class Link extends Titled {
@Nullable
private MediaType representationFormat = MediaTypes.SIREN_JSON;
private Address address;
public Link(@JsonProperty("href") Address address,
@JsonProperty("title") final String title,
@JsonProperty("class") final String... classes) {
super(title, classes);
this.address = address;
}
public Link() {
}
public Link(Address address) {
this.address = address;
}
@JsonProperty("type")
public MediaType getRepresentationFormat() {
return representationFormat == null ? MediaTypes.SIREN_JSON
: representationFormat;
}
public <T extends EntityWrapper<?>> CompletableFuture<T> resolve(
Class<T> type) {
return address.get(type);
}
public <T extends EntityWrapper<?>> CompletableFuture<T> resolve(
ParameterizedTypeReference<T> type) {
return address.get(type);
}
@JsonIgnore
public String getPath() {
return address.getPath();
}
public CompletableFuture<EntityWrapper<?>> get(
Map<String, Object> filteredParameters) {
return address.get(filteredParameters);
}
public CompletableFuture<DeletedEntity> delete(<|fim▁hole|> }
public CompletableFuture<CreatedEntity> create(
Map<String, Object> filteredParameters) {
return address.create(filteredParameters);
}
public CompletableFuture<UpdatedEntity> update(
Map<String, Object> filteredParameters) {
return address.update(filteredParameters);
}
public CompletableFuture<EntityWrapper<?>> get() {
return address.get();
}
@JsonIgnore
public <T extends EntityWrapper<?>> CompletableFuture<T> get(
Class<T> type) {
return address.get(type);
}
/**
* @return the address
*/
@JsonUnwrapped
public Address getAddress() {
return address;
}
/**
* @param address
* the address to set
*/
public void setAddress(Address address) {
this.address = address;
}
}<|fim▁end|> | Map<String, Object> filteredParameters) {
return address.delete(filteredParameters);
|
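The `Link` class above is a thin typed wrapper around an `Address`: every operation delegates to the address and comes back as a `CompletableFuture`, and the link advertises its media type (Siren JSON by default). As a rough, non-authoritative analogue, the sketch below restates the "resolve a typed link" idea in Python; the `requests` usage and the URL are illustrative assumptions, not part of the hyperstate library:

```python
import requests

SIREN_JSON = "application/vnd.siren+json"  # mirrors MediaTypes.SIREN_JSON

def resolve(href, representation_format=SIREN_JSON):
    # GET the link target, asking for the link's media type (Siren by
    # default, as in Link.getRepresentationFormat() above)
    response = requests.get(href, headers={"Accept": representation_format})
    response.raise_for_status()
    return response.json()

# entity = resolve("https://example.com/api/accounts")  # hypothetical endpoint
```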
<|file_name|>0002_auto_20170727_1504.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-07-27 15:04
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('press', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='press',
name='password_length',
field=models.PositiveIntegerField(default=12, validators=[django.core.validators.MinValueValidator(9)]),
),
migrations.AddField(
model_name='press',
name='password_number',
field=models.BooleanField(default=False, help_text='If set, passwords must include one number.'),
),
migrations.AddField(
model_name='press',
name='password_upper',
field=models.BooleanField(default=False, help_text='If set, passwords must include one upper case.'),
),<|fim▁hole|><|fim▁end|> | ] |
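The three fields added by this migration describe a per-press password policy: a minimum length (validated to be at least 9, default 12) plus optional digit and upper-case requirements. A minimal sketch of how such a policy might be enforced is below; `validate_password` and the `press` argument are hypothetical, and only the field names come from the migration:

```python
import re

def validate_password(press, password):
    # collect policy violations against the fields added above
    errors = []
    if len(password) < press.password_length:
        errors.append("Password must be at least %d characters long."
                      % press.password_length)
    if press.password_number and not re.search(r"\d", password):
        errors.append("Password must include one number.")
    if press.password_upper and not re.search(r"[A-Z]", password):
        errors.append("Password must include one upper case letter.")
    return errors
```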
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 {
return;
}
// we count occurrences of values
let size = (max - min + 1) as usize;
let mut count = vec![0; size];
<|fim▁hole|> count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn rosetta_vector() {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
}
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
}<|fim▁end|> | for e in array.iter() { |
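For comparison, here is the same algorithm as a short Python sketch: count occurrences of each value in the closed range `[min, max]`, then write the values back in order. This is a paraphrase of the Rust code above, not part of the original repository:

```python
def counting_sort(array, lo, hi):
    if len(array) < 2:
        return  # nothing to do for arrays shorter than 2
    counts = [0] * (hi - lo + 1)
    for value in array:                      # count occurrences of values
        counts[value - lo] += 1
    index = 0
    for offset, count in enumerate(counts):  # write values back, sorted
        for _ in range(count):
            array[index] = lo + offset
            index += 1

numbers = [4, 65, 2, -31, 0, 99, 2, 83, 782, 1]
counting_sort(numbers, -31, 782)
assert numbers == sorted(numbers)
```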
<|file_name|>test.js<|end_file_name|><|fim▁begin|>/* global describe, it, assert, expect */
'use strict';
// TESTS //
describe( 'paddingLeft', function tests() {
var el = document.querySelector( '#fixture' );
it( 'should expose an attribute for specifying the left padding between the canvas edge and the graph area', function test() {
expect( el.paddingLeft ).to.be.a( 'number' );
});
it( 'should emit an `error` if not set to a positive integer', function test( done ) {
var num = el.paddingLeft,
values;
values = [
function(){},
'5',
-1,
3.14,
NaN,
// undefined, // TODO: enable once https://github.com/Polymer/polymer/issues/1053 is resolved
true,
[],
{}
];
el.addEventListener( 'err', onError );
<|fim▁hole|>
function next() {
el.paddingLeft = values.shift();
}
function onError( evt ) {
assert.instanceOf( evt.detail, TypeError );
if ( values.length ) {
setTimeout( next, 0 );
return;
}
setTimeout( end, 0 );
}
function end() {
assert.strictEqual( el.paddingLeft, num );
el.removeEventListener( 'err', onError );
done();
}
});
it( 'should emit a `changed` event when set to a new value', function test( done ) {
el.addEventListener( 'changed', onChange );
el.paddingLeft = 0;
function onChange( evt ) {
assert.isObject( evt.detail );
assert.strictEqual( evt.detail.attr, 'paddingLeft' );
el.removeEventListener( 'changed', onChange );
done();
}
});
it( 'should update the background width' );
it( 'should update the clipPath width' );
it( 'should update the graph position' );
it( 'should update the vertices' );
it( 'should update the edges' );
});<|fim▁end|> | next(); |
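The contract these tests pin down is: an invalid `paddingLeft` assignment must emit an `err` event carrying a `TypeError` and leave the old value in place, while a valid assignment (including `0`) must emit `changed`. Stripped of the event plumbing, the validation rule itself looks roughly like this Python sketch; the class is hypothetical and exists only to restate which values the test accepts and rejects:

```python
class Chart:
    def __init__(self, padding_left=90):
        self._padding_left = padding_left

    @property
    def padding_left(self):
        return self._padding_left

    @padding_left.setter
    def padding_left(self, value):
        # reject bools, floats, NaN, strings, negatives - everything the
        # test's `values` list feeds in - and keep the previous value
        if not isinstance(value, int) or isinstance(value, bool) or value < 0:
            raise TypeError("paddingLeft must be a nonnegative integer")
        self._padding_left = value
```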
<|file_name|>Project.py<|end_file_name|><|fim▁begin|>############################ Copyrights and license ############################
# #
# Copyright 2018 bbi-yggy <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.ProjectColumn
from . import Consts
class Project(github.GithubObject.CompletableGithubObject):
"""
This class represents Projects. The reference can be found here http://developer.github.com/v3/projects
"""
def __repr__(self):
return self.get__repr__({"name": self._name.value})
@property
def body(self):
"""
:type: string
"""
self._completeIfNotSet(self._body)
return self._body.value
@property
def columns_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._columns_url)
return self._columns_url.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def creator(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._creator)
return self._creator.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def node_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._node_id)
return self._node_id.value
@property
def number(self):
"""
:type: integer
"""
self._completeIfNotSet(self._number)
return self._number.value
@property
def owner_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._owner_url)
return self._owner_url.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def delete(self):
"""
:calls: `DELETE /projects/:project_id <https://developer.github.com/v3/projects/#delete-a-project>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url, headers={"Accept": Consts.mediaTypeProjectsPreview}
)
def edit(
self,
name=github.GithubObject.NotSet,
body=github.GithubObject.NotSet,
state=github.GithubObject.NotSet,
organization_permission=github.GithubObject.NotSet,
private=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /projects/:project_id <https://developer.github.com/v3/projects/#update-a-project>`_
:param name: string
:param body: string
:param state: string
:param organization_permission: string
:param private: bool
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, str), name
assert body is github.GithubObject.NotSet or isinstance(body, str), body
assert state is github.GithubObject.NotSet or isinstance(state, str), state
assert organization_permission is github.GithubObject.NotSet or isinstance(
organization_permission, str
), organization_permission
assert private is github.GithubObject.NotSet or isinstance(
private, bool
), private
patch_parameters = dict()
if name is not github.GithubObject.NotSet:
patch_parameters["name"] = name
if body is not github.GithubObject.NotSet:
patch_parameters["body"] = body
if state is not github.GithubObject.NotSet:
patch_parameters["state"] = state
if organization_permission is not github.GithubObject.NotSet:
patch_parameters["organization_permission"] = organization_permission
if private is not github.GithubObject.NotSet:
patch_parameters["private"] = private
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=patch_parameters,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
self._useAttributes(data)
def get_columns(self):
"""
:calls: `GET /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#list-project-columns>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.ProjectColumn.ProjectColumn`
"""
return github.PaginatedList.PaginatedList(
github.ProjectColumn.ProjectColumn,
self._requester,
self.columns_url,
None,
{"Accept": Consts.mediaTypeProjectsPreview},
)
def create_column(self, name):
"""
:calls: `POST /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
:param name: string
"""
assert isinstance(name, str), name
post_parameters = {"name": name}
import_header = {"Accept": Consts.mediaTypeProjectsPreview}
headers, data = self._requester.requestJsonAndCheck(
"POST", self.url + "/columns", headers=import_header, input=post_parameters
)
return github.ProjectColumn.ProjectColumn(
self._requester, headers, data, completed=True
)
def _initAttributes(self):
self._body = github.GithubObject.NotSet
self._columns_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._creator = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._node_id = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
self._owner_url = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "columns_url" in attributes: # pragma no branch
self._columns_url = self._makeStringAttribute(attributes["columns_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "creator" in attributes: # pragma no branch
self._creator = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["creator"]
)
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "number" in attributes: # pragma no branch
self._number = self._makeIntAttribute(attributes["number"])
if "owner_url" in attributes: # pragma no branch
self._owner_url = self._makeStringAttribute(attributes["owner_url"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])<|fim▁hole|> if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])<|fim▁end|> | |
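`edit()` above relies on `github.GithubObject.NotSet` as a sentinel so that only the keyword arguments the caller actually supplied end up in the PATCH payload; an explicit `None` would still be sent. A minimal standalone sketch of that pattern (the names are illustrative, not PyGithub's API):

```python
class _NotSetType:
    def __repr__(self):
        return "NotSet"

NotSet = _NotSetType()  # sentinel distinguishing "not passed" from None

def build_patch(name=NotSet, body=NotSet, state=NotSet):
    # only parameters the caller actually passed go into the payload
    params = {}
    if name is not NotSet:
        params["name"] = name
    if body is not NotSet:
        params["body"] = body
    if state is not NotSet:
        params["state"] = state
    return params

assert build_patch() == {}                       # nothing sent
assert build_patch(body=None) == {"body": None}  # explicit None still sent
```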
<|file_name|>test_multicast.rs<|end_file_name|><|fim▁begin|>// TODO: This doesn't pass on android 64bit CI...
// Figure out why!
#![cfg(not(target_os = "android"))]
use mio::{Events, Poll, PollOpt, Ready, Token};
use mio::net::UdpSocket;
use bytes::{Buf, MutBuf, RingBuf, SliceBuf};
use std::str;
use std::net::IpAddr;
use localhost;
const LISTENER: Token = Token(0);
const SENDER: Token = Token(1);
pub struct UdpHandler {
tx: UdpSocket,
rx: UdpSocket,
msg: &'static str,
buf: SliceBuf<'static>,
rx_buf: RingBuf,
localhost: IpAddr,
shutdown: bool,
}
impl UdpHandler {
fn new(tx: UdpSocket, rx: UdpSocket, msg: &'static str) -> UdpHandler {
let sock = UdpSocket::bind(&"127.0.0.1:12345".parse().unwrap()).unwrap();
UdpHandler {
tx: tx,
rx: rx,
msg: msg,
buf: SliceBuf::wrap(msg.as_bytes()),
rx_buf: RingBuf::new(1024),
localhost: sock.local_addr().unwrap().ip(),
shutdown: false,<|fim▁hole|> fn handle_read(&mut self, _: &mut Poll, token: Token, _: Ready) {
match token {
LISTENER => {
debug!("We are receiving a datagram now...");
match unsafe { self.rx.recv_from(self.rx_buf.mut_bytes()) } {
Ok((cnt, addr)) => {
unsafe { MutBuf::advance(&mut self.rx_buf, cnt); }
assert_eq!(addr.ip(), self.localhost);
}
res => panic!("unexpected result: {:?}", res),
}
assert!(str::from_utf8(self.rx_buf.bytes()).unwrap() == self.msg);
self.shutdown = true;
},
_ => ()
}
}
fn handle_write(&mut self, _: &mut Poll, token: Token, _: Ready) {
match token {
SENDER => {
let addr = self.rx.local_addr().unwrap();
let cnt = self.tx.send_to(self.buf.bytes(), &addr).unwrap();
self.buf.advance(cnt);
},
_ => ()
}
}
}
#[test]
pub fn test_multicast() {
drop(::env_logger::init());
debug!("Starting TEST_UDP_CONNECTIONLESS");
let mut poll = Poll::new().unwrap();
let addr = localhost();
let any = "0.0.0.0:0".parse().unwrap();
let tx = UdpSocket::bind(&any).unwrap();
let rx = UdpSocket::bind(&addr).unwrap();
info!("Joining group 227.1.1.100");
let any = "0.0.0.0".parse().unwrap();
rx.join_multicast_v4(&"227.1.1.100".parse().unwrap(), &any).unwrap();
info!("Joining group 227.1.1.101");
rx.join_multicast_v4(&"227.1.1.101".parse().unwrap(), &any).unwrap();
info!("Registering SENDER");
poll.register(&tx, SENDER, Ready::writable(), PollOpt::edge()).unwrap();
info!("Registering LISTENER");
poll.register(&rx, LISTENER, Ready::readable(), PollOpt::edge()).unwrap();
let mut events = Events::with_capacity(1024);
let mut handler = UdpHandler::new(tx, rx, "hello world");
info!("Starting event loop to test with...");
while !handler.shutdown {
poll.poll(&mut events, None).unwrap();
for event in &events {
if event.readiness().is_readable() {
handler.handle_read(&mut poll, event.token(), event.readiness());
}
if event.readiness().is_writable() {
handler.handle_write(&mut poll, event.token(), event.readiness());
}
}
}
}<|fim▁end|> | }
}
|
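The moving parts of the test above are: bind a receiver, join the multicast group on a local interface, send to the group address, and assert the payload round-trips. The same sequence in blocking Python, as a sketch (the port number is arbitrary, and it assumes multicast loopback is enabled, which is the OS default):

```python
import socket
import struct

GROUP, PORT = "227.1.1.100", 5007  # group matches the test; port is arbitrary

rx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rx.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
rx.bind(("", PORT))
# IP_ADD_MEMBERSHIP takes (group, local interface), mirroring
# rx.join_multicast_v4(&"227.1.1.100".parse().unwrap(), &any) above
mreq = struct.pack("4s4s", socket.inet_aton(GROUP), socket.inet_aton("0.0.0.0"))
rx.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

tx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tx.sendto(b"hello world", (GROUP, PORT))

data, _addr = rx.recvfrom(1024)
assert data == b"hello world"
```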
<|file_name|>subscriptionusages.go<|end_file_name|><|fim▁begin|>package sql
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// SubscriptionUsagesClient is the client for the Azure SQL Database management API, which provides a RESTful set of
// web services that interact with Azure SQL Database services to manage your databases. The API enables you to
// create, retrieve, update, and delete databases.
type SubscriptionUsagesClient struct {
BaseClient
}
// NewSubscriptionUsagesClient creates an instance of the SubscriptionUsagesClient client.
func NewSubscriptionUsagesClient(subscriptionID string) SubscriptionUsagesClient {
return NewSubscriptionUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewSubscriptionUsagesClientWithBaseURI creates an instance of the SubscriptionUsagesClient client.
func NewSubscriptionUsagesClientWithBaseURI(baseURI string, subscriptionID string) SubscriptionUsagesClient {
return SubscriptionUsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Get gets a subscription usage metric.
//
// locationName is the name of the region where the resource is located. usageName is the name of the usage metric
// to return.
func (client SubscriptionUsagesClient) Get(ctx context.Context, locationName string, usageName string) (result SubscriptionUsage, err error) {
req, err := client.GetPreparer(ctx, locationName, usageName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client SubscriptionUsagesClient) GetPreparer(ctx context.Context, locationName string, usageName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"locationName": autorest.Encode("path", locationName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"usageName": autorest.Encode("path", usageName),
}
const APIVersion = "2015-05-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationName}/usages/{usageName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client SubscriptionUsagesClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client SubscriptionUsagesClient) GetResponder(resp *http.Response) (result SubscriptionUsage, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByLocation gets all subscription usage metrics in a given location.
//
// locationName is the name of the region where the resource is located.
func (client SubscriptionUsagesClient) ListByLocation(ctx context.Context, locationName string) (result SubscriptionUsageListResultPage, err error) {
result.fn = client.listByLocationNextResults
req, err := client.ListByLocationPreparer(ctx, locationName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "ListByLocation", nil, "Failure preparing request")
return
}
resp, err := client.ListByLocationSender(req)
if err != nil {
result.sulr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "ListByLocation", resp, "Failure sending request")
return
}
result.sulr, err = client.ListByLocationResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "ListByLocation", resp, "Failure responding to request")
}
return<|fim▁hole|>}
// ListByLocationPreparer prepares the ListByLocation request.
func (client SubscriptionUsagesClient) ListByLocationPreparer(ctx context.Context, locationName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"locationName": autorest.Encode("path", locationName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-05-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationName}/usages", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByLocationSender sends the ListByLocation request. The method will close the
// http.Response Body if it receives an error.
func (client SubscriptionUsagesClient) ListByLocationSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListByLocationResponder handles the response to the ListByLocation request. The method always
// closes the http.Response Body.
func (client SubscriptionUsagesClient) ListByLocationResponder(resp *http.Response) (result SubscriptionUsageListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByLocationNextResults retrieves the next set of results, if any.
func (client SubscriptionUsagesClient) listByLocationNextResults(lastResults SubscriptionUsageListResult) (result SubscriptionUsageListResult, err error) {
req, err := lastResults.subscriptionUsageListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "listByLocationNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByLocationSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "listByLocationNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByLocationResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.SubscriptionUsagesClient", "listByLocationNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByLocationComplete enumerates all values, automatically crossing page boundaries as required.
func (client SubscriptionUsagesClient) ListByLocationComplete(ctx context.Context, locationName string) (result SubscriptionUsageListResultIterator, err error) {
result.page, err = client.ListByLocation(ctx, locationName)
return
}<|fim▁end|> | |
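`ListByLocation` returns a single page, `listByLocationNextResults` prepares the request for the next page from the previous result, and `ListByLocationComplete` wraps both in an iterator that crosses page boundaries automatically. The generic shape of that pattern, sketched in Python (`requests`-based, authentication omitted; the `value`/`nextLink` field names follow the usual Azure list-result convention and are an assumption here):

```python
import requests

def list_all_usages(base_url, subscription_id, location_name,
                    api_version="2015-05-01-preview"):
    # first page: build the URL the same way ListByLocationPreparer does
    url = ("%s/subscriptions/%s/providers/Microsoft.Sql/locations/%s/usages"
           % (base_url, subscription_id, location_name))
    params = {"api-version": api_version}
    while url:
        page = requests.get(url, params=params).json()
        for usage in page.get("value", []):
            yield usage
        # subsequent pages: follow nextLink until it is absent
        url = page.get("nextLink")
        params = None  # nextLink already carries its query string
```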
<|file_name|>alieztv.py<|end_file_name|><|fim▁begin|>import re
from os.path import splitext
from livestreamer.compat import urlparse, unquote
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import HTTPStream, RTMPStream
_url_re = re.compile("""
http(s)?://(\w+\.)?aliez.tv
(?:
/live/[^/]+
)?
(?:
/video/\d+/[^/]+
)?
""", re.VERBOSE)
_file_re = re.compile("\"?file\"?:\s+['\"]([^'\"]+)['\"]")
_swf_url_re = re.compile("swfobject.embedSWF\(\"([^\"]+)\",")
_schema = validate.Schema(
validate.union({
"urls": validate.all(
validate.transform(_file_re.findall),<|fim▁hole|> "swf": validate.all(
validate.transform(_swf_url_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.url(
scheme="http",
path=validate.endswith("swf")
)
)
)
)
})
)
class Aliez(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url, schema=_schema)
streams = {}
for url in res["urls"]:
parsed = urlparse(url)
if parsed.scheme.startswith("rtmp"):
params = {
"rtmp": url,
"pageUrl": self.url,
"live": True
}
if res["swf"]:
params["swfVfy"] = res["swf"]
stream = RTMPStream(self.session, params)
streams["live"] = stream
elif parsed.scheme.startswith("http"):
name = splitext(parsed.path)[1][1:]
stream = HTTPStream(self.session, url)
streams[name] = stream
return streams
__plugin__ = Aliez<|fim▁end|> | validate.map(unquote),
[validate.url()]
), |
<|file_name|>os0file.cc<|end_file_name|><|fim▁begin|>/***********************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2012, 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
gratefully acknowledged and are described briefly in the InnoDB
documentation. The contributions by Percona Inc. are incorporated with
their permission, and subject to the conditions contained in the file
COPYING.Percona.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
***********************************************************************/
/**************************************************//**
@file os/os0file.cc
The interface to the operating system file i/o primitives
Created 10/21/1995 Heikki Tuuri
*******************************************************/
#include "os0file.h"
#ifdef UNIV_NONINL
#include "os0file.ic"
#endif
#include "ut0mem.h"
#include "srv0srv.h"
#include "srv0start.h"
#include "fil0fil.h"
#include "buf0buf.h"
#include "srv0mon.h"
#ifndef UNIV_HOTBACKUP
# include "os0sync.h"
# include "os0thread.h"
#else /* !UNIV_HOTBACKUP */
# ifdef __WIN__
/* Add includes for the _stat() call to compile on Windows */
# include <sys/types.h>
# include <sys/stat.h>
# include <errno.h>
# endif /* __WIN__ */
#endif /* !UNIV_HOTBACKUP */
#if defined(LINUX_NATIVE_AIO)
#include <libaio.h>
#endif
/** Insert buffer segment id */
static const ulint IO_IBUF_SEGMENT = 0;
/** Log segment id */
static const ulint IO_LOG_SEGMENT = 1;
/* This specifies the file permissions InnoDB uses when it creates files in
Unix; the value of os_innodb_umask is initialized in ha_innodb.cc to
my_umask */
#ifndef __WIN__
/** Umask for creating files */
UNIV_INTERN ulint os_innodb_umask = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
#else
/** Umask for creating files */
UNIV_INTERN ulint os_innodb_umask = 0;
#endif /* __WIN__ */
#ifndef UNIV_HOTBACKUP
/* We use these mutexes to protect lseek + file i/o operation, if the
OS does not provide an atomic pread or pwrite, or similar */
#define OS_FILE_N_SEEK_MUTEXES 16
UNIV_INTERN os_ib_mutex_t os_file_seek_mutexes[OS_FILE_N_SEEK_MUTEXES];
/* In simulated aio, merge at most this many consecutive i/os */
#define OS_AIO_MERGE_N_CONSECUTIVE 64
/**********************************************************************
InnoDB AIO Implementation:
=========================
We support native AIO for Windows and Linux. For the rest of the
platforms we simulate AIO by special io-threads servicing the IO-requests.
Simulated AIO:
==============
On platforms where we 'simulate' AIO, the following is a rough explanation
of the high-level design.
There are four io-threads (for ibuf, log, read, write).
All synchronous IO requests are serviced by the calling thread using
os_file_write/os_file_read. The Asynchronous requests are queued up
in an array (there are four such arrays) by the calling thread.
Later these requests are picked up by the io-thread and are serviced
synchronously.
Windows native AIO:
==================
If srv_use_native_aio is not set then Windows follows the same
code path as simulated AIO. If the flag is set then the native AIO
interface is used. On Windows, one limitation is that if a file is opened
for AIO, no synchronous IO can be done on it. Therefore we have an
extra fifth array to queue up synchronous IO requests.
There are innodb_file_io_threads helper threads. These threads work
on the four arrays mentioned above in Simulated AIO. No thread is
required for the sync array.
If a synchronous IO request is made, it is first queued in the sync
array. Then the calling thread itself waits on the request, thus
making the call synchronous.
If an AIO request is made the calling thread not only queues it in the
array but also submits the requests. The helper thread then collects
the completed IO request and calls completion routine on it.
Linux native AIO:
=================
If we have libaio installed on the system and innodb_use_native_aio
is set to TRUE, we follow the code path of native AIO; otherwise we
do simulated AIO.
There are innodb_file_io_threads helper threads. These threads work
on the four arrays mentioned above in Simulated AIO.
If a synchronous IO request is made, it is handled by calling
os_file_write/os_file_read.
If an AIO request is made the calling thread not only queues it in the
array but also submits the requests. The helper thread then collects
the completed IO request and calls completion routine on it.
**********************************************************************/
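/* Editorial sketch, not part of the original source: under the design
described above, a caller queues an asynchronous read and an i/o-handler
thread services it later. The exact os_aio() parameter list differs
between releases, so the fragment below is an assumption-laden
illustration and is guarded out of compilation. */
#if 0
static ibool
example_queue_async_read(fil_node_t* node, byte* buf,
os_offset_t offset, ulint len, void* msg)
{
/* reserve a slot in os_aio_read_array (or submit natively); an
i/o-handler thread later completes it and hands message1/message2
back to the completion routine */
return(os_aio(OS_FILE_READ, OS_AIO_NORMAL, node->name, node->handle,
buf, offset, len, node, msg));
}
#endif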
/** Flag: enable debug printout for asynchronous i/o */
UNIV_INTERN ibool os_aio_print_debug = FALSE;
#ifdef UNIV_PFS_IO
/* Keys to register InnoDB I/O with performance schema */
UNIV_INTERN mysql_pfs_key_t innodb_file_data_key;
UNIV_INTERN mysql_pfs_key_t innodb_file_log_key;
UNIV_INTERN mysql_pfs_key_t innodb_file_temp_key;
#endif /* UNIV_PFS_IO */
/** The asynchronous i/o array slot structure */
struct os_aio_slot_t{
ibool is_read; /*!< TRUE if a read operation */
ulint pos; /*!< index of the slot in the aio
array */
ibool reserved; /*!< TRUE if this slot is reserved */
time_t reservation_time;/*!< time when reserved */
ulint len; /*!< length of the block to read or
write */
byte* buf; /*!< buffer used in i/o */
ulint type; /*!< OS_FILE_READ or OS_FILE_WRITE */
os_offset_t offset; /*!< file offset in bytes */
os_file_t file; /*!< file where to read or write */
const char* name; /*!< file name or path */
ibool io_already_done;/*!< used only in simulated aio:
TRUE if the physical i/o already
made and only the slot message
needs to be passed to the caller
of os_aio_simulated_handle */
fil_node_t* message1; /*!< message which is given by the */
void* message2; /*!< the requester of an aio operation
and which can be used to identify
which pending aio operation was
completed */
#ifdef WIN_ASYNC_IO
HANDLE handle; /*!< handle object we need in the
OVERLAPPED struct */
OVERLAPPED control; /*!< Windows control block for the
aio request */
#elif defined(LINUX_NATIVE_AIO)
struct iocb control; /* Linux control block for aio */
int n_bytes; /* bytes written/read. */
int ret; /* AIO return code */
#endif /* WIN_ASYNC_IO */
};
/** The asynchronous i/o array structure */
struct os_aio_array_t{
os_ib_mutex_t mutex; /*!< the mutex protecting the aio array */
os_event_t not_full;
/*!< The event which is set to the
signaled state when there is space in
the aio outside the ibuf segment;
os_event_set() and os_event_reset()
are protected by os_aio_array_t::mutex */
os_event_t is_empty;
/*!< The event which is set to the
signaled state when there are no
pending i/os in this array;
os_event_set() and os_event_reset()
are protected by os_aio_array_t::mutex */
ulint n_slots;/*!< Total number of slots in the aio
array. This must be divisible by
n_threads. */
ulint n_segments;
/*!< Number of segments in the aio
array of pending aio requests. A
thread can wait separately for any one
of the segments. */
ulint cur_seg;/*!< We reserve IO requests in round
robin fashion to different segments.
This points to the segment that is to
be used to service next IO request. */
ulint n_reserved;
/*!< Number of reserved slots in the
aio array outside the ibuf segment */
os_aio_slot_t* slots; /*!< Pointer to the slots in the array */
#ifdef __WIN__
HANDLE* handles;
/*!< Pointer to an array of OS native
event handles where we copied the
handles from slots, in the same
order. This can be used in
WaitForMultipleObjects; used only in
Windows */
#endif /* __WIN__ */
#if defined(LINUX_NATIVE_AIO)
io_context_t* aio_ctx;
/* completion queue for IO. There is
one such queue per segment. Each thread
will work on one ctx exclusively. */
struct io_event* aio_events;
/* The array to collect completed IOs.
There is one such event for each
possible pending IO. The size of the
array is equal to n_slots. */
#endif /* LINUX_NATIVE_AIO */
};
#if defined(LINUX_NATIVE_AIO)
/** timeout for each io_getevents() call = 500ms. */
#define OS_AIO_REAP_TIMEOUT (500000000UL)
/** time to sleep, in microseconds if io_setup() returns EAGAIN. */
#define OS_AIO_IO_SETUP_RETRY_SLEEP (500000UL)
/** number of attempts before giving up on io_setup(). */
#define OS_AIO_IO_SETUP_RETRY_ATTEMPTS 5
#endif
/** Array of events used in simulated aio. */
static os_event_t* os_aio_segment_wait_events;
/** The aio arrays for non-ibuf i/o and ibuf i/o, as well as sync aio. These
are NULL when the module has not yet been initialized. @{ */
static os_aio_array_t* os_aio_read_array = NULL; /*!< Reads */
static os_aio_array_t* os_aio_write_array = NULL; /*!< Writes */
static os_aio_array_t* os_aio_ibuf_array = NULL; /*!< Insert buffer */
static os_aio_array_t* os_aio_log_array = NULL; /*!< Redo log */
static os_aio_array_t* os_aio_sync_array = NULL; /*!< Synchronous I/O */
/* @} */
/** Number of asynchronous I/O segments. Set by os_aio_init(). */
static ulint os_aio_n_segments = ULINT_UNDEFINED;
/** If the following is TRUE, read i/o handler threads try to
wait until a batch of new read requests have been posted */
static ibool os_aio_recommend_sleep_for_read_threads = FALSE;
#endif /* !UNIV_HOTBACKUP */
UNIV_INTERN ulint os_n_file_reads = 0;
UNIV_INTERN ulint os_bytes_read_since_printout = 0;
UNIV_INTERN ulint os_n_file_writes = 0;
UNIV_INTERN ulint os_n_fsyncs = 0;
UNIV_INTERN ulint os_n_file_reads_old = 0;
UNIV_INTERN ulint os_n_file_writes_old = 0;
UNIV_INTERN ulint os_n_fsyncs_old = 0;
UNIV_INTERN time_t os_last_printout;
UNIV_INTERN ibool os_has_said_disk_full = FALSE;
#if !defined(UNIV_HOTBACKUP) \
&& (!defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8)
/** The mutex protecting the following counts of pending I/O operations */
static os_ib_mutex_t os_file_count_mutex;
#endif /* !UNIV_HOTBACKUP && (!HAVE_ATOMIC_BUILTINS || UNIV_WORD_SIZE < 8) */
/** Number of pending os_file_pread() operations */
UNIV_INTERN ulint os_file_n_pending_preads = 0;
/** Number of pending os_file_pwrite() operations */
UNIV_INTERN ulint os_file_n_pending_pwrites = 0;
/** Number of pending write operations */
UNIV_INTERN ulint os_n_pending_writes = 0;
/** Number of pending read operations */
UNIV_INTERN ulint os_n_pending_reads = 0;
#ifdef UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Validates the consistency of the aio system some of the time.
@return TRUE if ok or the check was skipped */
UNIV_INTERN
ibool
os_aio_validate_skip(void)
/*======================*/
{
/** Try os_aio_validate() every this many times */
# define OS_AIO_VALIDATE_SKIP 13
/** The os_aio_validate() call skip counter.
Use a signed type because of the race condition below. */
static int os_aio_validate_count = OS_AIO_VALIDATE_SKIP;
/* There is a race condition below, but it does not matter,
because this call is only for heuristic purposes. We want to
reduce the call frequency of the costly os_aio_validate()
check in debug builds. */
if (--os_aio_validate_count > 0) {
return(TRUE);
}
os_aio_validate_count = OS_AIO_VALIDATE_SKIP;
return(os_aio_validate());
}
# endif /* !UNIV_HOTBACKUP */
#endif /* UNIV_DEBUG */
#ifdef __WIN__
/***********************************************************************//**
Gets the operating system version. Currently works only on Windows.
@return OS_WIN95, OS_WIN31, OS_WINNT, OS_WIN2000, OS_WINXP, OS_WINVISTA,
OS_WIN7. */
UNIV_INTERN
ulint
os_get_os_version(void)
/*===================*/
{
OSVERSIONINFO os_info;
os_info.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
ut_a(GetVersionEx(&os_info));
if (os_info.dwPlatformId == VER_PLATFORM_WIN32s) {
return(OS_WIN31);
} else if (os_info.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS) {
return(OS_WIN95);
} else if (os_info.dwPlatformId == VER_PLATFORM_WIN32_NT) {
switch (os_info.dwMajorVersion) {
case 3:
case 4:
return(OS_WINNT);
case 5:
return (os_info.dwMinorVersion == 0)
? OS_WIN2000 : OS_WINXP;
case 6:
return (os_info.dwMinorVersion == 0)
? OS_WINVISTA : OS_WIN7;
default:
return(OS_WIN7);
}
} else {
ut_error;
return(0);
}
}
#endif /* __WIN__ */
/***********************************************************************//**
Retrieves the last error number if an error occurs in a file io function.
The number should be retrieved before any other OS calls (because they may
overwrite the error number). If the number is not known to this program,
the OS error number + 100 is returned.
@return error number, or OS error number + 100 */
static
ulint
os_file_get_last_error_low(
/*=======================*/
bool report_all_errors, /*!< in: TRUE if we want an error
message printed of all errors */
bool on_error_silent) /*!< in: TRUE then don't print any
diagnostic to the log */
{
#ifdef __WIN__
ulint err = (ulint) GetLastError();
if (err == ERROR_SUCCESS) {
return(0);
}
if (report_all_errors
|| (!on_error_silent
&& err != ERROR_DISK_FULL
&& err != ERROR_FILE_EXISTS)) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Operating system error number %lu"
" in a file operation.\n", (ulong) err);
if (err == ERROR_PATH_NOT_FOUND) {
fprintf(stderr,
"InnoDB: The error means the system"
" cannot find the path specified.\n");
if (srv_is_being_started) {
fprintf(stderr,
"InnoDB: If you are installing InnoDB,"
" remember that you must create\n"
"InnoDB: directories yourself, InnoDB"
" does not create them.\n");
}
} else if (err == ERROR_ACCESS_DENIED) {
fprintf(stderr,
"InnoDB: The error means mysqld does not have"
" the access rights to\n"
"InnoDB: the directory. It may also be"
" you have created a subdirectory\n"
"InnoDB: of the same name as a data file.\n");
} else if (err == ERROR_SHARING_VIOLATION
|| err == ERROR_LOCK_VIOLATION) {
fprintf(stderr,
"InnoDB: The error means that another program"
" is using InnoDB's files.\n"
"InnoDB: This might be a backup or antivirus"
" software or another instance\n"
"InnoDB: of MySQL."
" Please close it to get rid of this error.\n");
} else if (err == ERROR_WORKING_SET_QUOTA
|| err == ERROR_NO_SYSTEM_RESOURCES) {
fprintf(stderr,
"InnoDB: The error means that there are no"
" sufficient system resources or quota to"
" complete the operation.\n");
} else if (err == ERROR_OPERATION_ABORTED) {
fprintf(stderr,
"InnoDB: The error means that the I/O"
" operation has been aborted\n"
"InnoDB: because of either a thread exit"
" or an application request.\n"
"InnoDB: Retry attempt is made.\n");
} else {
fprintf(stderr,
"InnoDB: Some operating system error numbers"
" are described at\n"
"InnoDB: "
REFMAN
"operating-system-error-codes.html\n");
}
}
fflush(stderr);
if (err == ERROR_FILE_NOT_FOUND) {
return(OS_FILE_NOT_FOUND);
} else if (err == ERROR_DISK_FULL) {
return(OS_FILE_DISK_FULL);
} else if (err == ERROR_FILE_EXISTS) {
return(OS_FILE_ALREADY_EXISTS);
} else if (err == ERROR_SHARING_VIOLATION
|| err == ERROR_LOCK_VIOLATION) {
return(OS_FILE_SHARING_VIOLATION);
} else if (err == ERROR_WORKING_SET_QUOTA
|| err == ERROR_NO_SYSTEM_RESOURCES) {
return(OS_FILE_INSUFFICIENT_RESOURCE);
} else if (err == ERROR_OPERATION_ABORTED) {
return(OS_FILE_OPERATION_ABORTED);
} else if (err == ERROR_ACCESS_DENIED) {
return(OS_FILE_ACCESS_VIOLATION);
} else if (err == ERROR_BUFFER_OVERFLOW) {
return(OS_FILE_NAME_TOO_LONG);
} else {
return(OS_FILE_ERROR_MAX + err);
}
#else
int err = errno;
if (err == 0) {
return(0);
}
if (report_all_errors
|| (err != ENOSPC && err != EEXIST && !on_error_silent)) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Operating system error number %d"
" in a file operation.\n", err);
if (err == ENOENT) {
fprintf(stderr,
"InnoDB: The error means the system"
" cannot find the path specified.\n");
if (srv_is_being_started) {
fprintf(stderr,
"InnoDB: If you are installing InnoDB,"
" remember that you must create\n"
"InnoDB: directories yourself, InnoDB"
" does not create them.\n");
}
} else if (err == EACCES) {
fprintf(stderr,
"InnoDB: The error means mysqld does not have"
" the access rights to\n"
"InnoDB: the directory.\n");
} else {
if (strerror(err) != NULL) {
fprintf(stderr,
"InnoDB: Error number %d"
" means '%s'.\n",
err, strerror(err));
}
fprintf(stderr,
"InnoDB: Some operating system"
" error numbers are described at\n"
"InnoDB: "
REFMAN
"operating-system-error-codes.html\n");
}
}
fflush(stderr);
switch (err) {
case ENOSPC:
return(OS_FILE_DISK_FULL);
case ENOENT:
return(OS_FILE_NOT_FOUND);
case EEXIST:
return(OS_FILE_ALREADY_EXISTS);
case ENAMETOOLONG:
return(OS_FILE_NAME_TOO_LONG);
case EXDEV:
case ENOTDIR:
case EISDIR:
return(OS_FILE_PATH_ERROR);
case EAGAIN:
if (srv_use_native_aio) {
return(OS_FILE_AIO_RESOURCES_RESERVED);
}
break;
case EINTR:
if (srv_use_native_aio) {
return(OS_FILE_AIO_INTERRUPTED);
}
break;
case EACCES:
return(OS_FILE_ACCESS_VIOLATION);
}
return(OS_FILE_ERROR_MAX + err);
#endif
}
/***********************************************************************//**
Retrieves the last error number if an error occurs in a file io function.
The number should be retrieved before any other OS calls (because they may
overwrite the error number). If the number is not known to this program,
the OS error number + 100 is returned.
@return error number, or OS error number + 100 */
UNIV_INTERN
ulint
os_file_get_last_error(
/*===================*/
bool report_all_errors) /*!< in: TRUE if we want an error
message printed of all errors */
{
return(os_file_get_last_error_low(report_all_errors, false));
}
/****************************************************************//**
Does error handling when a file operation fails.
Conditionally exits (calling exit(3)) based on the should_exit value and the
error type; if should_exit is TRUE then on_error_silent is ignored.
@return TRUE if we should retry the operation */
static
ibool
os_file_handle_error_cond_exit(
/*===========================*/
const char* name, /*!< in: name of a file or NULL */
const char* operation, /*!< in: operation */
ibool should_exit, /*!< in: call exit(3) if unknown error
and this parameter is TRUE */
ibool on_error_silent)/*!< in: if TRUE then don't print
any message to the log iff it is
an unknown non-fatal error */
{
ulint err;
err = os_file_get_last_error_low(false, on_error_silent);
switch (err) {
case OS_FILE_DISK_FULL:
/* We only print a warning about disk full once */
if (os_has_said_disk_full) {
return(FALSE);
}
/* Disk full error is reported irrespective of the
on_error_silent setting. */
if (name) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Encountered a problem with"
" file %s\n", name);
}
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Disk is full. Try to clean the disk"
" to free space.\n");
os_has_said_disk_full = TRUE;
fflush(stderr);
ut_error;
return(FALSE);
case OS_FILE_AIO_RESOURCES_RESERVED:
case OS_FILE_AIO_INTERRUPTED:
return(TRUE);
case OS_FILE_PATH_ERROR:
case OS_FILE_ALREADY_EXISTS:
case OS_FILE_ACCESS_VIOLATION:
return(FALSE);
case OS_FILE_SHARING_VIOLATION:
os_thread_sleep(10000000); /* 10 sec */
return(TRUE);
case OS_FILE_OPERATION_ABORTED:
case OS_FILE_INSUFFICIENT_RESOURCE:
os_thread_sleep(100000); /* 100 ms */
return(TRUE);
default:
/* If it is an operation that can crash on error then it
is better to ignore on_error_silent and print an error message
to the log. */
if (should_exit || !on_error_silent) {
ib_logf(IB_LOG_LEVEL_ERROR, "File %s: '%s' returned OS "
"error " ULINTPF ".%s", name ? name : "(unknown)",
operation, err, should_exit
? " Cannot continue operation" : "");
}
if (should_exit) {
exit(1);
}
}
return(FALSE);
}
/****************************************************************//**
Does error handling when a file operation fails.
@return TRUE if we should retry the operation */
static
ibool
os_file_handle_error(
/*=================*/
const char* name, /*!< in: name of a file or NULL */
const char* operation) /*!< in: operation */
{
/* exit in case of unknown error */
return(os_file_handle_error_cond_exit(name, operation, TRUE, FALSE));
}
/****************************************************************//**
Does error handling when a file operation fails.
@return TRUE if we should retry the operation */
ibool
os_file_handle_error_no_exit(
/*=========================*/
const char* name, /*!< in: name of a file or NULL */
const char* operation, /*!< in: operation */
ibool on_error_silent)/*!< in: if TRUE then don't print
any message to the log. */
{
/* don't exit in case of unknown error */
return(os_file_handle_error_cond_exit(
name, operation, FALSE, on_error_silent));
}
#undef USE_FILE_LOCK
#define USE_FILE_LOCK
#if defined(UNIV_HOTBACKUP) || defined(__WIN__)
/* InnoDB Hot Backup does not lock the data files.
* On Windows, mandatory locking is used.
*/
# undef USE_FILE_LOCK
#endif
#ifdef USE_FILE_LOCK
/****************************************************************//**
Obtain an exclusive lock on a file.
@return 0 on success */
static
int
os_file_lock(
/*=========*/
int fd, /*!< in: file descriptor */
const char* name) /*!< in: file name */
{
struct flock lk;
ut_ad(!srv_read_only_mode);
lk.l_type = F_WRLCK;
lk.l_whence = SEEK_SET;
lk.l_start = lk.l_len = 0;
if (fcntl(fd, F_SETLK, &lk) == -1) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unable to lock %s, error: %d", name, errno);
if (errno == EAGAIN || errno == EACCES) {
ib_logf(IB_LOG_LEVEL_INFO,
"Check that you do not already have "
"another mysqld process using the "
"same InnoDB data or log files.");
}
return(-1);
}
return(0);
}
#endif /* USE_FILE_LOCK */
#ifndef UNIV_HOTBACKUP
/****************************************************************//**
Creates the seek mutexes used in positioned reads and writes. */
UNIV_INTERN
void
os_io_init_simple(void)
/*===================*/
{
#if !defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8
os_file_count_mutex = os_mutex_create();
#endif /* !HAVE_ATOMIC_BUILTINS || UNIV_WORD_SIZE < 8 */
for (ulint i = 0; i < OS_FILE_N_SEEK_MUTEXES; i++) {
os_file_seek_mutexes[i] = os_mutex_create();
}
}
/** Create a temporary file. This function is like tmpfile(3), but
the temporary file is created in the given parameter path. If the path
is null then it will create the file in the mysql server configuration
parameter (--tmpdir).
@param[in] path location for creating temporary file
@return temporary file handle, or NULL on error */
UNIV_INTERN
FILE*
os_file_create_tmpfile(
const char* path)
{
FILE* file = NULL;
int fd = innobase_mysql_tmpfile(path);
ut_ad(!srv_read_only_mode);
if (fd >= 0) {
file = fdopen(fd, "w+b");
}
if (!file) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: unable to create temporary file;"
" errno: %d\n", errno);
if (fd >= 0) {
close(fd);
}
}
return(file);
}
#endif /* !UNIV_HOTBACKUP */
/***********************************************************************//**
The os_file_opendir() function opens a directory stream corresponding to the
directory named by the dirname argument. The directory stream is positioned
at the first entry. In both Unix and Windows we automatically skip the '.'
and '..' items at the start of the directory listing.
@return directory stream, NULL if error */
UNIV_INTERN
os_file_dir_t
os_file_opendir(
/*============*/
const char* dirname, /*!< in: directory name; it must not
contain a trailing '\' or '/' */
ibool error_is_fatal) /*!< in: TRUE if we should treat an
error as a fatal error; if we try to
open symlinks then we do not wish a
fatal error if it happens not to be
a directory */
{
os_file_dir_t dir;
#ifdef __WIN__
LPWIN32_FIND_DATA lpFindFileData;
char path[OS_FILE_MAX_PATH + 3];
ut_a(strlen(dirname) < OS_FILE_MAX_PATH);
strcpy(path, dirname);
strcpy(path + strlen(path), "\\*");
/* Note that in Windows opening the 'directory stream' also retrieves
the first entry in the directory. Since it is '.', that is no problem,
as we will skip over the '.' and '..' entries anyway. */
lpFindFileData = static_cast<LPWIN32_FIND_DATA>(
ut_malloc(sizeof(WIN32_FIND_DATA)));
dir = FindFirstFile((LPCTSTR) path, lpFindFileData);
ut_free(lpFindFileData);
if (dir == INVALID_HANDLE_VALUE) {
if (error_is_fatal) {
os_file_handle_error(dirname, "opendir");
}
return(NULL);
}
return(dir);
#else
dir = opendir(dirname);
if (dir == NULL && error_is_fatal) {
os_file_handle_error(dirname, "opendir");
}
return(dir);
#endif /* __WIN__ */
}
/***********************************************************************//**
Closes a directory stream.
@return 0 if success, -1 if failure */
UNIV_INTERN
int
os_file_closedir(
/*=============*/
os_file_dir_t dir) /*!< in: directory stream */
{
#ifdef __WIN__
BOOL ret;
ret = FindClose(dir);
if (!ret) {
os_file_handle_error_no_exit(NULL, "closedir", FALSE);
return(-1);
}
return(0);
#else
int ret;
ret = closedir(dir);
if (ret) {
os_file_handle_error_no_exit(NULL, "closedir", FALSE);
}
return(ret);
#endif /* __WIN__ */
}
/***********************************************************************//**
This function returns information of the next file in the directory. We jump
over the '.' and '..' entries in the directory.
@return 0 if ok, -1 if error, 1 if at the end of the directory */
UNIV_INTERN
int
os_file_readdir_next_file(
/*======================*/
const char* dirname,/*!< in: directory name or path */
os_file_dir_t dir, /*!< in: directory stream */
os_file_stat_t* info) /*!< in/out: buffer where the info is returned */
{
#ifdef __WIN__
LPWIN32_FIND_DATA lpFindFileData;
BOOL ret;
lpFindFileData = static_cast<LPWIN32_FIND_DATA>(
ut_malloc(sizeof(WIN32_FIND_DATA)));
next_file:
ret = FindNextFile(dir, lpFindFileData);
if (ret) {
ut_a(strlen((char*) lpFindFileData->cFileName)
< OS_FILE_MAX_PATH);
if (strcmp((char*) lpFindFileData->cFileName, ".") == 0
|| strcmp((char*) lpFindFileData->cFileName, "..") == 0) {
goto next_file;
}
strcpy(info->name, (char*) lpFindFileData->cFileName);
info->size = (ib_int64_t)(lpFindFileData->nFileSizeLow)
+ (((ib_int64_t)(lpFindFileData->nFileSizeHigh))
<< 32);
if (lpFindFileData->dwFileAttributes
& FILE_ATTRIBUTE_REPARSE_POINT) {
/* TODO: test Windows symlinks */
/* TODO: MySQL has apparently its own symlink
implementation in Windows, dbname.sym can
redirect a database directory:
REFMAN "windows-symbolic-links.html" */
info->type = OS_FILE_TYPE_LINK;
} else if (lpFindFileData->dwFileAttributes
& FILE_ATTRIBUTE_DIRECTORY) {
info->type = OS_FILE_TYPE_DIR;
} else {
/* It is probably safest to assume that all other
file types are normal. Better to check them rather
than blindly skip them. */
info->type = OS_FILE_TYPE_FILE;
}
}
ut_free(lpFindFileData);
if (ret) {
return(0);
} else if (GetLastError() == ERROR_NO_MORE_FILES) {
return(1);
} else {
os_file_handle_error_no_exit(NULL, "readdir_next_file", FALSE);
return(-1);
}
#else
struct dirent* ent;
char* full_path;
int ret;
struct stat statinfo;
#ifdef HAVE_READDIR_R
char dirent_buf[sizeof(struct dirent)
+ _POSIX_PATH_MAX + 100];
/* In /mysys/my_lib.c, _POSIX_PATH_MAX + 1 is used as
the max file name len; but in most standards, the
length is NAME_MAX; we add 100 to be even safer */
#endif
next_file:
#ifdef HAVE_READDIR_R
ret = readdir_r(dir, (struct dirent*) dirent_buf, &ent);
if (ret != 0
#ifdef UNIV_AIX
/* On AIX, only if we got non-NULL 'ent' (result) value and
a non-zero 'ret' (return) value, it indicates a failed
readdir_r() call. A NULL 'ent' with a non-zero 'ret'
would indicate the "end of the directory" is reached. */
&& ent != NULL
#endif
) {
fprintf(stderr,
"InnoDB: cannot read directory %s, error %lu\n",
dirname, (ulong) ret);
return(-1);
}
if (ent == NULL) {
/* End of directory */
return(1);
}
ut_a(strlen(ent->d_name) < _POSIX_PATH_MAX + 100 - 1);
#else
ent = readdir(dir);
if (ent == NULL) {
return(1);
}
#endif
ut_a(strlen(ent->d_name) < OS_FILE_MAX_PATH);
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0) {
goto next_file;
}
strcpy(info->name, ent->d_name);
full_path = static_cast<char*>(
ut_malloc(strlen(dirname) + strlen(ent->d_name) + 10));
sprintf(full_path, "%s/%s", dirname, ent->d_name);
ret = stat(full_path, &statinfo);
if (ret) {
if (errno == ENOENT) {
/* readdir() returned a file that does not exist,
it must have been deleted in the meantime. Do what
would have happened if the file was deleted before
readdir() - ignore and go to the next entry.
If this is the last entry then info->name will still
contain the name of the deleted file when this
function returns, but this is not an issue since the
caller shouldn't be looking at info when end of
directory is returned. */
ut_free(full_path);
goto next_file;
}
os_file_handle_error_no_exit(full_path, "stat", FALSE);
ut_free(full_path);
return(-1);
}
info->size = (ib_int64_t) statinfo.st_size;
if (S_ISDIR(statinfo.st_mode)) {
info->type = OS_FILE_TYPE_DIR;
} else if (S_ISLNK(statinfo.st_mode)) {
info->type = OS_FILE_TYPE_LINK;
} else if (S_ISREG(statinfo.st_mode)) {
info->type = OS_FILE_TYPE_FILE;
} else {
info->type = OS_FILE_TYPE_UNKNOWN;
}
ut_free(full_path);
return(0);
#endif
}
/*****************************************************************//**
This function attempts to create a directory named pathname. The new
directory gets default permissions. On Unix the permissions are
(0770 & ~umask). If the directory exists already, nothing is done and
the call succeeds, unless the fail_if_exists argument is true.
If another error occurs, such as a permission error, this does not crash,
but reports the error and returns FALSE.
@return TRUE if call succeeds, FALSE on error */
UNIV_INTERN
ibool
os_file_create_directory(
/*=====================*/
const char* pathname, /*!< in: directory name as
null-terminated string */
ibool fail_if_exists) /*!< in: if TRUE, pre-existing directory
is treated as an error. */
{
#ifdef __WIN__
BOOL rcode;
rcode = CreateDirectory((LPCTSTR) pathname, NULL);
if (!(rcode != 0
|| (GetLastError() == ERROR_ALREADY_EXISTS
&& !fail_if_exists))) {
os_file_handle_error_no_exit(
pathname, "CreateDirectory", FALSE);
return(FALSE);
}
return(TRUE);
#else
int rcode;
rcode = mkdir(pathname, 0770);
if (!(rcode == 0 || (errno == EEXIST && !fail_if_exists))) {
/* failure */
os_file_handle_error_no_exit(pathname, "mkdir", FALSE);
return(FALSE);
}
return (TRUE);
#endif /* __WIN__ */
}
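/* Illustrative usage sketch (not part of the original source): creating
a directory idempotently with os_file_create_directory(). With
fail_if_exists = FALSE a pre-existing directory counts as success, so
the call is safe to repeat; the path literal is a made-up example. */
#if 0
if (!os_file_create_directory("./ib_backup", FALSE)) {
/* A real error (e.g. a permission problem); the function has
already reported it, so only failure handling remains here. */
}
#endif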
/****************************************************************//**
NOTE! Use the corresponding macro os_file_create_simple(), not directly
this function!
A simple function to open or create a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
os_file_t
os_file_create_simple_func(
/*=======================*/
const char* name, /*!< in: name of the file or path as a
null-terminated string */
ulint create_mode,/*!< in: create mode */
ulint access_type,/*!< in: OS_FILE_READ_ONLY or
OS_FILE_READ_WRITE */
ibool* success)/*!< out: TRUE if succeed, FALSE if error */
{
os_file_t file;
ibool retry;
*success = FALSE;
#ifdef __WIN__
DWORD access;
DWORD create_flag;
DWORD attributes = 0;
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
if (create_mode == OS_FILE_OPEN) {
create_flag = OPEN_EXISTING;
} else if (srv_read_only_mode) {
create_flag = OPEN_EXISTING;
} else if (create_mode == OS_FILE_CREATE) {
create_flag = CREATE_NEW;
} else if (create_mode == OS_FILE_CREATE_PATH) {
ut_a(!srv_read_only_mode);
/* Create subdirs along the path if needed */
*success = os_file_create_subdirs_if_needed(name);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unable to create subdirectories '%s'",
name);
return((os_file_t) -1);
}
create_flag = CREATE_NEW;
create_mode = OS_FILE_CREATE;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
return((os_file_t) -1);
}
if (access_type == OS_FILE_READ_ONLY) {
access = GENERIC_READ;
} else if (srv_read_only_mode) {
ib_logf(IB_LOG_LEVEL_INFO,
"read only mode set. Unable to "
"open file '%s' in RW mode, trying RO mode", name);
access = GENERIC_READ;
} else if (access_type == OS_FILE_READ_WRITE) {
access = GENERIC_READ | GENERIC_WRITE;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file access type (%lu) for file '%s'",
access_type, name);
return((os_file_t) -1);
}
do {
/* Use default security attributes and no template file. */
file = CreateFile(
(LPCTSTR) name, access, FILE_SHARE_READ, NULL,
create_flag, attributes, NULL);
if (file == INVALID_HANDLE_VALUE) {
*success = FALSE;
retry = os_file_handle_error(
name, create_mode == OS_FILE_OPEN ?
"open" : "create");
} else {
*success = TRUE;
retry = false;
}
} while (retry);
#else /* __WIN__ */
int create_flag;
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
if (create_mode == OS_FILE_OPEN) {
if (access_type == OS_FILE_READ_ONLY) {
create_flag = O_RDONLY;
} else if (srv_read_only_mode) {
create_flag = O_RDONLY;
} else {
create_flag = O_RDWR;
}
} else if (srv_read_only_mode) {
create_flag = O_RDONLY;
} else if (create_mode == OS_FILE_CREATE) {
create_flag = O_RDWR | O_CREAT | O_EXCL;
} else if (create_mode == OS_FILE_CREATE_PATH) {
/* Create subdirs along the path if needed */
*success = os_file_create_subdirs_if_needed(name);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unable to create subdirectories '%s'",
name);
return((os_file_t) -1);
}
create_flag = O_RDWR | O_CREAT | O_EXCL;
create_mode = OS_FILE_CREATE;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
return((os_file_t) -1);
}
do {
file = ::open(name, create_flag, os_innodb_umask);
if (file == -1) {
*success = FALSE;
retry = os_file_handle_error(
name,
create_mode == OS_FILE_OPEN
? "open" : "create");
} else {
*success = TRUE;
retry = false;
}
} while (retry);
#ifdef USE_FILE_LOCK
if (!srv_read_only_mode
&& *success
&& access_type == OS_FILE_READ_WRITE
&& os_file_lock(file, name)) {
*success = FALSE;
close(file);
file = -1;
}
#endif /* USE_FILE_LOCK */
#endif /* __WIN__ */
return(file);
}
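/* Illustrative usage sketch (not part of the original source): opening
an existing file read-only through the os_file_create_simple() macro
that wraps this function. The performance-schema key and the file name
below are assumed examples. */
#if 0
ibool success;
os_file_t fh = os_file_create_simple(
innodb_file_data_key, "./ib_example",
OS_FILE_OPEN, OS_FILE_READ_ONLY, &success);
if (success) {
/* ... read from fh ... */
os_file_close(fh);
}
#endif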
/****************************************************************//**
NOTE! Use the corresponding macro
os_file_create_simple_no_error_handling(), not directly this function!
A simple function to open or create a file.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
os_file_t
os_file_create_simple_no_error_handling_func(
/*=========================================*/
const char* name, /*!< in: name of the file or path as a
null-terminated string */
ulint create_mode,/*!< in: create mode */
ulint access_type,/*!< in: OS_FILE_READ_ONLY,
OS_FILE_READ_WRITE, or
OS_FILE_READ_ALLOW_DELETE; the last option is
used by a backup program reading the file */
ibool* success)/*!< out: TRUE if succeed, FALSE if error */
{
os_file_t file;
*success = FALSE;
#ifdef __WIN__
DWORD access;
DWORD create_flag;
DWORD attributes = 0;
DWORD share_mode = FILE_SHARE_READ;
ut_a(name);
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
if (create_mode == OS_FILE_OPEN) {
create_flag = OPEN_EXISTING;
} else if (srv_read_only_mode) {
create_flag = OPEN_EXISTING;
} else if (create_mode == OS_FILE_CREATE) {
create_flag = CREATE_NEW;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
return((os_file_t) -1);
}
if (access_type == OS_FILE_READ_ONLY) {
access = GENERIC_READ;
} else if (srv_read_only_mode) {
access = GENERIC_READ;
} else if (access_type == OS_FILE_READ_WRITE) {
access = GENERIC_READ | GENERIC_WRITE;
} else if (access_type == OS_FILE_READ_ALLOW_DELETE) {
ut_a(!srv_read_only_mode);
access = GENERIC_READ;
/*!< A backup program has to give mysqld the maximum
freedom to do what it likes with the file */
share_mode |= FILE_SHARE_DELETE | FILE_SHARE_WRITE;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file access type (%lu) for file '%s'",
access_type, name);
return((os_file_t) -1);
}
file = CreateFile((LPCTSTR) name,
access,
share_mode,
NULL, // Security attributes
create_flag,
attributes,
NULL); // No template file
*success = (file != INVALID_HANDLE_VALUE);
#else /* __WIN__ */
int create_flag;
const char* mode_str = NULL;
ut_a(name);
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
if (create_mode == OS_FILE_OPEN) {
mode_str = "OPEN";
if (access_type == OS_FILE_READ_ONLY) {
create_flag = O_RDONLY;
} else if (srv_read_only_mode) {
create_flag = O_RDONLY;
} else {
ut_a(access_type == OS_FILE_READ_WRITE
|| access_type == OS_FILE_READ_ALLOW_DELETE);
create_flag = O_RDWR;
}
} else if (srv_read_only_mode) {
mode_str = "OPEN";
create_flag = O_RDONLY;
} else if (create_mode == OS_FILE_CREATE) {
mode_str = "CREATE";
create_flag = O_RDWR | O_CREAT | O_EXCL;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
return((os_file_t) -1);
}
file = ::open(name, create_flag, os_innodb_umask);
*success = file == -1 ? FALSE : TRUE;
/* This function is always called for data files, we should disable
OS caching (O_DIRECT) here as we do in os_file_create_func(), so
we open the same file in the same mode, see man page of open(2). */
if (!srv_read_only_mode
&& *success
&& (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT
|| srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC)) {
os_file_set_nocache(file, name, mode_str);
}
#ifdef USE_FILE_LOCK
if (!srv_read_only_mode
&& *success
&& access_type == OS_FILE_READ_WRITE
&& os_file_lock(file, name)) {
*success = FALSE;
close(file);
file = -1;
}
#endif /* USE_FILE_LOCK */
#endif /* __WIN__ */
return(file);
}
/****************************************************************//**
Tries to disable OS caching on an opened file descriptor. */
UNIV_INTERN
void
os_file_set_nocache(
/*================*/
int fd /*!< in: file descriptor to alter */
MY_ATTRIBUTE((unused)),
const char* file_name /*!< in: used in the diagnostic
message */
MY_ATTRIBUTE((unused)),
const char* operation_name MY_ATTRIBUTE((unused)))
/*!< in: "open" or "create"; used
in the diagnostic message */
{
/* some versions of Solaris may not have DIRECTIO_ON */
#if defined(UNIV_SOLARIS) && defined(DIRECTIO_ON)
if (directio(fd, DIRECTIO_ON) == -1) {
int errno_save = errno;
ib_logf(IB_LOG_LEVEL_ERROR,
"Failed to set DIRECTIO_ON on file %s: %s: %s, "
"continuing anyway.",
file_name, operation_name, strerror(errno_save));
}
#elif defined(O_DIRECT)
if (fcntl(fd, F_SETFL, O_DIRECT) == -1) {
int errno_save = errno;
static bool warning_message_printed = false;
if (errno_save == EINVAL) {
if (!warning_message_printed) {
warning_message_printed = true;
# ifdef UNIV_LINUX
ib_logf(IB_LOG_LEVEL_WARN,
"Failed to set O_DIRECT on file "
"%s: %s: %s, continuing anyway. "
"O_DIRECT is known to result "
"in 'Invalid argument' on Linux on "
"tmpfs, see MySQL Bug#26662.",
file_name, operation_name,
strerror(errno_save));
# else /* UNIV_LINUX */
goto short_warning;
# endif /* UNIV_LINUX */
}
} else {
# ifndef UNIV_LINUX
short_warning:
# endif
ib_logf(IB_LOG_LEVEL_WARN,
"Failed to set O_DIRECT on file %s: %s: %s, "
"continuing anyway.",
file_name, operation_name, strerror(errno_save));
}
}
#endif /* defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) */
}
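/* Illustrative sketch (not part of the original source): once O_DIRECT
is set by this function, the caller must use suitably aligned buffers
and offsets. InnoDB does this by aligning I/O buffers to UNIV_PAGE_SIZE,
following the pattern used elsewhere in this file: */
#if 0
byte* buf2 = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
byte* buf = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
/* buf is now aligned and safe to pass to reads and writes on an
O_DIRECT file descriptor. */
ut_free(buf2);
#endif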
/****************************************************************//**
NOTE! Use the corresponding macro os_file_create(), not directly
this function!
Opens an existing file or creates a new.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
os_file_t
os_file_create_func(
/*================*/
const char* name, /*!< in: name of the file or path as a
null-terminated string */
ulint create_mode,/*!< in: create mode */
ulint purpose,/*!< in: OS_FILE_AIO, if asynchronous,
non-buffered i/o is desired,
OS_FILE_NORMAL, if any normal file;
NOTE that it also depends on type, os_aio_..
and srv_.. variables whether we really use
async i/o or unbuffered i/o: look in the
function source code for the exact rules */
ulint type, /*!< in: OS_DATA_FILE or OS_LOG_FILE */
ibool* success)/*!< out: TRUE if succeed, FALSE if error */
{
os_file_t file;
ibool retry;
ibool on_error_no_exit;
ibool on_error_silent;
#ifdef __WIN__
DBUG_EXECUTE_IF(
"ib_create_table_fail_disk_full",
*success = FALSE;
SetLastError(ERROR_DISK_FULL);
return((os_file_t) -1);
);
#else /* __WIN__ */
DBUG_EXECUTE_IF(
"ib_create_table_fail_disk_full",
*success = FALSE;
errno = ENOSPC;
return((os_file_t) -1);
);
#endif /* __WIN__ */
#ifdef __WIN__
DWORD create_flag;
DWORD share_mode = FILE_SHARE_READ;
on_error_no_exit = create_mode & OS_FILE_ON_ERROR_NO_EXIT
? TRUE : FALSE;
on_error_silent = create_mode & OS_FILE_ON_ERROR_SILENT
? TRUE : FALSE;
create_mode &= ~OS_FILE_ON_ERROR_NO_EXIT;
create_mode &= ~OS_FILE_ON_ERROR_SILENT;
if (create_mode == OS_FILE_OPEN_RAW) {
ut_a(!srv_read_only_mode);
create_flag = OPEN_EXISTING;
/* On Windows Physical devices require admin privileges and
have to have the write-share mode set. See the remarks
section for the CreateFile() function documentation in MSDN. */
share_mode |= FILE_SHARE_WRITE;
} else if (create_mode == OS_FILE_OPEN
|| create_mode == OS_FILE_OPEN_RETRY) {
create_flag = OPEN_EXISTING;
} else if (srv_read_only_mode) {
create_flag = OPEN_EXISTING;
} else if (create_mode == OS_FILE_CREATE) {
create_flag = CREATE_NEW;
} else if (create_mode == OS_FILE_OVERWRITE) {
create_flag = CREATE_ALWAYS;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
return((os_file_t) -1);
}
DWORD attributes = 0;
#ifdef UNIV_HOTBACKUP
attributes |= FILE_FLAG_NO_BUFFERING;
#else
if (purpose == OS_FILE_AIO) {
#ifdef WIN_ASYNC_IO
/* If specified, use asynchronous (overlapped) io and no
buffering of writes in the OS */
if (srv_use_native_aio) {
attributes |= FILE_FLAG_OVERLAPPED;
}
#endif /* WIN_ASYNC_IO */
} else if (purpose == OS_FILE_NORMAL) {
/* Use default setting. */
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown purpose flag (%lu) while opening file '%s'",
purpose, name);
return((os_file_t)(-1));
}
#ifdef UNIV_NON_BUFFERED_IO
// TODO: Create a bug, this looks wrong. The flush log
// parameter is dynamic.
if (type == OS_LOG_FILE && srv_flush_log_at_trx_commit == 2) {
/* Do not use unbuffered i/o for the log files because
value 2 denotes that we do not flush the log at every
commit, but only once per second */
} else if (srv_win_file_flush_method == SRV_WIN_IO_UNBUFFERED) {
attributes |= FILE_FLAG_NO_BUFFERING;
}
#endif /* UNIV_NON_BUFFERED_IO */
#endif /* UNIV_HOTBACKUP */
DWORD access = GENERIC_READ;
if (!srv_read_only_mode) {
access |= GENERIC_WRITE;
}
do {
/* Use default security attributes and no template file. */
file = CreateFile(
(LPCTSTR) name, access, share_mode, NULL,
create_flag, attributes, NULL);
if (file == INVALID_HANDLE_VALUE) {
const char* operation;
operation = (create_mode == OS_FILE_CREATE
&& !srv_read_only_mode)
? "create" : "open";
*success = FALSE;
if (on_error_no_exit) {
retry = os_file_handle_error_no_exit(
name, operation, on_error_silent);
} else {
retry = os_file_handle_error(name, operation);
}
} else {
*success = TRUE;
retry = FALSE;
}
} while (retry);
#else /* __WIN__ */
int create_flag;
const char* mode_str = NULL;
on_error_no_exit = create_mode & OS_FILE_ON_ERROR_NO_EXIT
? TRUE : FALSE;
on_error_silent = create_mode & OS_FILE_ON_ERROR_SILENT
? TRUE : FALSE;
create_mode &= ~OS_FILE_ON_ERROR_NO_EXIT;
create_mode &= ~OS_FILE_ON_ERROR_SILENT;
if (create_mode == OS_FILE_OPEN
|| create_mode == OS_FILE_OPEN_RAW
|| create_mode == OS_FILE_OPEN_RETRY) {
mode_str = "OPEN";
create_flag = srv_read_only_mode ? O_RDONLY : O_RDWR;
} else if (srv_read_only_mode) {
mode_str = "OPEN";
create_flag = O_RDONLY;
} else if (create_mode == OS_FILE_CREATE) {
mode_str = "CREATE";
create_flag = O_RDWR | O_CREAT | O_EXCL;
} else if (create_mode == OS_FILE_OVERWRITE) {
mode_str = "OVERWRITE";
create_flag = O_RDWR | O_CREAT | O_TRUNC;
} else {
ib_logf(IB_LOG_LEVEL_ERROR,
"Unknown file create mode (%lu) for file '%s'",
create_mode, name);
return((os_file_t) -1);
}
ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE);
ut_a(purpose == OS_FILE_AIO || purpose == OS_FILE_NORMAL);
#ifdef O_SYNC
/* We let O_SYNC only affect log files; note that we map O_DSYNC to
O_SYNC because the datasync options seemed to corrupt files in 2001
in both Linux and Solaris */
if (!srv_read_only_mode
&& type == OS_LOG_FILE
&& srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) {
create_flag |= O_SYNC;
}
#endif /* O_SYNC */
do {
file = ::open(name, create_flag, os_innodb_umask);
if (file == -1) {
const char* operation;
operation = (create_mode == OS_FILE_CREATE
&& !srv_read_only_mode)
? "create" : "open";
*success = FALSE;
if (on_error_no_exit) {
retry = os_file_handle_error_no_exit(
name, operation, on_error_silent);
} else {
retry = os_file_handle_error(name, operation);
}
} else {
*success = TRUE;
retry = false;
}
} while (retry);
/* We disable OS caching (O_DIRECT) only on data files */
if (!srv_read_only_mode
&& *success
&& type != OS_LOG_FILE
&& (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT
|| srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC)) {
os_file_set_nocache(file, name, mode_str);
}
#ifdef USE_FILE_LOCK
if (!srv_read_only_mode
&& *success
&& create_mode != OS_FILE_OPEN_RAW
&& os_file_lock(file, name)) {
if (create_mode == OS_FILE_OPEN_RETRY) {
ut_a(!srv_read_only_mode);
ib_logf(IB_LOG_LEVEL_INFO,
"Retrying to lock the first data file");
for (int i = 0; i < 100; i++) {
os_thread_sleep(1000000);
if (!os_file_lock(file, name)) {
*success = TRUE;
return(file);
}
}
ib_logf(IB_LOG_LEVEL_INFO,
"Unable to open the first data file");
}
*success = FALSE;
close(file);
file = -1;
}
#endif /* USE_FILE_LOCK */
#endif /* __WIN__ */
return(file);
}
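/* Illustrative usage sketch (not part of the original source): creating
a new data file through the os_file_create() macro that wraps this
function. The key and path below are assumed examples. */
#if 0
ibool success;
os_file_t fh = os_file_create(
innodb_file_data_key, "./test/t1.ibd",
OS_FILE_CREATE, OS_FILE_NORMAL, OS_DATA_FILE, &success);
if (!success) {
/* The error was already reported, unless OS_FILE_ON_ERROR_SILENT
was OR-ed into the create mode. */
}
#endif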
/***********************************************************************//**
Deletes a file if it exists. The file has to be closed before calling this.
@return TRUE if success */
UNIV_INTERN
bool
os_file_delete_if_exists_func(
/*==========================*/
const char* name) /*!< in: file path as a null-terminated
string */
{
#ifdef __WIN__
bool ret;
ulint count = 0;
loop:
/* In Windows, deleting an .ibd file may fail if mysqlbackup is copying
it */
ret = DeleteFile((LPCTSTR) name);
if (ret) {
return(true);
}
DWORD lasterr = GetLastError();
if (lasterr == ERROR_FILE_NOT_FOUND
|| lasterr == ERROR_PATH_NOT_FOUND) {
/* The file does not exist; this is not an error */
return(true);
}
count++;
if (count > 100 && 0 == (count % 10)) {
os_file_get_last_error(true); /* print error information */
ib_logf(IB_LOG_LEVEL_WARN, "Delete of file %s failed.", name);
}
os_thread_sleep(500000); /* sleep for 0.5 second */
if (count > 2000) {
return(false);
}
goto loop;
#else
int ret;
ret = unlink(name);
if (ret != 0 && errno != ENOENT) {
os_file_handle_error_no_exit(name, "delete", FALSE);
return(false);
}
return(true);
#endif /* __WIN__ */
}
/***********************************************************************//**
Deletes a file. The file has to be closed before calling this.
@return TRUE if success */
UNIV_INTERN
bool
os_file_delete_func(
/*================*/
const char* name) /*!< in: file path as a null-terminated
string */
{
#ifdef __WIN__
BOOL ret;
ulint count = 0;
loop:
/* In Windows, deleting an .ibd file may fail if mysqlbackup is copying
it */
ret = DeleteFile((LPCTSTR) name);
if (ret) {
return(true);
}
if (GetLastError() == ERROR_FILE_NOT_FOUND) {
/* If the file does not exist, we classify this as a 'mild'
error and return */
return(false);
}
count++;
if (count > 100 && 0 == (count % 10)) {
os_file_get_last_error(true); /* print error information */
fprintf(stderr,
"InnoDB: Warning: cannot delete file %s\n"
"InnoDB: Are you running mysqlbackup"
" to back up the file?\n", name);
}
os_thread_sleep(1000000); /* sleep for a second */
if (count > 2000) {
return(false);
}
goto loop;
#else
int ret;
ret = unlink(name);
if (ret != 0) {
os_file_handle_error_no_exit(name, "delete", FALSE);
return(false);
}
return(true);
#endif
}
/***********************************************************************//**
NOTE! Use the corresponding macro os_file_rename(), not directly this function!
Renames a file (can also move it to another directory). It is safest that the
file is closed before calling this function.
@return TRUE if success */
UNIV_INTERN
ibool
os_file_rename_func(
/*================*/
const char* oldpath,/*!< in: old file path as a null-terminated
string */
const char* newpath)/*!< in: new file path */
{
#ifdef UNIV_DEBUG
os_file_type_t type;
ibool exists;
/* New path must not exist. */
ut_ad(os_file_status(newpath, &exists, &type));
ut_ad(!exists);
/* Old path must exist. */
ut_ad(os_file_status(oldpath, &exists, &type));
ut_ad(exists);
#endif /* UNIV_DEBUG */
#ifdef __WIN__
BOOL ret;
ret = MoveFile((LPCTSTR) oldpath, (LPCTSTR) newpath);
if (ret) {
return(TRUE);
}
os_file_handle_error_no_exit(oldpath, "rename", FALSE);
return(FALSE);
#else
int ret;
ret = rename(oldpath, newpath);
if (ret != 0) {
os_file_handle_error_no_exit(oldpath, "rename", FALSE);
return(FALSE);
}
return(TRUE);
#endif /* __WIN__ */
}
/***********************************************************************//**
NOTE! Use the corresponding macro os_file_close(), not directly this function!
Closes a file handle. In case of error, error number can be retrieved with
os_file_get_last_error.
@return TRUE if success */
UNIV_INTERN
ibool
os_file_close_func(
/*===============*/
os_file_t file) /*!< in, own: handle to a file */
{
#ifdef __WIN__
BOOL ret;
ret = CloseHandle(file);
if (ret) {
return(TRUE);
}
os_file_handle_error(NULL, "close");
return(FALSE);
#else
int ret;
ret = close(file);
if (ret == -1) {
os_file_handle_error(NULL, "close");
return(FALSE);
}
return(TRUE);
#endif /* __WIN__ */
}
#ifdef UNIV_HOTBACKUP
/***********************************************************************//**
Closes a file handle.
@return TRUE if success */
UNIV_INTERN
ibool
os_file_close_no_error_handling(
/*============================*/
os_file_t file) /*!< in, own: handle to a file */
{
#ifdef __WIN__
BOOL ret;
ret = CloseHandle(file);
if (ret) {
return(TRUE);
}
return(FALSE);
#else
int ret;
ret = close(file);
if (ret == -1) {
return(FALSE);
}
return(TRUE);
#endif /* __WIN__ */
}
#endif /* UNIV_HOTBACKUP */
/***********************************************************************//**
Gets a file size.
@return file size, or (os_offset_t) -1 on failure */
UNIV_INTERN
os_offset_t
os_file_get_size(
/*=============*/
os_file_t file) /*!< in: handle to a file */
{
#ifdef __WIN__
os_offset_t offset;
DWORD high;
DWORD low;
low = GetFileSize(file, &high);
if ((low == 0xFFFFFFFF) && (GetLastError() != NO_ERROR)) {
return((os_offset_t) -1);
}
offset = (os_offset_t) low | ((os_offset_t) high << 32);
return(offset);
#else
return((os_offset_t) lseek(file, 0, SEEK_END));
#endif /* __WIN__ */
}
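/* Illustrative sketch (not part of the original source): the Windows
branch above recombines the two 32-bit halves returned by GetFileSize()
into one 64-bit value. The same arithmetic in isolation: */
#if 0
os_offset_t low = 0x89ABCDEF; /* lower 32 bits */
os_offset_t high = 0x01234567; /* upper 32 bits */
os_offset_t offset = low | (high << 32);
ut_a(offset == 0x0123456789ABCDEFULL);
#endif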
/***********************************************************************//**
Write the specified number of zeros to a newly created file.
@return TRUE if success */
UNIV_INTERN
ibool
os_file_set_size(
/*=============*/
const char* name, /*!< in: name of the file or path as a
null-terminated string */
os_file_t file, /*!< in: handle to a file */
os_offset_t size) /*!< in: file size */
{
ibool ret;
byte* buf;
byte* buf2;
ulint buf_size;
#ifdef HAVE_POSIX_FALLOCATE
if (srv_use_posix_fallocate) {
int err;
do {
err = posix_fallocate(file, 0, size);
} while (err == EINTR
&& srv_shutdown_state == SRV_SHUTDOWN_NONE);
if (err) {
ib_logf(IB_LOG_LEVEL_ERROR,
"preallocating " INT64PF " bytes for"
"file %s failed with error %d",
size, name, err);
}
return(!err);
}
#endif /* HAVE_POSIX_FALLOCATE */
#ifdef _WIN32
/* Write 1 page of zeroes at the desired end. */
buf_size = UNIV_PAGE_SIZE;
os_offset_t current_size = size - buf_size;
#else
/* Write up to 1 megabyte at a time. */
buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
os_offset_t current_size = 0;
#endif
buf2 = static_cast<byte*>(calloc(1, buf_size + UNIV_PAGE_SIZE));
if (!buf2) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Cannot allocate " ULINTPF " bytes to extend file\n",
buf_size + UNIV_PAGE_SIZE);
return(FALSE);
}
/* Align the buffer for possible raw i/o */
buf = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
do {
ulint n_bytes;
if (size - current_size < (os_offset_t) buf_size) {
n_bytes = (ulint) (size - current_size);
} else {
n_bytes = buf_size;
}
ret = os_file_write(name, file, buf, current_size, n_bytes);
if (!ret) {
break;
}
current_size += n_bytes;
} while (current_size < size);
free(buf2);
return(ret && os_file_flush(file));
}
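/* Illustrative usage sketch (not part of the original source):
preallocating 10 MB for a freshly created file. The handle fh is assumed
to come from a successful os_file_create() call as sketched above. */
#if 0
os_offset_t new_size = 10 * 1024 * 1024;
if (!os_file_set_size("./test/t1.ibd", fh, new_size)) {
/* Disk full or an I/O error; the details were already printed. */
}
#endif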
/***********************************************************************//**
Truncates a file at its current position.
@return TRUE if success */
UNIV_INTERN
ibool
os_file_set_eof(
/*============*/
FILE* file) /*!< in: file to be truncated */
{
#ifdef __WIN__
HANDLE h = (HANDLE) _get_osfhandle(fileno(file));
return(SetEndOfFile(h));
#else /* __WIN__ */
return(!ftruncate(fileno(file), ftell(file)));
#endif /* __WIN__ */
}
#ifndef __WIN__
/***********************************************************************//**
Wrapper to fsync(2) that retries the call on some errors.
Returns the value 0 if successful; otherwise the value -1 is returned and
the global variable errno is set to indicate the error.
@return 0 if success, -1 otherwise */
static
int
os_file_fsync(
/*==========*/
os_file_t file) /*!< in: handle to a file */
{
int ret;
int failures;
ibool retry;
failures = 0;
do {
ret = fsync(file);
os_n_fsyncs++;
if (ret == -1 && errno == ENOLCK) {
if (failures % 100 == 0) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: fsync(): "
"No locks available; retrying\n");
}
os_thread_sleep(200000 /* 0.2 sec */);
failures++;
retry = TRUE;
} else {
retry = FALSE;
}
} while (retry);
return(ret);
}
#endif /* !__WIN__ */
/***********************************************************************//**
NOTE! Use the corresponding macro os_file_flush(), not directly this function!
Flushes the write buffers of a given file to the disk.
@return TRUE if success */
UNIV_INTERN
ibool
os_file_flush_func(
/*===============*/
os_file_t file) /*!< in, own: handle to a file */
{
#ifdef __WIN__
BOOL ret;
os_n_fsyncs++;
ret = FlushFileBuffers(file);
if (ret) {
return(TRUE);
}
/* Since Windows returns ERROR_INVALID_FUNCTION if the 'file' is
actually a raw device, we choose to ignore that error if we are using
raw disks */
if (srv_start_raw_disk_in_use && GetLastError()
== ERROR_INVALID_FUNCTION) {
return(TRUE);
}
os_file_handle_error(NULL, "flush");
/* It is a fatal error if a file flush does not succeed, because then
the database can get corrupt on disk */
ut_error;
return(FALSE);
#else
int ret;
#if defined(HAVE_DARWIN_THREADS)
# ifndef F_FULLFSYNC
/* The following definition is from the Mac OS X 10.3 <sys/fcntl.h> */
# define F_FULLFSYNC 51 /* fsync + ask the drive to flush to the media */
# elif F_FULLFSYNC != 51
# error "F_FULLFSYNC != 51: ABI incompatibility with Mac OS X 10.3"
# endif
/* Apple has disabled fsync() for internal disk drives in OS X. That
caused corruption for a user when he tested a power outage. Let us in
OS X use a nonstandard flush method recommended by an Apple
engineer. */
if (!srv_have_fullfsync) {
/* If we are not on an operating system that supports this,
then fall back to a plain fsync. */
ret = os_file_fsync(file);
} else {
ret = fcntl(file, F_FULLFSYNC, NULL);
if (ret) {
/* If we are not on a file system that supports this,
then fall back to a plain fsync. */
ret = os_file_fsync(file);
}
}
#else
ret = os_file_fsync(file);
#endif
if (ret == 0) {
return(TRUE);
}
/* Since Linux returns EINVAL if the 'file' is actually a raw device,
we choose to ignore that error if we are using raw disks */
if (srv_start_raw_disk_in_use && errno == EINVAL) {
return(TRUE);
}
ib_logf(IB_LOG_LEVEL_ERROR, "The OS said file flush did not succeed");
os_file_handle_error(NULL, "flush");
/* It is a fatal error if a file flush does not succeed, because then
the database can get corrupt on disk */
ut_error;
return(FALSE);
#endif
}
#ifndef __WIN__
/*******************************************************************//**
Does a synchronous read operation in Posix.
@return number of bytes read, -1 if error */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
ssize_t
os_file_pread(
/*==========*/
os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read */
ulint n, /*!< in: number of bytes to read */
os_offset_t offset) /*!< in: file offset from where to read */
{
off_t offs;
#if defined(HAVE_PREAD) && !defined(HAVE_BROKEN_PREAD)
ssize_t n_bytes;
#endif /* HAVE_PREAD && !HAVE_BROKEN_PREAD */
ut_ad(n);
/* If off_t is > 4 bytes in size, then we assume we can pass a
64-bit address */
offs = (off_t) offset;
if (sizeof(off_t) <= 4) {
if (offset != (os_offset_t) offs) {
ib_logf(IB_LOG_LEVEL_ERROR,
"File read at offset > 4 GB");
}
}
os_n_file_reads++;
#if defined(HAVE_PREAD) && !defined(HAVE_BROKEN_PREAD)
#if defined(HAVE_ATOMIC_BUILTINS) && UNIV_WORD_SIZE == 8
(void) os_atomic_increment_ulint(&os_n_pending_reads, 1);
(void) os_atomic_increment_ulint(&os_file_n_pending_preads, 1);
MONITOR_ATOMIC_INC(MONITOR_OS_PENDING_READS);
#else
os_mutex_enter(os_file_count_mutex);
os_file_n_pending_preads++;
os_n_pending_reads++;
MONITOR_INC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
#endif /* HAVE_ATOMIC_BUILTINS && UNIV_WORD_SIZE == 8 */
n_bytes = pread(file, buf, n, offs);
#if defined(HAVE_ATOMIC_BUILTINS) && UNIV_WORD_SIZE == 8
(void) os_atomic_decrement_ulint(&os_n_pending_reads, 1);
(void) os_atomic_decrement_ulint(&os_file_n_pending_preads, 1);
MONITOR_ATOMIC_DEC(MONITOR_OS_PENDING_READS);
#else
os_mutex_enter(os_file_count_mutex);
os_file_n_pending_preads--;
os_n_pending_reads--;
MONITOR_DEC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
#endif /* HAVE_ATOMIC_BUILTINS && UNIV_WORD_SIZE == 8 */
return(n_bytes);
#else
{
off_t ret_offset;
ssize_t ret;
#ifndef UNIV_HOTBACKUP
ulint i;
#endif /* !UNIV_HOTBACKUP */
#if defined(HAVE_ATOMIC_BUILTINS) && UNIV_WORD_SIZE == 8
(void) os_atomic_increment_ulint(&os_n_pending_reads, 1);
MONITOR_ATOMIC_INC(MONITOR_OS_PENDING_READS);
#else
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads++;
MONITOR_INC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
#endif /* HAVE_ATOMIC_BUILTINS && UNIV_WORD_SIZE == 8 */
#ifndef UNIV_HOTBACKUP
/* Protect the seek / read operation with a mutex */
i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES;
os_mutex_enter(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
ret_offset = lseek(file, offs, SEEK_SET);
if (ret_offset < 0) {
ret = -1;
} else {
ret = read(file, buf, (ssize_t) n);
}
#ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
#if defined(HAVE_ATOMIC_BUILTINS) && UNIV_WORD_SIZE == 8
(void) os_atomic_decrement_ulint(&os_n_pending_reads, 1);
MONITOR_ATOMIC_DEC(MONITOR_OS_PENDING_READS);
#else
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads--;
MONITOR_DEC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
#endif /* HAVE_ATOMIC_BUILTINS && UNIV_WORD_SIZE == 8 */
return(ret);
}
#endif /* HAVE_PREAD && !HAVE_BROKEN_PREAD */
}
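/* Illustrative sketch (not part of the original source): the off_t
narrowing check performed above. On a platform with a 32-bit off_t, an
offset beyond 4 GB does not survive the round trip through the cast,
which is exactly what the function detects: */
#if 0
os_offset_t offset = 5ULL * 1024 * 1024 * 1024; /* 5 GB */
off_t offs = (off_t) offset;
if (sizeof(off_t) <= 4 && offset != (os_offset_t) offs) {
/* The read would be issued at a wrapped-around position. */
}
#endif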
/*******************************************************************//**
Does a synchronous write operation in Posix.
@return number of bytes written, -1 if error */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
ssize_t
os_file_pwrite(
/*===========*/
os_file_t file, /*!< in: handle to a file */
const void* buf, /*!< in: buffer from where to write */
ulint n, /*!< in: number of bytes to write */
os_offset_t offset) /*!< in: file offset where to write */
{
ssize_t ret;
off_t offs;
ut_ad(n);
ut_ad(!srv_read_only_mode);
/* If off_t is > 4 bytes in size, then we assume we can pass a
64-bit address */
offs = (off_t) offset;
if (sizeof(off_t) <= 4) {
if (offset != (os_offset_t) offs) {
ib_logf(IB_LOG_LEVEL_ERROR,
"File write at offset > 4 GB.");
}
}
os_n_file_writes++;
#if defined(HAVE_PWRITE) && !defined(HAVE_BROKEN_PREAD)
#if !defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8
os_mutex_enter(os_file_count_mutex);
os_file_n_pending_pwrites++;
os_n_pending_writes++;
MONITOR_INC(MONITOR_OS_PENDING_WRITES);
os_mutex_exit(os_file_count_mutex);
#else
(void) os_atomic_increment_ulint(&os_n_pending_writes, 1);
(void) os_atomic_increment_ulint(&os_file_n_pending_pwrites, 1);
MONITOR_ATOMIC_INC(MONITOR_OS_PENDING_WRITES);
#endif /* !HAVE_ATOMIC_BUILTINS || UNIV_WORD_SIZE < 8 */
ret = pwrite(file, buf, (ssize_t) n, offs);
#if !defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8
os_mutex_enter(os_file_count_mutex);
os_file_n_pending_pwrites--;
os_n_pending_writes--;
MONITOR_DEC(MONITOR_OS_PENDING_WRITES);
os_mutex_exit(os_file_count_mutex);
#else
(void) os_atomic_decrement_ulint(&os_n_pending_writes, 1);
(void) os_atomic_decrement_ulint(&os_file_n_pending_pwrites, 1);
MONITOR_ATOMIC_DEC(MONITOR_OS_PENDING_WRITES);
#endif /* !HAVE_ATOMIC_BUILTINS || UNIV_WORD_SIZE < 8 */
return(ret);
#else
{
off_t ret_offset;
# ifndef UNIV_HOTBACKUP
ulint i;
# endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_writes++;
MONITOR_INC(MONITOR_OS_PENDING_WRITES);
os_mutex_exit(os_file_count_mutex);
# ifndef UNIV_HOTBACKUP
/* Protect the seek / write operation with a mutex */
i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES;
os_mutex_enter(os_file_seek_mutexes[i]);
# endif /* !UNIV_HOTBACKUP */
ret_offset = lseek(file, offs, SEEK_SET);
if (ret_offset < 0) {
ret = -1;
goto func_exit;
}
ret = write(file, buf, (ssize_t) n);
func_exit:
# ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
# endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_writes--;
MONITOR_DEC(MONITOR_OS_PENDING_WRITES);
os_mutex_exit(os_file_count_mutex);
return(ret);
}
#endif /* HAVE_PWRITE && !HAVE_BROKEN_PREAD */
}
#endif /* !__WIN__ */
/*******************************************************************//**
NOTE! Use the corresponding macro os_file_read(), not directly this
function!
Requests a synchronous positioned read operation.
@return TRUE if request was successful, FALSE if fail */
UNIV_INTERN
ibool
os_file_read_func(
/*==============*/
os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read */
os_offset_t offset, /*!< in: file offset where to read */
ulint n) /*!< in: number of bytes to read */
{
#ifdef __WIN__
BOOL ret;
DWORD len;
DWORD ret2;
DWORD low;
DWORD high;
ibool retry;
#ifndef UNIV_HOTBACKUP
ulint i;
#endif /* !UNIV_HOTBACKUP */
/* On 64-bit Windows, ulint is 64 bits. But offset and n should be
no more than 32 bits. */
ut_a((n & 0xFFFFFFFFUL) == n);
os_n_file_reads++;
os_bytes_read_since_printout += n;
try_again:
ut_ad(buf);
ut_ad(n > 0);
low = (DWORD) offset & 0xFFFFFFFF;
high = (DWORD) (offset >> 32);
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads++;
MONITOR_INC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
#ifndef UNIV_HOTBACKUP
/* Protect the seek / read operation with a mutex */
i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES;
os_mutex_enter(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
ret2 = SetFilePointer(
file, low, reinterpret_cast<PLONG>(&high), FILE_BEGIN);
if (ret2 == 0xFFFFFFFF && GetLastError() != NO_ERROR) {
#ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads--;
MONITOR_DEC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
goto error_handling;
}
ret = ReadFile(file, buf, (DWORD) n, &len, NULL);
#ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads--;
MONITOR_DEC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
if (ret && len == n) {
return(TRUE);
}
#else /* __WIN__ */
ibool retry;
ssize_t ret;
os_bytes_read_since_printout += n;
try_again:
ret = os_file_pread(file, buf, n, offset);
if ((ulint) ret == n) {
return(TRUE);
} else if (ret == -1) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Error in system call pread(). The operating"
" system error number is %lu.",(ulint) errno);
} else {
/* Partial read occurred */
ib_logf(IB_LOG_LEVEL_ERROR,
"Tried to read " ULINTPF " bytes at offset "
UINT64PF ". Was only able to read %ld.",
n, offset, (lint) ret);
}
#endif /* __WIN__ */
#ifdef __WIN__
error_handling:
#endif
retry = os_file_handle_error(NULL, "read");
if (retry) {
goto try_again;
}
fprintf(stderr,
"InnoDB: Fatal error: cannot read from file."
" OS error number %lu.\n",
#ifdef __WIN__
(ulong) GetLastError()
#else
(ulong) errno
#endif /* __WIN__ */
);
fflush(stderr);
ut_error;
return(FALSE);
}
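/* Illustrative usage sketch (not part of the original source): reading
the first page of a file through the os_file_read() macro that wraps
this function. The buffer is aligned to UNIV_PAGE_SIZE so that it also
works on O_DIRECT file descriptors; fh is an assumed open handle. */
#if 0
byte* buf2 = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
byte* page = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
if (os_file_read(fh, page, 0, UNIV_PAGE_SIZE)) {
/* page now holds the first UNIV_PAGE_SIZE bytes of the file. */
}
ut_free(buf2);
#endif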
/*******************************************************************//**
NOTE! Use the corresponding macro os_file_read_no_error_handling(),
not directly this function!
Requests a synchronous positioned read operation. This function does not do
any error handling. In case of error it returns FALSE.
@return TRUE if request was successful, FALSE if fail */
UNIV_INTERN
ibool
os_file_read_no_error_handling_func(
/*================================*/
os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read */
os_offset_t offset, /*!< in: file offset where to read */
ulint n) /*!< in: number of bytes to read */
{
#ifdef __WIN__
BOOL ret;
DWORD len;
DWORD ret2;
DWORD low;
DWORD high;
ibool retry;
#ifndef UNIV_HOTBACKUP
ulint i;
#endif /* !UNIV_HOTBACKUP */
/* On 64-bit Windows, ulint is 64 bits. But offset and n should be
no more than 32 bits. */
ut_a((n & 0xFFFFFFFFUL) == n);
os_n_file_reads++;
os_bytes_read_since_printout += n;
try_again:
ut_ad(buf);
ut_ad(n > 0);
low = (DWORD) offset & 0xFFFFFFFF;
high = (DWORD) (offset >> 32);
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads++;
MONITOR_INC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
#ifndef UNIV_HOTBACKUP
/* Protect the seek / read operation with a mutex */
i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES;
os_mutex_enter(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
ret2 = SetFilePointer(
file, low, reinterpret_cast<PLONG>(&high), FILE_BEGIN);
if (ret2 == 0xFFFFFFFF && GetLastError() != NO_ERROR) {
#ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads--;
MONITOR_DEC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
goto error_handling;
}
ret = ReadFile(file, buf, (DWORD) n, &len, NULL);
#ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_reads--;
MONITOR_DEC(MONITOR_OS_PENDING_READS);
os_mutex_exit(os_file_count_mutex);
if (ret && len == n) {
return(TRUE);
}
#else /* __WIN__ */
ibool retry;
ssize_t ret;
os_bytes_read_since_printout += n;
try_again:
ret = os_file_pread(file, buf, n, offset);
if ((ulint) ret == n) {
return(TRUE);
} else if (ret == -1) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Error in system call pread(). The operating"
" system error number is %lu.",(ulint) errno);
} else {
/* Partial read occurred */
ib_logf(IB_LOG_LEVEL_ERROR,
"Tried to read " ULINTPF " bytes at offset "
UINT64PF ". Was only able to read %ld.",
n, offset, (lint) ret);
}
#endif /* __WIN__ */
#ifdef __WIN__
error_handling:
#endif
retry = os_file_handle_error_no_exit(NULL, "read", FALSE);
if (retry) {
goto try_again;
}
return(FALSE);
}
/*******************************************************************//**
Rewind file to its start, read at most size - 1 bytes from it to str, and
NUL-terminate str. All errors are silently ignored. This function is
mostly meant to be used with temporary files. */
UNIV_INTERN
void
os_file_read_string(
/*================*/
FILE* file, /*!< in: file to read from */
char* str, /*!< in: buffer where to read */
ulint size) /*!< in: size of buffer */
{
size_t flen;
if (size == 0) {
return;
}
rewind(file);
flen = fread(str, 1, size - 1, file);
str[flen] = '\0';
}
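/* Illustrative usage sketch (not part of the original source): reading
back the contents of a temporary FILE into a NUL-terminated buffer. */
#if 0
char buf[4096];
FILE* tmp = tmpfile();
if (tmp != NULL) {
fprintf(tmp, "some output\n");
os_file_read_string(tmp, buf, sizeof(buf));
/* buf is now NUL-terminated, truncated to sizeof(buf) - 1 bytes. */
fclose(tmp);
}
#endif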
/*******************************************************************//**
NOTE! Use the corresponding macro os_file_write(), not directly
this function!
Requests a synchronous write operation.
@return TRUE if request was successful, FALSE if fail */
UNIV_INTERN
ibool
os_file_write_func(
/*===============*/
const char* name, /*!< in: name of the file or path as a
null-terminated string */
os_file_t file, /*!< in: handle to a file */
const void* buf, /*!< in: buffer from which to write */
os_offset_t offset, /*!< in: file offset where to write */
ulint n) /*!< in: number of bytes to write */
{
ut_ad(!srv_read_only_mode);
#ifdef __WIN__
BOOL ret;
DWORD len;
DWORD ret2;
DWORD low;
DWORD high;
ulint n_retries = 0;
ulint err;
DWORD saved_error = 0;
#ifndef UNIV_HOTBACKUP
ulint i;
#endif /* !UNIV_HOTBACKUP */
/* On 64-bit Windows, ulint is 64 bits. But offset and n should be
no more than 32 bits. */
ut_a((n & 0xFFFFFFFFUL) == n);
os_n_file_writes++;
ut_ad(buf);
ut_ad(n > 0);
retry:
low = (DWORD) offset & 0xFFFFFFFF;
high = (DWORD) (offset >> 32);
os_mutex_enter(os_file_count_mutex);
os_n_pending_writes++;
MONITOR_INC(MONITOR_OS_PENDING_WRITES);
os_mutex_exit(os_file_count_mutex);
#ifndef UNIV_HOTBACKUP
/* Protect the seek / write operation with a mutex */
i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES;
os_mutex_enter(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
ret2 = SetFilePointer(
file, low, reinterpret_cast<PLONG>(&high), FILE_BEGIN);
if (ret2 == 0xFFFFFFFF && GetLastError() != NO_ERROR) {
#ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_writes--;
MONITOR_DEC(MONITOR_OS_PENDING_WRITES);
os_mutex_exit(os_file_count_mutex);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: File pointer positioning to"
" file %s failed at\n"
"InnoDB: offset %llu. Operating system"
" error number %lu.\n"
"InnoDB: Some operating system error numbers"
" are described at\n"
"InnoDB: "
REFMAN "operating-system-error-codes.html\n",
name, offset, (ulong) GetLastError());
return(FALSE);
}
ret = WriteFile(file, buf, (DWORD) n, &len, NULL);
#ifndef UNIV_HOTBACKUP
os_mutex_exit(os_file_seek_mutexes[i]);
#endif /* !UNIV_HOTBACKUP */
os_mutex_enter(os_file_count_mutex);
os_n_pending_writes--;
MONITOR_DEC(MONITOR_OS_PENDING_WRITES);
os_mutex_exit(os_file_count_mutex);
if (ret && len == n) {
return(TRUE);
}
/* If some background file system backup tool is running then, at
least in Windows 2000, we may get a specific error here. Let us
retry the operation 100 times, with 1 second waits. */
if (GetLastError() == ERROR_LOCK_VIOLATION && n_retries < 100) {
os_thread_sleep(1000000);
n_retries++;
goto retry;
}
if (!os_has_said_disk_full) {
char *winmsg = NULL;
saved_error = GetLastError();
err = (ulint) saved_error;
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: Write to file %s failed"
" at offset %llu.\n"
"InnoDB: %lu bytes should have been written,"
" only %lu were written.\n"
"InnoDB: Operating system error number %lu.\n"
"InnoDB: Check that your OS and file system"
" support files of this size.\n"
"InnoDB: Check also that the disk is not full"
" or a disk quota exceeded.\n",
name, offset,
(ulong) n, (ulong) len, (ulong) err);
/* Ask Windows to prepare a standard message for a
GetLastError() */
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, saved_error,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPSTR)&winmsg, 0, NULL);
if (winmsg) {
fprintf(stderr,
"InnoDB: FormatMessage: Error number %lu means '%s'.\n",
(ulong) saved_error, winmsg);
LocalFree(winmsg);
}
if (strerror((int) err) != NULL) {
fprintf(stderr,
"InnoDB: Error number %lu means '%s'.\n",
(ulong) err, strerror((int) err));
}
fprintf(stderr,
"InnoDB: Some operating system error numbers"
" are described at\n"
"InnoDB: "
REFMAN "operating-system-error-codes.html\n");
os_has_said_disk_full = TRUE;
}
return(FALSE);
#else
ssize_t ret;
ret = os_file_pwrite(file, buf, n, offset);
if ((ulint) ret == n) {
return(TRUE);
}
if (!os_has_said_disk_full) {
ut_print_timestamp(stderr);
if (ret == -1) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Failure of system call pwrite(). Operating"
" system error number is %lu.",
(ulint) errno);
} else {
fprintf(stderr,
" InnoDB: Error: Write to file %s failed"
" at offset " UINT64PF ".\n"
"InnoDB: %lu bytes should have been written,"
" only %ld were written.\n"
"InnoDB: Operating system error number %lu.\n"
"InnoDB: Check that your OS and file system"
" support files of this size.\n"
"InnoDB: Check also that the disk is not full"
" or a disk quota exceeded.\n",
name, offset, n, (lint) ret,
(ulint) errno);
}
if (strerror(errno) != NULL) {
fprintf(stderr,
"InnoDB: Error number %d means '%s'.\n",
errno, strerror(errno));
}
fprintf(stderr,
"InnoDB: Some operating system error numbers"
" are described at\n"
"InnoDB: "
REFMAN "operating-system-error-codes.html\n");
os_has_said_disk_full = TRUE;
}
return(FALSE);
#endif
}
/*******************************************************************//**
Check the existence and type of the given file.
@return TRUE if call succeeded */
UNIV_INTERN
ibool
os_file_status(
/*===========*/
const char* path, /*!< in: pathname of the file */
ibool* exists, /*!< out: TRUE if file exists */
os_file_type_t* type) /*!< out: type of the file (if it exists) */
{
#ifdef __WIN__
int ret;
struct _stat64 statinfo;
ret = _stat64(path, &statinfo);
if (ret && (errno == ENOENT || errno == ENOTDIR || errno == ENAMETOOLONG)) {
/* file does not exist */
*exists = FALSE;
return(TRUE);
} else if (ret) {
/* file exists, but stat call failed */
os_file_handle_error_no_exit(path, "stat", FALSE);
return(FALSE);
}
if (_S_IFDIR & statinfo.st_mode) {
*type = OS_FILE_TYPE_DIR;
} else if (_S_IFREG & statinfo.st_mode) {
*type = OS_FILE_TYPE_FILE;
} else {
*type = OS_FILE_TYPE_UNKNOWN;
}
*exists = TRUE;
return(TRUE);
#else
int ret;
struct stat statinfo;
ret = stat(path, &statinfo);
if (ret && (errno == ENOENT || errno == ENOTDIR || errno == ENAMETOOLONG)) {
/* file does not exist */
*exists = FALSE;
return(TRUE);
} else if (ret) {
/* file exists, but stat call failed */
os_file_handle_error_no_exit(path, "stat", FALSE);
return(FALSE);
}
if (S_ISDIR(statinfo.st_mode)) {
*type = OS_FILE_TYPE_DIR;
} else if (S_ISLNK(statinfo.st_mode)) {
*type = OS_FILE_TYPE_LINK;
} else if (S_ISREG(statinfo.st_mode)) {
*type = OS_FILE_TYPE_FILE;
} else {
*type = OS_FILE_TYPE_UNKNOWN;
}
*exists = TRUE;
return(TRUE);
#endif
}
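/* Illustrative usage sketch (not part of the original source): probing
for a file before acting on it. Note the double convention: the return
value says whether the probe itself succeeded, while *exists carries
the actual answer. */
#if 0
ibool exists;
os_file_type_t type;
if (os_file_status("./test/t1.ibd", &exists, &type)
&& exists
&& type == OS_FILE_TYPE_FILE) {
/* a regular file is present at that path */
}
#endif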
/*******************************************************************//**
This function returns information about the specified file
@return DB_SUCCESS if all OK */
UNIV_INTERN
dberr_t
os_file_get_status(
/*===============*/
const char* path, /*!< in: pathname of the file */
os_file_stat_t* stat_info, /*!< information of a file in a
directory */
bool check_rw_perm) /*!< in: for testing whether the
file can be opened in RW mode */
{
int ret;
#ifdef __WIN__
struct _stat64 statinfo;
ret = _stat64(path, &statinfo);
if (ret && (errno == ENOENT || errno == ENOTDIR)) {
/* file does not exist */
return(DB_NOT_FOUND);
} else if (ret) {
/* file exists, but stat call failed */
os_file_handle_error_no_exit(path, "stat", FALSE);
return(DB_FAIL);
} else if (_S_IFDIR & statinfo.st_mode) {
stat_info->type = OS_FILE_TYPE_DIR;
} else if (_S_IFREG & statinfo.st_mode) {
DWORD access = GENERIC_READ;
if (!srv_read_only_mode) {
access |= GENERIC_WRITE;
}
stat_info->type = OS_FILE_TYPE_FILE;
/* Check if we can open it in read-only mode. */
if (check_rw_perm) {
HANDLE fh;
fh = CreateFile(
(LPCTSTR) path, // File to open
access,
0, // No sharing
NULL, // Default security
OPEN_EXISTING, // Existing file only
FILE_ATTRIBUTE_NORMAL, // Normal file
NULL); // No attr. template
if (fh == INVALID_HANDLE_VALUE) {
stat_info->rw_perm = false;
} else {
stat_info->rw_perm = true;
CloseHandle(fh);
}
}
} else {
stat_info->type = OS_FILE_TYPE_UNKNOWN;
}
#else
struct stat statinfo;
ret = stat(path, &statinfo);
if (ret && (errno == ENOENT || errno == ENOTDIR)) {
/* file does not exist */
return(DB_NOT_FOUND);
} else if (ret) {
/* file exists, but stat call failed */
os_file_handle_error_no_exit(path, "stat", FALSE);
return(DB_FAIL);
}
switch (statinfo.st_mode & S_IFMT) {
case S_IFDIR:
stat_info->type = OS_FILE_TYPE_DIR;
break;
case S_IFLNK:
stat_info->type = OS_FILE_TYPE_LINK;
break;
case S_IFBLK:
/* Handle block device as regular file. */
case S_IFCHR:
/* Handle character device as regular file. */
case S_IFREG:
stat_info->type = OS_FILE_TYPE_FILE;
break;
default:
stat_info->type = OS_FILE_TYPE_UNKNOWN;
}
if (check_rw_perm && stat_info->type == OS_FILE_TYPE_FILE) {
int fh;
int access;
access = !srv_read_only_mode ? O_RDWR : O_RDONLY;
fh = ::open(path, access, os_innodb_umask);
if (fh == -1) {
stat_info->rw_perm = false;
} else {
stat_info->rw_perm = true;
close(fh);
}
}
#endif /* __WIN__ */
stat_info->ctime = statinfo.st_ctime;
stat_info->atime = statinfo.st_atime;
stat_info->mtime = statinfo.st_mtime;
stat_info->size = statinfo.st_size;
return(DB_SUCCESS);
}
/* path name separator character */
#ifdef __WIN__
# define OS_FILE_PATH_SEPARATOR '\\'
#else
# define OS_FILE_PATH_SEPARATOR '/'
#endif
/****************************************************************//**
This function returns a new path name after replacing the basename
in an old path with a new basename. The old_path is a full path
name including the extension. The tablename is in the normal
form "databasename/tablename". The new base name is found after
the forward slash. Both input strings are null terminated.
This function allocates memory to be returned. It is the callers
responsibility to free the return value after it is no longer needed.
@return own: new full pathname */
UNIV_INTERN
char*
os_file_make_new_pathname(
/*======================*/
const char* old_path, /*!< in: pathname */
const char* tablename) /*!< in: contains new base name */
{
ulint dir_len;
char* last_slash;
char* base_name;
char* new_path;
ulint new_path_len;
/* Split the tablename into its database and table name components.
They are separated by a '/'. */
last_slash = strrchr((char*) tablename, '/');
base_name = last_slash ? last_slash + 1 : (char*) tablename;
/* Find the offset of the last slash. We will strip off the
old basename.ibd which starts after that slash. */
last_slash = strrchr((char*) old_path, OS_FILE_PATH_SEPARATOR);
dir_len = last_slash ? last_slash - old_path : strlen(old_path);
/* allocate a new path and move the old directory path to it. */
new_path_len = dir_len + strlen(base_name) + sizeof "/.ibd";
new_path = static_cast<char*>(mem_alloc(new_path_len));
memcpy(new_path, old_path, dir_len);
ut_snprintf(new_path + dir_len,
new_path_len - dir_len,
"%c%s.ibd",
OS_FILE_PATH_SEPARATOR,
base_name);
return(new_path);
}
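/* Illustrative sketch (not part of the original source): the
transformation performed above, on made-up inputs. Only the basename is
replaced; the directory component of old_path is kept. */
#if 0
char* new_path = os_file_make_new_pathname(
"/data/old_db/old_name.ibd", "new_db/new_name");
/* new_path is now "/data/old_db/new_name.ibd" */
mem_free(new_path);
#endif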
/****************************************************************//**
This function returns a remote path name by combining a data directory
path provided in a DATA DIRECTORY clause with the tablename which is
in the form 'database/tablename'. It strips the file basename (which
is the tablename) found after the last directory in the path provided.
The full filepath created will include the database name as a directory
under the path provided. The filename is the tablename with the '.ibd'
extension. All input and output strings are null-terminated.
This function allocates memory to be returned. It is the callers
responsibility to free the return value after it is no longer needed.
@return own: A full pathname; data_dir_path/databasename/tablename.ibd */
UNIV_INTERN
char*
os_file_make_remote_pathname(
/*=========================*/
const char* data_dir_path, /*!< in: pathname */
const char* tablename, /*!< in: tablename */
const char* extension) /*!< in: file extension; ibd, cfg */
{
ulint data_dir_len;
char* last_slash;
char* new_path;
ulint new_path_len;
ut_ad(extension && strlen(extension) == 3);
/* Find the offset of the last slash. We will strip off the
old basename or tablename which starts after that slash. */
last_slash = strrchr((char*) data_dir_path, OS_FILE_PATH_SEPARATOR);
data_dir_len = last_slash ? last_slash - data_dir_path : strlen(data_dir_path);
/* allocate a new path and move the old directory path to it. */
new_path_len = data_dir_len + strlen(tablename)
+ sizeof "/." + strlen(extension);
new_path = static_cast<char*>(mem_alloc(new_path_len));
memcpy(new_path, data_dir_path, data_dir_len);
ut_snprintf(new_path + data_dir_len,
new_path_len - data_dir_len,
"%c%s.%s",
OS_FILE_PATH_SEPARATOR,
tablename,
extension);
srv_normalize_path_for_win(new_path);
return(new_path);
}
/****************************************************************//**
This function reduces a null-terminated full remote path name into
the path that is sent by MySQL for DATA DIRECTORY clause. It replaces
the 'databasename/tablename.ibd' found at the end of the path with just
'tablename'.
Since the result is always smaller than the path sent in, no new memory
is allocated. The caller should allocate memory for the path sent in.
This function manipulates that path in place.
If the path format is not as expected, just return. The result is used
to inform a SHOW CREATE TABLE command. */
UNIV_INTERN
void
os_file_make_data_dir_path(
/*========================*/
char* data_dir_path) /*!< in/out: full path/data_dir_path */
{
char* ptr;
char* tablename;
ulint tablename_len;
/* Replace the period before the extension with a null byte. */
ptr = strrchr((char*) data_dir_path, '.');
if (!ptr) {
return;
}
ptr[0] = '\0';
/* The tablename starts after the last slash. */
ptr = strrchr((char*) data_dir_path, OS_FILE_PATH_SEPARATOR);
if (!ptr) {
return;
}
ptr[0] = '\0';
tablename = ptr + 1;
/* The databasename starts after the next to last slash. */
ptr = strrchr((char*) data_dir_path, OS_FILE_PATH_SEPARATOR);
if (!ptr) {
return;
}
tablename_len = ut_strlen(tablename);
ut_memmove(++ptr, tablename, tablename_len);
ptr[tablename_len] = '\0';
}
/****************************************************************//**
The function os_file_dirname returns a directory component of a
null-terminated pathname string. In the usual case, dirname returns
the string up to, but not including, the final '/', and basename
is the component following the final '/'. Trailing '/' characters
are not counted as part of the pathname.
If path does not contain a slash, dirname returns the string ".".
Concatenating the string returned by dirname, a "/", and the basename
yields a complete pathname.
The return value is a copy of the directory component of the pathname.
The copy is allocated from heap. It is the caller responsibility
to free it after it is no longer needed.
The following list of examples (taken from SUSv2) shows the strings
returned by dirname and basename for different paths:
path dirname basename
"/usr/lib" "/usr" "lib"
"/usr/" "/" "usr"
"usr" "." "usr"
"/" "/" "/"
"." "." "."
".." "." ".."
@return own: directory component of the pathname */
UNIV_INTERN
char*
os_file_dirname(
/*============*/
const char* path) /*!< in: pathname */
{
/* Find the offset of the last slash */
const char* last_slash = strrchr(path, OS_FILE_PATH_SEPARATOR);
if (!last_slash) {
/* No slash in the path, return "." */
return(mem_strdup("."));
}
/* Ok, there is a slash */
if (last_slash == path) {
/* last slash is the first char of the path */
return(mem_strdup("/"));
}
/* Non-trivial directory component */
return(mem_strdupl(path, last_slash - path));
}
/****************************************************************//**
Creates all missing subdirectories along the given path.
@return TRUE if call succeeded FALSE otherwise */
UNIV_INTERN
ibool
os_file_create_subdirs_if_needed(
/*=============================*/
const char* path) /*!< in: path name */
{
if (srv_read_only_mode) {
ib_logf(IB_LOG_LEVEL_ERROR,
"read only mode set. Can't create subdirectories '%s'",
path);
return(FALSE);
}
char* subdir = os_file_dirname(path);
if (strlen(subdir) == 1
&& (*subdir == OS_FILE_PATH_SEPARATOR || *subdir == '.')) {
/* subdir is root or cwd, nothing to do */
mem_free(subdir);
return(TRUE);
}
/* Test if subdir exists */
os_file_type_t type;
ibool subdir_exists;
ibool success = os_file_status(subdir, &subdir_exists, &type);
if (success && !subdir_exists) {
/* subdir does not exist, create it */
success = os_file_create_subdirs_if_needed(subdir);
if (!success) {
mem_free(subdir);
return(FALSE);
}
success = os_file_create_directory(subdir, FALSE);
}
mem_free(subdir);
return(success);
}
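/* Illustrative usage sketch (not part of the original source): the
recursion above walks up the path one component at a time. For a path
such as "./a/b/c/t1.ibd" it creates "./a", "./a/b" and "./a/b/c" as
needed, but never the final component, which is left to the caller. */
#if 0
if (os_file_create_subdirs_if_needed("./a/b/c/t1.ibd")) {
/* all parent directories now exist */
}
#endif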
#ifndef UNIV_HOTBACKUP
/****************************************************************//**
Returns a pointer to the nth slot in the aio array.
@return pointer to slot */
static
os_aio_slot_t*
os_aio_array_get_nth_slot(
/*======================*/
os_aio_array_t* array, /*!< in: aio array */
ulint index) /*!< in: index of the slot */
{
ut_a(index < array->n_slots);
return(&array->slots[index]);
}
#if defined(LINUX_NATIVE_AIO)
/******************************************************************//**
Creates an io_context for native linux AIO.
@return TRUE on success. */
static
ibool
os_aio_linux_create_io_ctx(
/*=======================*/
ulint max_events, /*!< in: number of events. */
io_context_t* io_ctx) /*!< out: io_ctx to initialize. */
{
int ret;
ulint retries = 0;
retry:
memset(io_ctx, 0x0, sizeof(*io_ctx));
/* Initialize the io_ctx. Tell it how many pending
IO requests this context will handle. */
ret = io_setup(max_events, io_ctx);
if (ret == 0) {
#if defined(UNIV_AIO_DEBUG)
fprintf(stderr,
"InnoDB: Linux native AIO:"
" initialized io_ctx for segment\n");
#endif
/* Success. Return now. */
return(TRUE);
}
/* If we hit EAGAIN we'll make a few attempts before failing. */
switch (ret) {
case -EAGAIN:
if (retries == 0) {
/* First time around. */
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Warning: io_setup() failed"
" with EAGAIN. Will make %d attempts"
" before giving up.\n",
OS_AIO_IO_SETUP_RETRY_ATTEMPTS);
}
if (retries < OS_AIO_IO_SETUP_RETRY_ATTEMPTS) {
++retries;
fprintf(stderr,
"InnoDB: Warning: io_setup() attempt"
" %lu failed.\n",
retries);
os_thread_sleep(OS_AIO_IO_SETUP_RETRY_SLEEP);
goto retry;
}
/* Have tried enough. Better call it a day. */
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: io_setup() failed"
" with EAGAIN after %d attempts.\n",
OS_AIO_IO_SETUP_RETRY_ATTEMPTS);
break;
case -ENOSYS:
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: Linux Native AIO interface"
" is not supported on this platform. Please"
" check your OS documentation and install"
" appropriate binary of InnoDB.\n");
break;
default:
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: Linux Native AIO setup"
" returned following error[%d]\n", -ret);
break;
}
fprintf(stderr,
"InnoDB: You can disable Linux Native AIO by"
" setting innodb_use_native_aio = 0 in my.cnf\n");
return(FALSE);
}
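/* Note: each successful io_setup() call reserves max_events slots
against the kernel-wide fs.aio-max-nr limit, so a persistent EAGAIN
here usually means that limit is exhausted and needs raising (see
also the fs.aio-max-nr hint printed from os_aio_array_create()). */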
/******************************************************************//**
Checks if the system supports native linux aio. On some kernel
versions where native aio is supported it won't work on tmpfs. In such
cases we can't use native aio as it is not possible to mix simulated
and native aio.
@return TRUE if supported, FALSE otherwise. */
static
ibool
os_aio_native_aio_supported(void)
/*=============================*/
{
int fd;
io_context_t io_ctx;
char name[1000];
if (!os_aio_linux_create_io_ctx(1, &io_ctx)) {
/* The platform does not support native aio. */
return(FALSE);
} else if (!srv_read_only_mode) {
/* Now check if tmpdir supports native aio ops. */
fd = innobase_mysql_tmpfile(NULL);
if (fd < 0) {
ib_logf(IB_LOG_LEVEL_WARN,
"Unable to create temp file to check "
"native AIO support.");
return(FALSE);
}
} else {
srv_normalize_path_for_win(srv_log_group_home_dir);
ulint dirnamelen = strlen(srv_log_group_home_dir);
ut_a(dirnamelen < (sizeof name) - 10 - sizeof "ib_logfile");
memcpy(name, srv_log_group_home_dir, dirnamelen);
/* Add a path separator if needed. */
if (dirnamelen && name[dirnamelen - 1] != SRV_PATH_SEPARATOR) {
name[dirnamelen++] = SRV_PATH_SEPARATOR;
}
strcpy(name + dirnamelen, "ib_logfile0");
fd = ::open(name, O_RDONLY);
if (fd == -1) {
ib_logf(IB_LOG_LEVEL_WARN,
"Unable to open \"%s\" to check "
"native AIO read support.", name);
return(FALSE);
}
}
struct io_event io_event;
memset(&io_event, 0x0, sizeof(io_event));
byte* buf = static_cast<byte*>(ut_malloc(UNIV_PAGE_SIZE * 2));
byte* ptr = static_cast<byte*>(ut_align(buf, UNIV_PAGE_SIZE));
struct iocb iocb;
/* Suppress valgrind warning. */
memset(buf, 0x00, UNIV_PAGE_SIZE * 2);
memset(&iocb, 0x0, sizeof(iocb));
struct iocb* p_iocb = &iocb;
if (!srv_read_only_mode) {
io_prep_pwrite(p_iocb, fd, ptr, UNIV_PAGE_SIZE, 0);
} else {
ut_a(UNIV_PAGE_SIZE >= 512);
io_prep_pread(p_iocb, fd, ptr, 512, 0);
}
int err = io_submit(io_ctx, 1, &p_iocb);
if (err >= 1) {
/* Now collect the submitted IO request. */
err = io_getevents(io_ctx, 1, 1, &io_event, NULL);
}
ut_free(buf);
close(fd);
switch (err) {
case 1:
return(TRUE);
case -EINVAL:
case -ENOSYS:
ib_logf(IB_LOG_LEVEL_ERROR,
"Linux Native AIO not supported. You can either "
"move %s to a file system that supports native "
"AIO or you can set innodb_use_native_aio to "
"FALSE to avoid this message.",
srv_read_only_mode ? name : "tmpdir");
/* fall through. */
default:
ib_logf(IB_LOG_LEVEL_ERROR,
"Linux Native AIO check on %s returned error[%d]",
srv_read_only_mode ? name : "tmpdir", -err);
}
return(FALSE);
}
#endif /* LINUX_NATIVE_AIO */
/******************************************************************//**
Creates an aio wait array. Note that we return NULL in case of failure.
We don't care about freeing memory here because we assume that a
failure will result in the server refusing to start up.
@return own: aio array, NULL on failure */
static
os_aio_array_t*
os_aio_array_create(
/*================*/
ulint n, /*!< in: maximum number of pending aio
operations allowed; n must be
divisible by n_segments */
ulint n_segments) /*!< in: number of segments in the aio array */
{
os_aio_array_t* array;
#ifdef WIN_ASYNC_IO
OVERLAPPED* over;
#elif defined(LINUX_NATIVE_AIO)
struct io_event* io_event = NULL;
#endif /* WIN_ASYNC_IO */
ut_a(n > 0);
ut_a(n_segments > 0);
array = static_cast<os_aio_array_t*>(ut_malloc(sizeof(*array)));
memset(array, 0x0, sizeof(*array));
array->mutex = os_mutex_create();
array->not_full = os_event_create();
array->is_empty = os_event_create();
os_event_set(array->is_empty);
array->n_slots = n;
array->n_segments = n_segments;
array->slots = static_cast<os_aio_slot_t*>(
ut_malloc(n * sizeof(*array->slots)));
memset(array->slots, 0x0, n * sizeof(*array->slots));
#ifdef __WIN__
array->handles = static_cast<HANDLE*>(ut_malloc(n * sizeof(HANDLE)));
#endif /* __WIN__ */
#if defined(LINUX_NATIVE_AIO)
array->aio_ctx = NULL;
array->aio_events = NULL;
/* If we are not using native aio interface then skip this
part of initialization. */
if (!srv_use_native_aio) {
goto skip_native_aio;
}
/* Initialize the io_context array. One io_context
per segment in the array. */
array->aio_ctx = static_cast<io_context**>(
ut_malloc(n_segments * sizeof(*array->aio_ctx)));
for (ulint i = 0; i < n_segments; ++i) {
if (!os_aio_linux_create_io_ctx(n/n_segments,
&array->aio_ctx[i])) {
/* If something bad happened during aio setup
we disable linux native aio.
The disadvantage will be a small memory leak
at shutdown but that's ok compared to a crash
or a not working server.
This frequently happens when running the test suite
with many threads on a system with low fs.aio-max-nr!
*/
fprintf(stderr,
" InnoDB: Warning: Linux Native AIO disabled "
"because os_aio_linux_create_io_ctx() "
"failed. To get rid of this warning you can "
"try increasing system "
"fs.aio-max-nr to 1048576 or larger or "
"setting innodb_use_native_aio = 0 in my.cnf\n");
srv_use_native_aio = FALSE;
goto skip_native_aio;
}
}
/* Initialize the event array. One event per slot. */
io_event = static_cast<struct io_event*>(
ut_malloc(n * sizeof(*io_event)));
memset(io_event, 0x0, sizeof(*io_event) * n);
array->aio_events = io_event;
skip_native_aio:
#endif /* LINUX_NATIVE_AIO */
for (ulint i = 0; i < n; i++) {
os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(array, i);
slot->pos = i;
slot->reserved = FALSE;
#ifdef WIN_ASYNC_IO
slot->handle = CreateEvent(NULL, TRUE, FALSE, NULL);
over = &slot->control;
over->hEvent = slot->handle;
array->handles[i] = over->hEvent;
#elif defined(LINUX_NATIVE_AIO)
memset(&slot->control, 0x0, sizeof(slot->control));
slot->n_bytes = 0;
slot->ret = 0;
#endif /* WIN_ASYNC_IO */
}
return(array);
}
/************************************************************************//**
Frees an aio wait array. */
static
void
os_aio_array_free(
/*==============*/
os_aio_array_t*& array) /*!< in, own: array to free */
{
#ifdef WIN_ASYNC_IO
ulint i;
for (i = 0; i < array->n_slots; i++) {
os_aio_slot_t* slot = os_aio_array_get_nth_slot(array, i);
CloseHandle(slot->handle);
}
#endif /* WIN_ASYNC_IO */
#ifdef __WIN__
ut_free(array->handles);
#endif /* __WIN__ */
os_mutex_free(array->mutex);
os_event_free(array->not_full);
os_event_free(array->is_empty);
#if defined(LINUX_NATIVE_AIO)
if (srv_use_native_aio) {
ut_free(array->aio_events);
ut_free(array->aio_ctx);
}
#endif /* LINUX_NATIVE_AIO */
ut_free(array->slots);
ut_free(array);
array = 0;
}
/***********************************************************************
Initializes the asynchronous io system. Creates one array each for ibuf
and log i/o. Also creates one array each for read and write where each
array is divided logically into n_read_segs and n_write_segs
respectively. The caller must create an i/o handler thread for each
segment in these arrays. This function also creates the sync array.
No i/o handler thread needs to be created for that */
UNIV_INTERN
ibool
os_aio_init(
/*========*/
ulint n_per_seg, /*!< in: maximum number of pending aio
operations allowed per segment */
ulint n_read_segs, /*!< in: number of reader threads */
ulint n_write_segs, /*!< in: number of writer threads */
ulint n_slots_sync) /*!< in: number of slots in the sync aio
array */
{
os_io_init_simple();
#if defined(LINUX_NATIVE_AIO)
/* Check if native aio is supported on this system and tmpfs */
if (srv_use_native_aio && !os_aio_native_aio_supported()) {
ib_logf(IB_LOG_LEVEL_WARN, "Linux Native AIO disabled.");
srv_use_native_aio = FALSE;
}
#endif /* LINUX_NATIVE_AIO */
srv_reset_io_thread_op_info();
os_aio_read_array = os_aio_array_create(
n_read_segs * n_per_seg, n_read_segs);
if (os_aio_read_array == NULL) {
return(FALSE);
}
ulint start = (srv_read_only_mode) ? 0 : 2;
ulint n_segs = n_read_segs + start;
/* 0 is the ibuf segment and 1 is the log segment. */
for (ulint i = start; i < n_segs; ++i) {
ut_a(i < SRV_MAX_N_IO_THREADS);
srv_io_thread_function[i] = "read thread";
}
ulint n_segments = n_read_segs;
if (!srv_read_only_mode) {
os_aio_log_array = os_aio_array_create(n_per_seg, 1);
if (os_aio_log_array == NULL) {
return(FALSE);
}
++n_segments;
srv_io_thread_function[1] = "log thread";
os_aio_ibuf_array = os_aio_array_create(n_per_seg, 1);
if (os_aio_ibuf_array == NULL) {
return(FALSE);
}
++n_segments;
srv_io_thread_function[0] = "insert buffer thread";
os_aio_write_array = os_aio_array_create(
n_write_segs * n_per_seg, n_write_segs);
if (os_aio_write_array == NULL) {
return(FALSE);
}
n_segments += n_write_segs;
for (ulint i = start + n_read_segs; i < n_segments; ++i) {
ut_a(i < SRV_MAX_N_IO_THREADS);
srv_io_thread_function[i] = "write thread";
}
ut_ad(n_segments >= 4);
} else {
ut_ad(n_segments > 0);
}
os_aio_sync_array = os_aio_array_create(n_slots_sync, 1);
if (os_aio_sync_array == NULL) {
return(FALSE);
}
os_aio_n_segments = n_segments;
os_aio_validate();
os_last_printout = ut_time();
if (srv_use_native_aio) {
return(TRUE);
}
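/* Simulated AIO only: allocate one wait event per segment. The i/o
handler threads block on these events when their segment has no
pending requests (see os_aio_simulated_handle()). */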
os_aio_segment_wait_events = static_cast<os_event_t*>(
ut_malloc(n_segments * sizeof *os_aio_segment_wait_events));
for (ulint i = 0; i < n_segments; ++i) {
os_aio_segment_wait_events[i] = os_event_create();
}
return(TRUE);
}
/***********************************************************************
Frees the asynchronous io system. */
UNIV_INTERN
void
os_aio_free(void)
/*=============*/
{
if (os_aio_ibuf_array != 0) {
os_aio_array_free(os_aio_ibuf_array);
}
if (os_aio_log_array != 0) {
os_aio_array_free(os_aio_log_array);
}
if (os_aio_write_array != 0) {
os_aio_array_free(os_aio_write_array);
}
if (os_aio_sync_array != 0) {
os_aio_array_free(os_aio_sync_array);
}
os_aio_array_free(os_aio_read_array);
if (!srv_use_native_aio) {
for (ulint i = 0; i < os_aio_n_segments; i++) {
os_event_free(os_aio_segment_wait_events[i]);
}
}
ut_free(os_aio_segment_wait_events);
os_aio_segment_wait_events = 0;
os_aio_n_segments = 0;
}
#ifdef WIN_ASYNC_IO
/************************************************************************//**
Wakes up all async i/o threads in the array in Windows async i/o at
shutdown. */
static
void
os_aio_array_wake_win_aio_at_shutdown(
/*==================================*/
os_aio_array_t* array) /*!< in: aio array */
{
ulint i;
for (i = 0; i < array->n_slots; i++) {
SetEvent((array->slots + i)->handle);
}
}
#endif /* WIN_ASYNC_IO */
/************************************************************************//**
Wakes up all async i/o threads so that they know to exit themselves in
shutdown. */
UNIV_INTERN
void
os_aio_wake_all_threads_at_shutdown(void)
/*=====================================*/
{
#ifdef WIN_ASYNC_IO
/* This code wakes up all i/o threads in Windows native aio */
os_aio_array_wake_win_aio_at_shutdown(os_aio_read_array);
if (os_aio_write_array != 0) {
os_aio_array_wake_win_aio_at_shutdown(os_aio_write_array);
}
if (os_aio_ibuf_array != 0) {
os_aio_array_wake_win_aio_at_shutdown(os_aio_ibuf_array);
}
if (os_aio_log_array != 0) {
os_aio_array_wake_win_aio_at_shutdown(os_aio_log_array);
}
#elif defined(LINUX_NATIVE_AIO)
/* When using native AIO interface the io helper threads
wait on io_getevents with a timeout value of 500ms. At
each wake up these threads check the server status.
No need to do anything to wake them up. */
#endif /* WIN_ASYNC_IO */
if (srv_use_native_aio) {
return;
}
/* This loop wakes up all simulated i/o threads */
for (ulint i = 0; i < os_aio_n_segments; i++) {
os_event_set(os_aio_segment_wait_events[i]);
}
}
/************************************************************************//**
Waits until there are no pending writes in os_aio_write_array. There can
be other, synchronous, pending writes. */
UNIV_INTERN
void
os_aio_wait_until_no_pending_writes(void)
/*=====================================*/
{
ut_ad(!srv_read_only_mode);
os_event_wait(os_aio_write_array->is_empty);
}
/**********************************************************************//**
Calculates segment number for a slot.
@return segment number (which is the number used by, for example,
i/o-handler threads) */
static
ulint
os_aio_get_segment_no_from_slot(
/*============================*/
os_aio_array_t* array, /*!< in: aio wait array */
os_aio_slot_t* slot) /*!< in: slot in this array */
{
ulint segment;
ulint seg_len;
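/* Global segment numbering when not in read-only mode: segment 0 is
the ibuf thread, segment 1 the log thread, segments 2 .. 2 +
n_read_segs - 1 the read threads, and the write threads come last.
In read-only mode only read segments exist, numbered from 0. */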
if (array == os_aio_ibuf_array) {
ut_ad(!srv_read_only_mode);
segment = IO_IBUF_SEGMENT;
} else if (array == os_aio_log_array) {
ut_ad(!srv_read_only_mode);
segment = IO_LOG_SEGMENT;
} else if (array == os_aio_read_array) {
seg_len = os_aio_read_array->n_slots
/ os_aio_read_array->n_segments;
segment = (srv_read_only_mode ? 0 : 2) + slot->pos / seg_len;
} else {
ut_ad(!srv_read_only_mode);
ut_a(array == os_aio_write_array);
seg_len = os_aio_write_array->n_slots
/ os_aio_write_array->n_segments;
segment = os_aio_read_array->n_segments + 2
+ slot->pos / seg_len;
}
return(segment);
}
/**********************************************************************//**
Calculates local segment number and aio array from global segment number.
@return local segment number within the aio array */
static
ulint
os_aio_get_array_and_local_segment(
/*===============================*/
os_aio_array_t** array, /*!< out: aio wait array */
ulint global_segment)/*!< in: global segment number */
{
ulint segment;
ut_a(global_segment < os_aio_n_segments);
if (srv_read_only_mode) {
*array = os_aio_read_array;
return(global_segment);
} else if (global_segment == IO_IBUF_SEGMENT) {
*array = os_aio_ibuf_array;
segment = 0;
} else if (global_segment == IO_LOG_SEGMENT) {
*array = os_aio_log_array;
segment = 0;
} else if (global_segment < os_aio_read_array->n_segments + 2) {
*array = os_aio_read_array;
segment = global_segment - 2;
} else {
*array = os_aio_write_array;
segment = global_segment - (os_aio_read_array->n_segments + 2);
}
return(segment);
}
/*******************************************************************//**
Requests a slot in the aio array. If no slot is available, waits until
not_full-event becomes signaled.
@return pointer to slot */
static
os_aio_slot_t*
os_aio_array_reserve_slot(
/*======================*/
ulint type, /*!< in: OS_FILE_READ or OS_FILE_WRITE */
os_aio_array_t* array, /*!< in: aio array */
fil_node_t* message1,/*!< in: message to be passed along with
the aio operation */
void* message2,/*!< in: message to be passed along with
the aio operation */
os_file_t file, /*!< in: file handle */
const char* name, /*!< in: name of the file or path as a
null-terminated string */
void* buf, /*!< in: buffer where to read or from which
to write */
os_offset_t offset, /*!< in: file offset */
ulint len) /*!< in: length of the block to read or write */
{
os_aio_slot_t* slot = NULL;
#ifdef WIN_ASYNC_IO
OVERLAPPED* control;
#elif defined(LINUX_NATIVE_AIO)
struct iocb* iocb;
off_t aio_offset;
#endif /* WIN_ASYNC_IO */
ulint i;
ulint counter;
ulint slots_per_seg;
ulint local_seg;
#ifdef WIN_ASYNC_IO
ut_a((len & 0xFFFFFFFFUL) == len);
#endif /* WIN_ASYNC_IO */
/* No need for a mutex. Only reading constant fields */
slots_per_seg = array->n_slots / array->n_segments;
/* We attempt to keep adjacent blocks in the same local
segment. This can help in merging IO requests when we are
doing simulated AIO */
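/* Shifting by (UNIV_PAGE_SIZE_SHIFT + 6) assigns each run of 64
consecutive pages to the same local segment. */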
local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6))
% array->n_segments;
loop:
os_mutex_enter(array->mutex);
if (array->n_reserved == array->n_slots) {
os_mutex_exit(array->mutex);
if (!srv_use_native_aio) {
/* If the handler threads are suspended, wake them
so that we get more slots */
os_aio_simulated_wake_handler_threads();
}
os_event_wait(array->not_full);
goto loop;
}
/* We start our search for an available slot from our preferred
local segment and do a full scan of the array. We are
guaranteed to find a slot in full scan. */
for (i = local_seg * slots_per_seg, counter = 0;
counter < array->n_slots;
i++, counter++) {
i %= array->n_slots;
slot = os_aio_array_get_nth_slot(array, i);
if (slot->reserved == FALSE) {
goto found;
}
}
/* We MUST always be able to get hold of a free slot. */
ut_error;
found:
ut_a(slot->reserved == FALSE);
array->n_reserved++;
if (array->n_reserved == 1) {
os_event_reset(array->is_empty);
}
if (array->n_reserved == array->n_slots) {
os_event_reset(array->not_full);
}
slot->reserved = TRUE;
slot->reservation_time = ut_time();
slot->message1 = message1;
slot->message2 = message2;
slot->file = file;
slot->name = name;
slot->len = len;
slot->type = type;
slot->buf = static_cast<byte*>(buf);
slot->offset = offset;
slot->io_already_done = FALSE;
#ifdef WIN_ASYNC_IO
control = &slot->control;
control->Offset = (DWORD) offset & 0xFFFFFFFF;
control->OffsetHigh = (DWORD) (offset >> 32);
ResetEvent(slot->handle);
#elif defined(LINUX_NATIVE_AIO)
/* If we are not using native AIO skip this part. */
if (!srv_use_native_aio) {
goto skip_native_aio;
}
/* Check if we are dealing with 64 bit arch.
If not then make sure that offset fits in 32 bits. */
aio_offset = (off_t) offset;
ut_a(sizeof(aio_offset) >= sizeof(offset)
|| ((os_offset_t) aio_offset) == offset);
iocb = &slot->control;
if (type == OS_FILE_READ) {
io_prep_pread(iocb, file, buf, len, aio_offset);
} else {
ut_a(type == OS_FILE_WRITE);
io_prep_pwrite(iocb, file, buf, len, aio_offset);
}
iocb->data = (void*) slot;
slot->n_bytes = 0;
slot->ret = 0;
skip_native_aio:
#endif /* LINUX_NATIVE_AIO */
os_mutex_exit(array->mutex);
return(slot);
}
/*******************************************************************//**
Frees a slot in the aio array. */
static
void
os_aio_array_free_slot(
/*===================*/
os_aio_array_t* array, /*!< in: aio array */
os_aio_slot_t* slot) /*!< in: pointer to slot */
{
os_mutex_enter(array->mutex);
ut_ad(slot->reserved);
slot->reserved = FALSE;
array->n_reserved--;
if (array->n_reserved == array->n_slots - 1) {
os_event_set(array->not_full);
}
if (array->n_reserved == 0) {
os_event_set(array->is_empty);
}
#ifdef WIN_ASYNC_IO
ResetEvent(slot->handle);
#elif defined(LINUX_NATIVE_AIO)
if (srv_use_native_aio) {
memset(&slot->control, 0x0, sizeof(slot->control));
slot->n_bytes = 0;
slot->ret = 0;
/*fprintf(stderr, "Freed up Linux native slot.\n");*/
} else {
/* These fields should not be used if we are not
using native AIO. */
ut_ad(slot->n_bytes == 0);
ut_ad(slot->ret == 0);
}
#endif /* WIN_ASYNC_IO */
os_mutex_exit(array->mutex);
}
/**********************************************************************//**
Wakes up a simulated aio i/o-handler thread if it has something to do. */
static
void
os_aio_simulated_wake_handler_thread(
/*=================================*/
ulint global_segment) /*!< in: the number of the segment in the aio
arrays */
{
os_aio_array_t* array;
ulint segment;
ut_ad(!srv_use_native_aio);
segment = os_aio_get_array_and_local_segment(&array, global_segment);
ulint n = array->n_slots / array->n_segments;
segment *= n;
/* Look through n slots after the segment * n'th slot */
os_mutex_enter(array->mutex);
for (ulint i = 0; i < n; ++i) {
const os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(array, segment + i);
if (slot->reserved) {
/* Found an i/o request */
os_mutex_exit(array->mutex);
os_event_t event;
event = os_aio_segment_wait_events[global_segment];
os_event_set(event);
return;
}
}
os_mutex_exit(array->mutex);
}
/**********************************************************************//**
Wakes up simulated aio i/o-handler threads if they have something to do. */
UNIV_INTERN
void
os_aio_simulated_wake_handler_threads(void)
/*=======================================*/
{
if (srv_use_native_aio) {
/* We do not use simulated aio: do nothing */
return;
}
os_aio_recommend_sleep_for_read_threads = FALSE;
for (ulint i = 0; i < os_aio_n_segments; i++) {
os_aio_simulated_wake_handler_thread(i);
}
}
#ifdef _WIN32
/**********************************************************************//**
This function can be called if one wants to post a batch of reads and
prefers an i/o-handler thread to handle them all at once later. You must
call os_aio_simulated_wake_handler_threads later to ensure the threads
are not left sleeping! */
UNIV_INTERN
void
os_aio_simulated_put_read_threads_to_sleep()
{
/* The idea of putting background IO threads to sleep is only for
Windows when using simulated AIO. Windows XP seems to schedule
background threads too eagerly to allow for coalescing during
readahead requests. */
os_aio_array_t* array;
if (srv_use_native_aio) {
/* We do not use simulated aio: do nothing */
return;
}
os_aio_recommend_sleep_for_read_threads = TRUE;
for (ulint i = 0; i < os_aio_n_segments; i++) {
os_aio_get_array_and_local_segment(&array, i);
if (array == os_aio_read_array) {
os_event_reset(os_aio_segment_wait_events[i]);
}
}
}
#endif /* _WIN32 */
#if defined(LINUX_NATIVE_AIO)
/*******************************************************************//**
Dispatch an AIO request to the kernel.
@return TRUE on success. */
static
ibool
os_aio_linux_dispatch(
/*==================*/
os_aio_array_t* array, /*!< in: io request array. */
os_aio_slot_t* slot) /*!< in: an already reserved slot. */
{
int ret;
ulint io_ctx_index;
struct iocb* iocb;
ut_ad(slot != NULL);
ut_ad(array);
ut_a(slot->reserved);
/* Find out what we are going to work with.
The iocb struct is directly in the slot.
The io_context is one per segment. */
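/* slot->pos is in [0, n_slots); scaling it by n_segments / n_slots
yields the local segment that owns the slot, and thus the io_context
to submit to. */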
iocb = &slot->control;
io_ctx_index = (slot->pos * array->n_segments) / array->n_slots;
ret = io_submit(array->aio_ctx[io_ctx_index], 1, &iocb);
#if defined(UNIV_AIO_DEBUG)
fprintf(stderr,
"io_submit[%c] ret[%d]: slot[%p] ctx[%p] seg[%lu]\n",
(slot->type == OS_FILE_WRITE) ? 'w' : 'r', ret, slot,
array->aio_ctx[io_ctx_index], (ulong) io_ctx_index);
#endif
/* io_submit returns number of successfully
queued requests or -errno. */
if (UNIV_UNLIKELY(ret != 1)) {
errno = -ret;
return(FALSE);
}
return(TRUE);
}
#endif /* LINUX_NATIVE_AIO */
/*******************************************************************//**
NOTE! Use the corresponding macro os_aio(), not this function directly!
Requests an asynchronous i/o operation.
@return TRUE if request was queued successfully, FALSE if fail */
UNIV_INTERN
ibool
os_aio_func(
/*========*/
ulint type, /*!< in: OS_FILE_READ or OS_FILE_WRITE */
ulint mode, /*!< in: OS_AIO_NORMAL, ..., possibly ORed
to OS_AIO_SIMULATED_WAKE_LATER: the
last flag advises this function not to wake
i/o-handler threads, but the caller will
do the waking explicitly later, in this
way the caller can post several requests in
a batch; NOTE that the batch must not be
so big that it exhausts the slots in aio
arrays! NOTE that a simulated batch
may introduce hidden chances of deadlocks,
because i/os are not actually handled until
all have been posted: use with great
caution! */
const char* name, /*!< in: name of the file or path as a
null-terminated string */
os_file_t file, /*!< in: handle to a file */
void* buf, /*!< in: buffer where to read or from which
to write */
os_offset_t offset, /*!< in: file offset where to read or write */
ulint n, /*!< in: number of bytes to read or write */
fil_node_t* message1,/*!< in: message for the aio handler
(can be used to identify a completed
aio operation); ignored if mode is
OS_AIO_SYNC */
void* message2)/*!< in: message for the aio handler
(can be used to identify a completed
aio operation); ignored if mode is
OS_AIO_SYNC */
{
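/* Illustrative use via the os_aio() macro (a sketch only, not taken
from an actual caller): posting one asynchronous page read could look
like
os_aio(OS_FILE_READ, OS_AIO_NORMAL, node->name, node->handle,
buf, offset, UNIV_PAGE_SIZE, node, NULL);
where node is the fil_node_t handed back as message1 on completion. */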
os_aio_array_t* array;
os_aio_slot_t* slot;
#ifdef WIN_ASYNC_IO
ibool retval;
BOOL ret = TRUE;
DWORD len = (DWORD) n;
struct fil_node_t* dummy_mess1;
void* dummy_mess2;
ulint dummy_type;
#endif /* WIN_ASYNC_IO */
ulint wake_later;
ut_ad(buf);
ut_ad(n > 0);
ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(offset % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(os_aio_validate_skip());
#ifdef WIN_ASYNC_IO
ut_ad((n & 0xFFFFFFFFUL) == n);
#endif
wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER;
mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER);
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
mode = OS_AIO_SYNC; os_has_said_disk_full = FALSE;);
if (mode == OS_AIO_SYNC
#ifdef WIN_ASYNC_IO
&& !srv_use_native_aio
#endif /* WIN_ASYNC_IO */
) {
ibool ret;
/* This is actually an ordinary synchronous read or write:
no need to use an i/o-handler thread. NOTE that if we use
Windows async i/o, Windows does not allow us to use
ordinary synchronous os_file_read etc. on the same file,
therefore we have built a special mechanism for synchronous
wait in the Windows case.
Also note that the Performance Schema instrumentation has
been performed by current os_aio_func()'s wrapper function
pfs_os_aio_func(). So we would no longer need to call
Performance Schema instrumented os_file_read() and
os_file_write(). Instead, we should use os_file_read_func()
and os_file_write_func() */
if (type == OS_FILE_READ) {
ret = os_file_read_func(file, buf, offset, n);
} else {
ut_ad(!srv_read_only_mode);
ut_a(type == OS_FILE_WRITE);
ret = os_file_write_func(name, file, buf, offset, n);
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
os_has_said_disk_full = FALSE; ret = 0; errno = 28;);
if (!ret) {
os_file_handle_error_cond_exit(name, "os_file_write_func", TRUE, FALSE);
}
}
return(ret);
}
try_again:
switch (mode) {
case OS_AIO_NORMAL:
if (type == OS_FILE_READ) {
array = os_aio_read_array;
} else {
ut_ad(!srv_read_only_mode);
array = os_aio_write_array;
}
break;
case OS_AIO_IBUF:
ut_ad(type == OS_FILE_READ);
/* Reduce probability of deadlock bugs in connection with ibuf:
do not let the ibuf i/o handler sleep */
wake_later = FALSE;
if (srv_read_only_mode) {
array = os_aio_read_array;
} else {
array = os_aio_ibuf_array;
}
break;
case OS_AIO_LOG:
if (srv_read_only_mode) {
array = os_aio_read_array;
} else {
array = os_aio_log_array;
}
break;
case OS_AIO_SYNC:
array = os_aio_sync_array;
#if defined(LINUX_NATIVE_AIO)
/* In Linux native AIO we don't use sync IO array. */
ut_a(!srv_use_native_aio);
#endif /* LINUX_NATIVE_AIO */
break;
default:
ut_error;
array = NULL; /* Eliminate compiler warning */
}
slot = os_aio_array_reserve_slot(type, array, message1, message2, file,
name, buf, offset, n);
if (type == OS_FILE_READ) {
if (srv_use_native_aio) {
os_n_file_reads++;
os_bytes_read_since_printout += n;
#ifdef WIN_ASYNC_IO
ret = ReadFile(file, buf, (DWORD) n, &len,
&(slot->control));
#elif defined(LINUX_NATIVE_AIO)
if (!os_aio_linux_dispatch(array, slot)) {
goto err_exit;
}
#endif /* WIN_ASYNC_IO */
} else {
if (!wake_later) {
os_aio_simulated_wake_handler_thread(
os_aio_get_segment_no_from_slot(
array, slot));
}
}
} else if (type == OS_FILE_WRITE) {
ut_ad(!srv_read_only_mode);
if (srv_use_native_aio) {
os_n_file_writes++;
#ifdef WIN_ASYNC_IO
ret = WriteFile(file, buf, (DWORD) n, &len,
&(slot->control));
#elif defined(LINUX_NATIVE_AIO)
if (!os_aio_linux_dispatch(array, slot)) {
goto err_exit;
}
#endif /* WIN_ASYNC_IO */
} else {
if (!wake_later) {
os_aio_simulated_wake_handler_thread(
os_aio_get_segment_no_from_slot(
array, slot));
}
}
} else {
ut_error;
}
#ifdef WIN_ASYNC_IO
if (srv_use_native_aio) {
if ((ret && len == n)
|| (!ret && GetLastError() == ERROR_IO_PENDING)) {
/* aio was queued successfully! */
if (mode == OS_AIO_SYNC) {
/* We want a synchronous i/o operation on a
file where we also use async i/o: in Windows
we must use the same wait mechanism as for
async i/o */
retval = os_aio_windows_handle(
ULINT_UNDEFINED, slot->pos,
&dummy_mess1, &dummy_mess2,
&dummy_type);
return(retval);
}
return(TRUE);
}
goto err_exit;
}
#endif /* WIN_ASYNC_IO */
/* aio was queued successfully! */
return(TRUE);
#if defined LINUX_NATIVE_AIO || defined WIN_ASYNC_IO
err_exit:
#endif /* LINUX_NATIVE_AIO || WIN_ASYNC_IO */
os_aio_array_free_slot(array, slot);
if (os_file_handle_error(
name, type == OS_FILE_READ ? "aio read" : "aio write")) {
goto try_again;
}
return(FALSE);
}
#ifdef WIN_ASYNC_IO
/**********************************************************************//**
This function is only used in Windows asynchronous i/o.
Waits for an aio operation to complete. This function is used to wait
for completed requests. The aio array of pending requests is divided
into segments. The thread specifies which segment or slot it wants to wait
for. NOTE: this function will also take care of freeing the aio slot,
therefore no other thread is allowed to do the freeing!
@return TRUE if the aio operation succeeded */
UNIV_INTERN
ibool
os_aio_windows_handle(
/*==================*/
ulint segment, /*!< in: the number of the segment in the aio
arrays to wait for; segment 0 is the ibuf
i/o thread, segment 1 the log i/o thread,
then follow the non-ibuf read threads, and as
the last are the non-ibuf write threads; if
this is ULINT_UNDEFINED, then it means that
sync aio is used, and this parameter is
ignored */
ulint pos, /*!< this parameter is used only in sync aio:
wait for the aio slot at this position */
fil_node_t**message1, /*!< out: the messages passed with the aio
request; note that also in the case where
the aio operation failed, these output
parameters are valid and can be used to
restart the operation, for example */
void** message2,
ulint* type) /*!< out: OS_FILE_WRITE or ..._READ */
{
ulint orig_seg = segment;
os_aio_array_t* array;
os_aio_slot_t* slot;
ulint n;
ulint i;
ibool ret_val;
BOOL ret;
DWORD len;
BOOL retry = FALSE;
if (segment == ULINT_UNDEFINED) {
segment = 0;
array = os_aio_sync_array;
} else {
segment = os_aio_get_array_and_local_segment(&array, segment);
}
/* NOTE! We only access constant fields in os_aio_array. Therefore
we do not have to acquire the protecting mutex yet */
ut_ad(os_aio_validate_skip());
ut_ad(segment < array->n_segments);
n = array->n_slots / array->n_segments;
if (array == os_aio_sync_array) {
WaitForSingleObject(
os_aio_array_get_nth_slot(array, pos)->handle,
INFINITE);
i = pos;
} else {
if (orig_seg != ULINT_UNDEFINED) {
srv_set_io_thread_op_info(orig_seg, "wait Windows aio");
}
i = WaitForMultipleObjects(
(DWORD) n, array->handles + segment * n,
FALSE, INFINITE);
}
os_mutex_enter(array->mutex);
if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS
&& array->n_reserved == 0) {
*message1 = NULL;
*message2 = NULL;
os_mutex_exit(array->mutex);
return(TRUE);
}
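/* WAIT_OBJECT_0 is 0, so i can be used directly as the index of the
signaled handle within this segment's part of array->handles. */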
ut_a(i >= WAIT_OBJECT_0 && i <= WAIT_OBJECT_0 + n);
slot = os_aio_array_get_nth_slot(array, i + segment * n);
ut_a(slot->reserved);
if (orig_seg != ULINT_UNDEFINED) {
srv_set_io_thread_op_info(
orig_seg, "get windows aio return value");
}
ret = GetOverlappedResult(slot->file, &(slot->control), &len, TRUE);
*message1 = slot->message1;
*message2 = slot->message2;
*type = slot->type;
if (ret && len == slot->len) {
ret_val = TRUE;
} else if (os_file_handle_error(slot->name, "Windows aio")) {
retry = TRUE;
} else {
ret_val = FALSE;
}
os_mutex_exit(array->mutex);
if (retry) {
/* retry failed read/write operation synchronously.
No need to hold array->mutex. */
#ifdef UNIV_PFS_IO
/* This read/write does not go through os_file_read
and os_file_write APIs, need to register with
performance schema explicitly here. */
struct PSI_file_locker* locker = NULL;
register_pfs_file_io_begin(locker, slot->file, slot->len,
(slot->type == OS_FILE_WRITE)
? PSI_FILE_WRITE
: PSI_FILE_READ,
__FILE__, __LINE__);
#endif
ut_a((slot->len & 0xFFFFFFFFUL) == slot->len);
switch (slot->type) {
case OS_FILE_WRITE:
ret = WriteFile(slot->file, slot->buf,
(DWORD) slot->len, &len,
&(slot->control));
break;
case OS_FILE_READ:
ret = ReadFile(slot->file, slot->buf,
(DWORD) slot->len, &len,
&(slot->control));
break;
default:
ut_error;
}
#ifdef UNIV_PFS_IO
register_pfs_file_io_end(locker, len);
#endif
if (!ret && GetLastError() == ERROR_IO_PENDING) {
/* aio was queued successfully!
We want a synchronous i/o operation on a
file where we also use async i/o: in Windows
we must use the same wait mechanism as for
async i/o */
ret = GetOverlappedResult(slot->file,
&(slot->control),
&len, TRUE);
}
ret_val = ret && len == slot->len;
}
os_aio_array_free_slot(array, slot);
return(ret_val);
}
#endif /* WIN_ASYNC_IO */
#if defined(LINUX_NATIVE_AIO)
/******************************************************************//**
This function is only used in Linux native asynchronous i/o. This is
called from within the io-thread. If there are no completed IO requests
in the slot array, the thread calls this function to collect more
requests from the kernel.
The io-thread waits on io_getevents(), which is a blocking call, with
a timeout value. Unless the system is very heavily loaded, keeping the
io-thread very busy, the io-thread will spend most of its time waiting
in this function.
The io-thread also exits in this function. It checks server status at
each wakeup and that is why we use timed wait in io_getevents(). */
static
void
os_aio_linux_collect(
/*=================*/
os_aio_array_t* array, /*!< in/out: slot array. */
ulint segment, /*!< in: local segment no. */
ulint seg_size) /*!< in: segment size. */
{
int i;
int ret;
ulint start_pos;
ulint end_pos;
struct timespec timeout;
struct io_event* events;
struct io_context* io_ctx;
/* sanity checks. */
ut_ad(array != NULL);
ut_ad(seg_size > 0);
ut_ad(segment < array->n_segments);
/* Which part of event array we are going to work on. */
events = &array->aio_events[segment * seg_size];
/* Which io_context we are going to use. */
io_ctx = array->aio_ctx[segment];
/* Starting point of the segment we will be working on. */
start_pos = segment * seg_size;
/* End point. */
end_pos = start_pos + seg_size;
retry:
/* Initialize the events. The timeout value is arbitrary.
We probably need to experiment with it a little. */
memset(events, 0, sizeof(*events) * seg_size);
timeout.tv_sec = 0;
timeout.tv_nsec = OS_AIO_REAP_TIMEOUT;
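/* OS_AIO_REAP_TIMEOUT is the 500 ms wait referred to in
os_aio_wake_all_threads_at_shutdown(), expressed in nanoseconds. */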
ret = io_getevents(io_ctx, 1, seg_size, events, &timeout);
if (ret > 0) {
for (i = 0; i < ret; i++) {
os_aio_slot_t* slot;
struct iocb* control;
control = (struct iocb*) events[i].obj;
ut_a(control != NULL);
slot = (os_aio_slot_t*) control->data;
/* Some sanity checks. */
ut_a(slot != NULL);
ut_a(slot->reserved);
#if defined(UNIV_AIO_DEBUG)
fprintf(stderr,
"io_getevents[%c]: slot[%p] ctx[%p]"
" seg[%lu]\n",
(slot->type == OS_FILE_WRITE) ? 'w' : 'r',
slot, io_ctx, segment);
#endif
/* We are not scribbling on a previous segment. */
ut_a(slot->pos >= start_pos);
/* We have not overstepped into the next segment. */
ut_a(slot->pos < end_pos);
/* Mark this request as completed. The error handling
will be done in the calling function. */
os_mutex_enter(array->mutex);
slot->n_bytes = events[i].res;
slot->ret = events[i].res2;
slot->io_already_done = TRUE;
os_mutex_exit(array->mutex);
}
return;
}
if (UNIV_UNLIKELY(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS)) {
return;
}
/* This error handling is for any error in collecting the
IO requests. The errors, if any, for any particular IO
request are simply passed on to the calling routine. */
switch (ret) {
case -EAGAIN:
/* Not enough resources! Try again. */
case -EINTR:
/* Interrupted! I have tested the behaviour in case of an
interrupt. If we have some completed IOs available then
the return code will be the number of IOs. We get EINTR only
if there are no completed IOs and we have been interrupted. */
case 0:
/* No pending request! Go back and check again. */
goto retry;
}
/* All other errors should cause a trap for now. */
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: unexpected ret_code[%d] from io_getevents()!\n",
ret);
ut_error;
}
/**********************************************************************//**
This function is only used in Linux native asynchronous i/o.
Waits for an aio operation to complete. This function is used to wait for
the completed requests. The aio array of pending requests is divided
into segments. The thread specifies which segment or slot it wants to wait
for. NOTE: this function will also take care of freeing the aio slot,
therefore no other thread is allowed to do the freeing!
@return TRUE if the IO was successful */
UNIV_INTERN
ibool
os_aio_linux_handle(
/*================*/
ulint global_seg, /*!< in: segment number in the aio array
to wait for; segment 0 is the ibuf
i/o thread, segment 1 is log i/o thread,
then follow the non-ibuf read threads,
and the last are the non-ibuf write
threads. */
fil_node_t**message1, /*!< out: the messages passed with the */
void** message2, /*!< aio request; note that in case the
aio operation failed, these output
parameters are valid and can be used to
restart the operation. */
ulint* type) /*!< out: OS_FILE_WRITE or ..._READ */
{
ulint segment;
os_aio_array_t* array;
os_aio_slot_t* slot;
ulint n;
ulint i;
ibool ret = FALSE;
/* Should never be doing Sync IO here. */
ut_a(global_seg != ULINT_UNDEFINED);
/* Find the array and the local segment. */
segment = os_aio_get_array_and_local_segment(&array, global_seg);
n = array->n_slots / array->n_segments;
/* Loop until we have found a completed request. */
for (;;) {
ibool any_reserved = FALSE;
os_mutex_enter(array->mutex);
for (i = 0; i < n; ++i) {
slot = os_aio_array_get_nth_slot(
array, i + segment * n);
if (!slot->reserved) {
continue;
} else if (slot->io_already_done) {
/* Something for us to work on. */
goto found;
} else {
any_reserved = TRUE;
}
}
os_mutex_exit(array->mutex);
/* There is no completed request.
If there is no pending request at all,
and the system is being shut down, exit. */
if (UNIV_UNLIKELY
(!any_reserved
&& srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS)) {
*message1 = NULL;
*message2 = NULL;
return(TRUE);
}
/* Wait for some request. Note that we return
from wait iff we have found a request. */
srv_set_io_thread_op_info(global_seg,
"waiting for completed aio requests");
os_aio_linux_collect(array, segment, n);
}
found:
/* Note that there may be more than one completed IO request in the
slot array. We process them one at a time; there is room to improve
performance slightly by dealing with all of them in one sweep. */
srv_set_io_thread_op_info(global_seg,
"processing completed aio requests");
/* Ensure that we are scribbling only our segment. */
ut_a(i < n);
ut_ad(slot != NULL);
ut_ad(slot->reserved);
ut_ad(slot->io_already_done);
*message1 = slot->message1;
*message2 = slot->message2;
*type = slot->type;
if (slot->ret == 0 && slot->n_bytes == (long) slot->len) {
ret = TRUE;
} else {
errno = -slot->ret;
/* os_file_handle_error does tell us if we should retry
this IO. As it stands now, we don't do this retry when
reaping requests from a different context than
the dispatcher. This non-retry logic is the same for
windows and linux native AIO.
We should probably look into this to transparently
re-submit the IO. */
os_file_handle_error(slot->name, "Linux aio");
ret = FALSE;
}
os_mutex_exit(array->mutex);
os_aio_array_free_slot(array, slot);
return(ret);
}
#endif /* LINUX_NATIVE_AIO */
/**********************************************************************//**
Does simulated aio. This function should be called by an i/o-handler
thread.
@return TRUE if the aio operation succeeded */
UNIV_INTERN
ibool
os_aio_simulated_handle(
/*====================*/
ulint global_segment, /*!< in: the number of the segment in the aio
arrays to wait for; segment 0 is the ibuf
i/o thread, segment 1 the log i/o thread,
then follow the non-ibuf read threads, and as
the last are the non-ibuf write threads */
fil_node_t**message1, /*!< out: the messages passed with the aio
request; note that also in the case where
the aio operation failed, these output
parameters are valid and can be used to
restart the operation, for example */
void** message2,
ulint* type) /*!< out: OS_FILE_WRITE or ..._READ */
{
os_aio_array_t* array;
ulint segment;
os_aio_slot_t* consecutive_ios[OS_AIO_MERGE_N_CONSECUTIVE];
ulint n_consecutive;
ulint total_len;
ulint offs;
os_offset_t lowest_offset;
ulint biggest_age;
ulint age;
byte* combined_buf;
byte* combined_buf2;
ibool ret;
ibool any_reserved;
ulint n;
os_aio_slot_t* aio_slot;
/* Fix compiler warning */
*consecutive_ios = NULL;
segment = os_aio_get_array_and_local_segment(&array, global_segment);
restart:
/* NOTE! We only access constant fields in os_aio_array. Therefore
we do not have to acquire the protecting mutex yet */
srv_set_io_thread_op_info(global_segment,
"looking for i/o requests (a)");
ut_ad(os_aio_validate_skip());
ut_ad(segment < array->n_segments);
n = array->n_slots / array->n_segments;
/* Look through n slots after the segment * n'th slot */
if (array == os_aio_read_array
&& os_aio_recommend_sleep_for_read_threads) {
/* Give other threads chance to add several i/os to the array
at once. */
goto recommended_sleep;
}
srv_set_io_thread_op_info(global_segment,
"looking for i/o requests (b)");
/* Check if there is a slot for which the i/o has already been
done */
any_reserved = FALSE;
os_mutex_enter(array->mutex);
for (ulint i = 0; i < n; i++) {
os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(array, i + segment * n);
if (!slot->reserved) {
continue;
} else if (slot->io_already_done) {
if (os_aio_print_debug) {
fprintf(stderr,
"InnoDB: i/o for slot %lu"
" already done, returning\n",
(ulong) i);
}
aio_slot = slot;
ret = TRUE;
goto slot_io_done;
} else {
any_reserved = TRUE;
}
}
/* There is no completed request.
If there is no pending request at all,
and the system is being shut down, exit. */
if (!any_reserved && srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
os_mutex_exit(array->mutex);
*message1 = NULL;
*message2 = NULL;
return(TRUE);
}
n_consecutive = 0;
/* If there are requests at least 2 seconds old, pick the oldest one
to prevent starvation. If several requests have the same age, pick
the one at the lowest offset. */
biggest_age = 0;
lowest_offset = IB_UINT64_MAX;
for (ulint i = 0; i < n; i++) {
os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(array, i + segment * n);
if (slot->reserved) {
age = (ulint) difftime(
ut_time(), slot->reservation_time);
if ((age >= 2 && age > biggest_age)
|| (age >= 2 && age == biggest_age
&& slot->offset < lowest_offset)) {
/* Found an i/o request */
consecutive_ios[0] = slot;
n_consecutive = 1;
biggest_age = age;
lowest_offset = slot->offset;
}
}
}
if (n_consecutive == 0) {
/* There were no old requests. Look for an i/o request at the
lowest offset in the array */
lowest_offset = IB_UINT64_MAX;
for (ulint i = 0; i < n; i++) {
os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(
array, i + segment * n);
if (slot->reserved && slot->offset < lowest_offset) {
/* Found an i/o request */
consecutive_ios[0] = slot;
n_consecutive = 1;
lowest_offset = slot->offset;
}
}
}
if (n_consecutive == 0) {
/* No i/o requested at the moment */
goto wait_for_io;
}
/* if n_consecutive != 0, then we have assigned
something valid to consecutive_ios[0] */
ut_ad(n_consecutive != 0);
ut_ad(consecutive_ios[0] != NULL);
aio_slot = consecutive_ios[0];
/* Check if there are several consecutive blocks to read or write */
consecutive_loop:
for (ulint i = 0; i < n; i++) {
os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(array, i + segment * n);
if (slot->reserved
&& slot != aio_slot
&& slot->offset == aio_slot->offset + aio_slot->len
&& slot->type == aio_slot->type
&& slot->file == aio_slot->file) {
/* Found a consecutive i/o request */
consecutive_ios[n_consecutive] = slot;
n_consecutive++;
aio_slot = slot;
if (n_consecutive < OS_AIO_MERGE_N_CONSECUTIVE) {
goto consecutive_loop;
} else {
break;
}
}
}
srv_set_io_thread_op_info(global_segment, "consecutive i/o requests");
/* We have now collected n_consecutive i/o requests in the array;
allocate a single buffer which can hold all data, and perform the
i/o */
total_len = 0;
aio_slot = consecutive_ios[0];
for (ulint i = 0; i < n_consecutive; i++) {
total_len += consecutive_ios[i]->len;
}
if (n_consecutive == 1) {
/* We can use the buffer of the i/o request */
combined_buf = aio_slot->buf;
combined_buf2 = NULL;
} else {
combined_buf2 = static_cast<byte*>(
ut_malloc(total_len + UNIV_PAGE_SIZE));
ut_a(combined_buf2);
combined_buf = static_cast<byte*>(
ut_align(combined_buf2, UNIV_PAGE_SIZE));
}
/* We release the array mutex for the time of the i/o: NOTE that
this assumes that there is just one i/o-handler thread serving
a single segment of slots! */
os_mutex_exit(array->mutex);
if (aio_slot->type == OS_FILE_WRITE && n_consecutive > 1) {
/* Copy the buffers to the combined buffer */
offs = 0;
for (ulint i = 0; i < n_consecutive; i++) {
ut_memcpy(combined_buf + offs, consecutive_ios[i]->buf,
consecutive_ios[i]->len);
offs += consecutive_ios[i]->len;
}
}
srv_set_io_thread_op_info(global_segment, "doing file i/o");
/* Do the i/o with ordinary, synchronous i/o functions: */
if (aio_slot->type == OS_FILE_WRITE) {
ut_ad(!srv_read_only_mode);
ret = os_file_write(
aio_slot->name, aio_slot->file, combined_buf,
aio_slot->offset, total_len);
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
os_has_said_disk_full = FALSE;
ret = 0;
errno = 28;);
if (!ret) {
os_file_handle_error_cond_exit(aio_slot->name, "os_file_write_func", TRUE, FALSE);
}
} else {
ret = os_file_read(
aio_slot->file, combined_buf,
aio_slot->offset, total_len);
}
srv_set_io_thread_op_info(global_segment, "file i/o done");
if (aio_slot->type == OS_FILE_READ && n_consecutive > 1) {
/* Copy the combined buffer to individual buffers */
offs = 0;
for (ulint i = 0; i < n_consecutive; i++) {
ut_memcpy(consecutive_ios[i]->buf, combined_buf + offs,
consecutive_ios[i]->len);
offs += consecutive_ios[i]->len;
}
}
if (combined_buf2) {
ut_free(combined_buf2);
}
os_mutex_enter(array->mutex);
/* Mark the i/os done in slots */
for (ulint i = 0; i < n_consecutive; i++) {
consecutive_ios[i]->io_already_done = TRUE;
}
/* We return the messages for the first slot now, and if there were
several slots, the messages will be returned with subsequent calls
of this function */
slot_io_done:
ut_a(aio_slot->reserved);
*message1 = aio_slot->message1;
*message2 = aio_slot->message2;
*type = aio_slot->type;
os_mutex_exit(array->mutex);
os_aio_array_free_slot(array, aio_slot);
return(ret);
wait_for_io:
srv_set_io_thread_op_info(global_segment, "resetting wait event");
/* We wait here until there again can be i/os in the segment
of this thread */
os_event_reset(os_aio_segment_wait_events[global_segment]);
os_mutex_exit(array->mutex);
recommended_sleep:
srv_set_io_thread_op_info(global_segment, "waiting for i/o request");
os_event_wait(os_aio_segment_wait_events[global_segment]);
goto restart;
}
/**********************************************************************//**
Validates the consistency of an aio array.
@return true if ok */
static
bool
os_aio_array_validate(
/*==================*/
os_aio_array_t* array) /*!< in: aio wait array */
{
ulint i;
ulint n_reserved = 0;
os_mutex_enter(array->mutex);
ut_a(array->n_slots > 0);
ut_a(array->n_segments > 0);
for (i = 0; i < array->n_slots; i++) {
os_aio_slot_t* slot;
slot = os_aio_array_get_nth_slot(array, i);
if (slot->reserved) {
n_reserved++;
ut_a(slot->len > 0);
}
}
ut_a(array->n_reserved == n_reserved);
os_mutex_exit(array->mutex);
return(true);
}
/**********************************************************************//**
Validates the consistency of the aio system.
@return TRUE if ok */
UNIV_INTERN
ibool
os_aio_validate(void)
/*=================*/
{
os_aio_array_validate(os_aio_read_array);
if (os_aio_write_array != 0) {
os_aio_array_validate(os_aio_write_array);
}
if (os_aio_ibuf_array != 0) {
os_aio_array_validate(os_aio_ibuf_array);
}
if (os_aio_log_array != 0) {
os_aio_array_validate(os_aio_log_array);
}
if (os_aio_sync_array != 0) {
os_aio_array_validate(os_aio_sync_array);
}
return(TRUE);
}
/**********************************************************************//**
Prints pending IO requests per segment of an aio array.
We probably don't need per-segment statistics, but they can help us
during the development phase to see if the IO requests are being
distributed as expected. */
static
void
os_aio_print_segment_info(
/*======================*/
FILE* file, /*!< in: file where to print */
ulint* n_seg, /*!< in: pending IO array */
os_aio_array_t* array) /*!< in: array to process */
{
ulint i;
ut_ad(array);
ut_ad(n_seg);
ut_ad(array->n_segments > 0);
if (array->n_segments == 1) {
return;
}
fprintf(file, " [");
for (i = 0; i < array->n_segments; i++) {
if (i != 0) {
fprintf(file, ", ");
}
fprintf(file, "%lu", n_seg[i]);
}
fprintf(file, "] ");
}
/**********************************************************************//**
Prints info about the aio array. */
UNIV_INTERN
void
os_aio_print_array(
/*==============*/
FILE* file, /*!< in: file where to print */
os_aio_array_t* array) /*!< in: aio array to print */
{
ulint n_reserved = 0;
ulint n_res_seg[SRV_MAX_N_IO_THREADS];
os_mutex_enter(array->mutex);
ut_a(array->n_slots > 0);
ut_a(array->n_segments > 0);
memset(n_res_seg, 0x0, sizeof(n_res_seg));
for (ulint i = 0; i < array->n_slots; ++i) {
os_aio_slot_t* slot;
ulint seg_no;
slot = os_aio_array_get_nth_slot(array, i);
seg_no = (i * array->n_segments) / array->n_slots;
if (slot->reserved) {
++n_reserved;
++n_res_seg[seg_no];
ut_a(slot->len > 0);
}
}
ut_a(array->n_reserved == n_reserved);
fprintf(file, " %lu", (ulong) n_reserved);
os_aio_print_segment_info(file, n_res_seg, array);
os_mutex_exit(array->mutex);
}
/**********************************************************************//**
Prints info of the aio arrays. */
UNIV_INTERN
void
os_aio_print(
/*=========*/
FILE* file) /*!< in: file where to print */
{
time_t current_time;
double time_elapsed;
double avg_bytes_read;
for (ulint i = 0; i < srv_n_file_io_threads; ++i) {
fprintf(file, "I/O thread %lu state: %s (%s)",
(ulong) i,
srv_io_thread_op_info[i],
srv_io_thread_function[i]);
#ifndef _WIN32
if (!srv_use_native_aio
&& os_aio_segment_wait_events[i]->is_set) {
fprintf(file, " ev set");
}
#endif /* !_WIN32 */
fprintf(file, "\n");
}
fputs("Pending normal aio reads:", file);
os_aio_print_array(file, os_aio_read_array);
if (os_aio_write_array != 0) {
fputs(", aio writes:", file);
os_aio_print_array(file, os_aio_write_array);
}
if (os_aio_ibuf_array != 0) {
fputs(",\n ibuf aio reads:", file);
os_aio_print_array(file, os_aio_ibuf_array);
}
if (os_aio_log_array != 0) {
fputs(", log i/o's:", file);
os_aio_print_array(file, os_aio_log_array);
}
if (os_aio_sync_array != 0) {
fputs(", sync i/o's:", file);
os_aio_print_array(file, os_aio_sync_array);
}
putc('\n', file);
current_time = ut_time();
time_elapsed = 0.001 + difftime(current_time, os_last_printout);
fprintf(file,
"Pending flushes (fsync) log: %lu; buffer pool: %lu\n"
"%lu OS file reads, %lu OS file writes, %lu OS fsyncs\n",
(ulong) fil_n_pending_log_flushes,
(ulong) fil_n_pending_tablespace_flushes,
(ulong) os_n_file_reads,
(ulong) os_n_file_writes,
(ulong) os_n_fsyncs);
if (os_file_n_pending_preads != 0 || os_file_n_pending_pwrites != 0) {
fprintf(file,
"%lu pending preads, %lu pending pwrites\n",
(ulong) os_file_n_pending_preads,
(ulong) os_file_n_pending_pwrites);
}
if (os_n_file_reads == os_n_file_reads_old) {
avg_bytes_read = 0.0;
} else {
avg_bytes_read = (double) os_bytes_read_since_printout
/ (os_n_file_reads - os_n_file_reads_old);
}
fprintf(file,
"%.2f reads/s, %lu avg bytes/read,"
" %.2f writes/s, %.2f fsyncs/s\n",
(os_n_file_reads - os_n_file_reads_old)
/ time_elapsed,
(ulong) avg_bytes_read,
(os_n_file_writes - os_n_file_writes_old)
/ time_elapsed,
(os_n_fsyncs - os_n_fsyncs_old)
/ time_elapsed);
os_n_file_reads_old = os_n_file_reads;
os_n_file_writes_old = os_n_file_writes;
os_n_fsyncs_old = os_n_fsyncs;
os_bytes_read_since_printout = 0;
os_last_printout = current_time;
}
/**********************************************************************//**
Refreshes the statistics used to print per-second averages. */
UNIV_INTERN
void
os_aio_refresh_stats(void)
/*======================*/
{
os_n_file_reads_old = os_n_file_reads;
os_n_file_writes_old = os_n_file_writes;
os_n_fsyncs_old = os_n_fsyncs;
os_bytes_read_since_printout = 0;
os_last_printout = time(NULL);
}
#ifdef UNIV_DEBUG
/**********************************************************************//**
Checks that all slots in the system have been freed, that is, there are
no pending io operations.
@return TRUE if all free */
UNIV_INTERN
ibool
os_aio_all_slots_free(void)
/*=======================*/
{
os_aio_array_t* array;
ulint n_res = 0;
array = os_aio_read_array;
os_mutex_enter(array->mutex);
n_res += array->n_reserved;
os_mutex_exit(array->mutex);
if (!srv_read_only_mode) {
ut_a(os_aio_write_array != 0);
array = os_aio_write_array;
os_mutex_enter(array->mutex);
n_res += array->n_reserved;
os_mutex_exit(array->mutex);
ut_a(os_aio_ibuf_array != 0);
array = os_aio_ibuf_array;
os_mutex_enter(array->mutex);
n_res += array->n_reserved;
os_mutex_exit(array->mutex);
ut_a(os_aio_log_array != 0);
array = os_aio_log_array;
os_mutex_enter(array->mutex);
n_res += array->n_reserved;
os_mutex_exit(array->mutex);
}
array = os_aio_sync_array;
os_mutex_enter(array->mutex);
n_res += array->n_reserved;
os_mutex_exit(array->mutex);
if (n_res == 0) {
return(TRUE);
}
return(FALSE);
}
#endif /* UNIV_DEBUG */
#endif /* !UNIV_HOTBACKUP */<|fim▁end|> | #ifndef UNIV_HOTBACKUP
/* Protect the seek / read operation with a mutex */
i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES; |
<|file_name|>entail_utils.py<|end_file_name|><|fim▁begin|>#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
"""
Some utility functions for entailment project
"""
from collections import defaultdict as dd
from metrics import *
def get_eval_metric(metric_name):
if metric_name == "jaccard":
return jaccard_index
elif metric_name == "1":
return entail_score1
elif metric_name == "2":
return entail_score2
elif metric_name == "3":
return entail_score3
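# unknown metric names fall through the chain above and return None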
def get_test_pairs(test_pairs):
pairs = []
for line in open(test_pairs):
w1, w2, tag = line.split()
pairs.append((w1, w2, tag))<|fim▁hole|>
def get_contexts_above_threshold(test_set, subs_file, threshold):
words = dd(set)
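# maps each test word to the set of context line numbers where it
# appears as a substitute with probability >= threshold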
for line_num, line in enumerate(subs_file):
line = line.split()
#tw = line[0]
for i in xrange(1, len(line)-1, 2):
word = line[i]
if word in test_set:
prob = float(line[i+1])
if prob >= threshold:
words[word].add(line_num)
return words, line_num + 1<|fim▁end|> | return pairs |
<|file_name|>pipeurlinput.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pipe2py.modules.pipeurlinput
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
http://pipes.yahoo.com/pipes/docs?doc=user_inputs#URL
"""
from pipe2py.lib import utils
def pipe_urlinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for a url and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {<|fim▁hole|> 'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : url
"""
value = utils.get_input(context, conf)
value = utils.url_quote(value)
while True:
yield value<|fim▁end|> | |
<|file_name|>0013_atencion_formulario.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-07 02:03
from __future__ import unicode_literals
from django.db import migrations, models
<|fim▁hole|>
class Migration(migrations.Migration):
dependencies = [
('medgointranet', '0012_motivoanulacion'),
]
operations = [
migrations.AddField(
model_name='atencion',
name='formulario',
field=models.BooleanField(default=False, verbose_name='¿Doctor completo formulario?'),
),
]<|fim▁end|> | |
<|file_name|>Utility.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|>std::string prefix;
time_t rawtime;
std::string return_file_name(const std::string file_name)
{
std::string result_file_name;
if(prefix.length()>0)
result_file_name=prefix+"."+file_name;
else
result_file_name=file_name;
return result_file_name;
}
void delete_created_file(const std::string file_name)
{
if( remove( file_name.c_str() ) != 0 )
std::cerr<<"--- error deleting the file. "<<file_name<<std::endl;
}<|fim▁end|> | #include "Utility.hpp"
|
<|file_name|>HemfPlusRecordType.java<|end_file_name|><|fim▁begin|>/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.hemf.hemfplus.record;
import org.apache.poi.util.Internal;
@Internal
public enum HemfPlusRecordType {
header(0x4001, HemfPlusHeader.class),
endOfFile(0x4002, UnimplementedHemfPlusRecord.class),
comment(0x4003, UnimplementedHemfPlusRecord.class),
getDC(0x4004, UnimplementedHemfPlusRecord.class),
multiFormatStart(0x4005, UnimplementedHemfPlusRecord.class),
multiFormatSection(0x4006, UnimplementedHemfPlusRecord.class),
multiFormatEnd(0x4007, UnimplementedHemfPlusRecord.class),
object(0x4008, UnimplementedHemfPlusRecord.class),
clear(0x4009, UnimplementedHemfPlusRecord.class),
fillRects(0x400A, UnimplementedHemfPlusRecord.class),
drawRects(0x400B, UnimplementedHemfPlusRecord.class),
fillPolygon(0x400C, UnimplementedHemfPlusRecord.class),
drawLines(0x400D, UnimplementedHemfPlusRecord.class),
fillEllipse(0x400E, UnimplementedHemfPlusRecord.class),
drawEllipse(0x400F, UnimplementedHemfPlusRecord.class),
fillPie(0x4010, UnimplementedHemfPlusRecord.class),
drawPie(0x4011, UnimplementedHemfPlusRecord.class),
drawArc(0x4012, UnimplementedHemfPlusRecord.class),<|fim▁hole|> drawPath(0x4015, UnimplementedHemfPlusRecord.class),
fillClosedCurve(0x4016, UnimplementedHemfPlusRecord.class),
drawClosedCurve(0x4017, UnimplementedHemfPlusRecord.class),
drawCurve(0x4018, UnimplementedHemfPlusRecord.class),
drawBeziers(0x4019, UnimplementedHemfPlusRecord.class),
drawImage(0x401A, UnimplementedHemfPlusRecord.class),
drawImagePoints(0x401B, UnimplementedHemfPlusRecord.class),
drawString(0x401C, UnimplementedHemfPlusRecord.class),
setRenderingOrigin(0x401D, UnimplementedHemfPlusRecord.class),
setAntiAliasMode(0x401E, UnimplementedHemfPlusRecord.class),
setTextRenderingHint(0x401F, UnimplementedHemfPlusRecord.class),
setTextContrast(0x4020, UnimplementedHemfPlusRecord.class),
setInterpolationMode(0x4021, UnimplementedHemfPlusRecord.class),
setPixelOffsetMode(0x4022, UnimplementedHemfPlusRecord.class),
setComositingMode(0x4023, UnimplementedHemfPlusRecord.class),
setCompositingQuality(0x4024, UnimplementedHemfPlusRecord.class),
save(0x4025, UnimplementedHemfPlusRecord.class),
restore(0x4026, UnimplementedHemfPlusRecord.class),
beginContainer(0x4027, UnimplementedHemfPlusRecord.class),
beginContainerNoParams(0x428, UnimplementedHemfPlusRecord.class),
endContainer(0x4029, UnimplementedHemfPlusRecord.class),
setWorldTransform(0x402A, UnimplementedHemfPlusRecord.class),
resetWorldTransform(0x402B, UnimplementedHemfPlusRecord.class),
multiplyWorldTransform(0x402C, UnimplementedHemfPlusRecord.class),
translateWorldTransform(0x402D, UnimplementedHemfPlusRecord.class),
scaleWorldTransform(0x402E, UnimplementedHemfPlusRecord.class),
rotateWorldTransform(0x402F, UnimplementedHemfPlusRecord.class),
setPageTransform(0x4030, UnimplementedHemfPlusRecord.class),
resetClip(0x4031, UnimplementedHemfPlusRecord.class),
setClipRect(0x4032, UnimplementedHemfPlusRecord.class),
setClipRegion(0x4033, UnimplementedHemfPlusRecord.class),
setClipPath(0x4034, UnimplementedHemfPlusRecord.class),
offsetClip(0x4035, UnimplementedHemfPlusRecord.class),
drawDriverstring(0x4036, UnimplementedHemfPlusRecord.class),
strokeFillPath(0x4037, UnimplementedHemfPlusRecord.class),
serializableObject(0x4038, UnimplementedHemfPlusRecord.class),
setTSGraphics(0x4039, UnimplementedHemfPlusRecord.class),
setTSClip(0x403A, UnimplementedHemfPlusRecord.class);
public final long id;
public final Class<? extends HemfPlusRecord> clazz;
HemfPlusRecordType(long id, Class<? extends HemfPlusRecord> clazz) {
this.id = id;
this.clazz = clazz;
}
public static HemfPlusRecordType getById(long id) {
for (HemfPlusRecordType wrt : values()) {
if (wrt.id == id) return wrt;
}
return null;
}
}<|fim▁end|> | fillRegion(0x4013, UnimplementedHemfPlusRecord.class),
fillPath(0x4014, UnimplementedHemfPlusRecord.class), |
<|file_name|>rigidBodyState.C<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration | Website: https://openfoam.org
\\ / A nd | Copyright (C) 2019-2021 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "rigidBodyState.H"
#include "fvMeshMoversMotionSolver.H"
#include "motionSolver.H"
#include "unitConversion.H"
#include "addToRunTimeSelectionTable.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
namespace Foam
{
namespace functionObjects
{
defineTypeNameAndDebug(rigidBodyState, 0);
addToRunTimeSelectionTable
(
functionObject,
rigidBodyState,
dictionary
);
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::functionObjects::rigidBodyState::rigidBodyState
(
const word& name,
const Time& runTime,
const dictionary& dict
)
:
fvMeshFunctionObject(name, runTime, dict),
logFiles(obr_, name),
names_(motion().movingBodyNames())
{
read(dict);
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
<|fim▁hole|>// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
const Foam::RBD::rigidBodyMotion&
Foam::functionObjects::rigidBodyState::motion() const
{
const fvMeshMovers::motionSolver& mover =
refCast<const fvMeshMovers::motionSolver>(mesh_.mover());
return (refCast<const RBD::rigidBodyMotion>(mover.motion()));
}
bool Foam::functionObjects::rigidBodyState::read(const dictionary& dict)
{
fvMeshFunctionObject::read(dict);
angleFormat_ = dict.lookupOrDefault<word>("angleFormat", "radians");
resetNames(names_);
return true;
}
void Foam::functionObjects::rigidBodyState::writeFileHeader(const label i)
{
writeHeader(this->files()[i], "Motion State");
writeHeaderValue(this->files()[i], "Angle Units", angleFormat_);
writeCommented(this->files()[i], "Time");
this->files()[i]<< tab
<< "Centre of rotation" << tab
<< "Orientation" << tab
<< "Linear velocity" << tab
<< "Angular velocity" << endl;
}
bool Foam::functionObjects::rigidBodyState::execute()
{
return true;
}
bool Foam::functionObjects::rigidBodyState::write()
{
logFiles::write();
if (Pstream::master())
{
const RBD::rigidBodyMotion& motion = this->motion();
forAll(names_, i)
{
const label bodyID = motion.bodyID(names_[i]);
const spatialTransform CofR(motion.X0(bodyID));
const spatialVector vCofR(motion.v(bodyID, Zero));
vector rotationAngle
(
quaternion(CofR.E()).eulerAngles(quaternion::XYZ)
);
vector angularVelocity(vCofR.w());
if (angleFormat_ == "degrees")
{
rotationAngle.x() = radToDeg(rotationAngle.x());
rotationAngle.y() = radToDeg(rotationAngle.y());
rotationAngle.z() = radToDeg(rotationAngle.z());
angularVelocity.x() = radToDeg(angularVelocity.x());
angularVelocity.y() = radToDeg(angularVelocity.y());
angularVelocity.z() = radToDeg(angularVelocity.z());
}
writeTime(files()[i]);
files()[i]
<< tab
<< CofR.r() << tab
<< rotationAngle << tab
<< vCofR.l() << tab
<< angularVelocity << endl;
}
}
return true;
}
// ************************************************************************* //<|fim▁end|> | Foam::functionObjects::rigidBodyState::~rigidBodyState()
{}
|
<|file_name|>auth.js<|end_file_name|><|fim▁begin|>module.exports = {
'Auth': {<|fim▁hole|> }
}<|fim▁end|> | 'Token': process.env['NODE_AUTH'] || '' |
<|file_name|>PostFile.py<|end_file_name|><|fim▁begin|>from models import db
from models.Post import Post
class PostFile(db.Model):
__tablename__ = 'PostFile'
Id = db.Column(db.Integer, primary_key = True)
Post = db.Column(db.Integer, db.ForeignKey(Post.Id))
FileName = db.Column(db.String(128))
def __init__(self, post, file):<|fim▁hole|> self.FileName = file<|fim▁end|> |
self.Post = post |
<|file_name|>mixins.py<|end_file_name|><|fim▁begin|># copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""This module contains some mixins for the different nodes.
"""
from .exceptions import (AstroidBuildingException, InferenceError,
NotFoundError)
class BlockRangeMixIn(object):
"""override block range """
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
self.tolineno = lastchild.tolineno
self.blockstart_tolineno = self._blockstart_toline()
def _elsed_block_range(self, lineno, orelse, last=None):
"""handle block line numbers range for try/finally, for, if and while
statements
"""
if lineno == self.fromlineno:
return lineno, lineno
if orelse:
if lineno >= orelse[0].fromlineno:
return lineno, orelse[-1].tolineno
return lineno, orelse[0].fromlineno - 1
return lineno, last or self.tolineno
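A pure-Python sketch of the branch logic above, with assumed line numbers: an `if` starting at line 3 whose `else` body spans lines 6-8.

# Mirrors _elsed_block_range for a non-empty orelse and last=None (numbers assumed).
def elsed_range(lineno, fromlineno=3, orelse_from=6, orelse_to=8):
    if lineno == fromlineno:
        return lineno, lineno        # the `if` line itself
    if lineno >= orelse_from:
        return lineno, orelse_to     # inside the else branch
    return lineno, orelse_from - 1   # body lines before the else

assert elsed_range(3) == (3, 3)
assert elsed_range(4) == (4, 5)
assert elsed_range(7) == (7, 8)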
class FilterStmtsMixin(object):
"""Mixin for statement filtering and assignment type"""
def _get_filtered_stmts(self, _, node, _stmts, mystmt):
"""method used in _filter_stmts to get statemtents and trigger break"""
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
def ass_type(self):
return self
class AssignTypeMixin(object):
def ass_type(self):
return self
def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt):
"""method used in filter_stmts"""
if self is mystmt:
return _stmts, True
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
class ParentAssignTypeMixin(AssignTypeMixin):
def ass_type(self):
return self.parent.ass_type()
class FromImportMixIn(FilterStmtsMixin):
"""MixIn for From and Import Nodes"""
<|fim▁hole|> return name
def do_import_module(self, modname):
"""return the ast for a module whose name is <modname> imported by <self>
"""
# handle special case where we are on a package node importing a module
# using the same name as the package, which may end in an infinite loop
# on relative imports
# XXX: no more needed ?
mymodule = self.root()
level = getattr(self, 'level', None) # Import as no level
# XXX we should investigate deeper if we really want to check
# importing itself: modname and mymodule.name be relative or absolute
if mymodule.relative_to_absolute_name(modname, level) == mymodule.name:
# FIXME: we used to raise InferenceError here, but why ?
return mymodule
try:
return mymodule.import_module(modname, level=level)
except AstroidBuildingException:
raise InferenceError(modname)
except SyntaxError, ex:
raise InferenceError(str(ex))
def real_name(self, asname):
"""get name from 'as' name"""
for name, _asname in self.names:
if name == '*':
return asname
if not _asname:
name = name.split('.', 1)[0]
_asname = name
if asname == _asname:
return name
raise NotFoundError(asname)<|fim▁end|> | def _infer_name(self, frame, name): |
<|file_name|>base.py<|end_file_name|><|fim▁begin|>"""
Base class for ensemble-based estimators.
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
import multiprocessing
import numpy as np
from ..base import clone
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
class BaseEnsemble(BaseEstimator, MetaEstimatorMixin):
"""Base class for all ensemble classes.
Warning: This class should not be used directly. Use derived classes
instead.
Parameters
----------
base_estimator : object, optional (default=None)
The base estimator from which the ensemble is built.
n_estimators : integer
The number of estimators in the ensemble.
estimator_params : list of strings
The list of attributes to use as parameters when instantiating a
new base estimator. If none are given, default parameters are used.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
"""
def __init__(self, base_estimator, n_estimators=10,
estimator_params=tuple()):
# Set parameters
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.estimator_params = estimator_params
# Don't instantiate estimators now! Parameters of base_estimator might
        # still change. E.g., when grid-searching with the nested object syntax.
# This needs to be filled by the derived classes.
self.estimators_ = []
def _validate_estimator(self, default=None):
"""Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute."""
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than zero, "
"got {0}.".format(self.n_estimators))
if self.base_estimator is not None:
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if self.base_estimator_ is None:
raise ValueError("base_estimator cannot be None")
def _make_estimator(self, append=True):
"""Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.base_estimator_)
estimator.set_params(**dict((p, getattr(self, p))
for p in self.estimator_params))
if append:
self.estimators_.append(estimator)
return estimator
def __len__(self):
"""Returns the number of estimators in the ensemble."""
return len(self.estimators_)
def __getitem__(self, index):
"""Returns the index'th estimator in the ensemble."""
return self.estimators_[index]
def __iter__(self):
"""Returns iterator over estimators in the ensemble."""
return iter(self.estimators_)
def _partition_estimators(n_estimators, n_jobs):
"""Private function used to partition estimators between jobs."""
# Compute the number of jobs
n_jobs = min(_get_n_jobs(n_jobs), n_estimators)
# Partition estimators between jobs
n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs,
dtype=np.int)
n_estimators_per_job[:n_estimators % n_jobs] += 1
starts = np.cumsum(n_estimators_per_job)
return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
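For instance, splitting 10 estimators across 3 jobs gives per-job counts [4, 3, 3] and boundary indices [0, 4, 7, 10] — a sketch of the expected output, noting that n_jobs is capped by _get_n_jobs and by the estimator count:

n_jobs, counts, starts = _partition_estimators(10, 3)
assert (n_jobs, counts, starts) == (3, [4, 3, 3], [0, 4, 7, 10])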
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples<|fim▁hole|> >>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(multiprocessing.cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs<|fim▁end|> | --------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4 |
<|file_name|>cloned.rs<|end_file_name|><|fim▁begin|>#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::clone::Clone;
// #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub enum Option<T> {
// /// No value
// #[stable(feature = "rust1", since = "1.0.0")]
// None,
// /// Some value `T`
// #[stable(feature = "rust1", since = "1.0.0")]
// Some(T)
// }
// impl<'a, T: Clone> Option<&'a T> {
// /// Maps an Option<&T> to an Option<T> by cloning the contents of the Option.
// #[stable(feature = "rust1", since = "1.0.0")]
// pub fn cloned(self) -> Option<T> {
// self.map(|t| t.clone())
// }
// }
struct A<T> {
value: T
}
impl Clone for A<T> {
fn clone(&self) -> Self
{<|fim▁hole|> }
type T = i32; // T: Clone
#[test]
fn cloned_test1() {
let a: A<T> = A { value: 68 };
let x: Option<&A<T>> = Some::<&A<T>>(&a);
let cloned: Option<A<T>> = x.cloned();
assert_eq!(x.unwrap().value, 68);
assert_eq!(cloned.unwrap().value, 68);
}
}<|fim▁end|> | A { value: self.value }
} |
<|file_name|>coord_units.rs<|end_file_name|><|fim▁begin|>//! `userSpaceOnUse` or `objectBoundingBox` values.
use cssparser::Parser;
use crate::error::*;
use crate::parsers::Parse;
/// Defines the units to be used for things that can consider a
/// coordinate system in terms of the current transformation, or in
/// terms of the current object's bounding box.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CoordUnits {
UserSpaceOnUse,
ObjectBoundingBox,
}
impl Parse for CoordUnits {
fn parse<'i>(parser: &mut Parser<'i, '_>) -> Result<Self, ParseError<'i>> {
Ok(parse_identifiers!(
parser,
"userSpaceOnUse" => CoordUnits::UserSpaceOnUse,
"objectBoundingBox" => CoordUnits::ObjectBoundingBox,
)?)
}
}
/// Creates a newtype around `CoordUnits`, with a default value.
///
/// SVG attributes that can take `userSpaceOnUse` or
/// `objectBoundingBox` values often have different default values
/// depending on the type of SVG element. We use this macro to create
/// a newtype for each SVG element and attribute that requires values
/// of this type. The newtype provides an `impl Default` with the
/// specified `$default` value.
#[macro_export]
macro_rules! coord_units {
($name:ident, $default:expr) => {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct $name(pub CoordUnits);
impl Default for $name {
fn default() -> Self {
$name($default)
}
}
impl From<$name> for CoordUnits {
fn from(u: $name) -> Self {
u.0
}
}
impl $crate::parsers::Parse for $name {
fn parse<'i>(
parser: &mut ::cssparser::Parser<'i, '_>,
) -> Result<Self, $crate::error::ParseError<'i>> {
Ok($name($crate::coord_units::CoordUnits::parse(parser)?))
}
}
};
}
#[cfg(test)]
mod tests {
use super::*;
coord_units!(MyUnits, CoordUnits::ObjectBoundingBox);
#[test]
fn parsing_invalid_strings_yields_error() {
assert!(MyUnits::parse_str("").is_err());
assert!(MyUnits::parse_str("foo").is_err());
}
#[test]
fn parses_paint_server_units() {
assert_eq!(
MyUnits::parse_str("userSpaceOnUse").unwrap(),
MyUnits(CoordUnits::UserSpaceOnUse)
);
assert_eq!(
MyUnits::parse_str("objectBoundingBox").unwrap(),
MyUnits(CoordUnits::ObjectBoundingBox)
);
}
#[test]
fn has_correct_default() {
assert_eq!(MyUnits::default(), MyUnits(CoordUnits::ObjectBoundingBox));
}
#[test]
fn converts_to_coord_units() {
assert_eq!(<|fim▁hole|> CoordUnits::ObjectBoundingBox
);
}
}<|fim▁end|> | CoordUnits::from(MyUnits(CoordUnits::ObjectBoundingBox)), |
<|file_name|>option_Dock.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
/***************************************************************************
PisteCreatorDockWidget_OptionDock
Option dock for Qgis plugins
Option dock initialize
-------------------
begin : 2017-07-25
last : 2017-10-20
copyright : (C) 2017 by Peillet Sebastien
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import os
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtWidgets import QDialog
from qgis.PyQt import uic
from qgis.PyQt.QtCore import pyqtSignal, QSettings
from qgis.gui import QgsColorButton
def hex_to_rgb(value):
value = value.lstrip("#")
lv = len(value)
    return list(int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3))
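For example, the default 't_color' used below converts as expected (note the floor division above, which keeps the slice bounds integral on Python 3):

assert hex_to_rgb('#00d003') == [0, 208, 3]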
class OptionDock(QDialog):
closingPlugin = pyqtSignal()
def __init__(self, plugin, graph_widget, canvas, parent=None):
"""Constructor."""
super(OptionDock, self).__init__(parent)
uic.loadUi(os.path.join(os.path.dirname(__file__), "Option_dock.ui"), self)
self.settings = QSettings()
self.initConfig()
self.graph_widget = graph_widget
self.PisteCreatorTool = plugin.PisteCreatorTool
self.canvas = canvas
self.plugin = plugin
self.saveButton.clicked.connect(self.saveconfig)
def initConfig(self):
self.sideDistInt = self.settings.value(
"PisteCreator/calculation_variable/side_distance", 6
)
self.sideDistSpinBox.setValue(int(self.sideDistInt))
self.aslopeInt = self.settings.value(
"PisteCreator/graphical_visualisation/tolerated_a_slope", 10
)
self.toleratedASlopeSpinBox.setValue(int(self.aslopeInt))
self.cslopeInt = self.settings.value(
"PisteCreator/graphical_visualisation/tolerated_c_slope", 4
)
self.toleratedCSlopeSpinBox.setValue(int(self.cslopeInt))
self.lengthInt = self.settings.value(<|fim▁hole|> )
self.maxLengthSpinBox.setValue(int(self.lengthInt))
        self.lengthBool = self.settings.value(
            "PisteCreator/graphical_visualisation/max_length_hold", False, type=bool
        )  # type=bool: QSettings may return the string 'false', which bool() would treat as True
        self.maxLengthCheckBox.setChecked(self.lengthBool)
self.swathInt = self.settings.value(
"PisteCreator/graphical_visualisation/swath_distance", 30
)
self.swathDistSpinBox.setValue(int(self.swathInt))
        self.swathBool = self.settings.value(
            "PisteCreator/graphical_visualisation/swath_display", True, type=bool
        )
        self.swathDistCheckBox.setChecked(self.swathBool)
        self.interpolBool = self.settings.value(
            "PisteCreator/calculation_variable/interpolate_act", True, type=bool
        )
        self.interpolCheckBox.setChecked(self.interpolBool)
self.t_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/t_color", "#00d003"
)
)
self.f_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/f_color", "#ff0000"
)
)
self.tl_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/tl_color", "#236433"
)
)
self.fl_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/fl_color", "#b80000"
)
)
self.b_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/b_color", "#0fff33"
)
)
self.a_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/a_color", "#48b0d2"
)
)
self.T_ColorButton.setColor(self.t_color)
self.F_ColorButton.setColor(self.f_color)
self.TL_ColorButton.setColor(self.tl_color)
self.FL_ColorButton.setColor(self.fl_color)
self.B_ColorButton.setColor(self.b_color)
self.A_ColorButton.setColor(self.a_color)
def saveconfig(self):
# self.checkChanges()
self.sideDistInt = self.sideDistSpinBox.value()
self.aslopeInt = self.toleratedASlopeSpinBox.value()
self.cslopeInt = self.toleratedCSlopeSpinBox.value()
self.lengthInt = self.maxLengthSpinBox.value()
self.lengthBool = self.maxLengthCheckBox.isChecked()
self.swathInt = self.swathDistSpinBox.value()
self.swathBool = self.swathDistCheckBox.isChecked()
self.interpolBool = self.interpolCheckBox.isChecked()
self.t_color = self.T_ColorButton.color().name()
self.f_color = self.F_ColorButton.color().name()
self.tl_color = self.TL_ColorButton.color().name()
self.fl_color = self.FL_ColorButton.color().name()
self.a_color = self.A_ColorButton.color().name()
self.b_color = self.B_ColorButton.color().name()
self.settings.setValue(
"PisteCreator/calculation_variable/side_distance",
self.sideDistSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/tolerated_a_slope",
self.toleratedASlopeSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/tolerated_c_slope",
self.toleratedCSlopeSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/max_length",
self.maxLengthSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/max_length_hold",
self.maxLengthCheckBox.isChecked(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/swath_distance",
self.swathDistSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/swath_display",
self.swathDistCheckBox.isChecked(),
)
self.settings.setValue(
"PisteCreator/calculation_variable/interpolate_act",
self.interpolCheckBox.isChecked(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/t_color",
self.T_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/f_color",
self.F_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/tl_color",
self.TL_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/fl_color",
self.FL_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/b_color",
self.B_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/a_color",
self.A_ColorButton.color().name(),
)
try:
if self.canvas.mapTool().map_tool_name == "SlopeMapTool":
self.plugin.PisteCreatorTool.configChange(
self.sideDistInt,
self.aslopeInt,
self.cslopeInt,
self.lengthInt,
self.lengthBool,
self.swathInt,
self.swathBool,
self.interpolBool,
self.t_color,
self.f_color,
self.tl_color,
self.fl_color,
self.b_color,
self.a_color,
)
except AttributeError:
pass
self.close()
def closeEvent(self, event):
self.closingPlugin.emit()
event.accept()<|fim▁end|> | "PisteCreator/graphical_visualisation/max_length", 50 |
<|file_name|>FIBTableAction.java<|end_file_name|><|fim▁begin|>/**
*
* Copyright (c) 2013-2014, Openflexo
* Copyright (c) 2011-2012, AgileBirds
*
* This file is part of Gina-core, a component of the software infrastructure
* developed at Openflexo.
*
*
* Openflexo is dual-licensed under the European Union Public License (EUPL, either
* version 1.1 of the License, or any later version ), which is available at
* https://joinup.ec.europa.eu/software/page/eupl/licence-eupl
* and the GNU General Public License (GPL, either version 3 of the License, or any
* later version), which is available at http://www.gnu.org/licenses/gpl.html .
*
* You can redistribute it and/or modify under the terms of either of these licenses
*
* If you choose to redistribute it and/or modify under the terms of the GNU GPL, you
* must include the following additional permission.
*
* Additional permission under GNU GPL version 3 section 7
*
* If you modify this Program, or any covered work, by linking or
* combining it with software containing parts covered by the terms
* of EPL 1.0, the licensors of this Program grant you additional permission
* to convey the resulting work. *
*
* This software is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE.
*
* See http://www.openflexo.org/license.html for details.
*
*
* Please contact Openflexo ([email protected])
* or visit www.openflexo.org if you need additional information.
*
*/
package org.openflexo.gina.model.widget;
import java.util.logging.Logger;
import org.openflexo.connie.BindingModel;
import org.openflexo.connie.DataBinding;
import org.openflexo.gina.model.FIBComponent.LocalizationEntryRetriever;
import org.openflexo.gina.model.FIBModelObject;
import org.openflexo.pamela.annotations.CloningStrategy;
import org.openflexo.pamela.annotations.CloningStrategy.StrategyType;
import org.openflexo.pamela.annotations.DefineValidationRule;
import org.openflexo.pamela.annotations.DeserializationFinalizer;
import org.openflexo.pamela.annotations.Getter;
import org.openflexo.pamela.annotations.ImplementationClass;
import org.openflexo.pamela.annotations.Import;
import org.openflexo.pamela.annotations.Imports;
import org.openflexo.pamela.annotations.ModelEntity;
import org.openflexo.pamela.annotations.PropertyIdentifier;
import org.openflexo.pamela.annotations.Setter;
import org.openflexo.pamela.annotations.XMLAttribute;
import org.openflexo.pamela.annotations.XMLElement;
@ModelEntity(isAbstract = true)
@ImplementationClass(FIBTableAction.FIBTableActionImpl.class)
@Imports({ @Import(FIBTableAction.FIBAddAction.class), @Import(FIBTableAction.FIBRemoveAction.class),
@Import(FIBTableAction.FIBCustomAction.class) })
public abstract interface FIBTableAction extends FIBModelObject {
public static enum ActionType {
Add, Delete, Custom
}
@PropertyIdentifier(type = FIBTable.class)
public static final String OWNER_KEY = "owner";
@PropertyIdentifier(type = DataBinding.class)
public static final String METHOD_KEY = "method";
@PropertyIdentifier(type = DataBinding.class)
public static final String IS_AVAILABLE_KEY = "isAvailable";
@PropertyIdentifier(type = Boolean.class)
public static final String ALLOWS_BATCH_EXECUTION_KEY = "allowsBatchExecution";
@Getter(value = OWNER_KEY /*, inverse = FIBTable.ACTIONS_KEY*/)
@CloningStrategy(StrategyType.IGNORE)
public FIBTable getOwner();
@Setter(OWNER_KEY)
public void setOwner(FIBTable table);
@Getter(value = METHOD_KEY)
@XMLAttribute
public DataBinding<Object> getMethod();
@Setter(METHOD_KEY)
public void setMethod(DataBinding<Object> method);
@Getter(value = IS_AVAILABLE_KEY)
@XMLAttribute
public DataBinding<Boolean> getIsAvailable();
@Setter(IS_AVAILABLE_KEY)
public void setIsAvailable(DataBinding<Boolean> isAvailable);
public abstract ActionType getActionType();
@DeserializationFinalizer
public void finalizeDeserialization();
public void searchLocalized(LocalizationEntryRetriever retriever);
@Getter(value = ALLOWS_BATCH_EXECUTION_KEY, defaultValue = "true")
@XMLAttribute
public boolean getAllowsBatchExecution();
@Setter(ALLOWS_BATCH_EXECUTION_KEY)<|fim▁hole|> private static final Logger logger = Logger.getLogger(FIBTableAction.class.getPackage().getName());
private DataBinding<Object> method;
private DataBinding<Boolean> isAvailable;
@Override
public FIBTable getComponent() {
return getOwner();
}
@Override
public void setOwner(FIBTable ownerTable) {
// BindingModel oldBindingModel = getBindingModel();
performSuperSetter(OWNER_KEY, ownerTable);
}
@Override
public DataBinding<Object> getMethod() {
if (method == null) {
method = new DataBinding<>(this, Object.class, DataBinding.BindingDefinitionType.EXECUTE);
method.setBindingName("method");
}
return method;
}
@Override
public void setMethod(DataBinding<Object> method) {
if (method != null) {
method.setOwner(this);
method.setDeclaredType(Object.class);
method.setBindingDefinitionType(DataBinding.BindingDefinitionType.EXECUTE);
method.setBindingName("method");
}
this.method = method;
}
@Override
public DataBinding<Boolean> getIsAvailable() {
if (isAvailable == null) {
isAvailable = new DataBinding<>(this, Boolean.class, DataBinding.BindingDefinitionType.GET);
isAvailable.setBindingName("isAvailable");
}
return isAvailable;
}
@Override
public void setIsAvailable(DataBinding<Boolean> isAvailable) {
if (isAvailable != null) {
isAvailable.setOwner(this);
isAvailable.setDeclaredType(Boolean.class);
isAvailable.setBindingDefinitionType(DataBinding.BindingDefinitionType.GET);
isAvailable.setBindingName("isAvailable");
}
this.isAvailable = isAvailable;
}
@Override
public BindingModel getBindingModel() {
if (getOwner() != null) {
return getOwner().getActionBindingModel();
}
return null;
}
@Override
public void finalizeDeserialization() {
logger.fine("finalizeDeserialization() for FIBTableAction " + getName());
if (method != null) {
method.decode();
}
}
@Override
public abstract ActionType getActionType();
@Override
public void searchLocalized(LocalizationEntryRetriever retriever) {
retriever.foundLocalized(getName());
}
@Override
public String getPresentationName() {
return getName();
}
}
@ModelEntity
@ImplementationClass(FIBAddAction.FIBAddActionImpl.class)
@XMLElement(xmlTag = "AddAction")
public static interface FIBAddAction extends FIBTableAction {
public static abstract class FIBAddActionImpl extends FIBTableActionImpl implements FIBAddAction {
@Override
public ActionType getActionType() {
return ActionType.Add;
}
}
}
@ModelEntity
@ImplementationClass(FIBRemoveAction.FIBRemoveActionImpl.class)
@XMLElement(xmlTag = "RemoveAction")
public static interface FIBRemoveAction extends FIBTableAction {
public static abstract class FIBRemoveActionImpl extends FIBTableActionImpl implements FIBRemoveAction {
@Override
public ActionType getActionType() {
return ActionType.Delete;
}
}
}
@ModelEntity
@ImplementationClass(FIBCustomAction.FIBCustomActionImpl.class)
@XMLElement(xmlTag = "CustomAction")
public static interface FIBCustomAction extends FIBTableAction {
@PropertyIdentifier(type = boolean.class)
public static final String IS_STATIC_KEY = "isStatic";
@Getter(value = IS_STATIC_KEY, defaultValue = "false")
@XMLAttribute
public boolean isStatic();
@Setter(IS_STATIC_KEY)
public void setStatic(boolean isStatic);
public static abstract class FIBCustomActionImpl extends FIBTableActionImpl implements FIBCustomAction {
@Override
public ActionType getActionType() {
return ActionType.Custom;
}
}
}
@DefineValidationRule
public static class MethodBindingMustBeValid extends BindingMustBeValid<FIBTableAction> {
public MethodBindingMustBeValid() {
super("'method'_binding_is_not_valid", FIBTableAction.class);
}
@Override
public DataBinding<?> getBinding(FIBTableAction object) {
return object.getMethod();
}
}
@DefineValidationRule
public static class IsAvailableBindingMustBeValid extends BindingMustBeValid<FIBTableAction> {
public IsAvailableBindingMustBeValid() {
super("'is_available'_binding_is_not_valid", FIBTableAction.class);
}
@Override
public DataBinding<?> getBinding(FIBTableAction object) {
return object.getIsAvailable();
}
}
}<|fim▁end|> | public void setAllowsBatchExecution(boolean allowsBatchExecution);
public static abstract class FIBTableActionImpl extends FIBModelObjectImpl implements FIBTableAction {
|
<|file_name|>XantoI2C.cpp<|end_file_name|><|fim▁begin|>#include "XantoI2C.h"
XantoI2C::XantoI2C(uint8_t clock_pin, uint8_t data_pin, uint16_t delay_time_us):
clock_pin(clock_pin), data_pin(data_pin), delay_time_us(delay_time_us) {
sdaHi();
sclHi();
}
<|fim▁hole|>void XantoI2C::sclHi() {
pinMode(clock_pin, INPUT_PULLUP);
i2cDelay();
}
void XantoI2C::sdaHi() {
pinMode(data_pin, INPUT_PULLUP);
i2cDelay();
}
void XantoI2C::sclLo() {
digitalWrite(clock_pin, LOW);
pinMode(clock_pin, OUTPUT);
i2cDelay();
}
void XantoI2C::sdaLo() {
digitalWrite(data_pin, LOW);
pinMode(data_pin, OUTPUT);
i2cDelay();
}
void XantoI2C::start() {
sdaHi();
sclHi();
i2cDelay();
sdaLo();
i2cDelay();
sclLo();
i2cDelay();
}
void XantoI2C::stop() {
sdaLo();
i2cDelay();
sclHi();
i2cDelay();
sdaHi();
i2cDelay();
}
void XantoI2C::clockPulse() {
sclHi();
i2cDelay();
sclLo();
}
void XantoI2C::writeByte(uint8_t data_byte) {
for (uint8_t i = 0; i < 8; i++) {
if (bitRead(data_byte, 7 - i)) {
sdaHi();
} else {
sdaLo();
}
clockPulse();
}
}
uint8_t XantoI2C::readBit() {
uint8_t out_bit;
sclHi();
out_bit = digitalRead(data_pin);
sclLo();
return out_bit;
}
uint8_t XantoI2C::readByte() {
uint8_t out_byte = 0;
sdaHi();
for (uint8_t i = 0; i < 8; i++) {
bitWrite(out_byte, 7 - i, readBit());
}
return out_byte;
}
/**
* Return 0 if ACK was received, else 1
*/
uint8_t XantoI2C::readAck() {
sdaHi();
return readBit() == 0 ? 0 : 1;
}
/**
* Return 0 if NACK was received, else 1
*/
uint8_t XantoI2C::readNack() {
sdaHi();
return readBit() == 1 ? 0 : 1;
}
/**
* Return 0 if all steps were executed, else 1
*/
uint8_t XantoI2C::doStartWriteAckStop(uint8_t data_byte) {
start();
writeByte(data_byte);
if (readAck()) {
return 1;
}
stop();
return 0;
}
/**
* Return 0 if all steps were executed, else 1
*/
uint8_t XantoI2C::doStartWriteAckStop(uint8_t data_bytes[], uint8_t data_length) {
start();
for (uint8_t i = 0; i < data_length; i++) {
writeByte(data_bytes[i]);
if (readAck()) {
return 1;
}
}
stop();
return 0;
}<|fim▁end|> | void XantoI2C::i2cDelay() {
delayMicroseconds(delay_time_us);
}
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod naive_copying;
mod generational;<|fim▁hole|><|fim▁end|> | pub use self::naive_copying::{GcState, FullGcArgs};
pub const INFO_FRESH_TAG: usize = 0;
pub const INFO_MARKED_TAG: usize = 1; |
<|file_name|>rich.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | //=require rich/base |
<|file_name|>balance.cpp<|end_file_name|><|fim▁begin|>// Binary-search solution for balance.<|fim▁hole|>// David Garcia Soriano.
#include <algorithm>
#include <cstdio>
#include <cmath>
using namespace std;
const int maxn = 50000;
const double eps = 1e-11, infinity = 1e300;
double px[maxn], py[maxn];
int n;
bool possible(double d, double& px1, double& px2) {
double x1 = -infinity, x2 = infinity;
for (int i = 0; i < n; ++i) {
if (d < py[i]) return false;
double p = sqrt(d * d - py[i] * py[i]),
a = px[i] - p, b = px[i] + p;
x1 = max(x1, a);
x2 = min(x2, b);
if (x1 > x2) return false;
}
px1 = x1; px2 = x2;
return true;
}
int main() {
while (scanf("%i", &n) == 1 && n > 0) {
double a, b = 0, x1, x2;
for (int i = 0; i < n; ++i) {
scanf("%lf%lf", &px[i], &py[i]);
if (py[i] < 0) py[i] = -py[i];
b = max(b, py[i]);
}
if (b == 0) {
if (n == 1) { printf("%.9lf %.9lf\n", px[0], .0); continue; }
b = 1;
}
while (!possible(b, x1, x2)) b *= 2;
a = b / 2;
while (possible(a, x1, x2)) a /= 2;
for (int i = 0; i < 100 && (b - a > eps || x2 - x1 > eps); ++i) {
double m = (a + b) / 2;
if (possible(m, x1, x2))
b = m;
else
a = m;
}
printf("%.9lf %.9lf\n", (x1 + x2) / 2, b);
}
return 0;
}<|fim▁end|> | // O(n * log (max coordinate / epsilon)) |
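The heart of the search is the feasibility test: for a candidate distance d, each point (x_i, y_i) confines the x-axis center to the interval [x_i - sqrt(d^2 - y_i^2), x_i + sqrt(d^2 - y_i^2)], and d is feasible iff all those intervals intersect. A Python sketch of that check, mirroring possible() above:

import math

def possible(d, pts):
    # pts: (x, |y|) pairs; returns the shared center interval, or None
    x1, x2 = -math.inf, math.inf
    for x, y in pts:
        if d < y:
            return None
        p = math.sqrt(d * d - y * y)
        x1, x2 = max(x1, x - p), min(x2, x + p)
        if x1 > x2:
            return None
    return x1, x2

assert possible(5.0, [(0.0, 3.0), (8.0, 0.0)]) == (3.0, 4.0)  # [-4,4] ∩ [3,13]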
<|file_name|>test-whatwg-url-searchparams-set.js<|end_file_name|><|fim▁begin|>'use strict';
const common = require('../common');
const assert = require('assert');
const URLSearchParams = require('url').URLSearchParams;
const { test, assert_equals, assert_true } = require('../common/wpt');
/* The following tests are copied from WPT. Modifications to them should be
upstreamed first. Refs:
https://github.com/w3c/web-platform-tests/blob/8791bed/url/urlsearchparams-set.html
License: http://www.w3.org/Consortium/Legal/2008/04-testsuite-copyright.html
*/
/* eslint-disable */
test(function() {
var params = new URLSearchParams('a=b&c=d');
params.set('a', 'B');
assert_equals(params + '', 'a=B&c=d');
params = new URLSearchParams('a=b&c=d&a=e');
params.set('a', 'B');
assert_equals(params + '', 'a=B&c=d')
params.set('e', 'f');
assert_equals(params + '', 'a=B&c=d&e=f')
}, 'Set basics');
test(function() {
var params = new URLSearchParams('a=1&a=2&a=3');
assert_true(params.has('a'), 'Search params object has name "a"');
assert_equals(params.get('a'), '1', 'Search params object has name "a" with value "1"');
params.set('first', 4);
assert_true(params.has('a'), 'Search params object has name "a"');
assert_equals(params.get('a'), '1', 'Search params object has name "a" with value "1"');
params.set('a', 4);
assert_true(params.has('a'), 'Search params object has name "a"');
assert_equals(params.get('a'), '4', 'Search params object has name "a" with value "4"');
}, 'URLSearchParams.set');
/* eslint-enable */
// Tests below are not from WPT.
{
const params = new URLSearchParams();
assert.throws(() => {
params.set.call(undefined);
}, common.expectsError({
code: 'ERR_INVALID_THIS',
type: TypeError,
message: 'Value of "this" must be of type URLSearchParams'
}));<|fim▁hole|> }, common.expectsError({
code: 'ERR_MISSING_ARGS',
type: TypeError,
message: 'The "name" and "value" arguments must be specified'
}));
const obj = {
toString() { throw new Error('toString'); },
valueOf() { throw new Error('valueOf'); }
};
const sym = Symbol();
  assert.throws(() => params.set(obj, 'b'), /^Error: toString$/);
  assert.throws(() => params.set('a', obj), /^Error: toString$/);
  assert.throws(() => params.set(sym, 'b'),
                /^TypeError: Cannot convert a Symbol value to a string$/);
  assert.throws(() => params.set('a', sym),
                /^TypeError: Cannot convert a Symbol value to a string$/);
}<|fim▁end|> | assert.throws(() => {
params.set('a'); |
<|file_name|>events.rs<|end_file_name|><|fim▁begin|>//! (De)serializable types for the events in the [Matrix](https://matrix.org) specification.
//! These types are used by other Ruma crates.
//!
//! All data exchanged over Matrix is expressed as an event.
//! Different event types represent different actions, such as joining a room or sending a message.
//! Events are stored and transmitted as simple JSON structures.
//! While anyone can create a new event type for their own purposes, the Matrix specification
//! defines a number of event types which are considered core to the protocol, and Matrix clients
//! and servers must understand their semantics.
//! This module contains Rust types for each of the event types defined by the specification and
//! facilities for extending the event system for custom event types.
//!
//! # Event types
//!
//! This module includes a Rust enum called [`EventType`], which provides a simple enumeration of
//! all the event types defined by the Matrix specification. Matrix event types are serialized to
//! JSON strings in [reverse domain name
//! notation](https://en.wikipedia.org/wiki/Reverse_domain_name_notation), although the core event
//! types all use the special "m" TLD, e.g. `m.room.message`.
//!
//! # Core event types
//!
//! This module includes Rust types for every one of the event types in the Matrix specification.
//! To better organize the crate, these types live in separate modules with a hierarchy that
//! matches the reverse domain name notation of the event type.
//! For example, the `m.room.message` event lives at
//! `ruma_common::events::room::message::MessageLikeEvent`. Each type's module also contains a
//! Rust type for that event type's `content` field, and any other supporting types required by the
//! event's other fields.
//!
//! # Extending Ruma with custom events
//!
//! For our examples we will start with a simple custom state event. `ruma_event`
//! specifies the state event's `type` and its [`kind`](EventKind).
//!
//! ```rust
//! use ruma_common::events::macros::EventContent;
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Clone, Debug, Deserialize, Serialize, EventContent)]<|fim▁hole|>//! ```
//!
//! This can be used with event structs, such as passing it into
//! `ruma::api::client::state::send_state_event`'s `Request`.
//!
//! As a more advanced example we create a reaction message event. For this event we will use a
//! [`SyncMessageLikeEvent`] struct but any [`MessageLikeEvent`] struct would work.
//!
//! ```rust
//! use ruma_common::events::{macros::EventContent, SyncMessageLikeEvent};
//! use ruma_common::EventId;
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Clone, Debug, Deserialize, Serialize)]
//! #[serde(tag = "rel_type")]
//! pub enum RelatesTo {
//! #[serde(rename = "m.annotation")]
//! Annotation {
//! /// The event this reaction relates to.
//! event_id: Box<EventId>,
//! /// The displayable content of the reaction.
//! key: String,
//! },
//!
//! /// Since this event is not fully specified in the Matrix spec
//! /// it may change or types may be added, we are ready!
//! #[serde(rename = "m.whatever")]
//! Whatever,
//! }
//!
//! /// The payload for our reaction event.
//! #[derive(Clone, Debug, Deserialize, Serialize, EventContent)]
//! #[ruma_event(type = "m.reaction", kind = MessageLike)]
//! pub struct ReactionEventContent {
//! #[serde(rename = "m.relates_to")]
//! pub relates_to: RelatesTo,
//! }
//!
//! let json = serde_json::json!({
//! "content": {
//! "m.relates_to": {
//! "event_id": "$xxxx-xxxx",
//! "key": "👍",
//! "rel_type": "m.annotation"
//! }
//! },
//! "event_id": "$xxxx-xxxx",
//! "origin_server_ts": 1,
//! "sender": "@someone:example.org",
//! "type": "m.reaction",
//! "unsigned": {
//! "age": 85
//! }
//! });
//!
//! // The downside of this event is we cannot use it with event enums,
//! // but could be deserialized from a `Raw<_>` that has failed to deserialize.
//! matches::assert_matches!(
//! serde_json::from_value::<SyncMessageLikeEvent<ReactionEventContent>>(json),
//! Ok(SyncMessageLikeEvent {
//! content: ReactionEventContent {
//! relates_to: RelatesTo::Annotation { key, .. },
//! },
//! ..
//! }) if key == "👍"
//! );
//! ```
//!
//! # Serialization and deserialization
//!
//! All concrete event types in this module can be serialized via the `Serialize` trait from
//! [serde](https://serde.rs/) and can be deserialized from a `Raw<EventType>`. In order to
//! handle incoming data that may not conform to this module's strict definitions of event
//! structures, deserialization will return `Raw::Err` on error. This error covers both
//! structurally invalid JSON data as well as structurally valid JSON that doesn't fulfill
//! additional constraints the matrix specification defines for some event types. The error exposes
//! the deserialized `serde_json::Value` so that developers can still work with the received
//! event data. This makes it possible to deserialize a collection of events without the entire
//! collection failing to deserialize due to a single invalid event. The "content" type for each
//! event also implements `Serialize` and either `TryFromRaw` (enabling usage as
//! `Raw<ContentType>` for dedicated content types) or `Deserialize` (when the content is a
//! type alias), allowing content to be converted to and from JSON independently of the surrounding
//! event structure, if needed.
use ruma_serde::Raw;
use serde::{de::IgnoredAny, Deserialize, Serialize, Serializer};
use serde_json::value::RawValue as RawJsonValue;
use self::room::redaction::SyncRoomRedactionEvent;
use crate::{EventEncryptionAlgorithm, RoomVersionId};
// Needs to be public for trybuild tests
#[doc(hidden)]
pub mod _custom;
mod enums;
mod event_kinds;
mod unsigned;
/// Re-export of all the derives needed to create your own event types.
pub mod macros {
pub use ruma_macros::{Event, EventContent};
}
pub mod call;
pub mod direct;
pub mod dummy;
#[cfg(feature = "unstable-msc1767")]
pub mod emote;
#[cfg(feature = "unstable-msc3551")]
pub mod file;
pub mod forwarded_room_key;
pub mod fully_read;
pub mod ignored_user_list;
pub mod key;
#[cfg(feature = "unstable-msc1767")]
pub mod message;
#[cfg(feature = "unstable-msc1767")]
pub mod notice;
#[cfg(feature = "unstable-pdu")]
pub mod pdu;
pub mod policy;
pub mod presence;
pub mod push_rules;
#[cfg(feature = "unstable-msc2677")]
pub mod reaction;
pub mod receipt;
#[cfg(feature = "unstable-msc2675")]
pub mod relation;
pub mod room;
pub mod room_key;
pub mod room_key_request;
pub mod secret;
pub mod space;
pub mod sticker;
pub mod tag;
pub mod typing;
#[cfg(feature = "unstable-msc2675")]
pub use self::relation::Relations;
#[doc(hidden)]
#[cfg(feature = "compat")]
pub use self::unsigned::{RedactedUnsignedWithPrevContent, UnsignedWithPrevContent};
pub use self::{
enums::*,
event_kinds::*,
unsigned::{RedactedUnsigned, Unsigned},
};
/// The base trait that all event content types implement.
///
/// Implementing this trait allows content types to be serialized as well as deserialized.
pub trait EventContent: Sized + Serialize {
/// A matrix event identifier, like `m.room.message`.
fn event_type(&self) -> &str;
/// Constructs the given event content.
fn from_parts(event_type: &str, content: &RawJsonValue) -> serde_json::Result<Self>;
}
/// Trait to define the behavior of redacting an event.
pub trait Redact {
/// The redacted form of the event.
type Redacted;
/// Transforms `self` into a redacted form (removing most fields) according to the spec.
///
/// A small number of events have room-version specific redaction behavior, so a version has to
/// be specified.
fn redact(self, redaction: SyncRoomRedactionEvent, version: &RoomVersionId) -> Self::Redacted;
}
/// Trait to define the behavior of redact an event's content object.
pub trait RedactContent {
/// The redacted form of the event's content.
type Redacted;
/// Transform `self` into a redacted form (removing most or all fields) according to the spec.
///
/// A small number of events have room-version specific redaction behavior, so a version has to
/// be specified.
///
/// Where applicable, it is preferred to use [`Redact::redact`] on the outer event.
fn redact(self, version: &RoomVersionId) -> Self::Redacted;
}
/// Extension trait for [`Raw<_>`][ruma_serde::Raw].
pub trait RawExt<T: EventContent> {
/// Try to deserialize the JSON as an event's content.
fn deserialize_content(&self, event_type: &str) -> serde_json::Result<T>;
}
impl<T: EventContent> RawExt<T> for Raw<T> {
fn deserialize_content(&self, event_type: &str) -> serde_json::Result<T> {
T::from_parts(event_type, self.json())
}
}
/// Marker trait for the content of an ephemeral room event.
pub trait EphemeralRoomEventContent: EventContent {}
/// Marker trait for the content of a global account data event.
pub trait GlobalAccountDataEventContent: EventContent {}
/// Marker trait for the content of a room account data event.
pub trait RoomAccountDataEventContent: EventContent {}
/// Marker trait for the content of a to device event.
pub trait ToDeviceEventContent: EventContent {}
/// Marker trait for the content of a message-like event.
pub trait MessageLikeEventContent: EventContent {}
/// Marker trait for the content of a state event.
pub trait StateEventContent: EventContent {}
/// The base trait that all redacted event content types implement.
///
/// This trait's associated functions and methods should not be used to build
/// redacted events, prefer the `redact` method on `AnyStateEvent` and
/// `AnyMessageLikeEvent` and their "sync" and "stripped" counterparts. The
/// `RedactedEventContent` trait is an implementation detail, ruma makes no
/// API guarantees.
pub trait RedactedEventContent: EventContent {
/// Constructs the redacted event content.
///
/// If called for anything but "empty" redacted content this will error.
#[doc(hidden)]
fn empty(_event_type: &str) -> serde_json::Result<Self> {
Err(serde::de::Error::custom("this event is not redacted"))
}
/// Determines if the redacted event content needs to serialize fields.
#[doc(hidden)]
fn has_serialize_fields(&self) -> bool;
/// Determines if the redacted event content needs to deserialize fields.
#[doc(hidden)]
fn has_deserialize_fields() -> HasDeserializeFields;
}
/// Marker trait for the content of a redacted message-like event.
pub trait RedactedMessageLikeEventContent: RedactedEventContent {}
/// Marker trait for the content of a redacted state event.
pub trait RedactedStateEventContent: RedactedEventContent {}
/// Trait for abstracting over event content structs.
///
/// … but *not* enums which don't always have an event type and kind (e.g. message vs state) that's
/// fixed / known at compile time.
pub trait StaticEventContent: EventContent {
/// The event's "kind".
///
/// See the type's documentation.
const KIND: EventKind;
/// The event type.
const TYPE: &'static str;
}
/// The "kind" of an event.
///
/// This corresponds directly to the event content marker traits.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[non_exhaustive]
pub enum EventKind {
/// Global account data event kind.
GlobalAccountData,
/// Room account data event kind.
RoomAccountData,
/// Ephemeral room event kind.
EphemeralRoomData,
/// Message-like event kind.
///
/// Since redacted / non-redacted message-like events are used in the same places but have
/// different sets of fields, these two variations are treated as two closely-related event
/// kinds.
MessageLike {
/// Redacted variation?
redacted: bool,
},
/// State event kind.
///
/// Since redacted / non-redacted state events are used in the same places but have different
/// sets of fields, these two variations are treated as two closely-related event kinds.
State {
/// Redacted variation?
redacted: bool,
},
/// To-device event kind.
ToDevice,
/// Presence event kind.
Presence,
/// Hierarchy space child kind.
HierarchySpaceChild,
}
/// `HasDeserializeFields` is used in the code generated by the `Event` derive
/// to aid in deserializing redacted events.
#[doc(hidden)]
#[derive(Debug)]
#[allow(clippy::exhaustive_enums)]
pub enum HasDeserializeFields {
/// Deserialize the event's content, failing if invalid.
True,
/// Return the redacted version of this event's content.
False,
/// `Optional` is used for `RedactedAliasesEventContent` since it has
/// an empty version and one with content left after redaction that
/// must be supported together.
Optional,
}
/// Helper struct to determine the event kind from a `serde_json::value::RawValue`.
#[doc(hidden)]
#[derive(Deserialize)]
#[allow(clippy::exhaustive_structs)]
pub struct EventTypeDeHelper<'a> {
#[serde(borrow, rename = "type")]
pub ev_type: std::borrow::Cow<'a, str>,
}
/// Helper struct to determine if an event has been redacted.
#[doc(hidden)]
#[derive(Deserialize)]
#[allow(clippy::exhaustive_structs)]
pub struct RedactionDeHelper {
/// Used to check whether redacted_because exists.
pub unsigned: Option<UnsignedDeHelper>,
}
#[doc(hidden)]
#[derive(Deserialize)]
#[allow(clippy::exhaustive_structs)]
pub struct UnsignedDeHelper {
/// This is the field that signals an event has been redacted.
pub redacted_because: Option<IgnoredAny>,
}
/// Helper function for erroring when trying to serialize an event enum _Custom variant that can
/// only be created by deserializing from an unknown event type.
#[doc(hidden)]
#[allow(clippy::ptr_arg)]
pub fn serialize_custom_event_error<T, S: Serializer>(_: &T, _: S) -> Result<S::Ok, S::Error> {
Err(serde::ser::Error::custom(
"Failed to serialize event [content] enum: Unknown event type.\n\
To send custom events, turn them into `Raw<EnumType>` by going through
`serde_json::value::to_raw_value` and `Raw::from_json`.",
))
}<|fim▁end|> | //! #[ruma_event(type = "org.example.event", kind = State)]
//! pub struct ExampleContent {
//! field: String,
//! } |
<|file_name|>fuzz_regex_match.rs<|end_file_name|><|fim▁begin|>#![no_main]
use libfuzzer_sys::fuzz_target;
fuzz_target!(|data: &[u8]| {
if data.len() < 2 {
return;
}
let split_point = data[0] as usize;
if let Ok(data) = std::str::from_utf8(&data[1..]) {
use std::cmp::max;
// split data into regular expression and actual input to search through
let len = data.chars().count();
let split_off_point = max(split_point, 1) % len as usize;
let char_index = data.char_indices().nth(split_off_point);
if let Some((char_index, _)) = char_index {
let (pattern, input) = data.split_at(char_index);
if let Ok(re) = regex::Regex::new(pattern) {<|fim▁hole|> }
});<|fim▁end|> | re.is_match(input);
}
} |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding:utf-8 -*-
from setuptools import setup
setup(
name='Earo',
version='0.1.0',<|fim▁hole|> author='Everley',
author_email='[email protected]',
description='A microframework based on EDA for business logic development.',
packages=['earo'],
package_data={'earo':['static/css/*.css', 'static/fonts/*', 'static/js/*.js', 'static/*.html']},
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'flask',
'enum',
'atomic',
]
)<|fim▁end|> | url='https://github.com/Everley1993/Laky-Earo',
license='Apache', |
<|file_name|>issue-37139.rs<|end_file_name|><|fim▁begin|>enum TestEnum {
Item(i32),
}
fn test(_: &mut i32) {
}
fn main() {
let mut x = TestEnum::Item(10);
match x {
TestEnum::Item(ref mut x) => {
test(&mut x); //~ ERROR cannot borrow `x` as mutable, as it is not declared as mutable
//~| HELP try removing `&mut` here<|fim▁hole|><|fim▁end|> | }
}
} |
<|file_name|>AddonManager_nl.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="1.1" language="nl" sourcelanguage="en">
<context>
<name>AddonInstaller</name>
<message>
<location filename="addonmanager_workers.py" line="535"/>
<source>Installed location</source>
<translation>Geïnstalleerde locatie</translation>
</message>
</context>
<context>
<name>AddonsInstaller</name>
<message>
<location filename="addonmanager_macro.py" line="157"/>
<source>Unable to fetch the code of this macro.</source>
<translation>De code van de macro kan niet worden opgehaald.</translation>
</message>
<message>
<location filename="addonmanager_macro.py" line="164"/>
<source>Unable to retrieve a description for this macro.</source>
<translation>De beschrijving van deze macro kan niet worden opgehaald.</translation>
</message>
<message>
<location filename="AddonManager.py" line="86"/>
<source>The addons that can be installed here are not officially part of FreeCAD, and are not reviewed by the FreeCAD team. Make sure you know what you are installing!</source>
<translation>De addons die geïnstalleerd kunnen worden maken niet officiëel onderdeel uit van FreeCAD en zijn niet gereviewed door het FreeCad team. Zorg ervoor dat je weet wat je installeert!</translation>
</message>
<message>
<location filename="AddonManager.py" line="199"/>
<source>Addon manager</source>
<translation>Uitbreidingsmanager</translation>
</message>
<message>
<location filename="AddonManager.py" line="204"/>
<source>You must restart FreeCAD for changes to take effect. Press Ok to restart FreeCAD now, or Cancel to restart later.</source>
<translation>Je moet FreeCAD opnieuw starten om de wijzigingen door te voeren. Klik Ok om FreeCAD nu opnieuw te starten, of Cancel om dit later te doen.</translation>
</message>
<message>
<location filename="AddonManager.py" line="243"/>
<source>Checking for updates...</source>
<translation>Zoeken naar updates...</translation>
</message>
<message>
<location filename="AddonManager.py" line="262"/>
<source>Apply</source>
<translation>Toepassen</translation>
</message>
<message>
<location filename="AddonManager.py" line="263"/>
<source>update(s)</source>
<translation>update(s)</translation>
</message>
<message>
<location filename="AddonManager.py" line="266"/>
<source>No update available</source>
<translation>Geen update beschikbaar</translation>
</message>
<message>
<location filename="AddonManager.py" line="433"/>
<source>Macro successfully installed. The macro is now available from the Macros dialog.</source>
<translation>Macro succesvol geïnstalleerd. De macro is nu beschikbaar via het Macros-venster.</translation>
</message>
<message>
<location filename="AddonManager.py" line="435"/>
<source>Unable to install</source>
<translation>Installeren niet mogelijk</translation>
</message>
<message>
<location filename="AddonManager.py" line="494"/>
<source>Addon successfully removed. Please restart FreeCAD</source>
<translation>Addon succesvol verwijderd. Herstart aub FreeCAD</translation>
</message>
<message>
<location filename="AddonManager.py" line="496"/>
<source>Unable to remove this addon</source>
<translation>Deze addon kan niet worden verwijderd</translation>
</message>
<message>
<location filename="AddonManager.py" line="502"/>
<source>Macro successfully removed.</source>
<translation>Macro succesvol verwijderd.</translation>
</message>
<message>
<location filename="AddonManager.py" line="504"/>
<source>Macro could not be removed.</source>
<translation>Macro kon niet worden verwijderd.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="167"/>
<source>Unable to download addon list.</source>
<translation>Addon lijst kan niet worden opgehaald.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="172"/>
<source>Workbenches list was updated.</source>
<translation>Werkbankenlijst is bijgewerkt.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="738"/>
<source>Outdated GitPython detected, consider upgrading with pip.</source>
<translation>Verouderde GitPython gevonden, overweeg een upgrade met pip.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="296"/>
<source>List of macros successfully retrieved.</source>
<translation>Lijst van macro's succesvol opgehaald.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="651"/>
<source>Retrieving description...</source>
<translation>Omschrijving ophalen...</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="391"/>
<source>Retrieving info from</source>
<translation>Informatie ophalen vanaf</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="533"/>
<source>An update is available for this addon.</source>
<translation>Er is een update beschikbaar voor deze addon.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="521"/>
<source>This addon is already installed.</source>
<translation>Deze addon is al geïnstalleerd.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="653"/>
<source>Retrieving info from git</source>
<translation>Informatie ophalen van git</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="656"/>
<source>Retrieving info from wiki</source>
<translation>Informatie ophalen van wiki</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="700"/>
<source>GitPython not found. Using standard download instead.</source>
<translation>GitPython niet gevonden. Standaard download wordt nu gebruikt.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="705"/>
<source>Your version of python doesn't appear to support ZIP files. Unable to proceed.</source>
<translation>Je versie van Python lijkt geen ZIP-bestanden te ondersteunen. Verder gaan niet mogelijk.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="786"/>
<source>Workbench successfully installed. Please restart FreeCAD to apply the changes.</source>
<translation>Werkbank succesvol geïnstalleerd. Herstart FreeCAD om de wijzigingen toe te passen.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="835"/>
<source>Missing workbench</source>
<translation>Werkbank ontbreekt</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="844"/>
<source>Missing python module</source>
<translation>Ontbrekende python module</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="854"/>
<source>Missing optional python module (doesn't prevent installing)</source>
<translation>Ontbrekende optionele python module (voorkomt niet het installeren)</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="857"/>
<source>Some errors were found that prevent to install this workbench</source>
<translation>Er zijn enkele fouten gevonden die voorkomen dat deze werkbank wordt geïnstalleerd</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="859"/>
<source>Please install the missing components first.</source>
<translation>Installeer eerst de ontbrekende componenten.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="880"/>
<source>Error: Unable to download</source>
<translation>Fout: Kan niet downloaden</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="893"/>
<source>Successfully installed</source>
<translation>Succesvol geïnstalleerd</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="310"/>
<source>GitPython not installed! Cannot retrieve macros from git</source>
<translation>GitPython niet geïnstalleerd! Macro's kunnen niet worden opgehaald van git</translation>
</message>
<message>
<location filename="AddonManager.py" line="567"/>
<source>Installed</source>
<translation>Geïnstalleerd</translation>
</message>
<message>
<location filename="AddonManager.py" line="586"/>
<source>Update available</source>
<translation>Update beschikbaar</translation>
</message>
<message>
<location filename="AddonManager.py" line="542"/>
<source>Restart required</source>
<translation>Opnieuw opstarten vereist</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="665"/>
<source>This macro is already installed.</source>
<translation>Deze macro is al geïnstalleerd.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="799"/>
<source>A macro has been installed and is available under Macro -> Macros menu</source>
<translation>Een macro is geïnstalleerd en is beschikbaar onder Macro -> Macro's menu</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="547"/>
<source>This addon is marked as obsolete</source>
<translation>Deze uitbreiding is gemarkeerd als verouderd</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="551"/>
<source>This usually means it is no longer maintained, and some more advanced addon in this list provides the same functionality.</source>
<translation>Dit betekent gewoonlijk dat het niet meer onderhouden wordt, en een geavanceerdere uitbreidingen in deze lijst biedt dezelfde functionaliteit.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="873"/>
<source>Error: Unable to locate zip from</source>
<translation>Fout: Kan zip niet vinden vanuit</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="319"/>
<source>Something went wrong with the Git Macro Retrieval, possibly the Git executable is not in the path</source>
<translation>Er is iets misgegaan met het ophalen van de Git Macro. Mogelijk bevindt het uitvoerbare Git-bestand zich niet in het pad</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="559"/>
<source>This addon is marked as Python 2 Only</source>
<translation>Deze toevoeging is alleen geschikt voor Python 2</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="564"/>
<source>This workbench may no longer be maintained and installing it on a Python 3 system will more than likely result in errors at startup or while in use.</source>
<translation>Deze werkbank wordt wellicht niet langer onderhouden en de installatie ervan op een Python 3-systeem zal hoogstwaarschijnlijk leiden tot fouten bij het opstarten of tijdens het gebruik.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="727"/>
<source>User requested updating a Python 2 workbench on a system running Python 3 - </source>
<translation>De gebruiker verzocht om een Python 2 werkbank bij te werken op een systeem dat Python 3 draait </translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="763"/>
<source>Workbench successfully updated. Please restart FreeCAD to apply the changes.</source>
<translation>Werkbank succesvol bijgewerkt. Herstart FreeCAD om de wijzigingen toe te passen.</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="771"/>
<source>User requested installing a Python 2 workbench on a system running Python 3 - </source>
<translation>De gebruiker verzocht om een Python 2 werkbank te installeren op een systeem dat Python 3 draait </translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="343"/>
<source>Appears to be an issue connecting to the Wiki, therefore cannot retrieve Wiki macro list at this time</source>
<translation>Er lijkt een probleem te zijn met de verbinding met de Wiki, daarom kan de Wiki-macrolijst op dit moment niet worden opgehaald</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="433"/>
<source>Raw markdown displayed</source>
<translation>Ongeformateerde tekst weergegeven</translation>
</message>
<message>
<location filename="addonmanager_workers.py" line="435"/>
<source>Python Markdown library is missing.</source>
<translation>Python Markdown bibliotheek mist.</translation>
</message>
</context>
<context>
<name>Dialog</name>
<message>
<location filename="AddonManager.ui" line="37"/>
<source>Workbenches</source>
<translation>Werkbanken</translation>
</message>
<message>
<location filename="AddonManager.ui" line="47"/>
<source>Macros</source>
<translation>Macro's</translation>
</message>
<message>
<location filename="AddonManager.ui" line="59"/>
<source>Execute</source>
<translation>Uitvoeren</translation>
</message>
<message>
<location filename="AddonManager.ui" line="113"/>
<source>Downloading info...</source>
<translation>Info downloaden...</translation>
</message>
<message>
<location filename="AddonManager.ui" line="150"/>
<source>Update all</source>
<translation>Alles bijwerken</translation>
</message>
<message>
<location filename="AddonManager.ui" line="56"/>
<source>Executes the selected macro, if installed</source>
<translation>Voert de geselecteerde macro uit, indien geïnstalleerd</translation>
</message>
<message>
<location filename="AddonManager.ui" line="127"/>
<source>Uninstalls a selected macro or workbench</source>
<translation>Verwijder een geselecteerde macro of werkbank</translation>
</message>
<message>
<location filename="AddonManager.ui" line="137"/>
<source>Installs or updates the selected macro or workbench</source>
<translation>Installeert of de geselecteerde macro of werkbank, of werkt deze bij</translation>
</message>
<message>
<location filename="AddonManager.ui" line="147"/>
<source>Download and apply all available updates</source>
<translation>Download en pas alle beschikbare updates toe</translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="35"/>
<source>Custom repositories (one per line):</source>
<translation>Aangepaste opslagplaatsen (één per lijn):</translation>
</message>
<message>
<location filename="AddonManager.ui" line="89"/>
<source>Sets configuration options for the Addon Manager</source>
<translation>Stelt de configuratieopties voor de uitbreidingsmanager in</translation>
</message>
<message>
<location filename="AddonManager.ui" line="92"/><|fim▁hole|> <message>
<location filename="AddonManagerOptions.ui" line="14"/>
<source>Addon manager options</source>
<translation>Opties voor de uitbreidingsmanager</translation>
</message>
<message>
<location filename="AddonManager.ui" line="130"/>
<source>Uninstall selected</source>
<translation>Verwijder geselecteerde</translation>
</message>
<message>
<location filename="AddonManager.ui" line="140"/>
<source>Install/update selected</source>
<translation>Geselecteerde installeren/bijwerken</translation>
</message>
<message>
<location filename="AddonManager.ui" line="160"/>
<source>Close</source>
<translation>Sluiten</translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="20"/>
<source>If this option is selected, when launching the Addon Manager,
installed addons will be checked for available updates
(this requires the GitPython package installed on your system)</source>
<translation>Als deze optie geselecteerd is, zullen bij het starten van de uitbreidingsmanager
geïnstalleerde uitbreidingen gecontroleerd worden op beschikbare updates
(dit vereist het GitPython-pakket dat op uw systeem geïnstalleerd is)</translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="25"/>
<source>Automatically check for updates at start (requires GitPython)</source>
<translation>Controleer automatisch op updates bij het opstarten (vereist GitPython)</translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="57"/>
<source>Proxy </source>
<translation>Proxy </translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="64"/>
<source>No proxy</source>
<translation>Geen proxy</translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="71"/>
<source>User system proxy</source>
<translation>Proxy van gebruikerssysteem</translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="78"/>
<source>User defined proxy :</source>
<translation>Gebruikergedefinieerde proxy :</translation>
</message>
<message>
<location filename="AddonManager.ui" line="14"/>
<source>Addon Manager</source>
<translation>Uitbreidingsmanager</translation>
</message>
<message>
<location filename="AddonManager.ui" line="157"/>
<source>Close the Addon Manager</source>
<translation>Sluit uitbreidingsmanager</translation>
</message>
<message>
<location filename="AddonManagerOptions.ui" line="42"/>
<source>You can use this window to specify additional addon repositories
to be scanned for available addons</source>
<translation>U kunt dit venster gebruiken om extra add-on-opslagplaatsen op te geven
die gescand moeten worden voor beschikbare add-ons</translation>
</message>
</context>
<context>
<name>Std_AddonMgr</name>
<message>
<location filename="AddonManager.py" line="68"/>
<source>&Addon manager</source>
<translation>&Uitbreidingsmanager</translation>
</message>
<message>
<location filename="AddonManager.py" line="69"/>
<source>Manage external workbenches and macros</source>
<translation>Externe werkbanken en macro's beheren</translation>
</message>
</context>
</TS><|fim▁end|> | <source>Configure...</source>
<translation>Configureer...</translation>
</message> |
<|file_name|>metadata.rs<|end_file_name|><|fim▁begin|>use crate::{error::{Error,
Result},
package::PackageIdent};
use serde_derive::Serialize;
use std::{self,
collections::BTreeMap,
env,
fmt,
fs::File,
io::Read,
iter::{FromIterator,
IntoIterator},
path::{Path,
PathBuf},
str::FromStr,
string::ToString,
vec::IntoIter};
#[cfg(not(windows))]
const ENV_PATH_SEPARATOR: char = ':';
#[cfg(windows)]
const ENV_PATH_SEPARATOR: char = ';';
pub fn parse_key_value(s: &str) -> Result<BTreeMap<String, String>> {
Ok(BTreeMap::from_iter(s.lines()
.map(|l| l.splitn(2, '=').collect::<Vec<_>>())
.map(|kv| {
(kv[0].to_string(), kv[1].to_string())
})))
}
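// Hedged usage sketch (illustrative): a metafile body such as
// "PATH=/bin\nTERM=xterm" parses to {"PATH": "/bin", "TERM": "xterm"}.
// Note that `kv[1]` panics for a line with no '=' at all; the
// `malformed_file` test below relies on exactly that behavior.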
#[derive(Clone, Debug, Serialize)]
pub struct Bind {
pub service: String,
pub exports: Vec<String>,
}
impl FromStr for Bind {
type Err = Error;
fn from_str(line: &str) -> Result<Self> {
let mut parts = line.split('=');
let service = match parts.next() {
None => return Err(Error::MetaFileBadBind),
Some(service) => service.to_string(),
};
let exports = match parts.next() {
None => return Err(Error::MetaFileBadBind),
Some(exports) => exports.split_whitespace().map(str::to_string).collect(),
};
Ok(Bind { service, exports })
}
}
impl fmt::Display for Bind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_exports = self.exports.join(" ");
write!(f, "[{}]={}", self.service, formatted_exports)
}
}
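// Note (illustrative): parsing and formatting are asymmetric by design.
// "db=port host".parse::<Bind>() yields service "db" and exports
// ["port", "host"], while Display renders the same value as "[db]=port host".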
/// Describes a bind mapping in a composite package.
#[derive(Debug, PartialEq)]
pub struct BindMapping {
/// The name of the bind of a given service.
pub bind_name: String,
/// The identifier of the service within the composite package
/// that should satisfy the named bind.
pub satisfying_service: PackageIdent,
}
impl FromStr for BindMapping {
type Err = Error;
fn from_str(line: &str) -> Result<Self> {
let mut parts = line.split(':');
let bind_name = parts.next()
.map(ToString::to_string)
.ok_or(Error::MetaFileBadBind)?;
let satisfying_service = match parts.next() {
None => return Err(Error::MetaFileBadBind),
Some(satisfying_service) => satisfying_service.parse()?,
};
Ok(BindMapping { bind_name,
satisfying_service })
}
}
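// Hedged sketch (illustrative): "db:core/postgresql".parse::<BindMapping>()
// yields bind_name "db" and satisfying_service "core/postgresql"; anything
// after the ':' that is not a valid PackageIdent surfaces the parse error.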
#[derive(Debug, PartialEq)]
pub struct EnvVar {
pub key: String,
pub value: String,
pub separator: Option<char>,
}
#[derive(Debug)]
pub struct PkgEnv {
inner: Vec<EnvVar>,
}
impl PkgEnv {
pub fn new(values: BTreeMap<String, String>, separators: &BTreeMap<String, String>) -> Self {
Self { inner: values.into_iter()
.map(|(key, value)| {
if let Some(sep) = separators.get(&key) {
EnvVar { key,
value,
separator: sep.to_owned().pop() }
} else {
EnvVar { key,
value,
separator: None }
}
})
.collect(), }
}
pub fn from_paths(paths: &[PathBuf]) -> Self {
let p = env::join_paths(paths).expect("Failed to build path string");
Self { inner: vec![EnvVar { key: "PATH".to_string(),
value: p.into_string()
.expect("Failed to convert path to utf8 string"),
separator: Some(ENV_PATH_SEPARATOR), }], }
}
pub fn is_empty(&self) -> bool { self.inner.is_empty() }
}
impl IntoIterator for PkgEnv {
type IntoIter = IntoIter<EnvVar>;
type Item = EnvVar;
fn into_iter(self) -> Self::IntoIter { self.inner.into_iter() }
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum MetaFile {
BindMap, // Composite-only
Binds,
BindsOptional,
BuildDeps,
BuildTDeps,
CFlags,
Config,
Deps,
Environment,
EnvironmentSep,
Exports,
Exposes,
Ident,
LdFlags,
LdRunPath,
Manifest,
Path,
ResolvedServices, // Composite-only
RuntimeEnvironment,
RuntimeEnvironmentPaths,
RuntimePath,
ShutdownSignal,
ShutdownTimeout,
Services, // Composite-only
SvcGroup,
SvcUser,
Target,
TDeps,
Type,
}
impl fmt::Display for MetaFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let id = match *self {
MetaFile::BindMap => "BIND_MAP",
MetaFile::Binds => "BINDS",
MetaFile::BindsOptional => "BINDS_OPTIONAL",
MetaFile::BuildDeps => "BUILD_DEPS",
MetaFile::BuildTDeps => "BUILD_TDEPS",
MetaFile::CFlags => "CFLAGS",
MetaFile::Config => "default.toml",
MetaFile::Deps => "DEPS",
MetaFile::Environment => "ENVIRONMENT",
MetaFile::EnvironmentSep => "ENVIRONMENT_SEP",
MetaFile::Exports => "EXPORTS",
MetaFile::Exposes => "EXPOSES",
MetaFile::Ident => "IDENT",
MetaFile::LdFlags => "LDFLAGS",
MetaFile::LdRunPath => "LD_RUN_PATH",
MetaFile::Manifest => "MANIFEST",
MetaFile::Path => "PATH",
MetaFile::ResolvedServices => "RESOLVED_SERVICES",
MetaFile::RuntimeEnvironment => "RUNTIME_ENVIRONMENT",
MetaFile::RuntimeEnvironmentPaths => "RUNTIME_ENVIRONMENT_PATHS",
MetaFile::RuntimePath => "RUNTIME_PATH",
MetaFile::Services => "SERVICES",
MetaFile::ShutdownSignal => "SHUTDOWN_SIGNAL",
MetaFile::ShutdownTimeout => "SHUTDOWN_TIMEOUT",
MetaFile::SvcGroup => "SVC_GROUP",
MetaFile::SvcUser => "SVC_USER",
MetaFile::Target => "TARGET",
MetaFile::TDeps => "TDEPS",
MetaFile::Type => "TYPE",
};
write!(f, "{}", id)
}
}
/// Read a metadata file from within a package directory if it exists
///
/// Returns the contents of the file
pub fn read_metafile<P: AsRef<Path>>(installed_path: P, file: MetaFile) -> Result<String> {
match existing_metafile(installed_path, file) {
Some(filepath) => {
match File::open(&filepath) {
Ok(mut f) => {
let mut data = String::new();
if f.read_to_string(&mut data).is_err() {
return Err(Error::MetaFileMalformed(file));
}
Ok(data.trim().to_string())
}
Err(e) => Err(Error::MetaFileIO(e)),
}
}
None => Err(Error::MetaFileNotFound(file)),
}
}
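// Hedged usage sketch (illustrative; the path below is hypothetical):
// read_metafile("/hab/pkgs/core/foo/1.0.0/20200101000000", MetaFile::Ident)
// returns the trimmed contents of the IDENT file, or
// Err(Error::MetaFileNotFound(..)) when the file is absent.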
/// Returns the path to a specified MetaFile in an installed path if it exists.
///
/// Useful for fallback logic for dealing with older Habitat packages.
fn existing_metafile<P: AsRef<Path>>(installed_path: P, file: MetaFile) -> Option<PathBuf> {
let filepath = installed_path.as_ref().join(file.to_string());
match std::fs::metadata(&filepath) {
Ok(_) => Some(filepath),
Err(_) => None,
}
}
pub enum PackageType {
Standalone,
Composite,
}
impl fmt::Display for PackageType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let id = match *self {
PackageType::Standalone => "Standalone",
PackageType::Composite => "Composite",
};
write!(f, "{}", id)
}
}
impl FromStr for PackageType {
type Err = Error;
fn from_str(value: &str) -> Result<Self> {
match value {
"standalone" => Ok(PackageType::Standalone),
"composite" => Ok(PackageType::Composite),
_ => Err(Error::InvalidPackageType(value.to_string())),
}
}<|fim▁hole|>mod test {
use super::*;
use std::io::Write;
use tempfile::Builder;
static ENVIRONMENT: &str = r#"PATH=/hab/pkgs/python/setuptools/35.0.1/20170424072606/bin
PYTHONPATH=/hab/pkgs/python/setuptools/35.0.1/20170424072606/lib/python3.6/site-packages
"#;
static ENVIRONMENT_SEP: &str = r#"PATH=:
PYTHONPATH=:
"#;
static EXPORTS: &str = r#"status-port=status.port
port=front-end.port
"#;
static PATH: &str = "/hab/pkgs/python/setuptools/35.0.1/20170424072606/bin";
/// Write the given contents into the specified metadata file for
/// the package.
fn write_metafile(install_dir: &Path, metafile: MetaFile, content: &str) {
let path = install_dir.join(metafile.to_string());
let mut f = File::create(path).expect("Could not create metafile");
f.write_all(content.as_bytes())
.expect("Could not write metafile contents");
}
#[test]
#[should_panic]
fn malformed_file() { parse_key_value(&"PATH").unwrap(); }
#[test]
fn can_parse_environment_file() {
let mut m: BTreeMap<String, String> = BTreeMap::new();
m.insert("PATH".to_string(),
"/hab/pkgs/python/setuptools/35.0.1/20170424072606/bin".to_string());
m.insert(
"PYTHONPATH".to_string(),
"/hab/pkgs/python/setuptools/35.0.1/20170424072606/lib/python3.6/site-packages"
.to_string(),
);
assert_eq!(parse_key_value(&ENVIRONMENT).unwrap(), m);
}
#[test]
fn can_parse_environment_sep_file() {
let mut m: BTreeMap<String, String> = BTreeMap::new();
m.insert("PATH".to_string(), ":".to_string());
m.insert("PYTHONPATH".to_string(), ":".to_string());
assert_eq!(parse_key_value(&ENVIRONMENT_SEP).unwrap(), m);
}
#[test]
fn can_parse_exports_file() {
let mut m: BTreeMap<String, String> = BTreeMap::new();
m.insert("status-port".to_string(), "status.port".to_string());
m.insert("port".to_string(), "front-end.port".to_string());
assert_eq!(parse_key_value(&EXPORTS).unwrap(), m);
}
#[test]
fn build_pkg_env() {
let mut result =
PkgEnv::new(parse_key_value(&ENVIRONMENT).unwrap(),
&parse_key_value(&ENVIRONMENT_SEP).unwrap()).into_iter()
.collect::<Vec<_>>();
// Sort the result by key, so we have a guarantee of order
result.sort_by_key(|v| v.key.to_owned());
let expected =
vec![EnvVar { key: "PATH".to_string(),
value:
"/hab/pkgs/python/setuptools/35.0.1/20170424072606/bin".to_string(),
separator: Some(':'), },
EnvVar { key: "PYTHONPATH".to_string(),
value: "/hab/pkgs/python/setuptools/35.0.1/20170424072606/lib/\
python3.6/site-packages"
.to_string(),
separator: Some(':'), },];
assert_eq!(result, expected);
}
#[test]
fn build_pkg_env_is_empty() {
let result = PkgEnv::new(BTreeMap::new(), &BTreeMap::new());
assert!(result.is_empty());
}
#[test]
fn build_pkg_env_from_path() {
let result = PkgEnv::from_paths(&[PathBuf::from(PATH)]).into_iter()
.collect::<Vec<_>>();
let expected = vec![EnvVar { key: "PATH".to_string(),
value: "/hab/pkgs/python/setuptools/35.0.1/\
20170424072606/bin"
.to_string(),
separator: Some(ENV_PATH_SEPARATOR), }];
assert_eq!(result, expected);
}
#[test]
fn can_parse_a_valid_bind_mapping() {
let input = "my_bind:core/test";
let output: BindMapping = input.parse().unwrap();
assert_eq!(output.bind_name, "my_bind");
assert_eq!(output.satisfying_service,
PackageIdent::from_str("core/test").unwrap());
}
#[test]
fn fails_to_parse_a_bind_mapping_with_an_invalid_service_identifier() {
let input = "my_bind:this-is-a-bad-identifier";
let output = input.parse::<BindMapping>();
assert!(output.is_err());
}
#[test]
fn can_read_metafile() {
let pkg_root = Builder::new().prefix("pkg-root").tempdir().unwrap();
let install_dir = pkg_root.path();
let expected = "core/foo=db:core/database";
write_metafile(install_dir, MetaFile::Binds, expected);
let bind_map = read_metafile(install_dir, MetaFile::Binds).unwrap();
assert_eq!(expected, bind_map);
}
#[test]
fn reading_a_non_existing_metafile_is_an_error() {
let pkg_root = Builder::new().prefix("pkg-root").tempdir().unwrap();
let install_dir = pkg_root.path();
let bind_map = read_metafile(install_dir, MetaFile::Binds);
assert!(bind_map.is_err());
}
}<|fim▁end|> | }
#[cfg(test)] |
<|file_name|>bluetooth.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use bluetooth_blacklist::{Blacklist, uuid_is_blacklisted};
use core::clone::Clone;
use dom::bindings::codegen::Bindings::BluetoothBinding;
use dom::bindings::codegen::Bindings::BluetoothBinding::RequestDeviceOptions;
use dom::bindings::codegen::Bindings::BluetoothBinding::{BluetoothScanFilter, BluetoothMethods};
use dom::bindings::error::Error::{self, Security, Type};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::reflector::{Reflectable, Reflector, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::bluetoothadvertisingdata::BluetoothAdvertisingData;
use dom::bluetoothdevice::BluetoothDevice;
use dom::bluetoothuuid::BluetoothUUID;
use ipc_channel::ipc::{self, IpcSender};
use net_traits::bluetooth_scanfilter::{BluetoothScanfilter, BluetoothScanfilterSequence};
use net_traits::bluetooth_scanfilter::{RequestDeviceoptions, ServiceUUIDSequence};
use net_traits::bluetooth_thread::{BluetoothError, BluetoothMethodMsg};
const FILTER_EMPTY_ERROR: &'static str = "'filters' member must be non-empty to find any devices.";
const FILTER_ERROR: &'static str = "A filter must restrict the devices in some way.";
const FILTER_NAME_TOO_LONG_ERROR: &'static str = "A 'name' or 'namePrefix' can't be longer than 29 bytes.";
// 248 is the maximum number of UTF-8 code units in a Bluetooth Device Name.
const MAX_DEVICE_NAME_LENGTH: usize = 248;
// A device name can never be longer than 29 bytes.
// An advertising packet is at most 31 bytes long.
// The length and identifier of the length field take 2 bytes.
// That leaves 29 bytes for the name.
const MAX_FILTER_NAME_LENGTH: usize = 29;
const NAME_PREFIX_ERROR: &'static str = "'namePrefix', if present, must be non-empty.";
const NAME_TOO_LONG_ERROR: &'static str = "A device name can't be longer than 248 bytes.";
const SERVICE_ERROR: &'static str = "'services', if present, must contain at least one service.";
// https://webbluetoothcg.github.io/web-bluetooth/#bluetooth
#[dom_struct]
pub struct Bluetooth {
reflector_: Reflector,
}
impl Bluetooth {
pub fn new_inherited() -> Bluetooth {
Bluetooth {
reflector_: Reflector::new(),
}
}
pub fn new(global: GlobalRef) -> Root<Bluetooth> {
reflect_dom_object(box Bluetooth::new_inherited(),
global,
BluetoothBinding::Wrap)
}
fn get_bluetooth_thread(&self) -> IpcSender<BluetoothMethodMsg> {
let global_root = self.global();
let global_ref = global_root.r();
global_ref.as_window().bluetooth_thread()
}
}
fn canonicalize_filter(filter: &BluetoothScanFilter, global: GlobalRef) -> Fallible<BluetoothScanfilter> {
if filter.services.is_none() && filter.name.is_none() && filter.namePrefix.is_none() {
return Err(Type(FILTER_ERROR.to_owned()));
}
let mut services_vec = vec!();
if let Some(ref services) = filter.services {
if services.is_empty() {
return Err(Type(SERVICE_ERROR.to_owned()));
}
for service in services {
let uuid = try!(BluetoothUUID::GetService(global, service.clone())).to_string();
if uuid_is_blacklisted(uuid.as_ref(), Blacklist::All) {
return Err(Security)
}
services_vec.push(uuid);
}
}
let mut name = String::new();
if let Some(ref filter_name) = filter.name {
//NOTE: DOMString::len() gives back the size in bytes
if filter_name.len() > MAX_DEVICE_NAME_LENGTH {
return Err(Type(NAME_TOO_LONG_ERROR.to_owned()));
}
if filter_name.len() > MAX_FILTER_NAME_LENGTH {
return Err(Type(FILTER_NAME_TOO_LONG_ERROR.to_owned()));
}
name = filter_name.to_string();
}
let mut name_prefix = String::new();
if let Some(ref filter_name_prefix) = filter.namePrefix {
if filter_name_prefix.is_empty() {
return Err(Type(NAME_PREFIX_ERROR.to_owned()));
}
if filter_name_prefix.len() > MAX_DEVICE_NAME_LENGTH {
return Err(Type(NAME_TOO_LONG_ERROR.to_owned()));
}
if filter_name_prefix.len() > MAX_FILTER_NAME_LENGTH {
return Err(Type(FILTER_NAME_TOO_LONG_ERROR.to_owned()));<|fim▁hole|> }
name_prefix = filter_name_prefix.to_string();
}
Ok(BluetoothScanfilter::new(name, name_prefix, services_vec))
}
fn convert_request_device_options(options: &RequestDeviceOptions,
global: GlobalRef)
-> Fallible<RequestDeviceoptions> {
if options.filters.is_empty() {
return Err(Type(FILTER_EMPTY_ERROR.to_owned()));
}
let mut filters = vec!();
for filter in &options.filters {
filters.push(try!(canonicalize_filter(&filter, global)));
}
let mut optional_services = vec!();
if let Some(ref opt_services) = options.optionalServices {
for opt_service in opt_services {
let uuid = try!(BluetoothUUID::GetService(global, opt_service.clone())).to_string();
if !uuid_is_blacklisted(uuid.as_ref(), Blacklist::All) {
optional_services.push(uuid);
}
}
}
Ok(RequestDeviceoptions::new(BluetoothScanfilterSequence::new(filters),
ServiceUUIDSequence::new(optional_services)))
}
impl From<BluetoothError> for Error {
fn from(error: BluetoothError) -> Self {
match error {
BluetoothError::Type(message) => Error::Type(message),
BluetoothError::Network => Error::Network,
BluetoothError::NotFound => Error::NotFound,
BluetoothError::NotSupported => Error::NotSupported,
BluetoothError::Security => Error::Security,
}
}
}
impl BluetoothMethods for Bluetooth {
// https://webbluetoothcg.github.io/web-bluetooth/#dom-bluetooth-requestdevice
fn RequestDevice(&self, option: &RequestDeviceOptions) -> Fallible<Root<BluetoothDevice>> {
let (sender, receiver) = ipc::channel().unwrap();
let option = try!(convert_request_device_options(option, self.global().r()));
self.get_bluetooth_thread().send(BluetoothMethodMsg::RequestDevice(option, sender)).unwrap();
let device = receiver.recv().unwrap();
match device {
Ok(device) => {
let ad_data = BluetoothAdvertisingData::new(self.global().r(),
device.appearance,
device.tx_power,
device.rssi);
Ok(BluetoothDevice::new(self.global().r(),
DOMString::from(device.id),
device.name.map(DOMString::from),
&ad_data))
},
Err(error) => {
Err(Error::from(error))
},
}
}
}<|fim▁end|> | |
<|file_name|>entity.cpp<|end_file_name|><|fim▁begin|>//! @file
//! @copyright See <a href="LICENSE.txt">LICENSE.txt</a>.
#pragma once
#include "entity.hpp"
namespace ql {
//! Makes @p entity_id an entity: a being or object that can exist in the world.
auto make_entity(reg& reg, id entity_id, location location) -> id {
reg.assign<ql::location>(entity_id, location);<|fim▁hole|>}<|fim▁end|> | return entity_id;
} |
<|file_name|>variables_b.js<|end_file_name|><|fim▁begin|><|fim▁hole|>var searchData=
[
['own_5faddress',['own_address',['../classRiots__BabyRadio.html#a0e4c8a64bf259ce380431879871bf872',1,'Riots_BabyRadio::own_address()'],['../classRiots__MamaRadio.html#ae1e3a65b5c9768df2a514fb984fe6a8c',1,'Riots_MamaRadio::own_address()']]],
['own_5fconfig_5fmessage',['own_config_message',['../classRiots__MamaRadio.html#a513d856339fc044e1ba21da1227be711',1,'Riots_MamaRadio']]],
['own_5fring_5fevent_5fongoing',['own_ring_event_ongoing',['../classRiots__BabyRadio.html#a20d65ad231afd698d7fd9440cf89d84b',1,'Riots_BabyRadio']]]
];<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate log;
pub mod gl_glue;
pub use servo::embedder_traits::{
MediaSessionPlaybackState, PermissionPrompt, PermissionRequest, PromptResult,
};
pub use servo::script_traits::{MediaSessionActionType, MouseButton};
use getopts::Options;
use servo::canvas::{SurfaceProviders, WebGlExecutor};
use servo::compositing::windowing::{
AnimationState, EmbedderCoordinates, EmbedderMethods, MouseWindowEvent, WindowEvent,
WindowMethods,
};
use servo::embedder_traits::resources::{self, Resource, ResourceReaderMethods};
use servo::embedder_traits::{EmbedderMsg, MediaSessionEvent, PromptDefinition, PromptOrigin};
use servo::euclid::{Point2D, Rect, Scale, Size2D, Vector2D};
use servo::keyboard_types::{Key, KeyState, KeyboardEvent};
use servo::msg::constellation_msg::TraversalDirection;
use servo::script_traits::{TouchEventType, TouchId};
use servo::servo_config::opts;
use servo::servo_config::{pref, set_pref};
use servo::servo_url::ServoUrl;
use servo::webrender_api::units::DevicePixel;
use servo::webrender_api::ScrollLocation;
use servo::webvr::{VRExternalShmemPtr, VRMainThreadHeartbeat, VRService, VRServiceManager};
use servo::{self, gl, BrowserId, Servo};
use servo_media::player::context as MediaPlayerContext;
use std::cell::RefCell;
use std::mem;
use std::os::raw::c_void;
use std::path::PathBuf;
use std::rc::Rc;
thread_local! {
pub static SERVO: RefCell<Option<ServoGlue>> = RefCell::new(None);
}
/// The EventLoopWaker::wake function will be called from any thread.
/// It will be called to notify the embedder that some events are available,
/// and that perform_updates needs to be called.
pub use servo::embedder_traits::EventLoopWaker;
pub struct InitOptions {
pub args: Vec<String>,
pub url: Option<String>,
pub coordinates: Coordinates,
pub density: f32,
pub vr_init: VRInitOptions,
pub xr_discovery: Option<webxr::Discovery>,
pub enable_subpixel_text_antialiasing: bool,
pub gl_context_pointer: Option<*const c_void>,
pub native_display_pointer: Option<*const c_void>,
}
pub enum VRInitOptions {
None,
VRExternal(*mut c_void),
VRService(Box<dyn VRService>, Box<dyn VRMainThreadHeartbeat>),
}
#[derive(Clone, Debug)]
pub struct Coordinates {
pub viewport: Rect<i32, DevicePixel>,
pub framebuffer: Size2D<i32, DevicePixel>,
}
impl Coordinates {
pub fn new(
x: i32,
y: i32,
width: i32,
height: i32,
fb_width: i32,
fb_height: i32,
) -> Coordinates {
Coordinates {
viewport: Rect::new(Point2D::new(x, y), Size2D::new(width, height)),
framebuffer: Size2D::new(fb_width, fb_height),
}
}
}
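// Hedged usage sketch (illustrative): a non-HiDPI 800x600 window whose
// viewport and framebuffer coincide would be
// Coordinates::new(0, 0, 800, 600, 800, 600).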
/// Callbacks. Implemented by embedder. Called by Servo.
pub trait HostTrait {
/// Will be called from the thread used for the init call.
/// Will be called when the GL buffer has been updated.
fn flush(&self);
/// Will be called before drawing.
/// Time to make the targeted GL context current.
fn make_current(&self);
/// Show alert.
fn prompt_alert(&self, msg: String, trusted: bool);
/// Ask Yes/No question.
fn prompt_yes_no(&self, msg: String, trusted: bool) -> PromptResult;
/// Ask Ok/Cancel question.
fn prompt_ok_cancel(&self, msg: String, trusted: bool) -> PromptResult;
/// Ask for string
fn prompt_input(&self, msg: String, default: String, trusted: bool) -> Option<String>;
/// Page starts loading.
/// "Reload button" should be disabled.
/// "Stop button" should be enabled.
/// Throbber starts spinning.
fn on_load_started(&self);
/// Page has loaded.
/// "Reload button" should be enabled.
/// "Stop button" should be disabled.
/// Throbber stops spinning.
fn on_load_ended(&self);
/// Page title has changed.
fn on_title_changed(&self, title: String);
/// Allow Navigation.
fn on_allow_navigation(&self, url: String) -> bool;
/// Page URL has changed.
fn on_url_changed(&self, url: String);
/// Back/forward state has changed.
/// Back/forward buttons need to be disabled/enabled.
fn on_history_changed(&self, can_go_back: bool, can_go_forward: bool);
/// Page animation state has changed. If animating, it's recommended
/// that the embedder doesn't wait for the wake function to be called
/// to call perform_updates. Usually, it means doing:
/// while true { servo.perform_updates() }. This will end up calling flush
/// which will call swap_buffer which will be blocking long enough to limit
/// drawing at 60 FPS.
/// If not animating, call perform_updates only when needed (when the embedder
/// has events for Servo, or Servo has woken up the embedder event loop via
/// EventLoopWaker).
fn on_animating_changed(&self, animating: bool);
/// Servo finished shutting down.
fn on_shutdown_complete(&self);
/// A text input is focused.
fn on_ime_state_changed(&self, show: bool);
/// Gets system clipboard contents.
fn get_clipboard_contents(&self) -> Option<String>;
/// Sets system clipboard contents.
fn set_clipboard_contents(&self, contents: String);
/// Called when we get the media session metadata.
fn on_media_session_metadata(&self, title: String, artist: String, album: String);
/// Called when the media session playback state changes.
fn on_media_session_playback_state_change(&self, state: MediaSessionPlaybackState);
/// Called when the media session position state is set.
fn on_media_session_set_position_state(&self, duration: f64, position: f64, playback_rate: f64);
}
pub struct ServoGlue {
servo: Servo<ServoWindowCallbacks>,
batch_mode: bool,
callbacks: Rc<ServoWindowCallbacks>,
/// id of the top level browsing context. It is unique as tabs
/// are not supported yet. None until created.
browser_id: Option<BrowserId>,
// A rudimentary stack of "tabs".
// EmbedderMsg::BrowserCreated will push onto it.
// EmbedderMsg::CloseBrowser will pop from it,
// and exit if it is empty afterwards.
browsers: Vec<BrowserId>,
events: Vec<WindowEvent>,
current_url: Option<ServoUrl>,
}
pub fn servo_version() -> String {
servo::config::servo_version()
}
/// Test if a url is valid.
pub fn is_uri_valid(url: &str) -> bool {
info!("load_uri: {}", url);
ServoUrl::parse(url).is_ok()
}
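// e.g. (illustrative) is_uri_valid("https://servo.org") == true, while
// is_uri_valid("not a url") == false because ServoUrl::parse rejects it.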
/// Initialize Servo. At that point, we need a valid GL context.
/// In the future, this will be done in multiple steps.
pub fn init(
mut init_opts: InitOptions,
gl: Rc<dyn gl::Gl>,
waker: Box<dyn EventLoopWaker>,
callbacks: Box<dyn HostTrait>,
) -> Result<(), &'static str> {
resources::set(Box::new(ResourceReaderInstance::new()));
let mut args = mem::replace(&mut init_opts.args, vec![]);
if !args.is_empty() {
// opts::from_cmdline_args expects the first argument to be the binary name.
args.insert(0, "servo".to_string());
set_pref!(
gfx.subpixel_text_antialiasing.enabled,
init_opts.enable_subpixel_text_antialiasing
);
opts::from_cmdline_args(Options::new(), &args);
}
let embedder_url = init_opts.url.as_ref().and_then(|s| ServoUrl::parse(s).ok());
let cmdline_url = opts::get().url.clone();
let pref_url = ServoUrl::parse(&pref!(shell.homepage)).ok();
let blank_url = ServoUrl::parse("about:blank").ok();
let url = embedder_url
.or(cmdline_url)
.or(pref_url)
.or(blank_url)
.unwrap();
gl.clear_color(1.0, 1.0, 1.0, 1.0);
gl.clear(gl::COLOR_BUFFER_BIT);
gl.finish();
let window_callbacks = Rc::new(ServoWindowCallbacks {
host_callbacks: callbacks,
gl: gl.clone(),
coordinates: RefCell::new(init_opts.coordinates),
density: init_opts.density,
gl_context_pointer: init_opts.gl_context_pointer,
native_display_pointer: init_opts.native_display_pointer,
});
let embedder_callbacks = Box::new(ServoEmbedderCallbacks {
vr_init: init_opts.vr_init,
xr_discovery: init_opts.xr_discovery,
waker,
gl: gl.clone(),
});
let servo = Servo::new(embedder_callbacks, window_callbacks.clone());
SERVO.with(|s| {
let mut servo_glue = ServoGlue {
servo,
batch_mode: false,
callbacks: window_callbacks,
browser_id: None,
browsers: vec![],
events: vec![],
current_url: Some(url.clone()),
};
let browser_id = BrowserId::new();
let _ = servo_glue.process_event(WindowEvent::NewBrowser(url, browser_id));
*s.borrow_mut() = Some(servo_glue);
});
Ok(())
}
pub fn deinit() {
SERVO.with(|s| s.replace(None).unwrap().deinit());
}
impl ServoGlue {
fn get_browser_id(&self) -> Result<BrowserId, &'static str> {
let browser_id = match self.browser_id {
Some(id) => id,
None => return Err("No BrowserId set yet."),
};
Ok(browser_id)
}
/// Request shutdown. Will call on_shutdown_complete.
pub fn request_shutdown(&mut self) -> Result<(), &'static str> {
self.process_event(WindowEvent::Quit)
}
/// Call after on_shutdown_complete
pub fn deinit(self) {
self.servo.deinit();
}
/// This is the Servo heartbeat. This needs to be called
/// every time wakeup is called, or when the embedder wants Servo
/// to act on its pending events.
pub fn perform_updates(&mut self) -> Result<(), &'static str> {
debug!("perform_updates");
let events = mem::replace(&mut self.events, Vec::new());
self.servo.handle_events(events);
let r = self.handle_servo_events();
debug!("done perform_updates");
r
}
/// In batch mode, Servo won't call perform_updates automatically.
/// This can be useful when the embedder wants to control when Servo
/// acts on its pending events. For example, if the embedder wants Servo
/// to act on the scroll events only at a certain time, not every time
/// scroll() is called.
pub fn set_batch_mode(&mut self, batch: bool) -> Result<(), &'static str> {
debug!("set_batch_mode");
self.batch_mode = batch;
Ok(())
}
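// Hedged sketch of the intended pattern (illustrative; `glue` stands for a
// ServoGlue instance, a hypothetical binding):
// glue.set_batch_mode(true)?;
// glue.scroll(0.0, -120.0, 400, 300)?; // queued only, not yet processed
// glue.perform_updates()?; // drain the queued events when convenient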
/// Load an URL. This needs to be a valid url.
pub fn load_uri(&mut self, url: &str) -> Result<(), &'static str> {
info!("load_uri: {}", url);
ServoUrl::parse(url)
.map_err(|_| "Can't parse URL")
.and_then(|url| {
let browser_id = self.get_browser_id()?;
let event = WindowEvent::LoadUrl(browser_id, url);
self.process_event(event)
})
}
/// Reload the page.
pub fn reload(&mut self) -> Result<(), &'static str> {
info!("reload");
let browser_id = self.get_browser_id()?;
let event = WindowEvent::Reload(browser_id);
self.process_event(event)
}
/// Redraw the page.
pub fn refresh(&mut self) -> Result<(), &'static str> {
info!("refresh");
self.process_event(WindowEvent::Refresh)
}
/// Stop loading the page.
pub fn stop(&mut self) -> Result<(), &'static str> {
warn!("TODO can't stop won't stop");
Ok(())
}
/// Go back in history.
pub fn go_back(&mut self) -> Result<(), &'static str> {
info!("go_back");
let browser_id = self.get_browser_id()?;
let event = WindowEvent::Navigation(browser_id, TraversalDirection::Back(1));
self.process_event(event)
}
/// Go forward in history.
pub fn go_forward(&mut self) -> Result<(), &'static str> {
info!("go_forward");
let browser_id = self.get_browser_id()?;
let event = WindowEvent::Navigation(browser_id, TraversalDirection::Forward(1));
self.process_event(event)
}
/// Let Servo know that the window has been resized.
pub fn resize(&mut self, coordinates: Coordinates) -> Result<(), &'static str> {
info!("resize");
*self.callbacks.coordinates.borrow_mut() = coordinates;
self.process_event(WindowEvent::Resize)
}
/// Start scrolling.
/// x/y are scroll coordinates.
/// dx/dy are scroll deltas.
pub fn scroll_start(&mut self, dx: f32, dy: f32, x: i32, y: i32) -> Result<(), &'static str> {
let delta = Vector2D::new(dx, dy);
let scroll_location = ScrollLocation::Delta(delta);
let event = WindowEvent::Scroll(scroll_location, Point2D::new(x, y), TouchEventType::Down);
self.process_event(event)
}
/// Scroll.
/// x/y are scroll coordinates.
/// dx/dy are scroll deltas.
pub fn scroll(&mut self, dx: f32, dy: f32, x: i32, y: i32) -> Result<(), &'static str> {
let delta = Vector2D::new(dx, dy);
let scroll_location = ScrollLocation::Delta(delta);
let event = WindowEvent::Scroll(scroll_location, Point2D::new(x, y), TouchEventType::Move);
self.process_event(event)
}
/// End scrolling.
/// x/y are scroll coordinates.
/// dx/dy are scroll deltas.
pub fn scroll_end(&mut self, dx: f32, dy: f32, x: i32, y: i32) -> Result<(), &'static str> {
let delta = Vector2D::new(dx, dy);
let scroll_location = ScrollLocation::Delta(delta);
let event = WindowEvent::Scroll(scroll_location, Point2D::new(x, y), TouchEventType::Up);
self.process_event(event)
}
/// Touch event: press down
pub fn touch_down(&mut self, x: f32, y: f32, pointer_id: i32) -> Result<(), &'static str> {
let event = WindowEvent::Touch(
TouchEventType::Down,
TouchId(pointer_id),
Point2D::new(x as f32, y as f32),
);
self.process_event(event)
}
/// Touch event: move touching finger
pub fn touch_move(&mut self, x: f32, y: f32, pointer_id: i32) -> Result<(), &'static str> {
let event = WindowEvent::Touch(
TouchEventType::Move,
TouchId(pointer_id),
Point2D::new(x as f32, y as f32),
);
self.process_event(event)
}
/// Touch event: Lift touching finger
pub fn touch_up(&mut self, x: f32, y: f32, pointer_id: i32) -> Result<(), &'static str> {
let event = WindowEvent::Touch(
TouchEventType::Up,
TouchId(pointer_id),
Point2D::new(x as f32, y as f32),
);
self.process_event(event)
}
/// Cancel touch event
pub fn touch_cancel(&mut self, x: f32, y: f32, pointer_id: i32) -> Result<(), &'static str> {
let event = WindowEvent::Touch(
TouchEventType::Cancel,
TouchId(pointer_id),
Point2D::new(x as f32, y as f32),
);
self.process_event(event)
}
/// Register a mouse movement.
pub fn mouse_move(&mut self, x: f32, y: f32) -> Result<(), &'static str> {
let point = Point2D::new(x, y);
let event = WindowEvent::MouseWindowMoveEventClass(point);
self.process_event(event)
}
/// Register a mouse button press.
pub fn mouse_down(&mut self, x: f32, y: f32, button: MouseButton) -> Result<(), &'static str> {
let point = Point2D::new(x, y);
let event = WindowEvent::MouseWindowEventClass(MouseWindowEvent::MouseDown(button, point));
self.process_event(event)
}
/// Register a mouse button release.
pub fn mouse_up(&mut self, x: f32, y: f32, button: MouseButton) -> Result<(), &'static str> {
let point = Point2D::new(x, y);
let event = WindowEvent::MouseWindowEventClass(MouseWindowEvent::MouseUp(button, point));
self.process_event(event)
}
/// Start pinchzoom.
/// x/y are pinch origin coordinates.
pub fn pinchzoom_start(&mut self, factor: f32, _x: u32, _y: u32) -> Result<(), &'static str> {
self.process_event(WindowEvent::PinchZoom(factor))
}
/// Pinchzoom.
/// x/y are pinch origin coordinates.
pub fn pinchzoom(&mut self, factor: f32, _x: u32, _y: u32) -> Result<(), &'static str> {
self.process_event(WindowEvent::PinchZoom(factor))
}
/// End pinchzoom.
/// x/y are pinch origin coordinates.
pub fn pinchzoom_end(&mut self, factor: f32, _x: u32, _y: u32) -> Result<(), &'static str> {
self.process_event(WindowEvent::PinchZoom(factor))
}
/// Perform a click.
pub fn click(&mut self, x: f32, y: f32) -> Result<(), &'static str> {
let mouse_event = MouseWindowEvent::Click(MouseButton::Left, Point2D::new(x, y));
let event = WindowEvent::MouseWindowEventClass(mouse_event);
self.process_event(event)
}
pub fn key_down(&mut self, key: Key) -> Result<(), &'static str> {
let key_event = KeyboardEvent {
state: KeyState::Down,
key,
..KeyboardEvent::default()
};
self.process_event(WindowEvent::Keyboard(key_event))
}
pub fn key_up(&mut self, key: Key) -> Result<(), &'static str> {
let key_event = KeyboardEvent {
state: KeyState::Up,
key,
..KeyboardEvent::default()
};
self.process_event(WindowEvent::Keyboard(key_event))
}
pub fn media_session_action(
&mut self,
action: MediaSessionActionType,
) -> Result<(), &'static str> {
info!("Media session action {:?}", action);
self.process_event(WindowEvent::MediaSessionAction(action))
}
pub fn change_visibility(&mut self, visible: bool) -> Result<(), &'static str> {
info!("change_visibility");
if let Ok(id) = self.get_browser_id() {
let event = WindowEvent::ChangeBrowserVisibility(id, visible);
self.process_event(event)
} else {
// Ignore visibility change if no browser has been created yet.
Ok(())
}
}
fn process_event(&mut self, event: WindowEvent) -> Result<(), &'static str> {
self.events.push(event);
if !self.batch_mode {
self.perform_updates()
} else {
Ok(())
}
}
fn handle_servo_events(&mut self) -> Result<(), &'static str> {
for (browser_id, event) in self.servo.get_events() {
match event {
EmbedderMsg::ChangePageTitle(title) => {
let fallback_title: String = if let Some(ref current_url) = self.current_url {
current_url.to_string()
} else {
String::from("Untitled")
};
let title = match title {
Some(ref title) if !title.is_empty() => &**title,
_ => &fallback_title,
};
let title = format!("{} - Servo", title);
self.callbacks.host_callbacks.on_title_changed(title);
},
EmbedderMsg::AllowNavigationRequest(pipeline_id, url) => {
if let Some(_browser_id) = browser_id {
let data: bool = self
.callbacks
.host_callbacks
.on_allow_navigation(url.to_string());
let window_event = WindowEvent::AllowNavigationResponse(pipeline_id, data);
self.events.push(window_event);
let _ = self.perform_updates();
}
},
EmbedderMsg::HistoryChanged(entries, current) => {
let can_go_back = current > 0;
let can_go_forward = current < entries.len() - 1;
self.callbacks
.host_callbacks
.on_history_changed(can_go_back, can_go_forward);
self.callbacks
.host_callbacks
.on_url_changed(entries[current].clone().to_string());
self.current_url = Some(entries[current].clone());
},
EmbedderMsg::LoadStart => {
self.callbacks.host_callbacks.on_load_started();
},
EmbedderMsg::LoadComplete => {
self.callbacks.host_callbacks.on_load_ended();
},
EmbedderMsg::GetSelectedBluetoothDevice(_, sender) => {
let _ = sender.send(None);
},
EmbedderMsg::AllowUnload(sender) => {
let _ = sender.send(true);
},
EmbedderMsg::Prompt(definition, origin) => {
let cb = &self.callbacks.host_callbacks;
let trusted = origin == PromptOrigin::Trusted;
let res = match definition {
PromptDefinition::Alert(message, sender) => {
sender.send(cb.prompt_alert(message, trusted))
},
PromptDefinition::OkCancel(message, sender) => {
sender.send(cb.prompt_ok_cancel(message, trusted))
},
PromptDefinition::YesNo(message, sender) => {
sender.send(cb.prompt_yes_no(message, trusted))
},
PromptDefinition::Input(message, default, sender) => {
sender.send(cb.prompt_input(message, default, trusted))
},
};
if let Err(e) = res {
let reason = format!("Failed to send Prompt response: {}", e);
self.events.push(WindowEvent::SendError(browser_id, reason));
}
},
EmbedderMsg::AllowOpeningBrowser(response_chan) => {
// Note: would be a place to handle pop-ups config.
// see Step 7 of #the-rules-for-choosing-a-browsing-context-given-a-browsing-context-name
if let Err(e) = response_chan.send(true) {
warn!("Failed to send AllowOpeningBrowser response: {}", e);
};
},
EmbedderMsg::BrowserCreated(new_browser_id) => {
// TODO: properly handle a new "tab"
self.browsers.push(new_browser_id);
if self.browser_id.is_none() {
self.browser_id = Some(new_browser_id);
}
self.events.push(WindowEvent::SelectBrowser(new_browser_id));
},
EmbedderMsg::GetClipboardContents(sender) => {
let contents = self.callbacks.host_callbacks.get_clipboard_contents();
let _ = sender.send(contents.unwrap_or("".to_owned()));
},
EmbedderMsg::SetClipboardContents(text) => {
self.callbacks.host_callbacks.set_clipboard_contents(text);
},
EmbedderMsg::CloseBrowser => {
// TODO: close the appropriate "tab".
let _ = self.browsers.pop();
if let Some(prev_browser_id) = self.browsers.last() {
self.browser_id = Some(*prev_browser_id);
self.events
.push(WindowEvent::SelectBrowser(*prev_browser_id));
} else {
self.events.push(WindowEvent::Quit);
}
},
EmbedderMsg::Shutdown => {
self.callbacks.host_callbacks.on_shutdown_complete();
},
EmbedderMsg::PromptPermission(prompt, sender) => {
let message = match prompt {
PermissionPrompt::Request(permission_name) => {
format!("Do you want to grant permission for {:?}?", permission_name)
},
PermissionPrompt::Insecure(permission_name) => {
format!(
"The {:?} feature is only safe to use in secure context, but servo can't guarantee\n\
that the current context is secure. Do you want to proceed and grant permission?",
permission_name
)
},
};
let result = match self.callbacks.host_callbacks.prompt_yes_no(message, true) {
PromptResult::Primary => PermissionRequest::Granted,
PromptResult::Secondary | PromptResult::Dismissed => {
PermissionRequest::Denied
},
};
let _ = sender.send(result);
},
EmbedderMsg::ShowIME(..) => {
self.callbacks.host_callbacks.on_ime_state_changed(true);
},
EmbedderMsg::HideIME => {
self.callbacks.host_callbacks.on_ime_state_changed(false);
},
EmbedderMsg::MediaSessionEvent(event) => {
match event {
MediaSessionEvent::SetMetadata(metadata) => {
self.callbacks.host_callbacks.on_media_session_metadata(
metadata.title,
metadata.artist,
metadata.album,
)
},
MediaSessionEvent::PlaybackStateChange(state) => self
.callbacks
.host_callbacks
.on_media_session_playback_state_change(state),
MediaSessionEvent::SetPositionState(position_state) => self
.callbacks
.host_callbacks
.on_media_session_set_position_state(
position_state.duration,
position_state.position,
position_state.playback_rate,
),
};
},
EmbedderMsg::Status(..) |
EmbedderMsg::SelectFiles(..) |
EmbedderMsg::MoveTo(..) |
EmbedderMsg::ResizeTo(..) |
EmbedderMsg::Keyboard(..) |
EmbedderMsg::SetCursor(..) |
EmbedderMsg::NewFavicon(..) |
EmbedderMsg::HeadParsed |
EmbedderMsg::SetFullscreenState(..) |
EmbedderMsg::Panic(..) |
EmbedderMsg::ReportProfile(..) => {},
}
}
Ok(())
}
}
struct ServoEmbedderCallbacks {
waker: Box<dyn EventLoopWaker>,
xr_discovery: Option<webxr::Discovery>,
vr_init: VRInitOptions,
#[allow(unused)]
gl: Rc<dyn gl::Gl>,
}
struct ServoWindowCallbacks {
gl: Rc<dyn gl::Gl>,
host_callbacks: Box<dyn HostTrait>,
coordinates: RefCell<Coordinates>,
density: f32,
gl_context_pointer: Option<*const c_void>,
native_display_pointer: Option<*const c_void>,
}
impl EmbedderMethods for ServoEmbedderCallbacks {
fn register_vr_services(
&mut self,
services: &mut VRServiceManager,
heartbeats: &mut Vec<Box<dyn VRMainThreadHeartbeat>>,
) {
debug!("EmbedderMethods::register_vrexternal");
match mem::replace(&mut self.vr_init, VRInitOptions::None) {
VRInitOptions::None => {},
VRInitOptions::VRExternal(ptr) => {
services.register_vrexternal(VRExternalShmemPtr::new(ptr));
},
VRInitOptions::VRService(service, heartbeat) => {
services.register(service);
heartbeats.push(heartbeat);
},
}
}
#[cfg(feature = "uwp")]
fn register_webxr(
&mut self,
registry: &mut webxr::MainThreadRegistry,
executor: WebGlExecutor,
surface_providers: SurfaceProviders,
) {
debug!("EmbedderMethods::register_xr");
assert!(
self.xr_discovery.is_none(),
"UWP builds should not be initialized with a WebXR Discovery object"
);
struct ProviderRegistration(SurfaceProviders);
impl webxr::openxr::SurfaceProviderRegistration for ProviderRegistration {
fn register(&self, id: webxr_api::SessionId, provider: servo::canvas::SurfaceProvider) {
self.0.lock().unwrap().insert(id, provider);
}
fn clone(&self) -> Box<dyn webxr::openxr::SurfaceProviderRegistration> {
Box::new(ProviderRegistration(self.0.clone()))
}
}
struct GlThread(WebGlExecutor);
impl webxr::openxr::GlThread for GlThread {
fn execute(&self, runnable: Box<dyn FnOnce() + Send>) {
let _ = self.0.send(runnable);
}
fn clone(&self) -> Box<dyn webxr::openxr::GlThread> {
Box::new(GlThread(self.0.clone()))
}
}
let discovery = webxr::openxr::OpenXrDiscovery::new(
Box::new(GlThread(executor)),
Box::new(ProviderRegistration(surface_providers)),
);
registry.register(discovery);
}
#[cfg(not(feature = "uwp"))]
fn register_webxr(
&mut self,
registry: &mut webxr::MainThreadRegistry,
_executor: WebGlExecutor,
_surface_provider_registration: SurfaceProviders,
) {
debug!("EmbedderMethods::register_xr");
if let Some(discovery) = self.xr_discovery.take() {
registry.register(discovery);
}
}
fn create_event_loop_waker(&mut self) -> Box<dyn EventLoopWaker> {
debug!("EmbedderMethods::create_event_loop_waker");
self.waker.clone()
}
}
impl WindowMethods for ServoWindowCallbacks {
fn make_gl_context_current(&self) {
debug!("WindowMethods::prepare_for_composite");
self.host_callbacks.make_current();
}
fn present(&self) {
debug!("WindowMethods::present");
self.host_callbacks.flush();
}
fn gl(&self) -> Rc<dyn gl::Gl> {
debug!("WindowMethods::gl");
self.gl.clone()
}
fn set_animation_state(&self, state: AnimationState) {
debug!("WindowMethods::set_animation_state: {:?}", state);
self.host_callbacks
.on_animating_changed(state == AnimationState::Animating);
}
fn get_coordinates(&self) -> EmbedderCoordinates {
let coords = self.coordinates.borrow();
EmbedderCoordinates {
viewport: coords.viewport,
framebuffer: coords.framebuffer,
window: (coords.viewport.size, Point2D::new(0, 0)),
screen: coords.viewport.size,
screen_avail: coords.viewport.size,
hidpi_factor: Scale::new(self.density),
}
}
fn get_gl_context(&self) -> MediaPlayerContext::GlContext {
match self.gl_context_pointer {
Some(context) => MediaPlayerContext::GlContext::Egl(context as usize),
None => MediaPlayerContext::GlContext::Unknown,
}
}
fn get_native_display(&self) -> MediaPlayerContext::NativeDisplay {
match self.native_display_pointer {
Some(display) => MediaPlayerContext::NativeDisplay::Egl(display as usize),
None => MediaPlayerContext::NativeDisplay::Unknown,
}
}
fn get_gl_api(&self) -> MediaPlayerContext::GlApi {
MediaPlayerContext::GlApi::Gles2
}
}
struct ResourceReaderInstance;
impl ResourceReaderInstance {
fn new() -> ResourceReaderInstance {
ResourceReaderInstance
}
}<|fim▁hole|>
impl ResourceReaderMethods for ResourceReaderInstance {
fn read(&self, res: Resource) -> Vec<u8> {
Vec::from(match res {
Resource::Preferences => &include_bytes!("../../../../resources/prefs.json")[..],
Resource::HstsPreloadList => {
&include_bytes!("../../../../resources/hsts_preload.json")[..]
},
Resource::SSLCertificates => &include_bytes!("../../../../resources/certs")[..],
Resource::BadCertHTML => &include_bytes!("../../../../resources/badcert.html")[..],
Resource::NetErrorHTML => &include_bytes!("../../../../resources/neterror.html")[..],
Resource::UserAgentCSS => &include_bytes!("../../../../resources/user-agent.css")[..],
Resource::ServoCSS => &include_bytes!("../../../../resources/servo.css")[..],
Resource::PresentationalHintsCSS => {
&include_bytes!("../../../../resources/presentational-hints.css")[..]
},
Resource::QuirksModeCSS => &include_bytes!("../../../../resources/quirks-mode.css")[..],
Resource::RippyPNG => &include_bytes!("../../../../resources/rippy.png")[..],
Resource::DomainList => &include_bytes!("../../../../resources/public_domains.txt")[..],
Resource::BluetoothBlocklist => {
&include_bytes!("../../../../resources/gatt_blocklist.txt")[..]
},
Resource::MediaControlsCSS => {
&include_bytes!("../../../../resources/media-controls.css")[..]
},
Resource::MediaControlsJS => {
&include_bytes!("../../../../resources/media-controls.js")[..]
},
})
}
fn sandbox_access_files(&self) -> Vec<PathBuf> {
vec![]
}
fn sandbox_access_files_dirs(&self) -> Vec<PathBuf> {
vec![]
}
}<|fim▁end|> | |
<|file_name|>default_type_params_xc.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(default_type_params)];
<|fim▁hole|>pub struct FakeVec<T, A = FakeHeap>;<|fim▁end|> | pub struct Heap;
pub struct FakeHeap;
|
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import sys
from os import sep
from PyQt4.QtGui import QApplication
from src import mainwindow
if __name__ == "__main__":
print "los gehts"
app = QApplication(sys.argv)
window = mainwindow.CCMainWindow()<|fim▁hole|><|fim▁end|> | sys.exit(app.exec_()) |
<|file_name|>api.go<|end_file_name|><|fim▁begin|>// Package github provides an API client for github.com
//
// Copyright (C) 2014 Yohei Sasaki
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and<|fim▁hole|>
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"time"
)
const BaseUrl = "https://api.github.com"
type Client struct {
RateLimit int
RateLimitRemaining int
RateLimitReset time.Time
baseUrl string
client *http.Client
}
func NewClient(c *http.Client) *Client {
return &Client{
baseUrl: BaseUrl,
client: c,
}
}
type MarkdownMode string
var Markdown = MarkdownMode("markdown")
var Gfm = MarkdownMode("gfm")
type ApiError struct {
Status int
Body string
*Client
}
func (e *ApiError) Error() string {
return fmt.Sprintf("Github API Error: %d - %v", e.Status, e.Body)
}
func NewApiError(status int, body string, c *Client) *ApiError {
return &ApiError{Status: status, Body: body, Client: c}
}
func IsApiError(err error) bool {
switch err.(type) {
case *ApiError:
return true
default:
return false
}
}
// Markdown renders the given text by calling the /markdown API.
// See: https://developer.github.com/v3/markdown/
func (g *Client) Markdown(text string, mode MarkdownMode, context string) (string, error) {
url := g.baseUrl + "/markdown"
body := map[string]string{
"text": text,
"mode": string(mode),
"context": context,
}
buff, _ := json.Marshal(body)
resp, err := g.client.Post(url, "application/json", bytes.NewBuffer(buff))
if err != nil {
return "", err
}
defer resp.Body.Close()
buff, err = ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
g.updateRateLimit(resp)
if resp.StatusCode != http.StatusOK {
return "", NewApiError(resp.StatusCode, string(buff), g)
}
return string(buff), nil
}
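// Illustrative usage sketch (not part of this package): render GitHub
// Flavored Markdown and honor the rate limit. The timeout and the
// "user/repo" context below are hypothetical values.
//
//	c := NewClient(&http.Client{Timeout: 10 * time.Second})
//	html, err := c.Markdown("**hello**", Gfm, "user/repo")
//	if err != nil {
//	    if IsApiError(err) && c.LimitExceeded() {
//	        // wait until c.RateLimitReset before retrying
//	    }
//	    return
//	}
//	fmt.Println(html)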
// LimitExceeded reports whether the client has exhausted its rate limit.
func (g *Client) LimitExceeded() bool {
if g.RateLimit == 0 && g.RateLimitRemaining == 0 { // initial value
return false
}
return g.RateLimitRemaining == 0
}
func (g *Client) updateRateLimit(resp *http.Response) {
limit := resp.Header.Get("X-Ratelimit-Limit")
i, err := strconv.ParseInt(limit, 10, 32)
if err == nil {
g.RateLimit = int(i)
}
remaining := resp.Header.Get("X-Ratelimit-Remaining")
i, err = strconv.ParseInt(remaining, 10, 32)
if err == nil {
g.RateLimitRemaining = int(i)
}
reset := resp.Header.Get("X-Ratelimit-Reset")
i, err = strconv.ParseInt(reset, 10, 32)
if err == nil {
g.RateLimitReset = time.Unix(i, 0)
}
}<|fim▁end|> | // limitations under the License.
//
package github |
<|file_name|>response.py<|end_file_name|><|fim▁begin|>__author__ = 'Joe Linn'
#import pylastica
import pylastica.response
import pylastica.bulk.action
class Response(pylastica.response.Response):
def __init__(self, response_data, action, op_type):
"""
@param response_data:
@type response_data: dict or str
@param action:
@type action: pylastica.bulk.action.Action
@param op_type: bulk operation type
@type op_type: str
"""
assert isinstance(action, pylastica.bulk.action.Action), "action must be an instance of Action: %r" % action
super(Response, self).__init__(response_data)
self._action = action
self._op_type = op_type
@property
def action(self):
"""
@return:
@rtype: pylastica.bulk.action.Action
"""
return self._action
@property
def op_type(self):<|fim▁hole|> @return:
@rtype: str
"""
return self._op_type<|fim▁end|> | """
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import Vue from 'vue';
import merge from 'element-ui/src/utils/merge';
import PopupManager from 'element-ui/src/utils/popup/popup-manager';
import getScrollBarWidth from '../scrollbar-width';
let idSeed = 1;
const transitions = [];
const hookTransition = (transition) => {
if (transitions.indexOf(transition) !== -1) return;
const getVueInstance = (element) => {
let instance = element.__vue__;
if (!instance) {
const textNode = element.previousSibling;
if (textNode.__vue__) {
instance = textNode.__vue__;
}
}
return instance;
};
Vue.transition(transition, {
afterEnter(el) {
const instance = getVueInstance(el);
if (instance) {
instance.doAfterOpen && instance.doAfterOpen();
}
},
afterLeave(el) {
const instance = getVueInstance(el);
if (instance) {
instance.doAfterClose && instance.doAfterClose();
}
}
});
};
let scrollBarWidth;
const getDOM = function(dom) {
if (dom.nodeType === 3) {
dom = dom.nextElementSibling || dom.nextSibling;
getDOM(dom);
}
return dom;
};
export default {
model: {
prop: 'visible',
event: 'visible-change'
},
props: {
visible: {
type: Boolean,
default: false
},
transition: {
type: String,
default: ''
},
openDelay: {},
closeDelay: {},
zIndex: {},
modal: {
type: Boolean,
default: false
},
modalFade: {
type: Boolean,
default: true
},
modalClass: {},
modalAppendToBody: {
type: Boolean,
default: false
},
lockScroll: {
type: Boolean,
default: true
},
closeOnPressEscape: {
type: Boolean,
default: false
},
closeOnClickModal: {
type: Boolean,
default: false
}
},
created() {
if (this.transition) {
hookTransition(this.transition);
}
},
beforeMount() {
this._popupId = 'popup-' + idSeed++;
PopupManager.register(this._popupId, this);
},
beforeDestroy() {
PopupManager.deregister(this._popupId);
PopupManager.closeModal(this._popupId);
if (this.modal && this.bodyOverflow !== null && this.bodyOverflow !== 'hidden') {
document.body.style.overflow = this.bodyOverflow;
document.body.style.paddingRight = this.bodyPaddingRight;
}
this.bodyOverflow = null;
this.bodyPaddingRight = null;
},
data() {
return {
opened: false,
bodyOverflow: null,
bodyPaddingRight: null,
rendered: false
};
},
watch: {
visible(val) {
if (val) {
if (this._opening) return;
if (!this.rendered) {
this.rendered = true;
Vue.nextTick(() => {
this.open();
});
} else {
this.open();
}
} else {
this.close();
}
}
},
methods: {
open(options) {
if (!this.rendered) {
this.rendered = true;
this.$emit('visible-change', true);
}
const props = merge({}, this.$props || this, options);
if (this._closeTimer) {
clearTimeout(this._closeTimer);
this._closeTimer = null;
}
clearTimeout(this._openTimer);
const openDelay = Number(props.openDelay);
if (openDelay > 0) {
this._openTimer = setTimeout(() => {
this._openTimer = null;
this.doOpen(props);
}, openDelay);
} else {
this.doOpen(props);
}
},
doOpen(props) {
if (this.$isServer) return;
if (this.willOpen && !this.willOpen()) return;
if (this.opened) return;
this._opening = true;
this.$emit('visible-change', true);
const dom = getDOM(this.$el);
const modal = props.modal;
const zIndex = props.zIndex;
if (zIndex) {
PopupManager.zIndex = zIndex;
}
if (modal) {
if (this._closing) {
PopupManager.closeModal(this._popupId);
this._closing = false;
}
PopupManager.openModal(this._popupId, PopupManager.nextZIndex(), this.modalAppendToBody ? undefined : dom, props.modalClass, props.modalFade);
if (props.lockScroll) {
if (!this.bodyOverflow) {
this.bodyPaddingRight = document.body.style.paddingRight;
this.bodyOverflow = document.body.style.overflow;
}
scrollBarWidth = getScrollBarWidth();
let bodyHasOverflow = document.documentElement.clientHeight < document.body.scrollHeight;
if (scrollBarWidth > 0 && bodyHasOverflow) {
document.body.style.paddingRight = scrollBarWidth + 'px';
}
document.body.style.overflow = 'hidden';
}
}
if (getComputedStyle(dom).position === 'static') {
dom.style.position = 'absolute';
}
dom.style.zIndex = PopupManager.nextZIndex();
this.opened = true;
this.onOpen && this.onOpen();
if (!this.transition) {
this.doAfterOpen();
}
},
doAfterOpen() {
this._opening = false;
},
close() {
if (this.willClose && !this.willClose()) return;
if (this._openTimer !== null) {
clearTimeout(this._openTimer);
this._openTimer = null;
}
clearTimeout(this._closeTimer);
const closeDelay = Number(this.closeDelay);
if (closeDelay > 0) {
this._closeTimer = setTimeout(() => {
this._closeTimer = null;
this.doClose();
}, closeDelay);
} else {
this.doClose();
}
},
doClose() {
this.$emit('visible-change', false);
this._closing = true;
this.onClose && this.onClose();
if (this.lockScroll) {
setTimeout(() => {
if (this.modal && this.bodyOverflow !== 'hidden') {<|fim▁hole|> document.body.style.paddingRight = this.bodyPaddingRight;
}
this.bodyOverflow = null;
this.bodyPaddingRight = null;
}, 200);
}
this.opened = false;
if (!this.transition) {
this.doAfterClose();
}
},
doAfterClose() {
PopupManager.closeModal(this._popupId);
this._closing = false;
}
}
};
export {
PopupManager
};<|fim▁end|> | document.body.style.overflow = this.bodyOverflow; |
<|file_name|>msstats_plots_wrapper.py<|end_file_name|><|fim▁begin|>import os, sys, re
import optparse
import shutil
import pandas
import numpy
import gc
import subprocess
#####################################
#This is a script to combine the output reports from
#Skyline, in preparation for MSstats! Let's get started.
#
#VERSION 0.70A
version="0.70A"
#DATE: 10/11/2016
date="10/11/2016"
#####################################
print "-----------------------------------------------------------------------"
print "Welcome to the MSstats wrapper for Galaxy, Wohlschlegel Lab UCLA"
print "Written by William Barshop"
print "Version: ",version
print "Date: ",date
basedir=os.getcwd()
####################################
#Argument parsing! So much fun!
#We'll use OptParse even though some
#people really rave about argparse...
#
#
# NB: With Optparse, if an option is
# not specified, it will take a
# value of None
####################################
parser = optparse.OptionParser()
parser.add_option("--experiment_file",action="store",type="string",dest="experiment_file")
parser.add_option("--folder",action="store",type="string",dest="operation_folder",default=".")
parser.add_option("--msstats-image-RData",action="store",type="string",dest="image_RData")
parser.add_option("--msstats-comparison-csv",action="store",type="string",dest="comparison_csv")
################# OUTPUTS ################################
parser.add_option("--comparisonPlotOutput",action="store",type="string",dest="comparisonPlotOutput")
parser.add_option("--heatmapOutput",action="store",type="string",dest="heatmapOutput")
parser.add_option("--volcanoPlotOutput",action="store",type="string",dest="volcanoPlotOutput")
parser.add_option("--RScriptOutput",action="store",type="string",dest="RScriptOutput")
################## BELOW THIS ARE PLOTTING OPTIONS ############################## These are actually all going to be moved into a separate tool
#general options
parser.add_option("--significance",action="store",type="float",dest="significance") # For the volcano plots...
parser.add_option("--FCthreshold",action="store",type="float",dest="FCthreshold") # FC threshold For the volcano plots...
parser.add_option("--ylimUp",action="store",type="float",dest="ylimUp") # ylimUp threshold for the plots
parser.add_option("--ylimDown",action="store",type="float",dest="ylimDown") # ylimDown threshold for plots
parser.add_option("--xlimUp",action="store",type="float",dest="xlimUp") # xlimUp threshold for Volcano plots
parser.add_option("--autoAxes",action="store_true",dest="autoAxes")
parser.add_option("--xAxisSize",action="store",type="int",dest="xAxisSize")
parser.add_option("--yAxisSize",action="store",type="int",dest="yAxisSize")
parser.add_option("--width",action="store",type="int",dest="width",default=10)
parser.add_option("--height",action="store",type="int",dest="height",default=10)
#HeatMap
parser.add_option("--numProtein",action="store",type="int",dest="numProtein",default=180) # Number of proteins per heatmap... Max is 180
parser.add_option("--clustering",action="store",type="string",dest="clustering",default="protein") # clustering type for heatmap... Can be "protein", "comparison", "both"
#VolcanoPlot
parser.add_option("--dotSize",action="store",type="int",dest="dotSize",default=3)#volcanoplot
parser.add_option("--textSize",action="store",type="int",dest="textSize",default=4)#volcanoplot
parser.add_option("--proteinName",action="store_true",dest="proteinName") # On volcano plot, draw protein names?
parser.add_option("--legendSize",action="store",type="int",dest="legendSize",default=7)
(options,args) = parser.parse_args()
if options.autoAxes:
xlimUp="FALSE"
ylimUp="FALSE"
ylimDown="FALSE"
else:<|fim▁hole|> xlimUp=options.xlimUp
ylimUp=options.ylimUp
ylimDown=options.ylimDown
if options.proteinName:
proteinName="TRUE"
else:
proteinName="FALSE"
print "Now we're going to prepare the R script for MSstats graphing..."
#Let's start by reading in the experiment structure.
group_information = pandas.read_csv(options.experiment_file,sep='\t')
comparison_df = pandas.read_csv(options.comparison_csv)
with open("MSstats_Script.R",'wb') as script_writer:
script_writer.write("library(MSstats)\n")
script_writer.write("setwd(\""+str(basedir)+"\")\n") #We're going to set the current directory...
script_writer.write("load(\""+str(options.image_RData)+"\")\n")
#script_writer.write("comparisonResult<-read.csv(\""+str(options.comparison_csv)+"\")\n") #We will load in the input CSV file! (In this case by absolute path, though that's not necessary...)
#script_writer.write("write.csv(comparisonResult$ComparisonResult,file=\"comparisonResult_output.csv\")\n")
#OKAY! So, now we're going to write out the plots... This may take a bit...
#So, first, let's check if we can output a heatmap (number of comparisons >2)
if len(comparison_df['Label'].unique().tolist())>=2:
#script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,type=\"Heatmap\", logBase.pvalue=2, sig="+str(options.significance)+", FCcutoff="+str(options.FCthreshold)+",ylimUp="+str(ylimUp)+",ylimDown="+str(ylimDown)+",xlimUp="+str(xlimUp)+",x.axis.size="+str(options.xAxisSize)+",y.axis.size="+str(options.yAxisSize)+",numProtein="+str(options.numProtein)+",clustering=\""+options.clustering+"\",width="+str(options.width)+",height="+str(options.height)+")\n") #add width, height, address
script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,type=\"Heatmap\", logBase.pvalue=2,x.axis.size="+str(options.xAxisSize)+",y.axis.size="+str(options.yAxisSize)+",numProtein="+str(options.numProtein)+",clustering=\""+options.clustering+"\",width="+str(options.width)+",height="+str(options.height)+")\n") #add width, height, address
#pass
script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,ProteinName=\""+proteinName+"\",type=\"VolcanoPlot\", logBase.pvalue=2, sig="+str(options.significance)+", FCcutoff="+str(options.FCthreshold)+",ylimUp="+str(ylimUp)+",ylimDown="+str(ylimDown)+",xlimUp="+str(xlimUp)+",x.axis.size="+str(options.xAxisSize)+",dot.size="+str(options.dotSize)+",text.size="+str(options.textSize)+",legend.size="+str(options.legendSize)+",width="+str(options.width)+",height="+str(options.height)+",which.Comparison=\"all\")\n")
script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,type=\"ComparisonPlot\", sig="+str(options.significance)+",x.axis.size="+str(options.xAxisSize)+",dot.size="+str(options.dotSize)+",legend.size="+str(options.legendSize)+",width="+str(options.width)+",height="+str(options.height)+",which.Comparison=\"all\")\n")
#OKAY.... The R Script has been written!
#We're going to execute the R script now!
print "Copying RScript back to Galaxy..."
shutil.copy('MSstats_Script.R',options.RScriptOutput)
subprocess.check_call(['Rscript', 'MSstats_Script.R'],shell=False,stderr=sys.stdout.fileno())
print "Moving files to final output locations...."
#print os.listdir(os.getcwd())
#shutil.copy('TMP_dataProcess_output.csv',options.processedOutput)
#shutil.copy('comparisonResult_output.csv',options.comparisonOutput)
shutil.copy('VolcanoPlot.pdf',options.volcanoPlotOutput)
if len(comparison_df['Label'].unique().tolist())>2:
shutil.copy('Heatmap.pdf',options.heatmapOutput)
shutil.copy('ComparisonPlot.pdf',options.comparisonPlotOutput)
print "All done!"<|fim▁end|> | |
<|file_name|>discovery.py<|end_file_name|><|fim▁begin|>import threading
import select
import time
import socket
pyb_present = False
try:
import pybonjour
pyb_present = True
except ImportError:
pyb_present = False
TIMEOUT = 5
discovered_lock = threading.Semaphore()
discovered = []
discovered_event = threading.Event()
discovery_running = False
def discover(type = "Manual", name = None):
if type == "Manual":
return discover_Manual_TCP()
elif type == "mDNS":
if pyb_present:
return discover_mDNS(name)
else:
print "mDNS discovery not possible"
return []
def discover_Manual_TCP():
print "Manual Discovery. Enter details:"
ssname = raw_input("SmartSpace name >")
ip = raw_input("SmartSpace IP Address >" )
port = raw_input("SmartSpace Port >" )
print ssname, ip, port
rtuple = ( ssname, ("TCP", (ip,int(port)) ))
return rtuple
def discover_mDNS(name = None, reg_type = "_kspace._tcp"):
global discovery_running
if not discovery_running:
# print "Starting mDNS discovery"
d = mDNS_Discovery(reg_type)
d.start()
discovery_running = True
if not name:
discovered_lock.acquire()
global discovered
tmp = []
print discovered
for i in discovered:
tmp.append(i)
discovered_lock.release()
print tmp
return tmp
else:
discovered_lock.acquire()
# print discovered
tmp = filter(lambda x: x[0] == name, discovered)
discovered_lock.release()
print tmp
return tmp
class mDNS_Discovery(threading.Thread):
def __init__(self, reg_type):
global discovery_running
discovery_running = True
self.resolved = []
self.discovered = {}
self.reg_type = reg_type
threading.Thread.__init__(self)
def address_cb(self, sdRef, flags, interfaceIndex, errorCode,
fullname, rrtype, rrclass, rdata, ttl):
if errorCode == pybonjour.kDNSServiceErr_NoError:
#print "RDATA type for A is ", type(rdata)
#print "Converted: ", socket.inet_ntoa(rdata)
# Extract Smart Space name, discard _serv._tcp crap
ss_name = self.service_name.split('.')[0]
discovered_lock.acquire()<|fim▁hole|>
def resolve_cb(self, sdRef, flags, interfaceIndex, errorCode, fullname,
hosttarget, port, txtRecord):
if errorCode == pybonjour.kDNSServiceErr_NoError:
#print 'Resolved service:'
#print ' fullname =', fullname
#print ' hosttarget =', hosttarget
#print ' port =', port
self.service_name = fullname
self.hostname = hosttarget
self.port = port
address_sdRef = pybonjour.DNSServiceQueryRecord(fullname = hosttarget,
rrtype = pybonjour.kDNSServiceType_A,
callBack = self.address_cb)
try:
ready = select.select([address_sdRef], [], [], TIMEOUT)
if address_sdRef in ready[0]:
pybonjour.DNSServiceProcessResult(address_sdRef)
else:
print 'Resolve timed out'
finally:
address_sdRef.close()
self.resolved.append(True)
def browse_cb(self, sdRef, flags, interfaceIndex,
errorCode, serviceName, regtype, replyDomain):
if errorCode != pybonjour.kDNSServiceErr_NoError:
return
if not (flags & pybonjour.kDNSServiceFlagsAdd):
# print 'Service removed: ', serviceName, " ", regtype
discovered_lock.acquire()
del self.discovered[hash(serviceName+regtype)]
for item in discovered:
if item[0] == serviceName:
discovered.remove(item)
discovered_lock.release()
return
if hash(serviceName+regtype) not in self.discovered:
self.discovered[hash(serviceName+regtype)] = True
# print 'Service added; resolving'
resolve_sdRef = pybonjour.DNSServiceResolve(0, interfaceIndex,
serviceName, regtype,
replyDomain, self.resolve_cb)
try:
while not self.resolved:
ready = select.select([resolve_sdRef], [], [], TIMEOUT)
if resolve_sdRef not in ready[0]:
print 'Resolve timed out'
break
pybonjour.DNSServiceProcessResult(resolve_sdRef)
else:
self.resolved.pop()
finally:
resolve_sdRef.close()
discovered_event.clear()
def run(self):
browse_sdRef = pybonjour.DNSServiceBrowse(regtype = self.reg_type, callBack = self.browse_cb)
try:
try:
while True:
discovered_event.clear()
ready = select.select([browse_sdRef], [], [])
if browse_sdRef in ready[0]:
pybonjour.DNSServiceProcessResult(browse_sdRef)
# time.sleep(0.1)
except KeyboardInterrupt:
pass
finally:
browse_sdRef.close()<|fim▁end|> | # Use TCP for communication, as zeroconf is IP based tech
discovered.append((ss_name, ("TCP", (socket.inet_ntoa(rdata), self.port))))
discovered_lock.release()
discovered_event.set() |
<|file_name|>UpdateNFSFileShareRequest.cpp<|end_file_name|><|fim▁begin|>/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/storagegateway/model/UpdateNFSFileShareRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::StorageGateway::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
UpdateNFSFileShareRequest::UpdateNFSFileShareRequest() :
m_fileShareARNHasBeenSet(false),
m_kMSEncrypted(false),
m_kMSEncryptedHasBeenSet(false),
m_kMSKeyHasBeenSet(false),
m_nFSFileShareDefaultsHasBeenSet(false),
m_defaultStorageClassHasBeenSet(false),
m_objectACL(ObjectACL::NOT_SET),
m_objectACLHasBeenSet(false),
m_clientListHasBeenSet(false),
m_squashHasBeenSet(false),
m_readOnly(false),
m_readOnlyHasBeenSet(false),
m_guessMIMETypeEnabled(false),
m_guessMIMETypeEnabledHasBeenSet(false),
m_requesterPays(false),
m_requesterPaysHasBeenSet(false),
m_fileShareNameHasBeenSet(false),
m_cacheAttributesHasBeenSet(false),
m_notificationPolicyHasBeenSet(false)
{
}
Aws::String UpdateNFSFileShareRequest::SerializePayload() const
{
JsonValue payload;
if(m_fileShareARNHasBeenSet)
{
payload.WithString("FileShareARN", m_fileShareARN);
}<|fim▁hole|> payload.WithBool("KMSEncrypted", m_kMSEncrypted);
}
if(m_kMSKeyHasBeenSet)
{
payload.WithString("KMSKey", m_kMSKey);
}
if(m_nFSFileShareDefaultsHasBeenSet)
{
payload.WithObject("NFSFileShareDefaults", m_nFSFileShareDefaults.Jsonize());
}
if(m_defaultStorageClassHasBeenSet)
{
payload.WithString("DefaultStorageClass", m_defaultStorageClass);
}
if(m_objectACLHasBeenSet)
{
payload.WithString("ObjectACL", ObjectACLMapper::GetNameForObjectACL(m_objectACL));
}
if(m_clientListHasBeenSet)
{
Array<JsonValue> clientListJsonList(m_clientList.size());
for(unsigned clientListIndex = 0; clientListIndex < clientListJsonList.GetLength(); ++clientListIndex)
{
clientListJsonList[clientListIndex].AsString(m_clientList[clientListIndex]);
}
payload.WithArray("ClientList", std::move(clientListJsonList));
}
if(m_squashHasBeenSet)
{
payload.WithString("Squash", m_squash);
}
if(m_readOnlyHasBeenSet)
{
payload.WithBool("ReadOnly", m_readOnly);
}
if(m_guessMIMETypeEnabledHasBeenSet)
{
payload.WithBool("GuessMIMETypeEnabled", m_guessMIMETypeEnabled);
}
if(m_requesterPaysHasBeenSet)
{
payload.WithBool("RequesterPays", m_requesterPays);
}
if(m_fileShareNameHasBeenSet)
{
payload.WithString("FileShareName", m_fileShareName);
}
if(m_cacheAttributesHasBeenSet)
{
payload.WithObject("CacheAttributes", m_cacheAttributes.Jsonize());
}
if(m_notificationPolicyHasBeenSet)
{
payload.WithString("NotificationPolicy", m_notificationPolicy);
}
return payload.View().WriteReadable();
}
Aws::Http::HeaderValueCollection UpdateNFSFileShareRequest::GetRequestSpecificHeaders() const
{
Aws::Http::HeaderValueCollection headers;
headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "StorageGateway_20130630.UpdateNFSFileShare"));
return headers;
}<|fim▁end|> |
if(m_kMSEncryptedHasBeenSet)
{ |
<|file_name|>lsprotate90.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
def inv(s):
    """Negate a signed coordinate string: '-3' -> '3', '+3' -> '-3', '3' -> '-3'."""
if s[0] == '-':
return s[1:]
elif s[0] == '+':
return '-' + s[1:]
else: # plain number
return '-' + s
if len(sys.argv) != 1:
print 'Usage:', sys.argv[0]
sys.exit(1)
for line in sys.stdin:
linesplit = line.strip().split()
if len(linesplit) == 3:
assert(linesplit[0] == 'p')
print('p ' + inv(linesplit[2]) + ' ' + linesplit[1])
elif len(linesplit) == 5:
assert(linesplit[0] == 's')
print('s ' + \
inv(linesplit[2]) + ' ' + linesplit[1] + ' ' + \
inv(linesplit[4]) + ' ' + linesplit[3] )
elif len(linesplit) == 0:
print<|fim▁end|> | #!/usr/bin/env python
import sys |
<|file_name|>share-constraints.js<|end_file_name|><|fim▁begin|>var detect = require('rtc-core/detect');
var extend = require('cog/extend');
var test = require('tape');
var expect = require('./helpers/expect-constraints');
var format = require('./helpers/format');
function mozMediaSource(type) {
return {
mozMediaSource: type,
mediaSource: type
};
}
test('share', expect({
audio: false,
video: extend(detect.moz ? mozMediaSource('window') : {}, {
mandatory: detect.moz ? {} : {
chromeMediaSource: 'screen'
},
optional: [
{ maxWidth: 1920 },
{ maxHeight: 1080 }
]
})
}, format.LEGACY));
test('share', expect({
audio: false,
video: extend(detect.moz ? mozMediaSource('window') : {},
detect.moz ? {
width: { max: 1920 },
height: { max: 1080 }
} : {
chromeMediaSource: 'screen',
width: { max: 1920 },
height: { max: 1080 }
}
)
}, format.STANDARD));
test('share:window', expect({
audio: false,
video: extend(detect.moz ? mozMediaSource('window') : {}, {
mandatory: detect.moz ? {} : {
chromeMediaSource: 'screen'
},
optional: [
{ maxWidth: 1920 },
{ maxHeight: 1080 }
]
})
}, format.LEGACY));
test('share:window', expect({
audio: false,
video: extend(detect.moz ? mozMediaSource('window') : {},
detect.moz ? {
width: { max: 1920 },<|fim▁hole|> chromeMediaSource: 'screen',
width: { max: 1920 },
height: { max: 1080 }
}
)
}, format.STANDARD));<|fim▁end|> | height: { max: 1080 }
} : { |
<|file_name|>Unix.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel.unix;
import io.netty.util.internal.ClassInitializerUtil;
import io.netty.util.internal.UnstableApi;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.PortUnreachableException;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Tells if <a href="https://netty.io/wiki/native-transports.html">{@code netty-transport-native-unix}</a> is
* supported.
*/
public final class Unix {
private static final AtomicBoolean registered = new AtomicBoolean();
static {
        // Preload all classes that will be used in the OnLoad(...) function of JNI to eliminate the possibility of a
// class-loader deadlock. This is a workaround for https://github.com/netty/netty/issues/11209.
// This needs to match all the classes that are loaded via NETTY_JNI_UTIL_LOAD_CLASS or looked up via
// NETTY_JNI_UTIL_FIND_CLASS.
ClassInitializerUtil.tryLoadClasses(Unix.class,
// netty_unix_errors
OutOfMemoryError.class, RuntimeException.class, ClosedChannelException.class,
IOException.class, PortUnreachableException.class,
// netty_unix_socket
DatagramSocketAddress.class, InetSocketAddress.class
);
}
/**
     * Internal method. Should never be called by the user.
     *
     * @param registerTask the {@link Runnable} that performs the actual native registration
*/
@UnstableApi
public static void registerInternal(Runnable registerTask) {
if (registered.compareAndSet(false, true)) {
registerTask.run();
Socket.initialize();
}
}
/**
* Returns {@code true} if and only if the <a href="https://netty.io/wiki/native-transports.html">{@code
* netty_transport_native_unix}</a> is available.
*/
@Deprecated
public static boolean isAvailable() {
return false;
}
/**
* Ensure that <a href="https://netty.io/wiki/native-transports.html">{@code netty_transport_native_unix}</a> is
* available.
*
* @throws UnsatisfiedLinkError if unavailable
*/
@Deprecated
public static void ensureAvailability() {
throw new UnsupportedOperationException();
}
/**
* Returns the cause of unavailability of <a href="https://netty.io/wiki/native-transports.html">
* {@code netty_transport_native_unix}</a>.
*
* @return the cause if unavailable. {@code null} if available.
*/
@Deprecated
public static Throwable unavailabilityCause() {
return new UnsupportedOperationException();
}<|fim▁hole|><|fim▁end|> |
private Unix() {
}
} |
<|file_name|>common.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.<|fim▁hole|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from functools import wraps
import math
import random
import time
from gcs_client import errors as errors
def is_complete(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
attributes = getattr(self, '_required_attributes') or []
for attribute in attributes:
if not getattr(self, attribute, None):
raise Exception('%(func_name)s needs %(attr)s to be set.' %
{'func_name': f.__name__, 'attr': attribute})
return f(self, *args, **kwargs)
return wrapped
# Generate default codes to retry from transient HTTP errors
DEFAULT_RETRY_CODES = tuple(
code for code, (cls_name, cls) in errors.http_errors.items()
if cls is errors.Transient)
class RetryParams(object):
"""Truncated Exponential Backoff configuration class.
This configuration is used to provide truncated exponential backoff retries
for communications.
The algorithm requires 4 arguments: max retries, initial delay, max backoff
wait time and backoff factor.
    As long as we have pending retries we will wait
        (backoff_factor ** (n - 1)) * initial_delay
    where n is the retry number.
    This wait is used as long as it does not exceed the max backoff wait
    time; once it does, the max backoff wait time is used instead.
We'll add a random wait time to this delay to help avoid cases where many
clients get synchronized by some situation and all retry at once, sending
requests in synchronized waves.
For example with default values of max_retries=5, initial_delay=1,
max_backoff=32 and backoff_factor=2
- 1st failure: 1 second + random delay [ (2^(1-1)) * 1 ]
- 2nd failure: 2 seconds + random delay [ (2^(2-1)) * 1 ]
- 3rd failure: 4 seconds + random delay [ (2^(3-1)) * 1 ]
- 4th failure: 8 seconds + random delay [ (2^(4-1)) * 1 ]
- 5th failure: 16 seconds + random delay [ (2^(5-1)) * 1 ]
- 6th failure: Fail operation
"""
def __init__(self, max_retries=5, initial_delay=1, max_backoff=32,
backoff_factor=2, randomize=True):
"""Initialize retry configuration.
:param max_retries: Maximum number of retries before giving up.
:type max_retries: int
:param initial_delay: Seconds to wait for the first retry.
:type initial_delay: int or float
:param max_backoff: Maximum number of seconds to wait between retries.
:type max_backoff: int or float
:param backoff_factor: Base to use for the power used to calculate the
delay for the backoff.
:type backoff_factor: int or float
:param randomize: Whether to use randomization of the delay time to
avoid synchronized waves.
:type randomize: bool
"""
self.max_retries = max_retries
self.initial_delay = initial_delay
self.max_backoff = max_backoff
self.backoff_factor = backoff_factor
self.randomize = randomize
@classmethod
def get_default(cls):
"""Return default configuration (simpleton patern)."""
if not hasattr(cls, 'default'):
cls.default = cls()
return cls.default
@classmethod
def set_default(cls, *args, **kwargs):
"""Set default retry configuration.
        Method accepts a RetryParams instance or the same arguments as the
__init__ method.
"""
default = cls.get_default()
# For RetryParams argument copy dictionary to default instance so all
# references to the default configuration will have new values.
if len(args) == 1 and isinstance(args[0], RetryParams):
default.__dict__.update(args[0].__dict__)
# For individual arguments call __init__ method on default instance
else:
default.__init__(*args, **kwargs)
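# A minimal sketch (not part of the public API) of the schedule described in
# the RetryParams docstring: nominal delays under the default parameters,
# with the runtime random jitter omitted.
def _example_backoff_schedule():
    params = RetryParams.get_default()
    for n in range(1, params.max_retries + 1):
        backoff = math.pow(params.backoff_factor, n - 1) * params.initial_delay
        print(min(params.max_backoff, backoff))  # 1.0, 2.0, 4.0, 8.0, 16.0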
def retry(param='_retry_params', error_codes=DEFAULT_RETRY_CODES):
"""Truncated Exponential Backoff decorator.
There are multiple ways to use this decorator:
@retry
def my_func(self):
In this case we will try to use `self._retry_params` and if that's not
available we'll use default retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry('_retry_cfg')
def my_func(self):
In this case we will try to use `self._retry_cfg` and if that's
not available we'll use default retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry(RetryParams(5, 1, 32, 2, False))
def my_func(self):
In this case we will use a specific retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry('_retry_cfg', [408, 504])
def my_func(self):
In this case we will try to use `self._retry_cfg` and if that's
not available we'll use default retry configuration and retry only on
timeout status codes.
@retry(RetryParams(5, 1, 32, 2, False), [408, 504])
def my_func(self):
In this case we will use a specific retry configuration and retry only
on timeout status codes.
@retry(error_codes=[408, 504])
def my_func(self):
In this case we will try to use `self._retry_params` and if that's not
available we'll use default retry configuration and retry only on
timeout status codes.
If we pass None as the retry parameter or the value of the attribute on the
instance is None we will not do any retries.
"""
def _retry(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
# If retry configuration is none or a RetryParams instance, use it
if isinstance(param, (type(None), RetryParams)):
retry_params = param
# If it's an attribute name try to retrieve it
else:
retry_params = getattr(self, param, RetryParams.get_default())
delay = 0
random_delay = 0
n = 0 # Retry number
while True:
try:
result = f(self, *args, **kwargs)
return result
except errors.Http as exc:
if (not retry_params or n >= retry_params.max_retries or
exc.code not in error_codes):
raise exc
n += 1
# If we haven't reached maximum backoff yet calculate new delay
if delay < retry_params.max_backoff:
backoff = (math.pow(retry_params.backoff_factor, n-1) *
retry_params.initial_delay)
delay = min(retry_params.max_backoff, backoff)
if retry_params.randomize:
random_delay = random.random() * retry_params.initial_delay
time.sleep(delay + random_delay)
return wrapped
# If no argument has been used
if callable(param):
f, param = param, '_retry_params'
return _retry(f)
return _retry<|fim▁end|> | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0 |
<|file_name|>robuffer.rs<|end_file_name|><|fim▁begin|>// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use um::objidl::IMarshal;
use um::winnt::HRESULT;<|fim▁hole|> bufferMarshaler: *mut *mut IMarshal,
) -> HRESULT;
}<|fim▁end|> | extern "system" {
pub fn RoGetBufferMarshaler( |
<|file_name|>node.d.ts<|end_file_name|><|fim▁begin|>import { ElementType } from "domelementtype";
/**
* This object will be used as the prototype for Nodes when creating a
* DOM-Level-1-compliant structure.
*/
export declare class Node {
type: ElementType;
/** Parent of the node */
parent: NodeWithChildren | null;
/** Previous sibling */
prev: Node | null;
/** Next sibling */
next: Node | null;
    /** The start index of the node. Requires `withStartIndices` on the handler to be `true`. */
startIndex: number | null;
    /** The end index of the node. Requires `withEndIndices` on the handler to be `true`. */
endIndex: number | null;
/**
*
* @param type The type of the node.
*/
constructor(type: ElementType);
get nodeType(): number;
get parentNode(): NodeWithChildren | null;
set parentNode(parent: NodeWithChildren | null);
get previousSibling(): Node | null;
set previousSibling(prev: Node | null);
get nextSibling(): Node | null;
set nextSibling(next: Node | null);
/**
* Clone this node, and optionally its children.
*
* @param recursive Clone child nodes as well.
* @returns A clone of the node.
*/
cloneNode<T extends Node>(this: T, recursive?: boolean): T;
}
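// Illustrative usage sketch (assumes the runtime implementation that ships
// with this package; Element, Text, isTag and cloneNode are declared below):
//
//   const el = new Element("div", { id: "root" }, [new Text("hello")]);
//   el.firstChild;           // the Text node
//   isTag(el);               // true
//   const copy = cloneNode(el, true);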
export declare class DataNode extends Node {
data: string;
/**
* @param type The type of the node
* @param data The content of the data node
*/
constructor(type: ElementType.Comment | ElementType.Text | ElementType.Directive, data: string);
get nodeValue(): string;
set nodeValue(data: string);
}
export declare class Text extends DataNode {
constructor(data: string);
}
export declare class Comment extends DataNode {
constructor(data: string);
}
export declare class ProcessingInstruction extends DataNode {
name: string;
constructor(name: string, data: string);
"x-name"?: string;
"x-publicId"?: string;
"x-systemId"?: string;
}
/**
* A `Node` that can have children.
*/
export declare class NodeWithChildren extends Node {
children: Node[];
/**
* @param type Type of the node.
* @param children Children of the node. Only certain node types can have children.
*/
constructor(type: ElementType.Root | ElementType.CDATA | ElementType.Script | ElementType.Style | ElementType.Tag, children: Node[]);
get firstChild(): Node | null;
get lastChild(): Node | null;
get childNodes(): Node[];
set childNodes(children: Node[]);
}
export declare class Document extends NodeWithChildren {
constructor(children: Node[]);
"x-mode"?: "no-quirks" | "quirks" | "limited-quirks";
}
interface Attribute {
name: string;
value: string;
namespace?: string;
prefix?: string;
}
export declare class Element extends NodeWithChildren {
name: string;
attribs: {
[name: string]: string;
};
/**
* @param name Name of the tag, eg. `div`, `span`.
* @param attribs Object mapping attribute names to attribute values.
* @param children Children of the node.
*/
constructor(name: string, attribs: {
[name: string]: string;
}, children?: Node[], type?: ElementType.Tag | ElementType.Script | ElementType.Style);
get tagName(): string;
set tagName(name: string);
get attributes(): Attribute[];
"x-attribsNamespace"?: Record<string, string>;
"x-attribsPrefix"?: Record<string, string>;
}
/**
* @param node Node to check.
* @returns `true` if the node is a `Element`, `false` otherwise.
*/
export declare function isTag(node: Node): node is Element;
/**
* @param node Node to check.
* @returns `true` if the node has the type `CDATA`, `false` otherwise.
*/
export declare function isCDATA(node: Node): node is NodeWithChildren;
/**
* @param node Node to check.
* @returns `true` if the node has the type `Text`, `false` otherwise.
*/
export declare function isText(node: Node): node is DataNode;
/**
* @param node Node to check.
* @returns `true` if the node has the type `Comment`, `false` otherwise.
*/
export declare function isComment(node: Node): node is DataNode;
/**
* @param node Node to check.
* @returns `true` if the node has the type `ProcessingInstruction`, `false` otherwise.
*/
export declare function isDirective(node: Node): node is ProcessingInstruction;
/**
* @param node Node to check.
* @returns `true` if the node has the type `ProcessingInstruction`, `false` otherwise.
*/
export declare function isDocument(node: Node): node is Document;
/**
* @param node Node to check.
* @returns `true` if the node is a `NodeWithChildren` (has children), `false` otherwise.
*/
export declare function hasChildren(node: Node): node is NodeWithChildren;
/**
* Clone a node, and optionally its children.
*
* @param recursive Clone child nodes as well.
* @returns A clone of the node.<|fim▁hole|><|fim▁end|> | */
export declare function cloneNode<T extends Node>(node: T, recursive?: boolean): T;
export {};
//# sourceMappingURL=node.d.ts.map |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import os
import logging
import tqdm
from urllib.request import urlretrieve, URLError
import multiprocessing as mp
import itertools
import numpy as np
log = logging.getLogger(__name__)
def download_PHOENIX_models(path, ranges=None, parameters=None):
"""
Download the PHOENIX grid models from the Goettingen servers. This will skip over
any ill-defined files or any files that already exist on disk in the given folder.
Parameters
----------
path : str or path-like
The base directory to save the files in.
ranges : iterable of (min, max), optional
Each entry in ranges should be (min, max) for the associated parameter, in the
order [Teff, logg, Z, (Alpha)]. Cannot be used with :attr:`parameters`. Default
is None
parameters : iterable of iterables of length 3 or length 4, optional
The parameters to download. Should be a list of parameters where parameters can
either be [Teff, logg, Z] or [Teff, logg, Z, Alpha]. All values should be
floats or integers and not string. If no value provided, will download all
models. Default is None
Raises
------
ValueError
If both ``parameters`` and ``ranges`` are specified
Warning
-------
This will create any directories if they do not exist
Warning
-------
Please use this responsibly to avoid over-saturating the connection to the
    Goettingen servers.
Examples
--------
.. code-block:: python
from Starfish.grid_tools import download_PHOENIX_models
ranges = [
[5000, 5200] # T
[4.0, 5.0] # logg
[0, 0] # Z
]
download_PHOENIX_models(path='models', ranges=ranges)
or equivalently using ``parameters`` syntax
.. code-block:: python
from itertools import product
from Starfish.grid_tools import download_PHOENIX_models
T = [6000, 6100, 6200]
logg = [4.0, 4.5, 5.0]
Z = [0]
params = product(T, logg, Z)
download_PHOENIX_models(path='models', parameters=params)
"""
if parameters is not None and ranges is not None:
raise ValueError("Cannot specify both 'parameters' and 'ranges'")
wave_url = "http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits"
wave_file = os.path.join(path, "WAVE_PHOENIX-ACES-AGSS-COND-2011.fits")
flux_file_formatter = (
"http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011"
"/Z{2:s}{3:s}/lte{0:05.0f}-{1:03.2f}{2:s}{3:s}.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
)
output_formatter = "Z{2:s}{3:s}/lte{0:05.0f}-{1:03.2f}{2:s}{3:s}.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
os.makedirs(path, exist_ok=True)
# Download step
log.info("Starting Download of PHOENIX ACES models to {}".format(path))
if not os.path.exists(wave_file):
log.info("Downloading wavelength file")
urlretrieve(wave_url, wave_file)
# Have to wait until wavelength file is downloaded before importing
from .interfaces import PHOENIXGridInterface, PHOENIXGridInterfaceNoAlpha
# Kind of messy, sorry
if parameters is None:
if ranges is not None:
if len(ranges) == 3:
grid = PHOENIXGridInterfaceNoAlpha(path)
elif len(ranges) == 4:
grid = PHOENIXGridInterface(path)
parameters = list(itertools.product(*grid.points))
elif len(parameters[0]) == 3:
grid = PHOENIXGridInterfaceNoAlpha(path)
elif len(parameters[0]) == 4:
grid = PHOENIXGridInterface(path)
if ranges is not None:
_ranges = np.asarray(ranges)
min_params = _ranges.T[0]
max_params = _ranges.T[1]
else:
min_params = np.tile(-np.inf, len(parameters[0]))
max_params = np.tile(np.inf, len(parameters[0]))
# I hate to iterate here, but this way the progress bar doesn't show something like
# 7000 parameters and skips thousands at a time
params = []
for p in parameters:
if np.all(p >= min_params) and np.all(p <= max_params):
params.append(p)
pbar = tqdm.tqdm(params)
for p in pbar:
# Skip irregularities from grid
try:
grid.check_params(p)
except ValueError:
continue
tmp_p = [p[0], p[1]]
# Create the Z string. Have to do this because PHOENIX models use - sign for 0.0
Zstr = "-0.0" if p[2] == 0 else "{:+.1f}".format(p[2])
tmp_p.append(Zstr)
# Create the Alpha string, which is nothing if alpha is 0 or unspecified
if len(p) == 4:
Astr = "" if p[3] == 0 else ".Alpha={:+.2f}".format(p[3])
else:
Astr = ""
tmp_p.append(Astr)
url = flux_file_formatter.format(*tmp_p)
pbar.set_description(url.split("/")[-1])
output_file = os.path.join(path, output_formatter.format(*tmp_p))
if not os.path.exists(output_file):
os.makedirs(os.path.dirname(output_file), exist_ok=True)
try:
urlretrieve(url, output_file)
except URLError:<|fim▁hole|>
def chunk_list(mylist, n=mp.cpu_count()):
"""
Divide a lengthy parameter list into chunks for parallel processing and
backfill if necessary.
:param mylist: a lengthy list of parameter combinations
:type mylist: 1-D list
:param n: number of chunks to divide list into. Default is ``mp.cpu_count()``
:type n: integer
:returns: **chunks** (*2-D list* of shape (n, -1)) a list of chunked parameter lists.
"""
if isinstance(mylist, np.ndarray):
mylist = list(mylist)
length = len(mylist)
size = int(length / n)
# fill with evenly divisible
chunks = [mylist[0 + size * i : size * (i + 1)] for i in range(n)]
leftover = length - size * n
edge = size * n
for i in range(leftover): # backfill each with the last item
chunks[i % n].append(mylist[edge + i])
return chunks
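# A quick illustrative check of the backfill behavior (not part of the API):
# with 10 items and n=3 the chunk size is 3, and the single leftover item is
# appended to the first chunk.
#
#   chunk_list(list(range(10)), n=3)
#   # -> [[0, 1, 2, 9], [3, 4, 5], [6, 7, 8]]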
def determine_chunk_log(wl, wl_min, wl_max):
"""
Take in a wavelength array and then, given two minimum bounds, determine
the boolean indices that will allow us to truncate this grid to near the
requested bounds while forcing the wl length to be a power of 2.
:param wl: wavelength array
:type wl: np.ndarray
:param wl_min: minimum required wavelength
:type wl_min: float
:param wl_max: maximum required wavelength
:type wl_max: float
:returns: numpy.ndarray boolean array
"""
# wl_min and wl_max must of course be within the bounds of wl
assert wl_min >= np.min(wl) and wl_max <= np.max(
wl
), "determine_chunk_log: wl_min {:.2f} and wl_max {:.2f} are not within the bounds of the grid {:.2f} to {:.2f}.".format(
wl_min, wl_max, np.min(wl), np.max(wl)
)
# Find the smallest length synthetic spectrum that is a power of 2 in length
# and longer than the number of points contained between wl_min and wl_max
len_wl = len(wl)
npoints = np.sum((wl >= wl_min) & (wl <= wl_max))
chunk = len_wl
inds = (0, chunk)
# This loop will exit with chunk being the smallest power of 2 that is
# larger than npoints
while chunk > npoints:
if chunk / 2 > npoints:
chunk = chunk // 2
else:
break
assert type(chunk) == np.int, "Chunk is not an integer!. Chunk is {}".format(chunk)
if chunk < len_wl:
# Now that we have determined the length of the chunk of the synthetic
# spectrum, determine indices that straddle the data spectrum.
# Find the index that corresponds to the wl at the center of the data spectrum
center_wl = (wl_min + wl_max) / 2.0
center_ind = (np.abs(wl - center_wl)).argmin()
# Take a chunk that straddles either side.
inds = (center_ind - chunk // 2, center_ind + chunk // 2)
ind = (np.arange(len_wl) >= inds[0]) & (np.arange(len_wl) < inds[1])
else:
print("keeping grid as is")
ind = np.ones_like(wl, dtype="bool")
assert (min(wl[ind]) <= wl_min) and (max(wl[ind]) >= wl_max), (
"Model"
"Interpolator chunking ({:.2f}, {:.2f}) didn't encapsulate full"
" wl range ({:.2f}, {:.2f}).".format(min(wl[ind]), max(wl[ind]), wl_min, wl_max)
)
return ind
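# Illustrative usage sketch (hypothetical grid values): truncate a log-spaced
# synthetic wavelength grid to the smallest power-of-2 chunk straddling the
# requested range.
#
#   wl = np.logspace(np.log10(3000.0), np.log10(12000.0), 2 ** 16)
#   mask = determine_chunk_log(wl, 5000.0, 5100.0)
#   wl_chunk = wl[mask]  # length is a power of 2 and covers 5000-5100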
def vacuum_to_air(wl):
"""
Converts vacuum wavelengths to air wavelengths using the Ciddor 1996 formula.
:param wl: input vacuum wavelengths
:type wl: numpy.ndarray
:returns: numpy.ndarray
.. note::
CA Prieto recommends this as more accurate than the IAU standard.
"""
wl = np.asarray(wl)
sigma = (1e4 / wl) ** 2
f = 1.0 + 0.05792105 / (238.0185 - sigma) + 0.00167917 / (57.362 - sigma)
return wl / f
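# Quick numeric sanity check (line-list values, illustrative only): the Na D2
# line at 5891.58 A in vacuum corresponds to roughly 5889.95 A in air.
#
#   vacuum_to_air(np.array([5891.58]))  # -> approx. array([5889.95])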
def calculate_n(wl):
"""
Calculate *n*, the refractive index of light at a given wavelength.
:param wl: input wavelength (in vacuum)
:type wl: np.array
:return: numpy.ndarray
"""
wl = np.asarray(wl)
sigma = (1e4 / wl) ** 2
f = 1.0 + 0.05792105 / (238.0185 - sigma) + 0.00167917 / (57.362 - sigma)
new_wl = wl / f
n = wl / new_wl
    return n
def vacuum_to_air_SLOAN(wl):
"""
Converts vacuum wavelengths to air wavelengths using the outdated SLOAN definition.
From the SLOAN website:
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4)
:param wl:
The input wavelengths to convert
"""
wl = np.asarray(wl)
air = wl / (1.0 + 2.735182e-4 + 131.4182 / wl ** 2 + 2.76249e8 / wl ** 4)
return air
def air_to_vacuum(wl):
"""
Convert air wavelengths to vacuum wavelengths.
:param wl: input air wavelegths
:type wl: np.array
:return: numpy.ndarray
.. warning::
It is generally not recommended to do this, as the function is imprecise.
"""
wl = np.asarray(wl)
sigma = 1e4 / wl
vac = wl + wl * (
6.4328e-5 + 2.94981e-2 / (146 - sigma ** 2) + 2.5540e-4 / (41 - sigma ** 2)
)
return vac
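# Illustrative round-trip check: since this approximation is not the exact
# inverse of vacuum_to_air, expect a small nonzero residual.
#
#   wl = np.array([5000.0])
#   air_to_vacuum(vacuum_to_air(wl)) - wl  # small residual, not exactly 0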
@np.vectorize
def idl_float(idl_num: str) -> float:
"""
Convert an IDL string number in scientific notation to a float
Parameters
----------
idl_num : str
Input str
Returns
-------
float
Output float
Examples
--------
```python
>>> idl_float("1.6D4")
1.6e4
```
"""
idl_str = idl_num.lower()
return np.float(idl_str.replace("d", "e"))<|fim▁end|> | log.warning(
f"Parameters {p} not found. Double check they are on PHOENIX grid"
)
|
<|file_name|>tcp.go<|end_file_name|><|fim▁begin|>package tcp
import (
"fmt"
"io"
"net"
"os"
"sync"
"github.com/rootless-containers/rootlesskit/pkg/port"
"github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg"
)
func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error {
ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", spec.ParentIP, spec.ParentPort))
if err != nil {
fmt.Fprintf(logWriter, "listen: %v\n", err)
return err
}
newConns := make(chan net.Conn)
go func() {
for {
c, err := ln.Accept()
if err != nil {
fmt.Fprintf(logWriter, "accept: %v\n", err)
close(newConns)
return
}
newConns <- c
}
}()
go func() {
defer ln.Close()
for {
select {
case c, ok := <-newConns:
if !ok {
return
}
go func() {
if err := copyConnToChild(c, socketPath, spec, stopCh); err != nil {
fmt.Fprintf(logWriter, "copyConnToChild: %v\n", err)
return
}
}()
case <-stopCh:
return<|fim▁hole|> }
}()
// no wait
return nil
}
func copyConnToChild(c net.Conn, socketPath string, spec port.Spec, stopCh <-chan struct{}) error {
defer c.Close()
// get fd from the child as an SCM_RIGHTS cmsg
fd, err := msg.ConnectToChildWithRetry(socketPath, spec, 10)
if err != nil {
return err
}
f := os.NewFile(uintptr(fd), "")
defer f.Close()
fc, err := net.FileConn(f)
if err != nil {
return err
}
defer fc.Close()
bicopy(c, fc, stopCh)
return nil
}
// bicopy is based on libnetwork/cmd/proxy/tcp_proxy.go .
// NOTE: sendfile(2) cannot be used for sockets
func bicopy(x, y net.Conn, quit <-chan struct{}) {
var wg sync.WaitGroup
var broker = func(to, from net.Conn) {
io.Copy(to, from)
if fromTCP, ok := from.(*net.TCPConn); ok {
fromTCP.CloseRead()
}
if toTCP, ok := to.(*net.TCPConn); ok {
toTCP.CloseWrite()
}
wg.Done()
}
wg.Add(2)
go broker(x, y)
go broker(y, x)
finish := make(chan struct{})
go func() {
wg.Wait()
close(finish)
}()
select {
case <-quit:
case <-finish:
}
x.Close()
y.Close()
<-finish
}<|fim▁end|> | } |
<|file_name|>compose.go<|end_file_name|><|fim▁begin|>package config
import (
"fmt"
"strconv"
"strings"
"github.com/dnephin/configtf"
pth "github.com/dnephin/configtf/path"
)
// ComposeConfig A **compose** resource runs ``docker-compose`` to create an
// isolated environment. The **compose** resource keeps containers running
// until **dobi** exits so the containers can be used by other tasks that depend
// on the **compose** resource, or are listed after it in an `alias`_.
//
// .. note::<|fim▁hole|>// `Docker Compose <https://github.com/docker/compose>`_ must be installed
// and available in ``$PATH`` to use this resource.
//
// name: compose
// example: Start a Compose environment setting the project name to ``web-devenv``
// and using two Compose files.
//
// .. code-block:: yaml
//
// compose=devenv:
// files: [docker-compose.yml, docker-compose-dev.yml]
// project: 'web-devenv'
//
type ComposeConfig struct {
// Files The Compose files to use. This field supports :doc:`variables`.
// type: list of filenames
Files []string
// Project The project name used by Compose. This field supports
// :doc:`variables`.
Project string `config:"required"`
// StopGrace Seconds to wait for containers to stop before killing them.
// default: ``5``
StopGrace int
Dependent
Annotations
}
// StopGraceString returns StopGrace as a string
func (c *ComposeConfig) StopGraceString() string {
return strconv.Itoa(c.StopGrace)
}
// Validate the resource
func (c *ComposeConfig) Validate(path pth.Path, config *Config) *pth.Error {
return nil
}
func (c *ComposeConfig) String() string {
return fmt.Sprintf("Run Compose project %q from: %v",
c.Project, strings.Join(c.Files, ", "))
}
// Resolve resolves variables in the resource
func (c *ComposeConfig) Resolve(resolver Resolver) (Resource, error) {
conf := *c
var err error
conf.Files, err = resolver.ResolveSlice(c.Files)
if err != nil {
return &conf, err
}
conf.Project, err = resolver.Resolve(c.Project)
return &conf, err
}
func composeFromConfig(name string, values map[string]interface{}) (Resource, error) {
compose := &ComposeConfig{Project: "{unique}", StopGrace: 5}
return compose, configtf.Transform(name, values, compose)
}
func init() {
RegisterResource("compose", composeFromConfig)
}<|fim▁end|> | // |
<|file_name|>__openerp__.py<|end_file_name|><|fim▁begin|>{
'name': "Sync POS orders across multiple sessions",
'version': '1.0.0',<|fim▁hole|> 'author': 'Ivan Yelizariev',
'category': 'Point Of Sale',
'website': 'https://yelizariev.github.io',
'depends': ['pos_disable_payment', 'bus'],
'data': [
'security/ir.model.access.csv',
'views.xml',
],
'qweb': [
'static/src/xml/pos_multi_session.xml',
],
'installable': True,
}<|fim▁end|> | |
<|file_name|>factory.go<|end_file_name|><|fim▁begin|>/*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|>limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
versioned "knative.dev/eventing/pkg/client/clientset/versioned"
eventing "knative.dev/eventing/pkg/client/informers/externalversions/eventing"
flows "knative.dev/eventing/pkg/client/informers/externalversions/flows"
internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
messaging "knative.dev/eventing/pkg/client/informers/externalversions/messaging"
sources "knative.dev/eventing/pkg/client/informers/externalversions/sources"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
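// Illustrative usage (assumes "client" is a versioned.Interface built from a
// rest.Config elsewhere):
//
//	factory := NewSharedInformerFactoryWithOptions(client, 30*time.Second,
//		WithNamespace("default"))
//	stopCh := make(chan struct{})
//	factory.Start(stopCh)
//	factory.WaitForCacheSync(stopCh)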
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits for the caches of all started informers to be synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Eventing() eventing.Interface
Flows() flows.Interface
Messaging() messaging.Interface
Sources() sources.Interface
}
func (f *sharedInformerFactory) Eventing() eventing.Interface {
return eventing.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Flows() flows.Interface {
return flows.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Messaging() messaging.Interface {
return messaging.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Sources() sources.Interface {
return sources.New(f, f.namespace, f.tweakListOptions)
}<|fim▁end|> | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and |
<|file_name|>Autostop_Test.py<|end_file_name|><|fim▁begin|>from yandextank.plugins.Aggregator import SecondAggregateData
from yandextank.plugins.Autostop import AutostopPlugin<|fim▁hole|>
class AutostopTestCase(TankTestCase):
def setUp(self):
core = self.get_core()
core.load_configs(['config/autostop.conf'])
core.load_plugins()
core.plugins_configure()
self.foo = AutostopPlugin(core)
def tearDown(self):
del self.foo
self.foo = None
def test_run(self):
data = SecondAggregateData()
data.overall.avg_response_time = 11
self.foo.core.set_option(self.foo.SECTION, "autostop", "time(1,10)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_http(self):
data = SecondAggregateData()
data.overall.http_codes = {'200':11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (200, 10, 5 )\nhttp (3xx, 1.5%, 10m)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_net(self):
data = SecondAggregateData()
data.overall.net_codes = {71:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "net (71, 1, 5)\nnet (xx, 1.5%, 10m )")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_quan(self):
data = SecondAggregateData()
data.overall.quantiles = {99.0:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "quantile(99,2,3)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_false_trigger_bug(self):
data = SecondAggregateData()
data.overall.http_codes = {}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (5xx, 100%, 1)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() >= 0:
raise RuntimeError()
self.foo.end_test(0)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | from Tank_Test import TankTestCase
import tempfile
import unittest |
<|file_name|>Reflector.java<|end_file_name|><|fim▁begin|>package com.github.dozzatq.phoenix.advertising;
/**
* Created by Rodion Bartoshik on 04.07.2017.
*/
interface Reflector {
FactoryAd reflection();
int state();<|fim▁hole|><|fim▁end|> | } |
<|file_name|>merchant.py<|end_file_name|><|fim▁begin|>from item import Item
class Merchant:
def __init__(self, markup=1.2, markdown=0.8):
self.inventory = []
self.markup = markup
self.markdown = markdown
def add_item(self, item):
# Adds an item to the merchant's inventory<|fim▁hole|>
if (not isinstance(item, Item)):
raise TypeError("Unexpected " + type(item))
self.inventory.append(item)
def get_selling_offers(self):
# Lists all items in the merchant's inventory
# and adds the markup fee
offers = []
for item in self.inventory:
offer = (item, item.value*self.markup)
offers.append(offer)
return offers
def get_buying_offers(self, items):
# Generates buying offers on the items in 'items'
offers = []
for item in items:
offer = (item, item.value*self.markdown)
offers.append(offer)
return offers
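# Illustrative usage (Item's constructor is assumed; prices use the default
# markup/markdown of 1.2/0.8):
#   sword = Item(...)                  # suppose sword.value == 100
#   shop = Merchant()
#   shop.add_item(sword)
#   shop.get_selling_offers()          # -> [(sword, 120.0)]
#   shop.get_buying_offers([sword])    # -> [(sword, 80.0)]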
class Banker:
def get_items(self, player):
items_formatted = []
bank_list = []
for item, amount in player.bank.items():
items_formatted.append((item.name, amount))
bank_list.append(item)
return items_formatted, bank_list
def add_item(self, player, item):
if (not isinstance(item, Item)):
raise TypeError("Unexpected " + type(item))
if item in player.bank:
player.bank[item] += 1
else:
player.bank[item] = 1
return True
def remove_item(self, player, item):
if (not isinstance(item, Item)):
raise TypeError("Unexpected " + type(item))
if item in player.bank:
if player.bank[item] == 1:
player.bank.pop(item)
else:
                player.bank[item] -= 1
return True
else:
return False<|fim▁end|> | |
<|file_name|>symbolKlass.cpp<|end_file_name|><|fim▁begin|>/* Copyright 1994, 1995 LongView Technologies L.L.C. $Revision: 1.22 $ */
/* Copyright (c) 2006, Sun Microsystems, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems nor the names of its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
*/
# include "incls/_precompiled.incl"
# include "incls/_symbolKlass.cpp.incl"
# include <ctype.h>
symbolOop symbolKlass::allocateSymbol(char* value, int len) {
symbolOop sym = as_symbolOop(Universe::allocate_tenured(object_size(len)));
sym->init_untagged_contents_mark();
sym->set_klass_field(Universe::symbolKlassObj());
sym->set_length(len);
initialize_object(sym, value, len);
return sym;
}
klassOop symbolKlass::create_subclass(mixinOop mixin, Format format) {
if (format == mem_klass || format == symbol_klass) {
return symbolKlass::create_class(as_klassOop(), mixin);
}
return NULL;
}
klassOop symbolKlass::create_class(klassOop super_class, mixinOop mixin) {
symbolKlass o;
return create_generic_class(super_class, mixin, o.vtbl_value());
}
void set_symbolKlass_vtbl(Klass* k) {
symbolKlass o;
k->set_vtbl_value(o.vtbl_value());
}
oop symbolKlass::scavenge(oop /*obj*/) {
ShouldNotCallThis(); // shouldn't need to scavenge canonical symbols
// (should be tenured)
return NULL;<|fim▁hole|> return symbolOop(obj)->verify();
}
void symbolKlass::oop_print_value_on(oop obj, outputStream* st) {
assert_symbol(obj, "dispatch check");
symbolOop array = symbolOop(obj);
int len = array->length();
int n = min(MaxElementPrintSize, len);
st->print("#");
for(int index = 1; index <= n; index++) {
char c = array->byte_at(index);
if (isprint(c)) st->print("%c", c);
else st->print("\\%o", c);
}
if (n < len) st->print("...");
}
void symbolKlass::print(oop obj) {
assert_symbol(obj, "dispatch check");
std->print("'");
symbolOop(obj)->print_symbol_on();
std->print("' ");
}
oop symbolKlass::oop_shallow_copy(oop obj, bool tenured) {
assert_symbol(obj, "dispatch check");
return obj;
}<|fim▁end|> | }
bool symbolKlass::verify(oop obj) { |
<|file_name|>request.js<|end_file_name|><|fim▁begin|>function noReservedDays(date) {
var m = date.getMonth(), d = date.getDate(), y = date.getFullYear();
for (i = 0; i < reservedDays.length; i++) {
if ($.inArray((m + 1) + '-' + d + '-' + y, reservedDays) !== -1) {
return [false];
}
<|fim▁hole|>}
$("#input_from").datepicker({
dayNames: ["Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"],
dayNamesShort: ["Son", "Mon", "Din", "Mit", "Don", "Fra", "Sam"],
dayNamesMin: ["So", "Mo", "Di", "Mi", "Do", "Fr", "Sa"],
monthNames: ["Januar", "Februar", ";ärz", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"],
monthNamesShort: ["Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sept", "Okt", "Nov", "Dez"],
firstDay: 1,
dateFormat: "dd.mm.yy",
constrainInput: true,
beforeShowDay: noReservedDays,
minDate: 0,
onSelect: function(selected) {
$("#input_until").datepicker("option", "minDate", selected);
}
});
$("#input_until").datepicker({
dayNames: ["Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"],
dayNamesShort: ["Son", "Mon", "Din", "Mit", "Don", "Fra", "Sam"],
dayNamesMin: ["So", "Mo", "Di", "Mi", "Do", "Fr", "Sa"],
monthNames: ["Januar", "Februar", ";ärz", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"],
monthNamesShort: ["Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sept", "Okt", "Nov", "Dez"],
firstDay: 1,
dateFormat: "dd.mm.yy",
constrainInput: true,
beforeShowDay: noReservedDays,
minDate: 1,
onSelect: function(selected) {
$("#input_from").datepicker("option", "maxDate", selected);
}
});<|fim▁end|> | }
return [true];
|
<|file_name|>scrollbar.js<|end_file_name|><|fim▁begin|>// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import {createElementWithClassName} from 'chrome://resources/js/util.m.js';
/**
 * Created by |LineChart.LineChart|.
* Create a dummy scrollbar to show the position of the line chart and to scroll
* the line chart, so we can draw the visible part of the line chart only
* instead of drawing the whole chart.
* @const
*/
export class Scrollbar {
constructor(/** function(): undefined */ callback) {
/** @const {function(): undefined} - Handle the scrolling event. */
this.callback_ = callback;
/** @type {number} - The range the scrollbar can scroll. */
this.range_ = 0;
/** @type {number} - The current position of the scrollbar. */
this.position_ = 0;
/** @type {number} - The real width of this scrollbar, in pixels. */
this.width_ = 0;
/** @type {Element} - The outer div to show the scrollbar. */
this.outerDiv_ =
createElementWithClassName('div', 'horizontal-scrollbar-outer');
this.outerDiv_.addEventListener('scroll', this.onScroll_.bind(this));
/** @type {Element} - The inner div to make outer div scrollable. */
this.innerDiv_ =
createElementWithClassName('div', 'horizontal-scrollbar-inner');
this.outerDiv_.appendChild(this.innerDiv_);
}
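  // Illustrative construction (the callback is whatever redraw routine the
  // chart owner uses; the names here are hypothetical):
  //   const bar = new Scrollbar(() => chart.drawVisiblePart());
  //   container.appendChild(bar.getRootDiv());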
/**
* Scrolling event handler.
*/
onScroll_() {
const /** number */ newPosition = this.outerDiv_.scrollLeft;
if (newPosition == this.position_)
return;
this.position_ = newPosition;
this.callback_();
}
/** @return {Element} */
getRootDiv() {
return this.outerDiv_;
}
/**
   * Return the height of the scrollbar element.
* @return {number}
*/
getHeight() {
return this.outerDiv_.offsetHeight;
}
/** @return {number} */
getRange() {
return this.range_;
}
<|fim▁hole|> * Position may be float point number because |document.scrollLeft| may be
* float point number.
* @return {number}
*/
getPosition() {
return Math.round(this.position_);
}
/**
* Change the size of the outer div and update the scrollbar position.
* @param {number} width
*/
resize(width) {
if (this.width_ == width)
return;
this.width_ = width;
this.updateOuterDivWidth_();
}
updateOuterDivWidth_() {
this.constructor.setNodeWidth(this.outerDiv_, this.width_);
}
/**
* Set the scrollable range to |range|. Use the inner div's width to control
   * the scrollable range. If the position goes out of range after a range update, set
* it to the boundary value.
* @param {number} range
*/
setRange(range) {
this.range_ = range;
this.updateInnerDivWidth_();
if (range < this.position_) {
this.position_ = range;
this.updateScrollbarPosition_();
}
}
updateInnerDivWidth_() {
const width = this.outerDiv_.clientWidth;
this.constructor.setNodeWidth(this.innerDiv_, width + this.range_);
}
/**
* @param {Element} node
* @param {number} width
*/
static setNodeWidth(node, width) {
node.style.width = width + 'px';
}
/**
   * Set the scrollbar position to |position|. If the new position goes out of
* range, set it to the boundary value.
* @param {number} position
*/
setPosition(position) {
const /** number */ newPosition =
Math.max(0, Math.min(position, this.range_));
this.position_ = newPosition;
this.updateScrollbarPosition_();
}
/**
* Update the scrollbar position via Javascript scrollbar api. Position may
* not be the same value as what we assigned even if the value is in the
* range. See crbug.com/760425.
*/
updateScrollbarPosition_() {
if (this.outerDiv_.scrollLeft == this.position_)
return;
this.outerDiv_.scrollLeft = this.position_;
}
/**
* Return true if scrollbar is at the right edge of the chart.
* @return {boolean}
*/
isScrolledToRightEdge() {
    /* |scrollLeft| may become a floating-point number even if we set it to
     * some integer value. If the distance to the right edge is less than 2
     * pixels, we consider that it is scrolled to the right edge.
*/
const scrollLeftErrorAmount = 2;
return this.position_ + scrollLeftErrorAmount > this.range_;
}
/**
* Scroll the scrollbar to the right edge.
*/
scrollToRightEdge() {
this.setPosition(this.range_);
}
}<|fim▁end|> | /** |
<|file_name|>test_index.py<|end_file_name|><|fim▁begin|>from fjord.base.tests import TestCase
from fjord.feedback.models import ResponseDocType
from fjord.feedback.tests import ResponseFactory
from fjord.search.index import chunked
from fjord.search.tests import ElasticTestCase
class ChunkedTests(TestCase):
def test_chunked(self):
# chunking nothing yields nothing.
assert list(chunked([], 1)) == []
# chunking list where len(list) < n
assert list(chunked([1], 10)) == [(1,)]
# chunking a list where len(list) == n
assert list(chunked([1, 2], 2)) == [(1, 2)]
# chunking list where len(list) > n
assert list(chunked([1, 2, 3, 4, 5], 2)) == [(1, 2), (3, 4), (5,)]
class TestLiveIndexing(ElasticTestCase):
def test_live_indexing(self):
search = ResponseDocType.docs.search()
count_pre = search.count()
s = ResponseFactory(happy=True, description='Test live indexing.')
self.refresh()
assert count_pre + 1 == search.count()
<|fim▁hole|> s.delete()
self.refresh()
assert count_pre == search.count()<|fim▁end|> | |
<|file_name|>comedycentral.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow)
|https?://(:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/t6d9sg/the-daily-show-20038-highlights/be3cwo',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
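    # Illustrative invocation through youtube-dl's CLI (this URL appears in
    # _TESTS above):
    #   youtube-dl "http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart"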
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:<|fim▁hole|> if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information without
# a URL prefix; so extract the alternate reference
# and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
})
return {
'_type': 'playlist',
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}<|fim▁end|> | dlNewest = not mobj.group('episode') |
<|file_name|>redis_view.py<|end_file_name|><|fim▁begin|>#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, socket, sys, time
import data_loader
from datetime import datetime
hubblemon_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(hubblemon_path)
import common.core
redis_preset = [['memory', 'memory_human', 'memory_lua', 'memory_rss'], 'mem_frag', ['cpu_user', 'cpu_sys', 'cpu_user_children', 'cpu_sys_children'],
'connections', (lambda x : x['keyspace_hits'] / (x['keyspace_hits'] + x['keyspace_misses']) * 100, 'hit ratio'), 'expired_keys', 'evicted_keys', 'cmds_processed',
['cmd_get', 'cmd_set', 'cmd_mget', 'cmd_mset'], ['cmd_del', 'cmd_expire', 'cmd_checkpoint'],
['cmd_linsert', 'cmd_lpush', 'cmd_lpop', 'cmd_llen'], ['cmd_lindex', 'cmd_lrange'],
['cmd_sadd', 'cmd_scard', 'cmd_set', 'cmd_srem'], ['cmd_sismember', 'cmd_smembers'],
['cmd_zadd', 'cmd_zcard', 'cmd_zrem'], ['cmd_zrange', 'cmd_zrank', 'cmd_zscore']]
def redis_view(path, title = ''):
return common.core.loader(path, redis_preset, title)
#
# chart list
#
redis_cloud_map = {}
last_ts = 0
def init_plugin():
print('#### redis init ########')
ret = get_chart_list({})
print(ret)
def get_chart_data(param):
#print(param)
global redis_cloud_map
type = 'redis_stat'
if 'type' in param:
type = param['type']
if 'instance' not in param or 'server' not in param:
return None
instance_name = param['instance']
server_name = param['server']
    results = None
    if type == 'redis_stat':
for node in redis_cloud_map[server_name]:
if node.startswith(instance_name):
results = common.core.loader(server_name + '/' + node, redis_preset, title=node)
break
return results
def get_chart_list(param):
#print(param)
global redis_cloud_map
global last_ts
ts = time.time()
if ts - last_ts >= 300:<|fim▁hole|> if len(instance_list) > 0:
redis_cloud_map_tmp[entity] = instance_list
redis_cloud_map = redis_cloud_map_tmp
last_ts = ts
if 'type' in param:
type = param['type']
return (['server', 'instance'], redis_cloud_map)<|fim▁end|> | redis_cloud_map_tmp = {}
entity_list = common.core.get_entity_list()
for entity in entity_list:
instance_list = common.core.get_table_list_of_entity(entity, 'redis_') |
<|file_name|>monarchDefinition.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
/**
 * Create a syntax highlighter with a fully declarative JSON-style lexer description
* using regular expressions.
*/
import {TPromise} from 'vs/base/common/winjs.base';
import {AbstractMode} from 'vs/editor/common/modes/abstractMode';
import Supports = require('vs/editor/common/modes/supports');
import MonarchCommonTypes = require('vs/editor/common/modes/monarch/monarchCommon');
import EditorCommon = require('vs/editor/common/editorCommon');
import {IModelService} from 'vs/editor/common/services/modelService';
import Modes = require('vs/editor/common/modes');
import {IOnEnterSupportOptions} from 'vs/editor/common/modes/supports/onEnter';
export function createCommentsSupport(lexer: MonarchCommonTypes.ILexer): Supports.ICommentsSupportContribution {
return {
commentsConfiguration: {
lineCommentTokens: [lexer.lineComment],
blockCommentStartToken: lexer.blockCommentStart,
blockCommentEndToken: lexer.blockCommentEnd
}
};
}
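// Illustrative result shape (assuming the lexer was compiled from a Monarch
// definition whose lineComment token is '//'):
//
//   createCommentsSupport(lexer);
//   // => { commentsConfiguration: { lineCommentTokens: ['//'], ... } }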
export function createBracketElectricCharacterContribution(lexer: MonarchCommonTypes.ILexer): Supports.IBracketElectricCharacterContribution {
return {
brackets: lexer.standardBrackets,
regexBrackets: lexer.enhancedBrackets,
caseInsensitive: lexer.ignoreCase,
embeddedElectricCharacters: lexer.outdentTriggers.split('')
};
}
export function createTokenTypeClassificationSupportContribution(lexer: MonarchCommonTypes.ILexer): Supports.ITokenTypeClassificationSupportContribution {
return {
wordDefinition: lexer.wordDefinition
};
}
export function createCharacterPairContribution(lexer: MonarchCommonTypes.ILexer): Modes.ICharacterPairContribution {
return {
autoClosingPairs: lexer.autoClosingPairs
};
}
function _addSuggestionsAtPosition(model: EditorCommon.IModel, position:EditorCommon.IPosition, lexer: MonarchCommonTypes.ILexer, superSuggestions:Modes.ISuggestions[]): Modes.ISuggestions[] {
var extra = lexer.suggestSupport.snippets;
if (!extra || extra.length === 0) {
return superSuggestions;
}
if (!superSuggestions) {
superSuggestions = [];
}
superSuggestions.push({
currentWord: model.getWordUntilPosition(position).word,
suggestions: extra.slice(0)
});
return superSuggestions;
}
export function createOnEnterSupportOptions(lexer:MonarchCommonTypes.ILexer): IOnEnterSupportOptions {
return {
brackets: lexer.standardBrackets
};
}
export function createSuggestSupport(modelService: IModelService, mode:Modes.IMode, lexer:MonarchCommonTypes.ILexer): Supports.IComposableSuggestContribution {
if (lexer.suggestSupport.textualCompletions && mode instanceof AbstractMode) {
return {
triggerCharacters:lexer.suggestSupport.triggerCharacters,
disableAutoTrigger: lexer.suggestSupport.disableAutoTrigger,
excludeTokens: [],
suggest: (resource, position) => (<AbstractMode<any>>mode).suggest(resource, position),
composeSuggest: (resource, position, superSuggestions) => {
return TPromise.as(_addSuggestionsAtPosition(modelService.getModel(resource), position, lexer, superSuggestions));
}
};
} else {
return {
triggerCharacters:lexer.suggestSupport.triggerCharacters,
disableAutoTrigger: lexer.suggestSupport.disableAutoTrigger,
excludeTokens: [],
suggest: (resource, position) => {
return TPromise.as(_addSuggestionsAtPosition(modelService.getModel(resource), position, lexer, null));
},<|fim▁hole|> }
}<|fim▁end|> | composeSuggest: (resource, position, superSuggestions) => {
return TPromise.as(superSuggestions);
}
}; |
<|file_name|>stack.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
class Solution:
# @param {string} s
# @return {boolean}
def isValid(self, s):
        # Split the string into a list of single bracket characters.
        slist = ' '.join(s).split(' ')
        print slist
stack=[]
for item in slist:
if item in ('[','{','('):
stack.append(item)
else:
if len(stack)==0:
return False
elif stack[-1:][0]==self.rev(item):
stack = stack[:-1]
else:
return False
if len(stack)==0:<|fim▁hole|> return True
else:
return False
def rev(self,item):
if item == ']':
return '['
elif item == '}':
return '{'
else:
return '('
s=Solution()
print s.isValid(']')<|fim▁end|> | |
<|file_name|>stream.go<|end_file_name|><|fim▁begin|>/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wsstream
import (
"encoding/base64"
"io"
"net/http"
"time"
"github.com/jordic/k8s/cloudsqlip/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util"
"golang.org/x/net/websocket"
)
// The WebSocket subprotocol "binary.k8s.io" will only send messages to the
// client and ignore messages sent to the server. The received messages are
// the exact bytes written to the stream. Zero byte messages are possible.
const binaryWebSocketProtocol = "binary.k8s.io"
// The WebSocket subprotocol "base64.binary.k8s.io" will only send messages to the
// client and ignore messages sent to the server. The received messages are
// a base64 version of the bytes written to the stream. Zero byte messages are
// possible.
const base64BinaryWebSocketProtocol = "base64.binary.k8s.io"
// Reader supports returning an arbitrary byte stream over a websocket channel.
// Supports the "binary.k8s.io" and "base64.binary.k8s.io" subprotocols.
type Reader struct {
err chan error
r io.Reader
ping bool
timeout time.Duration
}
// NewReader creates a WebSocket pipe that will copy the contents of r to a provided
// WebSocket connection. If ping is true, a zero length message will be sent to the client
// before the stream begins reading.
func NewReader(r io.Reader, ping bool) *Reader {
return &Reader{
r: r,
err: make(chan error),
ping: ping,
}
}
// SetIdleTimeout sets the interval for both reads and writes before timeout. If not specified,
// there is no timeout on the reader.
func (r *Reader) SetIdleTimeout(duration time.Duration) {
r.timeout = duration
}
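// Illustrative HTTP wiring (names are hypothetical; logStream is whatever
// io.Reader is being exposed):
//
//	http.HandleFunc("/stream", func(w http.ResponseWriter, req *http.Request) {
//		r := NewReader(logStream, true)
//		r.SetIdleTimeout(30 * time.Second)
//		_ = r.Copy(w, req)
//	})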
func (r *Reader) handshake(config *websocket.Config, req *http.Request) error {
return handshake(config, req, []string{binaryWebSocketProtocol, base64BinaryWebSocketProtocol})
}
// Copy the reader to the response. The created WebSocket is closed after this
// method completes.
func (r *Reader) Copy(w http.ResponseWriter, req *http.Request) error {
go func() {
defer util.HandleCrash()
websocket.Server{Handshake: r.handshake, Handler: r.handle}.ServeHTTP(w, req)
}()
return <-r.err
}
// handle implements a WebSocket handler.
func (r *Reader) handle(ws *websocket.Conn) {
encode := len(ws.Config().Protocol) > 0 && ws.Config().Protocol[0] == base64BinaryWebSocketProtocol
defer close(r.err)
defer ws.Close()
go ignoreReceives(ws, r.timeout)
r.err <- messageCopy(ws, r.r, encode, r.ping, r.timeout)
}
func resetTimeout(ws *websocket.Conn, timeout time.Duration) {
if timeout > 0 {
ws.SetDeadline(time.Now().Add(timeout))
}
}
func messageCopy(ws *websocket.Conn, r io.Reader, base64Encode, ping bool, timeout time.Duration) error {
buf := make([]byte, 2048)
if ping {
resetTimeout(ws, timeout)
if err := websocket.Message.Send(ws, []byte{}); err != nil {
return err
}
}
for {
resetTimeout(ws, timeout)
n, err := r.Read(buf)
if err != nil {
if err == io.EOF {
return nil
}
return err<|fim▁hole|> }
if n > 0 {
if base64Encode {
if err := websocket.Message.Send(ws, base64.StdEncoding.EncodeToString(buf[:n])); err != nil {
return err
}
} else {
if err := websocket.Message.Send(ws, buf[:n]); err != nil {
return err
}
}
}
}
}<|fim▁end|> | |
<|file_name|>IOST_AboutDialog.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WAboutDialog/IOST_AboutDialog.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64<|fim▁hole|>from IOST_Config import *
import gtk
import gtk.glade
import gobject
#======================================================================
class IOST_AboutDialog():
    def __init__(self, glade_filename, window_name, object_name, main_builder=None):
        "Build the Help -> About dialog window from the Glade definition."
self.IOST_AboutDialog_WindowName = window_name
self.IOST_AboutDialog_ObjectName = object_name
if not main_builder:
self.IOST_AboutDialog_Builder = gtk.Builder()
self.IOST_AboutDialog_Builder.add_from_file(glade_filename)
self.IOST_AboutDialog_Builder.connect_signals(self)
else:
self.IOST_AboutDialog_Builder = main_builder
# self.IOST_Objs[window_name][window_name+ object_name] = self.IOST_AboutDialog_Builder.get_object(window_name+object_name)
# self.IOST_Objs[window_name][window_name+ object_name].set_version(self.IOST_Data["ProjectVersion"])
self.CreateObjsDictFromDict(self.IOST_AboutDialog_WindowName,
self.IOST_Objs[self.IOST_AboutDialog_WindowName],
self.IOST_AboutDialog_Builder,
0)
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].set_version(self.IOST_Data["ProjectVersion"])
def Run(self, window_name, object_name):
self.IOST_Objs[window_name][object_name].run()
self.IOST_Objs[window_name][object_name].hide()
def ActiveLink(self, object_name):
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_destroy(self, object, data=None):
""
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_DialogActionArea_destroy(self, object, data=None):
""
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_button_press_event(self, widget, event, data=None):
""
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_DialogVB_button_press_event(self, widget, event, data=None):
""
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
from IOST_Basic import * |
<|file_name|>TestAsin.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
package android.renderscript.cts;
import android.renderscript.Allocation;
import android.renderscript.RSRuntimeException;
import android.renderscript.Element;
public class TestAsin extends RSBaseCompute {
private ScriptC_TestAsin script;
private ScriptC_TestAsinRelaxed scriptRelaxed;
@Override
protected void setUp() throws Exception {
super.setUp();
script = new ScriptC_TestAsin(mRS);
scriptRelaxed = new ScriptC_TestAsinRelaxed(mRS);
}
public class ArgumentsFloatFloat {
public float inV;
public Target.Floaty out;
}
private void checkAsinFloatFloat() {
Allocation inV = createRandomFloatAllocation(mRS, Element.DataType.FLOAT_32, 1, 0x80b5674ff98b5a12l, -1, 1);
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 1), INPUTSIZE);
script.forEach_testAsinFloatFloat(inV, out);
verifyResultsAsinFloatFloat(inV, out, false);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloatFloat: " + e.toString());
}
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 1), INPUTSIZE);
scriptRelaxed.forEach_testAsinFloatFloat(inV, out);
verifyResultsAsinFloatFloat(inV, out, true);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloatFloat: " + e.toString());
}
}
private void verifyResultsAsinFloatFloat(Allocation inV, Allocation out, boolean relaxed) {
float[] arrayInV = new float[INPUTSIZE * 1];
inV.copyTo(arrayInV);
float[] arrayOut = new float[INPUTSIZE * 1];
out.copyTo(arrayOut);
for (int i = 0; i < INPUTSIZE; i++) {
for (int j = 0; j < 1 ; j++) {
// Extract the inputs.
ArgumentsFloatFloat args = new ArgumentsFloatFloat();
args.inV = arrayInV[i];
// Figure out what the outputs should have been.
Target target = new Target(relaxed);
CoreMathVerifier.computeAsin(args, target);
// Validate the outputs.
boolean valid = true;
if (!args.out.couldBe(arrayOut[i * 1 + j])) {
valid = false;
}
if (!valid) {
StringBuilder message = new StringBuilder();
message.append("Input inV: ");
message.append(String.format("%14.8g {%8x} %15a",
args.inV, Float.floatToRawIntBits(args.inV), args.inV));
message.append("\n");
message.append("Expected output out: ");
message.append(args.out.toString());
message.append("\n");
message.append("Actual output out: ");
message.append(String.format("%14.8g {%8x} %15a",
arrayOut[i * 1 + j], Float.floatToRawIntBits(arrayOut[i * 1 + j]), arrayOut[i * 1 + j]));
if (!args.out.couldBe(arrayOut[i * 1 + j])) {
message.append(" FAIL");
}
message.append("\n");
assertTrue("Incorrect output for checkAsinFloatFloat" +
(relaxed ? "_relaxed" : "") + ":\n" + message.toString(), valid);
}
}
}
}
private void checkAsinFloat2Float2() {
Allocation inV = createRandomFloatAllocation(mRS, Element.DataType.FLOAT_32, 2, 0x9e11e5e823f7cce6l, -1, 1);
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 2), INPUTSIZE);
script.forEach_testAsinFloat2Float2(inV, out);
verifyResultsAsinFloat2Float2(inV, out, false);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloat2Float2: " + e.toString());
}
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 2), INPUTSIZE);
scriptRelaxed.forEach_testAsinFloat2Float2(inV, out);
verifyResultsAsinFloat2Float2(inV, out, true);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloat2Float2: " + e.toString());
}
}
private void verifyResultsAsinFloat2Float2(Allocation inV, Allocation out, boolean relaxed) {
float[] arrayInV = new float[INPUTSIZE * 2];
inV.copyTo(arrayInV);
float[] arrayOut = new float[INPUTSIZE * 2];
out.copyTo(arrayOut);
for (int i = 0; i < INPUTSIZE; i++) {
for (int j = 0; j < 2 ; j++) {
// Extract the inputs.
ArgumentsFloatFloat args = new ArgumentsFloatFloat();
args.inV = arrayInV[i * 2 + j];
// Figure out what the outputs should have been.
Target target = new Target(relaxed);
CoreMathVerifier.computeAsin(args, target);
// Validate the outputs.
boolean valid = true;
if (!args.out.couldBe(arrayOut[i * 2 + j])) {
valid = false;
}
if (!valid) {
StringBuilder message = new StringBuilder();
message.append("Input inV: ");
message.append(String.format("%14.8g {%8x} %15a",
args.inV, Float.floatToRawIntBits(args.inV), args.inV));
message.append("\n");
message.append("Expected output out: ");
message.append(args.out.toString());
message.append("\n");
message.append("Actual output out: ");
message.append(String.format("%14.8g {%8x} %15a",
arrayOut[i * 2 + j], Float.floatToRawIntBits(arrayOut[i * 2 + j]), arrayOut[i * 2 + j]));
if (!args.out.couldBe(arrayOut[i * 2 + j])) {
message.append(" FAIL");
}
message.append("\n");
assertTrue("Incorrect output for checkAsinFloat2Float2" +
(relaxed ? "_relaxed" : "") + ":\n" + message.toString(), valid);
}
}
}
}
private void checkAsinFloat3Float3() {
Allocation inV = createRandomFloatAllocation(mRS, Element.DataType.FLOAT_32, 3, 0x9e13af031a12edc4l, -1, 1);
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 3), INPUTSIZE);
script.forEach_testAsinFloat3Float3(inV, out);
verifyResultsAsinFloat3Float3(inV, out, false);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloat3Float3: " + e.toString());
}
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 3), INPUTSIZE);
scriptRelaxed.forEach_testAsinFloat3Float3(inV, out);
verifyResultsAsinFloat3Float3(inV, out, true);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloat3Float3: " + e.toString());
}
}
private void verifyResultsAsinFloat3Float3(Allocation inV, Allocation out, boolean relaxed) {
float[] arrayInV = new float[INPUTSIZE * 4];
inV.copyTo(arrayInV);
float[] arrayOut = new float[INPUTSIZE * 4];
out.copyTo(arrayOut);
for (int i = 0; i < INPUTSIZE; i++) {
for (int j = 0; j < 3 ; j++) {
// Extract the inputs.
ArgumentsFloatFloat args = new ArgumentsFloatFloat();
args.inV = arrayInV[i * 4 + j];
// Figure out what the outputs should have been.
Target target = new Target(relaxed);
CoreMathVerifier.computeAsin(args, target);
// Validate the outputs.
boolean valid = true;
if (!args.out.couldBe(arrayOut[i * 4 + j])) {
valid = false;
}
if (!valid) {
StringBuilder message = new StringBuilder();
message.append("Input inV: ");
message.append(String.format("%14.8g {%8x} %15a",
args.inV, Float.floatToRawIntBits(args.inV), args.inV));
message.append("\n");
message.append("Expected output out: ");
message.append(args.out.toString());
message.append("\n");
message.append("Actual output out: ");
message.append(String.format("%14.8g {%8x} %15a",
arrayOut[i * 4 + j], Float.floatToRawIntBits(arrayOut[i * 4 + j]), arrayOut[i * 4 + j]));
if (!args.out.couldBe(arrayOut[i * 4 + j])) {
message.append(" FAIL");
}
message.append("\n");
assertTrue("Incorrect output for checkAsinFloat3Float3" +
(relaxed ? "_relaxed" : "") + ":\n" + message.toString(), valid);
}
}
}
}
private void checkAsinFloat4Float4() {
Allocation inV = createRandomFloatAllocation(mRS, Element.DataType.FLOAT_32, 4, 0x9e15781e102e0ea2l, -1, 1);
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 4), INPUTSIZE);
script.forEach_testAsinFloat4Float4(inV, out);
verifyResultsAsinFloat4Float4(inV, out, false);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloat4Float4: " + e.toString());
}
try {
Allocation out = Allocation.createSized(mRS, getElement(mRS, Element.DataType.FLOAT_32, 4), INPUTSIZE);
scriptRelaxed.forEach_testAsinFloat4Float4(inV, out);
verifyResultsAsinFloat4Float4(inV, out, true);
} catch (Exception e) {
throw new RSRuntimeException("RenderScript. Can't invoke forEach_testAsinFloat4Float4: " + e.toString());
}
}
private void verifyResultsAsinFloat4Float4(Allocation inV, Allocation out, boolean relaxed) {
float[] arrayInV = new float[INPUTSIZE * 4];
inV.copyTo(arrayInV);
float[] arrayOut = new float[INPUTSIZE * 4];
out.copyTo(arrayOut);
for (int i = 0; i < INPUTSIZE; i++) {
for (int j = 0; j < 4 ; j++) {
// Extract the inputs.
ArgumentsFloatFloat args = new ArgumentsFloatFloat();
args.inV = arrayInV[i * 4 + j];
// Figure out what the outputs should have been.
Target target = new Target(relaxed);
CoreMathVerifier.computeAsin(args, target);
// Validate the outputs.
boolean valid = true;
if (!args.out.couldBe(arrayOut[i * 4 + j])) {
valid = false;
}
if (!valid) {
StringBuilder message = new StringBuilder();
message.append("Input inV: ");
message.append(String.format("%14.8g {%8x} %15a",
args.inV, Float.floatToRawIntBits(args.inV), args.inV));
message.append("\n");
message.append("Expected output out: ");
message.append(args.out.toString());
message.append("\n");
message.append("Actual output out: ");
message.append(String.format("%14.8g {%8x} %15a",
arrayOut[i * 4 + j], Float.floatToRawIntBits(arrayOut[i * 4 + j]), arrayOut[i * 4 + j]));
if (!args.out.couldBe(arrayOut[i * 4 + j])) {
message.append(" FAIL");
}
message.append("\n");
assertTrue("Incorrect output for checkAsinFloat4Float4" +
(relaxed ? "_relaxed" : "") + ":\n" + message.toString(), valid);
}
}
}
}
public void testAsin() {
checkAsinFloatFloat();
checkAsinFloat2Float2();
checkAsinFloat3Float3();
checkAsinFloat4Float4();
}
}<|fim▁end|> | *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, |
<|file_name|>Integer.js<|end_file_name|><|fim▁begin|>define( 'type.Integer', {
// class configuration
alias : 'int integer',
extend : __lib__.type.Number,
// public properties
precision : 0,
// public methods<|fim▁hole|> valid : function( v ) {
return this.parent( v, true ) && Math.floor( v ) === v;
},
// internal methods
init : function() {
var max = this.max, min = this.min;
this.precision = 0;
this.parent( arguments );
// since we want our Types to be instantiated with as much correctness as possible,
// we don't want to cast our max/min as Integers
if ( max !== Number.POSITIVE_INFINITY )
this.max = max;
if ( min !== Number.NEGATIVE_INFINITY )
this.min = min;
},
value : function( v ) {
return Math.round( this.parent( arguments ) );
}
} );<|fim▁end|> | |
<|file_name|>android_calls.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for Android contacts2.db database events."""
from plaso.lib import eventdata
class AndroidCallFormatter(eventdata.ConditionalEventFormatter):
"""Formatter for Android call history events."""
DATA_TYPE = 'android:event:call'
FORMAT_STRING_PIECES = [
u'{call_type}',
u'Number: {number}',
u'Name: {name}',
u'Duration: {duration} seconds']
FORMAT_STRING_SHORT_PIECES = [u'{call_type} Call']
SOURCE_LONG = 'Android Call History'<|fim▁hole|> SOURCE_SHORT = 'LOG'<|fim▁end|> | |
<|file_name|>main.ts<|end_file_name|><|fim▁begin|>import { enableProdMode } from "@angular/core";
import { platformBrowserDynamic } from "@angular/platform-browser-dynamic";
<|fim▁hole|>import { AppModule } from "./app/app.module";
import { environment } from "./environments/environment";
if (environment.production) {
enableProdMode();
}
platformBrowserDynamic().bootstrapModule(AppModule);<|fim▁end|> | |
<|file_name|>modelo_valid.py<|end_file_name|><|fim▁begin|>import numpy as np
import pandas as pd
import xarray as xr
from tikon.central import Módulo, SimulMódulo, Modelo, Exper, Parcela
from tikon.central.res import Resultado
from tikon.datos import Obs
from tikon.utils import EJE_TIEMPO, EJE_PARC<|fim▁hole|>crds = {'eje 1': ['a', 'b'], 'eje 2': ['x', 'y', 'z']}
class Res(Resultado):
def __init__(símismo, sim, coords, vars_interés):
coords = {**crds, **coords}
super().__init__(sim, coords, vars_interés)
nombre = 'res'
unids = None
class SimulMóduloValid(SimulMódulo):
resultados = [Res]
def incrementar(símismo, paso, f):
super().incrementar(paso, f)
símismo.poner_valor('res', 1, rel=True)
class MóduloValid(Módulo):
nombre = 'módulo'
cls_simul = SimulMóduloValid
class MiObs(Obs):
mód = 'módulo'
var = 'res'
obs = MiObs(
datos=xr.DataArray(
np.arange(10),
coords={EJE_TIEMPO: pd.date_range(f_inic, periods=10, freq='D')}, dims=[EJE_TIEMPO]
).expand_dims({EJE_PARC: ['parcela'], **crds})
)
exper = Exper('exper', Parcela('parcela'), obs=obs)
modelo = Modelo(MóduloValid)<|fim▁end|> |
f_inic = '2000-01-01'
|
<|file_name|>config.js<|end_file_name|><|fim▁begin|>'use strict';
/**
* Module dependencies.
*/
var debug = require('debug')('swara:config'),
path = require('path'),
_ = require('lodash'),
glob = require('glob');
/**
* Load app configurations
*/
module.exports = _.extend(
require('./env/all'),
require('./env/' + process.env.NODE_ENV) || {}
);
/**
* Get files by glob patterns
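 *
 * Illustrative example (paths are hypothetical):
 * getGlobbedFiles('public/dist/app.js', 'public/') resolves the pattern
 * against the project root and returns the matches with the removeRoot
 * prefix stripped, e.g. ['dist/app.js'].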
*/
module.exports.getGlobbedFiles = function (globPatterns, removeRoot) {
// For context switching
var _this = this;
// URL paths regex
var urlRegex = new RegExp('^(?:[a-z]+:)?\/\/', 'i');
// The output array
var output = [];
// If the glob pattern is an array, process each pattern recursively; otherwise hand the string to glob<|fim▁hole|>
 output = _.union(output, _this.getGlobbedFiles(globPattern, removeRoot));
});
} else if (_.isString(globPatterns)) {
if (urlRegex.test(globPatterns)) {
output.push(globPatterns);
} else {
var publicRE = /^public\//;
if (publicRE.test(globPatterns)) {
globPatterns = __dirname + '/../' + globPatterns;
var newRoot = __dirname + '/../' + removeRoot;
removeRoot = path.normalize(newRoot);
}
var files = glob.sync(globPatterns);
if (removeRoot) {
files = files.map(function (file) {
return file.replace(removeRoot, '');
});
}
output = _.union(output, files);
}
}
debug('Returning with output: %j', output);
return output;
};
/**
* Get the modules JavaScript files
*/
module.exports.getJavaScriptAssets = function (includeTests) {
var output = this.getGlobbedFiles(this.assets.lib.js.concat(this.assets.js), 'public/');
// To include tests
if (includeTests) {
output = _.union(output, this.getGlobbedFiles(this.assets.tests));
}
debug('getJavaScriptAssets returning with: %j', output);
return output;
};
/**
* Get the modules CSS files
*/
module.exports.getCSSAssets = function () {
var output = this.getGlobbedFiles(this.assets.lib.css.concat(this.assets.css), 'public/');
debug('getCSSAssets returning with: %j', output);
return output;
};<|fim▁end|> | if (_.isArray(globPatterns)) {
globPatterns.forEach(function (globPattern) { |
<|file_name|>complex_number.cpp<|end_file_name|><|fim▁begin|>/*=============================================================================
Copyright (c) 2002-2015 Joel de Guzman
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
///////////////////////////////////////////////////////////////////////////////
//
// A complex number micro parser.
//
// [ JDG May 10, 2002 ] spirit1
// [ JDG May 9, 2007 ] spirit2
// [ JDG May 12, 2015 ] spirit X3
//
///////////////////////////////////////////////////////////////////////////////
#include <boost/config/warning_disable.hpp>
#include <boost/spirit/home/x3.hpp>
#include <iostream>
#include <string>
#include <complex>
///////////////////////////////////////////////////////////////////////////////
// Our complex number parser/compiler
///////////////////////////////////////////////////////////////////////////////
namespace client
{
template <typename Iterator>
bool parse_complex(Iterator first, Iterator last, std::complex<double>& c)
{
using boost::spirit::x3::double_;
using boost::spirit::x3::_attr;
using boost::spirit::x3::phrase_parse;
using boost::spirit::x3::ascii::space;
double rN = 0.0;
double iN = 0.0;
auto fr = [&](auto& ctx){ rN = _attr(ctx); };
auto fi = [&](auto& ctx){ iN = _attr(ctx); };
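        // fr and fi are X3 semantic actions: when their attached sub-parser
        // matches, _attr(ctx) yields the double just parsed by double_,
        // which they store into rN / iN respectively.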
bool r = phrase_parse(first, last,
// Begin grammar
(
'(' >> double_[fr]
>> -(',' >> double_[fi]) >> ')'
| double_[fr]
),
// End grammar
space);
if (!r || first != last) // fail if we did not get a full match
return false;
c = std::complex<double>(rN, iN);
return r;
}
}
////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////
int
main()
{
std::cout << "/////////////////////////////////////////////////////////\n\n";
std::cout << "\t\tA complex number micro parser for Spirit...\n\n";
std::cout << "/////////////////////////////////////////////////////////\n\n";
std::cout << "Give me a complex number of the form r or (r) or (r,i) \n";
std::cout << "Type [q or Q] to quit\n\n";
std::string str;<|fim▁hole|> while (getline(std::cin, str))
{
if (str.empty() || str[0] == 'q' || str[0] == 'Q')
break;
std::complex<double> c;
if (client::parse_complex(str.begin(), str.end(), c))
{
std::cout << "-------------------------\n";
std::cout << "Parsing succeeded\n";
std::cout << "got: " << c << std::endl;
std::cout << "\n-------------------------\n";
}
else
{
std::cout << "-------------------------\n";
std::cout << "Parsing failed\n";
std::cout << "-------------------------\n";
}
}
std::cout << "Bye... :-) \n\n";
return 0;
}<|fim▁end|> | |
<|file_name|>sananmuunnos.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Sananmuunnos: Transforming Finnish spoonerisms made easy (and systematic).
"""
__author__ = "Tuukka Ojala"
__email__ = "[email protected]"
__version__ = "2015.0918"
__license__ = "MIT"
import re
#Regular expressions for detecting different types of sananmuunnoses
#Double vowel: the word begins with an optional consonant and continues with
#two identical vowels.
_double_vowel = re.compile(r"^[^aeiouyäö]?([aeiouyäö])\1")
#Initial vowel: the word begins with a vowel and continues with a letter which
#is not the same as the previous one.
_initial_vowel = re.compile(r"^[aeiouyäö]")
#Initial consonant: The word begins with a consonant and continues with
#two non-identical vowels.
#(a backreference does not work inside a [...] class, so use a lookahead)
_initial_consonant = re.compile(r"^[^aeiouyäö]([aeiouyäö])(?!\1).")
#Matches any vowel.
_vowel = re.compile(r"[aeiouyäö]")
"""The following 3 functions test a pair of words against the regular expressions above. If they match, the words are transformed accordingly. Otherwise the function returns false."""
def _is_double_vowel(word1, word2):
"""Test word1 and word2 against the "double vowel" rule."""
match = _double_vowel.search(word2)
if match:
vowel1 = _vowel.search(word1)
vowel2 = _vowel.search(word2)
initial1 = word1[:vowel1.start() +1] + word1[vowel1.start()]
initial2 = word2[:vowel2.start() +1]
transformed1 = initial2 +word1[vowel1.end():]
transformed2 = initial1 + word2[vowel2.end() +1:]
return (transformed1, transformed2)
else:
return False
def _is_initial_vowel(word1, word2):
"""Test word1 and word2 against the "initial vowel" rule."""
if _initial_vowel.search(word1):
transformed1 = word2[:2] +word1[1:]
transformed2 = word1[0] +word2[2:]
return (transformed1, transformed2)
else:
return False
def _is_initial_consonant(word1, word2):
"""Test word1 and word2 against the "initial consonant" rule."""
if _initial_consonant.search(word1):
transformed1 = word2[:2] +word1[2:]
transformed2 = word1[:2] +word2[2:]
return (transformed1, transformed2)
else:
return False
def _vowel_harmony(word):
"""Attempts to make the given word comply with Finnish vowel harmony.
If the first vowel of the word is a front vowel (a, o or u) all the vowels
get transformed to their equivalent back vowels (ä, ö, y) and vice versa."""
vowel = _vowel.search(word)
if vowel and word[vowel.start()] in ["a","o","u"]:
word = word.replace("ä", "a")
word = word.replace("ö", "o")
word = word.replace("y", "u")
elif vowel and word[vowel.start()] in ["y", "ä", "ö"]:
word = word.replace("u", "y")
word = word.replace("a", "ä")
word = word.replace("o", "ö")
return word
def _test(transformation, word1, word2):
"""Tries transforming word1 and word2 with the given transform function.
It tries swapping the words if the transformation fails.
This function returns the transformed words or False if
the transformation failed both ways."""
result = transformation(word1, word2)
if not result:
result = transformation(word2, word1)
if result:
return (result[1], result[0])
return result
def transform(words):
"""Make a sananmuunnos ("word transformation") out of the given words.
This function returns either the created sananmuunnos or None
if the transformation failed."""
transformed = None
words = words.lower()
words_list = []
try:
words_list = words.split(" ")
if len(words_list) < 2:
return None
word1 = words_list[0]
word2 = words_list[-1]
except ValueError:
return None
for transformation in _transformations:
transformed = _test(transformation, word1, word2)
if transformed:
break
word1, word2 = transformed
word1 = _vowel_harmony(word1)
word2 = _vowel_harmony(word2)
return " ".join((word1, " ".join(words_list[1:-1]), word2))<|fim▁hole|>#List of transformations used by the "transform" function.
_transformations = [_is_double_vowel, _is_initial_vowel, _is_initial_consonant]
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("Usage: {} word1 word2 [...]".format(sys.argv[0]))
else:
print(transform(" ".join(sys.argv[1:])))<|fim▁end|> | |
<|file_name|>egl.rs<|end_file_name|><|fim▁begin|>// Licensed under the Apache License, Version 2.0.
// This file may not be copied, modified, or distributed except according to those terms.
//! Based on https://github.com/tomaka/glutin/blob/1b2d62c0e9/src/api/egl/mod.rs
#![cfg(windows)]
#![allow(unused_variables)]
use glutin::ContextError;
use glutin::CreationError;
use glutin::GlAttributes;
use glutin::GlRequest;
use glutin::PixelFormat;
use glutin::PixelFormatRequirements;
use glutin::ReleaseBehavior;
use glutin::Robustness;
use glutin::Api;
use std::ffi::{CStr, CString};
use std::os::raw::c_int;
use std::ptr;
use std::cell::Cell;
use mozangle::egl::ffi as egl;
mod ffi {
pub use mozangle::egl::ffi as egl;
pub use mozangle::egl::ffi::*;
}
pub struct Context {
display: ffi::egl::types::EGLDisplay,
context: ffi::egl::types::EGLContext,
surface: Cell<ffi::egl::types::EGLSurface>,
api: Api,
pixel_format: PixelFormat,
}
impl Context {
/// Start building an EGL context.
///
/// This function initializes some things and chooses the pixel format.
///
/// To finish the process, you must call `.finish(window)` on the `ContextPrototype`.
pub fn new<'a>(
pf_reqs: &PixelFormatRequirements,
opengl: &'a GlAttributes<&'a Context>,
) -> Result<ContextPrototype<'a>, CreationError>
{
if opengl.sharing.is_some() {
unimplemented!()
}
// calling `eglGetDisplay` or equivalent
let display = unsafe { egl::GetDisplay(ptr::null_mut()) };
if display.is_null() {
return Err(CreationError::PlatformSpecific("Could not create EGL display object".to_string()));
}
let egl_version = unsafe {
let mut major: ffi::egl::types::EGLint = 0; // out param
let mut minor: ffi::egl::types::EGLint = 0; // out param
if egl::Initialize(display, &mut major, &mut minor) == 0 {
return Err(CreationError::OsError(format!("eglInitialize failed")))
}
(major, minor)
};
// the list of extensions supported by the client once initialized is different from the
// list of extensions obtained earlier
let extensions = if egl_version >= (1, 2) {
let p = unsafe { CStr::from_ptr(egl::QueryString(display, ffi::egl::EXTENSIONS as i32)) };
let list = String::from_utf8(p.to_bytes().to_vec()).unwrap_or_else(|_| format!(""));
list.split(' ').map(|e| e.to_string()).collect::<Vec<_>>()
} else {
vec![]
};
// binding the right API and choosing the version
let (version, api) = unsafe {
match opengl.version {
GlRequest::Latest => {
if egl_version >= (1, 4) {
if egl::BindAPI(ffi::egl::OPENGL_API) != 0 {
(None, Api::OpenGl)
} else if egl::BindAPI(ffi::egl::OPENGL_ES_API) != 0 {
(None, Api::OpenGlEs)
} else {
return Err(CreationError::OpenGlVersionNotSupported);
}
} else {
(None, Api::OpenGlEs)
}
},
GlRequest::Specific(Api::OpenGlEs, version) => {
if egl_version >= (1, 2) {
if egl::BindAPI(ffi::egl::OPENGL_ES_API) == 0 {
return Err(CreationError::OpenGlVersionNotSupported);
}
}
(Some(version), Api::OpenGlEs)
},
GlRequest::Specific(Api::OpenGl, version) => {
if egl_version < (1, 4) {
return Err(CreationError::OpenGlVersionNotSupported);
}
if egl::BindAPI(ffi::egl::OPENGL_API) == 0 {
return Err(CreationError::OpenGlVersionNotSupported);
}
(Some(version), Api::OpenGl)
},
GlRequest::Specific(_, _) => return Err(CreationError::OpenGlVersionNotSupported),
GlRequest::GlThenGles { opengles_version, opengl_version } => {
if egl_version >= (1, 4) {
if egl::BindAPI(ffi::egl::OPENGL_API) != 0 {
(Some(opengl_version), Api::OpenGl)
} else if egl::BindAPI(ffi::egl::OPENGL_ES_API) != 0 {
(Some(opengles_version), Api::OpenGlEs)
} else {
return Err(CreationError::OpenGlVersionNotSupported);
}
} else {
(Some(opengles_version), Api::OpenGlEs)
}
},
}
};
let (config_id, pixel_format) = unsafe {
choose_fbconfig(display, &egl_version, api, version, pf_reqs)?
};
Ok(ContextPrototype {
opengl: opengl,
display: display,
egl_version: egl_version,
extensions: extensions,
api: api,
version: version,
config_id: config_id,
pixel_format: pixel_format,
})
}
#[inline]
pub fn swap_buffers(&self) -> Result<(), ContextError> {
if self.surface.get() == ffi::egl::NO_SURFACE {
return Err(ContextError::ContextLost);
}
let ret = unsafe {
egl::SwapBuffers(self.display, self.surface.get())
};
if ret == 0 {
match unsafe { egl::GetError() } as u32 {
ffi::egl::CONTEXT_LOST => return Err(ContextError::ContextLost),
err => panic!("eglSwapBuffers failed (eglGetError returned 0x{:x})", err)
}
} else {
Ok(())
}
}
pub unsafe fn make_current(&self) -> Result<(), ContextError> {
let ret = egl::MakeCurrent(self.display, self.surface.get(), self.surface.get(), self.context);
if ret == 0 {
match egl::GetError() as u32 {
ffi::egl::CONTEXT_LOST => return Err(ContextError::ContextLost),
err => panic!("eglMakeCurrent failed (eglGetError returned 0x{:x})", err)
}
} else {
Ok(())
}
}
#[inline]
pub fn is_current(&self) -> bool {
unsafe { egl::GetCurrentContext() == self.context }
}
pub fn get_proc_address(&self, addr: &str) -> *const () {
let addr = CString::new(addr.as_bytes()).unwrap();
let addr = addr.as_ptr();
unsafe {
egl::GetProcAddress(addr) as *const _
}
}
#[inline]
pub fn get_api(&self) -> Api {
self.api
}
#[inline]
pub fn get_pixel_format(&self) -> PixelFormat {
self.pixel_format.clone()
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
// we don't call MakeCurrent(0, 0) because we are not sure that the context
// is still the current one
egl::DestroyContext(self.display, self.context);
egl::DestroySurface(self.display, self.surface.get());
egl::Terminate(self.display);
}
}
}
pub struct ContextPrototype<'a> {
opengl: &'a GlAttributes<&'a Context>,
display: ffi::egl::types::EGLDisplay,
egl_version: (ffi::egl::types::EGLint, ffi::egl::types::EGLint),
extensions: Vec<String>,
api: Api,
version: Option<(u8, u8)>,
config_id: ffi::egl::types::EGLConfig,
pixel_format: PixelFormat,
}
impl<'a> ContextPrototype<'a> {
pub fn get_native_visual_id(&self) -> ffi::egl::types::EGLint {
let mut value = 0;
let ret = unsafe { egl::GetConfigAttrib(self.display, self.config_id,
ffi::egl::NATIVE_VISUAL_ID
as ffi::egl::types::EGLint, &mut value) };
if ret == 0 { panic!("eglGetConfigAttrib failed") };
value
}
pub fn finish(self, native_window: ffi::EGLNativeWindowType)
-> Result<Context, CreationError>
{
let surface = unsafe {
let surface = egl::CreateWindowSurface(self.display, self.config_id, native_window,
ptr::null());
if surface.is_null() {
return Err(CreationError::OsError(format!("eglCreateWindowSurface failed")))
}
surface
};
self.finish_impl(surface)
}
pub fn finish_pbuffer(self, dimensions: (u32, u32)) -> Result<Context, CreationError> {
let attrs = &[
ffi::egl::WIDTH as c_int, dimensions.0 as c_int,
ffi::egl::HEIGHT as c_int, dimensions.1 as c_int,
ffi::egl::NONE as c_int,
];
let surface = unsafe {
let surface = egl::CreatePbufferSurface(self.display, self.config_id,
attrs.as_ptr());
if surface.is_null() {
return Err(CreationError::OsError(format!("eglCreatePbufferSurface failed")))
}
surface
};
self.finish_impl(surface)
}
fn finish_impl(self, surface: ffi::egl::types::EGLSurface)
-> Result<Context, CreationError>
{
let context = unsafe {
if let Some(version) = self.version {
create_context(self.display, &self.egl_version,
&self.extensions, self.api, version, self.config_id,
self.opengl.debug, self.opengl.robustness)?
} else if self.api == Api::OpenGlEs {
if let Ok(ctxt) = create_context(self.display, &self.egl_version,
&self.extensions, self.api, (2, 0), self.config_id,
self.opengl.debug, self.opengl.robustness)
{
ctxt
} else if let Ok(ctxt) = create_context(self.display, &self.egl_version,
&self.extensions, self.api, (1, 0),
self.config_id, self.opengl.debug,
self.opengl.robustness)
{
ctxt
} else {
return Err(CreationError::OpenGlVersionNotSupported);
}
} else {
if let Ok(ctxt) = create_context(self.display, &self.egl_version,
&self.extensions, self.api, (3, 2), self.config_id,
self.opengl.debug, self.opengl.robustness)
{
ctxt
} else if let Ok(ctxt) = create_context(self.display, &self.egl_version,
&self.extensions, self.api, (3, 1),
self.config_id, self.opengl.debug,
self.opengl.robustness)
{
ctxt
} else if let Ok(ctxt) = create_context(self.display, &self.egl_version,
&self.extensions, self.api, (1, 0),
self.config_id, self.opengl.debug,
self.opengl.robustness)
{
ctxt
} else {
return Err(CreationError::OpenGlVersionNotSupported);
}
}
};
Ok(Context {
display: self.display,
context: context,
surface: Cell::new(surface),
api: self.api,
pixel_format: self.pixel_format,
})
}
}
unsafe fn choose_fbconfig(display: ffi::egl::types::EGLDisplay,
egl_version: &(ffi::egl::types::EGLint, ffi::egl::types::EGLint),
api: Api, version: Option<(u8, u8)>, reqs: &PixelFormatRequirements)
-> Result<(ffi::egl::types::EGLConfig, PixelFormat), CreationError>
{
let descriptor = {
let mut out: Vec<c_int> = Vec::with_capacity(37);
if egl_version >= &(1, 2) {
out.push(ffi::egl::COLOR_BUFFER_TYPE as c_int);
out.push(ffi::egl::RGB_BUFFER as c_int);
}
out.push(ffi::egl::SURFACE_TYPE as c_int);
// TODO: Some versions of Mesa report a BAD_ATTRIBUTE error
// if we ask for PBUFFER_BIT as well as WINDOW_BIT
out.push((ffi::egl::WINDOW_BIT) as c_int);
match (api, version) {
(Api::OpenGlEs, Some((3, _))) => {
if egl_version < &(1, 3) { return Err(CreationError::NoAvailablePixelFormat); }
out.push(ffi::egl::RENDERABLE_TYPE as c_int);
out.push(ffi::egl::OPENGL_ES3_BIT as c_int);
out.push(ffi::egl::CONFORMANT as c_int);
out.push(ffi::egl::OPENGL_ES3_BIT as c_int);
},
(Api::OpenGlEs, Some((2, _))) => {
if egl_version < &(1, 3) { return Err(CreationError::NoAvailablePixelFormat); }
out.push(ffi::egl::RENDERABLE_TYPE as c_int);
out.push(ffi::egl::OPENGL_ES2_BIT as c_int);
out.push(ffi::egl::CONFORMANT as c_int);
out.push(ffi::egl::OPENGL_ES2_BIT as c_int);
},
(Api::OpenGlEs, Some((1, _))) => {
if egl_version >= &(1, 3) {
out.push(ffi::egl::RENDERABLE_TYPE as c_int);
out.push(ffi::egl::OPENGL_ES_BIT as c_int);
out.push(ffi::egl::CONFORMANT as c_int);
out.push(ffi::egl::OPENGL_ES_BIT as c_int);
}
},
(Api::OpenGlEs, _) => unimplemented!(),
(Api::OpenGl, _) => {
if egl_version < &(1, 3) { return Err(CreationError::NoAvailablePixelFormat); }
out.push(ffi::egl::RENDERABLE_TYPE as c_int);
out.push(ffi::egl::OPENGL_BIT as c_int);
out.push(ffi::egl::CONFORMANT as c_int);
out.push(ffi::egl::OPENGL_BIT as c_int);
},
(_, _) => unimplemented!(),
};
if let Some(hardware_accelerated) = reqs.hardware_accelerated {
out.push(ffi::egl::CONFIG_CAVEAT as c_int);
out.push(if hardware_accelerated {
ffi::egl::NONE as c_int
} else {
ffi::egl::SLOW_CONFIG as c_int
});
}
if let Some(color) = reqs.color_bits {
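            // Split the requested depth across R/G/B, giving any spare
            // bits to green first, then blue (e.g. 16 gives the usual 5/6/5).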
out.push(ffi::egl::RED_SIZE as c_int);
out.push((color / 3) as c_int);
out.push(ffi::egl::GREEN_SIZE as c_int);
out.push((color / 3 + if color % 3 != 0 { 1 } else { 0 }) as c_int);
out.push(ffi::egl::BLUE_SIZE as c_int);
out.push((color / 3 + if color % 3 == 2 { 1 } else { 0 }) as c_int);
}
if let Some(alpha) = reqs.alpha_bits {
out.push(ffi::egl::ALPHA_SIZE as c_int);
out.push(alpha as c_int);
}
if let Some(depth) = reqs.depth_bits {
out.push(ffi::egl::DEPTH_SIZE as c_int);
out.push(depth as c_int);
}
if let Some(stencil) = reqs.stencil_bits {
out.push(ffi::egl::STENCIL_SIZE as c_int);
out.push(stencil as c_int);
}
if let Some(true) = reqs.double_buffer {
return Err(CreationError::NoAvailablePixelFormat);
}
if let Some(multisampling) = reqs.multisampling {
out.push(ffi::egl::SAMPLES as c_int);
out.push(multisampling as c_int);
}
if reqs.stereoscopy {
return Err(CreationError::NoAvailablePixelFormat);
}
// FIXME: srgb is not taken into account
match reqs.release_behavior {
ReleaseBehavior::Flush => (),
ReleaseBehavior::None => {
// TODO: with EGL you need to manually set the behavior
unimplemented!()
},
}
out.push(ffi::egl::NONE as c_int);
out
};
// calling `eglChooseConfig`
let mut config_id = ptr::null(); // out param
let mut num_configs = 0; // out param
if egl::ChooseConfig(display, descriptor.as_ptr(), &mut config_id, 1, &mut num_configs) == 0 {
return Err(CreationError::OsError(format!("eglChooseConfig failed")));
}
if num_configs == 0 {
return Err(CreationError::NoAvailablePixelFormat);
}
// analyzing each config
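    // attrib! queries one attribute of the chosen config via
    // eglGetConfigAttrib, mapping failure to CreationError::OsError;
    // it is used below to fill in the PixelFormat description.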
macro_rules! attrib {
($display:expr, $config:expr, $attr:expr) => (
{
let mut value = 0; // out param
let res = egl::GetConfigAttrib($display, $config,
$attr as ffi::egl::types::EGLint, &mut value);
if res == 0 {
return Err(CreationError::OsError(format!("eglGetConfigAttrib failed")));
}
value
}
)
}
let desc = PixelFormat {
hardware_accelerated: attrib!(display, config_id, ffi::egl::CONFIG_CAVEAT)
!= ffi::egl::SLOW_CONFIG as i32,
color_bits: attrib!(display, config_id, ffi::egl::RED_SIZE) as u8 +
attrib!(display, config_id, ffi::egl::BLUE_SIZE) as u8 +
attrib!(display, config_id, ffi::egl::GREEN_SIZE) as u8,
alpha_bits: attrib!(display, config_id, ffi::egl::ALPHA_SIZE) as u8,
depth_bits: attrib!(display, config_id, ffi::egl::DEPTH_SIZE) as u8,
stencil_bits: attrib!(display, config_id, ffi::egl::STENCIL_SIZE) as u8,
stereoscopy: false,
double_buffer: true,
multisampling: match attrib!(display, config_id, ffi::egl::SAMPLES) {
0 | 1 => None,
a => Some(a as u16),
},
srgb: false, // TODO: use EGL_KHR_gl_colorspace to know that
};
Ok((config_id, desc))
}
unsafe fn create_context(display: ffi::egl::types::EGLDisplay,
egl_version: &(ffi::egl::types::EGLint, ffi::egl::types::EGLint),
extensions: &[String], api: Api, version: (u8, u8),
config_id: ffi::egl::types::EGLConfig, gl_debug: bool,
gl_robustness: Robustness)
-> Result<ffi::egl::types::EGLContext, CreationError>
{
let mut context_attributes = Vec::with_capacity(10);
let mut flags = 0;
if egl_version >= &(1, 5) || extensions.iter().find(|s| s == &"EGL_KHR_create_context")
.is_some()
{
context_attributes.push(ffi::egl::CONTEXT_MAJOR_VERSION as i32);
context_attributes.push(version.0 as i32);
context_attributes.push(ffi::egl::CONTEXT_MINOR_VERSION as i32);
context_attributes.push(version.1 as i32);<|fim▁hole|>
// handling robustness
let supports_robustness = egl_version >= &(1, 5) ||
extensions.iter()
.find(|s| s == &"EGL_EXT_create_context_robustness")
.is_some();
match gl_robustness {
Robustness::NotRobust => (),
Robustness::NoError => {
if extensions.iter().find(|s| s == &"EGL_KHR_create_context_no_error").is_some() {
context_attributes.push(ffi::egl::CONTEXT_OPENGL_NO_ERROR_KHR as c_int);
context_attributes.push(1);
}
},
Robustness::RobustNoResetNotification => {
if supports_robustness {
context_attributes.push(ffi::egl::CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY
as c_int);
context_attributes.push(ffi::egl::NO_RESET_NOTIFICATION as c_int);
flags = flags | ffi::egl::CONTEXT_OPENGL_ROBUST_ACCESS as c_int;
} else {
return Err(CreationError::RobustnessNotSupported);
}
},
Robustness::TryRobustNoResetNotification => {
if supports_robustness {
context_attributes.push(ffi::egl::CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY
as c_int);
context_attributes.push(ffi::egl::NO_RESET_NOTIFICATION as c_int);
flags = flags | ffi::egl::CONTEXT_OPENGL_ROBUST_ACCESS as c_int;
}
},
Robustness::RobustLoseContextOnReset => {
if supports_robustness {
context_attributes.push(ffi::egl::CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY
as c_int);
context_attributes.push(ffi::egl::LOSE_CONTEXT_ON_RESET as c_int);
flags = flags | ffi::egl::CONTEXT_OPENGL_ROBUST_ACCESS as c_int;
} else {
return Err(CreationError::RobustnessNotSupported);
}
},
Robustness::TryRobustLoseContextOnReset => {
if supports_robustness {
context_attributes.push(ffi::egl::CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY
as c_int);
context_attributes.push(ffi::egl::LOSE_CONTEXT_ON_RESET as c_int);
flags = flags | ffi::egl::CONTEXT_OPENGL_ROBUST_ACCESS as c_int;
}
},
}
if gl_debug {
if egl_version >= &(1, 5) {
context_attributes.push(ffi::egl::CONTEXT_OPENGL_DEBUG as i32);
context_attributes.push(ffi::egl::TRUE as i32);
}
// TODO: using this flag sometimes generates an error
// there was a change in the specs that added this flag, so it may not be
// supported everywhere ; however it is not possible to know whether it is
// supported or not
//flags = flags | ffi::egl::CONTEXT_OPENGL_DEBUG_BIT_KHR as i32;
}
context_attributes.push(ffi::egl::CONTEXT_FLAGS_KHR as i32);
context_attributes.push(flags);
} else if egl_version >= &(1, 3) && api == Api::OpenGlEs {
// robustness is not supported
match gl_robustness {
Robustness::RobustNoResetNotification | Robustness::RobustLoseContextOnReset => {
return Err(CreationError::RobustnessNotSupported);
},
_ => ()
}
context_attributes.push(ffi::egl::CONTEXT_CLIENT_VERSION as i32);
context_attributes.push(version.0 as i32);
}
context_attributes.push(ffi::egl::NONE as i32);
let context = egl::CreateContext(display, config_id, ptr::null(),
context_attributes.as_ptr());
if context.is_null() {
match egl::GetError() as u32 {
ffi::egl::BAD_ATTRIBUTE => return Err(CreationError::OpenGlVersionNotSupported),
e => panic!("eglCreateContext failed: 0x{:x}", e),
}
}
Ok(context)
}<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns,url
from main import views
urlpatterns = patterns('',
url(r'^$',views.index,name='index'),
url(r'^tags/$',views.tags,name='tags'),<|fim▁hole|><|fim▁end|> | url(r'^tags/(?P<tag_name>\w+)/$',views.tag,name='tag'),
url(r'^add_link/$',views.add_link,name='add_link'),
) |
<|file_name|>adminsite.py<|end_file_name|><|fim▁begin|># Copyright 2015 Rémy Lapeyrade <remy at lapeyrade dot net>
# Copyright 2015 LAAS-CNRS
#
#
# This file is part of TouSIX-Manager.
#
# TouSIX-Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TouSIX-Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TouSIX-Manager. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.admin import AdminSite
class TouSIXAdmin(AdminSite):
"""<|fim▁hole|> site_title = "TouIX"
index_template = "index_touSIX.html"
admin_tousix = TouSIXAdmin(name='Administration')<|fim▁end|> | Special admin site, created to display widgets in the main panel.
"""
site_header = "TouIX - Administration de TouSIX" |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>from zerver.lib.test_classes import WebhookTestCase
class PagerDutyHookTests(WebhookTestCase):
STREAM_NAME = 'pagerduty'
URL_TEMPLATE = "/api/v1/external/pagerduty?api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = 'pagerduty'
def test_trigger(self) -> None:
expected_message = 'Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) triggered by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) (assigned to [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ)):\n\n``` quote\nfoo\n```'
self.send_and_test_stream_message('trigger', "Incident 3", expected_message)
def test_trigger_v2(self) -> None:
expected_message = 'Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) triggered by [Production XDB Cluster](https://webdemo.pagerduty.com/services/PN49J75) (assigned to [Laura Haley](https://webdemo.pagerduty.com/users/P553OPV)):\n\n``` quote\nMy new incident\n```'
self.send_and_test_stream_message('trigger_v2', 'Incident 33', expected_message)
def test_trigger_without_assignee_v2(self) -> None:
expected_message = 'Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) triggered by [Production XDB Cluster](https://webdemo.pagerduty.com/services/PN49J75) (assigned to nobody):\n\n``` quote\nMy new incident\n```'<|fim▁hole|>
def test_unacknowledge(self) -> None:
expected_message = 'Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) unacknowledged by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) (assigned to [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ)):\n\n``` quote\nfoo\n```'
self.send_and_test_stream_message('unacknowledge', "Incident 3", expected_message)
def test_resolved(self) -> None:
expected_message = 'Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) resolved by [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ):\n\n``` quote\nIt is on fire\n```'
self.send_and_test_stream_message('resolved', "Incident 1", expected_message)
def test_resolved_v2(self) -> None:
expected_message = 'Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) resolved by [Laura Haley](https://webdemo.pagerduty.com/users/P553OPV):\n\n``` quote\nMy new incident\n```'
self.send_and_test_stream_message('resolve_v2', 'Incident 33', expected_message)
def test_auto_resolved(self) -> None:
expected_message = 'Incident [2](https://zulip-test.pagerduty.com/incidents/PX7K9J2) resolved:\n\n``` quote\nnew\n```'
self.send_and_test_stream_message('auto_resolved', "Incident 2", expected_message)
def test_acknowledge(self) -> None:
expected_message = 'Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ):\n\n``` quote\nIt is on fire\n```'
self.send_and_test_stream_message('acknowledge', "Incident 1", expected_message)
def test_acknowledge_without_trigger_summary_data(self) -> None:
expected_message = 'Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ):\n\n``` quote\n\n```'
self.send_and_test_stream_message('acknowledge_without_trigger_summary_data',
"Incident 1", expected_message)
def test_acknowledge_v2(self) -> None:
expected_message = 'Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) acknowledged by [Laura Haley](https://webdemo.pagerduty.com/users/P553OPV):\n\n``` quote\nMy new incident\n```'
self.send_and_test_stream_message('acknowledge_v2', 'Incident 33', expected_message)
def test_incident_assigned_v2(self) -> None:
expected_message = 'Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) assigned to [Wiley Jacobson](https://webdemo.pagerduty.com/users/PFBSJ2Z):\n\n``` quote\nMy new incident\n```'
self.send_and_test_stream_message('assign_v2', 'Incident 33', expected_message)
def test_no_subject(self) -> None:
expected_message = 'Incident [48219](https://dropbox.pagerduty.com/incidents/PJKGZF9) resolved:\n\n``` quote\nmp_error_block_down_critical\u2119\u01b4\n```'
self.send_and_test_stream_message('mp_fail', "Incident 48219", expected_message)<|fim▁end|> | self.send_and_test_stream_message('trigger_without_assignee_v2', 'Incident 33', expected_message) |
<|file_name|>kingdf.py<|end_file_name|><|fim▁begin|># Class that represents a King DF
import numpy
from scipy import special, integrate, interpolate
from ..util import conversion
from .df import df
from .sphericaldf import isotropicsphericaldf
_FOURPI= 4.*numpy.pi
_TWOOVERSQRTPI= 2./numpy.sqrt(numpy.pi)
class kingdf(isotropicsphericaldf):
"""Class that represents a King DF"""
def __init__(self,W0,M=1.,rt=1.,npt=1001,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize a King DF
INPUT:
W0 - dimensionless central potential W0 = Psi(0)/sigma^2 (in practice, needs to be <~ 200, where the DF is essentially isothermal)
M= (1.) total mass (can be a Quantity)
rt= (1.) tidal radius (can be a Quantity)
npt= (1001) number of points to use to solve for Psi(r)
ro=, vo= standard galpy unit scaling parameters
OUTPUT:
(none; sets up instance)
HISTORY:
2020-07-09 - Written - Bovy (UofT)
"""
# Just run df init to set up unit-conversion parameters
df.__init__(self,ro=ro,vo=vo)
self.W0= W0
self.M= conversion.parse_mass(M,ro=self._ro,vo=self._vo)
self.rt= conversion.parse_length(rt,ro=self._ro)
# Solve (mass,rtidal)-scale-free model, which is the basis for
# the full solution
self._scalefree_kdf= _scalefreekingdf(self.W0)
self._scalefree_kdf.solve(npt)
# Set up scaling factors
self._radius_scale= self.rt/self._scalefree_kdf.rt
self._mass_scale= self.M/self._scalefree_kdf.mass
self._velocity_scale= numpy.sqrt(self._mass_scale/self._radius_scale)
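        # in galpy's natural units (G = 1), velocity scales as sqrt(M/r)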
self._density_scale= self._mass_scale/self._radius_scale**3.<|fim▁hole|> self.sigma= self._velocity_scale
self._sigma2= self.sigma**2.
self.rho1= self._density_scale
# Setup the potential, use original params in case they had units
# because then the initialization will turn on units for this object
from ..potential import KingPotential
pot= KingPotential(W0=self.W0,M=M,rt=rt,_sfkdf=self._scalefree_kdf,
ro=ro,vo=vo)
# Now initialize the isotropic DF
isotropicsphericaldf.__init__(self,pot=pot,scale=self.r0,
rmax=self.rt,ro=ro,vo=vo)
self._potInf= self._pot(self.rt,0.,use_physical=False)
# Setup inverse cumulative mass function for radius sampling
self._icmf= interpolate.InterpolatedUnivariateSpline(\
self._mass_scale*self._scalefree_kdf._cumul_mass/self.M,
self._radius_scale*self._scalefree_kdf._r,
k=3)
# Setup velocity DF interpolator for velocity sampling here
self._rmin_sampling= 0.
self._v_vesc_pvr_interpolator= self._make_pvr_interpolator(\
r_a_end=numpy.log10(self.rt/self._scale))
def dens(self,r):
return self._scalefree_kdf.dens(r/self._radius_scale)\
*self._density_scale
def fE(self,E):
out= numpy.zeros(numpy.atleast_1d(E).shape)
varE= self._potInf-E
if numpy.sum(varE > 0.) > 0:
out[varE > 0.]= (numpy.exp(varE[varE > 0.]/self._sigma2)-1.)\
*(2.*numpy.pi*self._sigma2)**-1.5*self.rho1
        return out  # mass density, not /self.M as for number density
class _scalefreekingdf(object):
"""Internal helper class to solve the scale-free King DF model, that is, the one that only depends on W = Psi/sigma^2"""
def __init__(self,W0):
self.W0= W0
def solve(self,npt=1001):
"""Solve the model W(r) at npt points (note: not equally spaced in
either r or W, because combination of two ODEs for different r ranges)"""
# Set up arrays for outputs
r= numpy.zeros(npt)
W= numpy.zeros(npt)
dWdr= numpy.zeros(npt)
# Initialize (r[0]=0 already)
W[0]= self.W0
# Determine central density and r0
self.rho0= self._dens_W(self.W0)
self.r0= numpy.sqrt(9./4./numpy.pi/self.rho0)
# First solve Poisson equation ODE from r=0 to r0 using form
# d^2 Psi / dr^2 = ... (d psi / dr = v, r^2 dv / dr = RHS-2*r*v)
if self.W0 < 2.:
rbreak= self.r0/100.
else:
rbreak= self.r0
#Using linspace focuses on what happens ~rbreak rather than on <<rbreak
# which is what you want, because W ~ constant at r <~ r0
r[:npt//2]= numpy.linspace(0.,rbreak,npt//2)
sol= integrate.solve_ivp(\
lambda t,y: [y[1],-_FOURPI*self._dens_W(y[0])
-(2.*y[1]/t if t > 0. else 0.)],
[0.,rbreak],[self.W0,0.],method='LSODA',t_eval=r[:npt//2])
W[:npt//2]= sol.y[0]
dWdr[:npt//2]= sol.y[1]
# Then solve Poisson equation ODE from Psi(r0) to Psi=0 using form
# d^2 r / d Psi^2 = ... (d r / d psi = 1/v, dv / dpsi = 1/v(RHS-2*r*v))
# Added advantage that this becomes ~log-spaced in r, which is what
# you want
W[npt//2-1:]= numpy.linspace(sol.y[0,-1],0.,npt-npt//2+1)
sol= integrate.solve_ivp(\
lambda t,y: [1./y[1],
-1./y[1]*(_FOURPI*self._dens_W(t)
+2.*y[1]/y[0])],
[sol.y[0,-1],0.],[rbreak,sol.y[1,-1]],
method='LSODA',t_eval=W[npt//2-1:])
r[npt//2-1:]= sol.y[0]
dWdr[npt//2-1:]= sol.y[1]
# Store solution
self._r= r
self._W= W
self._dWdr= dWdr
# Also store density at these points, and the tidal radius
self._rho= self._dens_W(self._W)
self.rt= r[-1]
self.c= numpy.log10(self.rt/self.r0)
# Interpolate solution
self._W_from_r=\
interpolate.InterpolatedUnivariateSpline(self._r,self._W,k=3)
# Compute the cumulative mass and store the total mass
mass_shells= numpy.array([\
integrate.quad(lambda r: _FOURPI*r**2*self.dens(r),
rlo,rhi)[0] for rlo,rhi in zip(r[:-1],r[1:])])
self._cumul_mass= numpy.hstack((\
integrate.quad(lambda r: _FOURPI*r**2*self.dens(r),0.,r[0])[0],
numpy.cumsum(mass_shells)))
self.mass= self._cumul_mass[-1]
return None
def _dens_W(self,W):
"""Density as a function of W"""
sqW= numpy.sqrt(W)
return numpy.exp(W)*special.erf(sqW)-_TWOOVERSQRTPI*sqW*(1.+2./3.*W)
def dens(self,r):
return self._dens_W(self._W_from_r(r))<|fim▁end|> | # Store central density, r0...
self.rho0= self._scalefree_kdf.rho0*self._density_scale
self.r0= self._scalefree_kdf.r0*self._radius_scale
self.c= self._scalefree_kdf.c # invariant |
<|file_name|>es-BR.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
const u = undefined;
function plural(n: number): number {
if (n === 1) return 1;
return 5;
}
export default [
'es-BR', [['a. m.', 'p. m.'], ['a.m.', 'p.m.'], u], u,
[
['d', 'l', 'm', 'm', 'j', 'v', 's'], ['dom.', 'lun.', 'mar.', 'mié.', 'jue.', 'vie.', 'sáb.'],
['domingo', 'lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado'],
['DO', 'LU', 'MA', 'MI', 'JU', 'VI', 'SA']
],
[
['D', 'L', 'M', 'M', 'J', 'V', 'S'], ['dom.', 'lun.', 'mar.', 'mié.', 'jue.', 'vie.', 'sáb.'],
['domingo', 'lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado'],
['DO', 'LU', 'MA', 'MI', 'JU', 'VI', 'SA']
],
[
['E', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D'],<|fim▁hole|> 'enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'septiembre',
'octubre', 'noviembre', 'diciembre'
]
],
u, [['a. C.', 'd. C.'], u, ['antes de Cristo', 'después de Cristo']], 0, [6, 0],
['d/M/yy', 'd MMM y', 'd \'de\' MMMM \'de\' y', 'EEEE, d \'de\' MMMM \'de\' y'],
['HH:mm', 'HH:mm:ss', 'HH:mm:ss z', 'HH:mm:ss zzzz'], ['{1} {0}', u, '{1} \'a\' \'las\' {0}', u],
['.', ',', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0 %', '¤#,##0.00', '#E0'], 'R$', 'real brasileño', {
'AUD': [u, '$'],
'CAD': [u, '$'],
'CNY': [u, '¥'],
'ESP': ['₧'],
'EUR': [u, '€'],
'FKP': [u, 'FK£'],
'GBP': [u, '£'],
'HKD': [u, '$'],
'ILS': [u, '₪'],
'INR': [u, '₹'],
'JPY': [u, '¥'],
'KRW': [u, '₩'],
'MXN': [u, '$'],
'NZD': [u, '$'],
'RON': [u, 'L'],
'SSP': [u, 'SD£'],
'SYP': [u, 'S£'],
'TWD': [u, 'NT$'],
'USD': [u, '$'],
'VEF': [u, 'BsF'],
'VND': [u, '₫'],
'XAF': [],
'XCD': [u, '$'],
'XOF': []
},
plural
];<|fim▁end|> | [
'ene.', 'feb.', 'mar.', 'abr.', 'may.', 'jun.', 'jul.', 'ago.', 'sep.', 'oct.', 'nov.', 'dic.'
],
[ |
<|file_name|>update_ingest.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import superdesk
from flask import current_app as app
from settings import DAYS_TO_KEEP
from datetime import timedelta
from werkzeug.exceptions import HTTPException
from superdesk.notification import push_notification
from superdesk.io import providers
from superdesk.celery_app import celery
from superdesk.utc import utcnow
from superdesk.workflow import set_default_state
from superdesk.errors import ProviderError
from superdesk.stats import stats
from superdesk.upload import url_for_media
from superdesk.media.media_operations import download_file_from_url, process_file
from superdesk.media.renditions import generate_renditions
UPDATE_SCHEDULE_DEFAULT = {'minutes': 5}
LAST_UPDATED = 'last_updated'
STATE_INGESTED = 'ingested'
logger = logging.getLogger(__name__)
superdesk.workflow_state(STATE_INGESTED)
superdesk.workflow_action(
name='ingest'
)
def is_valid_type(provider, provider_type_filter=None):
"""Test if given provider has valid type and should be updated.
:param provider: provider to be updated
:param provider_type_filter: active provider type filter
"""
provider_type = provider.get('type')
if provider_type not in providers:
return False
if provider_type_filter and provider_type != provider_type_filter:
return False
return True
def is_scheduled(provider):
"""Test if given provider should be scheduled for update.
:param provider: ingest provider
"""
now = utcnow()
last_updated = provider.get(LAST_UPDATED, now - timedelta(days=100)) # if never updated run now
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return last_updated + timedelta(**update_schedule) < now
def is_closed(provider):
"""Test if provider is closed.
:param provider: ingest provider
"""
return provider.get('is_closed', False)
def filter_expired_items(provider, items):
try:
days_to_keep_content = provider.get('days_to_keep', DAYS_TO_KEEP)
expiration_date = utcnow() - timedelta(days=days_to_keep_content)
return [item for item in items if item.get('versioncreated', utcnow()) > expiration_date]
except Exception as ex:
raise ProviderError.providerFilterExpiredContentError(ex, provider)
def get_provider_rule_set(provider):
if provider.get('rule_set'):
return superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
def get_task_ttl(provider):
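    # The task TTL mirrors the provider's update period, e.g. the default
    # schedule {'minutes': 5} gives a 300 s TTL, so an unrun update expires
    # once the next one is due.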
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return update_schedule.get('minutes', 0) * 60 + update_schedule.get('hours', 0) * 3600
def get_task_id(provider):
return 'update-ingest-{0}-{1}'.format(provider.get('name'), provider.get('_id'))
class UpdateIngest(superdesk.Command):
"""Update ingest providers."""
option_list = (
superdesk.Option('--provider', '-p', dest='provider_type'),
)
def run(self, provider_type=None):
for provider in superdesk.get_resource_service('ingest_providers').get(req=None, lookup={}):
if is_valid_type(provider, provider_type) and is_scheduled(provider) and not is_closed(provider):
kwargs = {
'provider': provider,
'rule_set': get_provider_rule_set(provider)
}
update_provider.apply_async(
task_id=get_task_id(provider),
expires=get_task_ttl(provider),
kwargs=kwargs)
@celery.task
def update_provider(provider, rule_set=None):
"""
Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
updates the provider.
"""
superdesk.get_resource_service('ingest_providers').update(provider['_id'], {
LAST_UPDATED: utcnow(),
        # Provide the existing _etag, since these system updates shouldn't change the document's _etag.
app.config['ETAG']: provider.get(app.config['ETAG'])
})
for items in providers[provider.get('type')].update(provider):
ingest_items(items, provider, rule_set)
stats.incr('ingest.ingested_items', len(items))
logger.info('Provider {0} updated'.format(provider['_id']))
push_notification('ingest:update')
def process_anpa_category(item, provider):
try:
anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if anpa_categories:
for anpa_category in anpa_categories['items']:
if anpa_category['is_active'] is True \
and item['anpa-category']['qcode'].lower() == anpa_category['value'].lower():
item['anpa-category'] = {'qcode': item['anpa-category']['qcode'], 'name': anpa_category['name']}
break
except Exception as ex:
raise ProviderError.anpaError(ex, provider)
def apply_rule_set(item, provider, rule_set=None):
"""
    Applies the rule set to the item being ingested. If there's no rule set then the item will
be returned without any change.
:param item: Item to be ingested
:param provider: provider object from whom the item was received
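    :param rule_set: rule set to apply; when None, the provider's configured rule set is looked up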
:return: item
"""
try:
if rule_set is None and provider.get('rule_set') is not None:
rule_set = superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)<|fim▁hole|> if rule_set and 'body_html' in item:
body = item['body_html']
for rule in rule_set['rules']:
body = body.replace(rule['old'], rule['new'])
item['body_html'] = body
return item
except Exception as ex:
raise ProviderError.ruleError(ex, provider)
def ingest_items(items, provider, rule_set=None):
all_items = filter_expired_items(provider, items)
items_dict = {doc['guid']: doc for doc in all_items}
for item in [doc for doc in all_items if doc.get('type') != 'composite']:
ingest_item(item, provider, rule_set)
for item in [doc for doc in all_items if doc.get('type') == 'composite']:
for ref in [ref for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]:
ref.setdefault('location', 'ingest')
itemRendition = items_dict.get(ref['residRef'], {}).get('renditions')
if itemRendition:
ref.setdefault('renditions', itemRendition)
ingest_item(item, provider, rule_set)
def ingest_item(item, provider, rule_set=None):
try:
item.setdefault('_id', item['guid'])
providers[provider.get('type')].provider = provider
item['ingest_provider'] = str(provider['_id'])
item.setdefault('source', provider.get('source', ''))
set_default_state(item, STATE_INGESTED)
if 'anpa-category' in item:
process_anpa_category(item, provider)
apply_rule_set(item, provider, rule_set)
ingest_service = superdesk.get_resource_service('ingest')
if item.get('ingest_provider_sequence') is None:
ingest_service.set_ingest_provider_sequence(item, provider)
rend = item.get('renditions', {})
if rend:
baseImageRend = rend.get('baseImage') or next(iter(rend.values()))
if baseImageRend:
href = providers[provider.get('type')].prepare_href(baseImageRend['href'])
update_renditions(item, href)
old_item = ingest_service.find_one(_id=item['guid'], req=None)
if old_item:
ingest_service.put(item['guid'], item)
else:
try:
ingest_service.post([item])
except HTTPException as e:
logger.error("Exception while persisting item in ingest collection", e)
ingest_service.put(item['guid'], item)
except ProviderError:
raise
except Exception as ex:
raise ProviderError.ingestError(ex, provider)
def update_renditions(item, href):
inserted = []
try:
content, filename, content_type = download_file_from_url(href)
file_type, ext = content_type.split('/')
metadata = process_file(content, file_type)
file_guid = app.media.put(content, filename, content_type, metadata)
inserted.append(file_guid)
rendition_spec = app.config.get('RENDITIONS', {}).get('picture', {})
renditions = generate_renditions(content, file_guid, inserted, file_type,
content_type, rendition_spec, url_for_media)
item['renditions'] = renditions
item['mimetype'] = content_type
item['filemeta'] = metadata
except Exception as io:
logger.exception(io)
for file_id in inserted:
app.media.delete(file_id)
raise
superdesk.command('ingest:update', UpdateIngest())<|fim▁end|> | |
<|file_name|>NavBar.test.js<|end_file_name|><|fim▁begin|>import React from "react";
import { shallow } from "enzyme";
import { Link } from "react-router";
import { createMockUser } from "../../utilities/tests/test-utils";
import NavBar from "./NavBar";
const notLoggedUser = createMockUser();
const authenticatedUser = createMockUser("[email protected]", true, true, "ADMIN");
const authenticatedViewer = createMockUser("[email protected]", true, true, "VIEWER");
const defaultProps = {
config: {
enableDatasetImport: false,
},
user: notLoggedUser,
rootPath: "/florence",
location: {},
};
const withPreviewNavProps = {
...defaultProps,
location: {
...defaultProps.location,
pathname: "/florence/collections/foo-1234/preview",
},
workingOn: {
id: "foo-1234",
name: "foo",
},
};
const NavbarItems = ["Collections", "Users and access", "Teams", "Security", "Sign out"];
describe("NavBar", () => {
describe("when user is not authenticated", () => {
it("should render only one link to Sign in", () => {
const component = shallow(<NavBar {...defaultProps} />);
expect(component.hasClass("global-nav__list")).toBe(true);
expect(component.find(Link)).toHaveLength(1);
expect(component.find("Link[to='/florence/login']").exists()).toBe(true);
});
});
describe("when user is authenticated as Admin", () => {
it("should render navigation with links", () => {
const component = shallow(<NavBar {...defaultProps} user={authenticatedUser} />);
const nav = component.find(Link);
expect(component.hasClass("global-nav__list")).toBe(true);
expect(component.find(Link)).toHaveLength(NavbarItems.length);
nav.forEach((n, i) => expect(n.getElement().props.children).toBe(NavbarItems[i]));
});
it("should not render Sign in link", () => {
const component = shallow(<NavBar {...defaultProps} user={authenticatedUser} />);
expect(component.hasClass("sign-in")).toBe(false);
});
it("should not display Datasets", () => {
const component = shallow(<NavBar {...defaultProps} user={authenticatedUser} />);
expect(component.find("Link[to='/florence/uploads/data']").exists()).toBe(false);
});
describe("when enableNewSignIn feature flag is enabled", () => {
const props = {
...defaultProps,
config: {
...defaultProps.config,
enableNewSignIn: true,
},
};
const component = shallow(<NavBar {...props} user={authenticatedUser} />);
it("Preview teams option should be present", () => {
const link = component.find("Link[to='/florence/groups']");
expect(link.getElement().props.children[0].includes("Preview teams"));
});
});
describe("when enabled dataset import", () => {
it("should display Datasets", () => {
const props = {
...defaultProps,
user: authenticatedUser,
config: {<|fim▁hole|> },
};
const component = shallow(<NavBar {...props} />);
expect(component.find("Link[to='/florence/uploads/data']").exists()).toBe(true);
});
});
describe("when enabled dataset import", () => {
it("should display Datasets", () => {
const props = {
...defaultProps,
user: authenticatedUser,
config: {
...defaultProps.config,
enableDatasetImport: true,
},
};
const component = shallow(<NavBar {...props} />);
expect(component.find("Link[to='/florence/uploads/data']").exists()).toBe(true);
});
});
describe("when on collections", () => {
it("should display Working On: ", () => {
const props = {
...defaultProps,
user: authenticatedUser,
location: {
pathname: "/florence/collections/foo-1234",
},
workingOn: {
id: "foo-1234",
name: "foo",
},
};
const wrapper = shallow(<NavBar {...props} />);
const link = wrapper.find("Link[to='/florence/collections/foo-1234']");
link.getElement().props.children[0].includes("Working on:");
link.getElement().props.children[0].includes("foo");
});
});
});
describe("when user is authenticated as Viewer", () => {
it("should render navigation with links", () => {
const NavbarItems = ["Collections", "Sign out"];
const component = shallow(<NavBar {...defaultProps} user={authenticatedViewer} />);
const nav = component.find(Link);
expect(component.hasClass("global-nav__list")).toBe(true);
expect(component.find(Link)).toHaveLength(NavbarItems.length);
nav.forEach((n, i) => expect(n.getElement().props.children).toBe(NavbarItems[i]));
});
describe("when on collections url", () => {
it("should render PreviewNav component", () => {
const component = shallow(<NavBar {...withPreviewNavProps} user={authenticatedViewer} />);
expect(component.find("Connect(PreviewNav)")).toHaveLength(1);
});
});
});
});<|fim▁end|> | ...defaultProps.config,
enableDatasetImport: true, |
<|file_name|>lte-rlc-um.cc<|end_file_name|><|fim▁begin|>/* -*- Mode: C++; c-file-style: "gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2011 Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Manuel Requena <[email protected]>
*/
#include "ns3/simulator.h"
#include "ns3/log.h"
#include "ns3/lte-rlc-header.h"
#include "ns3/lte-rlc-um.h"
#include "ns3/lte-rlc-sdu-status-tag.h"
#include "ns3/lte-rlc-tag.h"
NS_LOG_COMPONENT_DEFINE ("LteRlcUm");
namespace ns3 {
NS_OBJECT_ENSURE_REGISTERED (LteRlcUm);
LteRlcUm::LteRlcUm ()
: m_maxTxBufferSize (2 * 1024 * 1024),
m_txBufferSize (0),
m_sequenceNumber (0),
m_vrUr (0),
m_vrUx (0),
m_vrUh (0),
m_windowSize (512),
m_expectedSeqNumber (0)
{
NS_LOG_FUNCTION (this);
m_reassemblingState = WAITING_S0_FULL;
}
LteRlcUm::~LteRlcUm ()
{
NS_LOG_FUNCTION (this);
}
TypeId
LteRlcUm::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::LteRlcUm")
.SetParent<LteRlc> ()
.AddConstructor<LteRlcUm> ()
.AddAttribute ("MaxTxBufferSize",
"Maximum Size of the Transmission Buffer (in Bytes)",
UintegerValue (2 * 1024 * 1024),
MakeUintegerAccessor (&LteRlcUm::m_maxTxBufferSize),
MakeUintegerChecker<uint32_t> ())
;
return tid;
}
void
LteRlcUm::DoDispose ()
{
NS_LOG_FUNCTION (this);
m_reorderingTimer.Cancel ();
m_rbsTimer.Cancel ();
LteRlc::DoDispose ();
}
/**
* RLC SAP
*/
void
LteRlcUm::DoTransmitPdcpPdu (Ptr<Packet> p)
{
NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid << p->GetSize ());
if (m_txBufferSize + p->GetSize () <= m_maxTxBufferSize)
{
/** Store arrival time */
RlcTag timeTag (Simulator::Now ());
p->AddPacketTag (timeTag);
/** Store PDCP PDU */
LteRlcSduStatusTag tag;
tag.SetStatus (LteRlcSduStatusTag::FULL_SDU);
p->AddPacketTag (tag);
NS_LOG_LOGIC ("Tx Buffer: New packet added");
m_txBuffer.push_back (p);
m_txBufferSize += p->GetSize ();
NS_LOG_LOGIC ("NumOfBuffers = " << m_txBuffer.size() );
NS_LOG_LOGIC ("txBufferSize = " << m_txBufferSize);
}
else
{
// Discard full RLC SDU
NS_LOG_LOGIC ("TxBuffer is full. RLC SDU discarded");
NS_LOG_LOGIC ("MaxTxBufferSize = " << m_maxTxBufferSize);
NS_LOG_LOGIC ("txBufferSize = " << m_txBufferSize);
NS_LOG_LOGIC ("packet size = " << p->GetSize ());
}
/** Report Buffer Status */
DoReportBufferStatus ();
m_rbsTimer.Cancel ();
}
/**
* MAC SAP
*/
void
LteRlcUm::DoNotifyTxOpportunity (uint32_t bytes, uint8_t layer, uint8_t harqId)
{
NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid << bytes);
if (bytes <= 2)
{
      // Stingy MAC: the fixed part of the header is 2 bytes; we need more bytes for the data
NS_LOG_LOGIC ("TX opportunity too small = " << bytes);
return;
}
Ptr<Packet> packet = Create<Packet> ();
LteRlcHeader rlcHeader;
// Build Data field
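  // The fixed part of the RLC UM header takes 2 bytes, so at most
  // (bytes - 2) bytes of SDU data fit in this transmission opportunity.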
uint32_t nextSegmentSize = bytes - 2;
uint32_t nextSegmentId = 1;
uint32_t dataFieldTotalSize = 0;
uint32_t dataFieldAddedSize = 0;
std::vector < Ptr<Packet> > dataField;
// Remove the first packet from the transmission buffer.
  // If only a segment of the packet is taken, the remainder is given back later
if ( m_txBuffer.size () == 0 )
{
NS_LOG_LOGIC ("No data pending");
return;
}
NS_LOG_LOGIC ("SDUs in TxBuffer = " << m_txBuffer.size ());
NS_LOG_LOGIC ("First SDU buffer = " << *(m_txBuffer.begin()));
NS_LOG_LOGIC ("First SDU size = " << (*(m_txBuffer.begin()))->GetSize ());
NS_LOG_LOGIC ("Next segment size = " << nextSegmentSize);
NS_LOG_LOGIC ("Remove SDU from TxBuffer");
Ptr<Packet> firstSegment = (*(m_txBuffer.begin ()))->Copy ();
m_txBufferSize -= (*(m_txBuffer.begin()))->GetSize ();
NS_LOG_LOGIC ("txBufferSize = " << m_txBufferSize );
m_txBuffer.erase (m_txBuffer.begin ());
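  // Fill the Data field. Each iteration handles one of three cases:
  //  (1) the SDU is larger than the remaining room (or larger than 2047
  //      bytes, the maximum an LI field can describe): segment it and give
  //      the tail back to the transmission buffer;
  //  (2) the SDU is the last one available, or what would remain after it
  //      is too small for another LI field: add it and close the PDU;
  //  (3) the SDU fits with room to spare: concatenate it, push an LI field,
  //      and fetch the next SDU.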
while ( firstSegment && (firstSegment->GetSize () > 0) && (nextSegmentSize > 0) )
{
NS_LOG_LOGIC ("WHILE ( firstSegment && firstSegment->GetSize > 0 && nextSegmentSize > 0 )");
NS_LOG_LOGIC (" firstSegment size = " << firstSegment->GetSize ());
NS_LOG_LOGIC (" nextSegmentSize = " << nextSegmentSize);
if ( (firstSegment->GetSize () > nextSegmentSize) ||
// Segment larger than 2047 octets can only be mapped to the end of the Data field
(firstSegment->GetSize () > 2047)
)
{
          // Take the minimum size, due to the 2047-byte 3GPP exception
// This exception is due to the length of the LI field (just 11 bits)
uint32_t currSegmentSize = std::min (firstSegment->GetSize (), nextSegmentSize);
NS_LOG_LOGIC (" IF ( firstSegment > nextSegmentSize ||");
NS_LOG_LOGIC (" firstSegment > 2047 )");
// Segment txBuffer.FirstBuffer and
// Give back the remaining segment to the transmission buffer
Ptr<Packet> newSegment = firstSegment->CreateFragment (0, currSegmentSize);
NS_LOG_LOGIC (" newSegment size = " << newSegment->GetSize ());
// Status tag of the new and remaining segments
// Note: This is the only place where a PDU is segmented and
// therefore its status can change
LteRlcSduStatusTag oldTag, newTag;
firstSegment->RemovePacketTag (oldTag);
newSegment->RemovePacketTag (newTag);
if (oldTag.GetStatus () == LteRlcSduStatusTag::FULL_SDU)
{
newTag.SetStatus (LteRlcSduStatusTag::FIRST_SEGMENT);
oldTag.SetStatus (LteRlcSduStatusTag::LAST_SEGMENT);
}
else if (oldTag.GetStatus () == LteRlcSduStatusTag::LAST_SEGMENT)
{
newTag.SetStatus (LteRlcSduStatusTag::MIDDLE_SEGMENT);
//oldTag.SetStatus (LteRlcSduStatusTag::LAST_SEGMENT);
}
// Give back the remaining segment to the transmission buffer
firstSegment->RemoveAtStart (currSegmentSize);
NS_LOG_LOGIC (" firstSegment size (after RemoveAtStart) = " << firstSegment->GetSize ());
if (firstSegment->GetSize () > 0)
{
firstSegment->AddPacketTag (oldTag);
m_txBuffer.insert (m_txBuffer.begin (), firstSegment);
m_txBufferSize += (*(m_txBuffer.begin()))->GetSize ();
NS_LOG_LOGIC (" TX buffer: Give back the remaining segment");
NS_LOG_LOGIC (" TX buffers = " << m_txBuffer.size ());
NS_LOG_LOGIC (" Front buffer size = " << (*(m_txBuffer.begin()))->GetSize ());
NS_LOG_LOGIC (" txBufferSize = " << m_txBufferSize );
}
else
{
// Whole segment was taken, so adjust tag
if (newTag.GetStatus () == LteRlcSduStatusTag::FIRST_SEGMENT)
{
newTag.SetStatus (LteRlcSduStatusTag::FULL_SDU);
}
else if (newTag.GetStatus () == LteRlcSduStatusTag::MIDDLE_SEGMENT)
{
newTag.SetStatus (LteRlcSduStatusTag::LAST_SEGMENT);
}
}
// Segment is completely taken or
// the remaining segment is given back to the transmission buffer
firstSegment = 0;
// Put status tag once it has been adjusted
newSegment->AddPacketTag (newTag);
// Add Segment to Data field
dataFieldAddedSize = newSegment->GetSize ();
dataFieldTotalSize += dataFieldAddedSize;
dataField.push_back (newSegment);
newSegment = 0;
// ExtensionBit (Next_Segment - 1) = 0
rlcHeader.PushExtensionBit (LteRlcHeader::DATA_FIELD_FOLLOWS);
// no LengthIndicator for the last one
nextSegmentSize -= dataFieldAddedSize;
nextSegmentId++;
// nextSegmentSize MUST be zero (only if segment is smaller or equal to 2047)
// (NO more segments) → exit
// break;
}
else if ( (nextSegmentSize - firstSegment->GetSize () <= 2) || (m_txBuffer.size () == 0) )
{
NS_LOG_LOGIC (" IF nextSegmentSize - firstSegment->GetSize () <= 2 || txBuffer.size == 0");
// Add txBuffer.FirstBuffer to DataField
dataFieldAddedSize = firstSegment->GetSize ();
dataFieldTotalSize += dataFieldAddedSize;
dataField.push_back (firstSegment);
firstSegment = 0;
// ExtensionBit (Next_Segment - 1) = 0
rlcHeader.PushExtensionBit (LteRlcHeader::DATA_FIELD_FOLLOWS);
// no LengthIndicator for the last one
nextSegmentSize -= dataFieldAddedSize;
nextSegmentId++;
NS_LOG_LOGIC (" SDUs in TxBuffer = " << m_txBuffer.size ());
if (m_txBuffer.size () > 0)
{
NS_LOG_LOGIC (" First SDU buffer = " << *(m_txBuffer.begin()));
NS_LOG_LOGIC (" First SDU size = " << (*(m_txBuffer.begin()))->GetSize ());
}
NS_LOG_LOGIC (" Next segment size = " << nextSegmentSize);
// nextSegmentSize <= 2 (only if txBuffer is not empty)
// (NO more segments) → exit
// break;
}
      else // (firstSegment->GetSize () < nextSegmentSize) && (m_txBuffer.size () > 0)
{
NS_LOG_LOGIC (" IF firstSegment < NextSegmentSize && txBuffer.size > 0");
// Add txBuffer.FirstBuffer to DataField
dataFieldAddedSize = firstSegment->GetSize ();
dataFieldTotalSize += dataFieldAddedSize;
dataField.push_back (firstSegment);
// ExtensionBit (Next_Segment - 1) = 1
rlcHeader.PushExtensionBit (LteRlcHeader::E_LI_FIELDS_FOLLOWS);
// LengthIndicator (Next_Segment) = txBuffer.FirstBuffer.length()
rlcHeader.PushLengthIndicator (firstSegment->GetSize ());
nextSegmentSize -= ((nextSegmentId % 2) ? (2) : (1)) + dataFieldAddedSize;
nextSegmentId++;
NS_LOG_LOGIC (" SDUs in TxBuffer = " << m_txBuffer.size ());
if (m_txBuffer.size () > 0)
{
NS_LOG_LOGIC (" First SDU buffer = " << *(m_txBuffer.begin()));
NS_LOG_LOGIC (" First SDU size = " << (*(m_txBuffer.begin()))->GetSize ());
}
NS_LOG_LOGIC (" Next segment size = " << nextSegmentSize);
NS_LOG_LOGIC (" Remove SDU from TxBuffer");
// (more segments)
firstSegment = (*(m_txBuffer.begin ()))->Copy ();
m_txBufferSize -= (*(m_txBuffer.begin()))->GetSize ();
m_txBuffer.erase (m_txBuffer.begin ());
NS_LOG_LOGIC (" txBufferSize = " << m_txBufferSize );
}
}
// Build RLC header
rlcHeader.SetSequenceNumber (m_sequenceNumber++);
// Build RLC PDU with DataField and Header
std::vector< Ptr<Packet> >::iterator it;
it = dataField.begin ();
uint8_t framingInfo = 0;
// FIRST SEGMENT
LteRlcSduStatusTag tag;
(*it)->RemovePacketTag (tag);
if ( (tag.GetStatus () == LteRlcSduStatusTag::FULL_SDU) ||
(tag.GetStatus () == LteRlcSduStatusTag::FIRST_SEGMENT) )
{
framingInfo |= LteRlcHeader::FIRST_BYTE;
}
else
{
framingInfo |= LteRlcHeader::NO_FIRST_BYTE;
}
(*it)->AddPacketTag (tag);
while (it < dataField.end ())
{
NS_LOG_LOGIC ("Adding SDU/segment to packet, length = " << (*it)->GetSize ());
packet->AddAtEnd (*it);
it++;
}
  // LAST SEGMENT (note: there may be only one segment, in which case it is also the first)
it--;
(*it)->RemovePacketTag (tag);
if ( (tag.GetStatus () == LteRlcSduStatusTag::FULL_SDU) ||
(tag.GetStatus () == LteRlcSduStatusTag::LAST_SEGMENT) )
{
framingInfo |= LteRlcHeader::LAST_BYTE;
}
else
{
framingInfo |= LteRlcHeader::NO_LAST_BYTE;
}
(*it)->AddPacketTag (tag);
rlcHeader.SetFramingInfo (framingInfo);
NS_LOG_LOGIC ("RLC header: " << rlcHeader);
packet->AddHeader (rlcHeader);
// Sender timestamp
RlcTag rlcTag (Simulator::Now ());
packet->AddByteTag (rlcTag);
m_txPdu (m_rnti, m_lcid, packet->GetSize ());
// Send RLC PDU to MAC layer
LteMacSapProvider::TransmitPduParameters params;
params.pdu = packet;
params.rnti = m_rnti;
params.lcid = m_lcid;
params.layer = layer;
params.harqProcessId = harqId;
m_macSapProvider->TransmitPdu (params);
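  // Keep the buffer status report fresh: while SDUs remain queued, re-report
  // every 10 ms so the MAC scheduler keeps granting TX opportunities.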
if (! m_txBuffer.empty ())
{
m_rbsTimer.Cancel ();
m_rbsTimer = Simulator::Schedule (MilliSeconds (10), &LteRlcUm::ExpireRbsTimer, this);
}
}
void
LteRlcUm::DoNotifyHarqDeliveryFailure ()
{
NS_LOG_FUNCTION (this);
}
void
LteRlcUm::DoReceivePdu (Ptr<Packet> p)
{
NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid << p->GetSize ());
// Receiver timestamp
RlcTag rlcTag;
Time delay;
if (p->FindFirstMatchingByteTag (rlcTag))
{
delay = Simulator::Now() - rlcTag.GetSenderTimestamp ();
}
m_rxPdu (m_rnti, m_lcid, p->GetSize (), delay.GetNanoSeconds ());
// 5.1.2.2 Receive operations
// Get RLC header parameters
LteRlcHeader rlcHeader;
p->PeekHeader (rlcHeader);
NS_LOG_LOGIC ("RLC header: " << rlcHeader);
SequenceNumber10 seqNumber = rlcHeader.GetSequenceNumber ();
// 5.1.2.2.1 General
// The receiving UM RLC entity shall maintain a reordering window according to state variable VR(UH) as follows:
// - a SN falls within the reordering window if (VR(UH) - UM_Window_Size) <= SN < VR(UH);
// - a SN falls outside of the reordering window otherwise.
// When receiving an UMD PDU from lower layer, the receiving UM RLC entity shall:
// - either discard the received UMD PDU or place it in the reception buffer (see sub clause 5.1.2.2.2);
// - if the received UMD PDU was placed in the reception buffer:
// - update state variables, reassemble and deliver RLC SDUs to upper layer and start/stop t-Reordering as needed (see sub clause 5.1.2.2.3);
// When t-Reordering expires, the receiving UM RLC entity shall:
// - update state variables, reassemble and deliver RLC SDUs to upper layer and start t-Reordering as needed (see sub clause 5.1.2.2.4).
// 5.1.2.2.2 Actions when an UMD PDU is received from lower layer
// When an UMD PDU with SN = x is received from lower layer, the receiving UM RLC entity shall:
// - if VR(UR) < x < VR(UH) and the UMD PDU with SN = x has been received before; or
// - if (VR(UH) - UM_Window_Size) <= x < VR(UR):
// - discard the received UMD PDU;
// - else:
// - place the received UMD PDU in the reception buffer.
NS_LOG_LOGIC ("VR(UR) = " << m_vrUr);
NS_LOG_LOGIC ("VR(UX) = " << m_vrUx);
NS_LOG_LOGIC ("VR(UH) = " << m_vrUh);
NS_LOG_LOGIC ("SN = " << seqNumber);
m_vrUr.SetModulusBase (m_vrUh - m_windowSize);
m_vrUh.SetModulusBase (m_vrUh - m_windowSize);
seqNumber.SetModulusBase (m_vrUh - m_windowSize);
if ( ( (m_vrUr < seqNumber) && (seqNumber < m_vrUh) && (m_rxBuffer.count (seqNumber.GetValue ()) > 0) ) ||
( ((m_vrUh - m_windowSize) <= seqNumber) && (seqNumber < m_vrUr) )
)
{
NS_LOG_LOGIC ("PDU discarded");
p = 0;
return;
}
else
{
NS_LOG_LOGIC ("Place PDU in the reception buffer");
m_rxBuffer[seqNumber.GetValue ()] = p;
}
// 5.1.2.2.3 Actions when an UMD PDU is placed in the reception buffer
// When an UMD PDU with SN = x is placed in the reception buffer, the receiving UM RLC entity shall:
// - if x falls outside of the reordering window:
// - update VR(UH) to x + 1;
// - reassemble RLC SDUs from any UMD PDUs with SN that falls outside of the reordering window, remove
// RLC headers when doing so and deliver the reassembled RLC SDUs to upper layer in ascending order of the
// RLC SN if not delivered before;
// - if VR(UR) falls outside of the reordering window:
// - set VR(UR) to (VR(UH) - UM_Window_Size);
if ( ! IsInsideReorderingWindow (seqNumber))
{
NS_LOG_LOGIC ("SN is outside the reordering window");
m_vrUh = seqNumber + 1;
NS_LOG_LOGIC ("New VR(UH) = " << m_vrUh);
ReassembleOutsideWindow ();
if ( ! IsInsideReorderingWindow (m_vrUr) )
{
m_vrUr = m_vrUh - m_windowSize;
NS_LOG_LOGIC ("VR(UR) is outside the reordering window");
NS_LOG_LOGIC ("New VR(UR) = " << m_vrUr);
}
}
// - if the reception buffer contains an UMD PDU with SN = VR(UR):
// - update VR(UR) to the SN of the first UMD PDU with SN > current VR(UR) that has not been received;
// - reassemble RLC SDUs from any UMD PDUs with SN < updated VR(UR), remove RLC headers when doing
// so and deliver the reassembled RLC SDUs to upper layer in ascending order of the RLC SN if not delivered
// before;
if ( m_rxBuffer.count (m_vrUr.GetValue ()) > 0 )
{
NS_LOG_LOGIC ("Reception buffer contains SN = " << m_vrUr);
std::map <uint16_t, Ptr<Packet> >::iterator it;
uint16_t newVrUr;
SequenceNumber10 oldVrUr = m_vrUr;
it = m_rxBuffer.find (m_vrUr.GetValue ());
newVrUr = (it->first) + 1;
while ( m_rxBuffer.count (newVrUr) > 0 )
{
newVrUr++;
}
m_vrUr = newVrUr;
NS_LOG_LOGIC ("New VR(UR) = " << m_vrUr);
ReassembleSnInterval (oldVrUr, m_vrUr);
}
// m_vrUh can change previously, set new modulus base
// for the t-Reordering timer-related comparisons
m_vrUr.SetModulusBase (m_vrUh - m_windowSize);
m_vrUx.SetModulusBase (m_vrUh - m_windowSize);
m_vrUh.SetModulusBase (m_vrUh - m_windowSize);
// - if t-Reordering is running:
// - if VR(UX) <= VR(UR); or
  //    - if VR(UX) falls outside of the reordering window and VR(UX) is not equal to VR(UH):
// - stop and reset t-Reordering;
if ( m_reorderingTimer.IsRunning () )
{
NS_LOG_LOGIC ("Reordering timer is running");
if ( (m_vrUx <= m_vrUr) ||
((! IsInsideReorderingWindow (m_vrUx)) && (m_vrUx != m_vrUh)) )
{
NS_LOG_LOGIC ("Stop reordering timer");
m_reorderingTimer.Cancel ();
}
}
// - if t-Reordering is not running (includes the case when t-Reordering is stopped due to actions above):
// - if VR(UH) > VR(UR):
// - start t-Reordering;
// - set VR(UX) to VR(UH).
if ( ! m_reorderingTimer.IsRunning () )
{
NS_LOG_LOGIC ("Reordering timer is not running");
if ( m_vrUh > m_vrUr )
{
NS_LOG_LOGIC ("VR(UH) > VR(UR)");
NS_LOG_LOGIC ("Start reordering timer");
m_reorderingTimer = Simulator::Schedule (Time ("0.1s"),
&LteRlcUm::ExpireReorderingTimer ,this);
m_vrUx = m_vrUh;
NS_LOG_LOGIC ("New VR(UX) = " << m_vrUx);
}
}
}
bool
LteRlcUm::IsInsideReorderingWindow (SequenceNumber10 seqNumber)
{
NS_LOG_FUNCTION (this << seqNumber);
NS_LOG_LOGIC ("Reordering Window: " <<
m_vrUh << " - " << m_windowSize << " <= " << seqNumber << " < " << m_vrUh);
m_vrUh.SetModulusBase (m_vrUh - m_windowSize);
seqNumber.SetModulusBase (m_vrUh - m_windowSize);
if ( ((m_vrUh - m_windowSize) <= seqNumber) && (seqNumber < m_vrUh))
{
NS_LOG_LOGIC (seqNumber << " is INSIDE the reordering window");
return true;
}
else
{
NS_LOG_LOGIC (seqNumber << " is OUTSIDE the reordering window");
return false;
}
}
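// Strip the RLC header from one UMD PDU, split its Data field back into
// SDUs / SDU segments and feed them through the reassembly state machine:
// WAITING_S0_FULL means no partial SDU is pending; WAITING_SI_SF means the
// first chunk of the next PDU must be appended to the kept segment m_keepS0.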
void
LteRlcUm::ReassembleAndDeliver (Ptr<Packet> packet)
{
LteRlcHeader rlcHeader;
packet->RemoveHeader (rlcHeader);
uint8_t framingInfo = rlcHeader.GetFramingInfo ();
SequenceNumber10 currSeqNumber = rlcHeader.GetSequenceNumber ();
bool expectedSnLost;
if ( currSeqNumber != m_expectedSeqNumber )
{
expectedSnLost = true;
NS_LOG_LOGIC ("There are losses. Expected SN = " << m_expectedSeqNumber << ". Current SN = " << currSeqNumber);
m_expectedSeqNumber = currSeqNumber + 1;
}
else
{
expectedSnLost = false;
NS_LOG_LOGIC ("No losses. Expected SN = " << m_expectedSeqNumber << ". Current SN = " << currSeqNumber);
m_expectedSeqNumber++;
}
// Build list of SDUs
uint8_t extensionBit;
uint16_t lengthIndicator;
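  // Walk the (E, LI) chain: each extension bit tells whether another LI field
  // follows, and each LI gives the length of one SDU (or segment) in the Data
  // field. The last chunk carries no LI and extends to the end of the PDU.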
do
{
extensionBit = rlcHeader.PopExtensionBit ();
NS_LOG_LOGIC ("E = " << (uint16_t)extensionBit);
if ( extensionBit == 0 )
{
m_sdusBuffer.push_back (packet);
}
else // extensionBit == 1
{
lengthIndicator = rlcHeader.PopLengthIndicator ();
NS_LOG_LOGIC ("LI = " << lengthIndicator);
// Check if there is enough data in the packet
if ( lengthIndicator >= packet->GetSize () )
{
NS_LOG_LOGIC ("INTERNAL ERROR: Not enough data in the packet (" << packet->GetSize () << "). Needed LI=" << lengthIndicator);
}
// Split packet in two fragments
Ptr<Packet> data_field = packet->CreateFragment (0, lengthIndicator);
packet->RemoveAtStart (lengthIndicator);
m_sdusBuffer.push_back (data_field);
}
}
while ( extensionBit == 1 );
std::list < Ptr<Packet> >::iterator it;
// Current reassembling state
if (m_reassemblingState == WAITING_S0_FULL) NS_LOG_LOGIC ("Reassembling State = 'WAITING_S0_FULL'");
else if (m_reassemblingState == WAITING_SI_SF) NS_LOG_LOGIC ("Reassembling State = 'WAITING_SI_SF'");
else NS_LOG_LOGIC ("Reassembling State = Unknown state");
// Received framing Info
NS_LOG_LOGIC ("Framing Info = " << (uint16_t)framingInfo);
  // Reassemble the list of SDUs (when there are no losses)
if (!expectedSnLost)
{
switch (m_reassemblingState)
{
case WAITING_S0_FULL:
switch (framingInfo)
{
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
m_reassemblingState = WAITING_S0_FULL;
/**
* Deliver one or multiple PDUs
*/
for ( it = m_sdusBuffer.begin () ; it != m_sdusBuffer.end () ; it++ )
{
m_rlcSapUser->ReceivePdcpPdu (*it);
}
m_sdusBuffer.clear ();
break;
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
m_reassemblingState = WAITING_SI_SF;
/**
* Deliver full PDUs
*/
while ( m_sdusBuffer.size () > 1 )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
/**
* Keep S0
*/
m_keepS0 = m_sdusBuffer.front ();
m_sdusBuffer.pop_front ();
break;
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
m_reassemblingState = WAITING_S0_FULL;
/**
* Discard SI or SN
*/
m_sdusBuffer.pop_front ();
/**
* Deliver zero, one or multiple PDUs
*/
while ( ! m_sdusBuffer.empty () )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
break;
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
if ( m_sdusBuffer.size () == 1 )
{
m_reassemblingState = WAITING_S0_FULL;
}
else
{
m_reassemblingState = WAITING_SI_SF;
}
/**
* Discard SI or SN
*/
m_sdusBuffer.pop_front ();
if ( m_sdusBuffer.size () > 0 )
{
/**
* Deliver zero, one or multiple PDUs
*/
while ( m_sdusBuffer.size () > 1 )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
/**
* Keep S0
*/
m_keepS0 = m_sdusBuffer.front ();
m_sdusBuffer.pop_front ();
}
break;
default:<|fim▁hole|> * ERROR: Transition not possible
*/
NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
break;
}
break;
case WAITING_SI_SF:
switch (framingInfo)
{
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
m_reassemblingState = WAITING_S0_FULL;
/**
* Deliver (Kept)S0 + SN
*/
m_keepS0->AddAtEnd (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
m_rlcSapUser->ReceivePdcpPdu (m_keepS0);
/**
* Deliver zero, one or multiple PDUs
*/
while ( ! m_sdusBuffer.empty () )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
break;
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
m_reassemblingState = WAITING_SI_SF;
/**
* Keep SI
*/
if ( m_sdusBuffer.size () == 1 )
{
m_keepS0->AddAtEnd (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
else // m_sdusBuffer.size () > 1
{
/**
* Deliver (Kept)S0 + SN
*/
m_keepS0->AddAtEnd (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
m_rlcSapUser->ReceivePdcpPdu (m_keepS0);
/**
* Deliver zero, one or multiple PDUs
*/
while ( m_sdusBuffer.size () > 1 )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
/**
* Keep S0
*/
m_keepS0 = m_sdusBuffer.front ();
m_sdusBuffer.pop_front ();
}
break;
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
default:
/**
* ERROR: Transition not possible
*/
NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
break;
}
break;
default:
NS_LOG_LOGIC ("INTERNAL ERROR: Wrong reassembling state = " << (uint32_t) m_reassemblingState);
break;
}
}
else // Reassemble the list of SDUs (when there are losses, i.e. the received SN is not the expected one)
{
switch (m_reassemblingState)
{
case WAITING_S0_FULL:
switch (framingInfo)
{
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
m_reassemblingState = WAITING_S0_FULL;
/**
* Deliver one or multiple PDUs
*/
for ( it = m_sdusBuffer.begin () ; it != m_sdusBuffer.end () ; it++ )
{
m_rlcSapUser->ReceivePdcpPdu (*it);
}
m_sdusBuffer.clear ();
break;
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
m_reassemblingState = WAITING_SI_SF;
/**
* Deliver full PDUs
*/
while ( m_sdusBuffer.size () > 1 )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
/**
* Keep S0
*/
m_keepS0 = m_sdusBuffer.front ();
m_sdusBuffer.pop_front ();
break;
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
m_reassemblingState = WAITING_S0_FULL;
/**
* Discard SN
*/
m_sdusBuffer.pop_front ();
/**
* Deliver zero, one or multiple PDUs
*/
while ( ! m_sdusBuffer.empty () )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
break;
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
if ( m_sdusBuffer.size () == 1 )
{
m_reassemblingState = WAITING_S0_FULL;
}
else
{
m_reassemblingState = WAITING_SI_SF;
}
/**
* Discard SI or SN
*/
m_sdusBuffer.pop_front ();
if ( m_sdusBuffer.size () > 0 )
{
/**
* Deliver zero, one or multiple PDUs
*/
while ( m_sdusBuffer.size () > 1 )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
/**
* Keep S0
*/
m_keepS0 = m_sdusBuffer.front ();
m_sdusBuffer.pop_front ();
}
break;
default:
/**
* ERROR: Transition not possible
*/
NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
break;
}
break;
case WAITING_SI_SF:
switch (framingInfo)
{
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
m_reassemblingState = WAITING_S0_FULL;
/**
* Discard S0
*/
m_keepS0 = 0;
/**
* Deliver one or multiple PDUs
*/
while ( ! m_sdusBuffer.empty () )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
break;
case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
m_reassemblingState = WAITING_SI_SF;
/**
* Discard S0
*/
m_keepS0 = 0;
/**
* Deliver zero, one or multiple PDUs
*/
while ( m_sdusBuffer.size () > 1 )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
/**
* Keep S0
*/
m_keepS0 = m_sdusBuffer.front ();
m_sdusBuffer.pop_front ();
break;
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
m_reassemblingState = WAITING_S0_FULL;
/**
* Discard S0
*/
m_keepS0 = 0;
/**
* Discard SI or SN
*/
m_sdusBuffer.pop_front ();
/**
* Deliver zero, one or multiple PDUs
*/
while ( ! m_sdusBuffer.empty () )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
break;
case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
if ( m_sdusBuffer.size () == 1 )
{
m_reassemblingState = WAITING_S0_FULL;
}
else
{
m_reassemblingState = WAITING_SI_SF;
}
/**
* Discard S0
*/
m_keepS0 = 0;
/**
* Discard SI or SN
*/
m_sdusBuffer.pop_front ();
if ( m_sdusBuffer.size () > 0 )
{
/**
* Deliver zero, one or multiple PDUs
*/
while ( m_sdusBuffer.size () > 1 )
{
m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
m_sdusBuffer.pop_front ();
}
/**
* Keep S0
*/
m_keepS0 = m_sdusBuffer.front ();
m_sdusBuffer.pop_front ();
}
break;
default:
/**
* ERROR: Transition not possible
*/
NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
break;
}
break;
default:
NS_LOG_LOGIC ("INTERNAL ERROR: Wrong reassembling state = " << (uint32_t) m_reassemblingState);
break;
}
}
}
void
LteRlcUm::ReassembleOutsideWindow (void)
{
NS_LOG_LOGIC ("Reassemble Outside Window");
std::map <uint16_t, Ptr<Packet> >::iterator it;
it = m_rxBuffer.begin ();
while ( (it != m_rxBuffer.end ()) && ! IsInsideReorderingWindow (SequenceNumber10 (it->first)) )
{
NS_LOG_LOGIC ("SN = " << it->first);
// Reassemble RLC SDUs and deliver the PDCP PDU to upper layer
ReassembleAndDeliver (it->second);
std::map <uint16_t, Ptr<Packet> >::iterator it_tmp = it;
++it;
m_rxBuffer.erase (it_tmp);
}
if (it != m_rxBuffer.end ())
{
NS_LOG_LOGIC ("(SN = " << it->first << ") is inside the reordering window");
}
}
void
LteRlcUm::ReassembleSnInterval (SequenceNumber10 lowSeqNumber, SequenceNumber10 highSeqNumber)
{
NS_LOG_LOGIC ("Reassemble SN between " << lowSeqNumber << " and " << highSeqNumber);
std::map <uint16_t, Ptr<Packet> >::iterator it;
SequenceNumber10 reassembleSn = lowSeqNumber;
NS_LOG_LOGIC ("reassembleSN = " << reassembleSn);
NS_LOG_LOGIC ("highSeqNumber = " << highSeqNumber);
while (reassembleSn < highSeqNumber)
{
NS_LOG_LOGIC ("reassembleSn < highSeqNumber");
it = m_rxBuffer.find (reassembleSn.GetValue ());
NS_LOG_LOGIC ("it->first = " << it->first);
NS_LOG_LOGIC ("it->second = " << it->second);
if (it != m_rxBuffer.end () )
{
NS_LOG_LOGIC ("SN = " << it->first);
// Reassemble RLC SDUs and deliver the PDCP PDU to upper layer
ReassembleAndDeliver (it->second);
m_rxBuffer.erase (it);
}
reassembleSn++;
}
}
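// Report the TX queue size and head-of-line delay to the MAC so the
// scheduler can size future transmission opportunities.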
void
LteRlcUm::DoReportBufferStatus (void)
{
Time holDelay (0);
uint32_t queueSize = 0;
if (! m_txBuffer.empty ())
{
RlcTag holTimeTag;
m_txBuffer.front ()->PeekPacketTag (holTimeTag);
holDelay = Simulator::Now () - holTimeTag.GetSenderTimestamp ();
queueSize = m_txBufferSize + 2 * m_txBuffer.size (); // Data in tx queue + estimated headers size
}
LteMacSapProvider::ReportBufferStatusParameters r;
r.rnti = m_rnti;
r.lcid = m_lcid;
r.txQueueSize = queueSize;
r.txQueueHolDelay = holDelay.GetMilliSeconds () ;
r.retxQueueSize = 0;
r.retxQueueHolDelay = 0;
r.statusPduSize = 0;
NS_LOG_LOGIC ("Send ReportBufferStatus = " << r.txQueueSize << ", " << r.txQueueHolDelay );
m_macSapProvider->ReportBufferStatus (r);
}
void
LteRlcUm::ExpireReorderingTimer (void)
{
NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid);
NS_LOG_LOGIC ("Reordering timer has expired");
// 5.1.2.2.4 Actions when t-Reordering expires
// When t-Reordering expires, the receiving UM RLC entity shall:
// - update VR(UR) to the SN of the first UMD PDU with SN >= VR(UX) that has not been received;
// - reassemble RLC SDUs from any UMD PDUs with SN < updated VR(UR), remove RLC headers when doing so
// and deliver the reassembled RLC SDUs to upper layer in ascending order of the RLC SN if not delivered before;
// - if VR(UH) > VR(UR):
// - start t-Reordering;
// - set VR(UX) to VR(UH).
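  // Advance VR(UR) to the first SN >= VR(UX) that is still missing from the
  // reception buffer, then deliver everything below it.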
std::map <uint16_t, Ptr<Packet> >::iterator it;
SequenceNumber10 newVrUr = m_vrUx;
while ( (it = m_rxBuffer.find (newVrUr.GetValue ())) != m_rxBuffer.end () )
{
newVrUr++;
}
SequenceNumber10 oldVrUr = m_vrUr;
m_vrUr = newVrUr;
NS_LOG_LOGIC ("New VR(UR) = " << m_vrUr);
ReassembleSnInterval (oldVrUr, m_vrUr);
if ( m_vrUh > m_vrUr)
{
NS_LOG_LOGIC ("Start reordering timer");
m_reorderingTimer = Simulator::Schedule (Time ("0.1s"),
&LteRlcUm::ExpireReorderingTimer, this);
m_vrUx = m_vrUh;
NS_LOG_LOGIC ("New VR(UX) = " << m_vrUx);
}
}
void
LteRlcUm::ExpireRbsTimer (void)
{
NS_LOG_LOGIC ("RBS Timer expires");
if (! m_txBuffer.empty ())
{
DoReportBufferStatus ();
m_rbsTimer = Simulator::Schedule (MilliSeconds (10), &LteRlcUm::ExpireRbsTimer, this);
}
}
} // namespace ns3<|fim▁end|> | /** |
<|file_name|>SPNewsScraper.java<|end_file_name|><|fim▁begin|>package dk.dmaa0214.controllerLayer;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.xml.bind.DatatypeConverter;
import com.gargoylesoftware.htmlunit.DefaultCredentialsProvider;
import com.gargoylesoftware.htmlunit.FailingHttpStatusCodeException;
import com.gargoylesoftware.htmlunit.StringWebResponse;
import com.gargoylesoftware.htmlunit.WebClient;
import com.gargoylesoftware.htmlunit.html.DomNode;
import com.gargoylesoftware.htmlunit.html.DomNodeList;
import com.gargoylesoftware.htmlunit.html.HTMLParser;
import com.gargoylesoftware.htmlunit.html.HtmlElement;
import com.gargoylesoftware.htmlunit.html.HtmlImage;
import com.gargoylesoftware.htmlunit.html.HtmlPage;
import dk.dmaa0214.modelLayer.SPNews;
public class SPNewsScraper {
//public static void main(String [] args) {<|fim▁hole|> // new SPNewsScraper();
//}
private WebClient webClient;
public SPNewsScraper(String user, String pass) {
webClient = new WebClient();
webClient.getOptions().setJavaScriptEnabled(false);
webClient.getOptions().setCssEnabled(false);
DefaultCredentialsProvider credentialProvider = (DefaultCredentialsProvider) webClient.getCredentialsProvider();
credentialProvider.addNTLMCredentials(user, pass, null, -1, "localhost", "UCN");
}
public void getSingleNews(SPNews spNews) throws FailingHttpStatusCodeException, NullPointerException, IOException {
int id = spNews.getId();
String siteDialogURL = "http://ecampus.ucn.dk/Noticeboard/Lists/NoticeBoard/DispForm.aspx?"
+ "NoticeBoardItem=" + id + "&WebID=87441127-db6f-4499-8c99-3dea925e04a8&IsDlg=1";
HtmlPage page = webClient.getPage(siteDialogURL);
DomNode div = page.getFirstByXPath("//td[@class='wt-2column-t1-td1']/div/div");
if(div == null) {
throw new NullPointerException("Nyhedstekst kunne ikke hentes. Internkode: #3");
}
DomNodeList<DomNode> list = div.getChildNodes();
String fullText = "";
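    // Copy only the child nodes that form the article body; the first five
    // and the last three children are skipped.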
for (int i = 5; i < list.size()-3; i++) {
DomNode dn = list.get(i);
fullText += dn.asXml();
}
StringWebResponse response = new StringWebResponse(fullText, page.getUrl());
HtmlPage newPage = HTMLParser.parseHtml(response, webClient.getCurrentWindow());
makeImgToBase64(newPage);
HtmlElement body = newPage.getBody();
spNews.setFullText(body.asXml());
}
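  // Rewrites every <img> element to carry its image inline as a base64 data
  // URI, so articles can be rendered without access to the ecampus server.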
private void makeImgToBase64(HtmlPage page) throws FailingHttpStatusCodeException, MalformedURLException, IOException {
@SuppressWarnings("unchecked")
List<HtmlImage> imageList = (List<HtmlImage>) page.getByXPath("//img");
for (HtmlImage image : imageList) {
InputStream ins = webClient.getPage("http://ecampus.ucn.dk" + image.getSrcAttribute()).getWebResponse().getContentAsStream();
byte[] imageBytes = new byte[0];
for(byte[] ba = new byte[ins.available()]; ins.read(ba) != -1;) {
byte[] baTmp = new byte[imageBytes.length + ba.length];
System.arraycopy(imageBytes, 0, baTmp, 0, imageBytes.length);
System.arraycopy(ba, 0, baTmp, imageBytes.length, ba.length);
imageBytes = baTmp;
}
image.setAttribute("src", "data:image/gif;base64," + DatatypeConverter.printBase64Binary(imageBytes));
}
}
public ArrayList<SPNews> getNewsList() throws NullPointerException, FailingHttpStatusCodeException, MalformedURLException, IOException {
String siteURL = "http://ecampus.ucn.dk/Noticeboard/_Layouts/NoticeBoard/Ajax.aspx?Action="
+ "GetNewsList&ShowBodyContent=SHORT100&WebId=87441127-db6f-4499-8c99-3dea925e04a8"
+ "&ChannelList=11776,4096,3811,3817,4311,4312,4313,4768,4314,4315,4316,4317,4310,"
+ "&DateFormat=dd/MM/yyyy HH:mm&List=Current,Archived&IncludeRead=true&MaxToShow=10"
+ "&Page=1&frontpageonly=false";
HtmlPage page = webClient.getPage(siteURL);
    return scrapeNewsList(page.asText());
}
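  // The Ajax endpoint replies with a status ("OK") followed by news records
  // separated by |$$|; the 11 fields within one record are separated by |$|.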
  private ArrayList<SPNews> scrapeNewsList(String input) throws NullPointerException {
ArrayList<SPNews> newslist = new ArrayList<SPNews>();
int iStart = getNextIndex(input, 0);
if(!input.substring(0, iStart).equals("OK")) {
throw new NullPointerException("Nyhederne kan ikke læses. Internkode: #1. Status: " + input.substring(0, iStart));
}
String[] allNews = input.split("\\|\\$\\$\\|");
//System.out.println("count: " + (allNews.length-1));
for (int i = 1; i < allNews.length; i++) {
String[] singleNews = allNews[i].split("\\|\\$\\|");
if(singleNews.length != 11) {
throw new NullPointerException("Nyhederne kan ikke læses. Internkode: #2. Rapport: " + singleNews.length);
}
int id = getIntFromString(singleNews[0]);
String title = singleNews[1].trim();
String date = singleNews[2].trim();
boolean read = (getIntFromString(singleNews[3]) == 1);
String[] channelArray = singleNews[4].trim().split("\\|");
ArrayList<String> channels = new ArrayList<String>(Arrays.asList(channelArray));
String addedBy = singleNews[6].trim();
      String text = singleNews[7].trim(); //7 and 8 are equal.
SPNews newsObj = new SPNews(id, title, date, channels, text, addedBy, read);
newslist.add(newsObj);
}
return newslist;
}
private int getIntFromString(String str) {
int ret = -1;
try {
ret = Integer.parseInt(str);
} catch (NumberFormatException e) {
ret = -1;
}
return ret;
}
private int getNextIndex(String text, int fromIndex){
int i = text.indexOf("|$|", fromIndex);
if (i == -1) {
throw new NullPointerException("Nyhederne kan ikke læses");
}
return i;
}
}<|fim▁end|> | |
<|file_name|>set.rs<|end_file_name|><|fim▁begin|>use std::io::BufReader;
use std::fs;
use std::rc;
use sym;
use exec::Arg;
use exec::Redir;
use shell::Shell;
fn rd_set(_rd: Redir) -> i32 {
println!("Redirection set is unimplemented");
0
}
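/// Consume leading flag arguments ("-l", "-g", "-e") and return the
/// requested variable scope (local, global or environment).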
fn set_spec(av: &mut Vec<Arg>) -> sym::ScopeSpec {
let mut ret = sym::ScopeSpec::Default;
    while !av.is_empty() {
if av[0].is_str() {
if !av[0].as_str().starts_with("-") {
break;
}
let s = av.remove(0).unwrap_str();
// FIXME: graphemes()?
for c in s.chars().skip(1) {
ret = match c {
'l' => sym::ScopeSpec::Local,
'g' => sym::ScopeSpec::Global,
'e' => sym::ScopeSpec::Environment,
_ => {
warn!("set: Unrecognized argument '{}' found.", c);
ret
}
}
}
} else {
break;
}
}
ret
}
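/// Collect the keys to be set: consume args until a literal "=" is found
/// (the "=" itself is dropped) or the argument list runs out.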
fn set_keys(av: &mut Vec<Arg>) -> Vec<String> {
let mut ret = Vec::new();
    while !av.is_empty() {
let arg = av.remove(0);
// check for '='
if let Arg::Str(ref s) = arg {
if s == "=" {
break;
}
}
for k in arg.into_vec() {
ret.push(k);
}
}
ret
}
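/// Define the functions named in `kv`. The last arg must be the body block;
/// the args before it form the parameter list, where a leading "--inline"
/// marks an inline function and `name ...` declares a vararg.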
fn fn_set(sh: &mut Shell, kv: Vec<String>, mut av: Vec<Arg>, spec: sym::ScopeSpec) -> i32 {
    if av.is_empty() || !av.last().unwrap().is_bl() {
warn!("fn declaration must contain a block as its last arg.");
return 2;
}
let exec_bl = av.pop().unwrap().unwrap_bl();
// TODO: patterns in function args!
let mut args = Vec::new();
let mut vararg = None;
let mut postargs = None;
let mut flat_args = av.drain(..).flat_map(|x| x.into_vec()).collect::<Vec<_>>();
let inline = if flat_args.len() > 0 && flat_args[0] == "--inline" {
flat_args.remove(0);
true
} else {
false
};
for sl in flat_args.windows(2) {
let ref elt = sl[0];
let ref lookahead = sl[1];
if lookahead == "..." {
if vararg.is_some() {
warn!("set: fn can have at most one vararg");
return 2;
}
vararg = Some(elt.to_owned());
postargs = Some(Vec::new());
} else if elt != "..." {
if let Some(ref mut x) = postargs {
x.push(elt.to_owned());
} else {
args.push(elt.to_owned());
}
}
}
// last arg
if let Some(last) = flat_args.last() {
if last != "..." {
if let Some(ref mut x) = postargs {
x.push(last.to_owned());
} else {
args.push(last.to_owned());
}
}
}
for k in &kv {
sh.st.set_fn(k,
sym::Fn {
name: k.clone(),
inline: inline,
args: args.clone(),
vararg: vararg.clone(),
postargs: postargs.clone(),
lines: exec_bl.clone(),
},
spec);
}
0
}
<|fim▁hole|> if args.len() == 1 {
if args[0].is_rd() {
let rd = args.remove(0).unwrap_rd();
return rd_set(rd);
}
}
// get args and keys
let spec = set_spec(&mut args);
let mut keyv = set_keys(&mut args);
// filter out invalid keys
let keyv = keyv.drain(..)
.filter(|a| {
a.find(|x| {
if "?! {}()".contains(x) {
// TODO: more invalid chars
warn!("set: Key '{}' contains invalid characters", a);
true
} else {
false
}
})
.is_none()
})
.collect::<Vec<String>>();
// if we just said 'set a b c', we want to set them to empty
if args.is_empty() {
args.push(Arg::Str(String::new()));
}
if args[0].is_str() && args[0].as_str() == "fn" {
args.remove(0);
return fn_set(sh, keyv, args, spec);
}
let val = args.drain(..).flat_map(|x| x.into_vec()).collect::<Vec<String>>().join(" ");
let mut r = 0;
for k in keyv {
if sh.st.set_scope(&k, val.clone(), spec).is_err() {
r = 2;
}
}
r
})
}<|fim▁end|> | pub fn set_main() -> rc::Rc<Fn(Vec<Arg>, &mut Shell, Option<BufReader<fs::File>>) -> i32> {
rc::Rc::new(|mut args: Vec<Arg>, sh: &mut Shell, _in: Option<BufReader<fs::File>>| -> i32 {
// rd-set |
<|file_name|>const.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
class Constant:
conf_dir = os.path.join(os.path.expanduser('~'), '.netease-musicbox')
download_dir = conf_dir + "/cached"<|fim▁end|> | # encoding: UTF-8
import os |
<|file_name|>test_serializers.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from collections import defaultdict
from nose.tools import eq_, ok_
from rest_framework.serializers import ValidationError
import amo
import amo.tests
import mkt
import mkt.feed.constants as feed
from mkt.feed import serializers
from mkt.feed.constants import COLLECTION_LISTING, COLLECTION_PROMO
from mkt.feed.models import FeedShelf
from mkt.feed.tests.test_models import FeedAppMixin, FeedTestMixin
from mkt.regions import RESTOFWORLD
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Preview
class TestFeedAppSerializer(FeedTestMixin, amo.tests.TestCase):
def test_basic(self):
data = {
'app': 337141,
'background_color': '#B90000',
'type': 'icon',
'description': {
'en-US': u'pan-fried potatoes'
},
'slug': 'aaa'
}
serializer = serializers.FeedAppSerializer(data=data)
assert serializer.is_valid()
class TestFeedAppESSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
self.feedapp = self.feed_app_factory(
app_type=feed.FEEDAPP_DESC, description={'en-US': 'test'})
self.feedapp.update(preview=Preview.objects.create(
addon=self.feedapp.app, sizes={'thumbnail': [50, 50]}))
self.data_es = self.feedapp.get_indexer().extract_document(
None, obj=self.feedapp)
self.app_map = {
self.feedapp.app_id: WebappIndexer.extract_document(
self.feedapp.app_id)
}
def test_deserialize(self):
data = serializers.FeedAppESSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
eq_(data['app']['id'], self.feedapp.app_id)
eq_(data['description']['en-US'], 'test')
eq_(data['preview'], {
'id': self.feedapp.preview.id,
'thumbnail_size': [50, 50],
'thumbnail_url': self.feedapp.preview.thumbnail_url})
def test_deserialize_many(self):
data = serializers.FeedAppESSerializer(
[self.data_es, self.data_es], context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')},
many=True).data
eq_(data[0]['app']['id'], self.feedapp.app_id)
eq_(data[1]['description']['en-US'], 'test')
def test_background_image(self):
self.feedapp.update(type=feed.FEEDAPP_IMAGE, image_hash='LOL')
self.data_es = self.feedapp.get_indexer().extract_document(
None, obj=self.feedapp)
self.app_map = {
self.feedapp.app_id: WebappIndexer.extract_document(
self.feedapp.app_id)
}
data = serializers.FeedAppESSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
assert data['background_image'].endswith('image.png?LOL')
class TestFeedBrandSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
self.app_ids = [amo.tests.app_factory().id for i in range(3)]
self.brand = self.feed_brand_factory(app_ids=self.app_ids)
super(TestFeedBrandSerializer, self).setUp()
def test_deserialize(self):
data = serializers.FeedBrandSerializer(self.brand).data
eq_(data['slug'], self.brand.slug)
eq_(data['layout'], self.brand.layout)
eq_(data['type'], self.brand.type)
self.assertSetEqual([app['id'] for app in data['apps']], self.app_ids)
class TestFeedBrandESSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
self.apps = [amo.tests.app_factory() for i in range(3)]
self.app_ids = [app.id for app in self.apps]
self.brand = self.feed_brand_factory(app_ids=self.app_ids)
self.data_es = self.brand.get_indexer().extract_document(
None, obj=self.brand)
self.app_map = dict((app.id, WebappIndexer.extract_document(app.id))
for app in self.apps)
def test_deserialize(self):
data = serializers.FeedBrandESSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
self.assertSetEqual([app['id'] for app in data['apps']],
[app.id for app in self.apps])
eq_(data['type'], self.brand.type)
def test_home_serializer_app_count(self):
data = serializers.FeedBrandESHomeSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
eq_(data['app_count'], 3)
class TestFeedCollectionSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
super(TestFeedCollectionSerializer, self).setUp()
self.data = {
'background_color': feed.FEED_COLOR_CHOICES[0][0],
'name': {'en-US': 'Potato'},
'description': {'en-US': 'Potato, tomato'},
'type': COLLECTION_PROMO
}
def validate(self, **attrs):
return (serializers.FeedCollectionSerializer()
.validate_background_color(attrs=self.data,
source='background_color'))
def test_validate_promo_bg(self):
self.validate()
def test_validate_promo_nobg(self):
del self.data['background_color']
with self.assertRaises(ValidationError):
self.validate()
def test_validate_listing_bg(self):
self.data['type'] = COLLECTION_LISTING
self.validate()
def test_validate_listing_nobg(self):
self.data['type'] = COLLECTION_LISTING
del self.data['background_color']
self.validate()
def test_invalid_bg_color(self):
self.data['background_color'] = '#FFFFFF'
with self.assertRaises(ValidationError):
self.validate()
def test_with_price(self):
app = amo.tests.app_factory()
self.make_premium(app)
coll = self.feed_collection_factory(app_ids=[app.id])
data = serializers.FeedCollectionSerializer(coll, context={
'request': amo.tests.req_factory_factory('',
REGION=mkt.regions.US)
}).data
eq_(data['apps'][0]['price'], 1)
class TestFeedCollectionESSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
self.apps = [amo.tests.app_factory() for i in range(4)]
self.app_ids = [app.id for app in self.apps]
self.collection = self.feed_collection_factory(
app_ids=self.app_ids, description={'de': 'test'},
name={'en-US': 'test'})
self.data_es = self.collection.get_indexer().extract_document(
None, obj=self.collection)
self.app_map = dict((app.id, WebappIndexer.extract_document(app.id))
for app in self.apps)
def test_deserialize(self):
data = serializers.FeedCollectionESSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
self.assertSetEqual([app['id'] for app in data['apps']],
[app.id for app in self.apps])
eq_(data['description']['de'], 'test')
eq_(data['name']['en-US'], 'test')
return data
def test_deserialize_grouped_apps(self):
self.collection = self.feed_collection_factory(
app_ids=self.app_ids, grouped=True, description={'de': 'test'},
name={'en-US': 'test'})
self.data_es = self.collection.get_indexer().extract_document(
None, obj=self.collection)
data = self.test_deserialize()
for i, app in enumerate(data['apps']):
actual = app['group']['en-US']
if (i + 1) == len(self.app_ids):
expected = 'second-group'
else:
expected = 'first-group'
eq_(expected, actual, 'Expected %s, got %s' % (expected, actual))
def test_background_image(self):
self.collection.update(type=feed.COLLECTION_PROMO, image_hash='LOL')
self.data_es = self.collection.get_indexer().extract_document(
None, obj=self.collection)
data = serializers.FeedCollectionESSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
assert data['background_image'].endswith('image.png?LOL')
def test_home_serializer_listing_coll(self):
"""Test the listing collection is using ESAppFeedSerializer."""
self.collection.update(type=feed.COLLECTION_LISTING)
self.data_es = self.collection.get_indexer().extract_document(
None, obj=self.collection)
data = serializers.FeedCollectionESHomeSerializer(self.data_es,
context={'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')}
).data
ok_('author' in data['apps'][0])
ok_(data['apps'][0]['name'])
ok_(data['apps'][0]['ratings'])
ok_(data['apps'][0]['icons'])
eq_(data['app_count'], len(self.app_map))
def test_home_serializer_promo_coll(self):
"""
        Test the promo collection uses
        ESAppFeedCollectionSerializer when there is no background image.
"""
self.collection.update(type=feed.COLLECTION_PROMO)
self.data_es = self.collection.get_indexer().extract_document(
None, obj=self.collection)
data = serializers.FeedCollectionESHomeSerializer(self.data_es,
context={'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')}
).data
assert 'author' not in data['apps'][0]
assert 'name' not in data['apps'][0]
assert 'ratings' not in data['apps'][0]
assert data['apps'][0]['icons']
class TestFeedShelfSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
self.app_ids = [amo.tests.app_factory().id for i in range(3)]
self.shelf = self.feed_shelf_factory(app_ids=self.app_ids)
super(TestFeedShelfSerializer, self).setUp()
def test_deserialize(self):
data = serializers.FeedShelfSerializer(self.shelf).data
eq_(data['slug'], self.shelf.slug)
self.assertSetEqual([app['id'] for app in data['apps']], self.app_ids)
def test_is_published(self):
data = serializers.FeedShelfSerializer(self.shelf).data
assert not data['is_published']
self.shelf.feeditem_set.create()
data = serializers.FeedShelfSerializer(self.shelf).data
assert data['is_published']
class TestFeedShelfESSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
self.apps = [amo.tests.app_factory() for i in range(3)]
self.app_ids = [app.id for app in self.apps]
self.shelf = self.feed_shelf_factory(
app_ids=self.app_ids, description={'de': 'test'},
name={'en-US': 'test'})
self.data_es = self.shelf.get_indexer().extract_document(
None, obj=self.shelf)
self.app_map = dict((app.id, WebappIndexer.extract_document(app.id))
for app in self.apps)
def test_deserialize(self):
data = serializers.FeedShelfESSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
self.assertSetEqual([app['id'] for app in data['apps']],
[app.id for app in self.apps])<|fim▁hole|>
def test_background_image(self):
self.shelf.update(image_hash='LOL', image_landing_hash='ROFL')
self.data_es = self.shelf.get_indexer().extract_document(
None, obj=self.shelf)
data = serializers.FeedShelfESSerializer(self.data_es, context={
'app_map': self.app_map,
'request': amo.tests.req_factory_factory('')
}).data
assert data['background_image'].endswith('image.png?LOL')
assert data['background_image_landing'].endswith(
'image_landing.png?ROFL')
class TestFeedItemSerializer(FeedAppMixin, amo.tests.TestCase):
def setUp(self):
super(TestFeedItemSerializer, self).setUp()
self.create_feedapps()
def serializer(self, item=None, **context):
if not item:
return serializers.FeedItemSerializer(context=context)
return serializers.FeedItemSerializer(item, context=context)
def validate(self, **attrs):
return self.serializer().validate(attrs=attrs)
def test_validate_passes(self):
self.validate(app=self.feedapps[0])
def test_validate_fails_no_items(self):
with self.assertRaises(ValidationError):
self.validate(app=None)
def validate_shelf(self, **attrs):
shelf = FeedShelf.objects.create(carrier=1, region=2)
data = {
'carrier': 'telefonica',
'region': 'us',
'shelf': shelf.id
}
data.update(attrs)
return self.serializer().validate_shelf(data, 'shelf')
def test_validate_shelf_passes(self):
self.validate_shelf()
def test_validate_shelf_fails_region(self):
with self.assertRaises(ValidationError):
self.validate_shelf(region='br')
def test_validate_shelf_fails_carrier(self):
with self.assertRaises(ValidationError):
self.validate_shelf(carrier='telenor')
def test_region_handles_worldwide(self):
data = {
'region': 'worldwide',
'item_type': 'app',
'app': self.feedapps[0].id,
}
serializer = serializers.FeedItemSerializer(data=data)
assert serializer.is_valid()
assert serializer.object.region == RESTOFWORLD.id
class TestFeedItemESSerializer(FeedTestMixin, amo.tests.TestCase):
def setUp(self):
self.feed = self.feed_factory()
self.data_es = [
feed_item.get_indexer().extract_document(None, obj=feed_item)
for feed_item in self.feed]
# Denormalize feed elements into the serializer context.
self.app_map = {}
self.feed_element_map = defaultdict(dict)
for i, feed_item in enumerate(self.data_es):
feed_element = getattr(self.feed[i], feed_item['item_type'])
self.feed_element_map[feed_item['item_type']][feed_element.id] = (
feed_element.get_indexer().extract_document(None,
obj=feed_element))
# Denormalize apps into serializer context.
if hasattr(feed_element, 'apps'):
for app in feed_element.apps():
self.app_map[app.id] = WebappIndexer.extract_document(
None, obj=app)
else:
self.app_map[feed_element.app_id] = (
WebappIndexer.extract_document(feed_element.app_id))
def test_deserialize_many(self):
data = serializers.FeedItemESSerializer(self.data_es, context={
'app_map': self.app_map,
'feed_element_map': self.feed_element_map,
'request': amo.tests.req_factory_factory('')
}, many=True).data
eq_(data[0]['app']['app']['id'], self.feed[0].app.app.id)
eq_(data[1]['brand']['apps'][0]['id'],
self.feed[1].brand.apps()[0].id)
eq_(data[2]['collection']['apps'][0]['id'],
self.feed[2].collection.apps()[0].id)
assert data[3]['shelf']['carrier']
assert data[3]['shelf']['region']<|fim▁end|> | eq_(data['carrier'], 'telefonica')
eq_(data['region'], 'restofworld')
eq_(data['description']['de'], 'test')
eq_(data['name']['en-US'], 'test') |
<|file_name|>git.go<|end_file_name|><|fim▁begin|>// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.<|fim▁hole|>
import (
"bytes"
"compress/zlib"
"crypto/sha1"
"encoding/git85"
"fmt"
"io"
"os"
)
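// gitSHA1 returns the SHA-1 that Git associates with data: all zeros for
// empty data, otherwise the hash of the header "blob <len>\x00" plus data.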
func gitSHA1(data []byte) []byte {
if len(data) == 0 {
// special case: 0 length is all zeros sum
return make([]byte, 20)
}
h := sha1.New()
fmt.Fprintf(h, "blob %d\x00", len(data))
h.Write(data)
return h.Sum()
}
// BUG(rsc): The Git binary delta format is not implemented, only Git binary literals.
// GitBinaryLiteral represents a Git binary literal diff.
type GitBinaryLiteral struct {
OldSHA1 []byte // if non-empty, the SHA1 hash of the original
New []byte // the new contents
}
// Apply implements the Diff interface's Apply method.
func (d *GitBinaryLiteral) Apply(old []byte) ([]byte, os.Error) {
if sum := gitSHA1(old); !bytes.HasPrefix(sum, d.OldSHA1) {
return nil, ErrPatchFailure
}
return d.New, nil
}
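// unhex returns the value of the hex digit c, or 255 if c is not a hex digit.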
func unhex(c byte) uint8 {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 255
}
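// getHex decodes a leading run of hex digits from s (an even number of them)
// and returns the decoded bytes along with the unconsumed remainder.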
func getHex(s []byte) (data []byte, rest []byte) {
n := 0
for n < len(s) && unhex(s[n]) != 255 {
n++
}
n &^= 1 // Only take an even number of hex digits.
data = make([]byte, n/2)
for i := range data {
data[i] = unhex(s[2*i])<<4 | unhex(s[2*i+1])
}
rest = s[n:]
return
}
// ParseGitBinary parses raw as a Git binary patch.
func ParseGitBinary(raw []byte) (Diff, os.Error) {
var oldSHA1, newSHA1 []byte
var sawBinary bool
for {
var first []byte
first, raw, _ = getLine(raw, 1)
first = bytes.TrimSpace(first)
if s, ok := skip(first, "index "); ok {
oldSHA1, s = getHex(s)
if s, ok = skip(s, ".."); !ok {
continue
}
newSHA1, s = getHex(s)
continue
}
if _, ok := skip(first, "GIT binary patch"); ok {
sawBinary = true
continue
}
if n, _, ok := atoi(first, "literal ", 10); ok && sawBinary {
data := make([]byte, n)
d := git85.NewDecoder(bytes.NewBuffer(raw))
z, err := zlib.NewReader(d)
if err != nil {
return nil, err
}
defer z.Close()
if _, err = io.ReadFull(z, data); err != nil {
if err == os.EOF {
err = io.ErrUnexpectedEOF
}
return nil, err
}
var buf [1]byte
m, err := z.Read(buf[0:])
if m != 0 || err != os.EOF {
return nil, os.NewError("Git binary literal longer than expected")
}
if sum := gitSHA1(data); !bytes.HasPrefix(sum, newSHA1) {
return nil, os.NewError("Git binary literal SHA1 mismatch")
}
return &GitBinaryLiteral{oldSHA1, data}, nil
}
if !sawBinary {
return nil, os.NewError("unexpected Git patch header: " + string(first))
}
}
panic("unreachable")
}<|fim▁end|> |
package patch |
<|file_name|>newsfetch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# newsfetch.py
#
# kindle-newsfetch is a simple Python script which fetches calibre recipes,<|fim▁hole|># (c) 2011 Stefan Schleifer, see LICENSE-file
import sys, os
import ConfigParser
import subprocess
import glob
from datetime import datetime
import shutil
# full path to configuration file
CONFIGFILE = 'newsfetch.cfg'
# print help information
def usage():
print "\nUsage: %s <command> [options]\n" % sys.argv[0]
print "\tinit: Create configuration file."
print "\tall: Fetch and convert all configured items."
print "\tsection|-s <section_name>: Fetch and convert all items of given section."
print "\titem|-i <item_name>: Only fetch and convert item named <item_name>."
print "\tadd <recipe_name> <item_name> <section_name>: Add a new item <item_name> with recipe-id <recipe_name> to section <section_name>."
print "\tlist: Get a list of all configured items."
sys.exit(1)
# create configuraton file
def create_configuration():
try:
i = raw_input("I'm going to ask you a few questions and create %s, is this ok (y/n)? " % CONFIGFILE)
        if i != 'y':
print "Ok, not creating configuration file. Bye!"
sys.exit(1)
config = ConfigParser.SafeConfigParser()
config.add_section('config')
config.set('config', 'KINDLE_ADDR', raw_input("Please enter your Kindle e-mail address where you want the converted files to be delivered to: "))
recipes_path = raw_input("Please enter the absolute path to the directory where your recipes are stored [%s/recipes]: " % os.getcwd())
if not recipes_path: # user chose to use default value
recipes_path = "%s/recipes" % os.getcwd()
# create the directory if it does not exist
if not os.access(recipes_path, os.W_OK): os.mkdir(recipes_path)
config.set('config', 'RECIPES_PATH', recipes_path)
output_path = raw_input("Please enter the absolute path to the directory for storing the converted files [%s/tmp]: " % os.getcwd())
if not output_path: # user chose to use default value
output_path = "%s/tmp" % os.getcwd()
# create the directory if it does not exist
if not os.access(output_path, os.W_OK): os.mkdir(output_path)
config.set('config', 'OUTPUT_PATH', output_path)
config.set('config', 'SMTP_SERVER', raw_input("Please enter the address of your desired SMTP server: "))
config.set('config', 'SMTP_USER', raw_input("Please enter the username for the given server: "))
config.set('config', 'SMTP_PW', raw_input("Please enter the password for the given user (WILL BE STORED IN PLAINTEXT!): "))
config.set('config', 'SMTP_MAILADDR', raw_input("Please enter your mail address for this server: "))
ebook_convert = raw_input("Please enter the absolute path to 'ebook-convert' [/usr/bin/ebook-convert]: ")
if not ebook_convert:
ebook_convert = '/usr/bin/ebook-convert'
config.set('config', 'EBOOK_CONVERT', ebook_convert)
calibre_smtp = raw_input("Please enter the absolute path to 'calibre-smtp' [/usr/bin/calibre-smtp]: ")
if not calibre_smtp:
calibre_smtp = '/usr/bin/calibre-smtp'
config.set('config', 'CALIBRE-SMTP', calibre_smtp)
keep_backup = raw_input("Keep backup of converted newspapers (y/n)? ")
if 'y' == keep_backup:
backup_path = raw_input("Please enter the absolute path where to store the backup [%s/backup]: " % os.getcwd())
if not backup_path:
backup_path = "%s/backup" % os.getcwd()
if not os.access(backup_path, os.W_OK): os.mkdir(backup_path)
config.set('config', 'backup_path', backup_path)
config.set('config', 'backup', 'true')
else:
config.set('config', 'backup', 'false')
config.add_section('example')
config.set('example', 'nytimes', 'New York Times')
config.set('example', 'sueddeutsche', 'Sueddeutsche Zeitung')
with open(CONFIGFILE, 'w') as configfile:
config.write(configfile)
except Exception, e:
print "Could not create %s: %s" % (CONFIGFILE, e)
else:
print "Successfully created %s. We've added a few example entries too." % CONFIGFILE
sys.exit(0)
# list all configured items with their names
def list_all_items():
config = ConfigParser.SafeConfigParser()
config.read(CONFIGFILE)
for section in config.sections():
# ignore config and example sections
if section != 'config' and section != 'example':
print "Section: %s" % section
for recipe, name in config.items(section):
print "\t%s (%s)" % (name, recipe)
# add a new configuration item
def add_item(recipe, name, section):
config = ConfigParser.SafeConfigParser()
config.read(CONFIGFILE)
# check if section already exists
try:
config.add_section(section)
except ConfigParser.DuplicateSectionError, ValueError:
pass
# entry already exists, asking whether to replace it
if config.has_option(section, recipe):
i = raw_input("Recipe %s with name %s already exists in section %s, do you want to update it (y/n)? " % (recipe, config.get(section, recipe), section))
        if i != 'y':
raise Exception("Adding item aborted by user as the item already exists.")
config.set(section, recipe, name)
with open(CONFIGFILE, 'w') as configfile:
config.write(configfile)
print "Successfully added item %s. Please add the required %s.recipe in %s now." % (name, recipe, config.get('config', 'recipes_path'))
# return a list of unique recipe names which
# should be converted in the current run
def collect_recipes(section='all', item=None):
recipes = []
config = ConfigParser.SafeConfigParser()
config.read(CONFIGFILE)
if item is None: # no request for specific item
# all entries requested
if 'all' == section:
for section in config.sections():
if section != 'config' and section != 'example':
for recipe, name in config.items(section):
recipes.append(recipe)
else: # all entries for specific section
if config.has_section(section):
for recipe, name in config.items(section):
recipes.append(recipe)
else:
raise Exception("Section %s is not available in current configuration." % section)
else: # specific entry
for section in config.sections():
if section != 'config' and section != 'example':
for recipe, name in config.items(section):
if item == recipe:
recipes.append(item)
if 0 == len(recipes): # no such recipe found
raise Exception("Recipe named %s could not be found, please check the name and your configuration." % item)
# Attention: We're removing duplicate entries here, user hopefully expect this behavior!
return list(set(recipes))
# convert a list of recipes to .mobi-format using ebook-convert
def convert_recipes(recipes):
config = ConfigParser.SafeConfigParser()
config.read(CONFIGFILE)
recipes_path = config.get('config', 'recipes_path')
output_path = config.get('config', 'output_path')
ebook_convert = config.get('config', 'ebook_convert')
for recipe in recipes:
try:
retcode = subprocess.call([ebook_convert, os.path.join(recipes_path, recipe + ".recipe"), os.path.join(output_path, recipe + ".mobi"), "--output-profile=kindle"])
if 0 != retcode:
raise Exception("Error while converting recipe %s" % recipe)
        except Exception, e:
            print "Could not convert %s: %s." % (os.path.join(recipes_path, recipe + ".recipe"), e)
# send all .mobi-files in defined output-directory
# to user via calibre-smtp
def send_ebooks():
config = ConfigParser.SafeConfigParser()
config.read(CONFIGFILE)
calibre_smtp = config.get('config', 'calibre-smtp')
# get all .mobi-files in output-dir
files = glob.glob(config.get('config', 'output_path') + "/*.mobi")
for f in files:
try:
retcode = subprocess.call([calibre_smtp, '-r', config.get('config', 'smtp_server'), '-u', config.get('config', 'smtp_user'), '-p', config.get('config', 'smtp_pw'), '-s', 'Send to Kindle', '-a', f, '-vv', config.get('config', 'smtp_mailaddr'), config.get('config', 'kindle_addr'), 'Send to Kindle'])
if 0 != retcode:
raise Exception("Error while sending .mobi-files via calibre-smtp.")
except Exception, e:
print "Could not send convertes files via mail: %s" % e
# clean the output directory
def cleanup():
config = ConfigParser.SafeConfigParser()
config.read(CONFIGFILE)
output_path = config.get('config', 'output_path')
# get all .mobi-files in output directory
    files = glob.glob(output_path + "/*.mobi")
# create a backup of created .mobi-files?
if 'true' == config.get('config', 'backup'):
backup_path = config.get('config', 'backup_path')
for f in files:
# add current time to file
now = datetime.now().strftime('%Y%m%d%H%M%S')
shutil.move(f, os.path.join(backup_path, now + "-" + os.path.basename(f)))
else:
# remove files
for f in files:
os.remove(f)
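# Typical invocations, as dispatched below (the script name is illustrative):
#   newspapers.py init                          # create the configuration file
#   newspapers.py all                           # convert and send every recipe
#   newspapers.py section daily                 # only recipes in section [daily] (alias: -s)
#   newspapers.py item nytimes                  # a single recipe by name (alias: -i)
#   newspapers.py add nytimes "New York Times" daily
#   newspapers.py list                          # show all configured items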
if '__main__' == __name__:
if not len(sys.argv) > 1:
usage()
if 'init' == sys.argv[1]:
create_configuration()
# check if configuration file exists
    # or prompt to create one
try:
with open(CONFIGFILE, 'r') as configfile:
pass
except:
i = raw_input("Neccessary configuration file %s could not be found, do you want to create it now (y/n)? " % CONFIGFILE)
if 'y' == i:
create_configuration()
else:
print "Cannot continue without configuration file. Either rerun %s and let it create the configuration file for you or create it manually. See example.cfg for possible options/values." % sys.argv[0]
sys.exit(1)
if sys.argv[1] in ['all', 'section', 'item', '-i', '-s']:
if 'section' == sys.argv[1] or '-s' == sys.argv[1]:
recipes = collect_recipes(sys.argv[2])
elif 'item' == sys.argv[1] or '-i' == sys.argv[1]:
recipes = collect_recipes(item=sys.argv[2])
else:
recipes = collect_recipes()
convert_recipes(recipes)
send_ebooks()
cleanup()
elif 'add' == sys.argv[1]: # add a new configuration item
try:
add_item(sys.argv[2], sys.argv[3], sys.argv[4])
except Exception, e:
print "Could not add new item: %s" % e
else:
print "Successfully added item to configuration."
elif 'list' == sys.argv[1]: # list all configured items
try:
list_all_items()
except Exception, e:
print "Could not list all items: %s" % e
else:
usage()<|fim▁end|> | # turns them into Kindle newspapers using 'ebook-convert' and sends them to
# the configured Kindle e-mail address using 'calibre-smtp'.
# |
<|file_name|>layout_task.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The layout task. Performs layout on the DOM, builds display lists and sends them to be
//! painted.
#![allow(unsafe_code)]
use animation;
use construct::ConstructionResult;
use context::{SharedLayoutContext, SharedLayoutContextWrapper};
use css::node_style::StyledNode;
use data::{LayoutDataAccess, LayoutDataWrapper};
use display_list_builder::ToGfxColor;
use flow::{self, Flow, ImmutableFlowUtils, MutableFlowUtils, MutableOwnedFlowUtils};
use flow_ref::FlowRef;
use fragment::{Fragment, FragmentBorderBoxIterator};
use incremental::{LayoutDamageComputation, REFLOW, REFLOW_ENTIRE_DOCUMENT, REPAINT};
use layout_debug;
use opaque_node::OpaqueNodeMethods;
use parallel::{self, UnsafeFlow};
use sequential;
use wrapper::{LayoutNode, TLayoutNode};
use azure::azure::AzColor;
use canvas_traits::CanvasMsg;
use encoding::EncodingRef;
use encoding::all::UTF_8;
use geom::matrix;
use geom::point::Point2D;
use geom::rect::Rect;
use geom::scale_factor::ScaleFactor;
use geom::size::Size2D;
use gfx_traits::color;
use gfx::display_list::{ClippingRegion, DisplayItemMetadata, DisplayList, OpaqueNode};
use gfx::display_list::{StackingContext};
use gfx::font_cache_task::FontCacheTask;
use gfx::paint_task::Msg as PaintMsg;
use gfx::paint_task::{PaintChan, PaintLayer};
use layout_traits::{LayoutControlMsg, LayoutTaskFactory};
use log;
use msg::compositor_msg::{Epoch, ScrollPolicy, LayerId};
use msg::constellation_msg::Msg as ConstellationMsg;
use msg::constellation_msg::{ConstellationChan, Failure, PipelineExitType, PipelineId};
use profile_traits::mem::{self, Report, ReportsChan};
use profile_traits::time::{self, ProfilerMetadata, profile};
use profile_traits::time::{TimerMetadataFrameType, TimerMetadataReflowType};
use net_traits::{load_bytes_iter, PendingAsyncLoad};
use net_traits::image_cache_task::{ImageCacheTask, ImageCacheResult, ImageCacheChan};
use script::dom::bindings::js::LayoutJS;
use script::dom::node::{LayoutData, Node};
use script::layout_interface::{Animation, ContentBoxResponse, ContentBoxesResponse};
use script::layout_interface::{HitTestResponse, LayoutChan, LayoutRPC};
use script::layout_interface::{MouseOverResponse, Msg, Reflow, ReflowGoal, ReflowQueryType};
use script::layout_interface::{ScriptLayoutChan, ScriptReflow, TrustedNodeAddress};
use script_traits::{ConstellationControlMsg, OpaqueScriptLayoutChannel};
use script_traits::{ScriptControlChan, StylesheetLoadResponder};
use std::borrow::ToOwned;
use std::cell::Cell;
use std::collections::HashMap;
use std::collections::hash_state::DefaultState;
use std::mem::transmute;
use std::ops::{Deref, DerefMut};
use std::ptr;
use std::sync::mpsc::{channel, Sender, Receiver, Select};
use std::sync::{Arc, Mutex, MutexGuard};
use style::computed_values::{filter, mix_blend_mode};
use style::media_queries::{MediaType, MediaQueryList, Device};
use style::node::TNode;
use style::selector_matching::Stylist;
use style::stylesheets::{Origin, Stylesheet, CSSRuleIteratorExt};
use url::Url;
use util::cursor::Cursor;
use util::fnv::FnvHasher;
use util::geometry::{Au, MAX_RECT};
use util::logical_geometry::LogicalPoint;
use util::mem::HeapSizeOf;
use util::opts;
use util::task::spawn_named_with_send_on_failure;
use util::task_state;
use util::workqueue::WorkQueue;
/// The number of screens of data we're allowed to generate display lists for in each direction.
pub const DISPLAY_PORT_SIZE_FACTOR: i32 = 8;
/// The number of screens we have to traverse before we decide to generate new display lists.
const DISPLAY_PORT_THRESHOLD_SIZE_FACTOR: i32 = 4;
/// Mutable data belonging to the LayoutTask.
///
/// This needs to be protected by a mutex so we can do fast RPCs.
pub struct LayoutTaskData {
/// The root of the flow tree.
pub root_flow: Option<FlowRef>,
/// The image cache.
pub image_cache_task: ImageCacheTask,
/// The channel on which messages can be sent to the constellation.
pub constellation_chan: ConstellationChan,
/// The size of the viewport.
pub screen_size: Size2D<Au>,
/// The root stacking context.
pub stacking_context: Option<Arc<StackingContext>>,
/// Performs CSS selector matching and style resolution.
pub stylist: Box<Stylist>,
/// The workers that we use for parallel operation.
pub parallel_traversal: Option<WorkQueue<SharedLayoutContextWrapper, UnsafeFlow>>,
/// The dirty rect. Used during display list construction.
pub dirty: Rect<Au>,
/// Starts at zero, and increased by one every time a layout completes.
/// This can be used to easily check for invalid stale data.
pub generation: u32,
/// A queued response for the union of the content boxes of a node.
pub content_box_response: Rect<Au>,
/// A queued response for the content boxes of a node.
pub content_boxes_response: Vec<Rect<Au>>,
/// The list of currently-running animations.
pub running_animations: Vec<Animation>,
/// Receives newly-discovered animations.
pub new_animations_receiver: Receiver<Animation>,
/// A channel on which new animations that have been triggered by style recalculation can be
/// sent.
pub new_animations_sender: Sender<Animation>,
/// A counter for epoch messages
epoch: Epoch,
/// The position and size of the visible rect for each layer. We do not build display lists
/// for any areas more than `DISPLAY_PORT_SIZE_FACTOR` screens away from this area.
pub visible_rects: Arc<HashMap<LayerId, Rect<Au>, DefaultState<FnvHasher>>>,
}
/// Information needed by the layout task.
pub struct LayoutTask {
/// The ID of the pipeline that we belong to.
pub id: PipelineId,
/// The URL of the pipeline that we belong to.
pub url: Url,
/// Is the current reflow of an iframe, as opposed to a root window?
pub is_iframe: bool,
/// The port on which we receive messages from the script task.
pub port: Receiver<Msg>,
/// The port on which we receive messages from the constellation
pub pipeline_port: Receiver<LayoutControlMsg>,
/// The port on which we receive messages from the image cache
image_cache_receiver: Receiver<ImageCacheResult>,
/// The channel on which the image cache can send messages to ourself.
image_cache_sender: ImageCacheChan,
/// The channel on which we or others can send messages to ourselves.
pub chan: LayoutChan,
/// The channel on which messages can be sent to the constellation.
pub constellation_chan: ConstellationChan,
/// The channel on which messages can be sent to the script task.
pub script_chan: ScriptControlChan,
/// The channel on which messages can be sent to the painting task.
pub paint_chan: PaintChan,
/// The channel on which messages can be sent to the time profiler.
pub time_profiler_chan: time::ProfilerChan,
/// The channel on which messages can be sent to the memory profiler.
pub mem_profiler_chan: mem::ProfilerChan,
/// The name used for the task's memory reporter.
pub reporter_name: String,
/// The channel on which messages can be sent to the image cache.
pub image_cache_task: ImageCacheTask,
/// Public interface to the font cache task.
pub font_cache_task: FontCacheTask,
/// Is this the first reflow in this LayoutTask?
pub first_reflow: Cell<bool>,
/// To receive a canvas renderer associated to a layer, this message is propagated
/// to the paint chan
pub canvas_layers_receiver: Receiver<(LayerId, Option<Arc<Mutex<Sender<CanvasMsg>>>>)>,
pub canvas_layers_sender: Sender<(LayerId, Option<Arc<Mutex<Sender<CanvasMsg>>>>)>,
/// A mutex to allow for fast, read-only RPC of layout's internal data
/// structures, while still letting the LayoutTask modify them.
///
/// All the other elements of this struct are read-only.
pub rw_data: Arc<Mutex<LayoutTaskData>>,
}
impl LayoutTaskFactory for LayoutTask {
/// Spawns a new layout task.
fn create(_phantom: Option<&mut LayoutTask>,
id: PipelineId,
url: Url,
is_iframe: bool,
chan: OpaqueScriptLayoutChannel,
pipeline_port: Receiver<LayoutControlMsg>,
constellation_chan: ConstellationChan,
failure_msg: Failure,
script_chan: ScriptControlChan,
paint_chan: PaintChan,
image_cache_task: ImageCacheTask,
font_cache_task: FontCacheTask,
time_profiler_chan: time::ProfilerChan,
memory_profiler_chan: mem::ProfilerChan,
shutdown_chan: Sender<()>) {
let ConstellationChan(con_chan) = constellation_chan.clone();
spawn_named_with_send_on_failure(format!("LayoutTask {:?}", id), task_state::LAYOUT, move || {
{ // Ensures layout task is destroyed before we send shutdown message
let sender = chan.sender();
let layout = LayoutTask::new(id,
url,
is_iframe,
chan.receiver(),
LayoutChan(sender),
pipeline_port,
constellation_chan,
script_chan,
paint_chan,
image_cache_task,
font_cache_task,
time_profiler_chan,
memory_profiler_chan);
layout.start();
}
shutdown_chan.send(()).unwrap();
}, ConstellationMsg::Failure(failure_msg), con_chan);
}
}
/// The `LayoutTask` `rw_data` lock must remain locked until the first reflow,
/// as RPC calls don't make sense until then. Use this in combination with
/// `LayoutTask::lock_rw_data` and `LayoutTask::return_rw_data`.
enum RWGuard<'a> {
/// If the lock was previously held, from when the task started.
Held(MutexGuard<'a, LayoutTaskData>),
/// If the lock was just used, and has been returned since there has been
/// a reflow already.
Used(MutexGuard<'a, LayoutTaskData>),
}
impl<'a> Deref for RWGuard<'a> {
type Target = LayoutTaskData;
fn deref(&self) -> &LayoutTaskData {
match *self {
RWGuard::Held(ref x) => &**x,
RWGuard::Used(ref x) => &**x,
}
}
}
impl<'a> DerefMut for RWGuard<'a> {
fn deref_mut(&mut self) -> &mut LayoutTaskData {
match *self {
RWGuard::Held(ref mut x) => &mut **x,
RWGuard::Used(ref mut x) => &mut **x,
}
}
}
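// An illustrative sketch of the intended calling pattern (it mirrors what
// handle_add_stylesheet and friends do below): take the guard, use it, then
// hand it back so the lock can stay held until the first reflow.
//
//     let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
//     // ... read or mutate *rw_data ...
//     LayoutTask::return_rw_data(possibly_locked_rw_data, rw_data);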
impl LayoutTask {
/// Creates a new `LayoutTask` structure.
fn new(id: PipelineId,
url: Url,
is_iframe: bool,
port: Receiver<Msg>,
chan: LayoutChan,
pipeline_port: Receiver<LayoutControlMsg>,
constellation_chan: ConstellationChan,
script_chan: ScriptControlChan,
paint_chan: PaintChan,
image_cache_task: ImageCacheTask,
font_cache_task: FontCacheTask,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan)
-> LayoutTask {
let screen_size = Size2D(Au(0), Au(0));
let device = Device::new(
MediaType::Screen,
opts::get().initial_window_size.as_f32() * ScaleFactor::new(1.0));
let parallel_traversal = if opts::get().layout_threads != 1 {
Some(WorkQueue::new("LayoutWorker", task_state::LAYOUT,
opts::get().layout_threads,
SharedLayoutContextWrapper(ptr::null())))
} else {
None
};
// Register this thread as a memory reporter, via its own channel.
let reporter = box chan.clone();
let reporter_name = format!("layout-reporter-{}", id.0);
mem_profiler_chan.send(mem::ProfilerMsg::RegisterReporter(reporter_name.clone(), reporter));
// Create the channel on which new animations can be sent.
let (new_animations_sender, new_animations_receiver) = channel();
let (image_cache_sender, image_cache_receiver) = channel();
let (canvas_layers_sender, canvas_layers_receiver) = channel();
LayoutTask {
id: id,
url: url,
is_iframe: is_iframe,
port: port,
pipeline_port: pipeline_port,
chan: chan,
script_chan: script_chan,
constellation_chan: constellation_chan.clone(),
paint_chan: paint_chan,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
reporter_name: reporter_name,
image_cache_task: image_cache_task.clone(),
font_cache_task: font_cache_task,
first_reflow: Cell::new(true),
image_cache_receiver: image_cache_receiver,
image_cache_sender: ImageCacheChan(image_cache_sender),
canvas_layers_receiver: canvas_layers_receiver,
canvas_layers_sender: canvas_layers_sender,
rw_data: Arc::new(Mutex::new(
LayoutTaskData {
root_flow: None,
image_cache_task: image_cache_task,
constellation_chan: constellation_chan,
screen_size: screen_size,
stacking_context: None,
stylist: box Stylist::new(device),
parallel_traversal: parallel_traversal,
dirty: Rect::zero(),
generation: 0,
content_box_response: Rect::zero(),
content_boxes_response: Vec::new(),
running_animations: Vec::new(),
visible_rects: Arc::new(HashMap::with_hash_state(Default::default())),
new_animations_receiver: new_animations_receiver,
new_animations_sender: new_animations_sender,
epoch: Epoch(0),
})),
}
}
/// Starts listening on the port.
fn start(self) {
let mut possibly_locked_rw_data = Some((*self.rw_data).lock().unwrap());
while self.handle_request(&mut possibly_locked_rw_data) {
// Loop indefinitely.
}
}
// Create a layout context for use in building display lists, hit testing, &c.
fn build_shared_layout_context(&self,
rw_data: &LayoutTaskData,
screen_size_changed: bool,
reflow_root: Option<&LayoutNode>,
url: &Url,
goal: ReflowGoal)
-> SharedLayoutContext {
SharedLayoutContext {
image_cache_task: rw_data.image_cache_task.clone(),
image_cache_sender: self.image_cache_sender.clone(),
screen_size: rw_data.screen_size.clone(),
screen_size_changed: screen_size_changed,
constellation_chan: rw_data.constellation_chan.clone(),
layout_chan: self.chan.clone(),
font_cache_task: self.font_cache_task.clone(),
canvas_layers_sender: self.canvas_layers_sender.clone(),
stylist: &*rw_data.stylist,
url: (*url).clone(),
reflow_root: reflow_root.map(|node| OpaqueNodeMethods::from_layout_node(node)),
dirty: Rect::zero(),
visible_rects: rw_data.visible_rects.clone(),
generation: rw_data.generation,
new_animations_sender: rw_data.new_animations_sender.clone(),
goal: goal,
}
}
/// Receives and dispatches messages from the script and constellation tasks
fn handle_request<'a>(&'a self,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>)
-> bool {
enum PortToRead {
Pipeline,
Script,
ImageCache,
}
let port_to_read = {
let sel = Select::new();
let mut port1 = sel.handle(&self.port);
let mut port2 = sel.handle(&self.pipeline_port);
let mut port3 = sel.handle(&self.image_cache_receiver);
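            // Registering handles with the old `std::sync::mpsc::Select` API
            // is unsafe; `wait()` below then blocks until one of the three
            // ports has a message ready.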
unsafe {
port1.add();
port2.add();
port3.add();
}
let ret = sel.wait();
if ret == port1.id() {
PortToRead::Script
} else if ret == port2.id() {
PortToRead::Pipeline
} else if ret == port3.id() {
PortToRead::ImageCache
} else {
panic!("invalid select result");
}
};
match port_to_read {
PortToRead::Pipeline => {
match self.pipeline_port.recv().unwrap() {
LayoutControlMsg::SetVisibleRects(new_visible_rects) => {
self.handle_request_helper(Msg::SetVisibleRects(new_visible_rects),
possibly_locked_rw_data)
}
LayoutControlMsg::TickAnimations => {
self.handle_request_helper(Msg::TickAnimations, possibly_locked_rw_data)
}
LayoutControlMsg::GetCurrentEpoch(sender) => {
self.handle_request_helper(Msg::GetCurrentEpoch(sender),
possibly_locked_rw_data)
}
LayoutControlMsg::ExitNow(exit_type) => {
self.handle_request_helper(Msg::ExitNow(exit_type),
possibly_locked_rw_data)
}
}
}
PortToRead::Script => {
let msg = self.port.recv().unwrap();
self.handle_request_helper(msg, possibly_locked_rw_data)
}
PortToRead::ImageCache => {
let _ = self.image_cache_receiver.recv().unwrap();
self.repaint(possibly_locked_rw_data)
}
}
}
/// If no reflow has happened yet, this will just return the lock in
/// `possibly_locked_rw_data`. Otherwise, it will acquire the `rw_data` lock.
///
/// If you do not wish RPCs to remain blocked, just drop the `RWGuard`
/// returned from this function. If you _do_ wish for them to remain blocked,
/// use `return_rw_data`.
fn lock_rw_data<'a>(&'a self,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>)
-> RWGuard<'a> {
match possibly_locked_rw_data.take() {
None => RWGuard::Used((*self.rw_data).lock().unwrap()),
Some(x) => RWGuard::Held(x),
}
}
/// If no reflow has ever been triggered, this will keep the lock, locked
/// (and saved in `possibly_locked_rw_data`). If it has been, the lock will
/// be unlocked.
fn return_rw_data<'a>(possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>,
rw_data: RWGuard<'a>) {
match rw_data {
RWGuard::Used(x) => drop(x),
RWGuard::Held(x) => *possibly_locked_rw_data = Some(x),
}
}
/// Repaint the scene, without performing style matching. This is typically
/// used when an image arrives asynchronously and triggers a relayout and
/// repaint.
/// TODO: In the future we could detect if the image size hasn't changed
/// since last time and avoid performing a complete layout pass.
fn repaint<'a>(&'a self,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>) -> bool {
let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
let reflow_info = Reflow {
goal: ReflowGoal::ForDisplay,
page_clip_rect: MAX_RECT,
};
let mut layout_context = self.build_shared_layout_context(&*rw_data,
false,
None,
&self.url,
reflow_info.goal);
self.perform_post_style_recalc_layout_passes(&reflow_info,
&mut *rw_data,
&mut layout_context);
true
}
/// Receives and dispatches messages from other tasks.
fn handle_request_helper<'a>(&'a self,
request: Msg,
possibly_locked_rw_data: &mut Option<MutexGuard<'a,
LayoutTaskData>>)
-> bool {
match request {
Msg::AddStylesheet(sheet, mq) => {
self.handle_add_stylesheet(sheet, mq, possibly_locked_rw_data)
}
Msg::LoadStylesheet(url, mq, pending, link_element) => {
self.handle_load_stylesheet(url, mq, pending, link_element, possibly_locked_rw_data)
}
Msg::SetQuirksMode => self.handle_set_quirks_mode(possibly_locked_rw_data),
Msg::GetRPC(response_chan) => {
response_chan.send(box LayoutRPCImpl(self.rw_data.clone()) as
Box<LayoutRPC + Send>).unwrap();
},
Msg::Reflow(data) => {
profile(time::ProfilerCategory::LayoutPerform,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
|| self.handle_reflow(&*data, possibly_locked_rw_data));
},
Msg::TickAnimations => self.tick_all_animations(possibly_locked_rw_data),
Msg::SetVisibleRects(new_visible_rects) => {
self.set_visible_rects(new_visible_rects, possibly_locked_rw_data);
}
Msg::ReapLayoutData(dead_layout_data) => {
unsafe {
self.handle_reap_layout_data(dead_layout_data)
}
},
Msg::CollectReports(reports_chan) => {
self.collect_reports(reports_chan, possibly_locked_rw_data);
},
Msg::GetCurrentEpoch(sender) => {
let rw_data = self.lock_rw_data(possibly_locked_rw_data);
sender.send(rw_data.epoch).unwrap();
},
Msg::PrepareToExit(response_chan) => {
self.prepare_to_exit(response_chan, possibly_locked_rw_data);
return false
},
Msg::ExitNow(exit_type) => {
debug!("layout: ExitNow received");
self.exit_now(possibly_locked_rw_data, exit_type);
return false
}
}
true
}
fn collect_reports<'a>(&'a self,
reports_chan: ReportsChan,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>) {
let mut reports = vec![];
// FIXME(njn): Just measuring the display tree for now.
let rw_data = self.lock_rw_data(possibly_locked_rw_data);
let stacking_context = rw_data.stacking_context.as_ref();
reports.push(Report {
path: path!["pages", format!("url({})", self.url), "display-list"],
size: stacking_context.map_or(0, |sc| sc.heap_size_of_children()),
});
reports_chan.send(reports);
}
/// Enters a quiescent state in which no new messages except for
/// `layout_interface::Msg::ReapLayoutData` will be processed until an `ExitNow` is
/// received. A pong is immediately sent on the given response channel.
fn prepare_to_exit<'a>(&'a self,
response_chan: Sender<()>,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>) {
response_chan.send(()).unwrap();
loop {
match self.port.recv().unwrap() {
Msg::ReapLayoutData(dead_layout_data) => {
unsafe {
self.handle_reap_layout_data(dead_layout_data)
}
}
Msg::ExitNow(exit_type) => {
debug!("layout task is exiting...");
self.exit_now(possibly_locked_rw_data, exit_type);
break
}
_ => {
panic!("layout: message that wasn't `ExitNow` received after \
`PrepareToExitMsg`")
}
}
}
}
/// Shuts down the layout task now. If there are any DOM nodes left, layout will now (safely)
/// crash.
fn exit_now<'a>(&'a self,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>,
exit_type: PipelineExitType) {
let (response_chan, response_port) = channel();
{
let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
if let Some(ref mut traversal) = (&mut *rw_data).parallel_traversal {
traversal.shutdown()
}
LayoutTask::return_rw_data(possibly_locked_rw_data, rw_data);
}
let msg = mem::ProfilerMsg::UnregisterReporter(self.reporter_name.clone());
self.mem_profiler_chan.send(msg);
self.paint_chan.send(PaintMsg::Exit(Some(response_chan), exit_type));
response_port.recv().unwrap()
}
fn handle_load_stylesheet<'a>(&'a self,
url: Url,
mq: MediaQueryList,
pending: PendingAsyncLoad,
responder: Box<StylesheetLoadResponder+Send>,
possibly_locked_rw_data:
&mut Option<MutexGuard<'a, LayoutTaskData>>) {
// TODO: Get the actual value. http://dev.w3.org/csswg/css-syntax/#environment-encoding
let environment_encoding = UTF_8 as EncodingRef;
// TODO we don't really even need to load this if mq does not match
let (metadata, iter) = load_bytes_iter(pending);
let protocol_encoding_label = metadata.charset.as_ref().map(|s| &**s);
let final_url = metadata.final_url;
let sheet = Stylesheet::from_bytes_iter(iter,
final_url,
protocol_encoding_label,
Some(environment_encoding),
Origin::Author);
//TODO: mark critical subresources as blocking load as well (#5974)
let ScriptControlChan(ref chan) = self.script_chan;
chan.send(ConstellationControlMsg::StylesheetLoadComplete(self.id, url, responder)).unwrap();
self.handle_add_stylesheet(sheet, mq, possibly_locked_rw_data);
}
fn handle_add_stylesheet<'a>(&'a self,
sheet: Stylesheet,
mq: MediaQueryList,
possibly_locked_rw_data:
&mut Option<MutexGuard<'a, LayoutTaskData>>) {
// Find all font-face rules and notify the font cache of them.
// GWTODO: Need to handle unloading web fonts (when we handle unloading stylesheets!)
let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
if mq.evaluate(&rw_data.stylist.device) {
for font_face in sheet.effective_rules(&rw_data.stylist.device).font_face() {
for source in font_face.sources.iter() {
self.font_cache_task.add_web_font(font_face.family.clone(), source.clone());
}
}
rw_data.stylist.add_stylesheet(sheet);
}
LayoutTask::return_rw_data(possibly_locked_rw_data, rw_data);
}
/// Sets quirks mode for the document, causing the quirks mode stylesheet to be loaded.
fn handle_set_quirks_mode<'a>(&'a self,
possibly_locked_rw_data:
&mut Option<MutexGuard<'a, LayoutTaskData>>) {
let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
rw_data.stylist.add_quirks_mode_stylesheet();
LayoutTask::return_rw_data(possibly_locked_rw_data, rw_data);
}
fn try_get_layout_root(&self, node: LayoutNode) -> Option<FlowRef> {
let mut layout_data_ref = node.mutate_layout_data();
let layout_data =
match layout_data_ref.as_mut() {
None => return None,
Some(layout_data) => layout_data,
};
let result = layout_data.data.flow_construction_result.swap_out();
let mut flow = match result {
ConstructionResult::Flow(mut flow, abs_descendants) => {
                // Note: Assuming that the root has position 'static' (as per
                // CSS 2.1 Section 9.3.1). Otherwise, if it were absolutely
// positioned, it would return a reference to itself in
// `abs_descendants` and would lead to a circular reference.
// Set Root as CB for any remaining absolute descendants.
flow.set_absolute_descendants(abs_descendants);
flow
}
_ => return None,
};
flow.mark_as_root();
Some(flow)
}
fn get_layout_root(&self, node: LayoutNode) -> FlowRef {
self.try_get_layout_root(node).expect("no layout root")
}
/// Performs layout constraint solving.
///
/// This corresponds to `Reflow()` in Gecko and `layout()` in WebKit/Blink and should be
/// benchmarked against those two. It is marked `#[inline(never)]` to aid profiling.
#[inline(never)]
fn solve_constraints<'a>(&self,
layout_root: &mut FlowRef,
shared_layout_context: &SharedLayoutContext) {
let _scope = layout_debug_scope!("solve_constraints");
sequential::traverse_flow_tree_preorder(layout_root, shared_layout_context);
}
/// Performs layout constraint solving in parallel.
///
/// This corresponds to `Reflow()` in Gecko and `layout()` in WebKit/Blink and should be
/// benchmarked against those two. It is marked `#[inline(never)]` to aid profiling.
#[inline(never)]
fn solve_constraints_parallel(&self,
rw_data: &mut LayoutTaskData,
layout_root: &mut FlowRef,
shared_layout_context: &SharedLayoutContext) {
let _scope = layout_debug_scope!("solve_constraints_parallel");
match rw_data.parallel_traversal {
None => panic!("solve_contraints_parallel() called with no parallel traversal ready"),
Some(ref mut traversal) => {
// NOTE: this currently computes borders, so any pruning should separate that
// operation out.
parallel::traverse_flow_tree_preorder(layout_root,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
shared_layout_context,
traversal);
}
}
}
/// Verifies that every node was either marked as a leaf or as a nonleaf in the flow tree.
    /// This check runs only in debug builds.
#[inline(never)]
#[cfg(debug)]
fn verify_flow_tree(&self, layout_root: &mut FlowRef) {
let mut traversal = traversal::FlowTreeVerification;
layout_root.traverse_preorder(&mut traversal);
}
#[cfg(not(debug))]
fn verify_flow_tree(&self, _: &mut FlowRef) {
}
fn process_content_box_request<'a>(&'a self,
requested_node: TrustedNodeAddress,
layout_root: &mut FlowRef,
rw_data: &mut RWGuard<'a>) {
// FIXME(pcwalton): This has not been updated to handle the stacking context relative
// stuff. So the position is wrong in most cases.
let requested_node: OpaqueNode = OpaqueNodeMethods::from_script_node(requested_node);
let mut iterator = UnioningFragmentBorderBoxIterator::new(requested_node);
sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root, &mut iterator);
rw_data.content_box_response = match iterator.rect {
Some(rect) => rect,
None => Rect::zero()
};
}
fn process_content_boxes_request<'a>(&'a self,
requested_node: TrustedNodeAddress,
layout_root: &mut FlowRef,
rw_data: &mut RWGuard<'a>) {
// FIXME(pcwalton): This has not been updated to handle the stacking context relative
// stuff. So the position is wrong in most cases.
let requested_node: OpaqueNode = OpaqueNodeMethods::from_script_node(requested_node);
let mut iterator = CollectingFragmentBorderBoxIterator::new(requested_node);
sequential::iterate_through_flow_tree_fragment_border_boxes(layout_root, &mut iterator);
rw_data.content_boxes_response = iterator.rects;
}
fn compute_abs_pos_and_build_display_list<'a>(&'a self,
data: &Reflow,
layout_root: &mut FlowRef,
shared_layout_context: &mut SharedLayoutContext,
rw_data: &mut LayoutTaskData) {
let writing_mode = flow::base(&**layout_root).writing_mode;
profile(time::ProfilerCategory::LayoutDispListBuild,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
|| {
shared_layout_context.dirty =
flow::base(&**layout_root).position.to_physical(writing_mode,
rw_data.screen_size);
flow::mut_base(&mut **layout_root).stacking_relative_position =
LogicalPoint::zero(writing_mode).to_physical(writing_mode,
rw_data.screen_size);
flow::mut_base(&mut **layout_root).clip =
ClippingRegion::from_rect(&data.page_clip_rect);
match rw_data.parallel_traversal {
None => {
sequential::build_display_list_for_subtree(layout_root,
shared_layout_context);
}
Some(ref mut traversal) => {
parallel::build_display_list_for_subtree(layout_root,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
shared_layout_context,
traversal);
}
}
if data.goal == ReflowGoal::ForDisplay {
debug!("Done building display list.");
let root_background_color = get_root_flow_background_color(&mut **layout_root);
let root_size = {
let root_flow = flow::base(&**layout_root);
root_flow.position.size.to_physical(root_flow.writing_mode)
};
let mut display_list = box DisplayList::new();
flow::mut_base(&mut **layout_root).display_list_building_result
.add_to(&mut *display_list);
let paint_layer = Arc::new(PaintLayer::new(layout_root.layer_id(0),
root_background_color,
ScrollPolicy::Scrollable));
let origin = Rect(Point2D(Au(0), Au(0)), root_size);
let stacking_context = Arc::new(StackingContext::new(display_list,
&origin,
&origin,
0,
&matrix::identity(),
filter::T::new(Vec::new()),
mix_blend_mode::T::normal,
Some(paint_layer)));
if opts::get().dump_display_list {
println!("#### start printing display list.");
stacking_context.print(String::from_str("#"));
}
rw_data.stacking_context = Some(stacking_context.clone());
debug!("Layout done!");
rw_data.epoch.next();
self.paint_chan.send(PaintMsg::PaintInit(rw_data.epoch, stacking_context));
}
});
}
/// The high-level routine that performs layout tasks.
fn handle_reflow<'a>(&'a self,
data: &ScriptReflow,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>) {
// FIXME: Isolate this transmutation into a "bridge" module.
// FIXME(rust#16366): The following line had to be moved because of a
// rustc bug. It should be in the next unsafe block.
let mut node: LayoutJS<Node> = unsafe {
LayoutJS::from_trusted_node_address(data.document_root)
};
let node: &mut LayoutNode = unsafe {
transmute(&mut node)
};
debug!("layout: received layout request for: {}", self.url.serialize());
if log_enabled!(log::DEBUG) {
node.dump();
}
let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
let initial_viewport = data.window_size.initial_viewport;
let old_screen_size = rw_data.screen_size;
let current_screen_size = Size2D(Au::from_f32_px(initial_viewport.width.get()),
Au::from_f32_px(initial_viewport.height.get()));
rw_data.screen_size = current_screen_size;
// Handle conditions where the entire flow tree is invalid.
let screen_size_changed = current_screen_size != old_screen_size;
if screen_size_changed {
// Calculate the actual viewport as per DEVICE-ADAPT § 6
let device = Device::new(MediaType::Screen, initial_viewport);
rw_data.stylist.set_device(device);
if let Some(constraints) = rw_data.stylist.constrain_viewport() {
debug!("Viewport constraints: {:?}", constraints);
// other rules are evaluated against the actual viewport
rw_data.screen_size = Size2D(Au::from_f32_px(constraints.size.width.get()),
Au::from_f32_px(constraints.size.height.get()));
let device = Device::new(MediaType::Screen, constraints.size);
rw_data.stylist.set_device(device);
// let the constellation know about the viewport constraints
let ConstellationChan(ref constellation_chan) = rw_data.constellation_chan;
constellation_chan.send(ConstellationMsg::ViewportConstrained(
self.id, constraints)).unwrap();
}
}
// If the entire flow tree is invalid, then it will be reflowed anyhow.
let needs_dirtying = rw_data.stylist.update();
let needs_reflow = screen_size_changed && !needs_dirtying;
unsafe {
if needs_dirtying {
LayoutTask::dirty_all_nodes(node);
}
}
if needs_reflow {
if let Some(mut flow) = self.try_get_layout_root(*node) {
LayoutTask::reflow_all_nodes(&mut *flow);
}
}
// Create a layout context for use throughout the following passes.
let mut shared_layout_context = self.build_shared_layout_context(&*rw_data,
screen_size_changed,
Some(&node),
&self.url,
data.reflow_info.goal);
if node.is_dirty() || node.has_dirty_descendants() || rw_data.stylist.is_dirty() {
// Recalculate CSS styles and rebuild flows and fragments.
profile(time::ProfilerCategory::LayoutStyleRecalc,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
|| {
// Perform CSS selector matching and flow construction.
let rw_data = &mut *rw_data;
match rw_data.parallel_traversal {
None => {
sequential::traverse_dom_preorder(*node, &shared_layout_context);
}
Some(ref mut traversal) => {
parallel::traverse_dom_preorder(*node, &shared_layout_context, traversal);
}
}
});
// Retrieve the (possibly rebuilt) root flow.
rw_data.root_flow = Some(self.get_layout_root((*node).clone()));
// Kick off animations if any were triggered.
animation::process_new_animations(&mut *rw_data, self.id);
}
// Send new canvas renderers to the paint task
while let Ok((layer_id, renderer)) = self.canvas_layers_receiver.try_recv() {
// Just send if there's an actual renderer
if let Some(renderer) = renderer {
self.paint_chan.send(PaintMsg::CanvasLayer(layer_id, renderer));
}
}
// Perform post-style recalculation layout passes.
self.perform_post_style_recalc_layout_passes(&data.reflow_info,
&mut rw_data,
&mut shared_layout_context);
let mut root_flow = (*rw_data.root_flow.as_ref().unwrap()).clone();
match data.query_type {
ReflowQueryType::ContentBoxQuery(node) => {
self.process_content_box_request(node, &mut root_flow, &mut rw_data)
}
ReflowQueryType::ContentBoxesQuery(node) => {
self.process_content_boxes_request(node, &mut root_flow, &mut rw_data)
}
ReflowQueryType::NoQuery => {}
}
// Tell script that we're done.
//
// FIXME(pcwalton): This should probably be *one* channel, but we can't fix this without
// either select or a filtered recv() that only looks for messages of a given type.
data.script_join_chan.send(()).unwrap();
let ScriptControlChan(ref chan) = data.script_chan;
chan.send(ConstellationControlMsg::ReflowComplete(self.id, data.id)).unwrap();
}
fn set_visible_rects<'a>(&'a self,
new_visible_rects: Vec<(LayerId, Rect<Au>)>,
possibly_locked_rw_data: &mut Option<MutexGuard<'a, LayoutTaskData>>)
-> bool {
let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
// First, determine if we need to regenerate the display lists. This will happen if the
// layers have moved more than `DISPLAY_PORT_THRESHOLD_SIZE_FACTOR` away from their last
// positions.
let mut must_regenerate_display_lists = false;
let mut old_visible_rects = HashMap::with_hash_state(Default::default());
let inflation_amount =
Size2D(rw_data.screen_size.width * DISPLAY_PORT_THRESHOLD_SIZE_FACTOR,
rw_data.screen_size.height * DISPLAY_PORT_THRESHOLD_SIZE_FACTOR);
for &(ref layer_id, ref new_visible_rect) in new_visible_rects.iter() {
match rw_data.visible_rects.get(layer_id) {
None => {
old_visible_rects.insert(*layer_id, *new_visible_rect);
}
Some(old_visible_rect) => {
old_visible_rects.insert(*layer_id, *old_visible_rect);
if !old_visible_rect.inflate(inflation_amount.width, inflation_amount.height)
.intersects(new_visible_rect) {
must_regenerate_display_lists = true;
}
}
}
}
if !must_regenerate_display_lists {
// Update `visible_rects` in case there are new layers that were discovered.
rw_data.visible_rects = Arc::new(old_visible_rects);
return true
}
debug!("regenerating display lists!");
for &(ref layer_id, ref new_visible_rect) in new_visible_rects.iter() {
old_visible_rects.insert(*layer_id, *new_visible_rect);
}
rw_data.visible_rects = Arc::new(old_visible_rects);
// Regenerate the display lists.
let reflow_info = Reflow {
goal: ReflowGoal::ForDisplay,
page_clip_rect: MAX_RECT,
};
let mut layout_context = self.build_shared_layout_context(&*rw_data,
false,
None,
&self.url,
reflow_info.goal);
self.perform_post_main_layout_passes(&reflow_info, &mut *rw_data, &mut layout_context);
true
}
fn tick_all_animations<'a>(&'a self,
possibly_locked_rw_data: &mut Option<MutexGuard<'a,
LayoutTaskData>>) {
let mut rw_data = self.lock_rw_data(possibly_locked_rw_data);
animation::tick_all_animations(self, &mut rw_data)
}
pub fn tick_animation<'a>(&'a self, animation: &Animation, rw_data: &mut LayoutTaskData) {
let reflow_info = Reflow {
goal: ReflowGoal::ForDisplay,
page_clip_rect: MAX_RECT,
};
// Perform an abbreviated style recalc that operates without access to the DOM.
let mut layout_context = self.build_shared_layout_context(&*rw_data,
false,
None,
&self.url,
reflow_info.goal);
let mut root_flow = (*rw_data.root_flow.as_ref().unwrap()).clone();
profile(time::ProfilerCategory::LayoutStyleRecalc,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
|| animation::recalc_style_for_animation(root_flow.deref_mut(), &animation));
self.perform_post_style_recalc_layout_passes(&reflow_info,
&mut *rw_data,
&mut layout_context);
}
fn perform_post_style_recalc_layout_passes<'a>(&'a self,
data: &Reflow,
rw_data: &mut LayoutTaskData,
layout_context: &mut SharedLayoutContext) {
let mut root_flow = (*rw_data.root_flow.as_ref().unwrap()).clone();
profile(time::ProfilerCategory::LayoutRestyleDamagePropagation,
self.profiler_metadata(),<|fim▁hole|> self.time_profiler_chan.clone(),
|| {
if opts::get().nonincremental_layout || root_flow.deref_mut()
.compute_layout_damage()
.contains(REFLOW_ENTIRE_DOCUMENT) {
root_flow.deref_mut().reflow_entire_document()
}
});
// Verification of the flow tree, which ensures that all nodes were either marked as leaves
// or as non-leaves. This becomes a no-op in release builds. (It is inconsequential to
// memory safety but is a useful debugging tool.)
self.verify_flow_tree(&mut root_flow);
if opts::get().trace_layout {
layout_debug::begin_trace(root_flow.clone());
}
// Resolve generated content.
profile(time::ProfilerCategory::LayoutGeneratedContent,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
|| sequential::resolve_generated_content(&mut root_flow, &layout_context));
// Perform the primary layout passes over the flow tree to compute the locations of all
// the boxes.
profile(time::ProfilerCategory::LayoutMain,
self.profiler_metadata(),
self.time_profiler_chan.clone(),
|| {
match rw_data.parallel_traversal {
None => {
// Sequential mode.
self.solve_constraints(&mut root_flow, &layout_context)
}
Some(_) => {
// Parallel mode.
self.solve_constraints_parallel(rw_data,
&mut root_flow,
&mut *layout_context);
}
}
});
self.perform_post_main_layout_passes(data, rw_data, layout_context);
}
fn perform_post_main_layout_passes<'a>(&'a self,
data: &Reflow,
rw_data: &mut LayoutTaskData,
layout_context: &mut SharedLayoutContext) {
// Build the display list if necessary, and send it to the painter.
let mut root_flow = (*rw_data.root_flow.as_ref().unwrap()).clone();
self.compute_abs_pos_and_build_display_list(data,
&mut root_flow,
&mut *layout_context,
rw_data);
self.first_reflow.set(false);
if opts::get().trace_layout {
layout_debug::end_trace();
}
if opts::get().dump_flow_tree {
root_flow.dump();
}
rw_data.generation += 1;
}
unsafe fn dirty_all_nodes(node: &mut LayoutNode) {
for node in node.traverse_preorder() {
// TODO(cgaebel): mark nodes which are sensitive to media queries as
// "changed":
// > node.set_changed(true);
node.set_dirty(true);
node.set_dirty_siblings(true);
node.set_dirty_descendants(true);
}
}
fn reflow_all_nodes(flow: &mut Flow) {
debug!("reflowing all nodes!");
flow::mut_base(flow).restyle_damage.insert(REFLOW | REPAINT);
for child in flow::child_iter(flow) {
LayoutTask::reflow_all_nodes(child);
}
}
/// Handles a message to destroy layout data. Layout data must be destroyed on *this* task
/// because the struct type is transmuted to a different type on the script side.
unsafe fn handle_reap_layout_data(&self, layout_data: LayoutData) {
let layout_data_wrapper: LayoutDataWrapper = transmute(layout_data);
layout_data_wrapper.remove_compositor_layers(self.constellation_chan.clone());
}
/// Returns profiling information which is passed to the time profiler.
fn profiler_metadata(&self) -> ProfilerMetadata {
Some((&self.url,
if self.is_iframe {
TimerMetadataFrameType::IFrame
} else {
TimerMetadataFrameType::RootWindow
},
if self.first_reflow.get() {
TimerMetadataReflowType::FirstReflow
} else {
TimerMetadataReflowType::Incremental
}))
}
}
struct LayoutRPCImpl(Arc<Mutex<LayoutTaskData>>);
impl LayoutRPC for LayoutRPCImpl {
// The neat thing here is that in order to answer the following two queries we only
// need to compare nodes for equality. Thus we can safely work only with `OpaqueNode`.
fn content_box(&self) -> ContentBoxResponse {
let &LayoutRPCImpl(ref rw_data) = self;
let rw_data = rw_data.lock().unwrap();
ContentBoxResponse(rw_data.content_box_response)
}
/// Requests the dimensions of all the content boxes, as in the `getClientRects()` call.
fn content_boxes(&self) -> ContentBoxesResponse {
let &LayoutRPCImpl(ref rw_data) = self;
let rw_data = rw_data.lock().unwrap();
ContentBoxesResponse(rw_data.content_boxes_response.clone())
}
/// Requests the node containing the point of interest.
fn hit_test(&self, _: TrustedNodeAddress, point: Point2D<f32>) -> Result<HitTestResponse, ()> {
let point = Point2D(Au::from_f32_px(point.x), Au::from_f32_px(point.y));
let resp = {
let &LayoutRPCImpl(ref rw_data) = self;
let rw_data = rw_data.lock().unwrap();
match rw_data.stacking_context {
None => panic!("no root stacking context!"),
Some(ref stacking_context) => {
let mut result = Vec::new();
stacking_context.hit_test(point, &mut result, true);
if !result.is_empty() {
Some(HitTestResponse(result[0].node.to_untrusted_node_address()))
} else {
None
}
}
}
};
        resp.ok_or(())
}
fn mouse_over(&self, _: TrustedNodeAddress, point: Point2D<f32>)
-> Result<MouseOverResponse, ()> {
let mut mouse_over_list: Vec<DisplayItemMetadata> = vec!();
let point = Point2D(Au::from_f32_px(point.x), Au::from_f32_px(point.y));
{
let &LayoutRPCImpl(ref rw_data) = self;
let rw_data = rw_data.lock().unwrap();
match rw_data.stacking_context {
None => panic!("no root stacking context!"),
Some(ref stacking_context) => {
stacking_context.hit_test(point, &mut mouse_over_list, false);
}
}
// Compute the new cursor.
let cursor = if !mouse_over_list.is_empty() {
mouse_over_list[0].pointing.unwrap()
} else {
Cursor::DefaultCursor
};
let ConstellationChan(ref constellation_chan) = rw_data.constellation_chan;
constellation_chan.send(ConstellationMsg::SetCursor(cursor)).unwrap();
}
if mouse_over_list.is_empty() {
Err(())
} else {
let response_list =
mouse_over_list.iter()
.map(|metadata| metadata.node.to_untrusted_node_address())
.collect();
Ok(MouseOverResponse(response_list))
}
}
}
struct UnioningFragmentBorderBoxIterator {
node_address: OpaqueNode,
rect: Option<Rect<Au>>,
}
impl UnioningFragmentBorderBoxIterator {
fn new(node_address: OpaqueNode) -> UnioningFragmentBorderBoxIterator {
UnioningFragmentBorderBoxIterator {
node_address: node_address,
rect: None
}
}
}
impl FragmentBorderBoxIterator for UnioningFragmentBorderBoxIterator {
fn process(&mut self, _: &Fragment, border_box: &Rect<Au>) {
self.rect = match self.rect {
Some(rect) => {
Some(rect.union(border_box))
}
None => {
Some(*border_box)
}
};
}
fn should_process(&mut self, fragment: &Fragment) -> bool {
fragment.contains_node(self.node_address)
}
}
struct CollectingFragmentBorderBoxIterator {
node_address: OpaqueNode,
rects: Vec<Rect<Au>>,
}
impl CollectingFragmentBorderBoxIterator {
fn new(node_address: OpaqueNode) -> CollectingFragmentBorderBoxIterator {
CollectingFragmentBorderBoxIterator {
node_address: node_address,
rects: Vec::new(),
}
}
}
impl FragmentBorderBoxIterator for CollectingFragmentBorderBoxIterator {
fn process(&mut self, _: &Fragment, border_box: &Rect<Au>) {
self.rects.push(*border_box);
}
fn should_process(&mut self, fragment: &Fragment) -> bool {
fragment.contains_node(self.node_address)
}
}
// The default computed value for background-color is transparent (see
// http://dev.w3.org/csswg/css-backgrounds/#background-color). However, we
// need to propagate the background color from the root HTML/Body
// element (http://dev.w3.org/csswg/css-backgrounds/#special-backgrounds) if
// it is non-transparent. The phrase in the spec "If the canvas background
// is not opaque, what shows through is UA-dependent." is handled by rust-layers
// clearing the frame buffer to white. This ensures that setting a background
// color on an iframe element is handled correctly even though the iframe
// content itself has a default transparent background color.
fn get_root_flow_background_color(flow: &mut Flow) -> AzColor {
if !flow.is_block_like() {
return color::transparent()
}
let block_flow = flow.as_block();
let kid = match block_flow.base.children.iter_mut().next() {
None => return color::transparent(),
Some(kid) => kid,
};
if !kid.is_block_like() {
return color::transparent()
}
let kid_block_flow = kid.as_block();
kid_block_flow.fragment
.style
.resolve_color(kid_block_flow.fragment.style.get_background().background_color)
.to_gfx_color()
}<|fim▁end|> | |
<|file_name|>cache.go<|end_file_name|><|fim▁begin|>package api
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/meiraka/vv/internal/gzip"
"github.com/meiraka/vv/internal/request"
)
type cache struct {
changed chan struct{}
changedB bool
json []byte
gzjson []byte
date time.Time
mu sync.RWMutex
}
func newCache(i interface{}) (*cache, error) {
b, gz, err := cacheBinary(i)
if err != nil {
return nil, err
}
c := &cache{
changed: make(chan struct{}, 1),
changedB: true,
json: b,
gzjson: gz,
date: time.Now().UTC(),
}
return c, nil
}
func (c *cache) Close() {
c.mu.Lock()
if c.changedB {
close(c.changed)
c.changedB = false
}
c.mu.Unlock()
}
func (c *cache) Changed() <-chan struct{} {
return c.changed
}
func (c *cache) Set(i interface{}) error {
_, err := c.set(i, true)
return err
}
func (c *cache) SetIfModified(i interface{}) (changed bool, err error) {
return c.set(i, false)
}
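// An illustrative usage sketch (assumed, not part of the original file): the
// cache doubles as an http.Handler, so a server can do roughly
//
//	c, _ := newCache(initialData)
//	mux.Handle("/api/data", c) // serves the cached JSON with ETag/gzip support
//	go func() {
//		for range c.Changed() {
//			// notify long-polling or websocket clients, etc.
//		}
//	}()
//	_, _ = c.SetIfModified(newData) // re-marshals, bumps the date only on change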
func (c *cache) get() ([]byte, []byte, time.Time) {
c.mu.RLock()
defer c.mu.RUnlock()
return c.json, c.gzjson, c.date
}
<|fim▁hole|> c.mu.RUnlock()
etag := fmt.Sprintf(`"%d.%d"`, date.Unix(), date.Nanosecond())
if request.NoneMatch(r, etag) {
w.WriteHeader(http.StatusNotModified)
return
}
if !request.ModifiedSince(r, date) {
w.WriteHeader(http.StatusNotModified)
return
}
w.Header().Add("Cache-Control", "max-age=0")
w.Header().Add("Content-Type", "application/json; charset=utf-8")
w.Header().Add("Last-Modified", date.Format(http.TimeFormat))
w.Header().Add("Vary", "Accept-Encoding")
w.Header().Add("ETag", etag)
status := http.StatusOK
if getUpdateTime(r).After(date) {
status = http.StatusAccepted
}
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && gz != nil {
w.Header().Add("Content-Encoding", "gzip")
w.Header().Add("Content-Length", strconv.Itoa(len(gz)))
w.WriteHeader(status)
w.Write(gz)
return
}
w.Header().Add("Content-Length", strconv.Itoa(len(b)))
w.WriteHeader(status)
w.Write(b)
}
func (c *cache) set(i interface{}, force bool) (bool, error) {
n, gz, err := cacheBinary(i)
if err != nil {
return false, err
}
c.mu.Lock()
defer c.mu.Unlock()
o := c.json
if force || !bytes.Equal(o, n) {
c.json = n
c.date = time.Now().UTC()
c.gzjson = gz
if c.changedB {
select {
case c.changed <- struct{}{}:
default:
}
}
return true, nil
}
return false, nil
}
func cacheBinary(i interface{}) ([]byte, []byte, error) {
n, err := json.Marshal(i)
if err != nil {
return nil, nil, err
}
gz, err := gzip.Encode(n)
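	// A gzip failure is deliberately swallowed: the JSON itself is fine, so
	// callers fall back to serving it uncompressed (the gzip slice stays nil).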
if err != nil {
return n, nil, nil
}
return n, gz, nil
}
type httpContextKey string
const httpUpdateTime = httpContextKey("updateTime")
func getUpdateTime(r *http.Request) time.Time {
if v := r.Context().Value(httpUpdateTime); v != nil {
if i, ok := v.(time.Time); ok {
return i
}
}
return time.Time{}
}
func setUpdateTime(r *http.Request, u time.Time) *http.Request {
ctx := context.WithValue(r.Context(), httpUpdateTime, u)
return r.WithContext(ctx)
}<|fim▁end|> | func (c *cache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
c.mu.RLock()
b, gz, date := c.json, c.gzjson, c.date |
<|file_name|>xmlcreate.py<|end_file_name|><|fim▁begin|>from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
from xml.dom import minidom
import io
"""
using xml.etree.ElementTree
"""
<|fim▁hole|> rough_string = etree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t")
root = Element('person')
tree = ElementTree(root)
name = Element('name')
root.append(name)
name.text = 'Julie'
root.set('id', '123')
# print etree.tostring(root)
print(prettify(root))
tree.write(open('person.xml', 'w'))
f2 = io.open('person2.xml', 'w', encoding='utf-8')
f2.write(prettify(root))<|fim▁end|> | def prettify(elem):
"""Return a pretty-printed XML string for the Element.
""" |
<|file_name|>problem3.rs<|end_file_name|><|fim▁begin|>/* Run tests with:
*
* rustc --test problem3.rs ; ./problem3
*
*/
fn prime_factors(mut n: i64) -> Vec<i64> {
let mut divisor = 2;
let mut factors: Vec<i64> = Vec::new();
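    // Trial division: strip the smallest factor of n each time it divides.
    // Resetting the divisor to 2 after a hit is redundant (factors come out
    // in increasing order) but keeps the loop simple; once divisor exceeds
    // sqrt(n), whatever is left of n is itself prime and is pushed last.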
while divisor <= (n as f64).sqrt() as i64 {
if n%divisor == 0 {
factors.push(divisor);
n = n / divisor;
divisor = 2;
} else {
divisor += 1;
}
}
factors.push(n);
return factors;
}
pub fn main() {
let factors = prime_factors(600851475143);
let largest_prime_factor = factors.last().unwrap();
println!("largest prime factor == {}", largest_prime_factor);
}
#[cfg(test)]
mod test {
use super::prime_factors;
#[test]
fn correct_answer() {
let factors = prime_factors(600851475143);
let expected_answer = 6857;
let computed_answer = *factors.last().unwrap();<|fim▁hole|> assert_eq!(computed_answer, expected_answer);
}
}<|fim▁end|> | |
<|file_name|>machine.py<|end_file_name|><|fim▁begin|>import argparse
from dvc.command.base import CmdBase, append_doc_link, fix_subparsers
from dvc.command.config import CmdConfig
from dvc.compare import TabularData
from dvc.config import ConfigError
from dvc.exceptions import DvcException
from dvc.types import Dict, List
from dvc.ui import ui
from dvc.utils import format_link
class MachineDisabledError(ConfigError):
def __init__(self):
super().__init__("Machine feature is disabled")
class CmdMachineConfig(CmdConfig):
def __init__(self, args):
super().__init__(args)
if not self.config["feature"].get("machine", False):
raise MachineDisabledError
if getattr(self.args, "name", None):
self.args.name = self.args.name.lower()
def _check_exists(self, conf):
if self.args.name not in conf["machine"]:
raise ConfigError(f"machine '{self.args.name}' doesn't exist.")
class CmdMachineAdd(CmdMachineConfig):
def run(self):
from dvc.machine import validate_name
validate_name(self.args.name)
if self.args.default:
ui.write(f"Setting '{self.args.name}' as a default machine.")
with self.config.edit(self.args.level) as conf:
if self.args.name in conf["machine"] and not self.args.force:
raise ConfigError(
"machine '{}' already exists. Use `-f|--force` to "
"overwrite it.".format(self.args.name)
)
conf["machine"][self.args.name] = {"cloud": self.args.cloud}
if self.args.default:
conf["core"]["machine"] = self.args.name
return 0
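# Illustrative CLI flow for the subcommands defined in this module (machine
# names are examples only):
#   dvc machine add myaws aws --default
#   dvc machine create myaws
#   dvc machine status myaws
#   dvc machine ssh myaws
#   dvc machine destroy myaws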
class CmdMachineRemove(CmdMachineConfig):
def run(self):
with self.config.edit(self.args.level) as conf:
self._check_exists(conf)
del conf["machine"][self.args.name]
up_to_level = self.args.level or "repo"
# Remove core.machine refs to this machine in any shadowing configs
for level in reversed(self.config.LEVELS):
with self.config.edit(level) as conf:
if conf["core"].get("machine") == self.args.name:
del conf["core"]["machine"]
if level == up_to_level:
break
return 0
class CmdMachineList(CmdMachineConfig):
TABLE_COLUMNS = [
"name",
"cloud",
"region",
"image",
"spot",
"spot_price",
"instance_hdd_size",
"instance_type",
"ssh_private",
"startup_script",
]
PRIVATE_COLUMNS = ["ssh_private", "startup_script"]
def _hide_private(self, conf):
for machine in conf:
for column in self.PRIVATE_COLUMNS:
if column in conf[machine]:
conf[machine][column] = "***"
def _show_origin(self):
levels = [self.args.level] if self.args.level else self.config.LEVELS
for level in levels:
conf = self.config.read(level)["machine"]
if self.args.name:
conf = conf.get(self.args.name, {})
self._hide_private(conf)
prefix = self._config_file_prefix(True, self.config, level)
configs = list(self._format_config(conf, prefix))
if configs:
ui.write("\n".join(configs))
def _show_table(self):
td = TabularData(self.TABLE_COLUMNS, fill_value="-")
conf = self.config.read()["machine"]
if self.args.name:
conf = {self.args.name: conf.get(self.args.name, {})}
self._hide_private(conf)
for machine, machine_config in conf.items():
machine_config["name"] = machine
td.row_from_dict(machine_config)
td.dropna("cols", "all")
td.render()
def run(self):
if self.args.show_origin:
self._show_origin()
else:
self._show_table()
return 0
class CmdMachineModify(CmdMachineConfig):
def run(self):
from dvc.config import merge
with self.config.edit(self.args.level) as conf:
merged = self.config.load_config_to_level(self.args.level)
merge(merged, conf)
self._check_exists(merged)
if self.args.name not in conf["machine"]:
conf["machine"][self.args.name] = {}
section = conf["machine"][self.args.name]
if self.args.unset:
section.pop(self.args.option, None)
else:
section[self.args.option] = self.args.value
return 0
class CmdMachineRename(CmdBase):
def _check_exists(self, conf):
if self.args.name not in conf["machine"]:
raise ConfigError(f"machine '{self.args.name}' doesn't exist.")
def _rename_default(self, conf):
if conf["core"].get("machine") == self.args.name:
conf["core"]["machine"] = self.args.new
def _check_before_rename(self):
from dvc.machine import validate_name
validate_name(self.args.new)
all_config = self.config.load_config_to_level(None)
if self.args.new in all_config.get("machine", {}):
raise ConfigError(
"Rename failed. Machine '{}' already exists.".format(
self.args.new
)
)
ui.write(f"Rename machine '{self.args.name}' to '{self.args.new}'.")
def run(self):
self._check_before_rename()
with self.config.edit(self.args.level) as conf:
self._check_exists(conf)
conf["machine"][self.args.new] = conf["machine"][self.args.name]
try:
self.repo.machine.rename(self.args.name, self.args.new)
except DvcException as error:
del conf["machine"][self.args.new]
raise ConfigError("terraform rename failed") from error
del conf["machine"][self.args.name]
self._rename_default(conf)
up_to_level = self.args.level or "repo"
for level in reversed(self.config.LEVELS):
if level == up_to_level:
break
with self.config.edit(level) as level_conf:
self._rename_default(level_conf)
return 0
class CmdMachineDefault(CmdMachineConfig):
def run(self):
if self.args.name is None and not self.args.unset:
conf = self.config.read(self.args.level)
try:
print(conf["core"]["machine"])
except KeyError:
ui.write("No default machine set")
return 1
else:
with self.config.edit(self.args.level) as conf:
if self.args.unset:
conf["core"].pop("machine", None)
else:
merged_conf = self.config.load_config_to_level(
self.args.level
)
if (
self.args.name in conf["machine"]
or self.args.name in merged_conf["machine"]
):
conf["core"]["machine"] = self.args.name
else:
raise ConfigError(
"default machine must be present in machine "
"list."
)
return 0
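# Sketch of the expected effect, assuming a machine named "myaws" already
# exists in the merged machine list:
#
#     $ dvc machine default myaws    # .dvc/config gains:
#                                    #   [core]
#                                    #       machine = myaws
#     $ dvc machine default --unset  # drops the [core] machine entry again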
class CmdMachineCreate(CmdBase):
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
self.repo.machine.create(self.args.name)
return 0
class CmdMachineStatus(CmdBase):
INSTANCE_FIELD = ["name", "instance", "status"]
SHOWN_FIELD = [
"cloud",
"instance_ip",
"instance_type",
"instance_hdd_size",
"instance_gpu",
]
def _add_row(
self,
name: str,
all_status: List[Dict],
td: TabularData,
):
if not all_status:
row = [name, None, "offline"]
td.append(row)
for i, status in enumerate(all_status, start=1):
row = [name, f"num_{i}", "running" if status else "offline"]
for field in self.SHOWN_FIELD:
value = str(status.get(field, ""))
row.append(value)
td.append(row)
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
td = TabularData(
self.INSTANCE_FIELD + self.SHOWN_FIELD, fill_value="-"
)
if self.args.name:
all_status = list(self.repo.machine.status(self.args.name))
self._add_row(self.args.name, all_status, td)
else:
name_set = set()
for level in self.repo.config.LEVELS:
conf = self.repo.config.read(level)["machine"]
name_set.update(conf.keys())
name_list = list(name_set)
for name in sorted(name_list):
all_status = list(self.repo.machine.status(name))
self._add_row(name, all_status, td)
td.dropna("cols", "all")
td.render()
return 0
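# Example of the rendered status table; rows and values are illustrative,
# not captured output (columns that are empty everywhere are dropped by
# the dropna call above):
#
#     name   instance  status   cloud  instance_ip  instance_type
#     myaws  num_1     running  aws    1.2.3.4      t2.micro
#     spare  -         offline  -      -            -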
class CmdMachineDestroy(CmdBase):
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
self.repo.machine.destroy(self.args.name)
return 0
class CmdMachineSsh(CmdBase):
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
self.repo.machine.run_shell(self.args.name)
return 0
def add_parser(subparsers, parent_parser):
from dvc.command.config import parent_config_parser
machine_HELP = "Set up and manage cloud machines."
machine_parser = subparsers.add_parser(
"machine",
parents=[parent_parser],
description=append_doc_link(machine_HELP, "machine"),
# NOTE: suppress help during development to hide command
# help=machine_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_subparsers = machine_parser.add_subparsers(
dest="cmd",
help="Use `dvc machine CMD --help` for " "command-specific help.",
)
fix_subparsers(machine_subparsers)
machine_ADD_HELP = "Add a new data machine."
machine_add_parser = machine_subparsers.add_parser(
"add",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_ADD_HELP, "machine/add"),
help=machine_ADD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_add_parser.add_argument("name", help="Name of the machine")
machine_add_parser.add_argument(
"cloud",
help="Machine cloud. See full list of supported clouds at {}".format(
format_link(
"https://github.com/iterative/"
"terraform-provider-iterative#machine"
)
),
)
machine_add_parser.add_argument(
"-d",
"--default",
action="store_true",
default=False,
help="Set as default machine.",
)
machine_add_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Force overwriting existing configs",
)
machine_add_parser.set_defaults(func=CmdMachineAdd)
machine_DEFAULT_HELP = "Set/unset the default machine."
machine_default_parser = machine_subparsers.add_parser(
"default",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_DEFAULT_HELP, "machine/default"),
help=machine_DEFAULT_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_default_parser.add_argument(
"name", nargs="?", help="Name of the machine"
)
machine_default_parser.add_argument(
"-u",
"--unset",
action="store_true",
default=False,
help="Unset default machine.",
)
machine_default_parser.set_defaults(func=CmdMachineDefault)
machine_LIST_HELP = "List the configuration of one/all machines."
machine_list_parser = machine_subparsers.add_parser(
"list",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_LIST_HELP, "machine/list"),
help=machine_LIST_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_list_parser.add_argument(
"--show-origin",
default=False,
action="store_true",
help="Show the source file containing each config value.",
)
machine_list_parser.add_argument(
"name",
nargs="?",
type=str,
help="name of machine to specify",
)
machine_list_parser.set_defaults(func=CmdMachineList)
machine_MODIFY_HELP = "Modify the configuration of an machine."
machine_modify_parser = machine_subparsers.add_parser(
"modify",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_MODIFY_HELP, "machine/modify"),
help=machine_MODIFY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_modify_parser.add_argument("name", help="Name of the machine")
machine_modify_parser.add_argument(
"option", help="Name of the option to modify."
)
machine_modify_parser.add_argument(
"value", nargs="?", help="(optional) Value of the option."
)
machine_modify_parser.add_argument(
"-u",
"--unset",
default=False,
action="store_true",
help="Unset option.",
)
machine_modify_parser.set_defaults(func=CmdMachineModify)
machine_RENAME_HELP = "Rename a machine "
machine_rename_parser = machine_subparsers.add_parser(
"rename",
parents=[parent_config_parser, parent_parser],
        description=append_doc_link(machine_RENAME_HELP, "machine/rename"),
help=machine_RENAME_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_rename_parser.add_argument("name", help="Machine to be renamed")
machine_rename_parser.add_argument("new", help="New name of the machine")
machine_rename_parser.set_defaults(func=CmdMachineRename)
machine_REMOVE_HELP = "Remove an machine."
machine_remove_parser = machine_subparsers.add_parser(
"remove",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_REMOVE_HELP, "machine/remove"),
help=machine_REMOVE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_remove_parser.add_argument(
"name", help="Name of the machine to remove."
)
machine_remove_parser.set_defaults(func=CmdMachineRemove)
machine_CREATE_HELP = "Create and start a machine instance."
machine_create_parser = machine_subparsers.add_parser(
"create",
parents=[parent_parser],
description=append_doc_link(machine_CREATE_HELP, "machine/create"),
help=machine_CREATE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_create_parser.add_argument(
"name", help="Name of the machine to create."
)
machine_create_parser.set_defaults(func=CmdMachineCreate)
machine_STATUS_HELP = (
"List the status of running instances for one/all machines."
)
machine_status_parser = machine_subparsers.add_parser(
"status",
parents=[parent_parser],
description=append_doc_link(machine_STATUS_HELP, "machine/status"),
help=machine_STATUS_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_status_parser.add_argument(
"name", nargs="?", help="(optional) Name of the machine."
)
machine_status_parser.set_defaults(func=CmdMachineStatus)
machine_DESTROY_HELP = "Destroy an machine instance."
machine_destroy_parser = machine_subparsers.add_parser(
"destroy",
parents=[parent_parser],
description=append_doc_link(machine_DESTROY_HELP, "machine/destroy"),
help=machine_DESTROY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_destroy_parser.add_argument(
"name", help="Name of the machine instance to destroy."
)
machine_destroy_parser.set_defaults(func=CmdMachineDestroy)
<|fim▁hole|> machine_SSH_HELP = "Connect to a machine via SSH."
machine_ssh_parser = machine_subparsers.add_parser(
"ssh",
parents=[parent_parser],
description=append_doc_link(machine_SSH_HELP, "machine/ssh"),
help=machine_SSH_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_ssh_parser.add_argument(
"name", help="Name of the machine instance to connect to."
)
machine_ssh_parser.set_defaults(func=CmdMachineSsh)<|fim▁end|> | |
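# A minimal end-to-end workflow these subcommands are intended to cover;
# the machine name and cloud below are placeholders, not tested commands:
#
#     $ dvc machine add myaws aws
#     $ dvc machine create myaws
#     $ dvc machine ssh myaws
#     $ dvc machine destroy myaws
#     $ dvc machine remove myaws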
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![feature(core_intrinsics)]
#![feature(generators, generator_trait)]
pub mod structs;
pub mod traits;
pub mod operators;<|fim▁hole|>pub mod algorithms;
pub mod utils;
pub mod strings;
pub mod IO;
pub mod concurrency;
pub mod low_level;
pub mod macros;
pub mod unsafes;
pub mod ffi;<|fim▁end|> | pub mod closures;
pub mod iterators; |
<|file_name|>overlap-permitted-for-annotated-marker-traits.rs<|end_file_name|><|fim▁begin|>// run-pass
// Tests for RFC 1268: we allow overlapping impls of marker traits,
// that is, traits with #[marker]. In this case, a type `T` is
// `MyMarker` if it is either `Debug` or `Display`.
#![feature(marker_trait_attr)]
use std::fmt::{Debug, Display};
#[marker] trait MyMarker {}
<|fim▁hole|>fn foo<T: MyMarker>(t: T) -> T {
t
}
fn main() {
// Debug && Display:
assert_eq!(1, foo(1));
assert_eq!(2.0, foo(2.0));
// Debug && !Display:
assert_eq!(vec![1], foo(vec![1]));
}<|fim▁end|> | impl<T: Debug> MyMarker for T {}
impl<T: Display> MyMarker for T {}
|