text
stringlengths
2
100k
meta
dict
/* * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ package org.antlr.v4.runtime; import org.antlr.v4.runtime.misc.Pair; /** The default mechanism for creating tokens. It's used by default in Lexer and * the error handling strategy (to create missing tokens). Notifying the parser * of a new factory means that it notifies its token source and error strategy. */ public interface TokenFactory<Symbol extends Token> { /** This is the method used to create tokens in the lexer and in the * error handling strategy. If text!=null, than the start and stop positions * are wiped to -1 in the text override is set in the CommonToken. */ Symbol create(Pair<TokenSource, CharStream> source, int type, String text, int channel, int start, int stop, int line, int charPositionInLine); /** Generically useful */ Symbol create(int type, String text); }
{ "pile_set_name": "Github" }
using System; namespace Microsoft.Research.MultiWorldTesting.ClientLibrary { /// <summary> /// When a model cannot be found. /// </summary> public class ModelNotFoundException : Exception { /// <summary> /// Creates a new instance. /// </summary> public ModelNotFoundException(string message) : base(message) { } } }
{ "pile_set_name": "Github" }
requestor-fake text ^ Text fromString: ''
{ "pile_set_name": "Github" }
##################################################################### ## ## NormaliserDialog ## ##################################################################### NormaliserDialog.Shell.Title=Normalisation de ligne NormaliserDialog.Stepname.Label=Nom étape NormaliserDialog.TypeField.Label=Champ type NormaliserDialog.Fields.Label=Champs NormaliserDialog.GetFields.Button=\ &Récupérer champs NormaliserDialog.ColumnInfo.Fieldname=Nom champ NormaliserDialog.ColumnInfo.Type=Type NormaliserDialog.ColumnInfo.NewField=nouveau champ NormaliserDialog.FailedToGetFields.DialogTitle=Récupérer les champs en échec NormaliserDialog.FailedToGetFields.DialogMessage=Impossible de récupérer les champs depuis les étapes précédentes ##################################################################### ## ## Normaliser ## ##################################################################### Normaliser.Log.CouldNotFindFieldInRow=Impossible de trouver le champ [{0} dans la ligne! Normaliser.Log.LineNumber=N°Ligne ##################################################################### ## ## NormaliserMeta ## ##################################################################### NormaliserMeta.Exception.UnableToLoadStepInfoFromXML=Impossible de charger l''étape depuis le fichier XML NormaliserMeta.Exception.UnexpectedErrorReadingStepInfoFromRepository=Erreur inattendue lors de la lecture des informations de l''étape depuis le référentiel NormaliserMeta.Exception.UnableToSaveStepInfoToRepository=Impossible de sauvegarder dans le référentiel les informations de l''étape avec l''id= NormaliserMeta.CheckResult.StepReceivingFieldsOK=L'étape est connecté aux étapes précédentes er récupère {0} champs NormaliserMeta.CheckResult.FieldsNotFound=Les champs à normaliser sont introuvables dans le flux d''entrée: NormaliserMeta.CheckResult.AllFieldsFound=Tous les champs à normaliser ont été trouvés dans le flux d''entrée. 
NormaliserMeta.CheckResult.CouldNotReadFieldsFromPreviousStep=Impossible de lire les champs depuis l''étape précédente. NormaliserMeta.CheckResult.StepReceivingInfoOK=L'étape reçoit des informations des aures étapes. NormaliserMeta.CheckResult.NoInputReceivedError=Aucun flux d''entrée en provenance des autres étapes\!
{ "pile_set_name": "Github" }
// Copyright © 2008-2020 Pioneer Developers. See AUTHORS.txt for details // Licensed under the terms of the GPL v3. See licenses/GPL-3.txt #ifndef _FLOATCOMPARISON_H #define _FLOATCOMPARISON_H #include <SDL_stdinc.h> #include <limits> #ifdef _MSC_VER #include <float.h> // for _finite #else #include <cmath> // for std::isfinite #endif // Fuzzy floating point comparisons based on: // http://realtimecollisiondetection.net/blog/?p=89 // (absolute & relative error tolerance) // // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm // (ULP based tolerance) // // ULP-based tolerance implementation takes some architectural ideas from the // implementation in the Google test framework, and // http://stackoverflow.com/questions/17333/most-effective-way-for-float-and-double-comparison // provides (for float & double): // bool is_equal_exact(float a, float b); // bool is_equal_ulps(float a, float b, int ulps = DefaultUlpTolerance); // int32_t float_ulp_difference(float a, float b); // bool is_equal_relative(float a, float b, float tolerance = DefaultRelTolerance()); // bool is_equal_absolute(float a, float b, float tolerance = DefaultAbsTolerance()); // bool is_equal_general(float a, float b, float tolerance = DefaultTolerance()); // bool is_equal_general(float a, float b, float relative_tolerance, float absolute_tolerance); // bool is_zero_exact(float x); // bool is_zero_general(float x, float tolerance = IEEEFloatTraits<float>::DefaultRelTolerance()); // bool is_nan(float x); // bool is_finite(float x); // ==================================================================== // in the following code, IEEEFloatTraits<T>::bool_type is used to limit // the application of the functions by SFINAE template <typename T> struct IEEEFloatTraits; // --- float function helpers template <typename T> inline typename IEEEFloatTraits<T>::float_type float_abs(T x) { return (x < T(0)) ? 
(-x) : x; } template <typename T> inline typename IEEEFloatTraits<T>::float_type float_max(T x, T y) { return (y > x) ? y : x; } template <typename T> inline typename IEEEFloatTraits<T>::float_type float_max(T x, T y, T z) { return float_max(x, float_max(y, z)); } // --- float property helpers template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_nan_bits(const typename IEEEFloatTraits<T>::uint_type &bits) { typedef typename IEEEFloatTraits<T>::uint_type uint_type; const uint_type top = IEEEFloatTraits<T>::TopBit; const uint_type ebits = IEEEFloatTraits<T>::ExponentBits; // NaN has the exponent bits set, and at least one mantissa bit set // (therefore, if you mask off the top bit, the result must be strictly greater than // just the exponent bits set; if it's equal then it's just an infinity; if it's // less, then it's a valid finite number) return ((bits & ~top) > ebits); } template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_finite_bits(const typename IEEEFloatTraits<T>::uint_type &bits) { typedef typename IEEEFloatTraits<T>::uint_type uint_type; const uint_type ebits = IEEEFloatTraits<T>::ExponentBits; return ((bits & ebits) != ebits); } // --- infinity template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_finite(T x) { #ifdef _MSC_VER return _finite(x); #else return std::isfinite(x); #endif } // --- exact comparisons, and checking for NaN #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wfloat-equal" #endif inline bool is_equal_exact(float a, float b) { return (a == b); } inline bool is_equal_exact(double a, double b) { return (a == b); } inline bool is_zero_exact(float x) { return (x == 0.0f); } inline bool is_zero_exact(double x) { return (x == 0.0); } inline bool is_nan(float x) { return (x != x); } inline bool is_nan(double x) { return (x != x); } #ifdef __GNUC__ #pragma GCC diagnostic pop #endif // --- relative & absolute error comparisons template <typename T> inline typename 
IEEEFloatTraits<T>::bool_type is_equal_relative(T a, T b, T tol = IEEEFloatTraits<T>::DefaultRelTolerance()) { return (float_abs(a - b) <= tol * float_max(float_abs(a), float_abs(b))); } template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_equal_absolute(T a, T b, T tol = IEEEFloatTraits<T>::DefaultAbsTolerance()) { return (float_abs(a - b) <= tol); } template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_equal_general(T a, T b, T rel_tol, T abs_tol) { return (float_abs(a - b) <= float_max(abs_tol, rel_tol * float_max(float_abs(a), float_abs(b)))); } template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_equal_general(T a, T b, T tol = IEEEFloatTraits<T>::DefaultTolerance()) { return (float_abs(a - b) <= tol * float_max(T(1), float_abs(a), float_abs(b))); } template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_zero_general(T x, T tol = IEEEFloatTraits<T>::DefaultRelTolerance()) { return (float_abs(x) <= tol); } // --- ulp-based comparisons template <typename T> inline typename IEEEFloatTraits<T>::int_type float_ulp_difference(T a, T b) { typedef typename IEEEFloatTraits<T>::FloatOrInt union_type; union_type afi, bfi; afi.f = a; bfi.f = b; // transform from sign-magnitude to two's-complement if (afi.i < 0) afi.ui = (IEEEFloatTraits<T>::TopBit - afi.ui); if (bfi.i < 0) bfi.ui = (IEEEFloatTraits<T>::TopBit - bfi.ui); return (bfi.i - afi.i); } // IEEEFloatTraits<T>::bool_type used for SFINAE template <typename T> inline typename IEEEFloatTraits<T>::bool_type is_equal_ulps(T a, T b, typename IEEEFloatTraits<T>::int_type max_ulps = IEEEFloatTraits<T>::DefaultUlpTolerance) { typedef typename IEEEFloatTraits<T>::FloatOrInt union_type; typedef typename IEEEFloatTraits<T>::int_type int_type; union_type afi, bfi; afi.f = a; bfi.f = b; // Infinities aren't close to anything except themselves if ((!is_finite_bits<T>(afi.ui) && is_finite_bits<T>(bfi.ui)) || (is_finite_bits<T>(afi.ui) && 
!is_finite_bits<T>(bfi.ui))) return false; // IEEE says NaNs are unequal to everything (even themselves) if (is_nan_bits<T>(afi.ui) || is_nan_bits<T>(bfi.ui)) return false; // transform from sign-magnitude to two's-complement if (afi.i < 0) afi.ui = (IEEEFloatTraits<T>::TopBit - afi.ui); if (bfi.i < 0) bfi.ui = (IEEEFloatTraits<T>::TopBit - bfi.ui); int_type difference = (bfi.i - afi.i); difference = (difference < int_type(0)) ? -difference : difference; return (difference <= max_ulps); } // ==================================================================== template <typename T> struct IEEEFloatTraits {}; template <> struct IEEEFloatTraits<double> { typedef double float_type; typedef bool bool_type; typedef int64_t int_type; typedef uint64_t uint_type; union FloatOrInt { double f; uint_type ui; int_type i; }; static const uint_type TopBit = static_cast<uint_type>(1) << (sizeof(double) * 8 - 1); static const uint_type ExponentBits = (~static_cast<uint_type>(0) << std::numeric_limits<double>::digits) & ~TopBit; static const uint_type MantissaBits = ~TopBit & ~ExponentBits; static const int_type DefaultUlpTolerance = 16; static double DefaultAbsTolerance() { return 1e-12; } static double DefaultRelTolerance() { return 1e-6; } static double DefaultTolerance() { return 1e-8; } static double SmallestNormalisedValue() { return std::numeric_limits<double>::min(); } }; template <> struct IEEEFloatTraits<float> { typedef float float_type; typedef bool bool_type; typedef int32_t int_type; typedef uint32_t uint_type; union FloatOrInt { float f; uint_type ui; int_type i; }; static const uint_type TopBit = uint_type(1) << (sizeof(float) * 8 - 1); static const uint_type ExponentBits = (~uint_type(0) << std::numeric_limits<float>::digits) & ~TopBit; static const uint_type MantissaBits = ~TopBit & ~ExponentBits; static const int_type DefaultUlpTolerance = 4; static float DefaultAbsTolerance() { return 1e-6f; } static float DefaultRelTolerance() { return 1e-5f; } static float 
DefaultTolerance() { return 1e-5f; } static float SmallestNormalisedValue() { return std::numeric_limits<float>::min(); } }; #endif
{ "pile_set_name": "Github" }
/* * 2007 Victor Hugo Borja <[email protected]> * Copyright 2001-2007 Adrian Thurston <[email protected]> */ /* This file is part of Ragel. * * Ragel is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Ragel is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Ragel; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _RUBY_FTABCODEGEN_H #define _RUBY_FTABCODEGEN_H #include "rubytable.h" class RubyFTabCodeGen : public RubyTabCodeGen { public: RubyFTabCodeGen( ostream &out ): RubyTabCodeGen(out) {} protected: std::ostream &TO_STATE_ACTION_SWITCH(); std::ostream &FROM_STATE_ACTION_SWITCH(); std::ostream &EOF_ACTION_SWITCH(); std::ostream &ACTION_SWITCH(); void GOTO( ostream &out, int gotoDest, bool inFinish ); void GOTO_EXPR( ostream &out, GenInlineItem *ilItem, bool inFinish ); void CALL( ostream &out, int callDest, int targState, bool inFinish ); void CALL_EXPR(ostream &out, GenInlineItem *ilItem, int targState, bool inFinish ); void RET( ostream &out, bool inFinish ); void BREAK( ostream &out, int targState ); int TO_STATE_ACTION( RedStateAp *state ); int FROM_STATE_ACTION( RedStateAp *state ); int EOF_ACTION( RedStateAp *state ); virtual int TRANS_ACTION( RedTransAp *trans ); void writeData(); void writeExec(); void calcIndexSize(); }; /* * Local Variables: * mode: c++ * indent-tabs-mode: 1 * c-file-style: "bsd" * End: */ #endif
{ "pile_set_name": "Github" }
/* * BSG helper library * * Copyright (C) 2008 James Smart, Emulex Corporation * Copyright (C) 2011 Red Hat, Inc. All rights reserved. * Copyright (C) 2011 Mike Christie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/bsg-lib.h> #include <linux/export.h> #include <scsi/scsi_cmnd.h> #include <scsi/sg.h> #define uptr64(val) ((void __user *)(uintptr_t)(val)) static int bsg_transport_check_proto(struct sg_io_v4 *hdr) { if (hdr->protocol != BSG_PROTOCOL_SCSI || hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; return 0; } static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, fmode_t mode) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); job->request_len = hdr->request_len; job->request = memdup_user(uptr64(hdr->request), hdr->request_len); return PTR_ERR_OR_ZERO(job->request); } static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); int ret = 0; /* * The assignments below don't make much sense, but are kept for * bug by bug backwards compatibility: */ hdr->device_status = job->result & 0xff; hdr->transport_status = host_byte(job->result); hdr->driver_status 
= driver_byte(job->result); hdr->info = 0; if (hdr->device_status || hdr->transport_status || hdr->driver_status) hdr->info |= SG_INFO_CHECK; hdr->response_len = 0; if (job->result < 0) { /* we're only returning the result field in the reply */ job->reply_len = sizeof(u32); ret = job->result; } if (job->reply_len && hdr->response) { int len = min(hdr->max_response_len, job->reply_len); if (copy_to_user(uptr64(hdr->response), job->reply, len)) ret = -EFAULT; else hdr->response_len = len; } /* we assume all request payload was transferred, residual == 0 */ hdr->dout_resid = 0; if (rq->next_rq) { unsigned int rsp_len = job->reply_payload.payload_len; if (WARN_ON(job->reply_payload_rcv_len > rsp_len)) hdr->din_resid = 0; else hdr->din_resid = rsp_len - job->reply_payload_rcv_len; } else { hdr->din_resid = 0; } return ret; } static void bsg_transport_free_rq(struct request *rq) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); kfree(job->request); } static const struct bsg_ops bsg_transport_ops = { .check_proto = bsg_transport_check_proto, .fill_hdr = bsg_transport_fill_hdr, .complete_rq = bsg_transport_complete_rq, .free_rq = bsg_transport_free_rq, }; /** * bsg_teardown_job - routine to teardown a bsg job * @kref: kref inside bsg_job that is to be torn down */ static void bsg_teardown_job(struct kref *kref) { struct bsg_job *job = container_of(kref, struct bsg_job, kref); struct request *rq = blk_mq_rq_from_pdu(job); put_device(job->dev); /* release reference for the request */ kfree(job->request_payload.sg_list); kfree(job->reply_payload.sg_list); blk_end_request_all(rq, BLK_STS_OK); } void bsg_job_put(struct bsg_job *job) { kref_put(&job->kref, bsg_teardown_job); } EXPORT_SYMBOL_GPL(bsg_job_put); int bsg_job_get(struct bsg_job *job) { return kref_get_unless_zero(&job->kref); } EXPORT_SYMBOL_GPL(bsg_job_get); /** * bsg_job_done - completion routine for bsg requests * @job: bsg_job that is complete * @result: job reply result * @reply_payload_rcv_len: length of payload 
recvd * * The LLD should call this when the bsg job has completed. */ void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) { job->result = result; job->reply_payload_rcv_len = reply_payload_rcv_len; blk_complete_request(blk_mq_rq_from_pdu(job)); } EXPORT_SYMBOL_GPL(bsg_job_done); /** * bsg_softirq_done - softirq done routine for destroying the bsg requests * @rq: BSG request that holds the job to be destroyed */ static void bsg_softirq_done(struct request *rq) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); bsg_job_put(job); } static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) { size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); BUG_ON(!req->nr_phys_segments); buf->sg_list = kzalloc(sz, GFP_KERNEL); if (!buf->sg_list) return -ENOMEM; sg_init_table(buf->sg_list, req->nr_phys_segments); buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); buf->payload_len = blk_rq_bytes(req); return 0; } /** * bsg_prepare_job - create the bsg_job structure for the bsg request * @dev: device that is being sent the bsg request * @req: BSG request that needs a job structure */ static bool bsg_prepare_job(struct device *dev, struct request *req) { struct request *rsp = req->next_rq; struct bsg_job *job = blk_mq_rq_to_pdu(req); int ret; job->timeout = req->timeout; if (req->bio) { ret = bsg_map_buffer(&job->request_payload, req); if (ret) goto failjob_rls_job; } if (rsp && rsp->bio) { ret = bsg_map_buffer(&job->reply_payload, rsp); if (ret) goto failjob_rls_rqst_payload; } job->dev = dev; /* take a reference for the request */ get_device(job->dev); kref_init(&job->kref); return true; failjob_rls_rqst_payload: kfree(job->request_payload.sg_list); failjob_rls_job: job->result = -ENOMEM; return false; } /** * bsg_request_fn - generic handler for bsg requests * @q: request queue to manage * * On error the create_bsg_job function should return a -Exyz error value * that will be set to ->result. 
* * Drivers/subsys should pass this to the queue init function. */ static void bsg_request_fn(struct request_queue *q) __releases(q->queue_lock) __acquires(q->queue_lock) { struct device *dev = q->queuedata; struct request *req; int ret; if (!get_device(dev)) return; while (1) { req = blk_fetch_request(q); if (!req) break; spin_unlock_irq(q->queue_lock); if (!bsg_prepare_job(dev, req)) { blk_end_request_all(req, BLK_STS_OK); spin_lock_irq(q->queue_lock); continue; } ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req)); spin_lock_irq(q->queue_lock); if (ret) break; } spin_unlock_irq(q->queue_lock); put_device(dev); spin_lock_irq(q->queue_lock); } /* called right after the request is allocated for the request_queue */ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp) { struct bsg_job *job = blk_mq_rq_to_pdu(req); job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp); if (!job->reply) return -ENOMEM; return 0; } /* called right before the request is given to the request_queue user */ static void bsg_initialize_rq(struct request *req) { struct bsg_job *job = blk_mq_rq_to_pdu(req); void *reply = job->reply; memset(job, 0, sizeof(*job)); job->reply = reply; job->reply_len = SCSI_SENSE_BUFFERSIZE; job->dd_data = job + 1; } static void bsg_exit_rq(struct request_queue *q, struct request *req) { struct bsg_job *job = blk_mq_rq_to_pdu(req); kfree(job->reply); } /** * bsg_setup_queue - Create and add the bsg hooks so we can receive requests * @dev: device to attach bsg device to * @name: device to give bsg device * @job_fn: bsg job handler * @dd_job_size: size of LLD data needed for each job */ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, bsg_job_fn *job_fn, int dd_job_size) { struct request_queue *q; int ret; q = blk_alloc_queue(GFP_KERNEL); if (!q) return ERR_PTR(-ENOMEM); q->cmd_size = sizeof(struct bsg_job) + dd_job_size; q->init_rq_fn = bsg_init_rq; q->exit_rq_fn = bsg_exit_rq; q->initialize_rq_fn = bsg_initialize_rq; 
q->request_fn = bsg_request_fn; ret = blk_init_allocated_queue(q); if (ret) goto out_cleanup_queue; q->queuedata = dev; q->bsg_job_fn = job_fn; blk_queue_flag_set(QUEUE_FLAG_BIDI, q); blk_queue_softirq_done(q, bsg_softirq_done); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); ret = bsg_register_queue(q, dev, name, &bsg_transport_ops); if (ret) { printk(KERN_ERR "%s: bsg interface failed to " "initialize - register queue\n", dev->kobj.name); goto out_cleanup_queue; } return q; out_cleanup_queue: blk_cleanup_queue(q); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(bsg_setup_queue);
{ "pile_set_name": "Github" }
{ "compilerOptions": { "target": "es2017", "module": "commonjs", "moduleResolution": "node", "lib": ["es7", "esnext.asynciterable"], "experimentalDecorators": true, "emitDecoratorMetadata": true, "esModuleInterop": true, "noImplicitAny": true, "noUnusedLocals": true, "baseUrl": ".", "rootDir": ".", "outDir": "dist", "sourceMap": true, "paths": { "*": [ "node_modules/*", "types/*" ] } }, "include": [ "**/*" ] }
{ "pile_set_name": "Github" }
/**************************************************************************** * arch/sim/src/sim/up_stackframe.c * * Copyright (C) 2013 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <[email protected]> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <sys/types.h> #include <stdint.h> #include <sched.h> #include <debug.h> #include <nuttx/arch.h> #include <arch/irq.h> #include "up_internal.h" /**************************************************************************** * Pre-processor Macros ****************************************************************************/ /* Use a stack alignment of 16 bytes. If necessary frame_size must be * rounded up to the next boundary */ #define STACK_ALIGNMENT 16 /* Stack alignment macros */ #define STACK_ALIGN_MASK (STACK_ALIGNMENT-1) #define STACK_ALIGN_DOWN(a) ((a) & ~STACK_ALIGN_MASK) #define STACK_ALIGN_UP(a) (((a) + STACK_ALIGN_MASK) & ~STACK_ALIGN_MASK) /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: up_stack_frame * * Description: * Allocate a stack frame in the TCB's stack to hold thread-specific data. * This function may be called anytime after up_create_stack() or * up_use_stack() have been called but before the task has been started. * * Thread data may be kept in the stack (instead of in the TCB) if it is * accessed by the user code directly. This includes such things as * argv[]. The stack memory is guaranteed to be in the same protection * domain as the thread. * * The following TCB fields will be re-initialized: * * - adj_stack_size: Stack size after removal of the stack frame from * the stack * - adj_stack_ptr: Adjusted initial stack pointer after the frame has * been removed from the stack. 
This will still be the initial value * of the stack pointer when the task is started. * * Input Parameters: * - tcb: The TCB of new task * - frame_size: The size of the stack frame to allocate. * * Returned Value: * - A pointer to bottom of the allocated stack frame. NULL will be * returned on any failures. The alignment of the returned value is * the same as the alignment of the stack itself. * ****************************************************************************/ FAR void *up_stack_frame(FAR struct tcb_s *tcb, size_t frame_size) { /* Align the frame_size */ frame_size = STACK_ALIGN_UP(frame_size); /* Is there already a stack allocated? Is it big enough? */ if (!tcb->stack_alloc_ptr || tcb->adj_stack_size <= frame_size) { return NULL; } /* Save the adjusted stack values in the struct tcb_s */ tcb->adj_stack_ptr = (uint8_t *)tcb->adj_stack_ptr - frame_size; tcb->adj_stack_size -= frame_size; /* Reset the initial state */ tcb->xcp.regs[JB_SP] = (xcpt_reg_t)tcb->adj_stack_ptr - sizeof(xcpt_reg_t); /* And return a pointer to the allocated memory */ return tcb->adj_stack_ptr; }
{ "pile_set_name": "Github" }
<snippet> <content><![CDATA[cc.EaseRateAction]]></content> <tabTrigger>EaseRateAction</tabTrigger> <scope>source.lua</scope> <description>.</description> </snippet>
{ "pile_set_name": "Github" }
export * from './domUtils'; export * from './columnUtils'; export * from './keyboardUtils'; export * from './selectedCellUtils'; export function assertIsValidKey<R>(key: unknown): asserts key is keyof R { if (key === undefined) { throw new Error('Please specify the rowKey prop to use selection'); } } export function wrapRefs<T>(...refs: readonly React.Ref<T>[]) { return (handle: T | null) => { for (const ref of refs) { if (typeof ref === 'function') { ref(handle); } else if (ref !== null) { // https://github.com/DefinitelyTyped/DefinitelyTyped/issues/31065 // @ts-expect-error ref.current = handle; } } }; }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>BuildVersion</key> <string>24</string> <key>CFBundleShortVersionString</key> <string>266.5</string> <key>CFBundleVersion</key> <string>266.5</string> <key>ProjectName</key> <string>AppleHDA</string> <key>SourceVersion</key> <string>266005000000000</string> </dict> </plist>
{ "pile_set_name": "Github" }
import { set } from './set'; import { pipe } from './pipe'; describe('data first', () => { test('set', () => { expect(set({ a: 1 }, 'a', 2)).toEqual({ a: 2 }); }); }); describe('data last', () => { test('set', () => { expect(pipe({ a: 1 }, set('a', 2))).toEqual({ a: 2 }); }); });
{ "pile_set_name": "Github" }
// // RCDReceiptDetailHeader.h // SealTalk // // Created by 张改红 on 2019/5/31. // Copyright © 2019 RongCloud. All rights reserved. // #import <UIKit/UIKit.h> @class RCMessageModel; NS_ASSUME_NONNULL_BEGIN @protocol RCDReceiptDetailHeaderDelegate <NSObject> - (void)receiptDetailHeaderDidUpdate:(BOOL)isClosed; @end @interface RCDReceiptDetailHeader : UIView @property (nonatomic, weak) id<RCDReceiptDetailHeaderDelegate> delegate; - (instancetype)initWithMessage:(RCMessageModel *)message; @end NS_ASSUME_NONNULL_END
{ "pile_set_name": "Github" }
// Scintilla source code edit control /** @file MarginView.h ** Defines the appearance of the editor margin. **/ // Copyright 1998-2014 by Neil Hodgson <[email protected]> // The License.txt file describes the conditions under which this software may be distributed. #ifndef MARGINVIEW_H #define MARGINVIEW_H namespace Scintilla { void DrawWrapMarker(Surface *surface, PRectangle rcPlace, bool isEndMarker, ColourDesired wrapColour); typedef void (*DrawWrapMarkerFn)(Surface *surface, PRectangle rcPlace, bool isEndMarker, ColourDesired wrapColour); /** * MarginView draws the margins. */ class MarginView { public: std::unique_ptr<Surface> pixmapSelMargin; std::unique_ptr<Surface> pixmapSelPattern; std::unique_ptr<Surface> pixmapSelPatternOffset1; // Highlight current folding block HighlightDelimiter highlightDelimiter; int wrapMarkerPaddingRight; // right-most pixel padding of wrap markers /** Some platforms, notably PLAT_CURSES, do not support Scintilla's native * DrawWrapMarker function for drawing wrap markers. Allow those platforms to * override it instead of creating a new method in the Surface class that * existing platforms must implement as empty. */ DrawWrapMarkerFn customDrawWrapMarker; MarginView() noexcept; void DropGraphics(bool freeObjects); void AllocateGraphics(const ViewStyle &vsDraw); void RefreshPixMaps(Surface *surfaceWindow, WindowID wid, const ViewStyle &vsDraw); void PaintMargin(Surface *surface, Sci::Line topLine, PRectangle rc, PRectangle rcMargin, const EditModel &model, const ViewStyle &vs); }; } #endif
{ "pile_set_name": "Github" }
package inmemory import ( "time" "github.com/sensu/sensu-go/cli/client/config" "github.com/sensu/sensu-go/types" ) // Config describes details associated with making requests type Config struct { url string format string namespace string timeout time.Duration tokens *types.Tokens } // New returns new instance of a config func New(url string) *Config { config := Config{ url: url, format: config.FormatJSON, namespace: config.DefaultNamespace, } return &config } // APIUrl describes the URL where the API can be found func (c *Config) APIUrl() string { return c.url } // Format describes the expected output from the client func (c *Config) Format() string { return c.format } // Namespace describes the context of the request func (c *Config) Namespace() string { return c.namespace } // Timeout describes the timeout for communicating with the backend func (c *Config) Timeout() time.Duration { return c.timeout } // Tokens describes the authorization tokens used to make requests func (c *Config) Tokens() *types.Tokens { return c.tokens } // SaveAPIUrl updates the current value func (c *Config) SaveAPIUrl(val string) error { c.url = val return nil } // SaveFormat updates the current value func (c *Config) SaveFormat(val string) error { c.format = val return nil } // SaveNamespace updates the current value func (c *Config) SaveNamespace(val string) error { c.namespace = val return nil } // SaveTimeout updates the current timeout value func (c *Config) SaveTimeout(val time.Duration) error { c.timeout = val return nil } // SaveTokens updates the current value func (c *Config) SaveTokens(val *types.Tokens) error { c.tokens = val return nil }
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: dcdf29a5abffd7c4db14c2abb0f982e1 MonoImporter: externalObjects: {} serializedVersion: 2 defaultReferences: [] executionOrder: 0 icon: {instanceID: 0} userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
// Doxygen-generated search index fragment: each entry maps a lowercase search
// key to [display name, [documentation page URL, flag, qualified C++ name]].
// Machine-generated — do not edit by hand.
var searchData=
[
  ['savedstate',['SavedState',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_menu_1_1_saved_state.html',1,'com::ab::view::slidingmenu::SlidingMenu']]],
  ['savedstate',['SavedState',['../classcom_1_1ab_1_1view_1_1carousel_1_1_carousel_spinner_1_1_saved_state.html',1,'com::ab::view::carousel::CarouselSpinner']]],
  ['scatterchart',['ScatterChart',['../classcom_1_1ab_1_1view_1_1chart_1_1_scatter_chart.html',1,'com::ab::view::chart']]],
  ['selectionnotifier',['SelectionNotifier',['../classcom_1_1ab_1_1view_1_1carousel_1_1_carousel_adapter_3_01_t_01extends_01_adapter_01_4_1_1_selection_notifier.html',1,'com::ab::view::carousel::CarouselAdapter&lt; T extends Adapter &gt;']]],
  ['seriesselection',['SeriesSelection',['../classcom_1_1ab_1_1view_1_1chart_1_1_series_selection.html',1,'com::ab::view::chart']]],
  ['simpleonpagechangelistener',['SimpleOnPageChangeListener',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_custom_view_above_1_1_simple_on_page_change_listener.html',1,'com::ab::view::slidingmenu::CustomViewAbove']]],
  ['simpleseriesrenderer',['SimpleSeriesRenderer',['../classcom_1_1ab_1_1view_1_1chart_1_1_simple_series_renderer.html',1,'com::ab::view::chart']]],
  ['slidingactivity',['SlidingActivity',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_activity.html',1,'com::ab::view::slidingmenu']]],
  ['slidingactivitybase',['SlidingActivityBase',['../interfacecom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_activity_base.html',1,'com::ab::view::slidingmenu']]],
  ['slidingactivityhelper',['SlidingActivityHelper',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_activity_helper.html',1,'com::ab::view::slidingmenu']]],
  ['slidingfragmentactivity',['SlidingFragmentActivity',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_fragment_activity.html',1,'com::ab::view::slidingmenu']]],
  ['slidinglistactivity',['SlidingListActivity',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_list_activity.html',1,'com::ab::view::slidingmenu']]],
  ['slidingmenu',['SlidingMenu',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_menu.html',1,'com::ab::view::slidingmenu']]],
  ['slidingpreferenceactivity',['SlidingPreferenceActivity',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_preference_activity.html',1,'com::ab::view::slidingmenu']]],
  ['sortorder',['SortOrder',['../enumcom_1_1ab_1_1db_1_1storage_1_1_ab_storage_query_1_1_sort_order.html',1,'com::ab::db::storage::AbStorageQuery']]],
  ['string',['string',['../classcom_1_1ab_1_1_r_1_1string.html',1,'com::ab::R']]]
];
{ "pile_set_name": "Github" }
#ifndef BOOST_ARCHIVE_BASIC_SERIALIZER_HPP
#define BOOST_ARCHIVE_BASIC_SERIALIZER_HPP

// MS compatible compilers support #pragma once
#if defined(_MSC_VER)
# pragma once
#endif

/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
// basic_serializer.hpp: extension of type_info required for serialization.

// (C) Copyright 2002 Robert Ramey - http://www.rrsd.com .
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

//  See http://www.boost.org for updates, documentation, and revision history.

#include <boost/assert.hpp>
#include <cstddef> // NULL
#include <boost/noncopyable.hpp>
#include <boost/config.hpp>
#include <boost/serialization/extended_type_info.hpp>

#ifdef BOOST_MSVC
# pragma warning(push)
# pragma warning(disable : 4511 4512)
#endif

namespace boost {
namespace archive {
namespace detail {

// Binds a serializer to the extended_type_info record of the type it handles.
// Ordered by the eti value (not its address) so records for the same type that
// originate from different modules compare equal.
class basic_serializer :
    private boost::noncopyable
{
    // Address of the caller-owned eti record; the reference taken by the
    // constructor guarantees this is never null.
    const boost::serialization::extended_type_info * m_eti;
protected:
    // eti must outlive this serializer; only its address is stored.
    explicit basic_serializer(
        const boost::serialization::extended_type_info & eti
    ) :
        m_eti(& eti)
    {}
public:
    inline bool operator<(const basic_serializer & rhs) const {
        // can't compare address since there can be multiple eti records
        // for the same type in different execution modules (that is, DLLS)
        // leave this here as a reminder not to do this!
        // return & lhs.get_eti() < & rhs.get_eti();
        return get_eti() < rhs.get_eti();
    }
    // Human-readable type description, forwarded from the eti record.
    const char * get_debug_info() const {
        return m_eti->get_debug_info();
    }
    const boost::serialization::extended_type_info & get_eti() const {
        return * m_eti;
    }
};

// Concrete subclass with a public constructor, used where a basic_serializer
// must be created directly as an argument.
class basic_serializer_arg : public basic_serializer {
public:
    basic_serializer_arg(const serialization::extended_type_info & eti) :
        basic_serializer(eti)
    {}
};

} // namespace detail
} // namespace archive
} // namespace boost

#ifdef BOOST_MSVC
#pragma warning(pop)
#endif

#endif // BOOST_ARCHIVE_BASIC_SERIALIZER_HPP
{ "pile_set_name": "Github" }
--- title: PC cards with incomplete configuration register addresses description: Information on supporting PC cards with incomplete configuration register addresses ms.assetid: 2a708ca5-a119-4ef5-81ee-d9e40e7a5255 keywords: - incomplete configuration registers WDK multifunction devices - system-supplied multifunction bus drivers WDK - mf.sys ms.date: 04/20/2017 ms.localizationpriority: medium --- # PC cards with incomplete configuration register addresses If a multifunction 16-bit PC Card device has configuration registers for each function but does not contain pointers in attribute memory to all register sets (does not support the LONGLINK\_MFC tuple), the vendor of such a device can use the system-supplied multifunction bus driver (mf.sys) but must provide a custom INF file and support for the individual functions. The vendor of such a device on an NT-based platform can use a system-supplied function driver for the multifunction device. A custom INF for the device must specify mf.sys as the function driver for the device. The system-supplied mf.sys driver will then enumerate the functions of the device. See [Using the System-Supplied Multifunction Bus Driver](using-the-system-supplied-multifunction-bus-driver.md) for more information about using the system-supplied mf.sys driver. The vendor of such a device must provide the following: - A custom INF file for the multifunction device. (vendor-supplied) The vendor must supply a multifunction INF file that specifies mf.sys as the multifunction bus driver, specifies the class "MultiFunction" (with its associated GUID as defined in devguid.h), and provides the missing configuration register address(es). See further information later in this section. - A PnP function driver for each function of the device. (vendor-supplied) Since the multifunction bus driver handles the multifunction semantics, the function drivers can be the same drivers that are used when the functions are packaged as individual devices. 
- An INF file for each function of the device. (vendor-supplied) The INF files can be the same files that are used when the functions are packaged as individual devices. The INF files do not need any special multifunction semantics. The custom INF for such a multifunction device must contain at least one [**INF DDInstall.LogConfigOverride section**](../install/inf-ddinstall-logconfigoverride-section.md). The override section must contain an **MfCardConfig** entry for each function, identifying the location of each set of configuration registers. The INF must restate all the resource requirements specified by the device because if override configurations are present in the INF, the PnP manager does not use any device resource requirements from the device. Specify the **MfCardConfig** entries using the syntax described in [**INF LogConfig Directive**](../install/inf-logconfig-directive.md). For example, consider the following excerpt from a custom INF for a multifunction PC Card device that contains a modem and a network adapter: ```cpp ;... [DDInstall.LogConfigOverride] LogConfig = DDInstall.Override0 [DDInstall.Override0] IOConfig = 3F8-3FF ; Com1 IOConfig = 10@100-FFFF%FFF0 ; NIC I/O IRQConfig = 3,4,5,7,9,10,11 ; IRQ MemConfig = 2000@0-FFFFFFFF%FFFFE000 ; Memory Descriptor 0 MemConfig = 1000@0-FFFFFFFF%FFFFF000 ; Memory Descriptor 1 MfCardConfig = 1000:47:0(A) MfCardConfig = 1080:47:1 ;... ``` The example shows two **MfCardConfig** entries, one for each function of the device. The first **MfCardConfig** entry contains the following information: <a href="" id="1000--configregbase-"></a>1000 (*ConfigRegBase*) Specifies that there is a set of configuration registers in the attribute memory of the card at offset 0x1000. In this example, the information in these registers describes the modem function on the card. 
<a href="" id="47--configoptions-"></a>47 (*ConfigOptions*) Specifies the hexadecimal value for the bus driver to program into the configuration option register at the *ConfigRegBase* offset (0x1000). <a href="" id="0--ioconfigindex-"></a>0 (*IoConfigIndex*) Specifies that the I/O resources for this function are listed in the first **IOConfig** entry in this section. An index of zero indicates the first entry, which in this example is "**IOConfig** = 3F8-3FF". <a href="" id="a--attrs-"></a>A (*attrs*) Directs the bus driver to turn on audio enable for this function, which is typical for a modem. The second **MfCardConfig** entry contains information about the second function on the device (the network adapter, in this example). This entry specifies that there is a second set of configuration registers at offset 0x1080. The bus driver will write the *ConfigOptions* value of 0x47 to the configuration option register for this function. The *IoConfigIndex* value of one directs the bus driver to use the second **IOConfig** entry in this section (**IOConfig** = 10@100-FFFF%FFF0) to program the I/O base and limit registers for this function. Include more than one *DDInstall*.**Override***N* section in the INF to specify more than one choice of nonsequential I/O port ranges. If the device uses a memory window that is not based at zero, the *DDInstall*.**Override***N* section(s) must also include a **PcCardConfig** entry. If an override section has both an **MfCardConfig** entry and a **PcCardConfig** entry, the PCMCIA bus driver ignores the *ConfigIndex* value in the **PcCardConfig** entry and just uses the *MemoryCardBaseN* information. See [Supporting PC Cards That Have Incomplete Configuration Registers](supporting-pc-cards-that-have-incomplete-configuration-registers.md) for more information about the **PcCardConfig** entry.
{ "pile_set_name": "Github" }
namespace Eigen { /** \eigenManualPage TopicStorageOrders Storage orders There are two different storage orders for matrices and two-dimensional arrays: column-major and row-major. This page explains these storage orders and how to specify which one should be used. \eigenAutoToc \section TopicStorageOrdersIntro Column-major and row-major storage The entries of a matrix form a two-dimensional grid. However, when the matrix is stored in memory, the entries have to somehow be laid out linearly. There are two main ways to do this, by row and by column. We say that a matrix is stored in \b row-major order if it is stored row by row. The entire first row is stored first, followed by the entire second row, and so on. Consider for example the matrix \f[ A = \begin{bmatrix} 8 & 2 & 2 & 9 \\ 9 & 1 & 4 & 4 \\ 3 & 5 & 4 & 5 \end{bmatrix}. \f] If this matrix is stored in row-major order, then the entries are laid out in memory as follows: \code 8 2 2 9 9 1 4 4 3 5 4 5 \endcode On the other hand, a matrix is stored in \b column-major order if it is stored column by column, starting with the entire first column, followed by the entire second column, and so on. If the above matrix is stored in column-major order, it is laid out as follows: \code 8 9 3 2 1 5 2 4 4 9 4 5 \endcode This example is illustrated by the following Eigen code. It uses the PlainObjectBase::data() function, which returns a pointer to the memory location of the first entry of the matrix. <table class="example"> <tr><th>Example</th><th>Output</th></tr> <tr><td> \include TopicStorageOrders_example.cpp </td> <td> \verbinclude TopicStorageOrders_example.out </td></tr></table> \section TopicStorageOrdersInEigen Storage orders in Eigen The storage order of a matrix or a two-dimensional array can be set by specifying the \c Options template parameter for Matrix or Array. 
As \ref TutorialMatrixClass explains, the %Matrix class template has six template parameters, of which
three are compulsory (\c Scalar, \c RowsAtCompileTime and \c ColsAtCompileTime) and three are optional
(\c Options, \c MaxRowsAtCompileTime and \c MaxColsAtCompileTime). If the \c Options parameter is set to
\c RowMajor, then the matrix or array is stored in row-major order; if it is set to \c ColMajor, then it is
stored in column-major order. This mechanism is used in the above Eigen program to specify the storage order.

If the storage order is not specified, then Eigen defaults to storing the entries in column-major order. This is
also the case if one of the convenience typedefs (\c Matrix3f, \c ArrayXXd, etc.) is used.

Matrices and arrays using one storage order can be assigned to matrices and arrays using the other storage
order, as happens in the above program when \c Arowmajor is initialized using \c Acolmajor. Eigen will reorder
the entries automatically. More generally, row-major and column-major matrices can be mixed in an expression as
we want.

\section TopicStorageOrdersWhich Which storage order to choose?

So, which storage order should you use in your program? There is no simple answer to this question; it depends
on your application. Here are some points to keep in mind:

  - Your users may expect you to use a specific storage order. Alternatively, you may use other libraries than
    Eigen, and these other libraries may expect a certain storage order. In these cases it may be easiest and
    fastest to use this storage order in your whole program.
  - Algorithms that traverse a matrix row by row will go faster when the matrix is stored in row-major order
    because of better data locality. Similarly, column-by-column traversal is faster for column-major
    matrices. It may be worthwhile to experiment a bit to find out what is faster for your particular
    application.
  - The default in Eigen is column-major.
Naturally, most of the development and testing of the Eigen library is thus done with column-major matrices. This means that, even though we aim to support column-major and row-major storage orders transparently, the Eigen library may well work best with column-major matrices. */ }
{ "pile_set_name": "Github" }
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2017, Red Hat, Inc., and individual contributors
 * as indicated by the @author tags. See the copyright.txt file in the
 * distribution for a full listing of individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */

package org.wildfly.extension.undertow;

import static org.wildfly.extension.undertow.HostDefinition.HOST_CAPABILITY;
import static org.wildfly.extension.undertow.ServerDefinition.SERVER_CAPABILITY;

import java.util.LinkedList;
import java.util.List;

import org.jboss.as.controller.AbstractAddStepHandler;
import org.jboss.as.controller.CapabilityServiceBuilder;
import org.jboss.as.controller.ControlledProcessStateService;
import org.jboss.as.controller.OperationContext;
import org.jboss.as.controller.OperationFailedException;
import org.jboss.as.controller.PathAddress;
import org.jboss.as.controller.ProcessType;
import org.jboss.as.controller.registry.Resource;
import org.jboss.as.server.mgmt.UndertowHttpManagementService;
import org.jboss.as.server.mgmt.domain.HttpManagement;
import org.jboss.as.server.suspend.SuspendController;
import org.jboss.as.web.host.CommonWebServer;
import org.jboss.as.web.host.WebHost;
import org.jboss.dmr.ModelNode;
import org.jboss.msc.service.ServiceBuilder;
import org.jboss.msc.service.ServiceController.Mode;
import org.jboss.msc.service.ServiceName;
import org.wildfly.extension.requestcontroller.RequestController;
import org.wildfly.extension.undertow.deployment.DefaultDeploymentMappingProvider;

/**
 * Add-step handler for an Undertow virtual-host resource: installs the
 * {@link Host} service plus, for the default host, the common {@link WebHost}
 * service, and optionally a web-console redirect service.
 *
 * @author <a href="mailto:[email protected]">Tomaz Cerar</a> (c) 2013 Red Hat Inc.
 * @author <a href="mailto:[email protected]">Richard Opalka</a>
 */
class HostAdd extends AbstractAddStepHandler {
    static final HostAdd INSTANCE = new HostAdd();

    private HostAdd() {
        // Attributes stored into the management model by the generic add handler.
        super(HostDefinition.ALIAS, HostDefinition.DEFAULT_WEB_MODULE, HostDefinition.DEFAULT_RESPONSE_CODE, HostDefinition.DISABLE_CONSOLE_REDIRECT, HostDefinition.QUEUE_REQUESTS_ON_START);
    }

    @Override
    protected void recordCapabilitiesAndRequirements(OperationContext context, ModelNode operation, Resource resource) throws OperationFailedException {
        super.recordCapabilitiesAndRequirements(context, operation, resource);
        // A host capability additionally requires the capability of its parent server.
        String ourCap = HOST_CAPABILITY.getDynamicName(context.getCurrentAddress());
        String serverCap = SERVER_CAPABILITY.getDynamicName(context.getCurrentAddress().getParent());
        context.registerAdditionalCapabilityRequirement(serverCap, ourCap, null);
    }

    @Override
    protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model) throws OperationFailedException {
        // Resource tree layout: subsystem -> server -> host (current address).
        final PathAddress address = context.getCurrentAddress();
        final PathAddress serverAddress = address.getParent();
        final PathAddress subsystemAddress = serverAddress.getParent();
        final ModelNode subsystemModel = Resource.Tools.readModel(context.readResourceFromRoot(subsystemAddress, false), 0);
        final ModelNode serverModel = Resource.Tools.readModel(context.readResourceFromRoot(serverAddress, false), 0);

        final String name = address.getLastElement().getValue();
        final List<String> aliases = HostDefinition.ALIAS.unwrap(context, model);
        final String defaultWebModule = HostDefinition.DEFAULT_WEB_MODULE.resolveModelAttribute(context, model).asString();
        final String defaultServerName = UndertowRootDefinition.DEFAULT_SERVER.resolveModelAttribute(context, subsystemModel).asString();
        final String defaultHostName = ServerDefinition.DEFAULT_HOST.resolveModelAttribute(context, serverModel).asString();
        final String serverName = serverAddress.getLastElement().getValue();
        // This host is "the" default host only if its server is also the default server.
        final boolean isDefaultHost = defaultServerName.equals(serverName) && name.equals(defaultHostName);
        final int defaultResponseCode = HostDefinition.DEFAULT_RESPONSE_CODE.resolveModelAttribute(context, model).asInt();
        // Model attribute is "disable"; invert to get the enable flag used below.
        final boolean enableConsoleRedirect = !HostDefinition.DISABLE_CONSOLE_REDIRECT.resolveModelAttribute(context, model).asBoolean();
        final boolean queueRequestsOnStart = HostDefinition.QUEUE_REQUESTS_ON_START.resolveModelAttribute(context, model).asBoolean();

        // Register the default-web-module mapping unless it is the stock default
        // and that stock default is already mapped elsewhere.
        if (!defaultWebModule.equals(HostDefinition.DEFAULT_WEB_MODULE_DEFAULT) || DefaultDeploymentMappingProvider.instance().getMapping(HostDefinition.DEFAULT_WEB_MODULE_DEFAULT) == null) {
            DefaultDeploymentMappingProvider.instance().addMapping(defaultWebModule, serverName, name);
        }

        final ServiceName virtualHostServiceName = HostDefinition.HOST_CAPABILITY.fromBaseCapability(address).getCapabilityServiceName();
        // ALIAS is optional; normalize null to an empty list for the Host service.
        final Host service = new Host(name, aliases == null ? new LinkedList<>() : aliases, defaultWebModule, defaultResponseCode, queueRequestsOnStart);
        final ServiceBuilder<?> builder = context.getCapabilityServiceTarget().addCapability(HostDefinition.HOST_CAPABILITY)
                .setInstance(service)
                .addCapabilityRequirement(Capabilities.CAPABILITY_SERVER, Server.class, service.getServerInjection(), serverName)
                .addCapabilityRequirement(Capabilities.CAPABILITY_UNDERTOW, UndertowService.class, service.getUndertowService())
                .addDependency(context.getCapabilityServiceName(Capabilities.REF_SUSPEND_CONTROLLER, SuspendController.class), SuspendController.class, service.getSuspendControllerInjectedValue())
                .addDependency(ControlledProcessStateService.SERVICE_NAME, ControlledProcessStateService.class, service.getControlledProcessStateServiceInjectedValue());
        builder.setInitialMode(Mode.ON_DEMAND);

        if (isDefaultHost) {
            addCommonHost(context, aliases, serverName, virtualHostServiceName);
            builder.addAliases(UndertowService.DEFAULT_HOST);//add alias for default host of default server service
        }

        //this is workaround for a bit so old service names still work!
        builder.addAliases(UndertowService.virtualHostName(serverName, name));
        builder.install();

        if (enableConsoleRedirect) {
            // Setup the web console redirect
            final ServiceName consoleRedirectName = UndertowService.consoleRedirectServiceName(serverName, name);
            // A standalone server is the only process type with a console redirect
            if (context.getProcessType() == ProcessType.STANDALONE_SERVER) {
                final ConsoleRedirectService redirectService = new ConsoleRedirectService();
                final ServiceBuilder<ConsoleRedirectService> redirectBuilder = context.getServiceTarget().addService(consoleRedirectName, redirectService)
                        .addDependency(UndertowHttpManagementService.SERVICE_NAME, HttpManagement.class, redirectService.getHttpManagementInjector())
                        .addDependency(virtualHostServiceName, Host.class, redirectService.getHostInjector())
                        .setInitialMode(Mode.PASSIVE);
                redirectBuilder.install();
            } else {
                // Other process types don't have a console, not depending on the UndertowHttpManagementService should
                // result in a null dependency in the service and redirect accordingly
                final ConsoleRedirectService redirectService = new ConsoleRedirectService();
                final ServiceBuilder<ConsoleRedirectService> redirectBuilder = context.getServiceTarget().addService(consoleRedirectName, redirectService)
                        .addDependency(virtualHostServiceName, Host.class, redirectService.getHostInjector())
                        .setInitialMode(Mode.PASSIVE);
                redirectBuilder.install();
            }
        }
    }

    // Installs the WebHost service for the default host, registering its
    // capability, optional request-controller requirement, and alias names.
    private void addCommonHost(OperationContext context, List<String> aliases, String serverName, ServiceName virtualHostServiceName) {
        WebHostService service = new WebHostService();
        final CapabilityServiceBuilder<?> builder = context.getCapabilityServiceTarget()
                .addCapability(WebHost.CAPABILITY)
                .setInstance(service)
                .addCapabilityRequirement(Capabilities.CAPABILITY_SERVER, Server.class, service.getServer(), serverName)
                .addCapabilityRequirement(CommonWebServer.CAPABILITY_NAME, CommonWebServer.class)
                .addDependency(virtualHostServiceName, Host.class, service.getHost());

        // Request controller is optional; only depend on it when present.
        if(context.hasOptionalCapability(Capabilities.REF_REQUEST_CONTROLLER, null, null)) {
            builder.addCapabilityRequirement(Capabilities.REF_REQUEST_CONTROLLER, RequestController.class, service.getRequestControllerInjectedValue());
        }
        builder.addAliases(WebHost.SERVICE_NAME.append(context.getCurrentAddressValue()));
        if (aliases != null) {
            for (String alias : aliases) {
                builder.addAliases(WebHost.SERVICE_NAME.append(alias));
            }
        }
        builder.setInitialMode(Mode.PASSIVE);
        builder.install();
    }
}
{ "pile_set_name": "Github" }
/**
 * Integer square root: the largest integer r with r * r <= x.
 * Binary search over candidate roots; when the loop exits, `hi` sits on
 * the last candidate whose square did not exceed x.
 * @param {number} x
 * @return {number}
 */
var mySqrt = function(x) {
    if (x === 0 || x === 1) return x;

    let lo = 1;
    let hi = x;
    while (lo <= hi) {
        // Overflow-safe midpoint (equivalent to lo + ((hi - lo) >>> 1)).
        const mid = lo + Math.floor((hi - lo) / 2);
        const square = mid * mid;
        if (square > x) {
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return hi;
};
{ "pile_set_name": "Github" }
<?php
namespace App\Sync\Task;

/**
 * Scheduled task that purges relay records which have stopped reporting.
 */
class RelayCleanup extends AbstractTask
{
    /**
     * Delete every relay whose last update is older than the cutoff.
     *
     * Relays should update every 15 seconds, so 90 seconds without an
     * update is treated as gone and cleaned up fairly aggressively.
     */
    public function run(bool $force = false): void
    {
        $cutoff = time() - 90;

        $query = $this->em->createQuery(/** @lang DQL */ 'DELETE FROM App\Entity\Relay r WHERE r.updated_at < :threshold');
        $query->setParameter('threshold', $cutoff);
        $query->execute();
    }
}
{ "pile_set_name": "Github" }
module.exports = /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { /******/ configurable: false, /******/ enumerable: true, /******/ get: getter /******/ }); /******/ } /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? 
/******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = "/dist/"; /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 248); /******/ }) /************************************************************************/ /******/ ({ /***/ 0: /***/ (function(module, exports) { /* globals __VUE_SSR_CONTEXT__ */ // IMPORTANT: Do NOT use ES2015 features in this file. // This module is a runtime utility for cleaner component module output and will // be included in the final webpack user bundle. module.exports = function normalizeComponent ( rawScriptExports, compiledTemplate, functionalTemplate, injectStyles, scopeId, moduleIdentifier /* server only */ ) { var esModule var scriptExports = rawScriptExports = rawScriptExports || {} // ES6 modules interop var type = typeof rawScriptExports.default if (type === 'object' || type === 'function') { esModule = rawScriptExports scriptExports = rawScriptExports.default } // Vue.extend constructor export interop var options = typeof scriptExports === 'function' ? 
scriptExports.options : scriptExports // render functions if (compiledTemplate) { options.render = compiledTemplate.render options.staticRenderFns = compiledTemplate.staticRenderFns options._compiled = true } // functional template if (functionalTemplate) { options.functional = true } // scopedId if (scopeId) { options._scopeId = scopeId } var hook if (moduleIdentifier) { // server build hook = function (context) { // 2.3 injection context = context || // cached call (this.$vnode && this.$vnode.ssrContext) || // stateful (this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional // 2.2 with runInNewContext: true if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') { context = __VUE_SSR_CONTEXT__ } // inject component styles if (injectStyles) { injectStyles.call(this, context) } // register component module identifier for async chunk inferrence if (context && context._registeredComponents) { context._registeredComponents.add(moduleIdentifier) } } // used by ssr in case component is cached and beforeCreate // never gets called options._ssrRegister = hook } else if (injectStyles) { hook = injectStyles } if (hook) { var functional = options.functional var existing = functional ? options.render : options.beforeCreate if (!functional) { // inject component registration as beforeCreate hook options.beforeCreate = existing ? 
[].concat(existing, hook) : [hook] } else { // for template-only hot-reload because in that case the render fn doesn't // go through the normalizer options._injectStyles = hook // register for functioal component in vue file options.render = function renderWithStyleInjection (h, context) { hook.call(context) return existing(h, context) } } } return { esModule: esModule, exports: scriptExports, options: options } } /***/ }), /***/ 1: /***/ (function(module, exports) { module.exports = require("element-ui/lib/mixins/emitter"); /***/ }), /***/ 12: /***/ (function(module, exports) { module.exports = require("element-ui/lib/locale"); /***/ }), /***/ 16: /***/ (function(module, exports) { module.exports = require("element-ui/lib/checkbox"); /***/ }), /***/ 2: /***/ (function(module, exports) { module.exports = require("element-ui/lib/utils/dom"); /***/ }), /***/ 21: /***/ (function(module, exports) { module.exports = require("element-ui/lib/transitions/collapse-transition"); /***/ }), /***/ 248: /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; var _tree = __webpack_require__(249); var _tree2 = _interopRequireDefault(_tree); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } /* istanbul ignore next */ _tree2.default.install = function (Vue) { Vue.component(_tree2.default.name, _tree2.default); }; exports.default = _tree2.default; /***/ }), /***/ 249: /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; Object.defineProperty(__webpack_exports__, "__esModule", { value: true }); /* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_vue__ = __webpack_require__(250); /* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_vue___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_vue__); /* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__node_modules_vue_loader_lib_template_compiler_index_id_data_v_8ac7d2ce_hasScoped_false_preserveWhitespace_false_buble_transforms_node_modules_vue_loader_lib_selector_type_template_index_0_tree_vue__ = __webpack_require__(256); var normalizeComponent = __webpack_require__(0) /* script */ /* template */ /* template functional */ var __vue_template_functional__ = false /* styles */ var __vue_styles__ = null /* scopeId */ var __vue_scopeId__ = null /* moduleIdentifier (server only) */ var __vue_module_identifier__ = null var Component = normalizeComponent( __WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_vue___default.a, __WEBPACK_IMPORTED_MODULE_1__node_modules_vue_loader_lib_template_compiler_index_id_data_v_8ac7d2ce_hasScoped_false_preserveWhitespace_false_buble_transforms_node_modules_vue_loader_lib_selector_type_template_index_0_tree_vue__["a" /* default */], __vue_template_functional__, __vue_styles__, __vue_scopeId__, __vue_module_identifier__ ) /* harmony default export */ __webpack_exports__["default"] = (Component.exports); /***/ }), /***/ 250: /***/ 
(function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; var _treeStore = __webpack_require__(251); var _treeStore2 = _interopRequireDefault(_treeStore); var _util = __webpack_require__(33); var _treeNode = __webpack_require__(253); var _treeNode2 = _interopRequireDefault(_treeNode); var _locale = __webpack_require__(12); var _emitter = __webpack_require__(1); var _emitter2 = _interopRequireDefault(_emitter); var _dom = __webpack_require__(2); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // exports.default = { name: 'ElTree', mixins: [_emitter2.default], components: { ElTreeNode: _treeNode2.default }, data: function data() { return { store: null, root: null, currentNode: null, treeItems: null, checkboxItems: [], dragState: { showDropIndicator: false, draggingNode: null, dropNode: null, allowDrop: true } }; }, props: { data: { type: Array }, emptyText: { type: String, default: function _default() { return (0, _locale.t)('el.tree.emptyText'); } }, renderAfterExpand: { type: Boolean, default: true }, nodeKey: String, checkStrictly: Boolean, defaultExpandAll: Boolean, expandOnClickNode: { type: Boolean, default: true }, checkOnClickNode: Boolean, checkDescendants: { type: Boolean, default: false }, autoExpandParent: { type: Boolean, default: true }, defaultCheckedKeys: Array, defaultExpandedKeys: Array, renderContent: Function, showCheckbox: { type: Boolean, default: false }, draggable: { type: Boolean, default: false }, allowDrag: Function, allowDrop: Function, props: { default: function _default() { return { children: 'children', label: 'label', icon: 'icon', disabled: 'disabled' }; } }, lazy: { type: Boolean, default: false }, highlightCurrent: Boolean, load: Function, filterNodeMethod: Function, accordion: Boolean, indent: { type: Number, default: 18 } }, computed: { children: { set: function 
set(value) { this.data = value; }, get: function get() { return this.data; } }, treeItemArray: function treeItemArray() { return Array.prototype.slice.call(this.treeItems); }, isEmpty: function isEmpty() { var childNodes = this.root.childNodes; return !childNodes || childNodes.length === 0 || childNodes.every(function (_ref) { var visible = _ref.visible; return !visible; }); } }, watch: { defaultCheckedKeys: function defaultCheckedKeys(newVal) { this.store.setDefaultCheckedKey(newVal); }, defaultExpandedKeys: function defaultExpandedKeys(newVal) { this.store.defaultExpandedKeys = newVal; this.store.setDefaultExpandedKeys(newVal); }, data: function data(newVal) { this.store.setData(newVal); }, checkboxItems: function checkboxItems(val) { Array.prototype.forEach.call(val, function (checkbox) { checkbox.setAttribute('tabindex', -1); }); }, checkStrictly: function checkStrictly(newVal) { this.store.checkStrictly = newVal; } }, methods: { filter: function filter(value) { if (!this.filterNodeMethod) throw new Error('[Tree] filterNodeMethod is required when filter'); this.store.filter(value); }, getNodeKey: function getNodeKey(node) { return (0, _util.getNodeKey)(this.nodeKey, node.data); }, getNodePath: function getNodePath(data) { if (!this.nodeKey) throw new Error('[Tree] nodeKey is required in getNodePath'); var node = this.store.getNode(data); if (!node) return []; var path = [node.data]; var parent = node.parent; while (parent && parent !== this.root) { path.push(parent.data); parent = parent.parent; } return path.reverse(); }, getCheckedNodes: function getCheckedNodes(leafOnly) { return this.store.getCheckedNodes(leafOnly); }, getCheckedKeys: function getCheckedKeys(leafOnly) { return this.store.getCheckedKeys(leafOnly); }, getCurrentNode: function getCurrentNode() { var currentNode = this.store.getCurrentNode(); return currentNode ? 
currentNode.data : null; }, getCurrentKey: function getCurrentKey() { if (!this.nodeKey) throw new Error('[Tree] nodeKey is required in getCurrentKey'); var currentNode = this.getCurrentNode(); return currentNode ? currentNode[this.nodeKey] : null; }, setCheckedNodes: function setCheckedNodes(nodes, leafOnly) { if (!this.nodeKey) throw new Error('[Tree] nodeKey is required in setCheckedNodes'); this.store.setCheckedNodes(nodes, leafOnly); }, setCheckedKeys: function setCheckedKeys(keys, leafOnly) { if (!this.nodeKey) throw new Error('[Tree] nodeKey is required in setCheckedKeys'); this.store.setCheckedKeys(keys, leafOnly); }, setChecked: function setChecked(data, checked, deep) { this.store.setChecked(data, checked, deep); }, getHalfCheckedNodes: function getHalfCheckedNodes() { return this.store.getHalfCheckedNodes(); }, getHalfCheckedKeys: function getHalfCheckedKeys() { return this.store.getHalfCheckedKeys(); }, setCurrentNode: function setCurrentNode(node) { if (!this.nodeKey) throw new Error('[Tree] nodeKey is required in setCurrentNode'); this.store.setUserCurrentNode(node); }, setCurrentKey: function setCurrentKey(key) { if (!this.nodeKey) throw new Error('[Tree] nodeKey is required in setCurrentKey'); this.store.setCurrentNodeKey(key); }, getNode: function getNode(data) { return this.store.getNode(data); }, remove: function remove(data) { this.store.remove(data); }, append: function append(data, parentNode) { this.store.append(data, parentNode); }, insertBefore: function insertBefore(data, refNode) { this.store.insertBefore(data, refNode); }, insertAfter: function insertAfter(data, refNode) { this.store.insertAfter(data, refNode); }, handleNodeExpand: function handleNodeExpand(nodeData, node, instance) { this.broadcast('ElTreeNode', 'tree-node-expand', node); this.$emit('node-expand', nodeData, node, instance); }, updateKeyChildren: function updateKeyChildren(key, data) { if (!this.nodeKey) throw new Error('[Tree] nodeKey is required in updateKeyChild'); 
this.store.updateChildren(key, data); }, initTabIndex: function initTabIndex() { this.treeItems = this.$el.querySelectorAll('.is-focusable[role=treeitem]'); this.checkboxItems = this.$el.querySelectorAll('input[type=checkbox]'); var checkedItem = this.$el.querySelectorAll('.is-checked[role=treeitem]'); if (checkedItem.length) { checkedItem[0].setAttribute('tabindex', 0); return; } this.treeItems[0] && this.treeItems[0].setAttribute('tabindex', 0); }, handelKeydown: function handelKeydown(ev) { var currentItem = ev.target; if (currentItem.className.indexOf('el-tree-node') === -1) return; ev.preventDefault(); var keyCode = ev.keyCode; this.treeItems = this.$el.querySelectorAll('.is-focusable[role=treeitem]'); var currentIndex = this.treeItemArray.indexOf(currentItem); var nextIndex = void 0; if ([38, 40].indexOf(keyCode) > -1) { // up、down if (keyCode === 38) { // up nextIndex = currentIndex !== 0 ? currentIndex - 1 : 0; } else { nextIndex = currentIndex < this.treeItemArray.length - 1 ? currentIndex + 1 : 0; } this.treeItemArray[nextIndex].focus(); // 选中 } if ([37, 39].indexOf(keyCode) > -1) { // left、right 展开 currentItem.click(); // 选中 } var hasInput = currentItem.querySelector('[type="checkbox"]'); if ([13, 32].indexOf(keyCode) > -1 && hasInput) { // space enter选中checkbox hasInput.click(); } } }, created: function created() { var _this = this; this.isTree = true; this.store = new _treeStore2.default({ key: this.nodeKey, data: this.data, lazy: this.lazy, props: this.props, load: this.load, currentNodeKey: this.currentNodeKey, checkStrictly: this.checkStrictly, checkDescendants: this.checkDescendants, defaultCheckedKeys: this.defaultCheckedKeys, defaultExpandedKeys: this.defaultExpandedKeys, autoExpandParent: this.autoExpandParent, defaultExpandAll: this.defaultExpandAll, filterNodeMethod: this.filterNodeMethod }); this.root = this.store.root; var dragState = this.dragState; this.$on('tree-node-drag-start', function (event, treeNode) { if (typeof _this.allowDrag === 
'function' && !_this.allowDrag(treeNode.node)) { event.preventDefault(); return false; } event.dataTransfer.effectAllowed = 'move'; // wrap in try catch to address IE's error when first param is 'text/plain' try { // setData is required for draggable to work in FireFox // the content has to be '' so dragging a node out of the tree won't open a new tab in FireFox event.dataTransfer.setData('text/plain', ''); } catch (e) {} dragState.draggingNode = treeNode; _this.$emit('node-drag-start', treeNode.node, event); }); this.$on('tree-node-drag-over', function (event, treeNode) { var dropNode = (0, _util.findNearestComponent)(event.target, 'ElTreeNode'); var oldDropNode = dragState.dropNode; if (oldDropNode && oldDropNode !== dropNode) { (0, _dom.removeClass)(oldDropNode.$el, 'is-drop-inner'); } var draggingNode = dragState.draggingNode; if (!draggingNode || !dropNode) return; var dropPrev = true; var dropInner = true; var dropNext = true; var userAllowDropInner = true; if (typeof _this.allowDrop === 'function') { dropPrev = _this.allowDrop(draggingNode.node, dropNode.node, 'prev'); userAllowDropInner = dropInner = _this.allowDrop(draggingNode.node, dropNode.node, 'inner'); dropNext = _this.allowDrop(draggingNode.node, dropNode.node, 'next'); } event.dataTransfer.dropEffect = dropInner ? 
'move' : 'none'; if ((dropPrev || dropInner || dropNext) && oldDropNode !== dropNode) { if (oldDropNode) { _this.$emit('node-drag-leave', draggingNode.node, oldDropNode.node, event); } _this.$emit('node-drag-enter', draggingNode.node, dropNode.node, event); } if (dropPrev || dropInner || dropNext) { dragState.dropNode = dropNode; } if (dropNode.node.nextSibling === draggingNode.node) { dropNext = false; } if (dropNode.node.previousSibling === draggingNode.node) { dropPrev = false; } if (dropNode.node.contains(draggingNode.node, false)) { dropInner = false; } if (draggingNode.node === dropNode.node || draggingNode.node.contains(dropNode.node)) { dropPrev = false; dropInner = false; dropNext = false; } var targetPosition = dropNode.$el.getBoundingClientRect(); var treePosition = _this.$el.getBoundingClientRect(); var dropType = void 0; var prevPercent = dropPrev ? dropInner ? 0.25 : dropNext ? 0.45 : 1 : -1; var nextPercent = dropNext ? dropInner ? 0.75 : dropPrev ? 0.55 : 0 : 1; var indicatorTop = -9999; var distance = event.clientY - targetPosition.top; if (distance < targetPosition.height * prevPercent) { dropType = 'before'; } else if (distance > targetPosition.height * nextPercent) { dropType = 'after'; } else if (dropInner) { dropType = 'inner'; } else { dropType = 'none'; } var dropIndicator = _this.$refs.dropIndicator; if (dropType === 'before') { indicatorTop = targetPosition.top - treePosition.top; } else if (dropType === 'after') { indicatorTop = targetPosition.bottom - treePosition.top; } dropIndicator.style.top = indicatorTop + 'px'; dropIndicator.style.left = targetPosition.right - treePosition.left + 'px'; if (dropType === 'inner') { (0, _dom.addClass)(dropNode.$el, 'is-drop-inner'); } else { (0, _dom.removeClass)(dropNode.$el, 'is-drop-inner'); } dragState.showDropIndicator = dropType === 'before' || dropType === 'after'; dragState.allowDrop = dragState.showDropIndicator || userAllowDropInner; dragState.dropType = dropType; 
_this.$emit('node-drag-over', draggingNode.node, dropNode.node, event); }); this.$on('tree-node-drag-end', function (event) { var draggingNode = dragState.draggingNode, dropType = dragState.dropType, dropNode = dragState.dropNode; event.preventDefault(); event.dataTransfer.dropEffect = 'move'; if (draggingNode && dropNode) { var data = draggingNode.node.data; if (dropType === 'before') { draggingNode.node.remove(); dropNode.node.parent.insertBefore({ data: data }, dropNode.node); } else if (dropType === 'after') { draggingNode.node.remove(); dropNode.node.parent.insertAfter({ data: data }, dropNode.node); } else if (dropType === 'inner') { dropNode.node.insertChild({ data: data }); draggingNode.node.remove(); } (0, _dom.removeClass)(dropNode.$el, 'is-drop-inner'); _this.$emit('node-drag-end', draggingNode.node, dropNode.node, dropType, event); if (dropType !== 'none') { _this.$emit('node-drop', draggingNode.node, dropNode.node, dropType, event); } } if (draggingNode && !dropNode) { _this.$emit('node-drag-end', draggingNode.node, null, dropType, event); } dragState.showDropIndicator = false; dragState.draggingNode = null; dragState.dropNode = null; dragState.allowDrop = true; }); }, mounted: function mounted() { this.initTabIndex(); this.$el.addEventListener('keydown', this.handelKeydown); }, updated: function updated() { this.treeItems = this.$el.querySelectorAll('[role=treeitem]'); this.checkboxItems = this.$el.querySelectorAll('input[type=checkbox]'); } }; /***/ }), /***/ 251: /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
"symbol" : typeof obj; }; var _node = __webpack_require__(252); var _node2 = _interopRequireDefault(_node); var _util = __webpack_require__(33); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var TreeStore = function () { function TreeStore(options) { var _this = this; _classCallCheck(this, TreeStore); this.currentNode = null; this.currentNodeKey = null; for (var option in options) { if (options.hasOwnProperty(option)) { this[option] = options[option]; } } this.nodesMap = {}; this.root = new _node2.default({ data: this.data, store: this }); if (this.lazy && this.load) { var loadFn = this.load; loadFn(this.root, function (data) { _this.root.doCreateChildren(data); _this._initDefaultCheckedNodes(); }); } else { this._initDefaultCheckedNodes(); } } TreeStore.prototype.filter = function filter(value) { var filterNodeMethod = this.filterNodeMethod; var lazy = this.lazy; var traverse = function traverse(node) { var childNodes = node.root ? 
node.root.childNodes : node.childNodes; childNodes.forEach(function (child) { child.visible = filterNodeMethod.call(child, value, child.data, child); traverse(child); }); if (!node.visible && childNodes.length) { var allHidden = true; childNodes.forEach(function (child) { if (child.visible) allHidden = false; }); if (node.root) { node.root.visible = allHidden === false; } else { node.visible = allHidden === false; } } if (!value) return; if (node.visible && !node.isLeaf && !lazy) node.expand(); }; traverse(this); }; TreeStore.prototype.setData = function setData(newVal) { var instanceChanged = newVal !== this.root.data; if (instanceChanged) { this.root.setData(newVal); this._initDefaultCheckedNodes(); } else { this.root.updateChildren(); } }; TreeStore.prototype.getNode = function getNode(data) { if (data instanceof _node2.default) return data; var key = (typeof data === 'undefined' ? 'undefined' : _typeof(data)) !== 'object' ? data : (0, _util.getNodeKey)(this.key, data); return this.nodesMap[key] || null; }; TreeStore.prototype.insertBefore = function insertBefore(data, refData) { var refNode = this.getNode(refData); refNode.parent.insertBefore({ data: data }, refNode); }; TreeStore.prototype.insertAfter = function insertAfter(data, refData) { var refNode = this.getNode(refData); refNode.parent.insertAfter({ data: data }, refNode); }; TreeStore.prototype.remove = function remove(data) { var node = this.getNode(data); if (node) { node.parent.removeChild(node); } }; TreeStore.prototype.append = function append(data, parentData) { var parentNode = parentData ? 
this.getNode(parentData) : this.root; if (parentNode) { parentNode.insertChild({ data: data }); } }; TreeStore.prototype._initDefaultCheckedNodes = function _initDefaultCheckedNodes() { var _this2 = this; var defaultCheckedKeys = this.defaultCheckedKeys || []; var nodesMap = this.nodesMap; defaultCheckedKeys.forEach(function (checkedKey) { var node = nodesMap[checkedKey]; if (node) { node.setChecked(true, !_this2.checkStrictly); } }); }; TreeStore.prototype._initDefaultCheckedNode = function _initDefaultCheckedNode(node) { var defaultCheckedKeys = this.defaultCheckedKeys || []; if (defaultCheckedKeys.indexOf(node.key) !== -1) { node.setChecked(true, !this.checkStrictly); } }; TreeStore.prototype.setDefaultCheckedKey = function setDefaultCheckedKey(newVal) { if (newVal !== this.defaultCheckedKeys) { this.defaultCheckedKeys = newVal; this._initDefaultCheckedNodes(); } }; TreeStore.prototype.registerNode = function registerNode(node) { var key = this.key; if (!key || !node || !node.data) return; var nodeKey = node.key; if (nodeKey !== undefined) this.nodesMap[node.key] = node; }; TreeStore.prototype.deregisterNode = function deregisterNode(node) { var key = this.key; if (!key || !node || !node.data) return; var childNodes = node.childNodes; for (var i = 0, j = childNodes.length; i < j; i++) { var child = childNodes[i]; this.deregisterNode(child); } delete this.nodesMap[node.key]; }; TreeStore.prototype.getCheckedNodes = function getCheckedNodes() { var leafOnly = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : false; var checkedNodes = []; var traverse = function traverse(node) { var childNodes = node.root ? 
node.root.childNodes : node.childNodes; childNodes.forEach(function (child) { if (child.checked && (!leafOnly || leafOnly && child.isLeaf)) { checkedNodes.push(child.data); } traverse(child); }); }; traverse(this); return checkedNodes; }; TreeStore.prototype.getCheckedKeys = function getCheckedKeys() { var _this3 = this; var leafOnly = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : false; return this.getCheckedNodes(leafOnly).map(function (data) { return (data || {})[_this3.key]; }); }; TreeStore.prototype.getHalfCheckedNodes = function getHalfCheckedNodes() { var nodes = []; var traverse = function traverse(node) { var childNodes = node.root ? node.root.childNodes : node.childNodes; childNodes.forEach(function (child) { if (child.indeterminate) { nodes.push(child.data); } traverse(child); }); }; traverse(this); return nodes; }; TreeStore.prototype.getHalfCheckedKeys = function getHalfCheckedKeys() { var _this4 = this; return this.getHalfCheckedNodes().map(function (data) { return (data || {})[_this4.key]; }); }; TreeStore.prototype._getAllNodes = function _getAllNodes() { var allNodes = []; var nodesMap = this.nodesMap; for (var nodeKey in nodesMap) { if (nodesMap.hasOwnProperty(nodeKey)) { allNodes.push(nodesMap[nodeKey]); } } return allNodes; }; TreeStore.prototype.updateChildren = function updateChildren(key, data) { var node = this.nodesMap[key]; if (!node) return; var childNodes = node.childNodes; for (var i = childNodes.length - 1; i >= 0; i--) { var child = childNodes[i]; this.remove(child.data); } for (var _i = 0, j = data.length; _i < j; _i++) { var _child = data[_i]; this.append(_child, node.data); } }; TreeStore.prototype._setCheckedKeys = function _setCheckedKeys(key) { var leafOnly = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : false; var checkedKeys = arguments[2]; var allNodes = this._getAllNodes().sort(function (a, b) { return b.level - a.level; }); var cache = Object.create(null); var keys = Object.keys(checkedKeys); allNodes.forEach(function (node) { return node.setChecked(false, false); }); for (var i = 0, j = allNodes.length; i < j; i++) { var node = allNodes[i]; var nodeKey = node.data[key].toString(); var checked = keys.indexOf(nodeKey) > -1; if (!checked) { if (node.checked && !cache[nodeKey]) { node.setChecked(false, false); } continue; } var parent = node.parent; while (parent && parent.level > 0) { cache[parent.data[key]] = true; parent = parent.parent; } if (node.isLeaf || this.checkStrictly) { node.setChecked(true, false); continue; } node.setChecked(true, true); if (leafOnly) { (function () { node.setChecked(false, false); var traverse = function traverse(node) { var childNodes = node.childNodes; childNodes.forEach(function (child) { if (!child.isLeaf) { child.setChecked(false, false); } traverse(child); }); }; traverse(node); })(); } } }; TreeStore.prototype.setCheckedNodes = function setCheckedNodes(array) { var leafOnly = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; var key = this.key; var checkedKeys = {}; array.forEach(function (item) { checkedKeys[(item || {})[key]] = true; }); this._setCheckedKeys(key, leafOnly, checkedKeys); }; TreeStore.prototype.setCheckedKeys = function setCheckedKeys(keys) { var leafOnly = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : false; this.defaultCheckedKeys = keys; var key = this.key; var checkedKeys = {}; keys.forEach(function (key) { checkedKeys[key] = true; }); this._setCheckedKeys(key, leafOnly, checkedKeys); }; TreeStore.prototype.setDefaultExpandedKeys = function setDefaultExpandedKeys(keys) { var _this5 = this; keys = keys || []; this.defaultExpandedKeys = keys; keys.forEach(function (key) { var node = _this5.getNode(key); if (node) node.expand(null, _this5.autoExpandParent); }); }; TreeStore.prototype.setChecked = function setChecked(data, checked, deep) { var node = this.getNode(data); if (node) { node.setChecked(!!checked, deep); } }; TreeStore.prototype.getCurrentNode = function getCurrentNode() { return this.currentNode; }; TreeStore.prototype.setCurrentNode = function setCurrentNode(node) { this.currentNode = node; }; TreeStore.prototype.setUserCurrentNode = function setUserCurrentNode(node) { var key = node[this.key]; var currNode = this.nodesMap[key]; this.setCurrentNode(currNode); }; TreeStore.prototype.setCurrentNodeKey = function setCurrentNodeKey(key) { if (key === null) { this.currentNode = null; return; } var node = this.getNode(key); if (node) { this.currentNode = node; } }; return TreeStore; }(); exports.default = TreeStore; ; /***/ }), /***/ 252: /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.getChildState = undefined; var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
"symbol" : typeof obj; }; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); var _merge = __webpack_require__(9); var _merge2 = _interopRequireDefault(_merge); var _util = __webpack_require__(33); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var getChildState = exports.getChildState = function getChildState(node) { var all = true; var none = true; var allWithoutDisable = true; for (var i = 0, j = node.length; i < j; i++) { var n = node[i]; if (n.checked !== true || n.indeterminate) { all = false; if (!n.disabled) { allWithoutDisable = false; } } if (n.checked !== false || n.indeterminate) { none = false; } } return { all: all, none: none, allWithoutDisable: allWithoutDisable, half: !all && !none }; }; var reInitChecked = function reInitChecked(node) { if (node.childNodes.length === 0) return; var _getChildState = getChildState(node.childNodes), all = _getChildState.all, none = _getChildState.none, half = _getChildState.half; if (all) { node.checked = true; node.indeterminate = false; } else if (half) { node.checked = false; node.indeterminate = true; } else if (none) { node.checked = false; node.indeterminate = false; } var parent = node.parent; if (!parent || parent.level === 0) return; if (!node.store.checkStrictly) { reInitChecked(parent); } }; var 
getPropertyFromData = function getPropertyFromData(node, prop) { var props = node.store.props; var data = node.data || {}; var config = props[prop]; if (typeof config === 'function') { return config(data, node); } else if (typeof config === 'string') { return data[config]; } else if (typeof config === 'undefined') { var dataProp = data[prop]; return dataProp === undefined ? '' : dataProp; } }; var nodeIdSeed = 0; var Node = function () { function Node(options) { _classCallCheck(this, Node); this.id = nodeIdSeed++; this.text = null; this.checked = false; this.indeterminate = false; this.data = null; this.expanded = false; this.parent = null; this.visible = true; for (var name in options) { if (options.hasOwnProperty(name)) { this[name] = options[name]; } } // internal this.level = 0; this.loaded = false; this.childNodes = []; this.loading = false; if (this.parent) { this.level = this.parent.level + 1; } var store = this.store; if (!store) { throw new Error('[Node]store is required!'); } store.registerNode(this); var props = store.props; if (props && typeof props.isLeaf !== 'undefined') { var isLeaf = getPropertyFromData(this, 'isLeaf'); if (typeof isLeaf === 'boolean') { this.isLeafByUser = isLeaf; } } if (store.lazy !== true && this.data) { this.setData(this.data); if (store.defaultExpandAll) { this.expanded = true; } } else if (this.level > 0 && store.lazy && store.defaultExpandAll) { this.expand(); } if (!Array.isArray(this.data)) { (0, _util.markNodeData)(this, this.data); } if (!this.data) return; var defaultExpandedKeys = store.defaultExpandedKeys; var key = store.key; if (key && defaultExpandedKeys && defaultExpandedKeys.indexOf(this.key) !== -1) { this.expand(null, store.autoExpandParent); } if (key && store.currentNodeKey !== undefined && this.key === store.currentNodeKey) { store.currentNode = this; } if (store.lazy) { store._initDefaultCheckedNode(this); } this.updateLeafState(); } Node.prototype.setData = function setData(data) { if 
(!Array.isArray(data)) { (0, _util.markNodeData)(this, data); } this.data = data; this.childNodes = []; var children = void 0; if (this.level === 0 && this.data instanceof Array) { children = this.data; } else { children = getPropertyFromData(this, 'children') || []; } for (var i = 0, j = children.length; i < j; i++) { this.insertChild({ data: children[i] }); } }; Node.prototype.contains = function contains(target) { var deep = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : true; var walk = function walk(parent) { var children = parent.childNodes || []; var result = false; for (var i = 0, j = children.length; i < j; i++) { var child = children[i]; if (child === target || deep && walk(child)) { result = true; break; } } return result; }; return walk(this); }; Node.prototype.remove = function remove() { var parent = this.parent; if (parent) { parent.removeChild(this); } }; Node.prototype.insertChild = function insertChild(child, index, batch) { if (!child) throw new Error('insertChild error: child is required.'); if (!(child instanceof Node)) { if (!batch) { var children = this.getChildren(true); if (children.indexOf(child.data) === -1) { if (typeof index === 'undefined' || index < 0) { children.push(child.data); } else { children.splice(index, 0, child.data); } } } (0, _merge2.default)(child, { parent: this, store: this.store }); child = new Node(child); } child.level = this.level + 1; if (typeof index === 'undefined' || index < 0) { this.childNodes.push(child); } else { this.childNodes.splice(index, 0, child); } this.updateLeafState(); }; Node.prototype.insertBefore = function insertBefore(child, ref) { var index = void 0; if (ref) { index = this.childNodes.indexOf(ref); } this.insertChild(child, index); }; Node.prototype.insertAfter = function insertAfter(child, ref) { var index = void 0; if (ref) { index = this.childNodes.indexOf(ref); if (index !== -1) index += 1; } this.insertChild(child, index); }; Node.prototype.removeChild = function 
removeChild(child) { var children = this.getChildren() || []; var dataIndex = children.indexOf(child.data); if (dataIndex > -1) { children.splice(dataIndex, 1); } var index = this.childNodes.indexOf(child); if (index > -1) { this.store && this.store.deregisterNode(child); child.parent = null; this.childNodes.splice(index, 1); } this.updateLeafState(); }; Node.prototype.removeChildByData = function removeChildByData(data) { var targetNode = null; this.childNodes.forEach(function (node) { if (node.data === data) { targetNode = node; } }); if (targetNode) { this.removeChild(targetNode); } }; Node.prototype.expand = function expand(callback, expandParent) { var _this = this; var done = function done() { if (expandParent) { var parent = _this.parent; while (parent.level > 0) { parent.expanded = true; parent = parent.parent; } } _this.expanded = true; if (callback) callback(); }; if (this.shouldLoadData()) { this.loadData(function (data) { if (data instanceof Array) { if (_this.checked) { _this.setChecked(true, true); } else { reInitChecked(_this); } done(); } }); } else { done(); } }; Node.prototype.doCreateChildren = function doCreateChildren(array) { var _this2 = this; var defaultProps = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; array.forEach(function (item) { _this2.insertChild((0, _merge2.default)({ data: item }, defaultProps), undefined, true); }); }; Node.prototype.collapse = function collapse() { this.expanded = false; }; Node.prototype.shouldLoadData = function shouldLoadData() { return this.store.lazy === true && this.store.load && !this.loaded; }; Node.prototype.updateLeafState = function updateLeafState() { if (this.store.lazy === true && this.loaded !== true && typeof this.isLeafByUser !== 'undefined') { this.isLeaf = this.isLeafByUser; return; } var childNodes = this.childNodes; if (!this.store.lazy || this.store.lazy === true && this.loaded === true) { this.isLeaf = !childNodes || childNodes.length === 0; return; } this.isLeaf = false; }; Node.prototype.setChecked = function setChecked(value, deep, recursion, passValue) { var _this3 = this; this.indeterminate = value === 'half'; this.checked = value === true; if (this.store.checkStrictly) return; if (!(this.shouldLoadData() && !this.store.checkDescendants)) { var _ret = function () { var _getChildState2 = getChildState(_this3.childNodes), all = _getChildState2.all, allWithoutDisable = _getChildState2.allWithoutDisable; if (!_this3.isLeaf && !all && allWithoutDisable) { _this3.checked = false; value = false; } var handleDescendants = function handleDescendants() { if (deep) { var childNodes = _this3.childNodes; for (var i = 0, j = childNodes.length; i < j; i++) { var child = childNodes[i]; passValue = passValue || value !== false; var isCheck = child.disabled ? child.checked : passValue; child.setChecked(isCheck, deep, true, passValue); } var _getChildState3 = getChildState(childNodes), half = _getChildState3.half, _all = _getChildState3.all; if (!_all) { _this3.checked = _all; _this3.indeterminate = half; } } }; if (_this3.shouldLoadData()) { // Only work on lazy load data. 
_this3.loadData(function () { handleDescendants(); reInitChecked(_this3); }, { checked: value !== false }); return { v: void 0 }; } else { handleDescendants(); } }(); if ((typeof _ret === 'undefined' ? 'undefined' : _typeof(_ret)) === "object") return _ret.v; } var parent = this.parent; if (!parent || parent.level === 0) return; if (!recursion) { reInitChecked(parent); } }; Node.prototype.getChildren = function getChildren() { var forceInit = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : false; // this is data if (this.level === 0) return this.data; var data = this.data; if (!data) return null; var props = this.store.props; var children = 'children'; if (props) { children = props.children || 'children'; } if (data[children] === undefined) { data[children] = null; } if (forceInit && !data[children]) { data[children] = []; } return data[children]; }; Node.prototype.updateChildren = function updateChildren() { var _this4 = this; var newData = this.getChildren() || []; var oldData = this.childNodes.map(function (node) { return node.data; }); var newDataMap = {}; var newNodes = []; newData.forEach(function (item, index) { if (item[_util.NODE_KEY]) { newDataMap[item[_util.NODE_KEY]] = { index: index, data: item }; } else { newNodes.push({ index: index, data: item }); } }); oldData.forEach(function (item) { if (!newDataMap[item[_util.NODE_KEY]]) _this4.removeChildByData(item); }); newNodes.forEach(function (_ref) { var index = _ref.index, data = _ref.data; _this4.insertChild({ data: data }, index); }); this.updateLeafState(); }; Node.prototype.loadData = function loadData(callback) { var _this5 = this; var defaultProps = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; if (this.store.lazy === true && this.store.load && !this.loaded && (!this.loading || Object.keys(defaultProps).length)) { this.loading = true; var resolve = function resolve(children) { _this5.loaded = true; _this5.loading = false; _this5.childNodes = []; _this5.doCreateChildren(children, defaultProps); _this5.updateLeafState(); if (callback) { callback.call(_this5, children); } }; this.store.load(this, resolve); } else { if (callback) { callback.call(this); } } }; _createClass(Node, [{ key: 'label', get: function get() { return getPropertyFromData(this, 'label'); } }, { key: 'icon', get: function get() { return getPropertyFromData(this, 'icon'); } }, { key: 'key', get: function get() { var nodeKey = this.store.key; if (this.data) return this.data[nodeKey]; return null; } }, { key: 'disabled', get: function get() { return getPropertyFromData(this, 'disabled'); } }, { key: 'nextSibling', get: function get() { var parent = this.parent; if (parent) { var index = parent.childNodes.indexOf(this); if (index > -1) { return parent.childNodes[index + 1]; } } return null; } }, { key: 'previousSibling', get: function get() { var parent = this.parent; if (parent) { var index = parent.childNodes.indexOf(this); if (index > -1) { return index > 0 ? 
parent.childNodes[index - 1] : null; } } return null; } }]); return Node; }(); exports.default = Node; /***/ }), /***/ 253: /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; Object.defineProperty(__webpack_exports__, "__esModule", { value: true }); /* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_node_vue__ = __webpack_require__(254); /* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_node_vue___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_node_vue__); /* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__node_modules_vue_loader_lib_template_compiler_index_id_data_v_09d50766_hasScoped_false_preserveWhitespace_false_buble_transforms_node_modules_vue_loader_lib_selector_type_template_index_0_tree_node_vue__ = __webpack_require__(255); var normalizeComponent = __webpack_require__(0) /* script */ /* template */ /* template functional */ var __vue_template_functional__ = false /* styles */ var __vue_styles__ = null /* scopeId */ var __vue_scopeId__ = null /* moduleIdentifier (server only) */ var __vue_module_identifier__ = null var Component = normalizeComponent( __WEBPACK_IMPORTED_MODULE_0__babel_loader_node_modules_vue_loader_lib_selector_type_script_index_0_tree_node_vue___default.a, __WEBPACK_IMPORTED_MODULE_1__node_modules_vue_loader_lib_template_compiler_index_id_data_v_09d50766_hasScoped_false_preserveWhitespace_false_buble_transforms_node_modules_vue_loader_lib_selector_type_template_index_0_tree_node_vue__["a" /* default */], __vue_template_functional__, __vue_styles__, __vue_scopeId__, __vue_module_identifier__ ) /* harmony default export */ __webpack_exports__["default"] = (Component.exports); /***/ }), /***/ 254: /***/ (function(module, exports, __webpack_require__) { "use strict"; 
exports.__esModule = true; var _collapseTransition = __webpack_require__(21); var _collapseTransition2 = _interopRequireDefault(_collapseTransition); var _checkbox = __webpack_require__(16); var _checkbox2 = _interopRequireDefault(_checkbox); var _emitter = __webpack_require__(1); var _emitter2 = _interopRequireDefault(_emitter); var _util = __webpack_require__(33); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // exports.default = { name: 'ElTreeNode', componentName: 'ElTreeNode', mixins: [_emitter2.default], props: { node: { default: function _default() { return {}; } }, props: {}, renderContent: Function, renderAfterExpand: { type: Boolean, default: true } }, components: { ElCollapseTransition: _collapseTransition2.default, ElCheckbox: _checkbox2.default, NodeContent: { props: { node: { required: true } }, render: function render(h) { var parent = this.$parent; var tree = parent.tree; var node = this.node; var data = node.data, store = node.store; return parent.renderContent ? parent.renderContent.call(parent._renderProxy, h, { _self: tree.$vnode.context, node: node, data: data, store: store }) : tree.$scopedSlots.default ? 
tree.$scopedSlots.default({ node: node, data: data }) : h( 'span', { 'class': 'el-tree-node__label' }, [node.label] ); } } }, data: function data() { return { tree: null, expanded: false, childNodeRendered: false, showCheckbox: false, oldChecked: null, oldIndeterminate: null }; }, watch: { 'node.indeterminate': function nodeIndeterminate(val) { this.handleSelectChange(this.node.checked, val); }, 'node.checked': function nodeChecked(val) { this.handleSelectChange(val, this.node.indeterminate); }, 'node.expanded': function nodeExpanded(val) { var _this = this; this.$nextTick(function () { return _this.expanded = val; }); if (val) { this.childNodeRendered = true; } } }, methods: { getNodeKey: function getNodeKey(node) { return (0, _util.getNodeKey)(this.tree.nodeKey, node.data); }, handleSelectChange: function handleSelectChange(checked, indeterminate) { if (this.oldChecked !== checked && this.oldIndeterminate !== indeterminate) { this.tree.$emit('check-change', this.node.data, checked, indeterminate); } this.oldChecked = checked; this.indeterminate = indeterminate; }, handleClick: function handleClick() { var store = this.tree.store; store.setCurrentNode(this.node); this.tree.$emit('current-change', store.currentNode ? 
store.currentNode.data : null, store.currentNode); this.tree.currentNode = this; if (this.tree.expandOnClickNode) { this.handleExpandIconClick(); } if (this.tree.checkOnClickNode && !this.node.disabled) { this.handleCheckChange(null, { target: { checked: !this.node.checked } }); } this.tree.$emit('node-click', this.node.data, this.node, this); }, handleContextMenu: function handleContextMenu(event) { if (this.tree._events['node-contextmenu'] && this.tree._events['node-contextmenu'].length > 0) { event.stopPropagation(); event.preventDefault(); } this.tree.$emit('node-contextmenu', event, this.node.data, this.node, this); }, handleExpandIconClick: function handleExpandIconClick() { if (this.node.isLeaf) return; if (this.expanded) { this.tree.$emit('node-collapse', this.node.data, this.node, this); this.node.collapse(); } else { this.node.expand(); this.$emit('node-expand', this.node.data, this.node, this); } }, handleCheckChange: function handleCheckChange(value, ev) { var _this2 = this; this.node.setChecked(ev.target.checked, !this.tree.checkStrictly); this.$nextTick(function () { var store = _this2.tree.store; _this2.tree.$emit('check', _this2.node.data, { checkedNodes: store.getCheckedNodes(), checkedKeys: store.getCheckedKeys(), halfCheckedNodes: store.getHalfCheckedNodes(), halfCheckedKeys: store.getHalfCheckedKeys() }); }); }, handleChildNodeExpand: function handleChildNodeExpand(nodeData, node, instance) { this.broadcast('ElTreeNode', 'tree-node-expand', node); this.tree.$emit('node-expand', nodeData, node, instance); }, handleDragStart: function handleDragStart(event) { if (!this.tree.draggable) return; this.tree.$emit('tree-node-drag-start', event, this); }, handleDragOver: function handleDragOver(event) { if (!this.tree.draggable) return; this.tree.$emit('tree-node-drag-over', event, this); event.preventDefault(); }, handleDrop: function handleDrop(event) { event.preventDefault(); }, handleDragEnd: function handleDragEnd(event) { if (!this.tree.draggable) 
return; this.tree.$emit('tree-node-drag-end', event, this); } }, created: function created() { var _this3 = this; var parent = this.$parent; if (parent.isTree) { this.tree = parent; } else { this.tree = parent.tree; } var tree = this.tree; if (!tree) { console.warn('Can not find node\'s tree.'); } var props = tree.props || {}; var childrenKey = props['children'] || 'children'; this.$watch('node.data.' + childrenKey, function () { _this3.node.updateChildren(); }); this.showCheckbox = tree.showCheckbox; if (this.node.expanded) { this.expanded = true; this.childNodeRendered = true; } if (this.tree.accordion) { this.$on('tree-node-expand', function (node) { if (_this3.node !== node) { _this3.node.collapse(); } }); } } }; /***/ }), /***/ 255: /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; var render = function () { var this$1 = this; var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;return _c('div',{directives:[{name:"show",rawName:"v-show",value:(_vm.node.visible),expression:"node.visible"}],ref:"node",staticClass:"el-tree-node",class:{ 'is-expanded': _vm.expanded, 'is-current': _vm.tree.store.currentNode === _vm.node, 'is-hidden': !_vm.node.visible, 'is-focusable': !_vm.node.disabled, 'is-checked': !_vm.node.disabled && _vm.node.checked },attrs:{"role":"treeitem","tabindex":"-1","aria-expanded":_vm.expanded,"aria-disabled":_vm.node.disabled,"aria-checked":_vm.node.checked,"draggable":_vm.tree.draggable},on:{"click":function($event){$event.stopPropagation();_vm.handleClick($event)},"contextmenu":function ($event) { return this$1.handleContextMenu($event); 
},"dragstart":function($event){$event.stopPropagation();_vm.handleDragStart($event)},"dragover":function($event){$event.stopPropagation();_vm.handleDragOver($event)},"dragend":function($event){$event.stopPropagation();_vm.handleDragEnd($event)},"drop":function($event){$event.stopPropagation();_vm.handleDrop($event)}}},[_c('div',{staticClass:"el-tree-node__content",style:({ 'padding-left': (_vm.node.level - 1) * _vm.tree.indent + 'px' })},[_c('span',{staticClass:"el-tree-node__expand-icon el-icon-caret-right",class:{ 'is-leaf': _vm.node.isLeaf, expanded: !_vm.node.isLeaf && _vm.expanded },on:{"click":function($event){$event.stopPropagation();_vm.handleExpandIconClick($event)}}}),(_vm.showCheckbox)?_c('el-checkbox',{attrs:{"indeterminate":_vm.node.indeterminate,"disabled":!!_vm.node.disabled},on:{"change":_vm.handleCheckChange},nativeOn:{"click":function($event){$event.stopPropagation();}},model:{value:(_vm.node.checked),callback:function ($$v) {_vm.$set(_vm.node, "checked", $$v)},expression:"node.checked"}}):_vm._e(),(_vm.node.loading)?_c('span',{staticClass:"el-tree-node__loading-icon el-icon-loading"}):_vm._e(),_c('node-content',{attrs:{"node":_vm.node}})],1),_c('el-collapse-transition',[(!_vm.renderAfterExpand || _vm.childNodeRendered)?_c('div',{directives:[{name:"show",rawName:"v-show",value:(_vm.expanded),expression:"expanded"}],staticClass:"el-tree-node__children",attrs:{"role":"group","aria-expanded":_vm.expanded}},_vm._l((_vm.node.childNodes),function(child){return _c('el-tree-node',{key:_vm.getNodeKey(child),attrs:{"render-content":_vm.renderContent,"render-after-expand":_vm.renderAfterExpand,"node":child},on:{"node-expand":_vm.handleChildNodeExpand}})})):_vm._e()])],1)} var staticRenderFns = [] var esExports = { render: render, staticRenderFns: staticRenderFns } /* harmony default export */ __webpack_exports__["a"] = (esExports); /***/ }), /***/ 256: /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; var render = function () 
{var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;return _c('div',{staticClass:"el-tree",class:{ 'el-tree--highlight-current': _vm.highlightCurrent, 'is-dragging': !!_vm.dragState.draggingNode, 'is-drop-not-allow': !_vm.dragState.allowDrop, 'is-drop-inner': _vm.dragState.dropType === 'inner' },attrs:{"role":"tree"}},[_vm._l((_vm.root.childNodes),function(child){return _c('el-tree-node',{key:_vm.getNodeKey(child),attrs:{"node":child,"props":_vm.props,"render-after-expand":_vm.renderAfterExpand,"render-content":_vm.renderContent},on:{"node-expand":_vm.handleNodeExpand}})}),(_vm.isEmpty)?_c('div',{staticClass:"el-tree__empty-block"},[_c('span',{staticClass:"el-tree__empty-text"},[_vm._v(_vm._s(_vm.emptyText))])]):_vm._e(),_c('div',{directives:[{name:"show",rawName:"v-show",value:(_vm.dragState.showDropIndicator),expression:"dragState.showDropIndicator"}],ref:"dropIndicator",staticClass:"el-tree__drop-indicator"})],2)} var staticRenderFns = [] var esExports = { render: render, staticRenderFns: staticRenderFns } /* harmony default export */ __webpack_exports__["a"] = (esExports); /***/ }), /***/ 33: /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; var NODE_KEY = exports.NODE_KEY = '$treeNodeId'; var markNodeData = exports.markNodeData = function markNodeData(node, data) { if (!data || data[NODE_KEY]) return; Object.defineProperty(data, NODE_KEY, { value: node.id, enumerable: false, configurable: false, writable: false }); }; var getNodeKey = exports.getNodeKey = function getNodeKey(key, data) { if (!key) return data[NODE_KEY]; return data[key]; }; var findNearestComponent = exports.findNearestComponent = function findNearestComponent(element, componentName) { var target = element; while (target && target.tagName !== 'BODY') { if (target.__vue__ && target.__vue__.$options.name === componentName) { return target.__vue__; } target = target.parentNode; } return null; }; /***/ }), /***/ 9: /***/ (function(module, 
exports) { module.exports = require("element-ui/lib/utils/merge"); /***/ }) /******/ });
{ "pile_set_name": "Github" }
<!doctype html> <!-- @license Copyright (c) 2015 The Polymer Project Authors. All rights reserved. This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt Code distributed by Google as part of the polymer project is also subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt --> <html> <head> <title>iron-location</title> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <script src="../webcomponentsjs/webcomponents-lite.js"></script> <link rel="import" href="../iron-component-page/iron-component-page.html"> </head> <body> <iron-component-page></iron-component-page> </body> </html>
{ "pile_set_name": "Github" }
{ "loc.messages.AZ_AzureRMProfileModuleNotFound": "モジュール 'AzureRM.Profile' が見つかりません。'AzureRM' モジュールが完全にインストールされていない可能性があります。管理者特権のセッションから次の PowerShell コマンドを実行すると、問題が解決する場合があります。Import-Module AzureRM ; Install-AzureRM", "loc.messages.AZ_CertificateAuthNotSupported": "証明書に基づく認証はサポートされていません。Azure PowerShell モジュールが見つかりません。", "loc.messages.AZ_CredentialsError": "配置に使用した Azure 資格情報にエラーがありました。", "loc.messages.AZ_ModuleNotFound": "バージョン '{0}' のモジュール '{1}' が見つかりませんでした。モジュールを最近インストールした場合は、Azure Pipelines のタスク エージェントを再起動後にもう一度お試しください。", "loc.messages.AZ_RequiresMinVersion0": "必要な最小バージョン ({0}) の Azure PowerShell モジュールがインストールされていません。", "loc.messages.AZ_ServicePrincipalError": "配置に使用したサービス プリンシパルにエラーがありました。", "loc.messages.AZ_ServicePrincipalAuthNotSupportedAzureVersion0": "バージョン '{0}' の Azure モジュールでは、サービス プリンシパル認証はサポートされていません。", "loc.messages.AZ_UnsupportedAuthScheme0": "'{0}' は Azure エンドポイントではサポートされていない認証スキームです。", "loc.messages.AZ_AvailableModules": "利用可能な {0} モジュールの一覧:", "loc.messages.AZ_InvalidARMEndpoint": "指定した AzureRM エンドポイントは無効です。", "loc.messages.AZ_MsiAccessNotConfiguredProperlyFailure": "マネージド ID のアクセス トークンをフェッチできませんでした。仮想マシンのマネージド ID を構成してください 'https://aka.ms/azure-msi-docs'。状態コード: '{0}'、エラー メッセージ: {1}", "loc.messages.AZ_MsiAccessTokenFetchFailure": "マネージド ID のアクセス トークンをフェッチできませんでした。状態コード: '{0}'、エラー メッセージ: {1}", "loc.messages.AZ_MsiFailure": "マネージド ID のアクセス トークンをフェッチできませんでした。{0}" }
{ "pile_set_name": "Github" }
## @package multiprocessing.process # Emulates and replaces the multiprocessing.process core Python API with a DSZ # compatible implementation. # __all__ = ['Process', 'current_process', 'active_children'] # 'normal' imports import os import sys import signal import itertools # DSZ imports import dsz.script # No good analog in DSZ land, so just leave it alone. ORIGINAL_DIR = None ## Generally used as an analog for os.getpid() _DSZ_COMMAND_ID = int(dsz.script.Env['script_command_id']) # ===================================================================== # Public Functions # ===================================================================== ## Return process object representing the current process def current_process(): return _current_process ## Return list of process objects corresponding to live child processes def active_children(): _cleanup() return list(_current_process._children) # ===================================================================== # Private Functions # ===================================================================== ## Check for processes which have finished def _cleanup(): for p in list(_current_process._children): if p._popen.poll() is not None: _current_process._children.discard(p) # ===================================================================== # Public Classes # ===================================================================== ## Process objects represent actibity that is run in a separate process. # # This is emulated in DSZ by different instantiations of the 'python' # command. 
class Process(object):
    """Handle for an activity run in a separate process.

    Mirrors the public interface of multiprocessing.Process, but the child
    is launched as a separate DSZ 'python' command (see .forking.Popen).
    The parent "pid" analog is the DSZ command ID of the parent script.
    """

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, _dsz_newterm=None):
        # 'group' exists only for API compatibility with threading.Thread /
        # multiprocessing.Process and must remain None.
        assert group is None, 'group argument must be None for now'
        count = _current_process._counter.next()
        # Identity is the parent's identity tuple extended by our birth order,
        # e.g. (1, 2) == second child of the parent's first child.
        self._identity = _current_process._identity + (count,)
        self._authkey = _current_process._authkey
        self._daemonic = _current_process._daemonic
        self._tempdir = _current_process._tempdir
        # Analog: parent "pid" is the command ID of the parent script.
        self._parent_pid = _DSZ_COMMAND_ID
        self._popen = None          # set by start(); None until then
        self._target = target
        self._args = args
        self._kwargs = kwargs
        self._name = name or type(self).__name__ + '-' + ':'.join(str(i) for i in self._identity)

    ## Method to run in sub-process; can be overridden in sub-class.
    def run(self):
        if self._target:
            self._target(*self._args, **self._kwargs)

    ## Start child process.
    def start(self):
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == _DSZ_COMMAND_ID, 'can only start a process object created by current process'
        assert not _current_process._daemonic, 'daemonic processes are not allowed to have children'
        _cleanup()
        # Imported lazily to avoid a circular import at module load time.
        from .forking import Popen
        self._popen = Popen(self)
        _current_process._children.add(self)

    ## Terminate child process.
    def terminate(self):
        self._popen.terminate()

    ## Wait until the child process terminates (or the timeout elapses).
    #  Returns None; a reaped child is removed from the parent's child set.
    def join(self, timeout=None):
        assert self._parent_pid == _DSZ_COMMAND_ID, 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        result = self._popen.wait(timeout)
        # result is None while the child is still running (timeout expired);
        # any other value is the exit code, meaning the child is done.
        if result is not None:
            _current_process._children.discard(self)

    ## Query whether the process is still running.
    def is_alive(self):
        if self is _current_process:
            return True
        assert self._parent_pid == _DSZ_COMMAND_ID, 'can only test a child process'
        if self._popen is None:
            return False
        return self._popen.poll() is None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, basestring), 'name must be a string'
        self._name = name

    ## Whether this process is daemonic (may only be set before start()).
    @property
    def daemon(self):
        return self._daemonic

    @daemon.setter
    def daemon(self, daemonic):
        assert self._popen is None, 'process has already started'
        self._daemonic = daemonic

    @property
    def authkey(self):
        return self._authkey

    @authkey.setter
    def authkey(self, authkey):
        self._authkey = AuthenticationString(authkey)

    ## Exit code of the child, or None if it has not started / not finished.
    @property
    def exitcode(self):
        if self._popen is None:
            return None
        return self._popen.poll()

    ## DSZ command ID of this process (the os.getpid() analog), or None if
    #  the child has not been started yet.
    @property
    def ident(self):
        if self is _current_process:
            return _DSZ_COMMAND_ID
        else:
            return self._popen and self._popen.pid

    pid = ident

    def __repr__(self):
        if self is _current_process:
            status = 'started'
        elif self._parent_pid != _DSZ_COMMAND_ID:
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            # BUG FIX: original read 'self_popen.poll()' (missing dot), which
            # raised NameError whenever repr() was taken of a started child.
            if self._popen.poll() is not None:
                status = self.exitcode
            else:
                status = 'started'
        # exitcode 0 compares equal to False; render it as 'stopped'.
        if status == False:
            status = 'stopped'
        elif status == True:
            status = 'started'
        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self._daemonic and ' daemon' or '')

    ## Entry point executed inside the child: reset child bookkeeping, make
    #  this object the child's _current_process, run the target, and map any
    #  exception to an integer exit code.
    def _bootstrap(self):
        global _current_process
        try:
            self._children = set()
            self._counter = itertools.count(1)
            _current_process = self
            try:
                self.run()
                exitcode = 0
            finally:
                # might need to implement atexit handlers here, check back later
                pass
        except SystemExit as e:
            # Honor sys.exit(): no args or non-int arg -> 1, int arg -> that code.
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                exitcode = 1
        except:
            # Any other exception: report traceback on stderr, exit code 1.
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            sys.stderr.flush()
            traceback.print_exc()
        return exitcode


## Byte string subclass for authentication keys that refuses to be pickled
#  except during the controlled spawn path, so keys cannot leak accidentally.
class AuthenticationString(bytes):
    def __reduce__(self):
        from .forking import Popen
        if not Popen.thread_is_spawning():
            raise TypeError('Pickling an AuthenticationString object is disallowed for security reasons.')
        return AuthenticationString, (bytes(self),)


## Process object representing the main (top-level) process; instantiated
#  exactly once below and then the class name is deleted.
class _MainProcess(Process):
    def __init__(self):
        # Deliberately does NOT call Process.__init__: the main process has
        # no parent to inherit state from, so everything is seeded here.
        self._identity = ()
        self._daemonic = False
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._counter = itertools.count(1)
        self._children = set()
        self._authkey = AuthenticationString(os.urandom(32))
        self._tempdir = None


_current_process = _MainProcess()
del _MainProcess
{ "pile_set_name": "Github" }
//#pragma comment(exestr, "$Header: /usr4/winnt/SOURCES/ddk35/src/hal/halsni/mips/RCS/xxmemory.c,v 1.2 1994/11/09 07:54:26 holli Exp $")

/*++

Copyright (c) 1991  Microsoft Corporation

Module Name:

    xxmemory.c

Abstract:

    Provides routines to allow the HAL to map physical memory.

Environment:

    Phase 0 initialization only.

Changes:

    All stuff comes from the x86 HAL Sources (xxmemory.c)

--*/

#include "halp.h"

//
// Put all code for HAL initialization in the INIT section. It will be
// deallocated by memory management when phase 1 initialization is
// completed.
//

#if defined(ALLOC_PRAGMA)
#pragma alloc_text(INIT, HalpAllocPhysicalMemory)
#endif

//
// Spare descriptor used (at most once -- see the ASSERT below) to describe
// the free remainder left behind a 64K-aligned carve-out.
//
MEMORY_ALLOCATION_DESCRIPTOR    HalpExtraAllocationDescriptor;

ULONG
HalpAllocPhysicalMemory(
    IN PLOADER_PARAMETER_BLOCK LoaderBlock,
    IN ULONG MaxPhysicalAddress,
    IN ULONG NoPages,
    IN BOOLEAN bAlignOn64k
    )
/*++

Routine Description:

    Carves out N pages of physical memory from the memory descriptor
    list in the desired location. This function is to be called only
    during phase zero initialization. (ie, before the kernel's memory
    management system is running)

Arguments:

    MaxPhysicalAddress - The max address where the physical memory can be
    NoPages - Number of pages to allocate
    bAlignOn64k - If TRUE, round the carve-out start up to a 16-page
        boundary (64 KB assuming 4 KB pages -- TODO confirm PAGE_SHIFT
        for this platform)

Return Value:

    The pyhsical address or NULL if the memory could not be obtained.

--*/
{
    PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
    PLIST_ENTRY NextMd;
    ULONG AlignmentOffset;
    ULONG MaxPageAddress;
    ULONG PhysicalAddress;

    // Work in page-frame numbers rather than byte addresses from here on.
    MaxPageAddress = MaxPhysicalAddress >> PAGE_SHIFT;

    //
    // Scan the memory allocation descriptors and allocate map buffers
    //

    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
        Descriptor = CONTAINING_RECORD(NextMd,
                                MEMORY_ALLOCATION_DESCRIPTOR,
                                ListEntry);

        // Pages to skip at the head of this block so the allocation starts
        // on a 16-page boundary (0 when no alignment was requested).
        AlignmentOffset = bAlignOn64k ?
            ((Descriptor->BasePage + 0x0f) & ~0x0f) - Descriptor->BasePage :
            0;

        //
        // Search for a block of memory which contains a memory chunk
        // that is greater than size pages, and has a physical address less
        // than MAXIMUM_PHYSICAL_ADDRESS.
        //
        // Note: BasePage != 0 also excludes the block containing page zero.
        //

        if ((Descriptor->MemoryType == LoaderFree ||
             Descriptor->MemoryType == MemoryFirmwareTemporary) &&
            (Descriptor->BasePage) &&
            (Descriptor->PageCount >= NoPages + AlignmentOffset) &&
            (Descriptor->BasePage + NoPages + AlignmentOffset < MaxPageAddress)) {

            PhysicalAddress =
                (AlignmentOffset + Descriptor->BasePage) << PAGE_SHIFT;
            break;
        }

        NextMd = NextMd->Flink;
    }

    //
    // Use the extra descriptor to define the memory at the end of the
    // original block.
    //

    ASSERT(NextMd != &LoaderBlock->MemoryDescriptorListHead);

    if (NextMd == &LoaderBlock->MemoryDescriptorListHead)
        return (ULONG)NULL;

    //
    // Adjust the memory descriptors.
    //

    if (AlignmentOffset == 0) {

        // Allocation comes off the front of the block: just advance it.
        Descriptor->BasePage  += NoPages;
        Descriptor->PageCount -= NoPages;

        if (Descriptor->PageCount == 0) {
            //
            // The whole block was allocated,
            // Remove the entry from the list completely.
            //
            RemoveEntryList(&Descriptor->ListEntry);
        }

    } else {

        if (Descriptor->PageCount - NoPages - AlignmentOffset) {

            //
            // Currently we only allow one Align64K allocation
            //
            ASSERT (HalpExtraAllocationDescriptor.PageCount == 0);

            //
            // The extra descriptor is needed so intialize it and insert
            // it in the list.
            //
            HalpExtraAllocationDescriptor.PageCount =
                Descriptor->PageCount - NoPages - AlignmentOffset;

            HalpExtraAllocationDescriptor.BasePage =
                Descriptor->BasePage + NoPages + AlignmentOffset;

            HalpExtraAllocationDescriptor.MemoryType = MemoryFree;
            InsertTailList(
                &Descriptor->ListEntry,
                &HalpExtraAllocationDescriptor.ListEntry
                );
        }

        //
        // Use the current entry as the descriptor for the first block.
        // (The original descriptor now covers only the skipped alignment
        // pages at the head of the block.)
        //
        Descriptor->PageCount = AlignmentOffset;
    }

    return PhysicalAddress;
}
{ "pile_set_name": "Github" }
/***** BEGIN LICENSE BLOCK ***** BSD License Copyright (c) 2005-2015, NIF File Format Library and Tools All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the NIF File Format Library and Tools project may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***** END LICENCE BLOCK *****/ #include "renderer.h" #include "message.h" #include "nifskope.h" #include "gl/glmesh.h" #include "gl/glproperty.h" #include "gl/glscene.h" #include "gl/gltex.h" #include "io/material.h" #include "model/nifmodel.h" #include "ui/settingsdialog.h" #include <QCoreApplication> #include <QDebug> #include <QDir> #include <QFile> #include <QOpenGLContext> #include <QOpenGLFunctions> #include <QSettings> #include <QTextStream> //! 
@file renderer.cpp Renderer and child classes implementation bool shader_initialized = false; bool shader_ready = true; bool Renderer::initialize() { if ( !shader_initialized ) { // check for OpenGL 2.0 // (we don't use the extension API but the 2.0 API for shaders) if ( cfg.useShaders && fn->hasOpenGLFeature( QOpenGLFunctions::Shaders ) ) { shader_ready = true; shader_initialized = true; } else { shader_ready = false; } //qDebug() << "shader support" << shader_ready; } return shader_ready; } bool Renderer::hasShaderSupport() { return shader_ready; } const QHash<Renderer::ConditionSingle::Type, QString> Renderer::ConditionSingle::compStrs{ { EQ, " == " }, { NE, " != " }, { LE, " <= " }, { GE, " >= " }, { LT, " < " }, { GT, " > " }, { AND, " & " }, { NAND, " !& " } }; Renderer::ConditionSingle::ConditionSingle( const QString & line, bool neg ) : invert( neg ) { QHashIterator<Type, QString> i( compStrs ); int pos = -1; while ( i.hasNext() ) { i.next(); pos = line.indexOf( i.value() ); if ( pos > 0 ) break; } if ( pos > 0 ) { left = line.left( pos ).trimmed(); right = line.right( line.length() - pos - i.value().length() ).trimmed(); if ( right.startsWith( "\"" ) && right.endsWith( "\"" ) ) right = right.mid( 1, right.length() - 2 ); comp = i.key(); } else { left = line; comp = NONE; } } QModelIndex Renderer::ConditionSingle::getIndex( const NifModel * nif, const QVector<QModelIndex> & iBlocks, QString blkid ) const { QString childid; if ( blkid.startsWith( "HEADER/" ) ) return nif->getIndex( nif->getHeader(), blkid.remove( "HEADER/" ) ); int pos = blkid.indexOf( "/" ); if ( pos > 0 ) { childid = blkid.right( blkid.length() - pos - 1 ); blkid = blkid.left( pos ); } for ( QModelIndex iBlock : iBlocks ) { if ( nif->inherits( iBlock, blkid ) ) { if ( childid.isEmpty() ) return iBlock; return nif->getIndex( iBlock, childid ); } } return QModelIndex(); } bool Renderer::ConditionSingle::eval( const NifModel * nif, const QVector<QModelIndex> & iBlocks ) const { QModelIndex 
iLeft = getIndex( nif, iBlocks, left ); if ( !iLeft.isValid() ) return invert; if ( comp == NONE ) return !invert; NifValue val = nif->getValue( iLeft ); if ( val.isString() ) return compare( val.toString(), right ) ^ invert; else if ( val.isCount() ) return compare( val.toCount(), right.toUInt( nullptr, 0 ) ) ^ invert; else if ( val.isFloat() ) return compare( val.toFloat(), (float)right.toDouble() ) ^ invert; else if ( val.isFileVersion() ) return compare( val.toFileVersion(), right.toUInt( nullptr, 0 ) ) ^ invert; else if ( val.type() == NifValue::tBSVertexDesc ) return compare( (uint)val.get<BSVertexDesc>().GetFlags(), right.toUInt( nullptr, 0 ) ) ^ invert; return false; } bool Renderer::ConditionGroup::eval( const NifModel * nif, const QVector<QModelIndex> & iBlocks ) const { if ( conditions.isEmpty() ) return true; if ( isOrGroup() ) { for ( Condition * cond : conditions ) { if ( cond->eval( nif, iBlocks ) ) return true; } return false; } else { for ( Condition * cond : conditions ) { if ( !cond->eval( nif, iBlocks ) ) return false; } return true; } } void Renderer::ConditionGroup::addCondition( Condition * c ) { conditions.append( c ); } Renderer::Shader::Shader( const QString & n, GLenum t, QOpenGLFunctions * fn ) : f( fn ), name( n ), id( 0 ), status( false ), type( t ) { id = f->glCreateShader( type ); } Renderer::Shader::~Shader() { if ( id ) f->glDeleteShader( id ); } bool Renderer::Shader::load( const QString & filepath ) { try { QFile file( filepath ); if ( !file.open( QIODevice::ReadOnly ) ) throw QString( "couldn't open %1 for read access" ).arg( filepath ); QByteArray data = file.readAll(); const char * src = data.constData(); f->glShaderSource( id, 1, &src, 0 ); f->glCompileShader( id ); GLint result; f->glGetShaderiv( id, GL_COMPILE_STATUS, &result ); if ( result != GL_TRUE ) { GLint logLen; f->glGetShaderiv( id, GL_INFO_LOG_LENGTH, &logLen ); char * log = new char[ logLen ]; f->glGetShaderInfoLog( id, logLen, 0, log ); QString errlog( log ); 
delete[] log; throw errlog; } } catch ( QString & err ) { status = false; Message::append( QObject::tr( "There were errors during shader compilation" ), QString( "%1:\r\n\r\n%2" ).arg( name ).arg( err ) ); return false; } status = true; return true; } Renderer::Program::Program( const QString & n, QOpenGLFunctions * fn ) : f( fn ), name( n ), id( 0 ) { id = f->glCreateProgram(); } Renderer::Program::~Program() { if ( id ) f->glDeleteShader( id ); } bool Renderer::Program::load( const QString & filepath, Renderer * renderer ) { try { QFile file( filepath ); if ( !file.open( QIODevice::ReadOnly ) ) throw QString( "couldn't open %1 for read access" ).arg( filepath ); QTextStream stream( &file ); QStack<ConditionGroup *> chkgrps; chkgrps.push( &conditions ); while ( !stream.atEnd() ) { QString line = stream.readLine().trimmed(); if ( line.startsWith( "shaders" ) ) { QStringList list = line.simplified().split( " " ); for ( int i = 1; i < list.count(); i++ ) { Shader * shader = renderer->shaders.value( list[ i ] ); if ( shader ) { if ( shader->status ) f->glAttachShader( id, shader->id ); else throw QString( "depends on shader %1 which was not compiled successful" ).arg( list[ i ] ); } else { throw QString( "shader %1 not found" ).arg( list[ i ] ); } } } else if ( line.startsWith( "checkgroup" ) ) { QStringList list = line.simplified().split( " " ); if ( list.value( 1 ) == "begin" ) { ConditionGroup * group = new ConditionGroup( list.value( 2 ) == "or" ); chkgrps.top()->addCondition( group ); chkgrps.push( group ); } else if ( list.value( 1 ) == "end" ) { if ( chkgrps.count() > 1 ) chkgrps.pop(); else throw QString( "mismatching checkgroup end tag" ); } else { throw QString( "expected begin or end after checkgroup" ); } } else if ( line.startsWith( "check" ) ) { line = line.remove( 0, 5 ).trimmed(); bool invert = false; if ( line.startsWith( "not " ) ) { invert = true; line = line.remove( 0, 4 ).trimmed(); } chkgrps.top()->addCondition( new ConditionSingle( line, invert 
) ); } else if ( line.startsWith( "texcoords" ) ) { line = line.remove( 0, 9 ).simplified(); QStringList list = line.split( " " ); bool ok; int unit = list.value( 0 ).toInt( &ok ); QString idStr = list.value( 1 ).toLower(); if ( !ok || idStr.isEmpty() ) throw QString( "malformed texcoord tag" ); int id = -1; if ( idStr == "tangents" ) id = CT_TANGENT; else if ( idStr == "bitangents" ) id = CT_BITANGENT; else if ( idStr == "indices" ) id = CT_BONE; else if ( idStr == "weights" ) id = CT_WEIGHT; else if ( idStr == "base" ) id = TexturingProperty::getId( idStr ); if ( id < 0 ) throw QString( "texcoord tag refers to unknown texture id '%1'" ).arg( idStr ); if ( texcoords.contains( unit ) ) throw QString( "texture unit %1 is assigned twiced" ).arg( unit ); texcoords.insert( unit, CoordType(id) ); } } f->glLinkProgram( id ); GLint result; f->glGetProgramiv( id, GL_LINK_STATUS, &result ); if ( result != GL_TRUE ) { GLint logLen = 0; f->glGetProgramiv( id, GL_INFO_LOG_LENGTH, &logLen ); if ( logLen != 0 ) { char * log = new char[ logLen ]; f->glGetProgramInfoLog( id, logLen, 0, log ); QString errlog( log ); delete[] log; id = 0; throw errlog; } } } catch ( QString & x ) { status = false; Message::append( QObject::tr( "There were errors during shader compilation" ), QString( "%1:\r\n\r\n%2" ).arg( name ).arg( x ) ); return false; } status = true; return true; } void Renderer::Program::setUniformLocations() { for ( int i = 0; i < NUM_UNIFORM_TYPES; i++ ) uniformLocations[i] = f->glGetUniformLocation( id, uniforms[i].c_str() ); } Renderer::Renderer( QOpenGLContext * c, QOpenGLFunctions * f ) : cx( c ), fn( f ) { updateSettings(); connect( NifSkope::getOptions(), &SettingsDialog::saveSettings, this, &Renderer::updateSettings ); } Renderer::~Renderer() { releaseShaders(); } void Renderer::updateSettings() { QSettings settings; cfg.useShaders = settings.value( "Settings/Render/General/Use Shaders", true ).toBool(); bool prevStatus = shader_ready; shader_ready = cfg.useShaders && 
fn->hasOpenGLFeature( QOpenGLFunctions::Shaders ); if ( !shader_initialized && shader_ready && !prevStatus ) { updateShaders(); shader_initialized = true; } } void Renderer::updateShaders() { if ( !shader_ready ) return; releaseShaders(); QDir dir( QCoreApplication::applicationDirPath() ); if ( dir.exists( "shaders" ) ) dir.cd( "shaders" ); #ifdef Q_OS_LINUX else if ( dir.exists( "/usr/share/nifskope/shaders" ) ) dir.cd( "/usr/share/nifskope/shaders" ); #endif dir.setNameFilters( { "*.vert" } ); for ( const QString& name : dir.entryList() ) { Shader * shader = new Shader( name, GL_VERTEX_SHADER, fn ); shader->load( dir.filePath( name ) ); shaders.insert( name, shader ); } dir.setNameFilters( { "*.frag" } ); for ( const QString& name : dir.entryList() ) { Shader * shader = new Shader( name, GL_FRAGMENT_SHADER, fn ); shader->load( dir.filePath( name ) ); shaders.insert( name, shader ); } dir.setNameFilters( { "*.prog" } ); for ( const QString& name : dir.entryList() ) { Program * program = new Program( name, fn ); program->load( dir.filePath( name ), this ); program->setUniformLocations(); programs.insert( name, program ); } } void Renderer::releaseShaders() { if ( !shader_ready ) return; qDeleteAll( programs ); programs.clear(); qDeleteAll( shaders ); shaders.clear(); } QString Renderer::setupProgram( Shape * mesh, const QString & hint ) { PropertyList props; mesh->activeProperties( props ); if ( !shader_ready || hint.isNull() || (mesh->scene->options & Scene::DisableShaders) || (mesh->scene->visMode & Scene::VisSilhouette) || (mesh->nifVersion == 0) ) { setupFixedFunction( mesh, props ); return {}; } QVector<QModelIndex> iBlocks; iBlocks << mesh->index(); iBlocks << mesh->iData; for ( Property * p : props.list() ) { iBlocks.append( p->index() ); } if ( !hint.isEmpty() ) { Program * program = programs.value( hint ); if ( program && program->status && setupProgram( program, mesh, props, iBlocks, false ) ) return program->name; } for ( Program * program : programs ) { 
if ( program->status && setupProgram( program, mesh, props, iBlocks ) ) return program->name; } stopProgram(); setupFixedFunction( mesh, props ); return {}; } void Renderer::stopProgram() { if ( shader_ready ) { fn->glUseProgram( 0 ); } resetTextureUnits(); } void Renderer::Program::uni1f( UniformType var, float x ) { f->glUniform1f( uniformLocations[var], x ); } void Renderer::Program::uni2f( UniformType var, float x, float y ) { f->glUniform2f( uniformLocations[var], x, y ); } void Renderer::Program::uni3f( UniformType var, float x, float y, float z ) { f->glUniform3f( uniformLocations[var], x, y, z ); } void Renderer::Program::uni4f( UniformType var, float x, float y, float z, float w ) { f->glUniform4f( uniformLocations[var], x, y, z, w ); } void Renderer::Program::uni1i( UniformType var, int val ) { f->glUniform1i( uniformLocations[var], val ); } void Renderer::Program::uni3m( UniformType var, const Matrix & val ) { if ( uniformLocations[var] >= 0 ) f->glUniformMatrix3fv( uniformLocations[var], 1, 0, val.data() ); } void Renderer::Program::uni4m( UniformType var, const Matrix4 & val ) { if ( uniformLocations[var] >= 0 ) f->glUniformMatrix4fv( uniformLocations[var], 1, 0, val.data() ); } bool Renderer::Program::uniSampler( BSShaderLightingProperty * bsprop, UniformType var, int textureSlot, int & texunit, const QString & alternate, uint clamp, const QString & forced ) { GLint uniSamp = uniformLocations[var]; if ( uniSamp >= 0 ) { QString fname = (forced.isEmpty()) ? 
bsprop->fileName( textureSlot ) : forced; if ( fname.isEmpty() ) fname = alternate; if ( !fname.isEmpty() && (!activateTextureUnit( texunit ) || !(bsprop->bind( textureSlot, fname, TexClampMode(clamp) ) || bsprop->bind( textureSlot, alternate, TexClampMode(3) ))) ) return uniSamplerBlank( var, texunit ); f->glUniform1i( uniSamp, texunit++ ); return true; } return true; } bool Renderer::Program::uniSamplerBlank( UniformType var, int & texunit ) { GLint uniSamp = uniformLocations[var]; if ( uniSamp >= 0 ) { if ( !activateTextureUnit( texunit ) ) return false; glBindTexture( GL_TEXTURE_2D, 0 ); f->glUniform1i( uniSamp, texunit++ ); return true; } return true; } static QString white = "shaders/white.dds"; static QString black = "shaders/black.dds"; static QString gray = "shaders/gray.dds"; static QString magenta = "shaders/magenta.dds"; static QString default_n = "shaders/default_n.dds"; static QString cube = "shaders/cubemap.dds"; bool Renderer::setupProgram( Program * prog, Shape * mesh, const PropertyList & props, const QVector<QModelIndex> & iBlocks, bool eval ) { const NifModel * nif = qobject_cast<const NifModel *>( mesh->index().model() ); if ( !mesh->index().isValid() || !nif ) return false; if ( eval && !prog->conditions.eval( nif, iBlocks ) ) return false; fn->glUseProgram( prog->id ); auto opts = mesh->scene->options; auto vis = mesh->scene->visMode; Material * mat = nullptr; if ( mesh->bslsp && mesh->bslsp->mat() ) mat = mesh->bslsp->mat(); else if ( mesh->bsesp && mesh->bsesp->mat() ) mat = mesh->bsesp->mat(); // texturing TexturingProperty * texprop = props.get<TexturingProperty>(); BSShaderLightingProperty * bsprop = props.get<BSShaderLightingProperty>(); // TODO: BSLSP has been split off from BSShaderLightingProperty so it needs // to be accessible from here TexClampMode clamp = TexClampMode::WRAP_S_WRAP_T; if ( mesh->bslsp ) clamp = mesh->bslsp->getClampMode(); int texunit = 0; if ( bsprop ) { QString forced; if ( (opts & Scene::DoLighting) && (vis & 
Scene::VisNormalsOnly) ) forced = white; QString alt = white; if ( opts & Scene::DoErrorColor ) alt = magenta; bool result = prog->uniSampler( bsprop, SAMP_BASE, 0, texunit, alt, clamp, forced ); } else { GLint uniBaseMap = prog->uniformLocations[SAMP_BASE]; if ( uniBaseMap >= 0 && (texprop || (bsprop && mesh->bslsp)) ) { if ( !activateTextureUnit( texunit ) || (texprop && !texprop->bind( 0 )) ) prog->uniSamplerBlank( SAMP_BASE, texunit ); else fn->glUniform1i( uniBaseMap, texunit++ ); } } if ( bsprop && mesh->bslsp ) { QString forced; if ( !(opts & Scene::DoLighting) ) forced = default_n; prog->uniSampler( bsprop, SAMP_NORMAL, 1, texunit, default_n, clamp, forced ); } else if ( !bsprop ) { GLint uniNormalMap = prog->uniformLocations[SAMP_NORMAL]; if ( uniNormalMap >= 0 && texprop ) { bool result = true; if ( texprop ) { QString fname = texprop->fileName( 0 ); if ( !fname.isEmpty() ) { int pos = fname.lastIndexOf( "_" ); if ( pos >= 0 ) fname = fname.left( pos ) + "_n.dds"; else if ( (pos = fname.lastIndexOf( "." )) >= 0 ) fname = fname.insert( pos, "_n" ); } if ( !fname.isEmpty() && (!activateTextureUnit( texunit ) || !texprop->bind( 0, fname )) ) result = false; } if ( !result ) prog->uniSamplerBlank( SAMP_NORMAL, texunit ); else fn->glUniform1i( uniNormalMap, texunit++ ); } } if ( bsprop && mesh->bslsp ) { prog->uniSampler( bsprop, SAMP_GLOW, 2, texunit, black, clamp ); } else if ( !bsprop ) { GLint uniGlowMap = prog->uniformLocations[SAMP_GLOW]; if ( uniGlowMap >= 0 && texprop ) { bool result = true; if ( texprop ) { QString fname = texprop->fileName( 0 ); if ( !fname.isEmpty() ) { int pos = fname.lastIndexOf( "_" ); if ( pos >= 0 ) fname = fname.left( pos ) + "_g.dds"; else if ( (pos = fname.lastIndexOf( "." 
)) >= 0 ) fname = fname.insert( pos, "_g" ); } if ( !fname.isEmpty() && (!activateTextureUnit( texunit ) || !texprop->bind( 0, fname )) ) result = false; } if ( !result ) prog->uniSamplerBlank( SAMP_GLOW, texunit ); else fn->glUniform1i( uniGlowMap, texunit++ ); } } // BSLightingShaderProperty if ( mesh->bslsp ) { prog->uni1f( LIGHT_EFF1, mesh->bslsp->getLightingEffect1() ); prog->uni1f( LIGHT_EFF2, mesh->bslsp->getLightingEffect2() ); prog->uni1f( ALPHA, mesh->bslsp->getAlpha() ); auto uvS = mesh->bslsp->getUvScale(); prog->uni2f( UV_SCALE, uvS.x, uvS.y ); auto uvO = mesh->bslsp->getUvOffset(); prog->uni2f( UV_OFFSET, uvO.x, uvO.y ); prog->uni4m( MAT_VIEW, mesh->viewTrans().toMatrix4() ); prog->uni4m( MAT_WORLD, mesh->worldTrans().toMatrix4() ); prog->uni1i( G2P_COLOR, mesh->bslsp->greyscaleColor ); prog->uniSampler( bsprop, SAMP_GRAYSCALE, 3, texunit, "", TexClampMode::MIRRORED_S_MIRRORED_T ); prog->uni1i( HAS_TINT_COLOR, mesh->bslsp->hasTintColor ); if ( mesh->bslsp->hasTintColor ) { auto tC = mesh->bslsp->getTintColor(); prog->uni3f( TINT_COLOR, tC.red(), tC.green(), tC.blue() ); } prog->uni1i( HAS_MAP_DETAIL, mesh->bslsp->hasDetailMask ); prog->uniSampler( bsprop, SAMP_DETAIL, 3, texunit, "shaders/blankdetailmap.dds", clamp ); prog->uni1i( HAS_MAP_TINT, mesh->bslsp->hasTintMask ); prog->uniSampler( bsprop, SAMP_TINT, 6, texunit, gray, clamp ); // Rim & Soft params prog->uni1i( HAS_SOFT, mesh->bslsp->hasSoftlight ); prog->uni1i( HAS_RIM, mesh->bslsp->hasRimlight ); prog->uniSampler( bsprop, SAMP_LIGHT, 2, texunit, default_n, clamp ); // Backlight params prog->uni1i( HAS_MAP_BACK, mesh->bslsp->hasBacklight ); prog->uniSampler( bsprop, SAMP_BACKLIGHT, 7, texunit, default_n, clamp ); // Glow params if ( (opts & Scene::DoGlow) && (opts & Scene::DoLighting) && mesh->bslsp->hasEmittance ) prog->uni1f( GLOW_MULT, mesh->bslsp->getEmissiveMult() ); else prog->uni1f( GLOW_MULT, 0 ); prog->uni1i( HAS_EMIT, mesh->bslsp->hasEmittance ); prog->uni1i( HAS_MAP_GLOW, 
mesh->bslsp->hasGlowMap ); auto emC = mesh->bslsp->getEmissiveColor(); prog->uni3f( GLOW_COLOR, emC.red(), emC.green(), emC.blue() ); // Specular params float s = ((opts & Scene::DoSpecular) && (opts & Scene::DoLighting)) ? mesh->bslsp->getSpecularStrength() : 0.0; prog->uni1f( SPEC_SCALE, s ); // Assure specular power does not break the shaders auto gloss = mesh->bslsp->getSpecularGloss(); prog->uni1f( SPEC_GLOSS, gloss ); auto spec = mesh->bslsp->getSpecularColor(); prog->uni3f( SPEC_COLOR, spec.red(), spec.green(), spec.blue() ); prog->uni1i( HAS_MAP_SPEC, mesh->bslsp->hasSpecularMap ); if ( mesh->bslsp->hasSpecularMap && (mesh->nifVersion == 130 || !mesh->bslsp->hasBacklight) ) prog->uniSampler( bsprop, SAMP_SPECULAR, 7, texunit, white, clamp ); else prog->uniSampler( bsprop, SAMP_SPECULAR, 7, texunit, black, clamp ); if ( mesh->nifVersion == 130 ) { prog->uni1i( DOUBLE_SIDE, mesh->bslsp->getIsDoubleSided() ); prog->uni1f( G2P_SCALE, mesh->bslsp->paletteScale ); prog->uni1f( SS_ROLLOFF, mesh->bslsp->getLightingEffect1() ); prog->uni1f( POW_FRESNEL, mesh->bslsp->fresnelPower ); prog->uni1f( POW_RIM, mesh->bslsp->rimPower ); prog->uni1f( POW_BACK, mesh->bslsp->backlightPower ); } // Multi-Layer prog->uniSampler( bsprop, SAMP_INNER, 6, texunit, default_n, clamp ); if ( mesh->bslsp->hasMultiLayerParallax ) { auto inS = mesh->bslsp->getInnerTextureScale(); prog->uni2f( INNER_SCALE, inS.x, inS.y ); prog->uni1f( INNER_THICK, mesh->bslsp->getInnerThickness() ); prog->uni1f( OUTER_REFR, mesh->bslsp->getOuterRefractionStrength() ); prog->uni1f( OUTER_REFL, mesh->bslsp->getOuterReflectionStrength() ); } // Environment Mapping prog->uni1i( HAS_MAP_CUBE, mesh->bslsp->hasEnvironmentMap ); prog->uni1i( HAS_MASK_ENV, mesh->bslsp->useEnvironmentMask ); float refl = 0.0; if ( mesh->bslsp->hasEnvironmentMap && (opts & Scene::DoCubeMapping) && (opts & Scene::DoLighting) ) refl = mesh->bslsp->getEnvironmentReflection(); prog->uni1f( ENV_REFLECTION, refl ); // Always bind cube 
regardless of shader settings GLint uniCubeMap = prog->uniformLocations[SAMP_CUBE]; if ( uniCubeMap >= 0 ) { QString fname = bsprop->fileName( 4 ); if ( fname.isEmpty() ) fname = cube; if ( !activateTextureUnit( texunit ) || !bsprop->bindCube( 4, fname ) ) if ( !activateTextureUnit( texunit ) || !bsprop->bindCube( 4, cube ) ) return false; fn->glUniform1i( uniCubeMap, texunit++ ); } // Always bind mask regardless of shader settings prog->uniSampler( bsprop, SAMP_ENV_MASK, 5, texunit, white, clamp ); // Parallax prog->uni1i( HAS_MAP_HEIGHT, mesh->bslsp->hasHeightMap ); prog->uniSampler( bsprop, SAMP_HEIGHT, 3, texunit, gray, clamp ); } // BSEffectShaderProperty if ( mesh->bsesp ) { prog->uni4m( MAT_WORLD, mesh->worldTrans().toMatrix4() ); clamp = mesh->bsesp->getClampMode(); prog->uniSampler( bsprop, SAMP_BASE, 0, texunit, white, clamp ); prog->uni1i( DOUBLE_SIDE, mesh->bsesp->getIsDoubleSided() ); auto uvS = mesh->bsesp->getUvScale(); prog->uni2f( UV_SCALE, uvS.x, uvS.y ); auto uvO = mesh->bsesp->getUvOffset(); prog->uni2f( UV_OFFSET, uvO.x, uvO.y ); prog->uni1i( HAS_MAP_BASE, mesh->bsesp->hasSourceTexture ); prog->uni1i( HAS_MAP_G2P, mesh->bsesp->hasGreyscaleMap ); prog->uni1i( G2P_ALPHA, mesh->bsesp->greyscaleAlpha ); prog->uni1i( G2P_COLOR, mesh->bsesp->greyscaleColor ); prog->uni1i( USE_FALLOFF, mesh->bsesp->useFalloff ); prog->uni1i( HAS_RGBFALL, mesh->bsesp->hasRGBFalloff ); prog->uni1i( HAS_WEAP_BLOOD, mesh->bsesp->hasWeaponBlood ); // Glow params auto emC = mesh->bsesp->getEmissiveColor(); prog->uni4f( GLOW_COLOR, emC.red(), emC.green(), emC.blue(), emC.alpha() ); prog->uni1f( GLOW_MULT, mesh->bsesp->getEmissiveMult() ); // Falloff params prog->uni4f( FALL_PARAMS, mesh->bsesp->falloff.startAngle, mesh->bsesp->falloff.stopAngle, mesh->bsesp->falloff.startOpacity, mesh->bsesp->falloff.stopOpacity ); prog->uni1f( FALL_DEPTH, mesh->bsesp->falloff.softDepth ); // BSEffectShader textures prog->uniSampler( bsprop, SAMP_GRAYSCALE, 1, texunit, "", 
TexClampMode::MIRRORED_S_MIRRORED_T ); if ( mesh->nifVersion == 130 ) { prog->uni1f( LIGHT_INF, mesh->bsesp->getLightingInfluence() ); prog->uni1i( HAS_MAP_NORMAL, mesh->bsesp->hasNormalMap && (opts & Scene::DoLighting) ); prog->uniSampler( bsprop, SAMP_NORMAL, 3, texunit, default_n, clamp ); prog->uni1i( HAS_MAP_CUBE, mesh->bsesp->hasEnvMap ); prog->uni1i( HAS_MASK_ENV, mesh->bsesp->hasEnvMask ); float refl = 0.0; if ( mesh->bsesp->hasEnvMap && (opts & Scene::DoCubeMapping) && (opts & Scene::DoLighting) ) refl = mesh->bsesp->getEnvironmentReflection(); prog->uni1f( ENV_REFLECTION, refl ); GLint uniCubeMap = prog->uniformLocations[SAMP_CUBE]; if ( uniCubeMap >= 0 ) { QString fname = bsprop->fileName( 2 ); if ( fname.isEmpty() ) fname = cube; if ( !activateTextureUnit( texunit ) || !bsprop->bindCube( 2, fname ) ) if ( !activateTextureUnit( texunit ) || !bsprop->bindCube( 2, cube ) ) return false; fn->glUniform1i( uniCubeMap, texunit++ ); } prog->uniSampler( bsprop, SAMP_SPECULAR, 4, texunit, white, clamp ); } } // Defaults for uniforms in older meshes if ( !mesh->bsesp && !mesh->bslsp ) { prog->uni2f( UV_SCALE, 1.0, 1.0 ); prog->uni2f( UV_OFFSET, 0.0, 0.0 ); } QMapIterator<int, Program::CoordType> itx( prog->texcoords ); while ( itx.hasNext() ) { itx.next(); if ( !activateTextureUnit( itx.key() ) ) return false; auto it = itx.value(); if ( it == Program::CT_TANGENT ) { if ( mesh->transTangents.count() ) { glEnableClientState( GL_TEXTURE_COORD_ARRAY ); glTexCoordPointer( 3, GL_FLOAT, 0, mesh->transTangents.constData() ); } else if ( mesh->tangents.count() ) { glEnableClientState( GL_TEXTURE_COORD_ARRAY ); glTexCoordPointer( 3, GL_FLOAT, 0, mesh->tangents.constData() ); } else { return false; } } else if ( it == Program::CT_BITANGENT ) { if ( mesh->transBitangents.count() ) { glEnableClientState( GL_TEXTURE_COORD_ARRAY ); glTexCoordPointer( 3, GL_FLOAT, 0, mesh->transBitangents.constData() ); } else if ( mesh->bitangents.count() ) { glEnableClientState( 
GL_TEXTURE_COORD_ARRAY ); glTexCoordPointer( 3, GL_FLOAT, 0, mesh->bitangents.constData() ); } else { return false; } } else if ( texprop ) { int txid = it; if ( txid < 0 ) return false; int set = texprop->coordSet( txid ); if ( set < 0 || !(set < mesh->coords.count()) || !mesh->coords[set].count() ) return false; glEnableClientState( GL_TEXTURE_COORD_ARRAY ); glTexCoordPointer( 2, GL_FLOAT, 0, mesh->coords[set].constData() ); } else if ( bsprop ) { int txid = it; if ( txid < 0 ) return false; int set = 0; if ( set < 0 || !(set < mesh->coords.count()) || !mesh->coords[set].count() ) return false; glEnableClientState( GL_TEXTURE_COORD_ARRAY ); glTexCoordPointer( 2, GL_FLOAT, 0, mesh->coords[set].constData() ); } } // setup lighting //glEnable( GL_LIGHTING ); // setup blending glProperty( props.get<AlphaProperty>() ); if ( mat && (mesh->scene->options & Scene::DoBlending) ) { static const GLenum blendMap[11] = { GL_ONE, GL_ZERO, GL_SRC_COLOR, GL_ONE_MINUS_SRC_COLOR, GL_DST_COLOR, GL_ONE_MINUS_DST_COLOR, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_DST_ALPHA, GL_ONE_MINUS_DST_ALPHA, GL_SRC_ALPHA_SATURATE }; if ( mat && mat->bAlphaBlend ) { glDisable( GL_POLYGON_OFFSET_FILL ); glEnable( GL_BLEND ); glBlendFunc( blendMap[mat->iAlphaSrc], blendMap[mat->iAlphaDst] ); } else { glDisable( GL_BLEND ); } if ( mat && mat->bAlphaTest ) { glDisable( GL_POLYGON_OFFSET_FILL ); glEnable( GL_ALPHA_TEST ); glAlphaFunc( GL_GREATER, float( mat->iAlphaTestRef ) / 255.0 ); } else { glDisable( GL_ALPHA_TEST ); } if ( mat && mat->bDecal ) { glEnable( GL_POLYGON_OFFSET_FILL ); glPolygonOffset( -1.0f, -1.0f ); } } // BSESP/BSLSP do not always need an NiAlphaProperty, and appear to override it at times if ( !mat && mesh->translucent ) { glEnable( GL_BLEND ); glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA ); // If mesh is alpha tested, override threshold glAlphaFunc( GL_GREATER, 0.1f ); } glDisable( GL_COLOR_MATERIAL ); if ( mesh->nifVersion < 83 ) { // setup vertex colors //glProperty( 
props.get< VertexColorProperty >(), glIsEnabled( GL_COLOR_ARRAY ) ); // setup material glProperty( props.get<MaterialProperty>(), props.get<SpecularProperty>() ); // setup z buffer glProperty( props.get<ZBufferProperty>() ); // setup stencil glProperty( props.get<StencilProperty>() ); // wireframe ? glProperty( props.get<WireframeProperty>() ); } else { glEnable( GL_DEPTH_TEST ); glDepthMask( GL_TRUE ); glDepthFunc( GL_LEQUAL ); glEnable( GL_CULL_FACE ); glCullFace( GL_BACK ); glPolygonMode( GL_FRONT_AND_BACK, GL_FILL ); } if ( !mesh->depthTest ) { glDisable( GL_DEPTH_TEST ); } if ( !mesh->depthWrite || mesh->translucent ) { glDepthMask( GL_FALSE ); } return true; } void Renderer::setupFixedFunction( Shape * mesh, const PropertyList & props ) { // setup lighting glEnable( GL_LIGHTING ); // Disable specular because it washes out vertex colors // at perpendicular viewing angles float color[4] = { 0, 0, 0, 0 }; glMaterialfv( GL_FRONT_AND_BACK, GL_SPECULAR, color ); glLightfv( GL_LIGHT0, GL_SPECULAR, color ); // setup blending glProperty( props.get<AlphaProperty>() ); // setup vertex colors glProperty( props.get<VertexColorProperty>(), glIsEnabled( GL_COLOR_ARRAY ) ); // setup material glProperty( props.get<MaterialProperty>(), props.get<SpecularProperty>() ); // setup texturing //glProperty( props.get< TexturingProperty >() ); // setup z buffer glProperty( props.get<ZBufferProperty>() ); if ( !mesh->depthTest ) { glDisable( GL_DEPTH_TEST ); } if ( !mesh->depthWrite ) { glDepthMask( GL_FALSE ); } // setup stencil glProperty( props.get<StencilProperty>() ); // wireframe ? 
glProperty( props.get<WireframeProperty>() ); // normalize if ( glIsEnabled( GL_NORMAL_ARRAY ) ) glEnable( GL_NORMALIZE ); else glDisable( GL_NORMALIZE ); // setup texturing if ( !(mesh->scene->options & Scene::DoTexturing) ) return; if ( TexturingProperty * texprop = props.get<TexturingProperty>() ) { // standard multi texturing property int stage = 0; if ( texprop->bind( 1, mesh->coords, stage ) ) { // dark stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_ALPHA, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } if ( texprop->bind( 0, mesh->coords, stage ) ) { // base stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_ALPHA, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } if ( texprop->bind( 2, mesh->coords, stage ) ) { // detail stage++; glTexEnvi( GL_TEXTURE_ENV, 
GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_ALPHA, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 2.0 ); } if ( texprop->bind( 6, mesh->coords, stage ) ) { // decal 0 stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_INTERPOLATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_ALPHA ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_REPLACE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } if ( texprop->bind( 7, mesh->coords, stage ) ) { // decal 1 stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_INTERPOLATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_ALPHA ); 
glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_REPLACE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } if ( texprop->bind( 8, mesh->coords, stage ) ) { // decal 2 stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_INTERPOLATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_ALPHA ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_REPLACE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } if ( texprop->bind( 9, mesh->coords, stage ) ) { // decal 3 stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_INTERPOLATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_ALPHA ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_REPLACE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } if ( texprop->bind( 4, mesh->coords, stage ) ) { // glow stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_ADD ); glTexEnvi( GL_TEXTURE_ENV, 
GL_SOURCE0_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_REPLACE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } } else if ( TextureProperty * texprop = props.get<TextureProperty>() ) { // old single texture property texprop->bind( mesh->coords ); } else if ( BSShaderLightingProperty * texprop = props.get<BSShaderLightingProperty>() ) { // standard multi texturing property int stage = 0; if ( texprop->bind( 0, mesh->coords ) ) { //, mesh->coords, stage ) ) // base stage++; glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR ); glTexEnvi( GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_MODULATE ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, GL_PREVIOUS ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA ); glTexEnvi( GL_TEXTURE_ENV, GL_SOURCE1_ALPHA, GL_TEXTURE ); glTexEnvi( GL_TEXTURE_ENV, GL_OPERAND1_ALPHA, GL_SRC_ALPHA ); glTexEnvf( GL_TEXTURE_ENV, GL_RGB_SCALE, 1.0 ); } } else { glDisable( GL_TEXTURE_2D ); } }
{ "pile_set_name": "Github" }
# -*- coding: utf-8 -*- ''' Created by auto_sdk on 2017.09.05 http://open.taobao.com/api.htm?docId=24518&docType=2 ''' from top.api.base import RestApi class TbkItemInfoGetRequest(RestApi): def __init__(self, domain='gw.api.taobao.com', port=80): RestApi.__init__(self, domain, port) self.fields = None self.num_iids = None self.platform = None def getapiname(self): return 'taobao.tbk.item.info.get'
{ "pile_set_name": "Github" }
/* * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. * Copyright (C) 2008, 2009 Anthony Ricaud <[email protected]> * Copyright (C) 2009 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/**
 * A reusable time-axis widget: renders vertical divider lines, a label bar
 * above them, optional event-divider markers, and left/right "curtain"
 * overlays.  Positions are computed by a caller-supplied Calculator
 * (see WebInspector.TimelineGrid.Calculator below).
 * @constructor
 */
WebInspector.TimelineGrid = function()
{
    this.element = document.createElement("div");

    // Container for per-item graph rows (populated by the owning view).
    this._itemsGraphsElement = document.createElement("div");
    this._itemsGraphsElement.id = "resources-graphs";
    this.element.appendChild(this._itemsGraphsElement);

    // Vertical grid lines drawn over the graph area.
    this._dividersElement = this.element.createChild("div", "resources-dividers");

    // Header strip: event markers plus the divider time labels.
    this._gridHeaderElement = document.createElement("div");
    this._eventDividersElement = this._gridHeaderElement.createChild("div", "resources-event-dividers");
    this._dividersLabelBarElement = this._gridHeaderElement.createChild("div", "resources-dividers-label-bar");
    this.element.appendChild(this._gridHeaderElement);

    this._leftCurtainElement = this.element.createChild("div", "timeline-cpu-curtain-left");
    this._rightCurtainElement = this.element.createChild("div", "timeline-cpu-curtain-right");

    // Time span between adjacent grid lines; recomputed by updateDividers().
    this._gridSliceTime = 1;
}

WebInspector.TimelineGrid.prototype = {
    get itemsGraphsElement()
    {
        return this._itemsGraphsElement;
    },

    get dividersElement()
    {
        return this._dividersElement;
    },

    get dividersLabelBarElement()
    {
        return this._dividersLabelBarElement;
    },

    get gridHeaderElement()
    {
        return this._gridHeaderElement;
    },

    get gridSliceTime()
    {
        return this._gridSliceTime;
    },

    // Drops all divider lines and their labels (they are recreated lazily
    // by updateDividers()).
    removeDividers: function()
    {
        this._dividersElement.removeChildren();
        this._dividersLabelBarElement.removeChildren();
    },

    /**
     * Recomputes the grid slice and (re)positions divider lines and labels.
     * Existing divider DOM nodes are reused; surplus nodes are removed.
     * Always returns true.
     */
    updateDividers: function(calculator)
    {
        const minGridSlicePx = 64; // minimal distance between grid lines.
        const gridFreeZoneAtLeftPx = 50;

        var dividersElementClientWidth = this._dividersElement.clientWidth;
        var dividersCount = dividersElementClientWidth / minGridSlicePx;
        var gridSliceTime = calculator.boundarySpan() / dividersCount;
        var pixelsPerTime = dividersElementClientWidth / calculator.boundarySpan();

        // Align gridSliceTime to a nearest round value.
        // We allow spans that fit into the formula: span = (1|2|5)x10^n,
        // e.g.: ...  .1  .2  .5  1  2  5  10  20  50 ...
        // After a span has been chosen make grid lines at multiples of the span.

        var logGridSliceTime = Math.ceil(Math.log(gridSliceTime) / Math.LN10);
        gridSliceTime = Math.pow(10, logGridSliceTime);
        if (gridSliceTime * pixelsPerTime >= 5 * minGridSlicePx)
            gridSliceTime = gridSliceTime / 5;
        if (gridSliceTime * pixelsPerTime >= 2 * minGridSlicePx)
            gridSliceTime = gridSliceTime / 2;
        this._gridSliceTime = gridSliceTime;

        // First divider sits on the first slice multiple at or after the
        // minimum boundary (measured from the calculator's zero time).
        var firstDividerTime = Math.ceil((calculator.minimumBoundary() - calculator.zeroTime()) / gridSliceTime) * gridSliceTime + calculator.zeroTime();
        var lastDividerTime = calculator.maximumBoundary();
        // Add some extra space past the right boundary as the rightmost divider label text
        // may be partially shown rather than just pop up when a new rightmost divider gets into the view.
        if (calculator.paddingLeft > 0)
            lastDividerTime = lastDividerTime + minGridSlicePx / pixelsPerTime;
        dividersCount = Math.ceil((lastDividerTime - firstDividerTime) / gridSliceTime);

        // Reuse divider elements and labels.
        var divider = this._dividersElement.firstChild;
        var dividerLabelBar = this._dividersLabelBarElement.firstChild;

        var skipLeftmostDividers = calculator.paddingLeft === 0;

        // Guard: a zero/NaN slice would make the loop bounds meaningless.
        if (!gridSliceTime)
            dividersCount = 0;

        for (var i = 0; i < dividersCount; ++i) {
            var left = calculator.computePosition(firstDividerTime + gridSliceTime * i);
            if (skipLeftmostDividers && left < gridFreeZoneAtLeftPx)
                continue;

            // Lazily create a divider/label pair when we run out of reusable nodes.
            if (!divider) {
                divider = document.createElement("div");
                divider.className = "resources-divider";
                this._dividersElement.appendChild(divider);

                dividerLabelBar = document.createElement("div");
                dividerLabelBar.className = "resources-divider";
                var label = document.createElement("div");
                label.className = "resources-divider-label";
                dividerLabelBar._labelElement = label;
                dividerLabelBar.appendChild(label);
                this._dividersLabelBarElement.appendChild(dividerLabelBar);
            }

            dividerLabelBar._labelElement.textContent = calculator.formatTime(firstDividerTime + gridSliceTime * i - calculator.minimumBoundary());
            var percentLeft = 100 * left / dividersElementClientWidth;
            divider.style.left = percentLeft + "%";
            dividerLabelBar.style.left = percentLeft + "%";

            divider = divider.nextSibling;
            dividerLabelBar = dividerLabelBar.nextSibling;
        }

        // Remove extras.
        while (divider) {
            var nextDivider = divider.nextSibling;
            this._dividersElement.removeChild(divider);
            divider = nextDivider;
        }
        while (dividerLabelBar) {
            var nextDivider = dividerLabelBar.nextSibling;
            this._dividersLabelBarElement.removeChild(dividerLabelBar);
            dividerLabelBar = nextDivider;
        }
        return true;
    },

    addEventDivider: function(divider)
    {
        this._eventDividersElement.appendChild(divider);
    },

    // Batch-append event dividers; the container is detached and re-attached
    // around the loop, presumably to avoid repeated reflows -- confirm.
    addEventDividers: function(dividers)
    {
        this._gridHeaderElement.removeChild(this._eventDividersElement);
        for (var i = 0; i < dividers.length; ++i) {
            if (dividers[i])
                this._eventDividersElement.appendChild(dividers[i]);
        }
        this._gridHeaderElement.appendChild(this._eventDividersElement);
    },

    removeEventDividers: function()
    {
        this._eventDividersElement.removeChildren();
    },

    hideEventDividers: function()
    {
        this._eventDividersElement.classList.add("hidden");
    },

    showEventDividers: function()
    {
        this._eventDividersElement.classList.remove("hidden");
    },

    hideCurtains: function()
    {
        this._leftCurtainElement.classList.add("hidden");
        this._rightCurtainElement.classList.add("hidden");
    },

    /**
     * Reveals the curtain overlays on both sides of a gap.
     * @param {number} gapOffset
     * @param {number} gapWidth
     */
    showCurtains: function(gapOffset, gapWidth)
    {
        this._leftCurtainElement.style.width = gapOffset + "px";
        this._leftCurtainElement.classList.remove("hidden");
        this._rightCurtainElement.style.left = (gapOffset + gapWidth) + "px";
        this._rightCurtainElement.classList.remove("hidden");
    },

    // Keeps dividers/curtains pinned while the owning view scrolls.
    // Note: dividersTop is currently unused by this implementation.
    setScrollAndDividerTop: function(scrollTop, dividersTop)
    {
        this._dividersElement.style.top = scrollTop + "px";
        this._leftCurtainElement.style.top = scrollTop + "px";
        this._rightCurtainElement.style.top = scrollTop + "px";
    }
}

/**
 * Mapping contract between times and horizontal pixel positions, supplied
 * by the view that owns the grid.
 * @interface
 */
WebInspector.TimelineGrid.Calculator = function() { }

WebInspector.TimelineGrid.Calculator.prototype = {
    /**
     * @param {number} time
     * @return {number}
     */
    computePosition: function(time) { return 0; },

    /**
     * @param {number} time
     * @param {boolean=} hires
     * @return {string}
     */
    formatTime: function(time, hires) { },

    /** @return {number} */
    minimumBoundary: function() { },

    /** @return {number} */
    zeroTime: function() { },

    /** @return {number} */
    maximumBoundary: function() { },

    /** @return {number} */
    boundarySpan: function() { }
}
{ "pile_set_name": "Github" }
using System;
using System.Collections.Generic;
using System.Text;

namespace NHibernate.Test.NHSpecificTest.NH1579
{
	/// <summary>
	/// Concrete <see cref="Fruit"/> used by the NH1579 regression test.
	/// </summary>
	public class Orange : Fruit
	{
		/// <summary>
		/// Parameterless constructor; protected visibility suggests it exists
		/// for persistence-framework instantiation rather than test code.
		/// </summary>
		protected Orange()
		{
		}

		/// <summary>
		/// Creates an orange held by the given container entity.
		/// </summary>
		/// <param name="container">The entity that contains this fruit.</param>
		public Orange(Entity container)
			: base(container)
		{
		}
	}
}
{ "pile_set_name": "Github" }
$NetBSD: distinfo,v 1.2 2020/04/26 12:21:16 markd Exp $ SHA1 (mathalpha.r52305.tar.xz) = 8ae17c8cf01e21234519d2a947ef4af83ba54b30 RMD160 (mathalpha.r52305.tar.xz) = 891edb22fd48f0fcffb15ee01f53f9315acc4568 SHA512 (mathalpha.r52305.tar.xz) = c6b653e5a0b7788ac36ea9ef6ce07dd4fd19a75a82c4115cac5921849477839387b0cccac469dd74b9f4221315ca741c49304eb76213ecacb97dc7e218ac4cc9 Size (mathalpha.r52305.tar.xz) = 4368 bytes
{ "pile_set_name": "Github" }
const path = require('path')
const fs = require('fs-extra')
const build = require('../build')
const cheerio = require('cheerio')
const express = require('express')
const puppeteer = require('puppeteer')

// All paths are relative to the project-blog fixture that gets built once
// in beforeAll and torn down in afterAll.
const context = path.join(__dirname, '__fixtures__', 'project-blog')
const content = file => fs.readFileSync(path.join(context, file), 'utf8')
const exists = file => fs.existsSync(path.join(context, file))
const load = file => cheerio.load(content(file))
const app = express()

let browser, page, server

// Build the fixture site, serve its dist/ output on :8080, and open a
// browser page.  20s timeout because the static build is slow.
beforeAll(async () => {
  await build(context)

  app.use(express.static(path.join(context, 'dist')))

  // NOTE(review): headless:false opens a visible browser window; CI runs
  // usually want headless:true -- confirm this is intentional.
  browser = await puppeteer.launch({ headless: false })
  page = await browser.newPage()
  server = app.listen(8080)
}, 20000)

afterAll(async () => {
  server && await server.close()
  browser && await browser.close()

  // Remove build output and caches so repeated runs start clean.
  await fs.remove(path.join(context, 'dist'))
  await fs.remove(path.join(context, 'src', '.temp'))
  await fs.remove(path.join(context, 'node_modules', '.cache'))
  await fs.remove(path.join(context, '.cache'))
})

// --- Static HTML assertions (inspect dist/ output with cheerio) ---

test('render pagination', () => {
  const $blog = load('dist/index.html')
  const $blog2 = load('dist/2/index.html')

  expect($blog('.current-page').text()).toEqual('1')
  expect($blog2('.current-page').text()).toEqual('2')
  expect($blog('nav[role="navigation"] a').get().length).toEqual(3)
  expect($blog('a.active--exact.active').attr('href')).toEqual('/')
  expect($blog('a.active--exact.active').attr('aria-current')).toEqual('true')
  expect($blog('.post-4 span').text()).toEqual('Fourth post')
  expect($blog('.post-4 a').attr('href')).toEqual('/fourth-post/')
  expect($blog('.post-3 span').text()).toEqual('Third post')
  expect($blog('.post-3 a').attr('href')).toEqual('/third-post/')
  expect($blog2('.post-2 span').text()).toEqual('Second post')
  expect($blog2('.post-2 a').attr('href')).toEqual('/second-post/')
  expect($blog2('.post-1 span').text()).toEqual('First post')
  expect($blog2('.post-1 a').attr('href')).toEqual('/first-post/')
  // Neither the blog index nor the category listing should paginate
  // past page 2 with this fixture's post count.
  expect(exists('dist/category/first/3/index.html')).toBeFalsy()
  expect(exists('dist/3/index.html')).toBeFalsy()
})

test('render templates', () => {
  const $post1 = load('dist/first-post/index.html')
  const $post2 = load('dist/second-post/index.html')
  const $post3 = load('dist/third-post/index.html')

  expect($post1('.post-title').text()).toEqual('First post')
  expect($post1('.post-date').text()).toEqual('2017')
  expect($post2('.post-title').text()).toEqual('Second post')
  expect($post2('.post-date').text()).toEqual('2018-03')
  expect($post3('.post-title').text()).toEqual('Third post')
  expect($post3('.post-date').text()).toEqual('2018-11-12')
})

test('render belongsTo with pagination', () => {
  const $tag1 = load('dist/tag/first-tag/index.html')
  const $tag2 = load('dist/tag/second-tag/index.html')
  const $tag3 = load('dist/tag/third-tag/index.html')
  const $tag4 = load('dist/tag/fourth-tag/index.html')
  const $tag4page2 = load('dist/tag/fourth-tag/2/index.html')
  const $category1 = load('dist/category/first/index.html')
  const $category1page2 = load('dist/category/first/2/index.html')

  expect($tag1('.post-3 a').text()).toEqual('Third post')
  expect($tag1('.post-2 a').text()).toEqual('Second post')
  expect($tag2('.post-2 a').text()).toEqual('Second post')
  expect($tag2('.post-1 a').text()).toEqual('First post')
  expect($tag3('.post-1 a').text()).toEqual('First post')
  expect($tag3('.post-3 a').text()).toEqual('Third post')
  expect($tag4('.post-3 a').text()).toEqual('Third post')
  expect($tag4('.post-2 a').text()).toEqual('Second post')
  expect($tag4('nav[role="navigation"] a[href="/tag/fourth-tag/"]').attr('aria-label')).toEqual('Current page. Page 1')
  expect($tag4page2('.post-1 a').text()).toEqual('First post')
  expect($tag4page2('nav[role="navigation"] a[href="/tag/fourth-tag/2/"]').attr('aria-label')).toEqual('Current page. Page 2')
  expect($category1('.post-3 a').text()).toEqual('Third post')
  expect($category1('.post-2 a').text()).toEqual('Second post')
  expect($category1('nav[role="navigation"] a[href="/category/first/"]').attr('aria-label')).toEqual('Current page. Page 1')
  expect($category1page2('.post-1 a').text()).toEqual('First post')
  expect($category1page2('nav[role="navigation"] a[href="/category/first/2/"]').attr('aria-label')).toEqual('Current page. Page 2')
})

// --- In-browser navigation assertions ---
// These tests are ORDER-DEPENDENT: each one navigates from the page state
// left by the previous test and waits for a route-specific marker class.

test('open blog in browser', async () => {
  await page.goto('http://localhost:8080/', { waitUntil: 'networkidle2' })
  await page.waitForSelector('#app.is-mounted')
})

test('navigate to /2', async () => {
  await page.click('nav[role="navigation"] .pager-link.active + .pager-link')
  await page.waitForSelector('#app.home-2')
})

test('navigate to /first-post', async () => {
  await page.click('.post-link-1')
  await page.waitForSelector('#app.post-1')
})

test('navigate to /', async () => {
  await page.click('.home-link')
  await page.waitForSelector('#app.home-1')
})

test('navigate to /third-post', async () => {
  await page.click('.post-link-3')
  await page.waitForSelector('#app.post-3')
})

test('navigate to /tag/fourth-tag', async () => {
  await page.click('.tag-link-4')
  await page.waitForSelector('#app.tag-4')
})

test('navigate to /tag/fourth-tag/2', async () => {
  await page.click('nav[role="navigation"] a.active + a')
  await page.waitForSelector('#app.tag-4.tag-page-2')
})

test('navigate to /tag/4/extra', async () => {
  await page.click('.tag-extra-link')
  await page.waitForSelector('#app.tag-4.tag-page-1')
})

test('navigate to /tag/4/extra/2', async () => {
  await page.click('nav[role="navigation"] a.active + a')
  await page.waitForSelector('#app.tag-4.tag-page-2')
})

test('navigate to /first-post', async () => {
  await page.click('.post-link-1')
  await page.waitForSelector('#app.post-1')
})

test('navigate to /category/first', async () => {
  await page.click('.category-link-1')
  await page.waitForSelector('#app.category-1')
})

test('navigate to /category/first/2', async () => {
  await page.click('nav[role="navigation"] a.active + a')
  await page.waitForSelector('#app.category-1.category-page-2')
})

test('navigate to /first-post', async () => {
  await page.click('.post-link-1')
  await page.waitForSelector('#app.post-1')
})

test('navigate to /', async () => {
  await page.click('.home-link')
  await page.waitForSelector('#app.home-1')
})

test('navigate to /asdf', async () => {
  await page.click('.not-found-link')
  await page.waitForSelector('#app.not-found')
})

test('navigate to /', async () => {
  await page.click('.home-link')
  await page.waitForSelector('#app.home-1')
})

// --- Direct-load assertions (fresh page load per route) ---

test('open /2/ directly', async () => {
  await page.goto('http://localhost:8080/2/', { waitUntil: 'networkidle2' })
  await page.waitForSelector('#app.is-mounted')
})

test('open /category/first/ directly', async () => {
  await page.goto('http://localhost:8080/category/first/', { waitUntil: 'networkidle2' })
  await page.waitForSelector('#app.is-mounted')
})

test('open /first-post/ directly', async () => {
  await page.goto('http://localhost:8080/first-post/', { waitUntil: 'networkidle2' })
  await page.waitForSelector('#app.is-mounted')
})
{ "pile_set_name": "Github" }
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package config import ( "strings" "sigs.k8s.io/kustomize/pkg/gvk" ) // NameBackReferences is an association between a gvk.GVK and a list // of FieldSpec instances that could refer to it. // // It is used to handle name changes, and can be thought of as a // a contact list. If you change your own contact info (name, // phone number, etc.), you must tell your contacts or they won't // know about the change. // // For example, ConfigMaps can be used by Pods and everything that // contains a Pod; Deployment, Job, StatefulSet, etc. To change // the name of a ConfigMap instance from 'alice' to 'bob', one // must visit all objects that could refer to the ConfigMap, see if // they mention 'alice', and if so, change the reference to 'bob'. // // The NameBackReferences instance to aid in this could look like // { // kind: ConfigMap // version: v1 // FieldSpecs: // - kind: Pod // version: v1 // path: spec/volumes/configMap/name // - kind: Deployment // path: spec/template/spec/volumes/configMap/name // - kind: Job // path: spec/template/spec/volumes/configMap/name // (etc.) 
// } type NameBackReferences struct { gvk.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` FieldSpecs fsSlice `json:"FieldSpecs,omitempty" yaml:"FieldSpecs,omitempty"` } func (n NameBackReferences) String() string { var r []string for _, f := range n.FieldSpecs { r = append(r, f.String()) } return n.Gvk.String() + ": (\n" + strings.Join(r, "\n") + "\n)" } type nbrSlice []NameBackReferences func (s nbrSlice) Len() int { return len(s) } func (s nbrSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nbrSlice) Less(i, j int) bool { return s[i].Gvk.IsLessThan(s[j].Gvk) } func (s nbrSlice) mergeAll(o nbrSlice) (result nbrSlice, err error) { result = s for _, r := range o { result, err = result.mergeOne(r) if err != nil { return nil, err } } return result, nil } func (s nbrSlice) mergeOne(other NameBackReferences) (nbrSlice, error) { var result nbrSlice var err error found := false for _, c := range s { if c.Gvk.Equals(other.Gvk) { c.FieldSpecs, err = c.FieldSpecs.mergeAll(other.FieldSpecs) if err != nil { return nil, err } found = true } result = append(result, c) } if !found { result = append(result, other) } return result, nil }
{ "pile_set_name": "Github" }
/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qemu/uri.h"

/* Per-request state for an async libgfapi call driven from a coroutine. */
typedef struct GlusterAIOCB {
    int64_t size;          /* expected transfer size, for success check */
    int ret;               /* result set by gluster_finish_aiocb */
    QEMUBH *bh;            /* bottom half that re-enters the coroutine */
    Coroutine *coroutine;  /* coroutine waiting for the request */
    AioContext *aio_context;
} GlusterAIOCB;

/* Per-BlockDriverState connection state. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;     /* libgfapi connection handle */
    struct glfs_fd *fd;    /* open handle for the image file */
} BDRVGlusterState;

/* Parsed pieces of a gluster[+transport]://... filename. */
typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;

/* Free a GlusterConf and every string it owns; NULL-safe. */
static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    if (gconf) {
        g_free(gconf->server);
        g_free(gconf->volname);
        g_free(gconf->image);
        g_free(gconf->transport);
        g_free(gconf);
    }
}

/*
 * Split a URI path of the form /volname/image... into gconf->volname and
 * gconf->image (the image part may itself contain slashes).
 * Returns 0 on success, -EINVAL if either component is missing.
 */
static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* image */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}

/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either hostname, ipv4 address
 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
 * If transport type is 'unix', then 'server' field should not be specified.
 * The 'socket' field needs to be populated with the path to unix domain
 * socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0 which will make gluster to use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /* unix transport requires exactly one query param ("socket");
     * network transports allow none. */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

/*
 * Parse the filename, create a glfs handle and connect it to the volume
 * server.  Returns the initialized handle, or NULL with errno set (and
 * errp populated for parse/connect failures).
 */
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
                                      Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                         "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);

        /* glfs_init sometimes doesn't set errno although docs suggest that */
        if (errno == 0)
            errno = EINVAL;

        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* preserve the errno of the failing call across glfs_fini */
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}

/* Bottom half: runs in the BDS AioContext and resumes the waiting
 * coroutine after an async request completed. */
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}

/*
 * AIO callback routine called from GlusterFS thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    /* Schedule completion in the BDS AioContext; we must not touch the
     * coroutine directly from the GlusterFS thread. */
    acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}

/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};

/* Translate QEMU BDRV_O_* flags into open(2)-style flags for glfs_open. */
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
{
    assert(open_flags != NULL);

    *open_flags |= O_BINARY;

    if (bdrv_flags & BDRV_O_RDWR) {
        *open_flags |= O_RDWR;
    } else {
        *open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        *open_flags |= O_DIRECT;
    }
}

/*
 * Open the image: establish the libgfapi connection and open the image
 * file on the volume.  On failure, any partially-created state is torn
 * down before returning a negative errno.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        /* success: keep s->fd / s->glfs alive for the BDS */
        return ret;
    }
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}

/* New connection/fd built during reopen; swapped in on commit. */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;

/* Build a fresh connection + fd with the new flags; the old state is
 * left untouched until _commit. */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState));
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_malloc0(sizeof(GlusterConf));

    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}

/* Swap in the connection built by _prepare and drop the old one. */
static void qemu_gluster_reopen_commit(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;
    BDRVGlusterState *s = state->bs->opaque;

    /* close the old */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }

    /* use the newly opened image / connection */
    s->fd = reop_s->fd;
    s->glfs = reop_s->glfs;

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}

/* Tear down whatever _prepare managed to create. */
static void qemu_gluster_reopen_abort(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;

    if (reop_s == NULL) {
        return;
    }

    if (reop_s->fd) {
        glfs_close(reop_s->fd);
    }

    if (reop_s->glfs) {
        glfs_fini(reop_s->glfs);
    }

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}

#ifdef CONFIG_GLUSTERFS_ZEROFILL
/* Async write-zeroes via glfs_zerofill_async; yields until the AIO
 * callback resumes us. */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

/* Synchronous zerofill used for "full" preallocation at create time. */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif

/*
 * Create a new image on the volume: connect, create+truncate the file,
 * optionally zero-fill it ("full" preallocation), then close.
 */
static int qemu_gluster_create(const char *filename,
                               QemuOpts *opts, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    char *tmp = NULL;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    total_size =
        qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0) / BDRV_SECTOR_SIZE;

    tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    if (!tmp || !strcmp(tmp, "off")) {
        prealloc = 0;
    } else if (!strcmp(tmp, "full") &&
            gluster_supports_zerofill()) {
        prealloc = 1;
    } else {
        error_setg(errp, "Invalid preallocation mode: '%s'"
            " or GlusterFS doesn't support zerofill API",
            tmp);
        ret = -EINVAL;
        goto out;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate returns 0 on success */
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    g_free(tmp);
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}

/* Shared coroutine read/write path: issue the async request and yield
 * until gluster_finish_aiocb resumes us with the result. */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}

/* Async fsync of the image file; same yield/resume pattern as co_rw. */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

#ifdef CONFIG_GLUSTERFS_DISCARD
/* Async discard of a sector range via glfs_discard_async. */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#endif

/* Image length = current end-of-file position. */
static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

/* Allocated size from stat; st_blocks counts 512-byte units. */
static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}

static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}

static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
.bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk, .bdrv_has_zero_init = qemu_gluster_has_zero_init, #ifdef CONFIG_GLUSTERFS_DISCARD .bdrv_co_discard = qemu_gluster_co_discard, #endif #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes, #endif .create_opts = &qemu_gluster_create_opts, }; static BlockDriver bdrv_gluster_tcp = { .format_name = "gluster", .protocol_name = "gluster+tcp", .instance_size = sizeof(BDRVGlusterState), .bdrv_needs_filename = true, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, .bdrv_reopen_abort = qemu_gluster_reopen_abort, .bdrv_close = qemu_gluster_close, .bdrv_create = qemu_gluster_create, .bdrv_getlength = qemu_gluster_getlength, .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size, .bdrv_truncate = qemu_gluster_truncate, .bdrv_co_readv = qemu_gluster_co_readv, .bdrv_co_writev = qemu_gluster_co_writev, .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk, .bdrv_has_zero_init = qemu_gluster_has_zero_init, #ifdef CONFIG_GLUSTERFS_DISCARD .bdrv_co_discard = qemu_gluster_co_discard, #endif #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes, #endif .create_opts = &qemu_gluster_create_opts, }; static BlockDriver bdrv_gluster_unix = { .format_name = "gluster", .protocol_name = "gluster+unix", .instance_size = sizeof(BDRVGlusterState), .bdrv_needs_filename = true, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, .bdrv_reopen_abort = qemu_gluster_reopen_abort, .bdrv_close = qemu_gluster_close, .bdrv_create = qemu_gluster_create, .bdrv_getlength = qemu_gluster_getlength, .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size, .bdrv_truncate = qemu_gluster_truncate, .bdrv_co_readv = qemu_gluster_co_readv, .bdrv_co_writev = qemu_gluster_co_writev, .bdrv_co_flush_to_disk = 
qemu_gluster_co_flush_to_disk, .bdrv_has_zero_init = qemu_gluster_has_zero_init, #ifdef CONFIG_GLUSTERFS_DISCARD .bdrv_co_discard = qemu_gluster_co_discard, #endif #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes, #endif .create_opts = &qemu_gluster_create_opts, }; static BlockDriver bdrv_gluster_rdma = { .format_name = "gluster", .protocol_name = "gluster+rdma", .instance_size = sizeof(BDRVGlusterState), .bdrv_needs_filename = true, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, .bdrv_reopen_abort = qemu_gluster_reopen_abort, .bdrv_close = qemu_gluster_close, .bdrv_create = qemu_gluster_create, .bdrv_getlength = qemu_gluster_getlength, .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size, .bdrv_truncate = qemu_gluster_truncate, .bdrv_co_readv = qemu_gluster_co_readv, .bdrv_co_writev = qemu_gluster_co_writev, .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk, .bdrv_has_zero_init = qemu_gluster_has_zero_init, #ifdef CONFIG_GLUSTERFS_DISCARD .bdrv_co_discard = qemu_gluster_co_discard, #endif #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes, #endif .create_opts = &qemu_gluster_create_opts, }; static void bdrv_gluster_init(void) { bdrv_register(&bdrv_gluster_rdma); bdrv_register(&bdrv_gluster_unix); bdrv_register(&bdrv_gluster_tcp); bdrv_register(&bdrv_gluster); } block_init(bdrv_gluster_init);
{ "pile_set_name": "Github" }
Pod::Spec.new do |s| s.name = 'DFCache' s.version = '4.0.2' s.license = 'MIT' s.homepage = 'https://github.com/kean/DFCache' s.authors = 'Alexander Grebenyuk' s.summary = 'Composite cache with LRU cleanup. Fast metadata on top of UNIX extended file attributes. Thoroughly tested and well-documented.' s.ios.deployment_target = '6.0' s.osx.deployment_target = '10.8' s.watchos.deployment_target = '2.0' s.tvos.deployment_target = '9.0' s.requires_arc = true s.source = { :git => 'https://github.com/kean/DFCache.git', :tag => s.version.to_s } s.public_header_files = 'DFCache/*.{h}', 'DFCache/Extended File Attributes/*.{h}', 'DFCache/Key-Value File Storage/*.{h}', 'DFCache/Image Decoder/*.{h}', 'DFCache/Value Transforming/*.{h}' s.source_files = 'DFCache/**/*.{h,m}' end
{ "pile_set_name": "Github" }
/*============================================================================= Boost.Wave: A Standard compliant C++ preprocessor library http://www.boost.org/ Copyright (c) 2001-2012 Hartmut Kaiser. Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) =============================================================================*/ #if !defined(CPP_EXPRESSION_VALUE_HPP_452FE66D_8754_4107_AF1E_E42255A0C18A_INCLUDED) #define CPP_EXPRESSION_VALUE_HPP_452FE66D_8754_4107_AF1E_E42255A0C18A_INCLUDED #if defined (BOOST_SPIRIT_DEBUG) #include <iostream> #endif // defined(BOOST_SPIRIT_DEBUG) #include <boost/wave/wave_config.hpp> #include <boost/wave/grammars/cpp_value_error.hpp> // value_error // this must occur after all of the includes and before any code appears #ifdef BOOST_HAS_ABI_HEADERS #include BOOST_ABI_PREFIX #endif /////////////////////////////////////////////////////////////////////////////// namespace boost { namespace wave { namespace grammars { namespace closures { class closure_value; inline bool as_bool(closure_value const& v); /////////////////////////////////////////////////////////////////////////////// // // The closure_value class represents the closure type, which is used for the // expression grammar. // // This class was introduced to allow the expression grammar to respect // the numeric type of a numeric literal or expression result. 
// /////////////////////////////////////////////////////////////////////////////// class closure_value { public: enum value_type { is_int = 1, is_uint = 2, is_bool = 3 }; closure_value(value_error valid_ = error_noerror) : type(is_int), valid(valid_) { value.i = 0; } explicit closure_value(int i, value_error valid_ = error_noerror) : type(is_int), valid(valid_) { value.i = i; } explicit closure_value(unsigned int ui, value_error valid_ = error_noerror) : type(is_uint), valid(valid_) { value.ui = ui; } explicit closure_value(int_literal_type i, value_error valid_ = error_noerror) : type(is_int), valid(valid_) { value.i = i; } explicit closure_value(uint_literal_type ui, value_error valid_ = error_noerror) : type(is_uint), valid(valid_) { value.ui = ui; } explicit closure_value(bool b, value_error valid_ = error_noerror) : type(is_bool), valid(valid_) { value.b = b; } value_type get_type() const { return type; } value_error is_valid() const { return valid; } // explicit conversion friend int_literal_type as_int(closure_value const& v) { switch (v.type) { case is_uint: return v.value.ui; case is_bool: return v.value.b ? 1 : 0; case is_int: break; } return v.value.i; } friend uint_literal_type as_uint(closure_value const& v) { switch (v.type) { case is_uint: return v.value.ui; case is_bool: return v.value.b ? 1 : 0; case is_int: break; } return v.value.i; } friend int_literal_type as_long(closure_value const& v) { switch (v.type) { case is_uint: return v.value.ui; case is_bool: return v.value.b ? 1 : 0; case is_int: break; } return v.value.i; } friend uint_literal_type as_ulong(closure_value const& v) { switch (v.type) { case is_uint: return v.value.ui; case is_bool: return v.value.b ? 
1 : 0; case is_int: break; } return v.value.i; } friend bool as_bool(closure_value const& v) { switch (v.type) { case is_uint: return v.value.ui != 0; case is_bool: return v.value.b; case is_int: break; } return v.value.i != 0.0; } // assignment closure_value &operator= (closure_value const &rhs) { switch (rhs.get_type()) { case is_int: value.i = as_long(rhs); type = is_int; break; case is_uint: value.ui = as_ulong(rhs); type = is_uint; break; case is_bool: value.b = as_bool(rhs); type = is_bool; break; } valid = rhs.valid; return *this; } closure_value &operator= (int rhs) { type = is_int; value.i = rhs; valid = error_noerror; return *this; } closure_value &operator= (unsigned int rhs) { type = is_uint; value.ui = rhs; valid = error_noerror; return *this; } closure_value &operator= (int_literal_type rhs) { type = is_int; value.i = rhs; valid = error_noerror; return *this; } closure_value &operator= (uint_literal_type rhs) { type = is_uint; value.ui = rhs; valid = error_noerror; return *this; } closure_value &operator= (bool rhs) { type = is_bool; value.b = rhs; valid = error_noerror; return *this; } // arithmetics closure_value &operator+= (closure_value const &rhs) { switch (type) { case is_int: switch(rhs.type) { case is_bool: { int_literal_type result = value.i + as_long(rhs); if ((rhs.value.i > 0L && value.i > result) || (rhs.value.i < 0L && value.i < result)) { valid = error_integer_overflow; } else { value.i = result; } } break; case is_int: { int_literal_type result = value.i + rhs.value.i; if ((rhs.value.i > 0L && value.i > result) || (rhs.value.i < 0L && value.i < result)) { valid = error_integer_overflow; } else { value.i = result; } } break; case is_uint: { uint_literal_type result = value.ui + rhs.value.ui; if (result < value.ui) { valid = error_integer_overflow; } else { value.ui = result; type = is_uint; } } break; } break; case is_uint: { uint_literal_type result = value.ui + as_ulong(rhs); if (result < value.ui) { valid = error_integer_overflow; } 
else { value.ui = result; } } break; case is_bool: value.i = value.b + as_bool(rhs); type = is_int; } valid = (value_error)(valid | rhs.valid); return *this; } closure_value &operator-= (closure_value const &rhs) { switch (type) { case is_int: switch(rhs.type) { case is_bool: { int_literal_type result = value.i - as_long(rhs); if ((rhs.value.i > 0L && result > value.i) || (rhs.value.i < 0L && result < value.i)) { valid = error_integer_overflow; } else { value.i = result; } } break; case is_int: { int_literal_type result = value.i - rhs.value.i; if ((rhs.value.i > 0L && result > value.i) || (rhs.value.i < 0L && result < value.i)) { valid = error_integer_overflow; } else { value.i = result; } } break; case is_uint: { uint_literal_type result = value.ui - rhs.value.ui; if (result > value.ui) { valid = error_integer_overflow; } else { value.ui = result; type = is_uint; } } break; } break; case is_uint: switch(rhs.type) { case is_bool: { uint_literal_type result = value.ui - as_ulong(rhs); if (result > value.ui) { valid = error_integer_overflow; } else { value.ui = result; } } break; case is_int: { uint_literal_type result = value.ui - rhs.value.i; if ((rhs.value.i > 0L && result > value.ui) || (rhs.value.i < 0L && result < value.ui)) { valid = error_integer_overflow; } else { value.ui = result; } } break; case is_uint: { uint_literal_type result = value.ui - rhs.value.ui; if (result > value.ui) { valid = error_integer_overflow; } else { value.ui = result; } } break; } break; case is_bool: value.i = value.b - as_bool(rhs); type = is_int; } valid = (value_error)(valid | rhs.valid); return *this; } closure_value &operator*= (closure_value const &rhs) { switch (type) { case is_int: switch(rhs.type) { case is_bool: value.i *= as_long(rhs); break; case is_int: { int_literal_type result = value.i * rhs.value.i; if (0 != value.i && 0 != rhs.value.i && (result / value.i != rhs.value.i || result / rhs.value.i != value.i) ) { valid = error_integer_overflow; } else { value.i = 
result; } } break; case is_uint: { uint_literal_type result = value.ui * rhs.value.ui; if (0 != value.ui && 0 != rhs.value.ui && (result / value.ui != rhs.value.ui || result / rhs.value.ui != value.ui) ) { valid = error_integer_overflow; } else { value.ui = result; type = is_uint; } } break; } break; case is_uint: { uint_literal_type rhs_val = as_ulong(rhs); uint_literal_type result = value.ui * rhs_val; if (0 != value.ui && 0 != rhs_val && (result / value.ui != rhs_val || result / rhs_val != value.ui) ) { valid = error_integer_overflow; } else { value.ui = result; type = is_uint; } } break; case is_bool: switch (rhs.type) { case is_int: value.i = (value.b ? 1 : 0) * rhs.value.i; type = is_int; break; case is_uint: value.ui = (value.b ? 1 : 0) * rhs.value.ui; type = is_uint; break; case is_bool: value.b = 0 != ((value.b ? 1 : 0) * (rhs.value.b ? 1 : 0)); break; } } valid = (value_error)(valid | rhs.valid); return *this; } closure_value &operator/= (closure_value const &rhs) { switch (type) { case is_int: switch(rhs.type) { case is_bool: case is_int: if (as_long(rhs) != 0) { if (value.i == -value.i && -1 == rhs.value.i) { // LONG_MIN / -1 on two's complement valid = error_integer_overflow; } else { value.i /= as_long(rhs); } } else { valid = error_division_by_zero; // division by zero } break; case is_uint: if (rhs.value.ui != 0) { value.ui /= rhs.value.ui; type = is_uint; } else { valid = error_division_by_zero; // division by zero } break; } break; case is_uint: if (as_ulong(rhs) != 0) value.ui /= as_ulong(rhs); else valid = error_division_by_zero; // division by zero break; case is_bool: if (as_bool(rhs)) { switch(rhs.type) { case is_int: value.i = (value.b ? 1 : 0) / rhs.value.i; type = is_int; break; case is_uint: value.i = (value.b ? 
1 : 0) / rhs.value.ui; type = is_int; break; case is_bool: break; } } else { valid = error_division_by_zero; // division by zero } } return *this; } closure_value &operator%= (closure_value const &rhs) { switch (type) { case is_int: switch(rhs.type) { case is_bool: case is_int: if (as_long(rhs) != 0) { if (value.i == -value.i && -1 == rhs.value.i) { // LONG_MIN % -1 on two's complement valid = error_integer_overflow; } else { value.i %= as_long(rhs); } } else { valid = error_division_by_zero; // division by zero } break; case is_uint: if (rhs.value.ui != 0) { value.ui %= rhs.value.ui; type = is_uint; } else { valid = error_division_by_zero; // division by zero } break; } break; case is_uint: if (as_ulong(rhs) != 0) value.ui %= as_ulong(rhs); else valid = error_division_by_zero; // division by zero break; case is_bool: if (as_bool(rhs)) { switch(rhs.type) { case is_int: value.i = (value.b ? 1 : 0) % rhs.value.i; type = is_int; break; case is_uint: value.i = (value.b ? 1 : 0) % rhs.value.ui; type = is_int; break; case is_bool: break; } } else { valid = error_division_by_zero; // division by zero } } return *this; } friend closure_value operator- (closure_value const &rhs) { switch (rhs.type) { case is_int: { int_literal_type value = as_long(rhs); if (value != 0 && value == -value) return closure_value(-value, error_integer_overflow); return closure_value(-value, rhs.valid); } case is_bool: return closure_value(-as_long(rhs), rhs.valid); case is_uint: break; } int_literal_type value = as_ulong(rhs); if (value != 0 && value == -value) return closure_value(-value, error_integer_overflow); return closure_value(-value, rhs.valid); } friend closure_value operator~ (closure_value const &rhs) { return closure_value(~as_ulong(rhs), rhs.valid); } friend closure_value operator! 
(closure_value const &rhs) { switch (rhs.type) { case is_int: return closure_value(!as_long(rhs), rhs.valid); case is_bool: return closure_value(!as_bool(rhs), rhs.valid); case is_uint: break; } return closure_value(!as_ulong(rhs), rhs.valid); } // comparison friend closure_value operator== (closure_value const &lhs, closure_value const &rhs) { bool cmp = false; switch (lhs.type) { case is_int: switch(rhs.type) { case is_bool: cmp = as_bool(lhs) == rhs.value.b; break; case is_int: cmp = lhs.value.i == rhs.value.i; break; case is_uint: cmp = lhs.value.ui == rhs.value.ui; break; } break; case is_uint: cmp = lhs.value.ui == as_ulong(rhs); break; case is_bool: cmp = lhs.value.b == as_bool(rhs); break; } return closure_value(cmp, (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator!= (closure_value const &lhs, closure_value const &rhs) { return closure_value(!as_bool(lhs == rhs), (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator> (closure_value const &lhs, closure_value const &rhs) { bool cmp = false; switch (lhs.type) { case is_int: switch(rhs.type) { case is_bool: cmp = lhs.value.i > as_long(rhs); break; case is_int: cmp = lhs.value.i > rhs.value.i; break; case is_uint: cmp = lhs.value.ui > rhs.value.ui; break; } break; case is_uint: cmp = lhs.value.ui > as_ulong(rhs); break; case is_bool: cmp = lhs.value.b > as_bool(rhs); break; } return closure_value(cmp, (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator< (closure_value const &lhs, closure_value const &rhs) { bool cmp = false; switch (lhs.type) { case is_int: switch(rhs.type) { case is_bool: cmp = lhs.value.i < as_long(rhs); break; case is_int: cmp = lhs.value.i < rhs.value.i; break; case is_uint: cmp = lhs.value.ui < rhs.value.ui; break; } break; case is_uint: cmp = lhs.value.ui < as_ulong(rhs); break; case is_bool: cmp = as_bool(lhs) < as_bool(rhs); break; } return closure_value(cmp, (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator<= 
(closure_value const &lhs, closure_value const &rhs) { return closure_value(!as_bool(lhs > rhs), (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator>= (closure_value const &lhs, closure_value const &rhs) { return closure_value(!as_bool(lhs < rhs), (value_error)(lhs.valid | rhs.valid)); } closure_value & operator<<= (closure_value const &rhs) { switch (type) { case is_bool: case is_int: switch (rhs.type) { case is_bool: case is_int: { int_literal_type shift_by = as_long(rhs); if (shift_by > 64) shift_by = 64; else if (shift_by < -64) shift_by = -64; value.i <<= shift_by; } break; case is_uint: { uint_literal_type shift_by = as_ulong(rhs); if (shift_by > 64) shift_by = 64; value.ui <<= shift_by; // Note: The usual arithmetic conversions are not performed on // bit shift operations. } break; } break; case is_uint: switch (rhs.type) { case is_bool: case is_int: { int_literal_type shift_by = as_long(rhs); if (shift_by > 64) shift_by = 64; else if (shift_by < -64) shift_by = -64; value.ui <<= shift_by; } break; case is_uint: { uint_literal_type shift_by = as_ulong(rhs); if (shift_by > 64) shift_by = 64; value.ui <<= shift_by; } break; } } valid = (value_error)(valid | rhs.valid); return *this; } closure_value & operator>>= (closure_value const &rhs) { switch (type) { case is_bool: case is_int: switch (rhs.type) { case is_bool: case is_int: { int_literal_type shift_by = as_long(rhs); if (shift_by > 64) shift_by = 64; else if (shift_by < -64) shift_by = -64; value.i >>= shift_by; } break; case is_uint: { uint_literal_type shift_by = as_ulong(rhs); if (shift_by > 64) shift_by = 64; value.ui >>= shift_by; // Note: The usual arithmetic conversions are not performed on // bit shift operations. 
} break; } break; case is_uint: switch (rhs.type) { case is_bool: case is_int: { int_literal_type shift_by = as_long(rhs); if (shift_by > 64) shift_by = 64; else if (shift_by < -64) shift_by = -64; value.ui >>= shift_by; } break; case is_uint: { uint_literal_type shift_by = as_ulong(rhs); if (shift_by > 64) shift_by = 64; value.ui >>= shift_by; } break; } break; } valid = (value_error)(valid | rhs.valid); return *this; } friend closure_value operator|| (closure_value const &lhs, closure_value const &rhs) { bool result = as_bool(lhs) || as_bool(rhs); return closure_value(result, (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator&& (closure_value const &lhs, closure_value const &rhs) { bool result = as_bool(lhs) && as_bool(rhs); return closure_value(result, (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator| (closure_value const &lhs, closure_value const &rhs) { uint_literal_type result = as_ulong(lhs) | as_ulong(rhs); return closure_value(result, (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator& (closure_value const &lhs, closure_value const &rhs) { uint_literal_type result = as_ulong(lhs) & as_ulong(rhs); return closure_value(result, (value_error)(lhs.valid | rhs.valid)); } friend closure_value operator^ (closure_value const &lhs, closure_value const &rhs) { uint_literal_type result = as_ulong(lhs) ^ as_ulong(rhs); return closure_value(result, (value_error)(lhs.valid | rhs.valid)); } // handle the ?: operator closure_value & handle_questionmark(closure_value const &cond, closure_value const &val2) { switch (type) { case is_int: switch (val2.type) { case is_bool: value.b = as_bool(cond) ? value.b : as_bool(val2); break; case is_int: value.i = as_bool(cond) ? value.i : as_long(val2); break; case is_uint: value.ui = as_bool(cond) ? value.ui : as_ulong(val2); type = is_uint; // changing type! break; } break; case is_uint: value.ui = as_bool(cond) ? 
value.ui : as_ulong(val2); break; case is_bool: value.b = as_bool(cond) ? value.b : as_bool(val2); break; } valid = as_bool(cond) ? valid : val2.valid; return *this; } #if defined (BOOST_SPIRIT_DEBUG) friend std::ostream& operator<< (std::ostream &o, closure_value const &val) { switch (val.type) { case is_int: o << "int(" << as_long(val) << ")"; break; case is_uint: o << "unsigned int(" << as_ulong(val) << ")"; break; case is_bool: o << "bool(" << as_bool(val) << ")"; break; } return o; } #endif // defined(BOOST_SPIRIT_DEBUG) private: value_type type; union { int_literal_type i; uint_literal_type ui; bool b; } value; value_error valid; }; /////////////////////////////////////////////////////////////////////////////// } // namespace closures } // namespace grammars } // namespace wave } // namespace boost // the suffix header occurs after all of the code #ifdef BOOST_HAS_ABI_HEADERS #include BOOST_ABI_SUFFIX #endif #endif // !defined(CPP_EXPRESSION_VALUE_HPP_452FE66D_8754_4107_AF1E_E42255A0C18A_INCLUDED)
{ "pile_set_name": "Github" }
<dom-module id="shared-styles"><template><style> .page-title { @apply(--paper-font-display2); } @media (max-width: 600px) { .page-title { font-size: 24px!important; } } </style></template></dom-module>
{ "pile_set_name": "Github" }
**EXPERIMENTAL**: Anything regarding Guide generation, in this folder or elsewhere, is to be considered experimental and prone to be changed or removed without notice or consideration for BC. This is a follow-up to an earlier POC and can, at best, seen as an incubator project.
{ "pile_set_name": "Github" }
firefly.script package ====================== Submodules ---------- firefly.script.firefly-admin module ----------------------------------- .. automodule:: firefly.script.firefly-admin :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: firefly.script :members: :undoc-members: :show-inheritance:
{ "pile_set_name": "Github" }
union S { unsigned ac : 4; unsigned : 4; unsigned clock : 1; unsigned : 0; unsigned flag : 1; }; struct X { unsigned light : 1; unsigned toaster : 1; int count; union S stat; }; // RUN: c-index-test -test-print-bitwidth %s | FileCheck %s // CHECK: FieldDecl=ac:2:12 (Definition) bitwidth=4 // CHECK: FieldDecl=:3:3 (Definition) bitwidth=4 // CHECK: FieldDecl=clock:4:12 (Definition) bitwidth=1 // CHECK: FieldDecl=:5:3 (Definition) bitwidth=0 // CHECK: FieldDecl=flag:6:12 (Definition) bitwidth=1 // CHECK: FieldDecl=light:10:12 (Definition) bitwidth=1 // CHECK: FieldDecl=toaster:11:12 (Definition) bitwidth=1 // CHECK-NOT: count // CHECK-NOT: stat
{ "pile_set_name": "Github" }
package net.azib.ipscan.core.net;

import com.sun.jna.Library;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;

import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;

import static java.lang.reflect.Modifier.isStatic;

/**
 * JNA binding for iphlpapi.dll for ICMP and ARP support under Windows.
 *
 * <p>Each nested {@code Structure} subclass mirrors a native Win32 struct;
 * the Java field declaration order therefore MUST match the native layout
 * (enforced via {@link AutoOrderedStructure#getFieldOrder()}).
 */
public interface WinIpHlpDll extends Library {
	// Process-wide singleton binding, resolved once at class-load time.
	WinIpHlpDll dll = Loader.load();

	class Loader {
		public static WinIpHlpDll load() {
			try {
				// Vista and later expose the ICMP API from iphlpapi.dll ...
				return (WinIpHlpDll) Native.loadLibrary("iphlpapi", WinIpHlpDll.class);
			} catch (UnsatisfiedLinkError e) {
				// ... older Windows versions ship it in icmp.dll instead.
				return (WinIpHlpDll) Native.loadLibrary("icmp", WinIpHlpDll.class);
			}
		}
	}

	/**
	 * Base class that reports the field order to JNA by reflecting over the
	 * declared public instance fields, so subclasses don't have to list them
	 * manually. Relies on {@code Class.getFields()} returning fields in
	 * declaration order, which is not guaranteed by the JLS — see note below.
	 */
	class AutoOrderedStructure extends Structure {
		// this is a requirement of newer JNA, possibly it won't work on some JVM, but probability is quite small
		@Override
		protected List<String> getFieldOrder() {
			ArrayList<String> fields = new ArrayList<>();
			for (Field field : getClass().getFields()) {
				// Skip statics: only instance fields are part of the native layout.
				if (!isStatic(field.getModifiers())) fields.add(field.getName());
			}
			return fields;
		}
	}

	/**
	 * Wrapper for Microsoft's <a href="http://msdn.microsoft.com/en-US/library/aa366045.aspx">IcmpCreateFile</a>
	 */
	Pointer IcmpCreateFile();

	/**
	 * Wrapper for Microsoft's <a href="https://docs.microsoft.com/en-us/windows/desktop/api/icmpapi/nf-icmpapi-icmp6createfile">Icmp6CreateFile</a>
	 */
	Pointer Icmp6CreateFile();

	/**
	 * Wrapper for Microsoft's <a href="http://msdn.microsoft.com/en-us/library/aa366043.aspx">IcmpCloseHandle</a>
	 */
	boolean IcmpCloseHandle(Pointer hIcmp);

	/**
	 * Wrapper for Microsoft's <a href="http://msdn.microsoft.com/EN-US/library/aa366050.aspx">IcmpSendEcho</a>
	 */
	int IcmpSendEcho(
			Pointer hIcmp,
			IpAddrByVal destinationAddress,
			Pointer requestData,
			short requestSize,
			IpOptionInformationByRef requestOptions,
			Pointer replyBuffer,
			int replySize,
			int timeout
	);

	/**
	 * Wrapper for Microsoft's <a href="https://docs.microsoft.com/en-us/windows/desktop/api/icmpapi/nf-icmpapi-icmp6sendecho2">Icmp6SendEcho2</a>
	 */
	int Icmp6SendEcho2(
			Pointer hIcmp,
			Pointer event,
			Pointer apcRoutine,
			Pointer apcContext,
			Ip6SockAddrByRef sourceAddress,
			Ip6SockAddrByRef destinationAddress,
			Pointer requestData,
			short requestSize,
			IpOptionInformationByRef requestOptions,
			Pointer replyBuffer,
			int replySize,
			int timeout
	);

	/**
	 * Wrapper for Microsoft's <a href="http://msdn.microsoft.com/en-us/library/windows/desktop/aa366358(v=vs.85).aspx">SendARP</a>
	 */
	int SendARP(
			IpAddrByVal destIP,
			int srcIP,
			Pointer pMacAddr,
			Pointer pPhyAddrLen
	);

	// IPv4 address as four raw bytes (network byte order per the Win32 IPAddr type
	// — NOTE(review): confirm byte order against callers).
	class IpAddr extends AutoOrderedStructure {
		public byte[] bytes = new byte[4];
	}

	class IpAddrByVal extends IpAddr implements Structure.ByValue {
	}

	// Mirrors the native sockaddr_in6 layout used by Icmp6SendEcho2.
	class Ip6SockAddr extends AutoOrderedStructure {
		// NOTE(review): 10 is AF_INET6 on Linux; on Windows AF_INET6 is 23 — confirm
		// which value the ICMPv6 API actually expects here.
		public short family = 10;
		public short port;
		public int flowInfo;
		public byte[] bytes = new byte[16];
		public int scopeId;
	}

	class Ip6SockAddrByRef extends Ip6SockAddr implements Structure.ByReference {
	}

	// Mirrors the native IP_OPTION_INFORMATION struct (TTL, TOS, flags, options).
	class IpOptionInformation extends AutoOrderedStructure {
		public byte ttl;
		public byte tos;
		public byte flags;
		public byte optionsSize;
		public Pointer optionsData;
	}

	class IpOptionInformationByVal extends IpOptionInformation implements Structure.ByValue {
	}

	class IpOptionInformationByRef extends IpOptionInformation implements Structure.ByReference {
	}

	// Mirrors the native ICMP_ECHO_REPLY struct returned in the reply buffer
	// of IcmpSendEcho.
	class IcmpEchoReply extends AutoOrderedStructure {
		public IpAddrByVal address;
		public int status;
		public int roundTripTime;
		public short dataSize;
		public short reserved;
		public Pointer data;
		public IpOptionInformationByVal options;

		public IcmpEchoReply() {
		}

		// Overlays this structure onto an existing native reply buffer.
		public IcmpEchoReply(Pointer p) {
			useMemory(p);
			read();
		}
	}

	// Mirrors the ICMPv6 echo reply layout produced by Icmp6SendEcho2.
	class Icmp6EchoReply extends AutoOrderedStructure {
		public short port;
		public byte[] flowInfo = new byte[4];
		public byte[] addressBytes = new byte[16];
		public int scopeId;
		public int status;
		public int roundTripTime;

		public Icmp6EchoReply() {
		}

		// Overlays this structure onto an existing native reply buffer.
		public Icmp6EchoReply(Pointer p) {
			useMemory(p);
			read();
		}
	}
}
{ "pile_set_name": "Github" }
<?php
/**
 * PQRS Measure 0007 -- Call to createPopulationCriteria()
 *
 * Copyright (C) 2015 - 2017 Suncoast Connection
 *
 * LICENSE: This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0
 * See the Mozilla Public License for more details.
 * If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * @author Art Eaton <[email protected]>
 * @author Bryan lee <[email protected]>
 * @package LibreHealthEHR
 * @link http://suncoastconnection.com
 * @link http://librehealth.io
 *
 * Please support this product by sharing your changes with the LibreHealth.io community.
 */

/**
 * Report definition for PQRS measure 0007.
 *
 * Supplies the measure's two population-criteria objects to the abstract
 * report driver, which iterates them to evaluate the measure.
 */
class PQRS_0007 extends AbstractPQRSReport
{
	/**
	 * Build the list of population criteria for this measure.
	 *
	 * @return array the two criteria objects, in evaluation order
	 */
	public function createPopulationCriteria()
	{
		return array(
			new PQRS_0007_PopulationCriteria1(),
			new PQRS_0007_PopulationCriteria2(),
		);
	}
}
?>
{ "pile_set_name": "Github" }
" Vim syntax file " Language: CSP (Communication Sequential Processes, using FDR input syntax) " Maintainer: Jan Bredereke <[email protected]> " Version: 0.6.0 " Last change: Mon Mar 25, 2002 " URL: http://www.tzi.de/~brederek/vim/ " Copying: You may distribute and use this file freely, in the same " way as the vim editor itself. " " To Do: - Probably I missed some keywords or operators, please " fix them and notify me, the maintainer. " - Currently, we do lexical highlighting only. It would be " nice to have more actual syntax checks, including " highlighting of wrong syntax. " - The additional syntax for the RT-Tester (pseudo-comments) " should be optional. " quit when a syntax file was already loaded if exists("b:current_syntax") finish endif " case is significant to FDR: syn case match " Block comments in CSP are between {- and -} syn region cspComment start="{-" end="-}" contains=cspTodo " Single-line comments start with -- syn region cspComment start="--" end="$" contains=cspTodo,cspOldRttComment,cspSdlRttComment keepend " Numbers: syn match cspNumber "\<\d\+\>" " Conditionals: syn keyword cspConditional if then else " Operators on processes: " -> ? : ! ' ; /\ \ [] |~| [> & [[..<-..]] ||| [|..|] || [..<->..] ; : @ ||| syn match cspOperator "->" syn match cspOperator "/\\" syn match cspOperator "[^/]\\"lc=1 syn match cspOperator "\[\]" syn match cspOperator "|\~|" syn match cspOperator "\[>" syn match cspOperator "\[\[" syn match cspOperator "\]\]" syn match cspOperator "<-" syn match cspOperator "|||" syn match cspOperator "[^|]||[^|]"lc=1,me=e-1 syn match cspOperator "[^|{\~]|[^|}\~]"lc=1,me=e-1 syn match cspOperator "\[|" syn match cspOperator "|\]" syn match cspOperator "\[[^>]"me=e-1 syn match cspOperator "\]" syn match cspOperator "<->" syn match cspOperator "[?:!';@]" syn match cspOperator "&" syn match cspOperator "\." 
" (not on processes:) " syn match cspDelimiter "{|" " syn match cspDelimiter "|}" " syn match cspDelimiter "{[^-|]"me=e-1 " syn match cspDelimiter "[^-|]}"lc=1 " Keywords: syn keyword cspKeyword length null head tail concat elem syn keyword cspKeyword union inter diff Union Inter member card syn keyword cspKeyword empty set Set Seq syn keyword cspKeyword true false and or not within let syn keyword cspKeyword nametype datatype diamond normal syn keyword cspKeyword sbisim tau_loop_factor model_compress syn keyword cspKeyword explicate syn match cspKeyword "transparent" syn keyword cspKeyword external chase prioritize syn keyword cspKeyword channel Events syn keyword cspKeyword extensions productions syn keyword cspKeyword Bool Int " Reserved keywords: syn keyword cspReserved attribute embed module subtype " Include: syn region cspInclude matchgroup=cspIncludeKeyword start="^include" end="$" keepend contains=cspIncludeArg syn region cspIncludeArg start='\s\+\"' end= '\"\s*' contained " Assertions: syn keyword cspAssert assert deterministic divergence free deadlock syn keyword cspAssert livelock syn match cspAssert "\[T=" syn match cspAssert "\[F=" syn match cspAssert "\[FD=" syn match cspAssert "\[FD\]" syn match cspAssert "\[F\]" " Types and Sets " (first char a capital, later at least one lower case, no trailing underscore): syn match cspType "\<_*[A-Z][A-Z_0-9]*[a-z]\(\|[A-Za-z_0-9]*[A-Za-z0-9]\)\>" " Processes (all upper case, no trailing underscore): " (For identifiers that could be types or sets, too, this second rule set " wins.) syn match cspProcess "\<[A-Z_][A-Z_0-9]*[A-Z0-9]\>" syn match cspProcess "\<[A-Z_]\>" " reserved identifiers for tool output (ending in underscore): syn match cspReservedIdentifier "\<[A-Za-z_][A-Za-z_0-9]*_\>" " ToDo markers: syn match cspTodo "FIXME" contained syn match cspTodo "TODO" contained syn match cspTodo "!!!" 
contained " RT-Tester pseudo comments: " (The now obsolete syntax:) syn match cspOldRttComment "^--\$\$AM_UNDEF"lc=2 contained syn match cspOldRttComment "^--\$\$AM_ERROR"lc=2 contained syn match cspOldRttComment "^--\$\$AM_WARNING"lc=2 contained syn match cspOldRttComment "^--\$\$AM_SET_TIMER"lc=2 contained syn match cspOldRttComment "^--\$\$AM_RESET_TIMER"lc=2 contained syn match cspOldRttComment "^--\$\$AM_ELAPSED_TIMER"lc=2 contained syn match cspOldRttComment "^--\$\$AM_OUTPUT"lc=2 contained syn match cspOldRttComment "^--\$\$AM_INPUT"lc=2 contained " (The current syntax:) syn region cspRttPragma matchgroup=cspRttPragmaKeyword start="^pragma\s\+" end="\s*$" oneline keepend contains=cspRttPragmaArg,cspRttPragmaSdl syn keyword cspRttPragmaArg AM_ERROR AM_WARNING AM_SET_TIMER contained syn keyword cspRttPragmaArg AM_RESET_TIMER AM_ELAPSED_TIMER contained syn keyword cspRttPragmaArg AM_OUTPUT AM_INPUT AM_INTERNAL contained " the "SDL_MATCH" extension: syn region cspRttPragmaSdl matchgroup=cspRttPragmaKeyword start="SDL_MATCH\s\+" end="\s*$" contains=cspRttPragmaSdlArg contained syn keyword cspRttPragmaSdlArg TRANSLATE nextgroup=cspRttPragmaSdlTransName contained syn keyword cspRttPragmaSdlArg PARAM SKIP OPTIONAL CHOICE ARRAY nextgroup=cspRttPragmaSdlName contained syn match cspRttPragmaSdlName "\s*\S\+\s*" nextgroup=cspRttPragmaSdlTail contained syn region cspRttPragmaSdlTail start="" end="\s*$" contains=cspRttPragmaSdlTailArg contained syn keyword cspRttPragmaSdlTailArg SUBSET_USED DEFAULT_VALUE Present contained syn match cspRttPragmaSdlTransName "\s*\w\+\s*" nextgroup=cspRttPragmaSdlTransTail contained syn region cspRttPragmaSdlTransTail start="" end="\s*$" contains=cspRttPragmaSdlTransTailArg contained syn keyword cspRttPragmaSdlTransTailArg sizeof contained syn match cspRttPragmaSdlTransTailArg "\*" contained syn match cspRttPragmaSdlTransTailArg "(" contained syn match cspRttPragmaSdlTransTailArg ")" contained " temporary syntax extension for commented-out 
"pragma SDL_MATCH": syn match cspSdlRttComment "pragma\s\+SDL_MATCH\s\+" nextgroup=cspRttPragmaSdlArg contained syn sync lines=250 " Define the default highlighting. " Only when an item doesn't have highlighting yet " The default methods for highlighting. Can be overridden later " (For vim version <=5.7, the command groups are defined in " $VIMRUNTIME/syntax/synload.vim ) hi def link cspComment Comment hi def link cspNumber Number hi def link cspConditional Conditional hi def link cspOperator Delimiter hi def link cspKeyword Keyword hi def link cspReserved SpecialChar hi def link cspInclude Error hi def link cspIncludeKeyword Include hi def link cspIncludeArg Include hi def link cspAssert PreCondit hi def link cspType Type hi def link cspProcess Function hi def link cspTodo Todo hi def link cspOldRttComment Define hi def link cspRttPragmaKeyword Define hi def link cspSdlRttComment Define hi def link cspRttPragmaArg Define hi def link cspRttPragmaSdlArg Define hi def link cspRttPragmaSdlName Default hi def link cspRttPragmaSdlTailArg Define hi def link cspRttPragmaSdlTransName Default hi def link cspRttPragmaSdlTransTailArg Define hi def link cspReservedIdentifier Error " (Currently unused vim method: Debug) let b:current_syntax = "csp" " vim: ts=8
{ "pile_set_name": "Github" }
{ "randomStatetest" : { "env" : { "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5", "currentDifficulty" : "5623894562375", "currentGasLimit" : "0x7fffffffffffffff", "currentNumber" : "0", "currentTimestamp" : "1", "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6" }, "logs" : [ ], "out" : "0x", "expect" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "0", "code" : "0x7f00000000000000000000000100000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff20b39838f628b96846cff0455", "nonce" : "0", "storage" : { } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "1869556482", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "999999998130443564", "code" : "0x", "nonce" : "1", "storage" : { } } }, "postStateRoot" : "913792f777fb1b2537eef57d043131df032838d246b94ade5b4b33e8cb140412", "pre" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "0", "code" : 
"0x7f00000000000000000000000100000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff20b39838f628b96846cff0455", "nonce" : "0", "storage" : { } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "46", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "1000000000000000000", "code" : "0x", "nonce" : "0", "storage" : { } } }, "transaction" : { "data" : "0x7f00000000000000000000000100000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff20b39838f628b96846cff04", "gasLimit" : "0x6f6f2ad4", "gasPrice" : "1", "nonce" : "0", "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87", "value" : "490453529" } } }
{ "pile_set_name": "Github" }
<div class="ui5-yp-root" role="grid" aria-readonly="false" aria-multiselectable="false" style="{{styles.main}}" @keydown={{_onkeydown}} @mousedown={{_onmousedown}} @mouseup={{_onmouseup}} > {{#each _yearIntervals}} <div class="ui5-yp-interval-container"> {{#each this}} <div id="{{this.id}}" tabindex="{{this._tabIndex}}" data-sap-timestamp="{{this.timestamp}}" class="{{this.classes}}" role="gridcell" aria-selected="false"> {{this.year}} </div> {{/each}} </div> {{/each}} </div>
{ "pile_set_name": "Github" }
# Repairs the common ``cd..`` typo by re-inserting the missing space
# between the ``cd`` command and the parent-directory target.
#
# Does not really save chars, but is fun :D
#
# Example:
# > cd..
# cd..: command not found

# The exact broken script this rule recognises, and its replacement.
_BROKEN = 'cd..'
_FIXED = 'cd ..'


def match(command):
    """Return True when the typed script is exactly the ``cd..`` typo."""
    return command.script == _BROKEN


def get_new_command(command):
    """Return the corrected command, ``cd ..``."""
    return _FIXED
{ "pile_set_name": "Github" }
[bleh-1] text=ffffff text(bold)=ffffff background=000000 black=666666 black(bold)=8c847f red=996578 red(bold)=bd4b76 green=889965 green(bold)=95b548 yellow=998565 yellow(bold)=bd814b blue=657a99 blue(bold)=4c83bf magenta=8b6599 magenta(bold)=a04bbd cyan=65998d cyan(bold)=4dbda8 white=a5a5a4 white(bold)=848484 [Names] name0=bleh-1 count=1
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <style xmlns="http://purl.org/net/xbiblio/csl" version="1.0" default-locale="en-US"> <!-- Generated with https://github.com/citation-style-language/utilities/tree/master/generate_dependent_styles/data/springer --> <info> <title>Metascience</title> <title-short>Metascience</title-short> <id>http://www.zotero.org/styles/metascience</id> <link href="http://www.zotero.org/styles/metascience" rel="self"/> <link href="http://www.zotero.org/styles/springer-humanities-author-date" rel="independent-parent"/> <link href="http://www.springer.com/cda/content/document/cda_downloaddocument/Key_Style_Points_1.0.pdf" rel="documentation"/> <link href="http://www.springer.com/cda/content/document/cda_downloaddocument/manuscript-guidelines-1.0.pdf" rel="documentation"/> <category citation-format="author-date"/> <category field="humanities"/> <issn>0815-0796</issn> <eissn>1467-9981</eissn> <updated>2014-05-15T12:00:00+00:00</updated> <rights license="http://creativecommons.org/licenses/by-sa/3.0/">This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 License</rights> </info> </style>
{ "pile_set_name": "Github" }
.so man__libmansuffix__/XtAddCallback.__libmansuffix__
{ "pile_set_name": "Github" }
/** \file * \brief Declaration of class SPQRTree * * \author Carsten Gutwenger * * \par License: * This file is part of the Open Graph Drawing Framework (OGDF). * * \par * Copyright (C)<br> * See README.md in the OGDF root directory for details. * * \par * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * Version 2 or 3 as published by the Free Software Foundation; * see the file LICENSE.txt included in the packaging of this file * for details. * * \par * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * \par * You should have received a copy of the GNU General Public * License along with this program; if not, see * http://www.gnu.org/copyleft/gpl.html */ #pragma once #include <ogdf/decomposition/Skeleton.h> #include <ogdf/decomposition/PertinentGraph.h> #include <ogdf/basic/SList.h> namespace ogdf { /** * \brief Linear-time implementation of static SPQR-trees. * * @ingroup decomp * * The class SPQRTree maintains the arrangement of the triconnected * components of a biconnected multi-graph \a G [Hopcroft, Tarjan 1973] * as a so-called SPQR tree \a T [Di Battista, Tamassia, 1996]. We * call \a G the original graph of \a T. * * Each node of the tree has an associated type (represented by * SPQRTree::NodeType), which is either SNode, PNode, or * RNode, and a skeleton (represented by the class Skeleton). * The skeletons of the nodes of \a T are in one-to-one * correspondence to the triconnected components of \a G, i.e., * S-nodes correspond to polygons, P-nodes to bonds, and * R-nodes to triconnected graphs. * * In our representation of SPQR-trees, Q-nodes are omitted. 
Instead, * the skeleton S of a node \a v in \a T contains two types of edges: * real edges, which correspond to edges in \a G, and virtual edges, which * correspond to edges in \a T having \a v as an endpoint. * There is a special edge \a er in G at which \a T is rooted, i.e., the * root node of \a T is the node whose skeleton contains the real edge * corresponding to \a er. * * The reference edge of the skeleton of the root node is \a er, the * reference edge of the skeleton \a S of a non-root node \a v is the virtual * edge in \a S that corresponds to the tree edge (parent(\a v),\a v). */ class OGDF_EXPORT SPQRTree { public: //! The type of a tree node in T. enum class NodeType { SNode, PNode, RNode }; // destructor virtual ~SPQRTree() { } //! \name Access operations //! @{ //! Returns a reference to the original graph \a G. virtual const Graph &originalGraph() const=0; //! Returns a reference to the tree \a T. virtual const Graph &tree() const=0; //! Returns the edge of \a G at which \a T is rooted. virtual edge rootEdge() const=0; //! Returns the root node of \a T. virtual node rootNode() const=0; //! Returns the number of S-nodes in \a T. virtual int numberOfSNodes() const=0; //! Returns the number of P-nodes in \a T. virtual int numberOfPNodes() const=0; //! Returns the number of R-nodes in \a T. virtual int numberOfRNodes() const=0; /** * \brief Returns the type of node \p v. * \pre \p v is a node in \a T */ virtual NodeType typeOf(node v) const=0; //! Returns the list of all nodes with type \p t. virtual List<node> nodesOfType(NodeType t) const=0; /** * \brief Returns the skeleton of node \p v. * \pre \p v is a node in \a T */ virtual Skeleton &skeleton(node v) const=0; /** * \brief Returns the skeleton that contains the real edge \p e. * \pre \p e is an edge in \a G */ virtual const Skeleton &skeletonOfReal(edge e) const=0; /** * \brief Returns the skeleton edge that corresponds to the real edge \p e. 
* \pre \p e is an edge in \a G */ virtual edge copyOfReal(edge e) const=0; /** * \brief Returns the pertinent graph of tree node \p v in \p Gp. * \pre \p v is a node in \a T */ void pertinentGraph(node v, PertinentGraph &Gp) const { if (m_cpV == nullptr) m_cpV = new NodeArray<node>(originalGraph(),nullptr); NodeArray<node> &cpV = *m_cpV; Gp.init(v); cpRec(v,Gp); const Skeleton &S = skeleton(v); edge e = Gp.m_skRefEdge = S.referenceEdge(); if (e != nullptr) e = Gp.m_P.newEdge(cpV[S.original(e->source())],cpV[S.original(e->target())]); Gp.m_vEdge = e; while (!m_cpVAdded.empty()) cpV[m_cpVAdded.popFrontRet()] = nullptr; } //! @} //! \name Update operations //! @{ /** * \brief Roots \a T at edge \p e and returns the new root node of \a T. * \pre \p e is an edge in \a G */ virtual node rootTreeAt(edge e) =0; /** * \brief Roots \a T at node \p v and returns \p v. * \pre \p v is a node in \a T */ virtual node rootTreeAt(node v) =0; void directSkEdge(node vT, edge e, node src) { OGDF_ASSERT(e != nullptr); OGDF_ASSERT(src == e->source() || src == e->target()); if(e->source() != src) skeleton(vT).getGraph().reverseEdge(e); } void replaceSkEdgeByPeak(node vT, edge e) { Graph &M = skeleton(vT).getGraph(); M.reverseEdge(M.split(e)); } // !@} protected: /** * \brief Recursively performs the task of adding edges (and nodes) * to the pertinent graph \p Gp for each involved skeleton graph. */ virtual void cpRec(node v, PertinentGraph &Gp) const=0; //! Add an edge to \p Gp corresponding to \p eOrig. edge cpAddEdge(edge eOrig, PertinentGraph &Gp) const { edge eP = Gp.m_P.newEdge(cpAddNode(eOrig->source(),Gp),cpAddNode(eOrig->target(),Gp)); Gp.m_origE[eP] = eOrig; return eP; } //! Add a node to \p Gp corresponding to \p vOrig if required. 
node cpAddNode(node vOrig, PertinentGraph &Gp) const { node &vP = (*m_cpV)[vOrig]; if (vP == nullptr) { m_cpVAdded.pushBack(vOrig); Gp.m_origV[vP = Gp.m_P.newNode()] = vOrig; } return vP; } // auxiliary members used for computing pertinent graphs mutable NodeArray<node> *m_cpV; //!< node in pertinent graph corresponding to an original node (auxiliary member) mutable SList<node> m_cpVAdded; //!< list of added nodes (auxiliary member) }; }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!-- Copyright © 2012 Christian Persch This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. --> <interface> <menu id="notebook-popup"> <section> <item> <attribute name="label" translatable="yes">Move Terminal _Left</attribute> <attribute name="action">win.tab-move-left</attribute> </item> <item> <attribute name="label" translatable="yes">Move Terminal _Right</attribute> <attribute name="action">win.tab-move-right</attribute> </item> </section> <section> <item> <attribute name="label" translatable="yes">_Detach Terminal</attribute> <attribute name="action">win.tab-detach</attribute> </item> <item> <attribute name="label" translatable="yes">Set _Title…</attribute> <attribute name="action">win.set-title</attribute> <attribute name="hidden-when">action-missing</attribute> </item> </section> <section> <item> <attribute name="label" translatable="yes">C_lose Terminal</attribute> <attribute name="action">win.close</attribute> <attribute name="target">tab</attribute> </item> </section> </menu> </interface>
{ "pile_set_name": "Github" }
/***************************************************************************/ /* */ /* cffload.h */ /* */ /* OpenType & CFF data/program tables loader (specification). */ /* */ /* Copyright 1996-2018 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #ifndef CFFLOAD_H_ #define CFFLOAD_H_ #include <ft2build.h> #include FT_INTERNAL_CFF_TYPES_H #include "cffparse.h" #include FT_INTERNAL_CFF_OBJECTS_TYPES_H /* for CFF_Face */ FT_BEGIN_HEADER FT_LOCAL( FT_UShort ) cff_get_standard_encoding( FT_UInt charcode ); FT_LOCAL( FT_String* ) cff_index_get_string( CFF_Font font, FT_UInt element ); FT_LOCAL( FT_String* ) cff_index_get_sid_string( CFF_Font font, FT_UInt sid ); FT_LOCAL( FT_Error ) cff_index_access_element( CFF_Index idx, FT_UInt element, FT_Byte** pbytes, FT_ULong* pbyte_len ); FT_LOCAL( void ) cff_index_forget_element( CFF_Index idx, FT_Byte** pbytes ); FT_LOCAL( FT_String* ) cff_index_get_name( CFF_Font font, FT_UInt element ); FT_LOCAL( FT_UInt ) cff_charset_cid_to_gindex( CFF_Charset charset, FT_UInt cid ); FT_LOCAL( FT_Error ) cff_font_load( FT_Library library, FT_Stream stream, FT_Int face_index, CFF_Font font, CFF_Face face, FT_Bool pure_cff, FT_Bool cff2 ); FT_LOCAL( void ) cff_font_done( CFF_Font font ); FT_LOCAL( FT_Error ) cff_load_private_dict( CFF_Font font, CFF_SubFont subfont, FT_UInt lenNDV, FT_Fixed* NDV ); FT_LOCAL( FT_Byte ) cff_fd_select_get( CFF_FDSelect fdselect, FT_UInt glyph_index ); FT_LOCAL( FT_Bool ) cff_blend_check_vector( CFF_Blend blend, FT_UInt vsindex, FT_UInt lenNDV, FT_Fixed* NDV ); FT_LOCAL( FT_Error ) 
cff_blend_build_vector( CFF_Blend blend, FT_UInt vsindex, FT_UInt lenNDV, FT_Fixed* NDV ); FT_LOCAL( void ) cff_blend_clear( CFF_SubFont subFont ); FT_LOCAL( FT_Error ) cff_blend_doBlend( CFF_SubFont subfont, CFF_Parser parser, FT_UInt numBlends ); #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT FT_LOCAL( FT_Error ) cff_get_var_blend( CFF_Face face, FT_UInt *num_coords, FT_Fixed* *coords, FT_Fixed* *normalizedcoords, FT_MM_Var* *mm_var ); FT_LOCAL( void ) cff_done_blend( CFF_Face face ); #endif FT_END_HEADER #endif /* CFFLOAD_H_ */ /* END */
{ "pile_set_name": "Github" }
import invariant from 'invariant'; export type LanguageOptions = { isTS: boolean; isModern: boolean; isReact: boolean; }; export function getExtensions( platforms: string[], extensions: string[], workflows: string[] ): string[] { // In the past we used spread operators to collect the values so now we enforce type safety on them. invariant(Array.isArray(platforms), 'Expected: `platforms: string[]`'); invariant(Array.isArray(extensions), 'Expected: `extensions: string[]`'); invariant(Array.isArray(workflows), 'Expected: `workflows: string[]`'); const fileExtensions = []; // support .expo files for (const workflow of [...workflows, '']) { // Ensure order is correct: [platformA.js, platformB.js, js] for (const platform of [...platforms, '']) { // Support both TypeScript and JavaScript for (const extension of extensions) { fileExtensions.push([platform, workflow, extension].filter(Boolean).join('.')); } } } return fileExtensions; } export function getLanguageExtensionsInOrder({ isTS, isModern, isReact, }: LanguageOptions): string[] { // @ts-ignore: filter removes false type const addLanguage = (lang: string): string[] => [lang, isReact && `${lang}x`].filter(Boolean); // Support JavaScript let extensions = addLanguage('js'); if (isModern) { extensions.unshift('mjs'); } if (isTS) { extensions = [...addLanguage('ts'), ...extensions]; } return extensions; } export function getManagedExtensions( platforms: string[], languageOptions: LanguageOptions = { isTS: true, isModern: true, isReact: true } ): string[] { const fileExtensions = getExtensions(platforms, getLanguageExtensionsInOrder(languageOptions), [ 'expo', ]); // Always add these last _addMiscellaneousExtensions(fileExtensions); return fileExtensions; } export function getBareExtensions( platforms: string[], languageOptions: LanguageOptions = { isTS: true, isModern: true, isReact: true } ): string[] { const fileExtensions = getExtensions( platforms, getLanguageExtensionsInOrder(languageOptions), [] ); // Always add 
these last _addMiscellaneousExtensions(fileExtensions); return fileExtensions; } function _addMiscellaneousExtensions(fileExtensions: string[]): string[] { // Always add these with no platform extension // In the future we may want to add platform and workspace extensions to json. fileExtensions.push('json'); fileExtensions.push('wasm'); return fileExtensions; }
{ "pile_set_name": "Github" }
<?php
/*
 * Copyright 2014 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

/**
 * A range of dates, delimited by a start date and an end date, used to
 * filter Photos Library media items by creation time.
 */
class Google_Service_PhotosLibrary_DateRange extends Google_Model
{
  protected $endDateType = 'Google_Service_PhotosLibrary_Date';
  protected $endDateDataType = '';
  protected $startDateType = 'Google_Service_PhotosLibrary_Date';
  protected $startDateDataType = '';
  // Declared explicitly so the setters below do not create dynamic
  // properties, which are deprecated as of PHP 8.2.
  public $endDate;
  public $startDate;

  /**
   * @param Google_Service_PhotosLibrary_Date
   */
  public function setEndDate(Google_Service_PhotosLibrary_Date $endDate)
  {
    $this->endDate = $endDate;
  }
  /**
   * @return Google_Service_PhotosLibrary_Date
   */
  public function getEndDate()
  {
    return $this->endDate;
  }
  /**
   * @param Google_Service_PhotosLibrary_Date
   */
  public function setStartDate(Google_Service_PhotosLibrary_Date $startDate)
  {
    $this->startDate = $startDate;
  }
  /**
   * @return Google_Service_PhotosLibrary_Date
   */
  public function getStartDate()
  {
    return $this->startDate;
  }
}
{ "pile_set_name": "Github" }
.TH "CPUPOWER-IDLE-SET" "1" "0.1" "" "cpupower Manual"
.SH "NAME"
.LP
cpupower\-idle\-set \- Utility to set cpu idle state specific kernel options
.SH "SYNTAX"
.LP
cpupower [ \-c cpulist ] idle\-set [\fIoptions\fP]
.SH "DESCRIPTION"
.LP
The cpupower idle\-set subcommand allows setting cpu idle, also called cpu sleep state, specific
options offered by the kernel. One example is disabling sleep states. This can be handy for power
vs performance tuning.
.SH "OPTIONS"
.LP
.TP
\fB\-d\fR \fB\-\-disable\fR <STATE_NO>
Disable a specific processor sleep state.
.TP
\fB\-e\fR \fB\-\-enable\fR <STATE_NO>
Enable a specific processor sleep state.
.TP
\fB\-D\fR \fB\-\-disable-by-latency\fR <LATENCY>
Disable all idle states with an equal or higher latency than <LATENCY>.

Enable all idle states with a latency lower than <LATENCY>.
.TP
\fB\-E\fR \fB\-\-enable-all\fR
Enable all idle states if not enabled already.
.SH "REMARKS"
.LP
Cpuidle Governors Policy on Disabling Sleep States

.RS 4
Depending on the cpuidle governor in use, which implements the kernel policy of how to choose sleep states,
subsequent sleep states on this core might get disabled as well.

There are two cpuidle governors: ladder and menu. While the ladder governor is always available, if
CONFIG_CPU_IDLE is selected, the menu governor additionally requires CONFIG_NO_HZ.

The behavior and the effect of the disable variable depends on the
implementation of a particular governor. In the ladder governor, for
example, it is not coherent, i.e. if one is disabling a light state,
then all deeper states are disabled as well. Likewise, if one enables a
deep state but a lighter state still is disabled, then this has no effect.
.RE
.LP
Disabling the Lightest Sleep State may not have any Effect

.RS 4
If criteria are not met to enter deeper sleep states and the lightest sleep
state is chosen when idle, the kernel may still enter this sleep state,
irrespective of whether it is disabled or not.
This is also reflected in the usage count of the disabled sleep state when using the cpupower idle-info command. .RE .LP Selecting specific CPU Cores .RS 4 By default processor sleep states of all CPU cores are set. Please refer to the cpupower(1) manpage in the \-\-cpu option section how to disable C-states of specific cores. .RE .SH "FILES" .nf \fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP \fI/sys/devices/system/cpu/cpuidle/*\fP .fi .SH "AUTHORS" .nf Thomas Renninger <[email protected]> .fi .SH "SEE ALSO" .LP cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1), cpupower\-idle\-info(1)
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2011 ~ 2018 Deepin Technology Co., Ltd.
 *
 * Author:     listenerri <listenerri@gmail.com>
 *
 * Maintainer: listenerri <listenerri@gmail.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "pppsection.h"

#include <QDebug>

using namespace DCC_NAMESPACE::network;
using namespace dcc::widgets;
using namespace NetworkManager;

// Settings page section exposing the PPP options of a NetworkManager
// connection as a list of on/off switches.  Widget state is written back to
// the NetworkManager::PppSetting only when saveSettings() is called.
PPPSection::PPPSection(NetworkManager::PppSetting::Ptr pppSetting, QFrame *parent)
    : AbstractSection(tr("PPP"), parent)
    , m_pppSetting(pppSetting)
    , m_mppeEnable(new SwitchWidget(this))
    , m_mppe128(new SwitchWidget(this))
    , m_mppeStateful(new SwitchWidget(this))
    , m_refuseEAP(new SwitchWidget(this))
    , m_refusePAP(new SwitchWidget(this))
    , m_refuseCHAP(new SwitchWidget(this))
    , m_refuseMSCHAP(new SwitchWidget(this))
    , m_refuseMSCHAP2(new SwitchWidget(this))
    , m_noBSDComp(new SwitchWidget(this))
    , m_noDeflate(new SwitchWidget(this))
    , m_noVJComp(new SwitchWidget(this))
    , m_lcpEchoInterval(new SwitchWidget(this))
{
    initStrMaps();
    initUI();
    initConnection();

    // Apply the initial MPPE state so the dependent switches (m_mppe128,
    // m_mppeStateful) start with the correct visibility.
    onMppeEnableChanged(m_mppeEnable->checked());
}

PPPSection::~PPPSection()
{
}

// All options are plain switches, so there is no user input to validate.
bool PPPSection::allInputValid()
{
    // nothing to check
    return true;
}

// Copy every switch state into the PppSetting.  The LCP-echo switch maps to
// the fixed parameter pair interval=30s / failure=5 when enabled, 0/0 when
// disabled.
void PPPSection::saveSettings()
{
    m_pppSetting->setRequireMppe(m_mppeEnable->checked());
    m_pppSetting->setRequireMppe128(m_mppe128->checked());
    m_pppSetting->setMppeStateful(m_mppeStateful->checked());

    m_pppSetting->setRefuseEap(m_refuseEAP->checked());
    m_pppSetting->setRefusePap(m_refusePAP->checked());
    m_pppSetting->setRefuseChap(m_refuseCHAP->checked());
    m_pppSetting->setRefuseMschap(m_refuseMSCHAP->checked());
    m_pppSetting->setRefuseMschapv2(m_refuseMSCHAP2->checked());

    m_pppSetting->setNoBsdComp(m_noBSDComp->checked());
    m_pppSetting->setNoDeflate(m_noDeflate->checked());
    m_pppSetting->setNoVjComp(m_noVJComp->checked());

    m_pppSetting->setLcpEchoInterval(m_lcpEchoInterval->checked() ? 30 : 0);
    m_pppSetting->setLcpEchoFailure(m_lcpEchoInterval->checked() ? 5 : 0);

    m_pppSetting->setInitialized(true);
}

// Map of translated switch titles to the corresponding NetworkManager PPP
// option keys.  initUI() looks titles up by key via OptionsStrMap.key().
void PPPSection::initStrMaps()
{
    OptionsStrMap = {
        {tr("Refuse EAP Authentication"), "refuse-eap"},
        {tr("Refuse PAP Authentication"), "refuse-pap"},
        {tr("Refuse CHAP Authentication"), "refuse-chap"},
        {tr("Refuse MSCHAP Authentication"), "refuse-mschap"},
        {tr("Refuse MSCHAPv2 Authentication"), "refuse-mschapv2"},
        {tr("No BSD Data Compression"), "nobsdcomp"},
        {tr("No Deflate Data Compression"), "nodeflate"},
        {tr("No TCP Header Compression"), "no-vj-comp"},
        {tr("No Protocol Field Compression"), "nopcomp"},
        {tr("No Address/Control Compression"), "noaccomp"},
        {tr("Send PPP Echo Packets"), "lcp-echo-interval"}
    };
}

// Create the switches and seed each one from the current PppSetting values.
void PPPSection::initUI()
{
    // An empty settings map means this is a brand-new connection, so
    // defaults (LCP echo on) apply instead of the stored values.
    bool empty = m_pppSetting->toMap().isEmpty();

    m_mppeEnable->setTitle(tr("Use MPPE"));
    m_mppeEnable->setChecked(m_pppSetting->requireMppe());

    m_mppe128->setTitle(tr("128-bit MPPE"));
    m_mppe128->setChecked(m_pppSetting->requireMppe128());

    m_mppeStateful->setTitle(tr("Stateful MPPE"));
    m_mppeStateful->setChecked(m_pppSetting->mppeStateful());

    m_refuseEAP->setTitle(OptionsStrMap.key("refuse-eap"));
    m_refuseEAP->setChecked(m_pppSetting->refuseEap());

    m_refusePAP->setTitle(OptionsStrMap.key("refuse-pap"));
    m_refusePAP->setChecked(m_pppSetting->refusePap());

    m_refuseCHAP->setTitle(OptionsStrMap.key("refuse-chap"));
    m_refuseCHAP->setChecked(m_pppSetting->refuseChap());

    m_refuseMSCHAP->setTitle(OptionsStrMap.key("refuse-mschap"));
    m_refuseMSCHAP->setChecked(m_pppSetting->refuseMschap());

    m_refuseMSCHAP2->setTitle(OptionsStrMap.key("refuse-mschapv2"));
    m_refuseMSCHAP2->setChecked(m_pppSetting->refuseMschapv2());

    m_noBSDComp->setTitle(OptionsStrMap.key("nobsdcomp"));
    m_noBSDComp->setChecked(m_pppSetting->noBsdComp());

    m_noDeflate->setTitle(OptionsStrMap.key("nodeflate"));
    m_noDeflate->setChecked(m_pppSetting->noDeflate());

    m_noVJComp->setTitle(OptionsStrMap.key("no-vj-comp"));
    m_noVJComp->setChecked(m_pppSetting->noVjComp());

    m_lcpEchoInterval->setTitle(OptionsStrMap.key("lcp-echo-interval"));
    if (empty) {
        m_lcpEchoInterval->setChecked(true);
    } else {
        // Only the exact 30s/5-failure pair written by saveSettings() is
        // shown as "on"; any other stored combination reads as "off".
        m_lcpEchoInterval->setChecked(
                m_pppSetting->lcpEchoInterval() == 30 && m_pppSetting->lcpEchoFailure() == 5);
    }

    appendItem(m_mppeEnable);
    appendItem(m_mppe128);
    appendItem(m_mppeStateful);
    appendItem(m_refuseEAP);
    appendItem(m_refusePAP);
    appendItem(m_refuseCHAP);
    appendItem(m_refuseMSCHAP);
    appendItem(m_refuseMSCHAP2);
    appendItem(m_noBSDComp);
    appendItem(m_noDeflate);
    appendItem(m_noVJComp);
    appendItem(m_lcpEchoInterval);
}

// Wire every switch to editClicked so the page is marked dirty on any
// change; m_mppeEnable additionally toggles the visibility of the
// MPPE-dependent switches.
void PPPSection::initConnection()
{
    connect(m_mppeEnable, &SwitchWidget::checkedChanged, this, &PPPSection::onMppeEnableChanged);

    connect(m_mppeEnable, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_mppe128, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_mppeStateful, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_refuseEAP, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_refusePAP, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_refuseCHAP, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_refuseMSCHAP, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_refuseMSCHAP2, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_noBSDComp, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_noDeflate, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_noVJComp, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
    connect(m_lcpEchoInterval, &SwitchWidget::checkedChanged, this, &PPPSection::editClicked);
}

// The 128-bit and stateful MPPE options only make sense while MPPE itself
// is enabled, so hide them otherwise.
void PPPSection::onMppeEnableChanged(const bool checked)
{
    m_mppe128->setVisible(checked);
    m_mppeStateful->setVisible(checked);
}
{ "pile_set_name": "Github" }
# Deep matrix-factorization training script (user/item ranking on MovieLens)
# built on an old (pre-1.0) Keras + Theano API.
# NOTE(review): Python 2 only -- uses `unicode` and shuffles a `range` object;
# it will not run unmodified on Python 3.
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import theano
#import random
np.random.seed(1337)  # for reproducibility
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation,TimeDistributedDense,Cosine,Merge,Reshape,ElementMul
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer

# Theano debug hook: print the inputs of each node as it executes.
def inspect_inputs(i, node, fn):
    print( "Beging intput:")
    print (i)
    print (node)
    print ("input(s) value(s):")
    print ([input[0].shape for input in fn.inputs])
    theano.printing.debugprint(node)
    print( "End input:")

# Theano debug hook: print the output shapes of each node as it executes.
def inspect_outputs(i, node, fn):
    print( "Beging output:")
    print( "output(s) :", [output[0].shape for output in fn.outputs])
    print( "End output:")

# Module-level state shared between load_dataset() and readbatch().
curline=0        # cursor into `rows` for the next batch
batchsize=2048
userfea=3883     # user feature vector length
itemfea=6039     # item feature vector length
samples=0        # number of lines (examples) in the loaded dataset
lusers=[]        # raw text lines: user features
litems=[]        # raw text lines: positive-item features
litemsN=[]       # raw text lines: negative-item features
rows=[]          # example order (optionally shuffled)

# Load the three parallel sparse-feature files into memory and reset the
# batch cursor.  randomize=1 shuffles the example order.
def load_dataset(userFile,posFile,negFile, randomize=0):
    fitems = open(posFile, 'r')
    fitemsN = open(negFile, 'r')
    fusers = open(userFile, 'r')
    global lusers,litems,litemsN,samples,curline,rows
    lusers=fusers.readlines()
    litems=fitems.readlines()
    litemsN=fitemsN.readlines()
    samples=len(lusers)
    # NOTE(review): range(0, samples-1) drops the last example (off-by-one?);
    # also, np.random.shuffle on a range only works on Python 2 -- confirm.
    rows=range(0,samples-1)
    if randomize==1:
        np.random.shuffle(rows)
        print(rows[0:10])
    curline=0

# Parse the next batch of up to `batchsize` examples into dense arrays:
# user features (n, userfea), item pair (n, 2, itemfea) with [positive,
# negative], and one-hot labels (n, 2) marking slot 0 (the positive) as 1.
# Advances the module-level cursor `curline`.
def readbatch():
    global curline
    n=min(batchsize, samples-curline )
    user= np.zeros([n,userfea])
    items= np.zeros([n,2,itemfea])
    y_train=np.zeros([n,2])
    i=0
    for row in rows[curline:curline+n]:
        if i==n:
            break
        luser=lusers[row]
        litem=litems[row]
        litemN=litemsN[row]
        feats=luser.split(" ")
        pfeats=litem.split(" ")
        nfeats=litemN.split(" ")
        # Each field is "id:value" (1-based ids, converted to 0-based).
        for fea in feats:
            if ':' in fea:
                x=fea.split(":")
                id=int(unicode(x[0], errors='ignore'))-1
                user[i][id]=float(unicode(x[1], errors='ignore') )
        y_train[i][0]=1
        for fea in pfeats:
            if ':' in fea:
                x=fea.split(":")
                id=int(unicode(x[0], errors='ignore'))-1
                items[i][0][id]=float(unicode(x[1], errors='ignore') )
        for fea in nfeats:
            if ':' in fea:
                x=fea.split(":")
                id=int(unicode(x[0], errors='ignore'))-1
                items[i][1][id]=float(unicode(x[1], errors='ignore') )
        i=i+1
    #user= np.array([[1,0],[1,0],[0,1]])
    #y_train=np.array([[1,0],[1,0],[1,0]])
    #Items=np.array( [ [[1,0],[0,1]] , [[.5,0],[0,1]],[[-1,1],[1,0]] ])
    #user= np.array([[1,1,1],[1,3,1],[0,1,0],[0,2,-1]])
    #y_train=np.array([[1,0],[1,0],[1,0],[1,0]])
    #Items=np.array( [ [[1,2,0],[0,2,0]] , [[2,2,1],[2,0,2]],[[0,1,2],[1,0,0]],[[1,3,3],[1,3,-1]] ])
    #user= np.array([[0,1]])
    #y_train=np.array([[1,0]])
    #Items=np.array( [[[-1,1],[1,0]]])
    # The inputs come as vectors, we reshape them to monochrome 2D images,
    # according to the shape convention: (examples, channels, rows, columns)
    #user.reshape(-1,3);
    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    curline=curline+n
    return (user ,items, y_train)

print("Loading data...")
load_dataset(r"D:\users\t-alie\Deepfactorization\movielens.users_50p_5min_centeresMean_manyNeg",r"D:\users\t-alie\Deepfactorization\movielens.items_pos_50p_5min_centeresMean_manyNeg",r"D:\users\t-alie\Deepfactorization\movielens.items_neg_50p_5min_centeresMean_manyNeg0",1)
print(samples)
#print(len(user), 'train sequences',r"f:\1b.items.n0",)
#print('user_train shape:', user.shape)
#print('Item shape:', Items.shape)

# User tower: two dense tanh layers mapping user features to a 300-d vector.
userModel = Sequential()
userModel.add(Dense(userfea, 300))
userModel.add(Activation('tanh'))
userModel.add(Dropout(0.4))
userModel.add(Dense(300, 300))
userModel.add(Activation('tanh'))

# Item tower: same architecture, applied to both items of each (pos, neg)
# pair via TimeDistributedDense.
itemModel = Sequential()
itemModel.add(TimeDistributedDense(itemfea, 300))
itemModel.add(Activation('tanh'))
itemModel.add(Dropout(0.4))
itemModel.add(TimeDistributedDense(300, 300))
itemModel.add(Activation('tanh'))
##itemModel.add(Reshape(4))
##itemModel.add(Dense(4, 2))

# Combine: element-wise product of user and item embeddings, reduced to a
# score per item, then softmax over the (pos, neg) pair.
model=Sequential()
model.add(ElementMul([userModel,itemModel]))  #should output 2 values
model.add(TimeDistributedDense(300, 30))
model.add(Activation('tanh'))
model.add(TimeDistributedDense(30, 1))
model.add(Reshape(2))
# Capture the pre-softmax scores for the standalone scoring function below.
y_score= model.get_output(train=False)
x_test=model.get_input(train=False)
model.add(Activation('softmax'))
##model.add(Merge([userModel, itemModel], mode='sum'))
print('done model construction')
model.compile(loss='categorical_crossentropy', optimizer='Adadelta')
print('done complie')
scoring= theano.function(x_test,y_score,allow_input_downcast=True, mode=None)
#history = model.fit([user ,Items] ,y_train, nb_epoch=15, batch_size=2048, verbose=2, show_accuracy=True)

# Manual training loop: 30 epochs over the dataset, one batch at a time.
for i in range(0,30):
    print("itr",i)
    for j in range(0,int(samples/batchsize+.05)):
        print("batch",j)
        user ,Items, y_train = readbatch()
        history = model.train_on_batch([user ,Items] ,y_train,accuracy=True)# nb_epoch=10, batch_size=1024, verbose=2, show_accuracy=True)
    curline=0;
print('done training')

# Score the test split with the pre-softmax scoring function and dump the
# raw outputs to a file.
load_dataset(r"D:\users\t-alie\Deepfactorization\movielens.userstest_50p_5min_centeresMean_manyNeg.2048.centered",r"D:\users\t-alie\Deepfactorization\movielens.itemstest_50p_5min_centeresMean_manyNeg",r"D:\users\t-alie\Deepfactorization\movielens.itemstest_50p_5min_centeresMean_manyNeg.fakeneg")
pfile=open(r"D:\users\t-alie\Deepfactorization\yp_hidden.batch","w")
for j in range(0,int(samples/batchsize+.05)):
    print("testing batch",j)
    user ,Items, y_train = readbatch()
    y_p=model.custom_predict([user,Items],scoring)
    for y in y_p:
        pfile.write("%s\n" %y)
pfile.close()
#pfile1=open(r"C:\Users\t-alie\Downloads\movieLens_1M\yp1","w")
#for y in y_pp:
#    pfile1.write("%s\n" %y)
#pfile1.close()
print('done prediction')
#model.save_weights(r'f:\1b.model')
#print('done saving')
{ "pile_set_name": "Github" }
using System;
using System.Collections.Generic;
using NHapi.Base.Log;
using NHapi.Model.V24.Group;
using NHapi.Model.V24.Segment;
using NHapi.Model.V24.Datatype;
using NHapi.Base;
using NHapi.Base.Parser;
using NHapi.Base.Model;

namespace NHapi.Model.V24.Message

{
///<summary>
/// Represents a QRY_Q02 message structure (see chapter [AAA]). This structure contains the
/// following elements:
///<ol>
///<li>0: MSH (Message Header) </li>
///<li>1: QRD (Original-Style Query Definition) </li>
///<li>2: QRF (Original Style Query Filter) optional </li>
///<li>3: DSC (Continuation Pointer) optional </li>
///</ol>
///</summary>
[Serializable]
public class QRY_Q02 : AbstractMessage  {

	///<summary>
	/// Creates a new QRY_Q02 Group with custom IModelClassFactory.
	///</summary>
	public QRY_Q02(IModelClassFactory factory) : base(factory){
	   init(factory);
	}

	///<summary>
	/// Creates a new QRY_Q02 Group with DefaultModelClassFactory.
	///</summary>
	public QRY_Q02() : base(new DefaultModelClassFactory()) {
	   init(new DefaultModelClassFactory());
	}

	///<summary>
	/// Initialize method for QRY_Q02.  This does the segment setup for the message.
	///</summary>
	private void init(IModelClassFactory factory) {
	   try {
	      this.add(typeof(MSH), true, false);
	      this.add(typeof(QRD), true, false);
	      this.add(typeof(QRF), false, false);
	      this.add(typeof(DSC), false, false);
	   } catch(HL7Exception e) {
	      HapiLogFactory.GetHapiLog(GetType()).Error("Unexpected error creating QRY_Q02 - this is probably a bug in the source code generator.", e);
	   }
	}


	///<summary>
	/// Returns the HL7 version this message structure belongs to.
	///</summary>
	public override string Version {
		get{
			return Constants.VERSION;
		}
	}

	///<summary>
	/// Returns MSH (Message Header) - creates it if necessary
	///</summary>
	public MSH MSH {
	get{
	   MSH ret = null;
	   try {
	      ret = (MSH)this.GetStructure("MSH");
	   } catch(HL7Exception e) {
	      HapiLogFactory.GetHapiLog(GetType()).Error("Unexpected error accessing data - this is probably a bug in the source code generator.", e);
	      // "occurred" (was misspelled "ocurred" by the generator)
	      throw new System.Exception("An unexpected error occurred",e);
	   }
	   return ret;
	}
	}

	///<summary>
	/// Returns QRD (Original-Style Query Definition) - creates it if necessary
	///</summary>
	public QRD QRD {
	get{
	   QRD ret = null;
	   try {
	      ret = (QRD)this.GetStructure("QRD");
	   } catch(HL7Exception e) {
	      HapiLogFactory.GetHapiLog(GetType()).Error("Unexpected error accessing data - this is probably a bug in the source code generator.", e);
	      throw new System.Exception("An unexpected error occurred",e);
	   }
	   return ret;
	}
	}

	///<summary>
	/// Returns QRF (Original Style Query Filter) - creates it if necessary
	///</summary>
	public QRF QRF {
	get{
	   QRF ret = null;
	   try {
	      ret = (QRF)this.GetStructure("QRF");
	   } catch(HL7Exception e) {
	      HapiLogFactory.GetHapiLog(GetType()).Error("Unexpected error accessing data - this is probably a bug in the source code generator.", e);
	      throw new System.Exception("An unexpected error occurred",e);
	   }
	   return ret;
	}
	}

	///<summary>
	/// Returns DSC (Continuation Pointer) - creates it if necessary
	///</summary>
	public DSC DSC {
	get{
	   DSC ret = null;
	   try {
	      ret = (DSC)this.GetStructure("DSC");
	   } catch(HL7Exception e) {
	      HapiLogFactory.GetHapiLog(GetType()).Error("Unexpected error accessing data - this is probably a bug in the source code generator.", e);
	      throw new System.Exception("An unexpected error occurred",e);
	   }
	   return ret;
	}
	}

}
}
{ "pile_set_name": "Github" }
##DESCRIPTION
##  Factoring by Grouping
##
##ENDDESCRIPTION

## DBsubject(Algebra)
## DBchapter(Factoring)
## DBsection(Factoring by grouping)
## Institution(The College of Idaho)
## Author(RA Cruz)
## MLT(AlgFac2)
## MLTleader(1)
## Level(2)
## TitleText1('Essentials of Intermediate Algebra')
## AuthorText1('Blitzer')
## EditionText1('1')
## Section1('5.6')
## Problem1('')
## KEYWORDS('factoring')
## Date: 2007/11 --Updated 2013/09 -rac

DOCUMENT();        # This should be the first executable line in the problem.

loadMacros(
  "PGstandard.pl",
  "MathObjects.pl",
  "CofIdaho_macros.pl",
  "PGcourse.pl"
);

TEXT(beginproblem());

######################################
#  Setup
# Pick an adjacent variable pair from the alphabet list; random(0,4,2)
# yields an even index (0, 2 or 4), so the pair is (a,b), (s,t) or (x,y).
@alphabet = ("a","b","s","t","x","y");
$n = random(0,4,2);
$var1 = $alphabet[$n];
$var2 = $alphabet[$n+1];
Context()->variables->are($var1=>'Real',$var2=>'Real');

# Coefficients: $a in 3..7, $b in 2..6, redrawn until they differ.
$a= random(3,7,1);
do {$b= random(2,6,1);} while ($a==$b);

# Displayed polynomial: var1^2 + a*var1*var2 + b*var1 + a*b*var2,
# which factors by grouping as (var1 + b)(var1 + a*var2).
$polynomial = Formula("$var1^2 + $a $var1 $var2 + $b $var1 + $a*$b $var2")->reduce->TeX;

######################################
#  Main text

BEGIN_TEXT
Factor completely:
$PAR
\( $polynomial = \) \{ ans_rule(30) \}
END_TEXT

######################################
# Answer
# FactoringEvaluator checks the student's answer is a correct factored form.
$answer = "($var1 + $b) * ($var1 + $a *$var2)";
ANS(FactoringEvaluator($answer,[$var1,$var2]));
$showPartialCorrectAnswers = 1;

######################################

;
ENDDOCUMENT();
{ "pile_set_name": "Github" }
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows

package ipv6

import (
	"net"
	"unsafe"

	"golang.org/x/net/internal/socket"
)

// setIPMreq applies an IPV6_JOIN_GROUP/IPV6_LEAVE_GROUP-style socket
// option: it fills an ipv6Mreq with the multicast group address grp and,
// when ifi is non-nil, the interface index, then passes the struct's raw
// bytes to the kernel via so.Set.
func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {
	var mreq ipv6Mreq
	copy(mreq.Multiaddr[:], grp)
	if ifi != nil {
		mreq.setIfindex(ifi.Index)
	}
	// Reinterpret the struct as its wire-format byte slice; the kernel
	// expects exactly sizeofIPv6Mreq bytes.
	b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq]
	return so.Set(c, b)
}
{ "pile_set_name": "Github" }
!!!COM: Palestrina, Giovanni Perluigi da !!!OPR: Missa Emendemus in melius !!!OTL: Gloria **kern **kern **kern **kern *Ibass *Itenor *Icalto *Icant !Bassus !Tenor !Altus !Cantus *clefF4 *clefGv2 *clefG2 *clefG2 *k[b-] *k[b-] *k[b-] *k[b-] *G:dor *G:dor *G:dor *G:dor *M4/2 *M4/2 *M4/2 *M4/2 =1 =1 =1 =1 1.D 1.d 1.f 1.a 2D 2d 2f 2a =2 =2 =2 =2 1F 1c 1f 1a 1G 1B- 1d 1g =3 =3 =3 =3 2D 1A 2d 2f 2F . 2c 2a 2F 2D 1d 2a 4E [2d . 4g 4D . . 4f =4 =4 =4 =4 2E 2d] 1G 2g 2E 2c# . 2g [1D 2d 2A 2f . [2A [2f [2a =5 =5 =5 =5 1D] 2A] 2f] 2a] . 2A 2f 2a 1r 1c 2e 1g . . [2g . =6 =6 =6 =6 0r 1d 2g] 2b- . . 1f 1a . 4A . . . 4B- . . . [2c 4e 4g . . 4d 4f =7 =7 =7 =7 0r 2c] 2e 2g . 4B- 2.f 2a . 4A . . . 1B- . 4d . . 4e 4e . . [2d 4f . . . 4d =8 =8 =8 =8 1r 1A 2d] 1e . . 2c# . 1G 1B- 2d 2r . . 2B- 2d =9 =9 =9 =9 1F 1A 1c 1f 1E 1G 1c 1g =10 =10 =10 =10 2D 2A 2d 2f 1D 1d 1f 1a 2D 2d 2f 2a =11 =11 =11 =11 2F 1c 1f 1a 2F . . . 1G 2B- 2d 2g . 2B- 2d 2g =12 =12 =12 =12 1D 2A 1d 2f . 1B- . 1dd 1r . 2f . . 2B- 2d 2dd =13 =13 =13 =13 0r 1A 1e 1cc . 1G 1g 1b- =14 =14 =14 =14 2r 0d 2f 2a 2D . 2F 2f 2D . 2A 2f 2D . 2B- 2f =15 =15 =15 =15 1C 1G 2c 1e . . 4G . . . 4A . 1BB- 2G 4B- 1d . . 4c . . 2G 4d . . . 4B- . =16 =16 =16 =16 0AA 1F 2c 0a . . 1d . . 1E . . . . 2c# . =17 =17 =17 =17 2r 2D 1d 2r 1D 1A . 1f# . . 2r . 2D 2A [2d 2f# =18 =18 =18 =18 1D 1A 4d] 1f# . . 4d . . . 2d . [1GG [1B- [1d [1g =19 =19 =19 =19 2GG] 2B-] 2d] 2g] 2D 2A 2d 2f 2.D 2A 2d 2.f . [2d [2f . 4E . . 4g =20 =20 =20 =20 2F 4d] 4f] 2a . 4c 4e . 2G 1B- 1d 2b- 1E- . . [1g . 4A 2c . . 4G . . =21 =21 =21 =21 1D 2B- 1d 2g] . 2A . 2f [1BB- [1d [1f 1b- =22 =22 =22 =22 2BB-] 2d] 2f] 1b- 2BB- 2d 2f . 2F 2c 2f 2a 2F 2c 2f 2a =23 =23 =23 =23 1G 2B- 2d 2g . 2B- 2B- 2d 2F 2A 1c 2f 2F [2c . [2a =24 =24 =24 =24 1G 4c] 2e- 4a] . 4B- . 4g . 4B- 4d 1g . 8A 4c . . 8G . . 1D 1A 1d . . . . 2f# =25 =25 =25 =25 1GG 2.B- 2d 2g . . 2.g 2.b- . 4c . . 1r 1d . . . . 4g 4b- . . 2f 2a =26 =26 =26 =26 1r 1r 2e- 2g . . 2d 2f 2r 2r 2e- [1g [2G [2B- 4d . . . 4c . 
=27 =27 =27 =27 4G] 4B-] 1d 2g] 4G 4B- . . 2F 2A . 2a 2E- 2G 2r [1b- 2D 2F 2f . =28 =28 =28 =28 2E- 2G 2e- 2b-] 2E- 2G 1e- 4cc . . . 4dd 1C 2G . 1ee- . 2A 4d . . . 4c . =29 =29 =29 =29 2BB- 2B- 1f 1dd 1B- 1d . . . . 1f 1r 2A 2c . . =30 =30 =30 =30 2G 2B- 0r 0r 2F# 2A . . 1G 2B- . . . [2d . . =31 =31 =31 =31 2E- 2d] 1r 1r 2E- 2c . . 1D 2d 2r 2r . [2D [2f [2a =32 =32 =32 =32 0r 4D] 4f] 4a] . 4D 4f 4a . 2E 2e 2g . 2F 2d 2f . 2A 2c# 2e =33 =33 =33 =33 0r 2D 2d 2f . 2D 2d 2f . 8G 2B- 2d . 8A . . . 8B- . . . 8c . . . [2d 2B- 2d =34 =34 =34 =34 1r 2d] 1A 1e . 2c# . . [1G 1d 2.B- 2.g . . 4c 4a =35 =35 =35 =35 0G] 2.B- 1d 4b- . . . 4cc . . . 2dd . 4c . . . 1d 1g 4b- . . . 4cc . . . 4dd . . . 4b- =36 =36 =36 =36 1E- 1e- 1g 2cc . . . 1b- 1F 1c 1f . . . . 2a =37 =37 =37 =37 2BB- 2d 2f 0b- 2.BB- 2.B- 2.d . 4BB- 4B- 4d . 2BB- 2B- 2d . =38 =38 =38 =38 1E- 2B- 1g 0r . 2c . . 1D 1d 1f . =39 =39 =39 =39 1.E- 2.B- 1.g 2r . . . 2.b- . 4A . . . 2G . . . . . 4b- 2D [2B- 2f 2b- =40 =40 =40 =40 1C 2B-] 2g 1ee- . 2A 2c . 1BB- 2.B- 2.d 2dd . . . [2b- . 4A 4c . =41 =41 =41 =41 0r 2G 4d 2b-] . . 4e . . 2F 1f 2a . 1c . 1g . . 2e . =42 =42 =42 =42 2r 2r 2f 2f 2.BB- 2.B- 2.d 2.f 4BB- 4B- 4d 4f 2BB- 2B- 2d 2f =43 =43 =43 =43 1E- 1G 2B- 1g . . 2c . 2D 2A 2d 2f [2D [2F [2d [2a =44 =44 =44 =44 4D] 4F] 4d] 4a] 4D 4F 4d 4a 2D 2F 2d 2a 1GG 1B- 1d 1g =45 =45 =45 =45 0D 0A 0d 0f# =46 =46 =46 =46 0D 1r 0r 0r . 1d . . =47 =47 =47 =47 1F 1c 0r 0r [1G 2B- . . . 4G . . . 4A . . =48 =48 =48 =48 2G] 4B- 0r 0r . 4c . . 2F 2d . . 2G 2B- . . 2A 2c . . =49 =49 =49 =49 2.B- 2.G 0r 0d 4A 4A . . 2G 2B- . . 4F 4A . . 4E 4G . . =50 =50 =50 =50 4D 2A 0r 1f 4E . . . 4F 1d . . 4D . . . 1E . . [1g . 2c# . . =51 =51 =51 =51 1D 1d 2r 2g] . . 2d 2f 1r 1r 2e 2g . . 2f 2a =52 =52 =52 =52 0r 2r 2.g 2.b- . 2G . . . . 4f 4a . 2c 2e 2g . 2d [2f 4f . . . 4e =53 =53 =53 =53 0r 1B- 4f] 4d . . 4e 4e . . 1d 4f . . . 4d . 1A . 1e . . 2c# . =54 =54 =54 =54 1.G 2r 2d 2d . 1d 1g 1b- 2G 2d 2g 2b- =55 =55 =55 =55 1D 1d 1f 1a [1E- 2B- 1e- 2.g . 
[2c . . . . . 4a =56 =56 =56 =56 1E-] 4c] 2.g 1b- . 4B- . . . 1B- . . . . 4f . 1C . 4e- 1cc . . 4d . . 2A 2e- . =57 =57 =57 =57 1BB- 1B- 0d 0f 1D 1r . . =58 =58 =58 =58 1.F 1D 1r 1a . 1F 1A 1a 4E . . . 4D . . . =59 =59 =59 =59 1E- 1.G 1c 2.g . . . 4a 1D . [1d 2b- . 2F# . 2a =60 =60 =60 =60 0r 2G 2d] 2b- . 2A 2c 4a . . . 4g . 2B- 2d 2f . 4A 2e 2g . 4G . . =61 =61 =61 =61 0r [0F 2.f 2.a . . 4e 4g . . 2d 4a . . . 4b- . . 4c [2cc . . 4B- . =62 =62 =62 =62 0r 1F] 4A 4cc] . . 4B- 4b- . . 4c 1a . . 4A . . 1G 1B- . . . . 2g =63 =63 =63 =63 1.D 2r 2A 2a . 1d 1f 1a 2D 2d 2f 2a =64 =64 =64 =64 1D 2d 1f 1a . 2.d . . 1BB- . 1f [1dd . 4c . . . [2B- . . =65 =65 =65 =65 1F 2B-] 1f 2dd] . 2A . 2cc 2G 2B- 2d 2b- 2A 2c 2c 2a =66 =66 =66 =66 1B- 1d 2B- 2g . . 2d 2f 1E- 2c 2.e- 1g . [2B- . . . . 4d . =67 =67 =67 =67 1F 4B-] 1c 1.f . 8A . . . 8G . . . 2A . . 2BB- 2B- 2d . 2BB- 2d 2f 2b- =68 =68 =68 =68 1BB- 1d 1f 1b- 2F 2c 1f 1a 2F 2A . . =69 =69 =69 =69 2E 2c 0r 0r 2D 2B- . . 4F 2A . . 4E . . . 4D [2B- . . 4C . . . =70 =70 =70 =70 1D 4B-] 1r 1r . 4A . . . 1A . . 1BB- . 2.d 2r . 2G . [2dd . . 4e . =71 =71 =71 =71 1AA 1A 2f 2dd] . . 2e 2cc 1r 1r 2g 2b- . . 2f 2a =72 =72 =72 =72 0r 1r 4e 4g . . 4d 4f . . 1d 4f . . . 8e . . . 8d . [1A . 1e . . 2c# . =73 =73 =73 =73 1.D 2A] [0d 2d . 2A . 1f# . [1A . . 2D . . 2f# =74 =74 =74 =74 1D 1A] 1d] 1f# 1GG 1B- 2r 1g . . [2d . =75 =75 =75 =75 2.D 1A 2d] 2.f . . 2d . 4E . . 4g 2F 2D 1d 2a 2G [2B- . 2b- =76 =76 =76 =76 1E- 2B-] 1G 1g . 4A . . . 4G . . 1D 1A 2.d 2f . . . 2a . . 4e . =77 =77 =77 =77 1r 2D 2f 2a . 1G 2g 2b- 1C . 1e- 1cc . [2c . . =78 =78 =78 =78 1.G 2c] 1d 1dd . 4B- . . . 4A . . . 2B- 2r 1r 2F# 2A [2d . =79 =79 =79 =79 2G 2B- 2d] 1g 2E- 2c 1g . 1D 1d . 2b- . . 2f# 2a =80 =80 =80 =80 1r 2r 2g 2b- . 2A 2e- 2cc 2r [1B- [1d [1dd 2G . . . =81 =81 =81 =81 2G 2B-] 0d] 1dd] 2F# 2A . . 1G 1B- . 1r =82 =82 =82 =82 2F 2c 2r 2r 2E- 2c 2g 2g 1D 1d 2g 2b- . . 2f# 2a =83 =83 =83 =83 0r 1r 1g 1b- . 1A 2f 2cc . . 2e- 2cc =84 =84 =84 =84 0G 0B 1d 2dd . . . 
1dd . . 1d . . . . [2g =85 =85 =85 =85 0C 1c 2.e- 2g] . . . 2.ee- . . 4c . . 1e- 1g . . . . 4dd . . . [2cc =86 =86 =86 =86 0G 0d 0g 2cc] . . . 4b . . . 4a . . . 1b =87 =87 =87 =87 2C 2r 2e 2cc 2C 2c 2e 2g 2F 2c 2f 2a 2A 2c 2c 2a =88 =88 =88 =88 2.G 2.B- 2d 2.b- . . 2e . 4G 4B- . 4b- [1F 1c 1f 2a . . . 2a =89 =89 =89 =89 1F] 1A 2r 2.cc . . 2c . . . . 4cc 1r 2.d 2.f 2b- . . . [2a . 4d 4f . =90 =90 =90 =90 0r 2c 2e 2a] . 1B- 1d 2g . . . 4f . . . 4g . 2A 2c 4a . . . 8g . . . 8a =91 =91 =91 =91 0r 2G 2d 2b- . 2F 2d 2.a . 1G 1B- . . . . 4g . . . [2g =92 =92 =92 =92 1D 2d 1A 2g] . 2d . 2f# 2.G 2.B- 2r 1g . . 2G . 4G 4B- . . =93 =93 =93 =93 2F 2A 2.c 0r 1E- 1G . . . . 4c . . . 1B- . 2D 2F . . =94 =94 =94 =94 2E- 2G 1B- 2r 2E- 1B- . 2b- 1C . 1c 2.ee- . 2A . . . . . 4ee- =95 =95 =95 =95 2BB- 2B- [0f 2dd 4BB- 1B- . 1dd 4C . . . 4D . . . 4E . . . 2F 2A . 2cc =96 =96 =96 =96 1BB- 2d 1f] 2b- . 2d . 2b- 2C 2.c 1e- 4a . . . 4g 2C . . [2g . 4B- . . =97 =97 =97 =97 1D 1A 1d 2g] . . . 2f# [1GG [1B [1d [1g =98 =98 =98 =98 1GG] 1B] 1d] 1g] == == == == *- *- *- *- !!!CDT: 1525/^1526/-1594/2/2 !!!OCY: Italia !!!AGN: Mass (Parody) !!!AST: renaissance, vocal !!!ASW: Antiphon !!!PWK: Masses, Book 7 !!!RNB: Cadence finals: D !!!YOR: Le Opere Complete, v. 23, p. 61 !!!YOO: Rome, Italy: Fratelli Scalera !!!END: 1992// !!!EED: John Miller !!!YEC: Copyright 2000, John Miller !!!YEN: United States of America !!!YEM: Rights to all derivative electronic formats reserved. !!!YEM: Refer to licensing agreement for further details. !!!YEM: This file must be accompanied by the licensing agreement. !!!YEM: A copy of the licensing agreement may be found at http://www.music-cog.ohio-state.edu/HumdrumDatabases/Palestrina/license.txt !!!EMD: converted to Humdrum by Bret Aarden
{ "pile_set_name": "Github" }
package com.ljb.mvp.kotlin.presenter

import com.ljb.mvp.kotlin.common.LoginUser
import com.ljb.mvp.kotlin.common.rx.subscribeNet
import com.ljb.mvp.kotlin.contract.MyContract
import com.ljb.mvp.kotlin.domain.User
import com.ljb.mvp.kotlin.protocol.dao.IUserDaoProtocol
import com.ljb.mvp.kotlin.protocol.http.IUserHttpProtocol
import com.ljb.mvp.kotlin.table.UserTable
import com.ljb.mvp.kotlin.common.rx.RxUtils
import com.ljb.mvp.kotlin.model.MyModel
import dao.ljb.kt.core.DaoFactory
import io.reactivex.Observable
import io.reactivex.android.schedulers.AndroidSchedulers
import io.reactivex.schedulers.Schedulers
import mvp.ljb.kt.presenter.BaseMvpPresenter
import mvp.ljb.kt.presenter.getContextEx
import net.ljb.kt.client.HttpFactory

/**
 * @Author:Kotlin MVP Plugin
 * @Date:2019/04/20
 * @Description Presenter for the "My" screen: fetches the signed-in user's
 * profile through the model layer and handles logout.
 **/
class MyPresenter : BaseMvpPresenter<MyContract.IView, MyContract.IModel>(), MyContract.IPresenter {

    // Tells the MVP framework which model implementation backs this presenter.
    override fun registerModel() = MyModel::class.java

    override fun getUserInfo() {
        // Bind to the view lifecycle so the request is cancelled when the view goes away,
        // run network work on IO and deliver the result on the main thread.
        val request = getModel().getUserInfo()
        request
                .compose(RxUtils.bindToLifecycle(getMvpView()))
                .compose(RxUtils.schedulerIO2Main())
                .subscribeNet(getContextEx()) {
                    onNextEx { user -> getMvpView().showUserInfo(user) }
                }
    }

    override fun logout() {
        // Clear cached credentials first, then let the view navigate away.
        LoginUser.clear()
        getMvpView().logoutSuccess()
    }
}
{ "pile_set_name": "Github" }
{ "replace": false, "values": [ "immersiveengineering:nugget_aluminum" ] }
{ "pile_set_name": "Github" }
<html> <head> <title>SL.P.10.R.11.SL062003-09</title> </head> <body bgcolor="white"> <a name="1">[1]</a> <a href="#1" id=1>2.1 Indian companies work to/ Dollar savings/ US/ To make the proposal. </a> </body> </html>
{ "pile_set_name": "Github" }
package com.ociweb.gl.api;

/**
 * Abstraction over command-line argument parsing, separating the concern of
 * argument fetching from the components that consume the values.
 *
 * <p>Each {@code getArgumentValue} overload looks an argument up by either its
 * long name or its short name, converts the raw string to the requested type,
 * and falls back to {@code defaultValue} when the argument is absent.</p>
 *
 * @author David Giovannini
 */
public interface ArgumentProvider {

    /**
     * @return all strings on command line
     */
    String[] args();

    /**
     * Parses a named Boolean value on the command line.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    Boolean getArgumentValue(String longName, String shortName, Boolean defaultValue);

    /**
     * Parses a named Character value on the command line.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    Character getArgumentValue(String longName, String shortName, Character defaultValue);

    /**
     * Parses a named Byte value on the command line.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    Byte getArgumentValue(String longName, String shortName, Byte defaultValue);

    /**
     * Parses a named Short value on the command line.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    Short getArgumentValue(String longName, String shortName, Short defaultValue);

    /**
     * Parses a named Long value on the command line.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    Long getArgumentValue(String longName, String shortName, Long defaultValue);

    /**
     * Parses a named Integer value on the command line.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    Integer getArgumentValue(String longName, String shortName, Integer defaultValue);

    /**
     * Parses a named String value on the command line.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    String getArgumentValue(String longName, String shortName, String defaultValue);

    /**
     * Parses a named enum value on the command line.
     * @param <T> the specific enum type
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @param c class type of the enum
     * @param defaultValue value to use if not specified on command line
     * @return value on command line or default value
     */
    <T extends Enum<T>> T getArgumentValue(String longName, String shortName, Class<T> c, T defaultValue);

    /**
     * Determines presence of a flag by its short or long name.
     * @param longName full name for key of value on command line
     * @param shortName short name for key of value on command line
     * @return true if key is present
     */
    boolean hasArgument(String longName, String shortName);
}
{ "pile_set_name": "Github" }
package org.wordpress.android.util.image.getters

import android.content.Context
import android.graphics.drawable.Drawable
import android.text.Html
import android.widget.TextView
import com.bumptech.glide.Glide
import org.wordpress.android.WordPress
import org.wordpress.android.util.PhotonUtils
import org.wordpress.android.util.R
import org.wordpress.android.util.image.ImageManager
import org.wordpress.android.util.image.ImageType
import java.lang.ref.WeakReference
import java.util.HashSet
import javax.inject.Inject

/**
 * ImageGetter for Html.fromHtml(). Retrieves images for HTML img tags using Glide library.
 *
 * See [android.text.Html] for more details.
 */
class WPCustomImageGetter(
    textView: TextView,
    private val maxWidth: Int
) : Html.ImageGetter {
    // Weak reference so this getter does not keep a detached TextView alive.
    private val textView: WeakReference<TextView> = WeakReference(textView)

    /**
     * We store all targets, so we can cancel any pending/ongoing requests when we want to load
     * other content into the TextView.
     */
    private val targets = HashSet<WPRemoteResourceViewTarget>()

    @Inject lateinit var imageManager: ImageManager

    init {
        (WordPress.getContext().applicationContext as WordPress).component().inject(this)
        clear(textView)
        // store the WPCustomImageGetter into the textView's tag, so we can cancel any
        // pending/ongoing requests when the TextView is reused.
        textView.setTag(R.id.glide_image_loader_view_tag, this)
    }

    /**
     * Cancels all pending/ongoing requests — both those registered by a previous getter
     * attached to the same TextView (found via the view tag) and our own.
     */
    private fun clear(textView: TextView) {
        val prevGetter = textView.getTag(R.id.glide_image_loader_view_tag) as WPCustomImageGetter?
        prevGetter?.let { clear(textView.context, it.targets) }
        clear(textView.context, targets)
    }

    // Cancels every Glide request tracked in [targets] and empties the set.
    private fun clear(context: Context, targets: MutableSet<WPRemoteResourceViewTarget>) {
        for (target in targets) {
            Glide.with(context).clear(target)
        }
        targets.clear()
    }

    /**
     * This method is called when the HTML parser encounters an img tag.
     * Returns a placeholder Drawable that Glide fills in asynchronously,
     * or null when the TextView has already been garbage-collected.
     */
    override fun getDrawable(url: String): Drawable? {
        var source = url

        // images in reader comments may skip "http:" (no idea why) so make sure to add protocol here
        if (source.startsWith("//")) {
            source = "http:$source"
        }

        // BUGFIX: photonize the normalized `source` (which carries the protocol added above),
        // not the raw `url` — previously the protocol normalization was a dead store and the
        // protocol-less URL was passed on to the loader.
        source = if (maxWidth > 0) PhotonUtils.getPhotonImageUrl(source, maxWidth, 0) else source

        return textView.get()?.let {
            val target = WPRemoteResourceViewTarget(it, maxWidth)
            imageManager.loadIntoCustomTarget(target, ImageType.UNKNOWN, source)
            targets.add(target)
            target.drawable
        }
    }
}
{ "pile_set_name": "Github" }
# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO 
message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyDruid.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyDruid.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PyDruid" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyDruid" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
{ "pile_set_name": "Github" }
[ { "id": "t_tree_chestnut", "fg": [ "chestnut_yar" ], "bg": [ "t_dirt_giant" ] }, { "id": "t_tree_chestnut_season_summer", "fg": [ "chestnut_dead_yar" ], "bg": [ "t_dirt_summer_giant" ] }, { "id": "t_tree_chestnut_season_autumn", "fg": [ "chestnut_dead_yar" ], "bg": [ "t_dirt_autumn_giant" ] }, { "id": "t_tree_chestnut_season_winter", "fg": [ "chestnut_dead_yar" ], "bg": [ "snow_shallow_giant" ] }, { "id": "t_tree_pine", "fg": [ "pine_yar" ], "bg": [ "t_dirt_giant" ] }, { "id": "t_tree_pine_season_summer", "fg": [ "pine_yar" ], "bg": [ "t_dirt_summer_giant" ] }, { "id": "t_tree_pine_season_autumn", "fg": [ "pine_yar" ], "bg": [ "t_dirt_autumn_giant" ] }, { "id": "t_tree_pine_season_winter", "fg": [ "pine_yar" ], "bg": [ "snow_shallow_giant" ] }, { "id": "t_tree_deadpine", "fg": [ "deadpine_yar" ], "bg": [ "t_dirt_giant" ] }, { "id": "t_tree_deadpine_season_summer", "fg": [ "deadpine_yar" ], "bg": [ "t_dirt_summer_giant" ] }, { "id": "t_tree_deadpine_season_autumn", "fg": [ "deadpine_yar" ], "bg": [ "t_dirt_autumn_giant" ] }, { "id": "t_tree_deadpine_season_winter", "fg": [ "deadpine_yar" ], "bg": [ "snow_shallow_giant" ] }, { "id": "t_tree_hickory", "fg": [ "hickory_yar" ], "bg": [ "t_dirt_giant" ] }, { "id": "t_tree_hickory_season_summer", "fg": [ "hickory_yar" ], "bg": [ "t_dirt_summer_giant" ] }, { "id": "t_tree_hickory_season_autumn", "fg": [ "hickory_yar" ], "bg": [ "t_dirt_autumn_giant" ] }, { "id": "t_tree_hickory_season_winter", "fg": [ "hickory_dead_yar" ], "bg": [ "snow_shallow_giant" ] }, { "id": "t_tree_hickory_dead", "fg": [ "hickory_dead_yar" ], "bg": [ "t_dirt_giant" ] }, { "id": "t_tree_hickory_dead_season_summer", "fg": [ "hickory_dead_yar" ], "bg": [ "t_dirt_summer_giant" ] }, { "id": "t_tree_hickory_dead_season_autumn", "fg": [ "hickory_dead_yar" ], "bg": [ "t_dirt_autumn_giant" ] }, { "id": "t_tree_hickory_dead_season_winter", "fg": [ "hickory_dead_yar" ], "bg": [ "snow_shallow_giant" ] } ]
{ "pile_set_name": "Github" }
package pay import ( "github.com/yaotian/gowechat/mch/base" "github.com/yaotian/gowechat/wxcontext" ) //Pay pay type Pay struct { base.MchBase } //NewPay 实例化 func NewPay(context *wxcontext.Context) *Pay { pay := new(Pay) pay.Context = context return pay } //UnifiedOrder 统一下单. func (c *Pay) UnifiedOrder(req map[string]string) (resp map[string]string, err error) { return c.PostXML("https://api.mch.weixin.qq.com/pay/unifiedorder", req, false) } //OrderQuery 查询订单. func (c *Pay) OrderQuery(req map[string]string) (resp map[string]string, err error) { return c.PostXML("https://api.mch.weixin.qq.com/pay/orderquery", req, false) } //CloseOrder 关闭订单. func (c *Pay) CloseOrder(req map[string]string) (resp map[string]string, err error) { return c.PostXML("https://api.mch.weixin.qq.com/pay/closeorder", req, false) } //Refund 申请退款. // NOTE: 请求需要双向证书. func (c *Pay) Refund(req map[string]string) (resp map[string]string, err error) { return c.PostXML("https://api.mch.weixin.qq.com/secapi/pay/refund", req, true) } //RefundQuery 查询退款. func (c *Pay) RefundQuery(req map[string]string) (resp map[string]string, err error) { return c.PostXML("https://api.mch.weixin.qq.com/pay/refundquery", req, false) }
{ "pile_set_name": "Github" }
/* DO NOT EDIT THIS FILE - it is machine generated */ #include <jni.h> /* Header for class com_wiyun_engine_transitions_BottomPushInTransition */ #ifndef _Included_com_wiyun_engine_transitions_BottomPushInTransition #define _Included_com_wiyun_engine_transitions_BottomPushInTransition #ifdef __cplusplus extern "C" { #endif #undef com_wiyun_engine_transitions_BottomPushInTransition_INVALID_TAG #define com_wiyun_engine_transitions_BottomPushInTransition_INVALID_TAG -1L /* * Class: com_wiyun_engine_transitions_BottomPushInTransition * Method: nativeInit * Signature: (FLcom/wiyun/engine/nodes/Scene;)V */ JNIEXPORT void JNICALL Java_com_wiyun_engine_transitions_BottomPushInTransition_nativeInit (JNIEnv *, jobject, jfloat, jobject); #ifdef __cplusplus } #endif #endif
{ "pile_set_name": "Github" }
.class public abstract Landroid/support/v4/media/session/MediaControllerCompat$TransportControls; .super Ljava/lang/Object; # direct methods .method constructor <init>()V .locals 0 invoke-direct {p0}, Ljava/lang/Object;-><init>()V return-void .end method # virtual methods .method public abstract fastForward()V .end method .method public abstract pause()V .end method .method public abstract play()V .end method .method public abstract playFromMediaId(Ljava/lang/String;Landroid/os/Bundle;)V .end method .method public abstract playFromSearch(Ljava/lang/String;Landroid/os/Bundle;)V .end method .method public abstract rewind()V .end method .method public abstract seekTo(J)V .end method .method public abstract sendCustomAction(Landroid/support/v4/media/session/PlaybackStateCompat$CustomAction;Landroid/os/Bundle;)V .end method .method public abstract sendCustomAction(Ljava/lang/String;Landroid/os/Bundle;)V .end method .method public abstract setRating(Landroid/support/v4/media/RatingCompat;)V .end method .method public abstract skipToNext()V .end method .method public abstract skipToPrevious()V .end method .method public abstract skipToQueueItem(J)V .end method .method public abstract stop()V .end method
{ "pile_set_name": "Github" }
var baseAt = require('../internal/baseAt'),
    baseFlatten = require('../internal/baseFlatten'),
    restParam = require('../function/restParam');

/**
 * Creates an array of elements corresponding to the given keys, or indexes,
 * of `collection`. Keys may be specified as individual arguments or as arrays
 * of keys.
 *
 * @static
 * @memberOf _
 * @category Collection
 * @param {Array|Object|string} collection The collection to iterate over.
 * @param {...(number|number[]|string|string[])} [props] The property names
 *  or indexes of elements to pick, specified individually or in arrays.
 * @returns {Array} Returns the new array of picked elements.
 * @example
 *
 * _.at(['a', 'b', 'c'], [0, 2]);
 * // => ['a', 'c']
 *
 * _.at(['barney', 'fred', 'pebbles'], 0, 2);
 * // => ['barney', 'pebbles']
 */
var at = restParam(function(collection, props) {
  // Flatten first so mixed scalar/array key arguments are treated uniformly.
  var keys = baseFlatten(props);
  return baseAt(collection, keys);
});

module.exports = at;
{ "pile_set_name": "Github" }
using System;
using System.Linq.Expressions;
using Abp.Domain.Entities;
using RefactorThis.GraphDiff;

namespace Abp.EntityFramework.GraphDiff.Mapping
{
    /// <summary>
    /// Helper class for creating entity mappings.
    /// </summary>
    public static class MappingExpressionBuilder
    {
        /// <summary>
        /// A shortcut of <see cref="For{TEntity,TPrimaryKey}"/> for the most used
        /// primary key type (<see cref="int"/>).
        /// </summary>
        /// <typeparam name="TEntity">Entity type</typeparam>
        /// <param name="expression">GraphDiff update-configuration expression for the entity</param>
        /// <returns>The entity mapping</returns>
        public static EntityMapping For<TEntity>(Expression<Func<IUpdateConfiguration<TEntity>, object>> expression)
            where TEntity : class, IEntity
            => For<TEntity, int>(expression);

        /// <summary>
        /// Builds a mapping for an entity with a specified primary key type.
        /// </summary>
        /// <typeparam name="TEntity">Entity type</typeparam>
        /// <typeparam name="TPrimaryKey">Primary key type of the entity</typeparam>
        /// <param name="expression">GraphDiff update-configuration expression for the entity</param>
        /// <returns>The entity mapping</returns>
        public static EntityMapping For<TEntity, TPrimaryKey>(Expression<Func<IUpdateConfiguration<TEntity>, object>> expression)
            where TPrimaryKey : IEquatable<TPrimaryKey>
            where TEntity : class, IEntity<TPrimaryKey>
            => new EntityMapping(typeof(TEntity), expression);
    }
}
{ "pile_set_name": "Github" }
/* * Docker Engine API * * The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { \"message\": \"page not found\" } ``` # Versioning The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. For Docker Engine 17.10, the API version is 1.33. To lock to this version, you prefix the URL with `/v1.33`. For example, calling `/info` is the same as calling `/v1.33/info`. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated will be removed in a future version of Docker. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. This documentation is for version 1.34 of the API. 
Use this table to find documentation for previous versions of the API: Docker version | API version | Changes ----------------|-------------|--------- 17.10.x | [1.33](https://docs.docker.com/engine/api/v1.33/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-33-api-changes) 17.09.x | [1.32](https://docs.docker.com/engine/api/v1.32/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-32-api-changes) 17.07.x | [1.31](https://docs.docker.com/engine/api/v1.31/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-31-api-changes) 17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes) 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes) 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes) 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes) 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API 
changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: ``` { \"username\": \"string\", \"password\": \"string\", \"email\": \"string\", \"serveraddress\": \"string\" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. 
If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { \"identitytoken\": \"9cbaf023786cd7...\" } ``` * * OpenAPI spec version: 1.34 * * Generated by: https://github.com/swagger-api/swagger-codegen.git */ use std::borrow::Borrow; use std::sync::Arc; use futures::{Future, Stream}; use hyper; use serde_json; use typed_headers::{self, http, mime, HeaderMapExt}; use super::{configuration, Error}; pub struct SystemApiClient<C: hyper::client::connect::Connect> { configuration: Arc<configuration::Configuration<C>>, } impl<C: hyper::client::connect::Connect> SystemApiClient<C> { pub fn new(configuration: Arc<configuration::Configuration<C>>) -> Self { SystemApiClient { configuration } } } pub trait SystemApi: Send + Sync { fn system_auth( &self, auth_config: crate::models::AuthConfig, ) -> Box<dyn Future<Item = crate::models::InlineResponse20010, Error = Error<serde_json::Value>>>; fn system_data_usage( &self, ) -> Box<dyn Future<Item = crate::models::InlineResponse20013, Error = Error<serde_json::Value>>>; fn system_events( &self, since: &str, until: &str, filters: &str, ) -> Box<dyn Future<Item = crate::models::InlineResponse20012, Error = Error<serde_json::Value>>>; fn system_info( &self, ) -> Box<dyn Future<Item = crate::models::SystemInfo, Error = Error<serde_json::Value>> + Send>; fn system_ping(&self) -> Box<dyn Future<Item = String, Error = Error<serde_json::Value>>>; fn system_version( &self, ) -> Box<dyn Future<Item = crate::models::InlineResponse20011, Error = Error<serde_json::Value>>>; } impl<C> SystemApi for SystemApiClient<C> where C: hyper::client::connect::Connect + 'static, <C as hyper::client::connect::Connect>::Transport: 'static, <C as hyper::client::connect::Connect>::Future: 'static, { fn system_auth( &self, auth_config: crate::models::AuthConfig, ) -> Box<dyn Future<Item = crate::models::InlineResponse20010, Error = Error<serde_json::Value>>> { let 
configuration: &configuration::Configuration<C> = self.configuration.borrow(); let method = hyper::Method::POST; let uri_str = format!("/auth"); let uri = (configuration.uri_composer)(&configuration.base_path, &uri_str); // TODO(farcaller): handle error // if let Err(e) = uri { // return Box::new(futures::future::err(e)); // } let serialized = serde_json::to_string(&auth_config).unwrap(); let serialized_len = serialized.len(); let mut req = hyper::Request::builder(); req.method(method).uri(uri.unwrap()); if let Some(ref user_agent) = configuration.user_agent { req.header(http::header::USER_AGENT, &**user_agent); } let mut req = req .body(hyper::Body::from(serialized)) .expect("could not build hyper::Request"); req.headers_mut() .typed_insert(&typed_headers::ContentType(mime::APPLICATION_JSON)); req.headers_mut() .typed_insert(&typed_headers::ContentLength(serialized_len as u64)); // send request Box::new( configuration .client .request(req) .map_err(|e| Error::from(e)) .and_then(|resp| { let (http::response::Parts { status, .. 
}, body) = resp.into_parts(); body.concat2() .and_then(move |body| Ok((status, body))) .map_err(|e| Error::from(e)) }) .and_then(|(status, body)| { if status.is_success() { Ok(body) } else { Err(Error::from((status, &*body))) } }) .and_then(|body| { let parsed: Result<crate::models::InlineResponse20010, _> = serde_json::from_slice(&body); parsed.map_err(|e| Error::from(e)) }), ) } fn system_data_usage( &self, ) -> Box<dyn Future<Item = crate::models::InlineResponse20013, Error = Error<serde_json::Value>>> { let configuration: &configuration::Configuration<C> = self.configuration.borrow(); let method = hyper::Method::GET; let uri_str = format!("/system/df"); let uri = (configuration.uri_composer)(&configuration.base_path, &uri_str); // TODO(farcaller): handle error // if let Err(e) = uri { // return Box::new(futures::future::err(e)); // } let mut req = hyper::Request::builder(); req.method(method).uri(uri.unwrap()); if let Some(ref user_agent) = configuration.user_agent { req.header(http::header::USER_AGENT, &**user_agent); } let req = req .body(hyper::Body::empty()) .expect("could not build hyper::Request"); // send request Box::new( configuration .client .request(req) .map_err(|e| Error::from(e)) .and_then(|resp| { let (http::response::Parts { status, .. 
}, body) = resp.into_parts(); body.concat2() .and_then(move |body| Ok((status, body))) .map_err(|e| Error::from(e)) }) .and_then(|(status, body)| { if status.is_success() { Ok(body) } else { Err(Error::from((status, &*body))) } }) .and_then(|body| { let parsed: Result<crate::models::InlineResponse20013, _> = serde_json::from_slice(&body); parsed.map_err(|e| Error::from(e)) }), ) } fn system_events( &self, since: &str, until: &str, filters: &str, ) -> Box<dyn Future<Item = crate::models::InlineResponse20012, Error = Error<serde_json::Value>>> { let configuration: &configuration::Configuration<C> = self.configuration.borrow(); let method = hyper::Method::GET; let query = ::url::form_urlencoded::Serializer::new(String::new()) .append_pair("since", &since.to_string()) .append_pair("until", &until.to_string()) .append_pair("filters", &filters.to_string()) .finish(); let uri_str = format!("/events?{}", query); let uri = (configuration.uri_composer)(&configuration.base_path, &uri_str); // TODO(farcaller): handle error // if let Err(e) = uri { // return Box::new(futures::future::err(e)); // } let mut req = hyper::Request::builder(); req.method(method).uri(uri.unwrap()); if let Some(ref user_agent) = configuration.user_agent { req.header(http::header::USER_AGENT, &**user_agent); } let req = req .body(hyper::Body::empty()) .expect("could not build hyper::Request"); // send request Box::new( configuration .client .request(req) .map_err(|e| Error::from(e)) .and_then(|resp| { let (http::response::Parts { status, .. 
}, body) = resp.into_parts(); body.concat2() .and_then(move |body| Ok((status, body))) .map_err(|e| Error::from(e)) }) .and_then(|(status, body)| { if status.is_success() { Ok(body) } else { Err(Error::from((status, &*body))) } }) .and_then(|body| { let parsed: Result<crate::models::InlineResponse20012, _> = serde_json::from_slice(&body); parsed.map_err(|e| Error::from(e)) }), ) } fn system_info( &self, ) -> Box<dyn Future<Item = crate::models::SystemInfo, Error = Error<serde_json::Value>> + Send> { let configuration: &configuration::Configuration<C> = self.configuration.borrow(); let method = hyper::Method::GET; let uri_str = format!("/info"); let uri = (configuration.uri_composer)(&configuration.base_path, &uri_str); // TODO(farcaller): handle error // if let Err(e) = uri { // return Box::new(futures::future::err(e)); // } let mut req = hyper::Request::builder(); req.method(method).uri(uri.unwrap()); if let Some(ref user_agent) = configuration.user_agent { req.header(http::header::USER_AGENT, &**user_agent); } let req = req .body(hyper::Body::empty()) .expect("could not build hyper::Request"); // send request Box::new( configuration .client .request(req) .map_err(|e| Error::from(e)) .and_then(|resp| { let (http::response::Parts { status, .. 
}, body) = resp.into_parts(); body.concat2() .and_then(move |body| Ok((status, body))) .map_err(|e| Error::from(e)) }) .and_then(|(status, body)| { if status.is_success() { Ok(body) } else { Err(Error::from((status, &*body))) } }) .and_then(|body| { let parsed: Result<crate::models::SystemInfo, _> = serde_json::from_slice(&body); parsed.map_err(|e| Error::from(e)) }), ) } fn system_ping(&self) -> Box<dyn Future<Item = String, Error = Error<serde_json::Value>>> { let configuration: &configuration::Configuration<C> = self.configuration.borrow(); let method = hyper::Method::GET; let uri_str = format!("/_ping"); let uri = (configuration.uri_composer)(&configuration.base_path, &uri_str); // TODO(farcaller): handle error // if let Err(e) = uri { // return Box::new(futures::future::err(e)); // } let mut req = hyper::Request::builder(); req.method(method).uri(uri.unwrap()); if let Some(ref user_agent) = configuration.user_agent { req.header(http::header::USER_AGENT, &**user_agent); } let req = req .body(hyper::Body::empty()) .expect("could not build hyper::Request"); // send request Box::new( configuration .client .request(req) .map_err(|e| Error::from(e)) .and_then(|resp| { let (http::response::Parts { status, .. 
}, body) = resp.into_parts(); body.concat2() .and_then(move |body| Ok((status, body))) .map_err(|e| Error::from(e)) }) .and_then(|(status, body)| { if status.is_success() { Ok(body) } else { Err(Error::from((status, &*body))) } }) .and_then(|body| { let parsed: Result<String, _> = serde_json::from_slice(&body); parsed.map_err(|e| Error::from(e)) }), ) } fn system_version( &self, ) -> Box<dyn Future<Item = crate::models::InlineResponse20011, Error = Error<serde_json::Value>>> { let configuration: &configuration::Configuration<C> = self.configuration.borrow(); let method = hyper::Method::GET; let uri_str = format!("/version"); let uri = (configuration.uri_composer)(&configuration.base_path, &uri_str); // TODO(farcaller): handle error // if let Err(e) = uri { // return Box::new(futures::future::err(e)); // } let mut req = hyper::Request::builder(); req.method(method).uri(uri.unwrap()); if let Some(ref user_agent) = configuration.user_agent { req.header(http::header::USER_AGENT, &**user_agent); } let req = req .body(hyper::Body::empty()) .expect("could not build hyper::Request"); // send request Box::new( configuration .client .request(req) .map_err(|e| Error::from(e)) .and_then(|resp| { let (http::response::Parts { status, .. }, body) = resp.into_parts(); body.concat2() .and_then(move |body| Ok((status, body))) .map_err(|e| Error::from(e)) }) .and_then(|(status, body)| { if status.is_success() { Ok(body) } else { Err(Error::from((status, &*body))) } }) .and_then(|body| { let parsed: Result<crate::models::InlineResponse20011, _> = serde_json::from_slice(&body); parsed.map_err(|e| Error::from(e)) }), ) } }
{ "pile_set_name": "Github" }
{ "accountLinkingWhitelistedDomains": null, "asin": "B01M2UKUDR", "averageRating": 5, "canDisable": true, "capabilities": null, "category": null, "description": "The skill tells a failed future prediction at a time.\nNote: This skill may not be suitable for all ages.", "enablement": null, "exampleInteractions": [ "Alexa, Failed Future Predictions", "Alexa, ask Failed Future Predictions to tell me a prediction", "Alexa, ask Failed Future Predictions for help" ], "firstReleaseDate": 1476517563.284, "homepageLinkText": null, "homepageLinkUrl": null, "id": "amzn1.ask.skill.bb609761-9521-453c-b60c-3c48978192bc", "imageAltText": "Failed Future Predictions icon", "imageUrl": "https://github.com/dale3h/alexa-skills-list/raw/master/skills/B01M2UKUDR/skill_icon", "inAppPurchasingSupported": false, "launchPhrase": "failed future predictions", "name": "Failed Future Predictions", "numberOfReviews": 2, "pamsPartnerId": null, "permissions": null, "privacyPolicyUrl": null, "shortDescription": "Tells a failed future prediction at a time", "skillTypes": null, "stage": "live", "termsOfUseUrl": null, "vendorId": "M4H8OV97AELKX", "vendorName": "Carson IP" }
{ "pile_set_name": "Github" }
import * as React from 'react'; import classNames from 'classnames'; import AccessibleSVG from '../accessible-svg'; import { bdlGray80 } from '../../styles/variables'; import { Icon } from '../iconTypes'; const IconCollections = ({ className = '', color = bdlGray80, height = 16, title, width = 16 }: Icon) => ( <AccessibleSVG className={classNames('bdl-IconCollections', className)} height={height} title={title} viewBox="0 0 16 16" width={width} > <path className="fill-color" fill={color} fillRule="evenodd" d="M13.888 1C14.502 1 15 1.498 15 2.112v9.776c0 .614-.498 1.112-1.112 1.112H4.112A1.112 1.112 0 0 1 3 11.888V2.112C3 1.498 3.498 1 4.112 1h9.776zM14 2H4v10h10V2zM1.5 7c.25 0 .5.135.5.5v5.103C2 13.504 2.5 14 3.397 14H8.5c.358.01.5.25.5.5s-.142.49-.5.5H3.154A2.154 2.154 0 0 1 1 12.847V7.5c0-.364.25-.5.5-.5z" /> </AccessibleSVG> ); export default IconCollections;
{ "pile_set_name": "Github" }
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs defs_openbsd.go

package socket

const (
	// Address-family constants (AF_*) and the raw socket type
	// (SOCK_RAW); values extracted from OpenBSD's C headers by
	// cgo -godefs.
	sysAF_UNSPEC = 0x0
	sysAF_INET   = 0x2
	sysAF_INET6  = 0x18

	sysSOCK_RAW = 0x3
)

// iovec is the Go layout of the C struct iovec (one scatter/gather
// buffer: base pointer plus length).
type iovec struct {
	Base *byte
	Len  uint32
}

// msghdr is the Go layout of the C struct msghdr (address, I/O
// vector, and ancillary-control buffer descriptors).
type msghdr struct {
	Name       *byte
	Namelen    uint32
	Iov        *iovec
	Iovlen     uint32
	Control    *byte
	Controllen uint32
	Flags      int32
}

// cmsghdr is the Go layout of the C struct cmsghdr (header of one
// ancillary-data object).
type cmsghdr struct {
	Len   uint32
	Level int32
	Type  int32
}

// sockaddrInet is the Go layout of the C IPv4 socket address
// structure (note the leading Len byte used by BSD systems).
type sockaddrInet struct {
	Len    uint8
	Family uint8
	Port   uint16
	Addr   [4]byte /* in_addr */
	Zero   [8]int8
}

// sockaddrInet6 is the Go layout of the C IPv6 socket address
// structure.
type sockaddrInet6 struct {
	Len      uint8
	Family   uint8
	Port     uint16
	Flowinfo uint32
	Addr     [16]byte /* in6_addr */
	Scope_id uint32
}

const (
	// Byte sizes of the C types above, as measured by cgo -godefs.
	sizeofIovec  = 0x8
	sizeofMsghdr = 0x1c

	sizeofCmsghdr = 0xc

	sizeofSockaddrInet  = 0x10
	sizeofSockaddrInet6 = 0x1c
)
{ "pile_set_name": "Github" }
include_directories(
  ${ROSE_SOURCE_DIR}/src/midend/programAnalysis/CallGraphAnalysis
  ${ROSE_SOURCE_DIR}/src/midend/programAnalysis/staticInterproceduralSlicing
  ${ROSE_SOURCE_DIR}/src/midend/programAnalysis/staticSingleAssignment
)

# Sources shared by all three tools: API spec/signature parsing,
# dependency attributes, and s-expression handling.  Previously each
# add_executable repeated this list verbatim, which invites the lists
# drifting out of sync when a file is added or renamed.
set(API_CORE_SOURCES
  APIReader.cc APIReader.h
  APISpec.cc APISpec.h
  GenericDepAttrib.cc GenericDepAttrib.h
  APIDepAttrib.h
  SignatureReader.cc SignatureReader.h
  ssexpr.cc ssexpr.h)

# Additional sources shared by the two pragma-aware tools
# (extractMPISkeleton and generateSignatures).
set(PRAGMA_TOOL_SOURCES
  ${API_CORE_SOURCES}
  APIDepFinder.cc APIDepFinder.h
  DangerousOperationFinder.h
  Utils.cc Utils.h
  APIDepChecker.h
  processPragmas.cc processPragmas.h
  annotatePragmas.cc annotatePragmas.h)

# Skeleton extractor: the only tool that also needs the outliner.
add_executable(extractMPISkeleton
  extractMPISkeleton.cc
  Outline.cc Outline.h
  ${PRAGMA_TOOL_SOURCES})
target_link_libraries(extractMPISkeleton ROSE_DLL ${Boost_LIBRARIES})

add_executable(generateSignatures
  generateSignatures.cc
  ${PRAGMA_TOOL_SOURCES})
target_link_libraries(generateSignatures ROSE_DLL ${Boost_LIBRARIES})

# Signature summarizer only needs the core parsing sources.
add_executable(summarizeSignatures
  summarizeSignatures.cc
  ${API_CORE_SOURCES})
target_link_libraries(summarizeSignatures ROSE_DLL ${Boost_LIBRARIES})
{ "pile_set_name": "Github" }
---
permalink: /:basename/
redirecturl: https://trumptracker.github.io/a-trump-administration-will-ensure-that-israel-receives-maximum-military-strategic-and-tactical-coope/
---
<!--
  This file was created by Rakefile at 2017-05-14 14:25:56 UTC
  to prevent users from getting 404 errors and to redirect them
  to the correct file.

  DO NOT DELETE THIS FILE!
-->
<!-- The JavaScript redirect runs first so a "?reddit" marker in the
     request URL is preserved across the hop; the meta refresh below is
     the fallback for clients without JavaScript, and the canonical link
     points search engines at the destination page. -->
<script>
if(document.location.href.indexOf("?reddit") !== -1) {
  document.location = "{{ page.redirecturl }}?reddit";
}
</script>
<meta http-equiv="refresh" content="0; url={{ page.redirecturl }}">
<link rel="canonical" href="{{ page.redirecturl }}" />
{ "pile_set_name": "Github" }
/* m88k.s -- assembly support. */

/*
 * QuickThreads -- Threads-building toolkit.
 * Copyright (c) 1993 by David Keppel
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that the above copyright notice and this notice
 * appear in all copies.  This software is provided as a
 * proof-of-concept and for demonstration purposes; there is no
 * representation about the suitability of this software for any
 * purpose.
 */

/* Callee-save r14..r25, r31(sp), r30(fp).  r1 === return pc.
 * Argument registers r2..r9, return value r2..r3.
 *
 * On startup, restore regs so retpc === call to a function to start.
 *
 * We're going to call a function (r2) from within the context switch
 * routine.  Call it on the new thread's stack on behalf of the old
 * thread.
 *
 * NOTE(review): the `.n' instruction forms below appear to be the
 * m88k delayed-transfer variants -- the instruction written after a
 * `jsr.n'/`jmp.n' executes in the delay slot, i.e. before control
 * actually transfers.  The per-instruction comments rely on this.
 */

	.globl _qt_block
	.globl _qt_blocki
	.globl _qt_abort
	.globl _qt_start
	.globl _qt_vstart

/*
** r2:  ptr to function to call once curr is suspended
**	and control is on r5's stack.
** r3:  1'th arg to *r2.
** r4:  2'th arg to *r2.
** r5:  sp of thread to suspend.
**
** The helper routine returns a value that is passed on as the
** return value from the blocking routine.  Since we don't
** touch r2 between the helper's return and the end of
** function, we get this behavior for free.
**
** Same entry for integer-only and floating-point, since there
** are no separate integer and floating-point registers.
**
** Each procedure call sets aside a ``home region'' of 8 regs
** for r2-r9 for varargs.  For context switches we don't use
** the ``home region'' for varargs, so use it to save regs.
** Allocate 64 bytes of save space -- use the 32 bytes of register
** save area passed in to us plus 32 bytes we allocated; use
** the other 32 bytes as a save area for calling the helper
** function.
*/
_qt_block:
_qt_blocki:
	sub r31, r31,64		/* Allocate reg save space. */
	st r1, r31,8+32		/* Save callee-save registers. */
	st r14, r31,12+32
	st.d r15, r31,16+32
	st.d r17, r31,24+32
	st.d r19, r31,32+32
	st.d r21, r31,40+32
	st.d r23, r31,48+32
	st r25, r31,56+32
	st r30, r31,60+32

_qt_abort:
	addu r14, r31,0		/* Remember old sp. */
	addu r31, r5,0		/* Set new sp. */
	jsr.n r2		/* Call helper. */
	addu r2, r14,0		/* Pass old sp as an arg0 to helper. */

	/* Helper returned: we are now running on the new thread's
	   stack; restore its callee-save state. */
	ld r1, r31,8+32		/* Restore callee-save registers. */
	ld r14, r31,12+32
	ld.d r15, r31,16+32
	ld.d r17, r31,24+32
	ld.d r19, r31,32+32
	ld.d r21, r31,40+32
	ld.d r23, r31,48+32
	ld r25, r31,56+32
	ld r30, r31,60+32

	jmp.n r1		/* Return to new thread's caller. */
	addu r31, r31,64	/* Free register save space. */

/*
** Non-varargs thread startup.
** See `m88k.h' for register use conventions.
*/
_qt_start:
	addu r2, r14,0		/* Set user arg `pu'. */
	addu r3, r15,0		/* ... user function pt. */
	jsr.n r17		/* Call `only'. */
	addu r4, r16,0		/* ... user function userf. */

	bsr _qt_error		/* `only' erroneously returned. */

/*
** Varargs thread startup.
** See `m88k.h' for register use conventions.
**
** Call the `startup' function with just argument `pt'.
** Then call `vuserf' with 8 register args plus any
** stack args.
** Then call `cleanup' with `pt' and the return value
** from `vuserf'.
*/
_qt_vstart:
	addu r18, r30,0		/* Remember arg7 to `vuserf'. */
	addu r30, r0,0		/* Null-terminate call chain. */
	jsr.n r17		/* Call `startup'. */
	addu r2, r15,0		/* `pt' is arg0 to `startup'. */

	addu r2, r19,0		/* Set arg0. */
	addu r3, r20,0		/* Set arg1. */
	addu r4, r21,0		/* Set arg2. */
	addu r5, r22,0		/* Set arg3. */
	addu r6, r23,0		/* Set arg4. */
	addu r7, r24,0		/* Set arg5. */
	addu r8, r25,0		/* Set arg6. */
	jsr.n r16		/* Call `vuserf'. */
	addu r9, r18,0		/* Set arg7. */

	addu r3, r2,0		/* Ret. value is arg1 to `cleanup'. */
	jsr.n r14		/* Call `cleanup'. */
	addu r2, r15,0		/* `pt' is arg0 to `cleanup'. */

	bsr _qt_error		/* `cleanup' erroneously returned. */
{ "pile_set_name": "Github" }
(* ------------------------------------------------------------------------- *) (* Macros of Count Monad *) (* ------------------------------------------------------------------------- *) (*===========================================================================*) (* add all dependent libraries for script *) open HolKernel boolLib bossLib Parse; (* declare new theory at start *) val _ = new_theory "countMacro"; (* ------------------------------------------------------------------------- *) (* val _ = load "jcLib"; *) open jcLib; (* val _ = load "SatisfySimps"; (* for SatisfySimps.SATISFY_ss *) *) (* Get dependent theories local *) (* val _ = load "complexityTheory"; *) open bitsizeTheory complexityTheory; (* val _ = load "loopIncreaseTheory"; *) (* val _ = load "loopDecreaseTheory"; *) (* val _ = load "loopDivideTheory"; *) (* val _ = load "loopMultiplyTheory"; *) (* val _ = load "loopListTheory"; *) (* pre-load and open here for other count scripts. *) open loopIncreaseTheory loopDecreaseTheory; open loopDivideTheory loopMultiplyTheory loopListTheory; (* Get dependent theories in lib *) (* (* val _ = load "helperNumTheory"; -- in monoidTheory *) *) (* (* val _ = load "helperSetTheory"; -- in monoidTheory *) *) open helperNumTheory helperSetTheory helperListTheory; open pred_setTheory listTheory arithmeticTheory; (* (* val _ = load "dividesTheory"; -- in helperNumTheory *) *) (* (* val _ = load "gcdTheory"; -- in helperNumTheory *) *) open dividesTheory gcdTheory; (* val _ = load "countMonadTheory"; *) open countMonadTheory; (* val _ = load "monadsyntax"; *) open monadsyntax; open pairTheory optionTheory; open listRangeTheory; open logPowerTheory; (* for halves *) val _ = monadsyntax.enable_monadsyntax(); val _ = monadsyntax.enable_monad "Count"; (* ------------------------------------------------------------------------- *) (* Macros of Count Monad Documentation *) (* ------------------------------------------------------------------------- *) (* Data type: *) 
(* Overloading: FUN_POS f = !x. 0 < f x O_poly n = big_O ((POLY n) o size) add_ = app2 addM sub_ = app2 subM mul_ = app2 mulM div_ = app2 divM mod_ = app2 modM eq_ = app2 eqM id_ = app1 idM null_ = app1 nullM head_ = app1 headM tail_ = app1 tailM cons_ = app2 consM parity_ = app1 parityM even_ = app1 evenM half_ = app1 halfM sq_ = app1 sqM inc_ = app1 incM dec_ = app1 decM twice_ = app1 twiceM leq_ = app2 leqM lt_ = app2 ltM gt1_ = app1 gt1M *) (* Definitions and Theorems (# are exported): Helper: Constructors: make_0M_def |- make_0M = do tick 1; 0c od make_FM_def |- make_FM = do tick 1; unit F od make_nilM_def |- make_nilM = do tick 1; unit [] od # make_0M_value |- valueOf make_0M = 0 # make_FM_value |- valueOf make_FM <=> F # make_nilM_value |- valueOf make_nilM = [] # make_0M_steps |- stepsOf make_0M = 1 # make_FM_steps |- stepsOf make_FM = 1 # make_nilM_steps |- stepsOf make_nilM = 1 Identity: idM_def |- !x. idM x = do tick 0; unit x od # idM_value |- !x. valueOf (idM x) = x # idM_steps |- !x. stepsOf (idM x) = 0 Basic Arithmetic: addM_def |- !x y. addM x y = do tick (MAX (size x) (size y)); unit (x + y) od subM_def |- !x y. subM x y = do tick (MAX (size x) (size y)); unit (x - y) od mulM_def |- !x y. mulM x y = do tick (size x * size y); unit (x * y) od divM_def |- !x y. divM x y = do tick (size x * size y); unit (x DIV y) od modM_def |- !x y. modM x y = do tick (size x * size y); unit (x MOD y) od # addM_value |- !x y. valueOf (addM x y) = x + y # subM_value |- !x y. valueOf (subM x y) = x - y # mulM_value |- !x y. valueOf (mulM x y) = x * y # divM_value |- !x y. valueOf (divM x y) = x DIV y # modM_value |- !x y. valueOf (modM x y) = x MOD y # addM_steps |- !x y. stepsOf (addM x y) = MAX (size x) (size y) # subM_steps |- !x y. stepsOf (subM x y) = MAX (size x) (size y) # mulM_steps |- !x y. stepsOf (mulM x y) = size x * size y # divM_steps |- !x y. stepsOf (divM x y) = size x * size y # modM_steps |- !x y. 
stepsOf (modM x y) = size x * size y Basic List: nullM_def |- !ls. nullM ls = do tick 1; unit (ls = []) od headM_def |- !ls. headM ls = do tick 1; unit (HD ls) od tailM_def |- !ls. tailM ls = do tick 1; unit (TL ls) od consM_def |- !x ls. consM x ls = do tick 1; unit (x::ls) od # nullM_value |- !ls. valueOf (nullM ls) <=> ls = [] # headM_value |- !ls. valueOf (headM ls) = HD ls # tailM_value |- !ls. valueOf (tailM ls) = TL ls # consM_value |- !x ls. valueOf (consM x ls) = x::ls # nullM_steps |- !ls. stepsOf (nullM ls) = 1 # headM_steps |- !ls. stepsOf (headM ls) = 1 # tailM_steps |- !ls. stepsOf (tailM ls) = 1 # consM_steps |- !x ls. stepsOf (consM x ls) = 1 Basic Boolean: eqM_def |- !x y. eqM x y = do tick (MAX (size x) (size y)); unit (x = y) od notM_def |- !b. notM b = do tick 1; unit (~b) od boolM_def |- !b. boolM b = do tick 1; unit (if b then 1 else 0) od # eqM_value |- !x y. valueOf (eqM x y) <=> x = y # notM_value |- !b. valueOf (notM b) <=> ~b # boolM_value |- !b. valueOf (boolM b) = if b then 1 else 0 # eqM_steps |- !x y. stepsOf (eqM x y) = MAX (size x) (size y) # notM_steps |- !b. stepsOf (notM b) = 1 # boolM_steps |- !b. stepsOf (boolM b) = 1 Macro Monads: zeroM_def |- !n. zeroM n = eqM n 0 # zeroM_value |- !n. valueOf (zeroM n) <=> n = 0 # zeroM_steps |- !n. stepsOf (zeroM n) = size n zeroM_cc |- (\n. stepsOf (zeroM n)) IN big_O size zeroM_poly_cc |- (\n. stepsOf (zeroM n)) IN big_O (POLY 1 o size) zeroM_steps_big_O |- stepsOf o zeroM IN big_O (\n. ulog n) zeroM_thm |- !n. (valueOf (zeroM n) <=> n = 0) /\ stepsOf o zeroM IN big_O (\n. ulog n) oneM_def | - !n. oneM n = eqM 1 n # oneM_value |- !n. valueOf (oneM n) <=> n = 1 # oneM_steps |- !n. stepsOf (oneM n) = size n oneM_cc |- (\n. stepsOf (oneM n)) IN big_O size oneM_poly_cc |- (\n. stepsOf (oneM n)) IN O_poly 1 oneM_steps_big_O |- stepsOf o oneM IN big_O (\n. ulog n) oneM_thm |- !n. (valueOf (oneM n) <=> n = 1) /\ stepsOf o oneM IN big_O (\n. ulog n) twiceM_def |- !n. 
twiceM n = mulM n 2 twiceM_cc |- (\n. stepsOf (twiceM n)) IN big_O size twiceM_poly_cc|- (\n. stepsOf (twiceM n)) IN O_poly 1 # twiceM_value |- !n. valueOf (twiceM n) = TWICE n # twiceM_steps |- !n. stepsOf (twiceM n) = TWICE (size n) twiceM_steps_big_O |- stepsOf o twiceM IN big_O (\n. ulog n) twiceM_thm |- !n. (valueOf (twiceM n) = TWICE n) /\ stepsOf o twiceM IN big_O (\n. ulog n) halfM_def |- !n. halfM n = divM n 2 halfM_cc |- (\n. stepsOf (halfM n)) IN big_O size # halfM_value |- !n. valueOf (halfM n) = HALF n # halfM_steps |- !n. stepsOf (halfM n) = TWICE (size n) halfM_steps_big_O |- stepsOf o halfM IN big_O (\n. ulog n) halfM_thm |- !n. (valueOf (halfM n) = HALF n) /\ stepsOf o halfM IN big_O (\n. ulog n) parityM_def |- !n. parityM n = modM n 2 # parityM_value |- !n. valueOf (parityM n) = n MOD 2 # parityM_steps |- !n. stepsOf (parityM n) = TWICE (size n) parityM_cc |- (\n. stepsOf (parityM n)) IN big_O size parityM_poly_cc |- (\n. stepsOf (parityM n)) IN big_O (POLY 1 o size) parity_cc |- (\n. n MOD 2) IN big_O (K 1) parityM_steps_big_O |- stepsOf o parityM IN big_O (\n. ulog n) parityM_thm |- !n. (valueOf (parityM n) = n MOD 2) /\ stepsOf o parityM IN big_O (\n. ulog n) evenM_def |- !n. evenM n = do z <- parityM n; zeroM z od # evenM_value |- !n. valueOf (evenM n) <=> EVEN n # evenM_steps |- !n. stepsOf (evenM n) = TWICE (size n) + 1 evenM_cc |- (\n. stepsOf (evenM n)) IN big_O size zeroM_parity_cc |- (\n. stepsOf (zeroM (n MOD 2))) IN big_O (K 1) evenM_steps_big_O |- stepsOf o evenM IN big_O (\n. ulog n) evenM_thm |- !n. (valueOf (evenM n) <=> EVEN n) /\ stepsOf o evenM IN big_O (\n. ulog n) sqM_def |- !n. sqM n = mulM n n # sqM_value |- !n. valueOf (sqM n) = SQ n # sqM_steps |- !n. stepsOf (sqM n) = size n ** 2 sqM_poly_cc |- (\n. stepsOf (sqM n)) IN big_O (POLY 2 o size) mulM_poly_cc |- !z. (\(x,y). stepsOf (mulM x y)) z < (POLY 2 o (\(x,y). 1 + MAX (size x) (size y))) z sqM_steps_big_O |- stepsOf o sqM IN big_O (\n. ulog n ** 2) sqM_thm |- !n. 
(valueOf (sqM n) = SQ n) /\ stepsOf o sqM IN big_O (\n. ulog n ** 2) incM_def |- !n. incM n = addM n 1 incM_cc |- (\n. stepsOf (incM n)) IN O_poly 1 # incM_value |- !n. valueOf (incM n) = n + 1 # incM_steps |- !n. stepsOf (incM n) = size n incM_steps_big_O |- stepsOf o incM IN big_O (\n. ulog n) incM_thm |- !n. (valueOf (incM n) = n + 1) /\ stepsOf o incM IN big_O (\n. ulog n) decM_def |- !n. decM n = subM n 1 decM_cc |- (\n. stepsOf (decM n)) IN O_poly 1 # decM_value |- !n. valueOf (decM n) = n - 1 # decM_steps |- !n. stepsOf (decM n) = size n decM_steps_big_O |- stepsOf o decM IN big_O (\n. ulog n) decM_thm |- !n. (valueOf (decM n) = n - 1) /\ stepsOf o decM IN big_O (\n. ulog n) leqM_def |- !n m. leqM n m = do z <- subM n m; zeroM z od # leqM_value |- !n m. valueOf (leqM n m) <=> n <= m # leqM_steps |- !n m. stepsOf (leqM n m) = MAX (size n) (size m) + size (n - m) leqM_steps_alt|- !n m. stepsOf (leqM n m) = size (MAX n m) + size (n - m) leqM_steps_le |- !n m. stepsOf (leqM n m) <= TWICE (size (MAX n m)) leqM_poly_cc |- !z. (\(n,m). stepsOf (leqM n m)) z ** 2 = (POLY 2 o (\(n,m). size (n - m) + MAX (size n) (size m))) z ltM_def |- !n m. ltM n m = do b <- eqM n m; if b then unit F else leqM n m od # ltM_value |- !n m. valueOf (ltM n m) <=> n < m # ltM_steps |- !n m. stepsOf (ltM n m) = if n = m then size n else TWICE (MAX (size n) (size m)) + size (n - m) ltM_poly_cc |- !z. (\(n,m). stepsOf (ltM n m)) z ** 1 = (POLY 1 o (\(n,m). if n = m then size n else size (n - m) + TWICE (MAX (size n) (size m)))) z gt1M_def |- !n. gt1M n = ltM 1 n # gt1M_value |- !n. valueOf (gt1M n) <=> 1 < n # gt1M_steps |- !n. stepsOf (gt1M n) = if n = 1 then 1 else 1 + TWICE (size n) le1M_def |- !n. le1M n = do gd <- gt1M n; notM gd od # le1M_value |- !n. valueOf (le1M n) <=> n <= 1 # le1M_steps |- !n. stepsOf (le1M n) = if n = 1 then 2 else 2 + TWICE (size n) appendM_def |- !l2 l1. 
appendM l1 l2 = do gd <- nullM l1; if gd then unit l2 else do h <- headM l1; t <- tailM l1; ls <- appendM t l2; consM h ls od od # appendM_value |- !l1 l2. valueOf (appendM l1 l2) = l1 ++ l2: snocM_def |- !x ls. snocM x ls = do gd <- nullM ls; if gd then consM x ls else do h <- headM ls; t <- tailM ls; l <- snocM x t; consM h l od od # snocM_value |- !x ls. valueOf (snocM x ls) = SNOC x ls *) (* Eliminate parenthesis around equality *) val _ = ParseExtras.tight_equality(); (* ------------------------------------------------------------------------- *) (* Helper Theorems *) (* ------------------------------------------------------------------------- *) (* for EVAL ifM *) val _ = computeLib.set_skip computeLib.the_compset ``ifM`` (SOME 1); (* EVAL ifM must be in current script, e.g. EVAL ``expn 1 2 3``; *) (* val count_CASES = TypeBase.nchotomy_of ``:counter``; val cmetis = metis_tac[pair_CASES, count_CASES]; *) (* ------------------------------------------------------------------------- *) (* Constructors *) (* ------------------------------------------------------------------------- *) (* Make a zero *) val make_0M_def = Define` make_0M = do tick 1; return 0; od `; (* Make a False *) val make_FM_def = Define` make_FM = do tick 1; return F; od `; (* Make an empty list *) val make_nilM_def = Define` make_nilM = do tick 1; return []; od `; (* Values of constructors *) val make_0M_value = store_thm("make_0M_value[simp]", ``valueOf (make_0M) = 0``, rw[make_0M_def]); val make_FM_value = store_thm("make_FM_value[simp]", ``valueOf (make_FM) = F``, rw[make_FM_def]); val make_nilM_value = store_thm("make_nilM_value[simp]", ``valueOf (make_nilM) = []``, rw[make_nilM_def]); (* Steps of constructors *) val make_0M_steps = store_thm("make_0M_steps[simp]", ``stepsOf (make_0M) = 1``, rw[make_0M_def]); val make_FM_steps = store_thm("make_FM_steps[simp]", ``stepsOf (make_FM) = 1``, rw[make_FM_def]); val make_nilM_steps = store_thm("make_nilM_steps[simp]", ``stepsOf (make_nilM) = 
1``, rw[make_nilM_def]); (* ------------------------------------------------------------------------- *) (* Identity *) (* ------------------------------------------------------------------------- *) (* ID monad *) val idM_def = Define` idM x = do tick 0; return x; od `; val _ = overload_on ("id_", ``app1 idM``); val idM_value = store_thm("idM_value[simp]", ``!x. valueOf (idM x) = x``, rw[idM_def]); val idM_steps = store_thm("idM_steps[simp]", ``!x. stepsOf (idM x) = 0``, rw[idM_def]); (* ------------------------------------------------------------------------- *) (* Basic Arithmetic *) (* ------------------------------------------------------------------------- *) (* ADD monad *) val addM_def = Define` addM x y = do tick (MAX (size x) (size y)); return (x + y); od `; val _ = overload_on ("add_", ``app2 addM``); (* > EVAL ``addM 3 4``; = (7,Count 3): thm > EVAL ``add_ 3c 4c``; = (7,Count 3): thm *) (* SUB monad *) val subM_def = Define` subM x y = do tick (MAX (size x) (size y)); return (x - y); od `; val _ = overload_on ("sub_", ``app2 subM``); (* > EVAL ``sub_ 10c 3c``; = (7,Count 4): thm *) (* MUL monad *) val mulM_def = Define` mulM x y = do tick (size x * size y); return (x * y); od `; val _ = overload_on ("mul_", ``app2 mulM``); (* > EVAL ``mul_ 3c 7c``; = (21,Count 6): thm *) (* DIV monad *) val divM_def = Define` divM x y = do tick (size x * size y); return (x DIV y); od `; val _ = overload_on ("div_", ``app2 divM``); (* MOD monad *) val modM_def = Define` modM x y = do tick (size x * size y); return (x MOD y); od `; val _ = overload_on ("mod_", ``app2 modM``); (* > EVAL ``div_ 17c 3c``; = (5,Count 10): thm > EVAL ``mod_ 17c 3c``; = (2,Count 10): thm *) (* Values of basic arithmetic *) val addM_value = store_thm("addM_value[simp]", ``!x y. valueOf (addM x y) = x + y``, rw[addM_def]); val subM_value = store_thm("subM_value[simp]", ``!x y. valueOf (subM x y) = x - y``, rw[subM_def]); val mulM_value = store_thm("mulM_value[simp]", ``!x y. 
valueOf (mulM x y) = x * y``, rw[mulM_def]); val divM_value = store_thm("divM_value[simp]", ``!x y. valueOf (divM x y) = x DIV y``, rw[divM_def]); val modM_value = store_thm("modM_value[simp]", ``!x y. valueOf (modM x y) = x MOD y``, rw[modM_def]); (* Steps of basic arithmetic *) val addM_steps = store_thm("addM_steps[simp]", ``!x y. stepsOf (addM x y) = MAX (size x) (size y)``, rw[addM_def]); val subM_steps = store_thm("subM_steps[simp]", ``!x y. stepsOf (subM x y) = MAX (size x) (size y)``, rw[subM_def]); val mulM_steps = store_thm("mulM_steps[simp]", ``!x y. stepsOf (mulM x y) = (size x) * (size y)``, rw[mulM_def]); val divM_steps = store_thm("divM_steps[simp]", ``!x y. stepsOf (divM x y) = (size x) * (size y)``, rw[divM_def]); val modM_steps = store_thm("modM_steps[simp]", ``!x y. stepsOf (modM x y) = (size x) * (size y)``, rw[modM_def]); (* ------------------------------------------------------------------------- *) (* Basic List *) (* ------------------------------------------------------------------------- *) (* Null monad *) val nullM_def = Define` nullM ls = do tick 1; return (ls = []) od `; val _ = overload_on ("null_", ``app1 nullM``); (* Head monad *) val headM_def = Define` headM ls = do tick 1; return (HD ls) od `; val _ = overload_on ("head_", ``app1 headM``); (* Tail monad *) val tailM_def = Define` tailM ls = do tick 1; return (TL ls) od `; val _ = overload_on ("tail_", ``app1 tailM``); (* Cons monad *) val consM_def = Define` consM x ls = do tick 1; return (x::ls) od `; val _ = overload_on ("cons_", ``app2 consM``); (* Values of basic list *) val nullM_value = store_thm("nullM_value[simp]", ``!ls. valueOf (nullM ls) <=> (ls = []) ``, rw[nullM_def]); val headM_value = store_thm("headM_value[simp]", ``!ls. valueOf (headM ls) = HD ls``, rw[headM_def]); val tailM_value = store_thm("tailM_value[simp]", ``!ls. valueOf (tailM ls) = TL ls``, rw[tailM_def]); val consM_value = store_thm("consM_value[simp]", ``!x ls. 
valueOf (consM x ls) = x :: ls``, rw[consM_def]); (* Steps of basic list *) val nullM_steps = store_thm("nullM_steps[simp]", ``!ls. stepsOf (nullM ls) = 1``, rw[nullM_def]); val headM_steps = store_thm("headM_steps[simp]", ``!ls. stepsOf (headM ls) = 1``, rw[headM_def]); val tailM_steps = store_thm("tailM_steps[simp]", ``!ls. stepsOf (tailM ls) = 1``, rw[tailM_def]); val consM_steps = store_thm("consM_steps[simp]", ``!x ls. stepsOf (consM x ls) = 1``, rw[consM_def]); (* ------------------------------------------------------------------------- *) (* Basic Boolean *) (* ------------------------------------------------------------------------- *) (* EQ monad *) val eqM_def = Define` eqM x y = do tick (MAX (size x) (size y)); return (x = y); od `; val _ = overload_on ("eq_", ``app2 eqM``); (* > EVAL ``eq_ 7c 7c``; = (T,Count 3): thm > EVAL ``eq_ 7c 3c``; = (F,Count 3): thm *) (* Not monad *) val notM_def = Define` notM b = do tick 1; return (~ b) od `; val _ = overload_on ("not_", ``app1 notM``); (* Bool monad *) val boolM_def = Define` boolM b = do tick 1; return (if b then 1 else 0) od `; val _ = overload_on ("bool_", ``app1 boolM``); (* Values of basic boolean *) val eqM_value = store_thm("eqM_value[simp]", ``!x y. valueOf (eqM x y) = (x = y)``, rw[eqM_def]); val notM_value = store_thm("notM_value[simp]", ``!b. valueOf (notM b) <=> (~b)``, rw[notM_def]); val boolM_value = store_thm("boolM_value[simp]", ``!b. valueOf (boolM b) = if b then 1 else 0``, rw[boolM_def]); (* Steps of basic boolean *) val eqM_steps = store_thm("eqM_steps[simp]", ``!x y. stepsOf (eqM x y) = MAX (size x) (size y)``, rw[eqM_def]); val notM_steps = store_thm("notM_steps[simp]", ``!b. stepsOf (notM b) = 1``, rw[notM_def]); val boolM_steps = store_thm("boolM_steps[simp]", ``!b. 
stepsOf (boolM b) = 1``, rw[boolM_def]); (* ------------------------------------------------------------------------- *) (* Macro Monads *) (* ------------------------------------------------------------------------- *) (* Zero test monad *) val zeroM_def = Define `zeroM n = eqM n 0`; (* Theorem: valueOf (zeroM n) <=> (n = 0) *) (* Proof: by zeroM_def, eqM_value *) val zeroM_value = store_thm( "zeroM_value[simp]", ``!n. valueOf (zeroM n) <=> (n = 0)``, rw[zeroM_def]); (* Theorem: stepsOf (zeroM n) = size n *) (* Proof: stepsOf (zeroM n) = stepsOf (eqM n 0) by zeroM_def = MAX (size n) (size 0) by eqM_steps = MAX (size n) 1 by size_0 = size n by max_1_size_n, MAX_COMM *) val zeroM_steps = store_thm( "zeroM_steps[simp]", ``!n. stepsOf (zeroM n) = size n``, rw[zeroM_def] >> metis_tac[max_1_size_n, MAX_COMM]); (* Theorem: (\n. stepsOf (zeroM n)) IN big_O size *) (* Proof: By big_O_def, zeroM_steps, this is to show: ?k c. !n. k < n ==> size n <= c * size n or ?k c. !n. k < n ==> size n = 0 \/ 1 <= c Take k = 0, c = 1. *) val zeroM_cc = store_thm( "zeroM_cc", ``(\n. stepsOf (zeroM n)) IN big_O size``, rw[big_O_def] >> qexists_tac `0` >> qexists_tac `1` >> fs[]); (* Theorem: (\n. stepsOf (zeroM n)) IN big_O ((POLY 1) o size) *) (* Proof: By big_O_def, POLY_def, zeroM_steps, this is to show: ?k c. !n. k < n ==> size <= c * size n or ?k c. !n. k < n ==> size n = 0 \/ 1 <= c Take k = 0, c = 1. *) val zeroM_poly_cc = store_thm( "zeroM_poly_cc", ``(\n. stepsOf (zeroM n)) IN big_O ((POLY 1) o size)``, rw[big_O_def, POLY_def] >> qexists_tac `0` >> qexists_tac `1` >> fs[]); (* Theorem: (stepsOf o zeroM) IN big_O (\n. ulog n) *) (* Proof: By zeroM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> size n <= c * ulog n Put k = 1, c = 2, then size n <= 2 * ulog n by size_le_ulog *) val zeroM_steps_big_O = store_thm( "zeroM_steps_big_O", ``(stepsOf o zeroM) IN big_O (\n. 
ulog n)``, rw[big_O_def] >> metis_tac[size_le_ulog]); (* Theorem: (valueOf (zeroM n) <=> (n = 0)) /\ (stepsOf o zeroM) IN big_O (\n. ulog n) *) (* Proof: by zeroM_value, zeroM_steps_big_O *) val zeroM_thm = store_thm( "zeroM_thm", ``!n. (valueOf (zeroM n) <=> (n = 0)) /\ (stepsOf o zeroM) IN big_O (\n. ulog n)``, metis_tac[zeroM_value, zeroM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* Define equal-to-one macro *) val oneM_def = Define` oneM n = eqM 1 n `; (* Theorem: valueOf (oneM n) = (n = 1) *) (* Proof: by oneM_def *) val oneM_value = store_thm( "oneM_value[simp]", ``!n. valueOf (oneM n) = (n = 1)``, rw[oneM_def]); (* Theorem: stepsOf (oneM n) = size n *) (* Proof: by oneM_def, size_1, max_1_size_n *) val oneM_steps = store_thm( "oneM_steps[simp]", ``!n. stepsOf (oneM n) = size n``, rw[oneM_def, max_1_size_n]); (* Theorem: (\n. stepsOf (oneM n)) IN big_O size *) (* Proof: By big_O_def, oneM_steps, this is to show: ?k c. !n. k < n ==> size n <= c * size n or ?k c. !n. k < n ==> size n = 0 \/ 1 <= c Take k = 0, c = 1. *) val oneM_cc = store_thm( "oneM_cc", ``(\n. stepsOf (oneM n)) IN big_O size``, rw[big_O_def] >> qexists_tac `0` >> qexists_tac `1` >> fs[]); (* Theorem: (\n. stepsOf (oneM n)) IN big_O ((POLY 1) o size) *) (* Proof: By big_O_def, POLY_def, oneM_steps, this is to show: ?k c. !n. k < n ==> size <= c * size n or ?k c. !n. k < n ==> size n = 0 \/ 1 <= c Take k = 0, c = 1. *) val oneM_poly_cc = store_thm( "oneM_poly_cc", ``(\n. stepsOf (oneM n)) IN big_O ((POLY 1) o size)``, rw[big_O_def, POLY_def] >> qexists_tac `0` >> qexists_tac `1` >> fs[]); (* Theorem: (stepsOf o oneM) IN big_O (\n. ulog n) *) (* Proof: By oneM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> size n <= c * ulog n Put k = 1, c = 2, then size n <= 2 * ulog n by size_le_ulog *) val oneM_steps_big_O = store_thm( "oneM_steps_big_O", ``(stepsOf o oneM) IN big_O (\n. 
ulog n)``, rw[big_O_def] >> metis_tac[size_le_ulog]); (* Theorem: (valueOf (oneM n) <=> (n = 1)) /\ (stepsOf o oneM) IN big_O (\n. ulog n) *) (* Proof: by oneM_value, oneM_steps_big_O *) val oneM_thm = store_thm( "oneM_thm", ``!n. (valueOf (oneM n) <=> (n = 1)) /\ (stepsOf o oneM) IN big_O (\n. ulog n)``, metis_tac[oneM_value, oneM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* Twice monad *) val twiceM_def = Define` twiceM n = mulM n 2 `; val _ = overload_on ("twice_", ``app1 twiceM``); (* > EVAL ``twiceM 3``; = (6,Count 4): thm > EVAL ``twice_ 6c``; = (12,Count 6): thm *) (* Theorem: (\n. stepsOf (twiceM n)) IN big_O size *) (* Proof: By twiceM_def, this is to show: (\n. size n * size 2) IN big_O size By big_O_def, this is to show: ?k c. !n. k < n ==> 2 * size n <= c * size n Take k = 0, c = 2. The result follows. *) val twiceM_cc = store_thm( "twiceM_cc", ``(\n. stepsOf (twiceM n)) IN big_O size``, rw[twiceM_def, big_O_def] >> qexists_tac `0` >> qexists_tac `2` >> fs[]); (* Theorem: (\n. stepsOf (twiceM n)) IN O_poly 1 *) (* Proof: By twiceM_def, this is to show: (\n. size n * size 2) IN O_poly 1 By O_poly_thm, this is to show: ?h k. !n. h < n ==> size n * size 2 <= k * size n Take h = 0, k = size 2. The result follows. *) val twiceM_poly_cc = store_thm( "twiceM_poly_cc", ``(\n. stepsOf (twiceM n)) IN O_poly 1``, rw[twiceM_def] >> rw[O_poly_thm] >> map_every qexists_tac [`0`, `size 2`] >> simp[]); (* Theorem: valueOf (twiceM n) = 2 * n *) (* Proof: valueOf (twiceM n) = valueOf (mulM n 2) by twiceM_def = n * 2 = 2 * n by mulM_value *) val twiceM_value = store_thm( "twiceM_value[simp]", ``!n. valueOf (twiceM n) = 2 * n``, rw[twiceM_def]); (* Theorem: stepsOf (twiceM n) = 2 * (size n) *) (* Proof: stepsOf (twiceM n) = stepsOf (mulM n 2) by twiceM_def = (size n) * (size 2) by mulM_steps = (size n) * 2 by size_2 = 2 * (size n) by MULT_COMM *) val twiceM_steps = store_thm( "twiceM_steps[simp]", ``!n. 
stepsOf (twiceM n) = 2 * (size n)``, rw[twiceM_def, size_2]); (* verifies twiceM_cc: (\n. stepsOf (twiceM n)) IN O_poly 1 *) (* Theorem: (stepsOf o twiceM) IN big_O (\n. ulog n) *) (* Proof: By twiceM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> 2 * size n <= c * ulog n Put k = 1, c = 4, then 2 * size n <= 4 * ulog n by size_le_ulog *) val twiceM_steps_big_O = store_thm( "twiceM_steps_big_O", ``(stepsOf o twiceM) IN big_O (\n. ulog n)``, rw[big_O_def] >> qexists_tac `1` >> qexists_tac `4` >> rpt strip_tac >> `size n <= 2 * ulog n` by metis_tac[size_le_ulog] >> decide_tac); (* Theorem: (valueOf (twiceM n) = (2 * n)) /\ (stepsOf o twiceM) IN big_O (\n. ulog n) *) (* Proof: by twiceM_value, twiceM_steps_big_O *) val twiceM_thm = store_thm( "twiceM_thm", ``!n. (valueOf (twiceM n) = (2 * n)) /\ (stepsOf o twiceM) IN big_O (\n. ulog n)``, metis_tac[twiceM_value, twiceM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* HALF monad *) val halfM_def = Define `halfM n = divM n 2`; val _ = overload_on ("half_", ``app1 halfM``); (* > EVAL ``halfM 5``; = (2,Count 6): thm > EVAL ``half_ 5c``; = (2,Count 6): thm *) (* Theorem: (\n. stepsOf (halfM n)) IN big_O size *) (* Proof: By big_O_def, halfM_def, divM_steps, this is to show: ?k c. !n. k < n ==> size n * size 2 <= c * size n Take k = 0, c = size 2. *) val halfM_cc = store_thm( "halfM_cc", ``(\n. stepsOf (halfM n)) IN big_O size``, rw[big_O_def, halfM_def] >> qexists_tac `0` >> qexists_tac `size 2` >> fs[]); (* Theorem: valueOf (halfM n) = HALF n *) (* Proof: valueOf (halfM n) = valueOf (divM n 2) by halfM_def = n DIV 2 by divM_value = HALF n by notation *) val halfM_value = store_thm( "halfM_value[simp]", ``!n. 
valueOf (halfM n) = HALF n``, rw[halfM_def]); (* Theorem: stepsOf (halfM n) = 2 * size n *) (* Proof: stepsOf (halfM n) = stepsOf (divM n 2) by halfM_def = size n * size 2 by divM_steps = size n * 2 by size_2 = 2 * size x *) val halfM_steps = store_thm( "halfM_steps[simp]", ``!n. stepsOf (halfM n) = 2 * size n``, rw[halfM_def, size_2]); (* verifies halfM_cc: (\n. stepsOf (halfM n)) IN big_O size *) (* Theorem: (stepsOf o halfM) IN big_O (\n. ulog n) *) (* Proof: By halfM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> 2 * size n <= c * ulog n Put k = 1, c = 4, then 2 * size n <= 4 * ulog n by size_le_ulog *) val halfM_steps_big_O = store_thm( "halfM_steps_big_O", ``(stepsOf o halfM) IN big_O (\n. ulog n)``, rw[big_O_def] >> qexists_tac `1` >> qexists_tac `4` >> rpt strip_tac >> `size n <= 2 * ulog n` by metis_tac[size_le_ulog] >> decide_tac); (* Theorem: (valueOf (halfM n) = HALF n) /\ (stepsOf o halfM) IN big_O (\n. ulog n) *) (* Proof: by halfM_value, halfM_steps_big_O *) val halfM_thm = store_thm( "halfM_thm", ``!n. (valueOf (halfM n) = HALF n) /\ (stepsOf o halfM) IN big_O (\n. ulog n)``, metis_tac[halfM_value, halfM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* Parity monad *) val parityM_def = Define `parityM n = modM n 2`; (* Theorem: valueOf (parityM n) = n MOD 2 *) (* Proof: by parityM_def, modM_value *) val parityM_value = store_thm( "parityM_value[simp]", ``!n. valueOf (parityM n) = n MOD 2``, rw[parityM_def]); (* Theorem: stepsOf (parityM n) = 2 * size n *) (* Proof: stepsOf (parityM n) = stepsOf (modM n 2) by parityM_def = size n * size 2 by modM_steps = size n * 2 by size_2 = 2 * size n by arithmetic *) val parityM_steps = store_thm( "parityM_steps[simp]", ``!n. stepsOf (parityM n) = 2 * size n``, rw[parityM_def, size_2]); (* Theorem: stepsOf (parityM n)) IN big_O size *) (* Proof: By big_O_def, parityM_steps, this is to show: ?k c. !n. k < n ==> TWICE (size n) <= c * size n or ?k c. !n. 
k < n ==> 2 <= c Take k = 0, c = 2. *) val parityM_cc = store_thm( "parityM_cc", ``(\n. stepsOf (parityM n)) IN big_O size``, rw[big_O_def] >> qexists_tac `0` >> qexists_tac `2` >> fs[]); (* Theorem: (\n. stepsOf (parityM n)) IN big_O ((POLY 1) o size) *) (* Proof: By big_O_def, POLY_def, parityM_steps, this is to show: ?k c. !n. k < n ==> TWICE (size n) <= c * size n or ?k c. !n. k < n ==> 2 <= c Take k = 0, c = 2. *) val parityM_poly_cc = store_thm( "parityM_poly_cc", ``(\n. stepsOf (parityM n)) IN big_O ((POLY 1) o size)``, rw[big_O_def, POLY_def] >> qexists_tac `0` >> qexists_tac `2` >> fs[]); (* Theorem: (\n. n MOD 2) IN big_O (K 1) *) (* Proof: By big_O_def, this is to show: ?k c. !n. k < n ==> n MOD 2 <= c Note n MOD 2 = 0 or 1 by MOD_LESS Take k = 0, c = 1. *) val parity_cc = store_thm( "parity_cc", ``(\n. n MOD 2) IN big_O (K 1)``, rw[big_O_def] >> qexists_tac `0` >> qexists_tac `1` >> rw[] >> `n MOD 2 < 2` by rw[MOD_LESS] >> decide_tac); (* Theorem: (stepsOf o parityM) IN big_O (\n. ulog n) *) (* Proof: By parityM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> 2 * size n <= c * ulog n Put k = 1, c = 4, then 2 * size n <= 4 * ulog n by size_le_ulog *) val parityM_steps_big_O = store_thm( "parityM_steps_big_O", ``(stepsOf o parityM) IN big_O (\n. ulog n)``, rw[big_O_def] >> qexists_tac `1` >> qexists_tac `4` >> rpt strip_tac >> `size n <= 2 * ulog n` by metis_tac[size_le_ulog] >> decide_tac); (* Theorem: (valueOf (parityM n) = n MOD 2) /\ (stepsOf o parityM) IN big_O (\n. ulog n) *) (* Proof: by parityM_value, parityM_steps_big_O *) val parityM_thm = store_thm( "parityM_thm", ``!n. (valueOf (parityM n) = n MOD 2) /\ (stepsOf o parityM) IN big_O (\n. 
ulog n)``, metis_tac[parityM_value, parityM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* EVEN monad *) (* val evenM_def = Define`evenM x = do tick 1; return (EVEN x); od`; *) (* val evenM_def = Define `evenM x = eq_ (modM x 2) 0c`; *) (* val evenM_def = Define `evenM x = eq_ (mod_ (unit x) 2c) 0c`; *) (* val evenM_def = Define` evenM n = do y <- modM n 2; z <- eqM y 0; od `; *) val evenM_def = Define` evenM n = do z <- parityM n; zeroM z; od `; val _ = overload_on ("even_", ``app1 evenM``); (* > EVAL ``evenM 3``; = (F,Count 5): thm > EVAL ``even_ 4c``; = (T,Count 7): thm *) (* Theorem: valueOf (evenM n) = (EVEN n) *) (* Proof: valueOf (evenM n) = valueOf (do z <- parityM n; zeroM z od) by evenM_def = valueOf (zeroM (valueOf (parityM n))) by valueOf_bind = valueOf (zeroM (n MOD 2)) by parityM_value = (n MOD 2 = 0) by zeroM_value = EVEN n by EVEN_MOD2 *) val evenM_value = store_thm( "evenM_value[simp]", ``!n. valueOf (evenM n) = (EVEN n)``, rw[evenM_def] >> rw[EVEN_MOD2]); (* Theorem: stepsOf (evenM n) = 2 * size n + 1 *) (* Proof: stepsOf (evenM n) = stepsOf (do z <- parityM n; zeroM z od) by evenM_def = stepsOf (parityM n) + stepsOf (zeroM (valueOf (parityM n))) by stepsOf_bind = 2 * size n + by parityM_steps stepsOf (zeroM (n MOD 2)) by parityM_value = 2 * size n + size (n MOD 2) by zeroM_steps = 2 * size n + size (0 or 1) by MOD_LESS = 2 * size n + 1 by size_0, size_1 *) val evenM_steps = store_thm( "evenM_steps[simp]", ``!n. stepsOf (evenM n) = 2 * size n + 1``, rw[evenM_def] >> `n MOD 2 < 2` by rw[] >> metis_tac[size_0, size_1, DECIDE``1 <= 1 /\ (n < 2 <=> (n = 0) \/ (n = 1))``]); (* consistent with later evenM_cc: (\n. stepsOf (evenM n)) IN big_O size *) (* Theorem: (\n. stepsOf (evenM n)) IN big_O size *) (* Proof: By evenM_def, this is to show: (\n. TWICE (size n) + size (n MOD 2)) IN big_O size By big_O_def, this is to show: ?k c. !n. 
k < n ==> TWICE (size n) + size (n MOD 2) <= c * size n Take k = 0, c = 3. Note n MOD 2 < 2 by MOD_LESS Thus size (n MOD 2) = 1 by size_0, size_1 Apply 1 <= size n by one_le_size The result follows. *) val evenM_cc = store_thm( "evenM_cc", ``(\n. stepsOf (evenM n)) IN big_O size``, rw[evenM_def] >> rw[big_O_def] >> map_every qexists_tac [`0`, `3`] >> rpt strip_tac >> `n MOD 2 < 2` by rw[] >> `size (n MOD 2) = 1` by metis_tac[size_0, size_1, DECIDE``n < 2 ==> (n = 0) \/ (n = 1)``] >> `1 <= size n` by rw[one_le_size] >> rw[]); (* But this depends on n MOD 2 = 0 or 1 *) (* Also: can prove below *) (* Theorem: (\n. stepsOf (zeroM (n MOD 2))) IN big_O (K 1) *) (* Proof: By big_O_def, zeroM_steps, this is to show: ?k c. !n. k < n ==> size (n MOD 2) <= c Take k = 0, c = 1. Note n MOD 2 < 2 by MOD_LESS so n MOD 2 = 0 or n MOD 2 = 1 But size 0 = 1 by size_0 and size 1 = 1 by size_1 Hence true. *) val zeroM_parity_cc = store_thm( "zeroM_parity_cc", ``(\n. stepsOf (zeroM (n MOD 2))) IN big_O (K 1)``, rw[big_O_def] >> qexists_tac `0` >> qexists_tac `1` >> rpt strip_tac >> `n MOD 2 < 2` by rw[] >> metis_tac[size_0, size_1, DECIDE``1 <= 1 /\ (n < 2 <=> (n = 0) \/ (n = 1))``]); (* This can be very bad! or reasonable? *) (* Theorem: (stepsOf o evenM) IN big_O (\n. ulog n) *) (* Proof: By evenM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> 2 * size n + 1 <= c * ulog n Put k = 1, c = 5, then 2 * size n <= 4 * ulog n by size_le_ulog and 1 <= ulog n by ulog_ge_1 hence true. *) val evenM_steps_big_O = store_thm( "evenM_steps_big_O", ``(stepsOf o evenM) IN big_O (\n. ulog n)``, rw[big_O_def] >> qexists_tac `1` >> qexists_tac `5` >> rpt strip_tac >> `size n <= 2 * ulog n` by metis_tac[size_le_ulog] >> `1 <= ulog n` by rw[ulog_ge_1] >> decide_tac); (* Theorem: (valueOf (evenM n) <=> EVEN n) /\ (stepsOf o evenM) IN big_O (\n. ulog n) *) (* Proof: by evenM_value, evenM_steps_big_O *) val evenM_thm = store_thm( "evenM_thm", ``!n. 
(valueOf (evenM n) <=> EVEN n) /\ (stepsOf o evenM) IN big_O (\n. ulog n)``, metis_tac[evenM_value, evenM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* SQ monad *) val sqM_def = Define `sqM n = mulM n n`; val _ = overload_on ("sq_", ``app1 sqM``); (* > EVAL ``sq_ 7c``; = (49,Count 9): thm *) (* Theorem: valueOf (sqM n) = SQ n *) (* Proof: valueOf (sqM n) = valueOf (mulM n n) by sqM_def = n * n by mulM_value = SQ n by notation *) val sqM_value = store_thm( "sqM_value[simp]", ``!n. valueOf (sqM n) = SQ n``, rw[sqM_def]); (* Theorem: stepsOf (sqM n) = (size n) ** 2 *) (* Proof: stepsOf (sqM n) = stepsOf (mulM n n) by sqM_def = size n * size n by mulM_steps = size n ** 2 by EXP_2 *) val sqM_steps = store_thm( "sqM_steps[simp]", ``!n. stepsOf (sqM n) = (size n) ** 2``, rw[sqM_def, Once EXP_2]); (* verifies sqM_poly_cc: (\n. stepsOf (sqM n)) IN big_O (POLY 2 o size) *) (* Theorem: (\n. stepsOf (sqM n)) IN big_O (POLY 2 o size) *) (* Proof: By big_O_def, POLY_def, sqM_def, mulM_steps, this is to show: ?k c. !n. k < n ==> size n * size n <= c * size n * size n Take k = 0, c = 1. *) val sqM_poly_cc = store_thm( "sqM_poly_cc", ``(\n. stepsOf (sqM n)) IN big_O (POLY 2 o size)``, rw[big_O_def, POLY_def, sqM_def] >> qexists_tac `0` >> qexists_tac `1` >> fs[]); (* Theorem: (UNCURRY (\x y. stepsOf (mulM x y))) z < ((POLY 2) o (UNCURRY (\x y. 1 + MAX (size x) (size y)))) z *) (* Proof: by mulM_steps, POLY_def, and MAX_DEF *) val mulM_poly_cc = store_thm( "mulM_poly_cc", ``!z. (UNCURRY (\x y. stepsOf (mulM x y))) z < ((POLY 2) o (UNCURRY (\x y. 1 + MAX (size x) (size y)))) z``, rw[] >> (Cases_on `z` >> simp[]) >> rw[mulM_steps, POLY_def] >> qabbrev_tac `z = MAX (size q) (size r) + 1` >> `size q < z` by rw[MAX_DEF, Abbr`z`] >> `size r < z` by rw[MAX_DEF, Abbr`z`] >> `size q * size r < z * z` by rw[LT_MONO_MULT2] >> metis_tac[EXP_2]); (* Theorem: (stepsOf o sqM) IN big_O (\n. 
(ulog n) ** 2) *) (* Proof: By sqM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> size n ** 2 <= c * ulog n ** 2 Put k = 1, c = 4, then size n <= 2 * ulog n by size_le_ulog so (size n) ** 2 <= 4 (ulog n) ** 2 by SQ_LE *) val sqM_steps_big_O = store_thm( "sqM_steps_big_O", ``(stepsOf o sqM) IN big_O (\n. (ulog n) ** 2)``, rw[big_O_def] >> qexists_tac `1` >> qexists_tac `4` >> rpt strip_tac >> `size n <= 2 * ulog n` by metis_tac[size_le_ulog] >> `SQ (size n) <= SQ (2 * ulog n)` by rw[SQ_LE] >> `SQ (2 * ulog n) = 4 * SQ (ulog n)` by decide_tac >> fs[]); (* Theorem: (valueOf (sqM n) = SQ n) /\ (stepsOf o sqM) IN big_O (\n. (ulog n) ** 2) *) (* Proof: by sqM_value, sqM_steps_big_O *) val sqM_thm = store_thm( "sqM_thm", ``!n. (valueOf (sqM n) = SQ n) /\ (stepsOf o sqM) IN big_O (\n. (ulog n) ** 2)``, metis_tac[sqM_value, sqM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* Increment monad *) val incM_def = Define` incM n = addM n 1 `; val _ = overload_on ("inc_", ``app1 incM``); (* > EVAL ``incM 2``; = (3,Count 2): thm > EVAL ``inc_ 5c``; = (6,Count 3): thm *) (* Theorem: (\n. stepsOf (incM n)) IN O_poly 1 *) (* Proof: By incM_def, this is to show: (\n. MAX (size n) (size 1)) IN O_poly 1 By O_poly_thm, this is to show: ?h k. !n. h < n ==> (size n = 0 \/ 0 < k) /\ size 1 <= k * size n Take h = 0, k = 1. Note 0 < 1 by arithmetic and size 1 = 1 by size_1 <= size n by one_le_size The result follows. *) val incM_cc = store_thm( "incM_cc", ``(\n. stepsOf (incM n)) IN O_poly 1``, rw[incM_def] >> rw[O_poly_thm] >> map_every qexists_tac [`0`, `1`] >> simp[one_le_size]); (* Theorem: valueOf (incM n) = n + 1 *) (* Proof: valueOf (incM n) = valueOf (addM n 1) by incM_def = n + 1 by addM_value *) val incM_value = store_thm( "incM_value[simp]", ``!n. 
valueOf (incM n) = n + 1``, rw[incM_def]); (* Theorem: stepsOf (incM n) = size n *) (* Proof: stepsOf (incM n) = stepsOf (addM n 1) by incM_def = MAX (size n) (size 1) by addM_steps = MAX (size n) 1 by size_1 = size n by max_1_size_n, MAX_COMM *) val incM_steps = store_thm( "incM_steps[simp]", ``!n. stepsOf (incM n) = size n``, rw[incM_def] >> metis_tac[max_1_size_n, MAX_COMM]); (* verifies incM_cc: (\n. stepsOf (incM n)) IN O_poly 1 *) (* Theorem: (stepsOf o incM) IN big_O (\n. ulog n) *) (* Proof: By incM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> size n <= c * ulog n Put k = 1, c = 2, then size n <= 2 * ulog n by size_le_ulog *) val incM_steps_big_O = store_thm( "incM_steps_big_O", ``(stepsOf o incM) IN big_O (\n. ulog n)``, rw[big_O_def] >> metis_tac[size_le_ulog]); (* Theorem: (valueOf (incM n) = n + 1) /\ (stepsOf o incM) IN big_O (\n. ulog n) *) (* Proof: by incM_value, incM_steps_big_O *) val incM_thm = store_thm( "incM_thm", ``!n. (valueOf (incM n) = n + 1) /\ (stepsOf o incM) IN big_O (\n. ulog n)``, metis_tac[incM_value, incM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* Decrement monad *) val decM_def = Define` decM n = subM n 1 `; val _ = overload_on ("dec_", ``app1 decM``); (* > EVAL ``decM 3``; = (2,Count 2): thm > EVAL ``dec_ 6c``; = (5,Count 3): thm *) (* Theorem: (\n. stepsOf (decM n)) IN O_poly 1 *) (* Proof: By decM_def, this is to show: (\n. MAX (size n) (size 1)) IN O_poly 1 By O_poly_thm, this is to show: ?h k. !n. h < n ==> (size n = 0 \/ 0 < k) /\ size 1 <= k * size n Take h = 0, k = 1. Note 0 < 1 by arithmetic and size 1 = 1 by size_1 <= size n by one_le_size The result follows. *) val decM_cc = store_thm( "decM_cc", ``(\n. 
stepsOf (decM n)) IN O_poly 1``, rw[decM_def] >> rw[O_poly_thm] >> map_every qexists_tac [`0`, `1`] >> simp[one_le_size]); (* Theorem: valueOf (decM n) = n - 1 *) (* Proof: valueOf (decM n) = valueOf (subM n 1) by decM_def = n - 1 by subM_value *) val decM_value = store_thm( "decM_value[simp]", ``!n. valueOf (decM n) = n - 1``, rw[decM_def]); (* Theorem: stepsOf (decM n) = size n *) (* Proof: stepsOf (decM n) = stepsOf (subM n 1) by decM_def = MAX (size n) (size 1) by subM_steps = MAX (size n) 1 by size_1 = size n by max_1_size_n, MAX_COMM *) val decM_steps = store_thm( "decM_steps[simp]", ``!n. stepsOf (decM n) = size n``, rw[decM_def] >> metis_tac[max_1_size_n, MAX_COMM]); (* verifies decM_cc: (\n. stepsOf (decM n)) IN O_poly 1 *) (* Theorem: (stepsOf o decM) IN big_O (\n. ulog n) *) (* Proof: By decM_steps and big_O_def, this is to show: ?k c. !n. k < n ==> size n <= c * ulog n Put k = 1, c = 2, then size n <= 2 * ulog n by size_le_ulog *) val decM_steps_big_O = store_thm( "decM_steps_big_O", ``(stepsOf o decM) IN big_O (\n. ulog n)``, rw[big_O_def] >> metis_tac[size_le_ulog]); (* Theorem: (valueOf (decM n) = n - 1) /\ (stepsOf o decM) IN big_O (\n. ulog n) *) (* Proof: by decM_value, decM_steps_big_O *) val decM_thm = store_thm( "decM_thm", ``!n. (valueOf (decM n) = n - 1) /\ (stepsOf o decM) IN big_O (\n. ulog n)``, metis_tac[decM_value, decM_steps_big_O]); (* ------------------------------------------------------------------------- *) (* Less-or-equal monad *) val leqM_def = Define` leqM n m = do z <- subM n m; zeroM z od `; val _ = overload_on ("leq_", ``app2 leqM``); (* > EVAL ``leqM 3 4``; = (T,Count 4): thm > EVAL ``leq_ 6c 6c``; = (T,Count 4): thm > EVAL ``leqM 7 3``; = (F,Count 6): thm *) (* Theorem: valueOf (leqM n m) = (n <= m) *) (* Proof: valueOf (leqM n m) = valueOf (do z <- subM n m; zeroM z od) by leqM_def = (n - m = 0) by subM_value, zeroM_value = n <= m by arithmetic *) val leqM_value = store_thm( "leqM_value[simp]", ``!n m. 
valueOf (leqM n m) = (n <= m)``, rw[leqM_def]); (* Theorem: stepsOf (leqM n) = 2 * (size n) *) (* Proof: stepsOf (leqM n m) = stepsOf (do z <- subM n m; zeroM z od) by leqM_def = MAX (size n) (size m) + (size (n - m)) by subM_steps, subM_value, zeroM_steps *) val leqM_steps = store_thm( "leqM_steps[simp]", ``!n m. stepsOf (leqM n m) = MAX (size n) (size m) + (size (n - m))``, rw[leqM_def]); (* Theorem: stepsOf (leqM n m) = size (MAX n m) + size (n - m) *) (* Proof: leqM_steps, size_max *) val leqM_steps_alt = store_thm( "leqM_steps_alt", ``!n m. stepsOf (leqM n m) = size (MAX n m) + size (n - m)``, rw[leqM_steps, size_max]); (* Theorem: stepsOf (leqM n m) <= 2 * size (MAX n m) *) (* Proof: stepsOf (leqM n m) = MAX (size n) (size m) + (size (n - m)) by leqM_steps = size (MAX n m) + size (n - m) by size_max <= size (MAX n m) + size n by size_monotone_le <= size (MAX n m) + size (MAX n m) by size_monotone_le = 2 * size (MAX n m) *) val leqM_steps_le = store_thm( "leqM_steps_le", ``!n m. stepsOf (leqM n m) <= 2 * size (MAX n m)``, rpt strip_tac >> `stepsOf (leqM n m) = size (MAX n m) + size (n - m)` by rw[leqM_steps, size_max] >> `size (n - m) <= size n` by rw[size_monotone_le] >> `size n <= size (MAX n m)` by rw[size_monotone_le] >> decide_tac); (* Theorem: (\(n,m). stepsOf (leqM n m)) z ** 2 = (POLY 2 o (\(n,m). size (n - m) + MAX (size n) (size m))) z *) (* Proof: by leqM_def, POLY_def. *) val leqM_poly_cc = store_thm( "leqM_poly_cc", ``!z. (\(n,m). stepsOf (leqM n m)) z ** 2 = (POLY 2 o (\(n,m). 
size (n - m) + MAX (size n) (size m))) z``, rw[leqM_def] >> rw[POLY_def]); (* This is not proving anything, the exponent 2 comes from POLY 2 *) (* ------------------------------------------------------------------------- *) (* Less-than monad *) val ltM_def = Define` ltM n m = do b <- eqM n m; if b then unit F else leqM n m od `; val _ = overload_on ("lt_", ``app2 ltM``); (* > EVAL ``ltM 3 4``; = (T,Count 7): thm > EVAL ``lt_ 6c 6c``; = (F,Count 3): thm > EVAL ``ltM 7 3``; = (F,Count 9): thm *) (* Theorem: valueOf (ltM n m) = (n < m) *) (* Proof: valueOf (ltM n m) = valueOf (do b <- eqM n m; if b then unit F else leqM n m od) by ltM_def = if (n = m) then F else (n <= m) by eqM_value, subM_value, zeroM_value = n < m by arithmetic *) val ltM_value = store_thm( "ltM_value[simp]", ``!n m. valueOf (ltM n m) = (n < m)``, rw[ltM_def]); (* Theorem: stepsOf (leqM n) = if (n = m) then size n else 2 * MAX (size n) (size m) + size (n - m) *) (* Proof: stepsOf (ltM n m) = stepsOf (do b <- eqM n m; if b then unit F else leqM n m od) by leqM_def = MAX (size n) (size m) + MAX (size n) (size m) + (size (n - m)) by eqM_steps, subM_steps, subM_value, zeroM_steps = MAX (size n) (size m) = size n if n = m = 2 * MAX (size n) (size m) + size (n - m) if n <> m *) val ltM_steps = store_thm( "ltM_steps[simp]", ``!n m. stepsOf (ltM n m) = if (n = m) then size n else 2 * MAX (size n) (size m) + size (n - m)``, rw[ltM_def]); (* Theorem: (\(n,m). stepsOf (ltM n m)) z ** 1 = (POLY 1 o (\(n,m). if (n = m) then size n else size (n - m) + 2 * MAX (size n) (size m))) z *) (* Proof: by ltM_def, POLY_def. *) val ltM_poly_cc = store_thm( "ltM_poly_cc", ``!z. (\(n,m). stepsOf (ltM n m)) z ** 1 = (POLY 1 o (\(n,m). 
if (n = m) then size n else size (n - m) + 2 * MAX (size n) (size m))) z``, rw[ltM_def] >> rw[POLY_def]); (* This is not proving anything, the exponent 2 comes from POLY 2 *) (* ------------------------------------------------------------------------- *) (* Greater than 1 monad *) val gt1M_def = Define` gt1M n = ltM 1 n `; val _ = overload_on ("gt1_", ``app1 gt1M``); (* > EVAL ``gt1M 3``; = (T,Count 5): thm > EVAL ``gt1_ 1c``; = (F,Count 1): thm > EVAL ``gt1M 0``; = (F,Count 3): thm *) (* Theorem: valueOf (gt1M n) = (1 < n) *) (* Proof: valueOf (gt1M n) = valueOf (ltM 1 n) by gt1M_def = 1 < n by ltM_value *) val gt1M_value = store_thm( "gt1M_value[simp]", ``!n. valueOf (gt1M n) = (1 < n)``, rw[gt1M_def]); (* Theorem: stepsOf (gt1M n) = if (n = 1) then 1 else 1 + 2 * size n *) (* Proof: stepsOf (gt1M n) = stepsOf (ltM 1 n) by gt1M_def = size n = size 1 = 1 if n = 1, size_1 = 2 * MAX (size 1) (size n) + size (1 - n) if n <> 1 = 2 * size n + size (1 - n) by size_1, max_1_size_n = 2 * size n + 1 by size_0, size_1 *) val gt1M_steps = store_thm( "gt1M_steps[simp]", ``!n. stepsOf (gt1M n) = if (n = 1) then 1 else 1 + 2 * size n``, rpt strip_tac >> `MAX 1 (size n) = size n` by metis_tac[max_1_size_n] >> simp[gt1M_def] >> Cases_on `n = 1` >- metis_tac[] >> Cases_on `n = 0` >- fs[] >> `1 - n = 0` by decide_tac >> simp[]); (* ------------------------------------------------------------------------- *) (* Define less-equal-1 macro *) val le1M_def = Define` le1M n = do gd <- gt1M n; notM gd; od `; val _ = overload_on ("le1_", ``app1 le1M``); (* > EVAL ``le1M 3``; = (F,Count 6): thm > EVAL ``le1_ 1c``; = (T,Count 2): thm > EVAL ``le1M 0``; = (T,Count 4): thm *) (* Theorem: valueOf (le1M n) = (n <= 1) *) (* Proof: valueOf (le1M n) = valueOf (notM (1 < n)) by le1M_def = ~(1 < n) by notM_value = n <= 1 by logic *) val le1M_value = store_thm( "le1M_value[simp]", ``!n. 
valueOf (le1M n) = (n <= 1)``, simp[le1M_def]); (* Theorem: stepsOf (le1M n) = if n = 1 then 2 else 2 + 2 * size n *) (* Proof: stepsOf (le1M n) = stepsOf (gt1M n) + stepsOf (notM (1 < n)) by le1M_def = (if n = 1 then 1 else 1 + 2 * (size n)) + 1 by gt1M_steps = if n = 1 then 2 else 2 + 2 * size n by arithmetic *) val le1M_steps = store_thm( "le1M_steps[simp]", ``!n. stepsOf (le1M n) = if n = 1 then 2 else 2 + 2 * size n``, simp[le1M_def]); (* ------------------------------------------------------------------------- *) (* Define append monad *) val appendM_def = tDefine "appendM" ` appendM l1 l2 = do gd <- nullM l1; if gd then return l2 else do h <- headM l1; t <- tailM l1; ls <- appendM t l2; consM h ls; od od `(WF_REL_TAC `measure (\(l1, l2). LENGTH l1)` >> simp[LENGTH_TL_LT]); (* Theorem: valueOf (appendM l1 l2) = l1 ++ l2 *) (* Proof: induction on l1, appendM_def, APPEND. *) val appendM_value = store_thm( "appendM_value[simp]", ``!l1 l2. valueOf (appendM l1 l2) = l1 ++ l2``, ho_match_mp_tac (theorem "appendM_ind") >> rw[] >> (Cases_on `l1` >> rw[Once appendM_def])); (* ------------------------------------------------------------------------- *) (* Define snoc monoad *) val snocM_def = tDefine "snocM" ` snocM x ls = do gd <- nullM ls; if gd then consM x ls else do h <- headM ls; t <- tailM ls; l <- snocM x t; consM h l; od od `(WF_REL_TAC `measure (\(x,ls). LENGTH ls)` >> simp[LENGTH_TL_LT]); (* Theorem: valueOf (snocM x ls) = SNOC x ls *) (* Proof: induction on ls, snocM_def, SNOC. *) val snocM_value = store_thm( "snocM_value[simp]", ``!x ls. valueOf (snocM x ls) = SNOC x ls``, ho_match_mp_tac (theorem "snocM_ind") >> rw[] >> (Cases_on `ls` >> rw[Once snocM_def])); (* ------------------------------------------------------------------------- *) (* export theory at end *) val _ = export_theory(); (*===========================================================================*)
{ "pile_set_name": "Github" }
$NetBSD: distinfo,v 1.14 2019/05/10 12:26:34 hauke Exp $ SHA1 (libawl-0.60.tar.gz) = bbd67cee4517c28a9de2d6dba2c69f4ee0d2e673 RMD160 (libawl-0.60.tar.gz) = 8f6e93672ad39a5a6c0d5fdae9ccb62cf6a66307 SHA512 (libawl-0.60.tar.gz) = f8f6e185dd885d07427a381032a69c87da5c26369913ff4ddde991391b50aab2a84a79f8641af44df597f34912cd9ed3db49e72acc72200db7bafe71fdbba5a3 Size (libawl-0.60.tar.gz) = 601282 bytes SHA1 (patch-Makefile) = 5781f80363a91549ad8e8aaba2640d5bb960012e
{ "pile_set_name": "Github" }
<resources> <string name="app_name">微阅</string> <!-- 状态视图 --> <string name="loading">加载中...</string> <string name="text_empty">还没有数据呢!</string> <string name="text_nonet">网络异常!</string> <string name="text_retry">点击重试</string> <string name="title_activity_main2">Main2Activity</string> <string name="navigation_drawer_open">Open navigation drawer</string> <string name="navigation_drawer_close">Close navigation drawer</string> <string name="action_settings">Settings</string> <!-- 新闻 --> <string name="news_commentsize">%1$s评论</string> <string name="detail_header_tv">下拉一下,看点儿不一样的</string> <string name="news_toast">已为您推荐了%1$s条新资讯</string> <string name="finish">完成</string> <string name="edit">编辑</string> <string name="willUrl">http://will-ls.top/</string> <string name="gitHubUrl">https://github.com/Will-Ls</string> <string name="email">[email protected]</string> <!-- 图片浏览 --> <string name="description">%1$s / %2$s %3$s</string> <!-- 个人 --> <string name="blog">Blog</string> <string name="will">Will</string> <string name="contacts">Contacts</string> <string name="github">Github</string> <string name="e_mail">E-mail</string> <string-array name="news_channel"> <item>头条</item> <item>娱乐</item> <item>军事</item> <item>体育</item> <item>财经</item> <item>科技</item> <item>历史</item> <item>台湾</item> <item>汽车</item> <item>社会</item> <item>时尚</item> <item>国学</item> <item>文化</item> <item>星座</item> <item>读书</item> <item>游戏</item> <item>电影</item> <item>国际</item> </string-array> <string-array name="news_channel_id"> <item>SYLB10,SYDT10</item> <item>YL53,FOCUSYL53</item> <item>JS83,FOCUSJS83</item> <item>TY43,FOCUSTY43,TYLIVE,TYTOPIC</item> <item>CJ33,FOCUSCJ33,HNCJ33</item> <item>KJ123,FOCUSKJ123</item> <item>LS153,FOCUSLS153</item> <item>TW73</item> <item>QC45,FOCUSQC45</item> <item>SH133,FOCUSSH133</item> <item>SS78,FOCUSSS78</item> <item>GXPD,FOCUSGXPD</item> <item>WH25,FOCUSWH25</item> <item>XZ09,FOCUSXZ09</item> <item>DS57,FOCUSDS57</item> <item>YX11,FOCUSYX11</item> <item>DYPD</item> 
<item>GJPD</item> </string-array> </resources>
{ "pile_set_name": "Github" }
Integer/show +12
{ "pile_set_name": "Github" }
package structs import "strings" // tagOptions contains a slice of tag options type tagOptions []string // Has returns true if the given optiton is available in tagOptions func (t tagOptions) Has(opt string) bool { for _, tagOpt := range t { if tagOpt == opt { return true } } return false } // parseTag splits a struct field's tag into its name and a list of options // which comes after a name. A tag is in the form of: "name,option1,option2". // The name can be neglectected. func parseTag(tag string) (string, tagOptions) { // tag is one of followings: // "" // "name" // "name,opt" // "name,opt,opt2" // ",opt" res := strings.Split(tag, ",") return res[0], res[1:] }
{ "pile_set_name": "Github" }
var convert = require('./convert'), func = convert('unary', require('../unary'), require('./_falseOptions')); func.placeholder = require('./placeholder'); module.exports = func;
{ "pile_set_name": "Github" }
import { u } from "umbrellajs"; import Log from "modules/log"; import Episode from "modules/episode"; import ChangelogAudio from "modules/audio"; export default class MiniPlayer { constructor(container) { this.container = u(container); this.audio = new ChangelogAudio(); this.title = this.container.data("title"); this.audioUrl = this.container.data("audio"); this.duration = this.container.data("duration"); this.resetAudio(); this.attachUI(); this.attachEvents(); container.player = this; } attachUI() { this.scrubber = this.container.find(".js-player-scrubber"); this.track = this.container.find(".js-player-track"); this.current = this.container.find(".js-player-current"); this.playButton = this.container.find(".js-player-play-button"); } attachEvents() { this.playButton.handle("click", () => { this.audioLoaded ? this.togglePlayPause() : this.load(); }); this.scrubber.on("input", (event) => { this.scrub(event.target.value); }); this.scrubber.on("change", (event) => { this.scrubEnd(event.target.value); }); this.audio.onTimeUpdate((event) => { this.trackTime(); }); this.audio.onEnd((event) => { this.reset(); }); u("body").on("mini-player-play", (event, player) => { this.pauseForOther(player); }); } canPlay() { return this.audio.canPlay(); } isPlaying() { return !!this.audio && this.audio.playing(); } play() { requestAnimationFrame(this.step.bind(this)); this.playButton.addClass("is-playing").removeClass("is-paused is-loading"); this.audio.play(); u("body").trigger("mini-player-play", this); } pause() { this.playButton.addClass("is-paused").removeClass("is-playing is-loading"); this.audio.pause(); } pauseForOther(player) { if (this.isPlaying() && player != this) this.pause(); } togglePlayPause() { if (this.isPlaying()) { this.pause(); } else { this.play(); } } seekBy(to) { const currentSeek = this.audio.currentSeek() || 0; this.audio.seek(currentSeek + to); } load() { this.playButton.addClass("is-loading"); this.audio.load(this.audioUrl, () => { this.audioLoaded = true; 
this.log("Play"); this.play(); }); } log(action) { Log.track("Mini Player", action, this.title); } reset() { this.resetAudio(); this.resetUI(); } resetAudio() { this.audioLoaded = false; this.tracked = { 25: false, 50: false, 75: false, 100: false }; } resetUI() { this.current.text("0:00"); this.scrubber.first().value = 0; this.track.first().style.width = "0%"; this.playButton.removeClass("is-playing is-loading"); } currentTime() { return Math.round(this.audio.currentSeek() || 0); } percentComplete() { return this.currentTime() / this.duration * 100; } step() { if (this.audioLoaded && !this.isScrubbing) { this.current.text(Episode.formatTime(this.currentTime())); this.scrubber.first().value = this.currentTime(); this.track.first().style.width = `${this.percentComplete()}%`; } if (this.isPlaying()) requestAnimationFrame(this.step.bind(this)); } scrub(to) { this.isScrubbing = true; this.current.text(Episode.formatTime(to)); this.track.first().style.width = `${this.percentComplete()}%`; } scrubEnd(to) { this.isScrubbing = false; this.audio.seek(to, () => { this.playButton.addClass("is-loading"); }, () => { this.playButton.removeClass("is-loading"); }); } trackTime() { let complete = this.percentComplete(); for (var percent in this.tracked) { if (complete >= percent && !this.tracked[percent]) { this.log(`${percent}% Played`); this.tracked[percent] = true; } } } }
{ "pile_set_name": "Github" }
class DvdMemento def initialize(toSave) @cache = Marshal.load(Marshal.dump(toSave)) end def getState @cache end end
{ "pile_set_name": "Github" }
{ "title": "AnimeKisa", "url": "https://animekisa.tv/", "testCases": [ { "url": "https://animekisa.tv/phantom-requiem-for-the-phantom-episode-9", "expected": { "sync": true, "title": "Phantom: Requiem for the Phantom", "identifier": "phantom-requiem-for-the-phantom", "overviewUrl": "https://animekisa.tv/phantom-requiem-for-the-phantom", "episode": 9, "nextEpUrl": "https://animekisa.tv/phantom-requiem-for-the-phantom-episode-10", "uiSelector": false } }, { "url": "https://animekisa.tv/phantom-requiem-for-the-phantom", "expected": { "sync": false, "title": "Phantom: Requiem for the Phantom", "identifier": "phantom-requiem-for-the-phantom", "uiSelector": true, "epList": { "5": "https://animekisa.tv/phantom-requiem-for-the-phantom-episode-5" } } } ] }
{ "pile_set_name": "Github" }
{ "dashboard": "Panou de bord", "exchange": "Schimb valutar", "settings": "Seări", "swap": "Schimbă", "trades": "Tranzacții" }
{ "pile_set_name": "Github" }
/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.vision.v1p2beta1.model; /** * Response to an image annotation request. * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Cloud Vision API. For a detailed explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class GoogleCloudVisionV1p2beta1AnnotateImageResponse extends com.google.api.client.json.GenericJson { /** * If present, contextual information is needed to understand where this image comes from. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1ImageAnnotationContext context; /** * If present, crop hints have completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1CropHintsAnnotation cropHintsAnnotation; /** * If set, represents the error message for the operation. Note that filled-in image annotations * are guaranteed to be correct, even when `error` is set. * The value may be {@code null}. 
*/ @com.google.api.client.util.Key private Status error; /** * If present, face detection has completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudVisionV1p2beta1FaceAnnotation> faceAnnotations; /** * If present, text (OCR) detection or document (OCR) text detection has completed successfully. * This annotation provides the structural hierarchy for the OCR detected text. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1TextAnnotation fullTextAnnotation; /** * If present, image properties were extracted successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1ImageProperties imagePropertiesAnnotation; /** * If present, label detection has completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> labelAnnotations; /** * If present, landmark detection has completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> landmarkAnnotations; /** * If present, localized object detection has completed successfully. This will be sorted * descending by confidence score. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudVisionV1p2beta1LocalizedObjectAnnotation> localizedObjectAnnotations; /** * If present, logo detection has completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> logoAnnotations; /** * If present, product search has completed successfully. * The value may be {@code null}. 
*/ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1ProductSearchResults productSearchResults; /** * If present, safe-search annotation has completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1SafeSearchAnnotation safeSearchAnnotation; /** * If present, text (OCR) detection has completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> textAnnotations; /** * If present, web detection has completed successfully. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1WebDetection webDetection; /** * If present, contextual information is needed to understand where this image comes from. * @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1ImageAnnotationContext getContext() { return context; } /** * If present, contextual information is needed to understand where this image comes from. * @param context context or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setContext(GoogleCloudVisionV1p2beta1ImageAnnotationContext context) { this.context = context; return this; } /** * If present, crop hints have completed successfully. * @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1CropHintsAnnotation getCropHintsAnnotation() { return cropHintsAnnotation; } /** * If present, crop hints have completed successfully. * @param cropHintsAnnotation cropHintsAnnotation or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setCropHintsAnnotation(GoogleCloudVisionV1p2beta1CropHintsAnnotation cropHintsAnnotation) { this.cropHintsAnnotation = cropHintsAnnotation; return this; } /** * If set, represents the error message for the operation. Note that filled-in image annotations * are guaranteed to be correct, even when `error` is set. 
* @return value or {@code null} for none */ public Status getError() { return error; } /** * If set, represents the error message for the operation. Note that filled-in image annotations * are guaranteed to be correct, even when `error` is set. * @param error error or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setError(Status error) { this.error = error; return this; } /** * If present, face detection has completed successfully. * @return value or {@code null} for none */ public java.util.List<GoogleCloudVisionV1p2beta1FaceAnnotation> getFaceAnnotations() { return faceAnnotations; } /** * If present, face detection has completed successfully. * @param faceAnnotations faceAnnotations or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setFaceAnnotations(java.util.List<GoogleCloudVisionV1p2beta1FaceAnnotation> faceAnnotations) { this.faceAnnotations = faceAnnotations; return this; } /** * If present, text (OCR) detection or document (OCR) text detection has completed successfully. * This annotation provides the structural hierarchy for the OCR detected text. * @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1TextAnnotation getFullTextAnnotation() { return fullTextAnnotation; } /** * If present, text (OCR) detection or document (OCR) text detection has completed successfully. * This annotation provides the structural hierarchy for the OCR detected text. * @param fullTextAnnotation fullTextAnnotation or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setFullTextAnnotation(GoogleCloudVisionV1p2beta1TextAnnotation fullTextAnnotation) { this.fullTextAnnotation = fullTextAnnotation; return this; } /** * If present, image properties were extracted successfully. 
* @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1ImageProperties getImagePropertiesAnnotation() { return imagePropertiesAnnotation; } /** * If present, image properties were extracted successfully. * @param imagePropertiesAnnotation imagePropertiesAnnotation or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setImagePropertiesAnnotation(GoogleCloudVisionV1p2beta1ImageProperties imagePropertiesAnnotation) { this.imagePropertiesAnnotation = imagePropertiesAnnotation; return this; } /** * If present, label detection has completed successfully. * @return value or {@code null} for none */ public java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> getLabelAnnotations() { return labelAnnotations; } /** * If present, label detection has completed successfully. * @param labelAnnotations labelAnnotations or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setLabelAnnotations(java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> labelAnnotations) { this.labelAnnotations = labelAnnotations; return this; } /** * If present, landmark detection has completed successfully. * @return value or {@code null} for none */ public java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> getLandmarkAnnotations() { return landmarkAnnotations; } /** * If present, landmark detection has completed successfully. * @param landmarkAnnotations landmarkAnnotations or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setLandmarkAnnotations(java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> landmarkAnnotations) { this.landmarkAnnotations = landmarkAnnotations; return this; } /** * If present, localized object detection has completed successfully. This will be sorted * descending by confidence score. 
* @return value or {@code null} for none */ public java.util.List<GoogleCloudVisionV1p2beta1LocalizedObjectAnnotation> getLocalizedObjectAnnotations() { return localizedObjectAnnotations; } /** * If present, localized object detection has completed successfully. This will be sorted * descending by confidence score. * @param localizedObjectAnnotations localizedObjectAnnotations or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setLocalizedObjectAnnotations(java.util.List<GoogleCloudVisionV1p2beta1LocalizedObjectAnnotation> localizedObjectAnnotations) { this.localizedObjectAnnotations = localizedObjectAnnotations; return this; } /** * If present, logo detection has completed successfully. * @return value or {@code null} for none */ public java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> getLogoAnnotations() { return logoAnnotations; } /** * If present, logo detection has completed successfully. * @param logoAnnotations logoAnnotations or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setLogoAnnotations(java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> logoAnnotations) { this.logoAnnotations = logoAnnotations; return this; } /** * If present, product search has completed successfully. * @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1ProductSearchResults getProductSearchResults() { return productSearchResults; } /** * If present, product search has completed successfully. * @param productSearchResults productSearchResults or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setProductSearchResults(GoogleCloudVisionV1p2beta1ProductSearchResults productSearchResults) { this.productSearchResults = productSearchResults; return this; } /** * If present, safe-search annotation has completed successfully. 
* @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1SafeSearchAnnotation getSafeSearchAnnotation() { return safeSearchAnnotation; } /** * If present, safe-search annotation has completed successfully. * @param safeSearchAnnotation safeSearchAnnotation or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setSafeSearchAnnotation(GoogleCloudVisionV1p2beta1SafeSearchAnnotation safeSearchAnnotation) { this.safeSearchAnnotation = safeSearchAnnotation; return this; } /** * If present, text (OCR) detection has completed successfully. * @return value or {@code null} for none */ public java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> getTextAnnotations() { return textAnnotations; } /** * If present, text (OCR) detection has completed successfully. * @param textAnnotations textAnnotations or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setTextAnnotations(java.util.List<GoogleCloudVisionV1p2beta1EntityAnnotation> textAnnotations) { this.textAnnotations = textAnnotations; return this; } /** * If present, web detection has completed successfully. * @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1WebDetection getWebDetection() { return webDetection; } /** * If present, web detection has completed successfully. * @param webDetection webDetection or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateImageResponse setWebDetection(GoogleCloudVisionV1p2beta1WebDetection webDetection) { this.webDetection = webDetection; return this; } @Override public GoogleCloudVisionV1p2beta1AnnotateImageResponse set(String fieldName, Object value) { return (GoogleCloudVisionV1p2beta1AnnotateImageResponse) super.set(fieldName, value); } @Override public GoogleCloudVisionV1p2beta1AnnotateImageResponse clone() { return (GoogleCloudVisionV1p2beta1AnnotateImageResponse) super.clone(); } }
{ "pile_set_name": "Github" }
# frozen-string_literal: true module Decidim class ParticipatoryProcessStepChangedEvent < Decidim::Events::SimpleEvent include Rails.application.routes.mounted_helpers def resource_path @resource_path ||= decidim_participatory_processes.participatory_process_participatory_process_steps_path(participatory_process_slug: participatory_space.slug) end def resource_url @resource_url ||= decidim_participatory_processes .participatory_process_participatory_process_steps_url( participatory_process_slug: participatory_space.slug, host: participatory_space.organization.host ) end def participatory_space resource.participatory_process end end end
{ "pile_set_name": "Github" }
namespace Fuse.Installer.Gui { /// <summary> /// Interaction logic for ErrorView.xaml /// </summary> public partial class ErrorView { public ErrorView(ErrorViewModel model) { DataContext = model; InitializeComponent(); } } }
{ "pile_set_name": "Github" }
/* * Copyright (C) 2007-2015 Lonelycoder AB * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * This program is also available under a commercial proprietary license. * For more information, contact [email protected] */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <ctype.h> #include "main.h" #include "fileaccess.h" #include "fa_probe.h" #include "navigator.h" #include "media/media.h" #include "misc/str.h" #include "misc/isolang.h" #include "image/jpeg.h" #include "htsmsg/htsmsg_json.h" #if ENABLE_LIBAV #include <libavutil/avstring.h> #include <libavformat/avio.h> #include <libavformat/avformat.h> #include "fa_libav.h" #include "libav.h" #endif #if ENABLE_VMIR #include "np/np.h" #endif #if ENABLE_PLUGINS #include "plugins.h" #endif /** * */ static const char * codecname(enum AVCodecID id) { AVCodec *c; switch(id) { case AV_CODEC_ID_AC3: return "AC3"; case AV_CODEC_ID_EAC3: return "EAC3"; case AV_CODEC_ID_DTS: return "DTS"; case AV_CODEC_ID_TEXT: case AV_CODEC_ID_MOV_TEXT: return "Text"; case AV_CODEC_ID_SSA: return "SSA"; default: c = avcodec_find_decoder(id); if(c) return c->name; return "Unsupported Codec"; } } static const uint8_t pngsig[8] = {137, 80, 78, 71, 13, 10, 26, 10}; static const uint8_t isosig[8] = {0x1, 0x43, 0x44, 0x30, 0x30, 0x31, 0x1, 0x0}; static const uint8_t gifsig[6] = {'G', 'I', 'F', '8', '9', 'a'}; static 
const uint8_t ttfsig[5] = {0,1,0,0,0}; static const uint8_t otfsig[4] = {'O', 'T', 'T', 'O'}; static const uint8_t pdfsig[] = {'%', 'P', 'D', 'F', '-'}; static const uint8_t offsig[8] ={0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1, 0x1a, 0xe1}; /** * */ static rstr_t * libav_metadata_rstr(AVDictionary *m, const char *key) { AVDictionaryEntry *tag; int len; rstr_t *ret; const char *str; char *d; if((tag = av_dict_get(m, key, NULL, 0)) == NULL) { if((tag = av_dict_get(m, key, NULL, AV_DICT_IGNORE_SUFFIX)) == NULL) { return NULL; } } if(!utf8_verify(tag->value)) return NULL; str = tag->value; len = strlen(str); ret = rstr_allocl(str, len); d = rstr_data(ret); while(len > 0) { len--; if(d[len] <= ' ' || d[len] == '-') d[len] = 0; else break; } if(*d == 0 || !strncasecmp(d, "http://", 7)) { rstr_release(ret); return NULL; } return ret; } /** * */ static int libav_metadata_int(AVDictionary *m, const char *key, int def) { AVDictionaryEntry *tag; if((tag = av_dict_get(m, key, NULL, AV_DICT_IGNORE_SUFFIX)) == NULL) return def; return tag->value && tag->value[0] >= '0' && tag->value[0] <= '9' ? atoi(tag->value) : def; } #if 0 /** * Obtain details from playlist */ static void fa_probe_playlist(metadata_t *md, const char *url, uint8_t *pb, size_t pbsize) { const char *t; char tmp1[300]; int i; t = strrchr(url, '/'); t = t ? t + 1 : url; i = 0; while(*t && *t != '.') tmp1[i++] = *t++; tmp1[i] = 0; md->md_title = rstr_alloc(tmp1); t = strstr((char *)pb, "NumberOfEntries="); if(t != NULL) md->md_tracks = atoi(t + 16); } #endif /** * */ static void metdata_set_redirect(metadata_t *md, const char *fmt, ...) 
{ char buf[URL_MAX]; va_list ap; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); md->md_redirect = strdup(buf); } /** * */ static int jpeginfo_reader(void *handle, void *buf, int64_t offset, size_t size) { if(fa_seek(handle, offset, SEEK_SET) != offset) return -1; return fa_read(handle, buf, size); } static void fa_probe_exif(metadata_t *md, const char *url, const uint8_t *pb, fa_handle_t *fh, int buflen) { jpeginfo_t ji; if(jpeg_info(&ji, jpeginfo_reader, fh, JPEG_INFO_DIMENSIONS | JPEG_INFO_ORIENTATION | JPEG_INFO_METADATA, pb, buflen, NULL, 0)) return; md->md_time = ji.ji_time; md->md_manufacturer = rstr_dup(ji.ji_manufacturer); md->md_equipment = rstr_dup(ji.ji_equipment); jpeg_info_clear(&ji); } /** * Probe file by checking its header */ static int fa_probe_header(metadata_t *md, const char *url, fa_handle_t *fh, const char *filename, const uint8_t *buf, size_t l) { uint16_t flags; if(l >= 256 && (!memcmp(buf, "d8:announce", 11))) { md->md_contenttype = CONTENT_ARCHIVE; metdata_set_redirect(md, "torrentfile://%s/", url); return 1; } if(l >= 256 && (!memcmp(buf, "d13:announce-list", 17))) { md->md_contenttype = CONTENT_ARCHIVE; metdata_set_redirect(md, "torrentfile://%s/", url); return 1; } if(gconf.fa_browse_archives && l >= 16 && buf[0] == 'R' && buf[1] == 'a' && buf[2] == 'r' && buf[3] == '!' 
&& buf[4] == 0x1a && buf[5] == 0x07 && buf[6] == 0x0 && buf[9] == 0x73) { flags = buf[10] | buf[11] << 8; if((flags & 0x101) == 1) { /* Don't include slave volumes */ md->md_contenttype = CONTENT_UNKNOWN; return 1; } metdata_set_redirect(md, "rar://%s", url); md->md_contenttype = CONTENT_ARCHIVE; return 1; } if(gconf.fa_browse_archives && l > 4 && buf[0] == 0x50 && buf[1] == 0x4b && buf[2] == 0x03 && buf[3] == 0x04) { char path[256]; buf_t *buf; snprintf(path, sizeof(path), "zip://%s/plugin.json", url); buf = fa_load(path, NULL); if(buf != NULL) { htsmsg_t *json = htsmsg_json_deserialize(buf_cstr(buf)); buf_release(buf); if(json != NULL) { const char *title = htsmsg_get_str(json, "title"); if(title != NULL && htsmsg_get_str(json, "id") != NULL && htsmsg_get_str(json, "type") != NULL) { md->md_title = rstr_alloc(title); md->md_contenttype = CONTENT_PLUGIN; htsmsg_release(json); return 1; } htsmsg_release(json); } } metdata_set_redirect(md, "zip://%s", url); md->md_contenttype = CONTENT_ARCHIVE; return 1; } #if 0 if(!strncasecmp((char *)buf, "[playlist]", 10)) { /* Playlist */ fa_probe_playlist(md, url, buf, sizeof(buf)); md->md_contenttype = CONTENT_PLAYLIST; return 1; } #endif if(l > 16 && buf[0] == 0xff && buf[1] == 0xd8 && buf[2] == 0xff) { /* JPEG image */ md->md_contenttype = CONTENT_IMAGE; fa_probe_exif(md, url, buf, fh, l); // Try to get more info return 1; } if(!memcmp(buf, pngsig, 8)) { /* PNG */ md->md_contenttype = CONTENT_IMAGE; return 1; } if(!memcmp(buf, pdfsig, sizeof(pdfsig))) { /* PDF */ md->md_contenttype = CONTENT_DOCUMENT; return 1; } if(!memcmp(buf, offsig, sizeof(offsig))) { /* MS OFFICE */ md->md_contenttype = CONTENT_DOCUMENT; return 1; } if(buf[0] == 'B' && buf[1] == 'M') { /* BMP */ uint32_t siz = buf[2] | (buf[3] << 8) | (buf[4] << 16) | (buf[5] << 24); if(siz == fa_fsize(fh)) { md->md_contenttype = CONTENT_IMAGE; return 1; } } if(!memcmp(buf, gifsig, sizeof(gifsig))) { /* GIF */ md->md_contenttype = CONTENT_IMAGE; return 1; } 
if(!memcmp(buf, "<?xml", 5) && find_str((char *)buf, l, "<svg")) { /* SVG */ md->md_contenttype = CONTENT_IMAGE; return 1; } if(buf[0] == '%' && buf[1] == 'P' && buf[2] == 'D' && buf[3] == 'F') { md->md_contenttype = CONTENT_UNKNOWN; return 1; } if(!memcmp(buf, ttfsig, sizeof(ttfsig)) || !memcmp(buf, otfsig, sizeof(otfsig))) { /* TTF or OTF */ md->md_contenttype = CONTENT_FONT; return 1; } if(l > 16 && mystrbegins((const char *)buf, "#EXTM3U")) { if(strstr((const char *)buf, "#EXT-X-STREAM-INF:") || strstr((const char *)buf, "#EXT-X-TARGETDURATION:") || strstr((const char *)buf, "#EXT-X-MEDIA-SEQUENCE:")) { // Top level HLS playlist md->md_contenttype = CONTENT_VIDEO; return 1; } metdata_set_redirect(md, "playlist:%s", url); md->md_contenttype = CONTENT_PLAYLIST; return 1; } return 0; } /** * Check if file is an iso image * pb is guaranteed to point at 128 byts * of data starting 0x8000 of start of file */ static int fa_probe_iso0(metadata_t *md, uint8_t *pb) { uint8_t *p; if(memcmp(pb, isosig, 8)) return -1; p = &pb[40]; while(*p > 32 && p != &pb[72]) p++; *p = 0; if(md != NULL) { md->md_title = rstr_alloc((const char *)pb + 40); md->md_contenttype = CONTENT_DVD; } return 0; } /** * Check if file is an iso image * pb is guaranteed to point at 64k of data */ int fa_probe_iso(metadata_t *md, fa_handle_t *fh) { uint8_t pb[128]; if(fa_seek_lazy(fh, 0x8000, SEEK_SET) != 0x8000) return -1; if(fa_read(fh, pb, sizeof(pb)) != sizeof(pb)) return -1; return fa_probe_iso0(md, pb); } /** * */ static void fa_lavf_load_meta(metadata_t *md, AVFormatContext *fctx, const char *filename) { int i; char tmp1[1024]; int has_video = 0; int has_audio = 0; md->md_artist = libav_metadata_rstr(fctx->metadata, "artist") ?: libav_metadata_rstr(fctx->metadata, "author"); md->md_album = libav_metadata_rstr(fctx->metadata, "album"); md->md_format = rstr_alloc(fctx->iformat->long_name); if(fctx->duration != AV_NOPTS_VALUE) md->md_duration = (float)fctx->duration / 1000000; for(i = 0; i < 
fctx->nb_streams; i++) { AVStream *stream = fctx->streams[i]; AVCodecContext *avctx = stream->codec; if(avctx->codec_type == AVMEDIA_TYPE_AUDIO) has_audio = 1; if(avctx->codec_type == AVMEDIA_TYPE_VIDEO && !(stream->disposition & AV_DISPOSITION_ATTACHED_PIC)) has_video = 1; } if(has_audio && !has_video) { md->md_contenttype = CONTENT_AUDIO; md->md_title = libav_metadata_rstr(fctx->metadata, "title"); md->md_track = libav_metadata_int(fctx->metadata, "track", filename ? atoi(filename) : 0); return; } has_audio = 0; has_video = 0; if(1) { int atrack = 0; int strack = 0; int vtrack = 0; /* Check each stream */ for(i = 0; i < fctx->nb_streams; i++) { AVStream *stream = fctx->streams[i]; AVCodecContext *avctx = stream->codec; AVCodec *codec = avcodec_find_decoder(avctx->codec_id); AVDictionaryEntry *lang, *title; int tn; char str[256]; avcodec_string(str, sizeof(str), avctx, 0); TRACE(TRACE_DEBUG, "Probe", " Stream #%d: %s", i, str); switch(avctx->codec_type) { case AVMEDIA_TYPE_VIDEO: has_video = !!codec; tn = ++vtrack; break; case AVMEDIA_TYPE_AUDIO: has_audio = !!codec; tn = ++atrack; break; case AVMEDIA_TYPE_SUBTITLE: tn = ++strack; break; default: continue; } if(codec == NULL) { snprintf(tmp1, sizeof(tmp1), "%s", codecname(avctx->codec_id)); } else { metadata_from_libav(tmp1, sizeof(tmp1), codec, avctx); } lang = av_dict_get(stream->metadata, "language", NULL, AV_DICT_IGNORE_SUFFIX); title = av_dict_get(stream->metadata, "title", NULL, AV_DICT_IGNORE_SUFFIX); metadata_add_stream(md, codecname(avctx->codec_id), avctx->codec_type, i, title ? title->value : NULL, tmp1, lang ? 
lang->value : NULL, stream->disposition, tn, avctx->channels); } md->md_contenttype = CONTENT_FILE; if(has_video) { md->md_contenttype = CONTENT_VIDEO; } else if(has_audio) { md->md_contenttype = CONTENT_AUDIO; } } } /** * */ metadata_t * fa_probe_metadata(const char *url, char *errbuf, size_t errsize, const char *filename, prop_t *stats) { const char *postfix = strrchr(url, '.'); if(postfix != NULL) { // Some files can just be figured out by the file ending if(!strcmp(postfix, ".m3u")) { metadata_t *md = metadata_create(); metdata_set_redirect(md, "playlist:%s", url); md->md_contenttype = CONTENT_PLAYLIST; return md; } } AVFormatContext *fctx; int park = 1; fa_open_extra_t foe = { .foe_stats = stats }; fa_handle_t *fh = fa_open_ex(url, errbuf, errsize, FA_BUFFERED_SMALL, &foe); if(fh == NULL) return NULL; metadata_t *md = metadata_create(); uint8_t buf[4097]; int l = fa_read(fh, buf, sizeof(buf) - 1); if(l > 0) { buf[l] = 0; #if ENABLE_PLUGINS plugin_probe_for_autoinstall(fh, buf, l, url); #endif #if ENABLE_VMIR if(np_fa_probe(fh, buf, l, md, url) == 0) { fa_close_with_park(fh, park); return md; } #endif if(fa_probe_header(md, url, fh, filename, buf, l)) { fa_close_with_park(fh, park); return md; } } fa_seek(fh, 0, SEEK_SET); if(!fa_probe_iso(md, fh)) { fa_close_with_park(fh, park); return md; } int strategy = fa_libav_get_strategy_for_file(fh); AVIOContext *avio = fa_libav_reopen(fh, 0); if((fctx = fa_libav_open_format(avio, url, errbuf, errsize, NULL, strategy)) == NULL) { fa_libav_close(avio); metadata_destroy(md); return NULL; } fa_lavf_load_meta(md, fctx, filename); fa_libav_close_format(fctx, park); return md; } /** * */ metadata_t * fa_metadata_from_fctx(AVFormatContext *fctx) { metadata_t *md = metadata_create(); fa_lavf_load_meta(md, fctx, NULL); return md; } /** * Probe a directory */ metadata_t * fa_probe_dir(const char *url) { metadata_t *md = metadata_create(); char path[URL_MAX]; struct fa_stat fs; md->md_contenttype = CONTENT_DIR; fa_pathjoin(path, 
sizeof(path), url, "VIDEO_TS"); if(fa_stat(path, &fs, NULL, 0) == 0 && fs.fs_type == CONTENT_DIR) { md->md_contenttype = CONTENT_DVD; return md; } fa_pathjoin(path, sizeof(path), url, "video_ts"); if(fa_stat(path, &fs, NULL, 0) == 0 && fs.fs_type == CONTENT_DIR) { md->md_contenttype = CONTENT_DVD; return md; } return md; }
{ "pile_set_name": "Github" }
/* Copyright 2013 David Axmark Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* * UnixTimeStamp.cpp */ #include "UnixTimeStamp.h" /* * default constructor */ UnixTimeStamp::UnixTimeStamp() { } /* * constructor * @param date - the calendar Date * @param time - the time point */ UnixTimeStamp::UnixTimeStamp(const Date &date, const Time &time) { set(date, time); } /* * initializes a UnixTimeStamp object. * @param date - the calendar Date * @param time - the time point */ void UnixTimeStamp::set(const Date &date, const Time &time) { MAUtil::String dateStr = compose_ISO_8601_Date(date); MAUtil::String timeStr = compose_ISO_8601_Time(time); mUnixTimeStamp = dateStr + "T" + timeStr; } /* * returns the time point */ const MAUtil::String &UnixTimeStamp::getTimeStamp() const { return mUnixTimeStamp; } /* * returns true if the UnixTimeStamp object is not initialized. */ bool UnixTimeStamp::isEmpty() const { return (0 == mUnixTimeStamp.size()); } /* * helper function that creates a string holding a calendar date in the form: YYYY-MM-DD (according to the * ISO_8601 standard). * @param date - the calendar date, from which the string is formed. 
*/ MAUtil::String UnixTimeStamp::compose_ISO_8601_Date(const Date &date) { MAUtil::String temp; temp = date.mYear; if(date.mMonth.size()>0) { temp += "-"; temp += date.mMonth; } if(date.mDay.size()>0) { temp += "-"; temp += date.mDay; } return temp; } /* * helper function that creates a string holding a time point in the form: hh:mm:ss (according to the * ISO_8601 standard). * @param time - the time point, from which the string is formed. */ MAUtil::String UnixTimeStamp::compose_ISO_8601_Time(const Time &time) { MAUtil::String temp; temp = time.mHour; temp += ":"; if(time.mMinute.size()>0) { temp += time.mMinute; } else { temp += "00"; } temp += ":"; if(time.mSecond.size()>0) { temp += time.mSecond; } else { temp += "00"; } return temp; }
{ "pile_set_name": "Github" }
<!doctype html> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, minimum-scale=1, initial-scale=1, user-scalable=yes"> <title>LrnappCis: lrnapp-cis Demo</title> <script src="../../../node_modules/@lrnwebcomponents/deduping-fix/deduping-fix.js"></script> <script src="../../../node_modules/web-animations-js/web-animations-next-lite.min.js"></script> <script type="module"> import '@polymer/iron-demo-helpers/demo-pages-shared-styles.js' import '@polymer/iron-demo-helpers/demo-snippet.js'; import '../lrnapp-open-studio.js'; </script> <style is="custom-style" include="demo-pages-shared-styles"></style> </head> <body> <lrnapp-open-studio csrf-token="rc_Sp_yX8jXEkQBZIkpfKh6Kfkvu_XqyfRbtj0_zhnA" end-point="/components/@lrnwebcomponents/elmsln-apps/lib/lrnapp-open-studio/demo/" base-path="/components/@lrnwebcomponents/elmsln-apps/lib/lrnapp-open-studio/" source-path="data.json" class="webcomponent-module-selector" elmsln-course="sing100" elmsln-section="master"> </lrnapp-open-studio> </body> </html>
{ "pile_set_name": "Github" }
package dns import ( "bytes" "fmt" "strconv" "strings" ) // Parse the $GENERATE statement as used in BIND9 zones. // See http://www.zytrax.com/books/dns/ch8/generate.html for instance. // We are called after '$GENERATE '. After which we expect: // * the range (12-24/2) // * lhs (ownername) // * [[ttl][class]] // * type // * rhs (rdata) // But we are lazy here, only the range is parsed *all* occurrences // of $ after that are interpreted. // Any error are returned as a string value, the empty string signals // "no error". func generate(l lex, c chan lex, t chan *Token, o string) string { step := 1 if i := strings.IndexAny(l.token, "/"); i != -1 { if i+1 == len(l.token) { return "bad step in $GENERATE range" } if s, e := strconv.Atoi(l.token[i+1:]); e == nil { if s < 0 { return "bad step in $GENERATE range" } step = s } else { return "bad step in $GENERATE range" } l.token = l.token[:i] } sx := strings.SplitN(l.token, "-", 2) if len(sx) != 2 { return "bad start-stop in $GENERATE range" } start, err := strconv.Atoi(sx[0]) if err != nil { return "bad start in $GENERATE range" } end, err := strconv.Atoi(sx[1]) if err != nil { return "bad stop in $GENERATE range" } if end < 0 || start < 0 || end < start { return "bad range in $GENERATE range" } <-c // _BLANK // Create a complete new string, which we then parse again. 
s := "" BuildRR: l = <-c if l.value != zNewline && l.value != zEOF { s += l.token goto BuildRR } for i := start; i <= end; i += step { var ( escape bool dom bytes.Buffer mod string err string offset int ) for j := 0; j < len(s); j++ { // No 'range' because we need to jump around switch s[j] { case '\\': if escape { dom.WriteByte('\\') escape = false continue } escape = true case '$': mod = "%d" offset = 0 if escape { dom.WriteByte('$') escape = false continue } escape = false if j+1 >= len(s) { // End of the string dom.WriteString(fmt.Sprintf(mod, i+offset)) continue } else { if s[j+1] == '$' { dom.WriteByte('$') j++ continue } } // Search for { and } if s[j+1] == '{' { // Modifier block sep := strings.Index(s[j+2:], "}") if sep == -1 { return "bad modifier in $GENERATE" } mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) if err != "" { return err } j += 2 + sep // Jump to it } dom.WriteString(fmt.Sprintf(mod, i+offset)) default: if escape { // Pretty useless here escape = false continue } dom.WriteByte(s[j]) } } // Re-parse the RR and send it on the current channel t rx, e := NewRR("$ORIGIN " + o + "\n" + dom.String()) if e != nil { return e.(*ParseError).err } t <- &Token{RR: rx} // Its more efficient to first built the rrlist and then parse it in // one go! But is this a problem? } return "" } // Convert a $GENERATE modifier 0,0,d to something Printf can deal with. 
// modToPrintf parses a modifier of the form "offset,width,base" and returns
// the Printf format string for the value, the numeric offset to add, and an
// error message ("" on success). The verb is rebuilt from the validated
// integer width so stray sign characters in the raw field (e.g. "+5", which
// strconv.Atoi accepts) cannot leak into the format string as printf flags.
func modToPrintf(s string) (string, int, string) {
	xs := strings.SplitN(s, ",", 3)
	if len(xs) != 3 {
		return "", 0, "bad modifier in $GENERATE"
	}
	// xs[0] is offset, xs[1] is width, xs[2] is base.
	if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
		return "", 0, "bad base in $GENERATE"
	}
	offset, err := strconv.Atoi(xs[0])
	if err != nil || offset > 255 {
		return "", 0, "bad offset in $GENERATE"
	}
	width, err := strconv.Atoi(xs[1])
	if err != nil || width < 0 || width > 255 {
		return "", offset, "bad width in $GENERATE"
	}
	if width == 0 {
		// No padding requested; a bare verb ("%d") is equivalent to the
		// old "%0d" (zero flag with no width) for all integer values.
		return "%" + xs[2], offset, ""
	}
	return "%0" + strconv.Itoa(width) + xs[2], offset, ""
}
{ "pile_set_name": "Github" }