text
stringlengths
2
100k
meta
dict
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 25 2017 03:49:04).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//

#import <Cocoa/NSTextView.h>

// Reverse-engineered interface (class-dump output) for a text view apparently
// used inside a HUD-style panel. NOTE(review): only the declarations are
// visible here — the return values and drawing behavior below are inferred
// from Cocoa override conventions and should be confirmed against the binary.
@interface HUDTextView : NSTextView
{
}

// NSResponder override — presumably returns YES so the view can take focus;
// verify against the implementation.
- (BOOL)acceptsFirstResponder;
// NSView override — opacity hint for the drawing machinery; actual value
// cannot be determined from this dump.
- (BOOL)isOpaque;
// NSTextView override for custom (likely HUD-themed) background drawing.
- (void)drawViewBackgroundInRect:(struct CGRect)arg1;
// Designated NSView initializer override.
- (id)initWithFrame:(struct CGRect)arg1;

@end
{ "pile_set_name": "Github" }
Search Best Parameters for Re-Ranking =====================================
{ "pile_set_name": "Github" }
! -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- ! ! The contents of this file are subject to the Mozilla Public ! License Version 1.1 (the "License"); you may not use this file ! except in compliance with the License. You may obtain a copy of ! the License at http://www.mozilla.org/MPL/ ! ! Software distributed under the License is distributed on an "AS ! IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or ! implied. See the License for the specific language governing ! rights and limitations under the License. ! ! The Original Code is the Netscape Portable Runtime (NSPR). ! ! The Initial Developer of the Original Code is Netscape ! Communications Corporation. Portions created by Netscape are ! Copyright (C) 1998-2000 Netscape Communications Corporation. All ! Rights Reserved. ! ! Contributor(s): ! ! Alternatively, the contents of this file may be used under the ! terms of the GNU General Public License Version 2 or later (the ! "GPL"), in which case the provisions of the GPL are applicable ! instead of those above. If you wish to allow use of your ! version of this file only under the terms of the GPL and not to ! allow others to use your version of this file under the MPL, ! indicate your decision by deleting the provisions above and ! replace them with the notice and other provisions required by ! the GPL. If you do not delete the provisions above, a recipient ! may use your version of this file under either the MPL or the ! GPL. ! ! ! atomic increment, decrement and swap routines for V8+ sparc (ultrasparc) ! using CAS (compare-and-swap) atomic instructions ! ! this MUST be compiled with an ultrasparc-aware assembler ! ! standard asm linkage macros; this module must be compiled ! with the -P option (use C preprocessor) #include <sys/asm_linkage.h> ! ====================================================================== ! ! Perform the sequence a = a + 1 atomically with respect to other ! 
fetch-and-adds to location a in a wait-free fashion. ! ! usage : val = PR_AtomicIncrement(address) ! return: current value (you'd think this would be old val) ! ! ----------------------- ! Note on REGISTER USAGE: ! as this is a LEAF procedure, a new stack frame is not created; ! we use the caller's stack frame so what would normally be %i (input) ! registers are actually %o (output registers). Also, we must not ! overwrite the contents of %l (local) registers as they are not ! assumed to be volatile during calls. ! ! So, the registers used are: ! %o0 [input] - the address of the value to increment ! %o1 [local] - work register ! %o2 [local] - work register ! %o3 [local] - work register ! ----------------------- ENTRY(PR_AtomicIncrement) ! standard assembler/ELF prologue retryAI: ld [%o0], %o2 ! set o2 to the current value add %o2, 0x1, %o3 ! calc the new value mov %o3, %o1 ! save the return value cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed cmp %o2, %o3 ! see if we set the value bne retryAI ! if not, try again nop ! empty out the branch pipeline retl ! return back to the caller mov %o1, %o0 ! set the return code to the new value SET_SIZE(PR_AtomicIncrement) ! standard assembler/ELF epilogue ! ! end ! ! ====================================================================== ! ! ====================================================================== ! ! Perform the sequence a = a - 1 atomically with respect to other ! fetch-and-decs to location a in a wait-free fashion. ! ! usage : val = PR_AtomicDecrement(address) ! return: current value (you'd think this would be old val) ! ! ----------------------- ! Note on REGISTER USAGE: ! as this is a LEAF procedure, a new stack frame is not created; ! we use the caller's stack frame so what would normally be %i (input) ! registers are actually %o (output registers). Also, we must not ! overwrite the contents of %l (local) registers as they are not ! assumed to be volatile during calls. ! ! 
So, the registers used are: ! %o0 [input] - the address of the value to increment ! %o1 [local] - work register ! %o2 [local] - work register ! %o3 [local] - work register ! ----------------------- ENTRY(PR_AtomicDecrement) ! standard assembler/ELF prologue retryAD: ld [%o0], %o2 ! set o2 to the current value sub %o2, 0x1, %o3 ! calc the new value mov %o3, %o1 ! save the return value cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed cmp %o2, %o3 ! see if we set the value bne retryAD ! if not, try again nop ! empty out the branch pipeline retl ! return back to the caller mov %o1, %o0 ! set the return code to the new value SET_SIZE(PR_AtomicDecrement) ! standard assembler/ELF epilogue ! ! end ! ! ====================================================================== ! ! ====================================================================== ! ! Perform the sequence a = b atomically with respect to other ! fetch-and-stores to location a in a wait-free fashion. ! ! usage : old_val = PR_AtomicSet(address, newval) ! ! ----------------------- ! Note on REGISTER USAGE: ! as this is a LEAF procedure, a new stack frame is not created; ! we use the caller's stack frame so what would normally be %i (input) ! registers are actually %o (output registers). Also, we must not ! overwrite the contents of %l (local) registers as they are not ! assumed to be volatile during calls. ! ! So, the registers used are: ! %o0 [input] - the address of the value to increment ! %o1 [input] - the new value to set for [%o0] ! %o2 [local] - work register ! %o3 [local] - work register ! ----------------------- ENTRY(PR_AtomicSet) ! standard assembler/ELF prologue retryAS: ld [%o0], %o2 ! set o2 to the current value mov %o1, %o3 ! set up the new value cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed cmp %o2, %o3 ! see if we set the value bne retryAS ! if not, try again nop ! empty out the branch pipeline retl ! return back to the caller mov %o3, %o0 ! 
set the return code to the prev value SET_SIZE(PR_AtomicSet) ! standard assembler/ELF epilogue ! ! end ! ! ====================================================================== ! ! ====================================================================== ! ! Perform the sequence a = a + b atomically with respect to other ! fetch-and-adds to location a in a wait-free fashion. ! ! usage : newval = PR_AtomicAdd(address, val) ! return: the value after addition ! ENTRY(PR_AtomicAdd) ! standard assembler/ELF prologue retryAA: ld [%o0], %o2 ! set o2 to the current value add %o2, %o1, %o3 ! calc the new value mov %o3, %o4 ! save the return value cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed cmp %o2, %o3 ! see if we set the value bne retryAA ! if not, try again nop ! empty out the branch pipeline retl ! return back to the caller mov %o4, %o0 ! set the return code to the new value SET_SIZE(PR_AtomicAdd) ! standard assembler/ELF epilogue ! ! end !
{ "pile_set_name": "Github" }
package org.matsim.contrib.osm.networkReader; public class OsmTags { public static final String MOTORWAY = "motorway"; public static final String MOTORWAY_LINK = "motorway_link"; public static final String TRUNK = "trunk"; public static final String TRUNK_LINK = "trunk_link"; public static final String PRIMARY = "primary"; public static final String PRIMARY_LINK = "primary_link"; public static final String SECONDARY = "secondary"; public static final String SECONDARY_LINK = "secondary_link"; public static final String TERTIARY = "tertiary"; public static final String TERTIARY_LINK = "tertiary_link"; public static final String UNCLASSIFIED = "unclassified"; public static final String RESIDENTIAL = "residential"; public static final String LIVING_STREET = "living_street"; public static final String TRACK = "track"; public static final String CYCLEWAY = "cycleway"; public static final String SERVICE = "service"; public static final String FOOTWAY = "footway"; public static final String PEDESTRIAN = "pedestrian"; public static final String PATH = "path"; public static final String STEPS = "steps"; public static final String HIGHWAY = "highway"; public static final String MAXSPEED = "maxspeed"; public static final String ONEWAY = "oneway"; public static final String ROUNDABOUT = "roundabout"; public static final String JUNCTION = "junction"; public static final String LANES = "lanes"; public static final String LANES_FORWARD = "lanes:forward"; public static final String LANES_BACKWARD = "lanes:backward"; public static final String SURFACE = "surface"; public static final String SMOOTHNESS = "smoothness"; public static final String BICYCLE = "bicycle"; public final static String ONEWAYBICYCLE = "oneway:bicycle"; public static final String MPH = "mph"; public static final String TRAFFIC_SINGALS = "traffic_signals"; public static final String CROSSING = "crossing"; public static final String TYPE = "type"; public static final String RESTRICTION = "restriction"; }
{ "pile_set_name": "Github" }
/////////////////////////////////////////////////////////////////////////////// // results_cache.hpp // // Copyright 2008 Eric Niebler. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_XPRESSIVE_DETAIL_CORE_RESULTS_CACHE_HPP_EAN_10_04_2005 #define BOOST_XPRESSIVE_DETAIL_CORE_RESULTS_CACHE_HPP_EAN_10_04_2005 // MS compatible compilers support #pragma once #if defined(_MSC_VER) # pragma once #endif #include <cstddef> #include <boost/detail/workaround.hpp> #include <boost/assert.hpp> #include <boost/xpressive/detail/detail_fwd.hpp> #include <boost/xpressive/detail/core/list.hpp> #include <boost/xpressive/detail/core/access.hpp> #include <boost/xpressive/match_results.hpp> namespace boost { namespace xpressive { namespace detail { /////////////////////////////////////////////////////////////////////////////// // nested_results #if BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3206)) template<typename BidiIter> struct nested_results : detail::list<match_results<BidiIter> > { friend struct results_cache<BidiIter>; friend struct match_results<BidiIter>; }; #else template<typename BidiIter> struct nested_results : private detail::list<match_results<BidiIter> > { friend struct results_cache<BidiIter>; friend struct xpressive::match_results<BidiIter>; typedef list<xpressive::match_results<BidiIter> > base_type; typedef typename base_type::iterator iterator; typedef typename base_type::const_iterator const_iterator; typedef typename base_type::pointer pointer; typedef typename base_type::const_pointer const_pointer; typedef typename base_type::reference reference; typedef typename base_type::const_reference const_reference; typedef typename base_type::size_type size_type; using base_type::begin; using base_type::end; using base_type::size; using base_type::empty; using base_type::front; using base_type::back; }; #endif 
/////////////////////////////////////////////////////////////////////////////// // results_cache // // cache storage for reclaimed match_results structs template<typename BidiIter> struct results_cache { typedef core_access<BidiIter> access; match_results<BidiIter> &append_new(nested_results<BidiIter> &out) { if(this->cache_.empty()) { out.push_back(match_results<BidiIter>()); } else { BOOST_ASSERT(access::get_nested_results(this->cache_.back()).empty()); out.splice(out.end(), this->cache_, --this->cache_.end()); } return out.back(); } // move the last match_results struct into the cache void reclaim_last(nested_results<BidiIter> &out) { BOOST_ASSERT(!out.empty()); // first, reclaim any nested results nested_results<BidiIter> &nested = access::get_nested_results(out.back()); if(!nested.empty()) { this->reclaim_all(nested); } // then, reclaim the last match_results this->cache_.splice(this->cache_.end(), out, --out.end()); } // move the last n match_results structs into the cache void reclaim_last_n(nested_results<BidiIter> &out, std::size_t count) { for(; 0 != count; --count) { this->reclaim_last(out); } } void reclaim_all(nested_results<BidiIter> &out) { typedef typename nested_results<BidiIter>::iterator iter_type; // first, recursively reclaim all the nested results for(iter_type begin = out.begin(); begin != out.end(); ++begin) { nested_results<BidiIter> &nested = access::get_nested_results(*begin); if(!nested.empty()) { this->reclaim_all(nested); } } // next, reclaim the results themselves this->cache_.splice(this->cache_.end(), out); } private: nested_results<BidiIter> cache_; }; }}} // namespace boost::xpressive::detail #endif
{ "pile_set_name": "Github" }
StartChar: at.sc Encoding: 1114635 -1 1634 Width: 916 VWidth: 0 Flags: HMW LayerCount: 3 Fore SplineSet 492 47 m 0 451 47 448 84 438 84 c 0 428 84 385 47 322 47 c 0 256 47 204 86 204 156 c 0 204 277 305 355 415 355 c 0 435 355 468 351 480 347 c 0 495 342 499 341 503 341 c 0 513 341 518 360 528 360 c 0 532 360 549 356 546 352 c 0 537 328 527 288 524 272 c 10 503 140 l 18 502 131 500 122 500 113 c 0 500 98 504 84 518 84 c 0 588 84 652 137 652 220 c 8 652 359 533 467 394 467 c 8 236 467 97 360 97 191 c 8 97 32 234 -91 393 -91 c 16 458 -91 550 -56 600 -14 c 8 606 -9 613 -4 618 -4 c 0 627 -4 632 -8 632 -17 c 0 632 -41 490 -131 382 -131 c 8 204 -131 50 -3 50 181 c 0 50 395 223 507 414 507 c 16 563 507 699 384 699 235 c 0 699 118 599 47 492 47 c 0 393 325 m 0 336 325 275 273 275 175 c 0 275 103 320 81 358 81 c 0 382 81 398 89 418 103 c 0 430 111 439 121 445 152 c 2 464 255 l 2 465 259 465 263 465 267 c 0 465 304 436 325 393 325 c 0 EndSplineSet Validated: 1 Comment: "." EndChar
{ "pile_set_name": "Github" }
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include "os/mynewt.h" #include <hal/hal_bsp.h> #include <hal/hal_os_tick.h> #include <mcu/cmsis_nvic.h> #include "mcu/mcu.h" #include "os_priv.h" /* * From HAL_CM3.s */ extern void SVC_Handler(void); extern void PendSV_Handler(void); extern void SysTick_Handler(void); /* Initial program status register */ #define INITIAL_xPSR 0x01000000 /* * Initial LR indicating basic frame. * See ARMv7-M Architecture Ref Manual */ #define INITIAL_LR 0xfffffffd; /* * Exception priorities. The higher the number, the lower the priority. A * higher priority exception will interrupt a lower priority exception. 
*/ #define PEND_SV_PRIO ((1 << __NVIC_PRIO_BITS) - 1) #define OS_TICK_PRIO (PEND_SV_PRIO - 1) /* Make the SVC instruction highest priority */ #define SVC_PRIO (1) /* Stack frame structure */ struct stack_frame { uint32_t r4; uint32_t r5; uint32_t r6; uint32_t r7; uint32_t r8; uint32_t r9; uint32_t r10; uint32_t r11; uint32_t r0; uint32_t r1; uint32_t r2; uint32_t r3; uint32_t r12; uint32_t lr; uint32_t pc; uint32_t xpsr; }; #define SVC_ArgN(n) \ register int __r##n __asm("r"#n); #define SVC_Arg0() \ SVC_ArgN(0) \ SVC_ArgN(1) \ SVC_ArgN(2) \ SVC_ArgN(3) #if (defined (__CORTEX_M0)) || defined (__CORTEX_M0PLUS) #define SVC_Call(f) \ __asm volatile \ ( \ "ldr r7,="#f"\n\t" \ "mov r12,r7\n\t" \ "svc 0" \ : "=r" (__r0), "=r" (__r1), "=r" (__r2), "=r" (__r3) \ : "r" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) \ : "r7", "r12", "lr", "cc" \ ); #else #define SVC_Call(f) \ __asm volatile \ ( \ "ldr r12,="#f"\n\t" \ "svc 0" \ : "=r" (__r0), "=r" (__r1), "=r" (__r2), "=r" (__r3) \ : "r" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) \ : "r12", "lr", "cc" \ ); #endif /* XXX: determine how we will deal with running un-privileged */ uint32_t os_flags = OS_RUN_PRIV; void timer_handler(void) { os_time_advance(1); } void os_arch_ctx_sw(struct os_task *t) { os_sched_ctx_sw_hook(t); /* Set PendSV interrupt pending bit to force context switch */ SCB->ICSR = SCB_ICSR_PENDSVSET_Msk; } os_sr_t os_arch_save_sr(void) { uint32_t isr_ctx; #if MCU_CRITICAL_BASEPRI isr_ctx = __get_BASEPRI(); __set_BASEPRI((MCU_CRITICAL_BASEPRI) << (8 - __NVIC_PRIO_BITS)); #else isr_ctx = __get_PRIMASK() & 1; __disable_irq(); #endif return isr_ctx; } void os_arch_restore_sr(os_sr_t isr_ctx) { #if MCU_CRITICAL_BASEPRI __set_BASEPRI(isr_ctx); #else if (!isr_ctx) { __enable_irq(); } #endif } int os_arch_in_critical(void) { int ret; #if MCU_CRITICAL_BASEPRI ret = __get_BASEPRI() > 0; #else ret = __get_PRIMASK() & 1; #endif return ret; } static void os_arch_task_return_handler(void) { /* * If you are stuck here it means 
that task finished by * simple return which is not supported. */ while (1); } os_stack_t * os_arch_task_stack_init(struct os_task *t, os_stack_t *stack_top, int size) { int i; os_stack_t *s; struct stack_frame *sf; /* Get stack frame pointer */ s = (os_stack_t *) ((uint8_t *) stack_top - sizeof(*sf)); /* Zero out R1-R3, R12 */ for (i = 9; i < 13; ++i) { s[i] = 0; } /* Set registers R4 - R11 on stack. */ os_arch_init_task_stack(s); /* Set remaining portions of stack frame */ sf = (struct stack_frame *) s; sf->xpsr = INITIAL_xPSR; sf->pc = (uint32_t)t->t_func; sf->r0 = (uint32_t)t->t_arg; /* Set function to cache returns from tasks. */ sf->lr = (uint32_t)os_arch_task_return_handler; return (s); } void os_arch_init(void) { /* * Trap on divide-by-zero. */ SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk; os_init_idle_task(); } __attribute__((always_inline)) static inline void svc_os_arch_init(void) { SVC_Arg0(); SVC_Call(os_arch_init); } os_error_t os_arch_os_init(void) { os_error_t err; int i; /* Cannot be called within an ISR */ err = OS_ERR_IN_ISR; if (__get_IPSR() == 0) { err = OS_OK; /* Drop priority for all interrupts */ for (i = 0; i < sizeof(NVIC->IP); i++) { NVIC->IP[i] = -1; } /* * Install default interrupt handler for all interrupts except Reset, * which'll print out system state at the time of the interrupt, and * few other regs which should help in trying to figure out what went * wrong. 
*/ for (i = -NVIC_USER_IRQ_OFFSET + 2; i < NVIC_NUM_VECTORS - NVIC_USER_IRQ_OFFSET; i++) { NVIC_SetVector(i, (uint32_t)os_default_irq_asm); } /* Install our system interrupt handlers */ NVIC_SetVector(SVC_IRQ_NUMBER, (uint32_t)SVC_Handler); NVIC_SetVector(PendSV_IRQn, (uint32_t)PendSV_Handler); NVIC_SetVector(SysTick_IRQn, (uint32_t)SysTick_Handler); /* Set the PendSV interrupt exception priority to the lowest priority */ NVIC_SetPriority(PendSV_IRQn, PEND_SV_PRIO); /* Set the SVC interrupt to priority 0 (highest configurable) */ NVIC_SetPriority(SVC_IRQ_NUMBER, SVC_PRIO); /* Check if privileged or not */ if ((__get_CONTROL() & 1) == 0) { os_arch_init(); } else { svc_os_arch_init(); } } return err; } uint32_t os_arch_start(void) { struct os_task *t; /* Get the highest priority ready to run to set the current task */ t = os_sched_next_task(); os_sched_set_current_task(t); /* Adjust PSP so it looks like this task just took an exception */ __set_PSP((uint32_t)t->t_stackptr + offsetof(struct stack_frame, r0)); /* Intitialize and start system clock timer */ os_tick_init(OS_TICKS_PER_SEC, OS_TICK_PRIO); /* Mark the OS as started, right before we run our first task */ g_os_started = 1; /* Perform context switch */ os_arch_ctx_sw(t); return (uint32_t)(t->t_arg); } __attribute__((always_inline)) static inline void svc_os_arch_start(void) { SVC_Arg0(); SVC_Call(os_arch_start); } /** * Start the OS. First check to see if we are running with the correct stack * pointer set (PSP) and privilege mode (PRIV). * * * @return os_error_t */ os_error_t os_arch_os_start(void) { os_error_t err; /* * Set the os environment. This will set stack pointers and, based * on the contents of os_flags, will determine if the tasks run in * privileged or un-privileged mode. * * We switch to using "empty" part of idle task's stack until * the svc_os_arch_start() executes SVC, and we will never return. 
*/ os_set_env(g_idle_task.t_stackptr - 1); err = OS_ERR_IN_ISR; if (__get_IPSR() == 0) { /* * The following switch statement is really just a sanity check to * insure that the os initialization routine was called prior to the * os start routine. */ err = OS_OK; switch (__get_CONTROL() & 0x03) { /* * These two cases are for completeness. Thread mode should be set * to use PSP already. * * Fall-through intentional! */ case 0x00: case 0x01: err = OS_ERR_PRIV; break; case 0x02: /* * We are running in Privileged Thread mode w/SP = PSP but we * are supposed to be un-privileged. */ if ((os_flags & 1) == OS_RUN_UNPRIV) { err = OS_ERR_PRIV; } break; case 0x03: /* * We are running in Unprivileged Thread mode w/SP = PSP but we * are supposed to be privileged. */ if ((os_flags & 1) == OS_RUN_PRIV) { err = OS_ERR_PRIV; } break; } if (err == OS_OK) { /* Always start OS through SVC call */ svc_os_arch_start(); } } return err; }
{ "pile_set_name": "Github" }
package glfw //#define GLFW_INCLUDE_NONE //#include "glfw/include/GLFW/glfw3.h" import "C" const ( VersionMajor = C.GLFW_VERSION_MAJOR // This is incremented when the API is changed in non-compatible ways. VersionMinor = C.GLFW_VERSION_MINOR // This is incremented when features are added to the API but it remains backward-compatible. VersionRevision = C.GLFW_VERSION_REVISION // This is incremented when a bug fix release is made that does not contain any API changes. ) // Init initializes the GLFW library. Before most GLFW functions can be used, // GLFW must be initialized, and before a program terminates GLFW should be // terminated in order to free any resources allocated during or after // initialization. // // If this function fails, it calls Terminate before returning. If it succeeds, // you should call Terminate before the program exits. // // Additional calls to this function after successful initialization but before // termination will succeed but will do nothing. // // This function may take several seconds to complete on some systems, while on // other systems it may take only a fraction of a second to complete. // // On Mac OS X, this function will change the current directory of the // application to the Contents/Resources subdirectory of the application's // bundle, if present. // // This function may only be called from the main thread. func Init() error { C.glfwInit() return acceptError(APIUnavailable) } // Terminate destroys all remaining windows, frees any allocated resources and // sets the library to an uninitialized state. Once this is called, you must // again call Init successfully before you will be able to use most GLFW // functions. // // If GLFW has been successfully initialized, this function should be called // before the program exits. If initialization fails, there is no need to call // this function, as it is called by Init before it returns failure. // // This function may only be called from the main thread. 
func Terminate() {
	// Drain any queued GLFW errors before tearing the library down so they are
	// not lost (or misattributed) once the C side is uninitialized.
	flushErrors()
	C.glfwTerminate()
}

// GetVersion retrieves the major, minor and revision numbers of the GLFW
// library. It is intended for when you are using GLFW as a shared library and
// want to ensure that you are using the minimum required version.
//
// This function may be called before Init.
func GetVersion() (major, minor, revision int) {
	var (
		maj C.int
		min C.int
		rev C.int
	)
	// glfwGetVersion writes the three components through the out-pointers;
	// convert the C ints to Go ints for the caller.
	C.glfwGetVersion(&maj, &min, &rev)
	return int(maj), int(min), int(rev)
}

// GetVersionString returns a static string generated at compile-time according
// to which configuration macros were defined. This is intended for use when
// submitting bug reports, to allow developers to see which code paths are
// enabled in a binary.
//
// This function may be called before Init.
func GetVersionString() string {
	return C.GoString(C.glfwGetVersionString())
}
{ "pile_set_name": "Github" }
h1. Using this tool This page lets you create HTML by entering text in a simple format that's easy to read and write. * Type Textile text in the left window * See the HTML in the right Textile is a lightweight markup language billed as a "humane web text editor". It originated in the blogging software Textpattern. "It is described as":http://textile.thresholdstate.com/: bq. Textile takes plain text with *simple* markup and produces valid XHTML. It's used in web applications, content management systems, blogging software and online forums. Try it for yourself with the Textile quick reference and preview. This document is written in Textile; you can see the plain-text version on the left. To get a feel for Textile's syntax, type some text into the left window and watch the results in the right. You can see a Textile syntax guide by switching the right-hand window from _Preview_ to _Syntax Guide_. Textile-js is a JavaScript port of Textile. You can get the full source code from its "GitHub repository":https://github.com/borgar/js-textile. **Start with a "(clear)blank page (clear text)":# or edit this document in the left window.**
{ "pile_set_name": "Github" }
--- lang: en title: 'API docs: repository.defaultcrudrepository.includerelatedmodels' keywords: LoopBack 4.0, LoopBack 4, Node.js, TypeScript, OpenAPI sidebar: lb4_sidebar editurl: https://github.com/strongloop/loopback-next/tree/master/packages/repository permalink: /doc/en/lb4/apidocs.repository.defaultcrudrepository.includerelatedmodels.html --- <!-- Do not edit this file. It is automatically generated by API Documenter. --> [Home](./index.md) &gt; [@loopback/repository](./repository.md) &gt; [DefaultCrudRepository](./repository.defaultcrudrepository.md) &gt; [includeRelatedModels](./repository.defaultcrudrepository.includerelatedmodels.md) ## DefaultCrudRepository.includeRelatedModels() method Returns model instances that include related models of this repository that have a registered resolver. <b>Signature:</b> ```typescript protected includeRelatedModels(entities: T[], include?: Inclusion[], options?: Options): Promise<(T & Relations)[]>; ``` ## Parameters | Parameter | Type | Description | | --- | --- | --- | | entities | T\[\] | An array of entity instances or data | | include | [Inclusion](./filter.inclusion.md)<!-- -->\[\] | Inclusion filter | | options | [Options](./repository.options.md) | Options for the operations | <b>Returns:</b> Promise&lt;(T &amp; Relations)\[\]&gt;
{ "pile_set_name": "Github" }
<!-- Sample configuration file for an application-level cache integration. -->
<configuration>
  <application-config application-id="myapp"
                      enable-cache-exception="true"
                      default-region-name="default"
                      key-case-sensitivity="false">
    <cache-regions>
      <!-- Despite its name, this region is configured with a sliding
           expiration of 180 time units.
           NOTE(review): priority="Default" here vs. priority="default" below —
           case differs between the two regions; confirm whether the consumer
           treats attribute values case-insensitively. -->
      <region name="AbsoluteExpirationRegion"
              cache-name="mycache"
              priority="Default"
              expiration-type="sliding"
              expiration-period="180" />
      <!-- Fallback region referenced by default-region-name above: no
           expiration, synchronous operations. -->
      <region name="default"
              cache-name="mycache"
              priority="default"
              expiration-type="none"
              expiration-period="0"
              use-async="false" />
    </cache-regions>
    <database-dependencies>
      <!-- Invalidates cached Customer entities when the referenced row
           changes. NOTE(review): "depdency" in cache-key-format looks like a
           typo, but it is runtime-significant data and is preserved verbatim —
           confirm against whatever reads this key format before changing it. -->
      <dependency entity-name="nhibernator.BLL.Customer"
                  type="sql"
                  sql-statement="select ContactName from dbo.Customers where CustomerID =?"
                  cache-key-format="depdency.customer:[pk]"/>
    </database-dependencies>
  </application-config>
</configuration>
{ "pile_set_name": "Github" }
/*
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.asgard.codenarc

import org.codehaus.groovy.ast.PackageNode
import org.codenarc.rule.AbstractRule
import org.codenarc.source.SourceCode

/**
 * Makes sure there are no blank lines before the package declaration of a source code file.
 * One violation is reported per blank line found above the package statement.
 */
class BlankLineBeforePackageRule extends AbstractRule {

    String name = 'BlankLineBeforePackage'
    int priority = 3

    @Override
    void applyTo(SourceCode sourceCode, List violations) {
        // No package node (script / default package, or unparsable AST via ?.)
        // means there is nothing to check.
        PackageNode packageNode = sourceCode.ast?.package
        if (packageNode) {
            // packageNode.lineNumber is the AST's (1-based) line of the package
            // statement. NOTE(review): sourceCode.line(index) appears to be
            // indexed from 0 here, so this scans the lines above the
            // declaration — confirm the indexing contract against the CodeNarc
            // SourceCode API before modifying the bounds.
            for (int index = 0; index < packageNode.lineNumber; index++) {
                if (sourceCode.line(index).isEmpty()) {
                    violations.add(createViolation(index, sourceCode.line(index),
                            "Blank line precedes package declaration in file $sourceCode.name"))
                }
            }
        }
    }
}
{ "pile_set_name": "Github" }
package org.lamw.appjcentermikrotikrouterosdemo1;

import java.lang.reflect.Field;

import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.Rect;
import android.graphics.drawable.BitmapDrawable;
import android.graphics.drawable.ColorDrawable;
import android.graphics.drawable.Drawable;
import android.os.Handler;
import android.util.Log;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;

//-------------------------------------------------------------------------
//jImageBtn
//-------------------------------------------------------------------------

/**
 * LAMW (Lazarus Android Module Wizard) image-button widget.
 *
 * Draws one of two bitmaps ("up" / "down") depending on press state and
 * forwards click events back to the Pascal side through {@code Controls.pOnClick}.
 * NOTE(review): {@code Controls}, {@code jCommons} and {@code Const} are
 * project-local LAMW types not visible in this file.
 */
public class jImageBtn extends View {

    private Controls controls = null; // Control Class for Event (bridge to the Pascal runtime)
    private jCommons LAMWCommon;      // shared layout/parent helper for LAMW widgets
    private Paint mPaint = null;      // allocated in the constructor; not used by onDraw below
    private Bitmap bmpUp = null;      // bitmap drawn in the released state
    private Bitmap bmpDn = null;      // bitmap drawn while pressed
    private Rect rect;                // destination rectangle for drawing, sized to the bitmap
    private int btnState = 0;         // Normal/Up = 0 , Pressed = 1
    private Boolean enabled = true;   //
    private int sleep = 150;          // delay (ms) passed to the post-press Handler
    private int savedBackColor = Color.TRANSPARENT; // only referenced from commented-out code

    // Constructor: wires the view to the LAMW controls bridge and the Pascal object handle.
    public jImageBtn(android.content.Context context, Controls ctrls, long pasobj) {
        super(context);
        controls = ctrls;
        LAMWCommon = new jCommons(this, context, pasobj);
        mPaint = new Paint();
        rect = new Rect(0, 0, 200, 200); // placeholder size until a bitmap is loaded
    }

    /** Loads both state bitmaps from absolute file paths. */
    public void setButton(String fileup, String filedn) {
        setButtonUp(fileup);
        setButtonDown(filedn);
    }

    /**
     * Loads the "up" (released) bitmap from an absolute file path and resizes
     * the view's layout params to the bitmap dimensions.
     */
    public void setButtonUp(String fileup) {
        // /data/data/com.example.appimagebtndemo1/files/btn_red.jpg
        BitmapFactory.Options bo = new BitmapFactory.Options();
        if (bo != null) {
            bo.inScaled = false; // keep the file's native pixel size
            bmpUp = BitmapFactory.decodeFile(fileup, bo);
            if (bmpUp != null) {
                rect = new Rect(0, 0, bmpUp.getWidth(), bmpUp.getHeight());
                LAMWCommon.setLParamWidth(bmpUp.getWidth());
                LAMWCommon.setLParamHeight(bmpUp.getHeight());
                invalidate();
            }
        }
    }

    /** Loads the "down" (pressed) bitmap from an absolute file path. */
    public void setButtonDown(String filedn) {
        // /data/data/com.example.appimagebtndemo1/files/btn_blue.jpg
        BitmapFactory.Options bo = new BitmapFactory.Options();
        if (bo != null) {
            bo.inScaled = false;
            bmpDn = BitmapFactory.decodeFile(filedn, bo);
            if (bmpDn != null) {
                rect = new Rect(0, 0, bmpDn.getWidth(), bmpDn.getHeight());
                invalidate();
            }
        }
    }

    /**
     * Resolves a drawable resource name (e.g. "btn_red") to its generated
     * R.drawable id via reflection; returns 0 when not found.
     */
    private int GetDrawableResourceId(String _resName) {
        try {
            Class<?> res = R.drawable.class;
            Field field = res.getField(_resName); //"drawableName"
            if (field != null) {
                int drawableId = field.getInt(null);
                return drawableId;
            } else return 0;
        } catch (Exception e) {
            //Log.e("GetDrawableResourceId", "Failure to get drawable id.", e);
            return 0;
        }
    }

    /**
     * Fetches a Drawable for a resource id, using the API-level-appropriate
     * getDrawable overload. Returns null for id 0.
     * NOTE: the [ifdef_api21up] markers are LAMW build directives — do not remove.
     */
    public Drawable GetDrawableResourceById(int _resID) {
        if (_resID == 0) return null;
        Drawable res = null;
        if (android.os.Build.VERSION.SDK_INT < 21) {
            res = this.controls.activity.getResources().getDrawable(_resID);
        }
        //[ifdef_api21up]
        if (android.os.Build.VERSION.SDK_INT >= 21)
            res = this.controls.activity.getResources().getDrawable(_resID, null);
        //[endif_api21up]
        return res;
    }

    /* *** It does not load the images correctly according to the resolution *** *
     *
     public Bitmap GetBitmapResource(String _resourceDrawableIdentifier, boolean _inScaled) {
         int id = GetDrawableResourceId(_resourceDrawableIdentifier);
         BitmapFactory.Options bo = new BitmapFactory.Options();
         bo.inScaled = _inScaled; //false;
         return BitmapFactory.decodeResource(this.controls.activity.getResources(), id, bo);
     }*/

    /** Loads the "up" bitmap from a drawable resource name and resizes the view. */
    public void setButtonUpByRes(String resup) {
        // ..res/drawable
        //bmpUp = GetBitmapResource(resup, false);
        Drawable d = GetDrawableResourceById(GetDrawableResourceId(resup));
        if (d != null) {
            bmpUp = ((BitmapDrawable) d).getBitmap();
            if (bmpUp != null) {
                rect = new Rect(0, 0, bmpUp.getWidth(), bmpUp.getHeight());
                LAMWCommon.setLParamWidth(bmpUp.getWidth());
                LAMWCommon.setLParamHeight(bmpUp.getHeight());
                invalidate();
            }
        }
    }

    /** Loads the "down" bitmap from a drawable resource name. */
    public void setButtonDownByRes(String resdn) {
        //bmpDn = bmpUp = GetBitmapResource(resdn, false);
        Drawable d = GetDrawableResourceById(GetDrawableResourceId(resdn));
        if (d != null) {
            bmpDn = ((BitmapDrawable) d).getBitmap();
            if (bmpDn != null) {
                rect = new Rect(0, 0, bmpDn.getWidth(), bmpDn.getHeight());
                invalidate();
            }
        }
    }

    /**
     * Touch handling: DOWN shows the pressed bitmap and schedules an (empty)
     * delayed task; MOVE deliberately falls through to UP so the button does not
     * stay stuck in the pressed state, and UP fires the Pascal click callback
     * exactly once (guarded by btnState).
     */
    // @Override
    public boolean onTouchEvent(MotionEvent event) { //LORDMAN 2013-08-16
        if (enabled == false) {
            return false; // disabled buttons ignore touches entirely
        }
        int actType = event.getAction() & MotionEvent.ACTION_MASK;
        switch (actType) {
            case MotionEvent.ACTION_DOWN: {
                btnState = 1;
                //savedBackColor = GetBackgroundColor();
                //SetBackgroundColor(Color.YELLOW);
                invalidate();
                final Handler handler = new Handler();
                handler.postDelayed(new Runnable() {
                    @Override
                    public void run() {
                        // Do something after: 1s = 1000ms
                    }
                }, sleep); //1s = 1000ms
                break;
            }
            // intentional fall-through to ACTION_UP:
            case MotionEvent.ACTION_MOVE: // { break; }   Fixed the bug: The button stays down after clicking and moving
            case MotionEvent.ACTION_UP : {
                if (btnState == 1) { //try fix twice event!
                    btnState = 0;
                    controls.pOnClick(LAMWCommon.getPasObj(), Const.Click_Default);
                    //SetBackgroundColor(Color.GREEN);
                    invalidate();
                }
                break;
            }
        }
        return true; // consume all touch events
    }

    /** Draws the bitmap matching the current press state, scaled into 'rect'. */
    @Override
    public void onDraw(Canvas canvas) {
        if (btnState == 0) {
            if (bmpUp != null) {
                LAMWCommon.setLParamWidth(bmpUp.getWidth());
                LAMWCommon.setLParamHeight(bmpUp.getHeight());
                canvas.drawBitmap(bmpUp, null, rect, null);
            }
        } else {
            if (bmpDn != null) {
                LAMWCommon.setLParamWidth(bmpDn.getWidth());
                LAMWCommon.setLParamHeight(bmpDn.getHeight());
                canvas.drawBitmap(bmpDn, null, rect, null);
            }
        }
    }

    /** Sets the post-press Handler delay in milliseconds. */
    public void SetSleepDown(int _sleepMiliSeconds) {
        sleep = _sleepMiliSeconds;
    }

    /** Returns the background color if it is a plain ColorDrawable, else TRANSPARENT. */
    public int GetBackgroundColor() {
        int c = Color.TRANSPARENT;
        Drawable background = this.getBackground();
        if (background instanceof ColorDrawable) {
            c = ((ColorDrawable) this.getBackground()).getColor();
        } else {
            //if (mIsRounded = true) c = mBackgroundColor;
        }
        return c;
    }

    /** Sets a solid background color. NOTE(review): the 'this != null' guard is always true. */
    public void SetBackgroundColor(int _color) {
        if (this != null) {
            //mBackgroundColor = _color;
            this.setBackgroundColor(_color);
            //this.setAlpha(0.5f);
        }
    }

    /** Enables/disables touch handling (see onTouchEvent). */
    public void setEnabled(boolean value) {
        enabled = value;
    }

    /**
     * Releases bitmaps and LAMW resources. The view must not be drawn again
     * after this is called (bitmaps are recycled and fields nulled).
     */
    public void Free() {
        if (bmpUp != null) {
            bmpUp.recycle();
        }
        if (bmpDn != null) {
            bmpDn.recycle();
        }
        LAMWCommon.free();
        bmpUp = null;
        bmpDn = null;
        mPaint = null;
        rect = null;
    }

    // ---- thin delegates to the shared jCommons layout helper ----

    /** Returns the Pascal-side object handle. */
    public long GetPasObj() {
        return LAMWCommon.getPasObj();
    }

    public void setParent(ViewGroup _viewgroup) {
        LAMWCommon.setParent(_viewgroup);
    }

    public ViewGroup GetParent() {
        return LAMWCommon.getParent();
    }

    public void RemoveFromViewParent() {
        LAMWCommon.removeFromViewParent();
    }

    public void setLeftTopRightBottomWidthHeight(int left, int top, int right, int bottom, int w, int h) {
        LAMWCommon.setLeftTopRightBottomWidthHeight(left, top, right, bottom, w, h);
    }

    public void setLParamWidth(int w) {
        LAMWCommon.setLParamWidth(w);
    }

    public void setLParamHeight(int h) {
        LAMWCommon.setLParamHeight(h);
    }

    public int GetLParamHeight() {
        return LAMWCommon.getLParamHeight();
    }

    public int GetLParamWidth() {
        return LAMWCommon.getLParamWidth();
    }

    public void setLGravity(int _g) {
        LAMWCommon.setLGravity(_g);
    }

    public void setLWeight(float _w) {
        LAMWCommon.setLWeight(_w);
    }

    public void addLParamsAnchorRule(int rule) {
        LAMWCommon.addLParamsAnchorRule(rule);
    }

    public void addLParamsParentRule(int rule) {
        LAMWCommon.addLParamsParentRule(rule);
    }

    public void setLayoutAll(int idAnchor) {
        LAMWCommon.setLayoutAll(idAnchor);
    }

    public void ClearLayoutAll() {
        LAMWCommon.clearLayoutAll();
    }
}
{ "pile_set_name": "Github" }
#!/usr/bin/env julia --project
# Stably stratified atmospheric boundary layer LES driver (ClimateMachine.jl).
# Case configuration follows the references below.
## @article{10.1175/1520-0469(2000)057<1052:ALESSO>2.0.CO;2,
##  author = {Kosović, Branko and Curry, Judith A.},
##  title = "{A Large Eddy Simulation Study of a Quasi-Steady,
##            Stably Stratified Atmospheric Boundary Layer}",
##  journal = {Journal of the Atmospheric Sciences},
##  volume = {57},
##  number = {8},
##  pages = {1052-1068},
##  year = {2000},
##  month = {04},
##  issn = {0022-4928},
##  doi = {10.1175/1520-0469(2000)057<1052:ALESSO>2.0.CO;2},
##  url = {https://doi.org/10.1175/1520-0469(2000)057<1052:ALESSO>2.0.CO;2},
## }

## @article{doi:10.1029/2018MS001534,
##  author = {Nishizawa, S. and Kitamura, Y.},
##  title = {A Surface Flux Scheme Based on the Monin-Obukhov Similarity for Finite Volume Models},
##  journal = {Journal of Advances in Modeling Earth Systems},
##  volume = {10},
##  number = {12},
##  pages = {3159-3175},
##  year = {2018}
##  doi = {10.1029/2018MS001534},
##  url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2018MS001534},
## }

using ClimateMachine
# Initialize the framework; parse_clargs lets CLI flags override defaults.
ClimateMachine.init(parse_clargs = true)

using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.ConfigTypes
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.Diagnostics
using ClimateMachine.GenericCallbacks
using ClimateMachine.Mesh.Filters
using ClimateMachine.Mesh.Grids
using ClimateMachine.ODESolvers
using ClimateMachine.Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates

using Distributions
using Random
using StaticArrays
using Test
using DocStringExtensions
using LinearAlgebra

using CLIMAParameters
using CLIMAParameters.Planet: R_d, cp_d, cv_d, MSLP, grav, day

# Marker parameter-set type; all planetary constants come from CLIMAParameters defaults.
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Imported so the source-term methods below can extend atmos_source!.
import ClimateMachine.Atmos: atmos_source!, flux_second_order!
using ClimateMachine.Atmos: altitude, recover_thermo_state

"""
    StableBL Geostrophic Forcing (Source)

Coriolis relaxation toward a height-dependent geostrophic wind
`u_geo(z) = (u_geostrophic + u_slope * z, v_geostrophic, 0)`.
"""
struct StableBLGeostrophic{FT} <: Source
    "Coriolis parameter [s⁻¹]"
    f_coriolis::FT
    "Eastward geostrophic velocity `[m/s]` (Base)"
    u_geostrophic::FT
    "Eastward geostrophic velocity `[m/s]` (Slope)"
    u_slope::FT
    "Northward geostrophic velocity `[m/s]`"
    v_geostrophic::FT
end

# Adds -f ẑ × (ρu - ρ u_geo) to the momentum tendency (geostrophic balance forcing).
function atmos_source!(
    s::StableBLGeostrophic,
    atmos::AtmosModel,
    source::Vars,
    state::Vars,
    diffusive::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    f_coriolis = s.f_coriolis
    u_geostrophic = s.u_geostrophic
    u_slope = s.u_slope
    v_geostrophic = s.v_geostrophic

    z = altitude(atmos, aux)
    # Note z dependence of eastward geostrophic velocity
    u_geo = SVector(u_geostrophic + u_slope * z, v_geostrophic, 0)
    ẑ = vertical_unit_vector(atmos, aux)
    fkvector = f_coriolis * ẑ
    # Accumulate sources
    source.ρu -= fkvector × (state.ρu .- state.ρ * u_geo)
    return nothing
end

"""
    StableBL Sponge (Source)

Rayleigh-damping layer near the domain top: above `z_sponge` momentum is
relaxed toward the geostrophic wind with strength ramping as sin^γ.
"""
struct StableBLSponge{FT} <: Source
    "Maximum domain altitude (m)"
    z_max::FT
    "Altitude at with sponge starts (m)"
    z_sponge::FT
    "Sponge Strength 0 ⩽ α_max ⩽ 1"
    α_max::FT
    "Sponge exponent"
    γ::FT
    "Eastward geostrophic velocity `[m/s]` (Base)"
    u_geostrophic::FT
    "Eastward geostrophic velocity `[m/s]` (Slope)"
    u_slope::FT
    "Northward geostrophic velocity `[m/s]`"
    v_geostrophic::FT
end

# Applies the sponge damping -β(z) (ρu - ρ u_geo) above z_sponge.
function atmos_source!(
    s::StableBLSponge,
    atmos::AtmosModel,
    source::Vars,
    state::Vars,
    diffusive::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    z_max = s.z_max
    z_sponge = s.z_sponge
    α_max = s.α_max
    # NOTE(review): local γ is bound but the exponent below uses s.γ directly
    # (same value, harmless inconsistency).
    γ = s.γ
    u_geostrophic = s.u_geostrophic
    u_slope = s.u_slope
    v_geostrophic = s.v_geostrophic

    z = altitude(atmos, aux)
    u_geo = SVector(u_geostrophic + u_slope * z, v_geostrophic, 0)
    ẑ = vertical_unit_vector(atmos, aux)
    # Accumulate sources
    if z_sponge <= z
        # r ∈ [0, 1]: normalized depth into the sponge layer.
        r = (z - z_sponge) / (z_max - z_sponge)
        β_sponge = α_max * sinpi(r / 2)^s.γ
        source.ρu -= β_sponge * (state.ρu .- state.ρ * u_geo)
    end
    return nothing
end

"""
    Initial Condition for StableBoundaryLayer LES
"""
# Initializes the prognostic state at point (x, y, z): hydrostatic dry base
# state with θ_liq = 265 K below 100 m, increasing at 0.01 K/m above, a uniform
# 8 m/s eastward wind, and small random energy perturbations in the lowest 50 m.
function init_problem!(problem, bl, state, aux, (x, y, z), t)
    # Problem floating point precision
    FT = eltype(state)
    R_gas::FT = R_d(bl.param_set)
    c_p::FT = cp_d(bl.param_set)
    c_v::FT = cv_d(bl.param_set)
    p0::FT = MSLP(bl.param_set)
    _grav::FT = grav(bl.param_set)
    γ::FT = c_p / c_v # NOTE(review): computed but unused below (c_v / R_gas is used instead)
    # Initialise speeds [u = Eastward, v = Northward, w = Vertical]
    u::FT = 8
    v::FT = 0
    w::FT = 0
    # Assign piecewise quantities to θ_liq and q_tot
    θ_liq::FT = 0
    q_tot::FT = 0
    # Piecewise functions for potential temperature and total moisture
    z1 = FT(100)
    if z <= z1
        θ_liq = FT(265)
    else
        θ_liq = FT(265) + FT(0.01) * (z - z1)
    end
    θ = θ_liq
    π_exner = FT(1) - _grav / (c_p * θ) * z # exner pressure
    ρ = p0 / (R_gas * θ) * (π_exner)^(c_v / R_gas) # density
    # Establish thermodynamic state and moist phase partitioning
    TS = LiquidIcePotTempSHumEquil(bl.param_set, θ_liq, ρ, q_tot)
    # Compute momentum contributions
    ρu = ρ * u
    ρv = ρ * v
    ρw = ρ * w
    # Compute energy contributions
    e_kin = FT(1 // 2) * (u^2 + v^2 + w^2)
    e_pot = _grav * z
    ρe_tot = ρ * total_energy(e_kin, e_pot, TS)
    # Assign initial conditions for prognostic state variables
    state.ρ = ρ
    state.ρu = SVector(ρu, ρv, ρw)
    state.ρe = ρe_tot
    state.moisture.ρq_tot = ρ * q_tot
    if z <= FT(50) # Add random perturbations to bottom 50m of model
        state.ρe += rand() * ρe_tot / 100
    end
end

# Time-varying surface boundary temperature: θ_liq at the surface cools from
# 265 K at 0.25 K per hour. Uses the module-level `param_set` (not bl.param_set).
function surface_temperature_variation(state, aux, t)
    FT = eltype(state)
    ρ = state.ρ
    q_tot = state.moisture.ρq_tot / ρ
    θ_liq_sfc = FT(265) - FT(1 / 4) * (t / 3600)
    TS = LiquidIcePotTempSHumEquil(param_set, θ_liq_sfc, ρ, q_tot)
    return air_temperature(TS)
end

# Assembles the AtmosLESConfiguration for the stable-boundary-layer case:
# sources (gravity, sponge, geostrophic forcing), bulk surface fluxes with a
# drag law, Smagorinsky-Lilly SGS closure, and an explicit ODE solver.
function config_problem(::Type{FT}, N, resolution, xmax, ymax, zmax) where {FT}
    ics = init_problem!     # Initial conditions

    C_smag = FT(0.23)     # Smagorinsky coefficient
    C_drag = FT(0.001)    # Momentum exchange coefficient
    z_sponge = FT(300)     # Start of sponge layer
    α_max = FT(0.75)    # Strength of sponge layer (timescale)
    γ = 2           # Strength of sponge layer (exponent)
    u_geostrophic = FT(8)        # Eastward relaxation speed
    u_slope = FT(0)        # Slope of altitude-dependent relaxation speed
    v_geostrophic = FT(0)        # Northward relaxation speed
    f_coriolis = FT(1.39e-4) # Coriolis parameter
    q_sfc = FT(0)        # Surface specific humidity (dry case)
    u_star = FT(0.30)     # Friction velocity used by the drag law

    # Assemble source components
    source = (
        Gravity(),
        StableBLSponge{FT}(
            zmax,
            z_sponge,
            α_max,
            γ,
            u_geostrophic,
            u_slope,
            v_geostrophic,
        ),
        StableBLGeostrophic{FT}(
            f_coriolis,
            u_geostrophic,
            u_slope,
            v_geostrophic,
        ),
    )

    # Choose default IMEX solver
    ode_solver_type = ClimateMachine.ExplicitSolverType()

    # Set up problem initial and boundary conditions
    moisture_flux = FT(0)
    problem = AtmosProblem(
        init_state_prognostic = ics,
        boundarycondition = (
            AtmosBC(
                momentum = Impenetrable(DragLaw(
                    # normPu_int is the internal horizontal speed
                    # P represents the projection onto the horizontal
                    (state, aux, t, normPu_int) -> (u_star / normPu_int)^2,
                )),
                energy = BulkFormulaEnergy(
                    (state, aux, t, normPu_int) -> C_drag,
                    (state, aux, t) -> (
                        surface_temperature_variation(state, aux, t),
                        q_sfc,
                    ),
                ),
                moisture = BulkFormulaMoisture(
                    (state, aux, t, normPu_int) -> C_drag,
                    (state, aux, t) -> q_sfc,
                ),
            ),
            AtmosBC(), # default (free) top boundary
        ),
    )

    # Assemble model components
    model = AtmosModel{FT}(
        AtmosLESConfigType,
        param_set;
        problem = problem,
        turbulence = SmagorinskyLilly{FT}(C_smag),
        moisture = EquilMoist{FT}(; maxiter = 5, tolerance = FT(0.1)),
        source = source,
    )

    # Assemble configuration
    config = ClimateMachine.AtmosLESConfiguration(
        "StableBoundaryLayer",
        N,
        resolution,
        xmax,
        ymax,
        zmax,
        param_set,
        init_problem!,
        solver_type = ode_solver_type,
        model = model,
    )
    return config
end

# Sets up the default and core LES diagnostics groups, each emitted every
# 2500 steps.
function config_diagnostics(driver_config)
    default_dgngrp = setup_atmos_default_diagnostics(
        AtmosLESConfigType(),
        "2500steps",
        driver_config.name,
    )
    core_dgngrp = setup_atmos_core_diagnostics(
        AtmosLESConfigType(),
        "2500steps",
        driver_config.name,
    )
    return ClimateMachine.DiagnosticsConfiguration([
        default_dgngrp,
        core_dgngrp,
    ])
end

# Driver entry point: builds the configuration, installs a TMAR filter callback
# and a mass/energy conservation check, then runs the simulation.
function main()
    FT = Float64

    # DG polynomial order
    N = 4
    # Domain resolution and size
    Δh = FT(20)
    Δv = FT(20)
    resolution = (Δh, Δh, Δv)
    # Prescribe domain parameters
    xmax = FT(100)
    ymax = FT(100)
    zmax = FT(400)
    t0 = FT(0)
    # Full case nominally runs 9 hours; shortened here to 0.1 h for this driver.
    timeend = FT(3600 * 0.1)
    CFLmax = FT(0.4)

    driver_config = config_problem(FT, N, resolution, xmax, ymax, zmax)
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        init_on_cpu = true,
        Courant_number = CFLmax,
    )
    dgn_config = config_diagnostics(driver_config)

    # Clip negative total-moisture values each step (TMAR filter).
    cbtmarfilter = GenericCallbacks.EveryXSimulationSteps(1) do
        Filters.apply!(
            solver_config.Q,
            ("moisture.ρq_tot",),
            solver_config.dg.grid,
            TMARFilter(),
        )
        nothing
    end

    # State variable
    Q = solver_config.Q
    # Volume geometry information
    vgeo = driver_config.grid.vgeo
    M = vgeo[:, Grids._M, :]
    # Unpack prognostic vars
    ρ₀ = Q.ρ
    ρe₀ = Q.ρe
    # DG variable sums (initial mass- and energy-weighted integrals)
    Σρ₀ = sum(ρ₀ .* M)
    Σρe₀ = sum(ρe₀ .* M)

    # Periodically report relative drift in total mass and energy.
    cb_check_cons = GenericCallbacks.EveryXSimulationSteps(3000) do
        Q = solver_config.Q
        δρ = (sum(Q.ρ .* M) - Σρ₀) / Σρ₀
        δρe = (sum(Q.ρe .* M) .- Σρe₀) ./ Σρe₀
        @show (abs(δρ))
        @show (abs(δρe))
        nothing
    end

    result = ClimateMachine.invoke!(
        solver_config;
        diagnostics_config = dgn_config,
        user_callbacks = (cbtmarfilter, cb_check_cons),
        check_euclidean_distance = true,
    )
end

main()
{ "pile_set_name": "Github" }
#define DTM_IDCODE 0x01 /* * Identifies the release version of this part. */ #define DTM_IDCODE_VERSION_OFFSET 28 #define DTM_IDCODE_VERSION_LENGTH 4 #define DTM_IDCODE_VERSION (0xf << DTM_IDCODE_VERSION_OFFSET) /* * Identifies the designer's part number of this part. */ #define DTM_IDCODE_PARTNUMBER_OFFSET 12 #define DTM_IDCODE_PARTNUMBER_LENGTH 16 #define DTM_IDCODE_PARTNUMBER (0xffff << DTM_IDCODE_PARTNUMBER_OFFSET) /* * Identifies the designer/manufacturer of this part. Bits 6:0 must be * bits 6:0 of the designer/manufacturer's Identification Code as * assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16 * count of the number of continuation characters (0x7f) in that same * Identification Code. */ #define DTM_IDCODE_MANUFID_OFFSET 1 #define DTM_IDCODE_MANUFID_LENGTH 11 #define DTM_IDCODE_MANUFID (0x7ff << DTM_IDCODE_MANUFID_OFFSET) #define DTM_IDCODE_1_OFFSET 0 #define DTM_IDCODE_1_LENGTH 1 #define DTM_IDCODE_1 (0x1 << DTM_IDCODE_1_OFFSET) #define DTM_DTMCS 0x10 /* * Writing 1 to this bit does a hard reset of the DTM, * causing the DTM to forget about any outstanding DMI transactions. * In general this should only be used when the Debugger has * reason to expect that the outstanding DMI transaction will never * complete (e.g. a reset condition caused an inflight DMI transaction to * be cancelled). */ #define DTM_DTMCS_DMIHARDRESET_OFFSET 17 #define DTM_DTMCS_DMIHARDRESET_LENGTH 1 #define DTM_DTMCS_DMIHARDRESET (0x1 << DTM_DTMCS_DMIHARDRESET_OFFSET) /* * Writing 1 to this bit clears the sticky error state * and allows the DTM to retry or complete the previous * transaction. */ #define DTM_DTMCS_DMIRESET_OFFSET 16 #define DTM_DTMCS_DMIRESET_LENGTH 1 #define DTM_DTMCS_DMIRESET (0x1 << DTM_DTMCS_DMIRESET_OFFSET) /* * This is a hint to the debugger of the minimum number of * cycles a debugger should spend in * Run-Test/Idle after every DMI scan to avoid a `busy' * return code (\Fdmistat of 3). A debugger must still * check \Fdmistat when necessary. 
* * 0: It is not necessary to enter Run-Test/Idle at all. * * 1: Enter Run-Test/Idle and leave it immediately. * * 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving. * * And so on. */ #define DTM_DTMCS_IDLE_OFFSET 12 #define DTM_DTMCS_IDLE_LENGTH 3 #define DTM_DTMCS_IDLE (0x7 << DTM_DTMCS_IDLE_OFFSET) /* * 0: No error. * * 1: Reserved. Interpret the same as 2. * * 2: An operation failed (resulted in \Fop of 2). * * 3: An operation was attempted while a DMI access was still in * progress (resulted in \Fop of 3). */ #define DTM_DTMCS_DMISTAT_OFFSET 10 #define DTM_DTMCS_DMISTAT_LENGTH 2 #define DTM_DTMCS_DMISTAT (0x3 << DTM_DTMCS_DMISTAT_OFFSET) /* * The size of \Faddress in \Rdmi. */ #define DTM_DTMCS_ABITS_OFFSET 4 #define DTM_DTMCS_ABITS_LENGTH 6 #define DTM_DTMCS_ABITS (0x3f << DTM_DTMCS_ABITS_OFFSET) /* * 0: Version described in spec version 0.11. * * 1: Version described in spec version 0.13 (and later?), which * reduces the DMI data width to 32 bits. * * Other values are reserved for future use. */ #define DTM_DTMCS_VERSION_OFFSET 0 #define DTM_DTMCS_VERSION_LENGTH 4 #define DTM_DTMCS_VERSION (0xf << DTM_DTMCS_VERSION_OFFSET) #define DTM_DMI 0x11 /* * Address used for DMI access. In Update-DR this value is used * to access the DM over the DMI. */ #define DTM_DMI_ADDRESS_OFFSET 34 #define DTM_DMI_ADDRESS_LENGTH abits #define DTM_DMI_ADDRESS (((1L<<abits)-1) << DTM_DMI_ADDRESS_OFFSET) /* * The data to send to the DM over the DMI during Update-DR, and * the data returned from the DM as a result of the previous operation. */ #define DTM_DMI_DATA_OFFSET 2 #define DTM_DMI_DATA_LENGTH 32 #define DTM_DMI_DATA (0xffffffffL << DTM_DMI_DATA_OFFSET) /* * When the debugger writes this field, it has the following meaning: * * 0: Ignore \Fdata and \Faddress. (nop) * * Don't send anything over the DMI during Update-DR. * This operation should never result in a busy or error response. * The address and data reported in the following Capture-DR * are undefined. 
* * 1: Read from \Faddress. (read) * * 2: Write \Fdata to \Faddress. (write) * * 3: Reserved. * * When the debugger reads this field, it means the following: * * 0: The previous operation completed successfully. * * 1: Reserved. * * 2: A previous operation failed. The data scanned into \Rdmi in * this access will be ignored. This status is sticky and can be * cleared by writing \Fdmireset in \Rdtmcs. * * This indicates that the DM itself responded with an error, e.g. * in the System Bus and Serial Port overflow/underflow cases. * * 3: An operation was attempted while a DMI request is still in * progress. The data scanned into \Rdmi in this access will be * ignored. This status is sticky and can be cleared by writing * \Fdmireset in \Rdtmcs. If a debugger sees this status, it * needs to give the target more TCK edges between Update-DR and * Capture-DR. The simplest way to do that is to add extra transitions * in Run-Test/Idle. * * (The DTM, DM, and/or component may be in different clock domains, * so synchronization may be required. Some relatively fixed number of * TCK ticks may be needed for the request to reach the DM, complete, * and for the response to be synchronized back into the TCK domain.) */ #define DTM_DMI_OP_OFFSET 0 #define DTM_DMI_OP_LENGTH 2 #define DTM_DMI_OP (0x3L << DTM_DMI_OP_OFFSET) #define CSR_DCSR 0x7b0 /* * 0: There is no external debug support. * * 4: External debug support exists as it is described in this document. */ #define CSR_DCSR_XDEBUGVER_OFFSET 28 #define CSR_DCSR_XDEBUGVER_LENGTH 4 #define CSR_DCSR_XDEBUGVER (0xf << CSR_DCSR_XDEBUGVER_OFFSET) /* * When 1, {\tt ebreak} instructions in Machine Mode enter Debug Mode. */ #define CSR_DCSR_EBREAKM_OFFSET 15 #define CSR_DCSR_EBREAKM_LENGTH 1 #define CSR_DCSR_EBREAKM (0x1 << CSR_DCSR_EBREAKM_OFFSET) /* * When 1, {\tt ebreak} instructions in Hypervisor Mode enter Debug Mode. 
*/ #define CSR_DCSR_EBREAKH_OFFSET 14 #define CSR_DCSR_EBREAKH_LENGTH 1 #define CSR_DCSR_EBREAKH (0x1 << CSR_DCSR_EBREAKH_OFFSET) /* * When 1, {\tt ebreak} instructions in Supervisor Mode enter Debug Mode. */ #define CSR_DCSR_EBREAKS_OFFSET 13 #define CSR_DCSR_EBREAKS_LENGTH 1 #define CSR_DCSR_EBREAKS (0x1 << CSR_DCSR_EBREAKS_OFFSET) /* * When 1, {\tt ebreak} instructions in User/Application Mode enter * Debug Mode. */ #define CSR_DCSR_EBREAKU_OFFSET 12 #define CSR_DCSR_EBREAKU_LENGTH 1 #define CSR_DCSR_EBREAKU (0x1 << CSR_DCSR_EBREAKU_OFFSET) /* * 0: Increment counters as usual. * * 1: Don't increment any counters while in Debug Mode. This includes * the {\tt cycle} and {\tt instret} CSRs. This is preferred for most * debugging scenarios. * * An implementation may choose not to support writing to this bit. * The debugger must read back the value it writes to check whether * the feature is supported. */ #define CSR_DCSR_STOPCOUNT_OFFSET 10 #define CSR_DCSR_STOPCOUNT_LENGTH 1 #define CSR_DCSR_STOPCOUNT (0x1 << CSR_DCSR_STOPCOUNT_OFFSET) /* * 0: Increment timers as usual. * * 1: Don't increment any hart-local timers while in Debug Mode. * * An implementation may choose not to support writing to this bit. * The debugger must read back the value it writes to check whether * the feature is supported. */ #define CSR_DCSR_STOPTIME_OFFSET 9 #define CSR_DCSR_STOPTIME_LENGTH 1 #define CSR_DCSR_STOPTIME (0x1 << CSR_DCSR_STOPTIME_OFFSET) /* * Explains why Debug Mode was entered. * * When there are multiple reasons to enter Debug Mode in a single * cycle, the cause with the highest priority is the one written. * * 1: An {\tt ebreak} instruction was executed. (priority 3) * * 2: The Trigger Module caused a halt. (priority 4) * * 3: \Fhaltreq was set. (priority 2) * * 4: The hart single stepped because \Fstep was set. (priority 1) * * Other values are reserved for future use. 
*/ #define CSR_DCSR_CAUSE_OFFSET 6 #define CSR_DCSR_CAUSE_LENGTH 3 #define CSR_DCSR_CAUSE (0x7 << CSR_DCSR_CAUSE_OFFSET) /* * When set and not in Debug Mode, the hart will only execute a single * instruction and then enter Debug Mode. * Interrupts are disabled when this bit is set. * If the instruction does not complete due to an exception, * the hart will immediately enter Debug Mode before executing * the trap handler, with appropriate exception registers set. */ #define CSR_DCSR_STEP_OFFSET 2 #define CSR_DCSR_STEP_LENGTH 1 #define CSR_DCSR_STEP (0x1 << CSR_DCSR_STEP_OFFSET) /* * Contains the privilege level the hart was operating in when Debug * Mode was entered. The encoding is described in Table * \ref{tab:privlevel}. A debugger can change this value to change * the hart's privilege level when exiting Debug Mode. * * Not all privilege levels are supported on all harts. If the * encoding written is not supported or the debugger is not allowed to * change to it, the hart may change to any supported privilege level. */ #define CSR_DCSR_PRV_OFFSET 0 #define CSR_DCSR_PRV_LENGTH 2 #define CSR_DCSR_PRV (0x3 << CSR_DCSR_PRV_OFFSET) #define CSR_DPC 0x7b1 #define CSR_DPC_DPC_OFFSET 0 #define CSR_DPC_DPC_LENGTH XLEN #define CSR_DPC_DPC (((1L<<XLEN)-1) << CSR_DPC_DPC_OFFSET) #define CSR_DSCRATCH0 0x7b2 #define CSR_DSCRATCH1 0x7b3 #define CSR_TSELECT 0x7a0 #define CSR_TSELECT_INDEX_OFFSET 0 #define CSR_TSELECT_INDEX_LENGTH XLEN #define CSR_TSELECT_INDEX (((1L<<XLEN)-1) << CSR_TSELECT_INDEX_OFFSET) #define CSR_TDATA1 0x7a1 /* * 0: There is no trigger at this \Rtselect. * * 1: The trigger is a legacy SiFive address match trigger. These * should not be implemented and aren't further documented here. * * 2: The trigger is an address/data match trigger. The remaining bits * in this register act as described in \Rmcontrol. * * 3: The trigger is an instruction count trigger. The remaining bits * in this register act as described in \Ricount. 
* * 15: This trigger exists (so enumeration shouldn't terminate), but * is not currently available. * * Other values are reserved for future use. */ #define CSR_TDATA1_TYPE_OFFSET XLEN-4 #define CSR_TDATA1_TYPE_LENGTH 4 #define CSR_TDATA1_TYPE (0xfL << CSR_TDATA1_TYPE_OFFSET) /* * 0: Both Debug and M Mode can write the {\tt tdata} registers at the * selected \Rtselect. * * 1: Only Debug Mode can write the {\tt tdata} registers at the * selected \Rtselect. Writes from other modes are ignored. * * This bit is only writable from Debug Mode. */ #define CSR_TDATA1_HMODE_OFFSET XLEN-5 #define CSR_TDATA1_HMODE_LENGTH 1 #define CSR_TDATA1_HMODE (0x1L << CSR_TDATA1_HMODE_OFFSET) /* * Trigger-specific data. */ #define CSR_TDATA1_DATA_OFFSET 0 #define CSR_TDATA1_DATA_LENGTH XLEN - 5 #define CSR_TDATA1_DATA (((1L<<XLEN - 5)-1) << CSR_TDATA1_DATA_OFFSET) #define CSR_TDATA2 0x7a2 #define CSR_TDATA2_DATA_OFFSET 0 #define CSR_TDATA2_DATA_LENGTH XLEN #define CSR_TDATA2_DATA (((1L<<XLEN)-1) << CSR_TDATA2_DATA_OFFSET) #define CSR_TDATA3 0x7a3 #define CSR_TDATA3_DATA_OFFSET 0 #define CSR_TDATA3_DATA_LENGTH XLEN #define CSR_TDATA3_DATA (((1L<<XLEN)-1) << CSR_TDATA3_DATA_OFFSET) #define CSR_MCONTROL 0x7a1 #define CSR_MCONTROL_TYPE_OFFSET XLEN-4 #define CSR_MCONTROL_TYPE_LENGTH 4 #define CSR_MCONTROL_TYPE (0xfL << CSR_MCONTROL_TYPE_OFFSET) #define CSR_MCONTROL_DMODE_OFFSET XLEN-5 #define CSR_MCONTROL_DMODE_LENGTH 1 #define CSR_MCONTROL_DMODE (0x1L << CSR_MCONTROL_DMODE_OFFSET) /* * Specifies the largest naturally aligned powers-of-two (NAPOT) range * supported by the hardware. The value is the logarithm base 2 of the * number of bytes in that range. A value of 0 indicates that only * exact value matches are supported (one byte range). A value of 63 * corresponds to the maximum NAPOT range, which is $2^{63}$ bytes in * size. 
*/ #define CSR_MCONTROL_MASKMAX_OFFSET XLEN-11 #define CSR_MCONTROL_MASKMAX_LENGTH 6 #define CSR_MCONTROL_MASKMAX (0x3fL << CSR_MCONTROL_MASKMAX_OFFSET) /* * 0: Perform a match on the virtual address. * * 1: Perform a match on the data value loaded/stored, or the * instruction executed. */ #define CSR_MCONTROL_SELECT_OFFSET 19 #define CSR_MCONTROL_SELECT_LENGTH 1 #define CSR_MCONTROL_SELECT (0x1L << CSR_MCONTROL_SELECT_OFFSET) /* * 0: The action for this trigger will be taken just before the * instruction that triggered it is executed, but after all preceding * instructions are are committed. * * 1: The action for this trigger will be taken after the instruction * that triggered it is executed. It should be taken before the next * instruction is executed, but it is better to implement triggers and * not implement that suggestion than to not implement them at all. * * Most hardware will only implement one timing or the other, possibly * dependent on \Fselect, \Fexecute, \Fload, and \Fstore. This bit * primarily exists for the hardware to communicate to the debugger * what will happen. Hardware may implement the bit fully writable, in * which case the debugger has a little more control. * * Data load triggers with \Ftiming of 0 will result in the same load * happening again when the debugger lets the core run. For data load * triggers, debuggers must first attempt to set the breakpoint with * \Ftiming of 1. * * A chain of triggers that don't all have the same \Ftiming value * will never fire (unless consecutive instructions match the * appropriate triggers). */ #define CSR_MCONTROL_TIMING_OFFSET 18 #define CSR_MCONTROL_TIMING_LENGTH 1 #define CSR_MCONTROL_TIMING (0x1L << CSR_MCONTROL_TIMING_OFFSET) /* * Determines what happens when this trigger matches. * * 0: Raise a breakpoint exception. (Used when software wants to use * the trigger module without an external debugger attached.) * * 1: Enter Debug Mode. (Only supported when \Fhmode is 1.) * * 2: Start tracing. 
* * 3: Stop tracing. * * 4: Emit trace data for this match. If it is a data access match, * emit appropriate Load/Store Address/Data. If it is an instruction * execution, emit its PC. * * Other values are reserved for future use. */ #define CSR_MCONTROL_ACTION_OFFSET 12 #define CSR_MCONTROL_ACTION_LENGTH 6 #define CSR_MCONTROL_ACTION (0x3fL << CSR_MCONTROL_ACTION_OFFSET) /* * 0: When this trigger matches, the configured action is taken. * * 1: While this trigger does not match, it prevents the trigger with * the next index from matching. */ #define CSR_MCONTROL_CHAIN_OFFSET 11 #define CSR_MCONTROL_CHAIN_LENGTH 1 #define CSR_MCONTROL_CHAIN (0x1L << CSR_MCONTROL_CHAIN_OFFSET) /* * 0: Matches when the value equals \Rtdatatwo. * * 1: Matches when the top M bits of the value match the top M bits of * \Rtdatatwo. M is XLEN-1 minus the index of the least-significant * bit containing 0 in \Rtdatatwo. * * 2: Matches when the value is greater than or equal to \Rtdatatwo. * * 3: Matches when the value is less than \Rtdatatwo. * * 4: Matches when the lower half of the value equals the lower half * of \Rtdatatwo after the lower half of the value is ANDed with the * upper half of \Rtdatatwo. * * 5: Matches when the upper half of the value equals the lower half * of \Rtdatatwo after the upper half of the value is ANDed with the * upper half of \Rtdatatwo. * * Other values are reserved for future use. */ #define CSR_MCONTROL_MATCH_OFFSET 7 #define CSR_MCONTROL_MATCH_LENGTH 4 #define CSR_MCONTROL_MATCH (0xfL << CSR_MCONTROL_MATCH_OFFSET) /* * When set, enable this trigger in M mode. */ #define CSR_MCONTROL_M_OFFSET 6 #define CSR_MCONTROL_M_LENGTH 1 #define CSR_MCONTROL_M (0x1L << CSR_MCONTROL_M_OFFSET) /* * When set, enable this trigger in H mode. */ #define CSR_MCONTROL_H_OFFSET 5 #define CSR_MCONTROL_H_LENGTH 1 #define CSR_MCONTROL_H (0x1L << CSR_MCONTROL_H_OFFSET) /* * When set, enable this trigger in S mode. 
*/ #define CSR_MCONTROL_S_OFFSET 4 #define CSR_MCONTROL_S_LENGTH 1 #define CSR_MCONTROL_S (0x1L << CSR_MCONTROL_S_OFFSET) /* * When set, enable this trigger in U mode. */ #define CSR_MCONTROL_U_OFFSET 3 #define CSR_MCONTROL_U_LENGTH 1 #define CSR_MCONTROL_U (0x1L << CSR_MCONTROL_U_OFFSET) /* * When set, the trigger fires on the virtual address or opcode of an * instruction that is executed. */ #define CSR_MCONTROL_EXECUTE_OFFSET 2 #define CSR_MCONTROL_EXECUTE_LENGTH 1 #define CSR_MCONTROL_EXECUTE (0x1L << CSR_MCONTROL_EXECUTE_OFFSET) /* * When set, the trigger fires on the virtual address or data of a store. */ #define CSR_MCONTROL_STORE_OFFSET 1 #define CSR_MCONTROL_STORE_LENGTH 1 #define CSR_MCONTROL_STORE (0x1L << CSR_MCONTROL_STORE_OFFSET) /* * When set, the trigger fires on the virtual address or data of a load. */ #define CSR_MCONTROL_LOAD_OFFSET 0 #define CSR_MCONTROL_LOAD_LENGTH 1 #define CSR_MCONTROL_LOAD (0x1L << CSR_MCONTROL_LOAD_OFFSET) #define CSR_ICOUNT 0x7a1 #define CSR_ICOUNT_TYPE_OFFSET XLEN-4 #define CSR_ICOUNT_TYPE_LENGTH 4 #define CSR_ICOUNT_TYPE (0xfL << CSR_ICOUNT_TYPE_OFFSET) #define CSR_ICOUNT_DMODE_OFFSET XLEN-5 #define CSR_ICOUNT_DMODE_LENGTH 1 #define CSR_ICOUNT_DMODE (0x1L << CSR_ICOUNT_DMODE_OFFSET) /* * When count is decremented to 0, the trigger fires. Instead of * changing \Fcount from 1 to 0, it is also acceptable for hardware to * clear \Fm, \Fh, \Fs, and \Fu. This allows \Fcount to be hard-wired * to 1 if this register just exists for single step. */ #define CSR_ICOUNT_COUNT_OFFSET 10 #define CSR_ICOUNT_COUNT_LENGTH 14 #define CSR_ICOUNT_COUNT (0x3fffL << CSR_ICOUNT_COUNT_OFFSET) /* * When set, every instruction completed or exception taken in M mode decrements \Fcount * by 1. */ #define CSR_ICOUNT_M_OFFSET 9 #define CSR_ICOUNT_M_LENGTH 1 #define CSR_ICOUNT_M (0x1L << CSR_ICOUNT_M_OFFSET) /* * When set, every instruction completed or exception taken in in H mode decrements \Fcount * by 1. 
*/ #define CSR_ICOUNT_H_OFFSET 8 #define CSR_ICOUNT_H_LENGTH 1 #define CSR_ICOUNT_H (0x1L << CSR_ICOUNT_H_OFFSET) /* * When set, every instruction completed or exception taken in S mode decrements \Fcount * by 1. */ #define CSR_ICOUNT_S_OFFSET 7 #define CSR_ICOUNT_S_LENGTH 1 #define CSR_ICOUNT_S (0x1L << CSR_ICOUNT_S_OFFSET) /* * When set, every instruction completed or exception taken in U mode decrements \Fcount * by 1. */ #define CSR_ICOUNT_U_OFFSET 6 #define CSR_ICOUNT_U_LENGTH 1 #define CSR_ICOUNT_U (0x1L << CSR_ICOUNT_U_OFFSET) /* * Determines what happens when this trigger matches. * * 0: Raise a breakpoint exception. (Used when software wants to use the * trigger module without an external debugger attached.) * * 1: Enter Debug Mode. (Only supported when \Fhmode is 1.) * * 2: Start tracing. * * 3: Stop tracing. * * 4: Emit trace data for this match. If it is a data access match, * emit appropriate Load/Store Address/Data. If it is an instruction * execution, emit its PC. * * Other values are reserved for future use. */ #define CSR_ICOUNT_ACTION_OFFSET 0 #define CSR_ICOUNT_ACTION_LENGTH 6 #define CSR_ICOUNT_ACTION (0x3fL << CSR_ICOUNT_ACTION_OFFSET) #define DMI_DMSTATUS 0x11 /* * This field is 1 when all currently selected harts have acknowledged the previous \Fresumereq. */ #define DMI_DMSTATUS_ALLRESUMEACK_OFFSET 17 #define DMI_DMSTATUS_ALLRESUMEACK_LENGTH 1 #define DMI_DMSTATUS_ALLRESUMEACK (0x1 << DMI_DMSTATUS_ALLRESUMEACK_OFFSET) /* * This field is 1 when any currently selected hart has acknowledged the previous \Fresumereq. */ #define DMI_DMSTATUS_ANYRESUMEACK_OFFSET 16 #define DMI_DMSTATUS_ANYRESUMEACK_LENGTH 1 #define DMI_DMSTATUS_ANYRESUMEACK (0x1 << DMI_DMSTATUS_ANYRESUMEACK_OFFSET) /* * This field is 1 when all currently selected harts do not exist in this system. 
*/ #define DMI_DMSTATUS_ALLNONEXISTENT_OFFSET 15 #define DMI_DMSTATUS_ALLNONEXISTENT_LENGTH 1 #define DMI_DMSTATUS_ALLNONEXISTENT (0x1 << DMI_DMSTATUS_ALLNONEXISTENT_OFFSET) /* * This field is 1 when any currently selected hart does not exist in this system. */ #define DMI_DMSTATUS_ANYNONEXISTENT_OFFSET 14 #define DMI_DMSTATUS_ANYNONEXISTENT_LENGTH 1 #define DMI_DMSTATUS_ANYNONEXISTENT (0x1 << DMI_DMSTATUS_ANYNONEXISTENT_OFFSET) /* * This field is 1 when all currently selected harts are unavailable. */ #define DMI_DMSTATUS_ALLUNAVAIL_OFFSET 13 #define DMI_DMSTATUS_ALLUNAVAIL_LENGTH 1 #define DMI_DMSTATUS_ALLUNAVAIL (0x1 << DMI_DMSTATUS_ALLUNAVAIL_OFFSET) /* * This field is 1 when any currently selected hart is unavailable. */ #define DMI_DMSTATUS_ANYUNAVAIL_OFFSET 12 #define DMI_DMSTATUS_ANYUNAVAIL_LENGTH 1 #define DMI_DMSTATUS_ANYUNAVAIL (0x1 << DMI_DMSTATUS_ANYUNAVAIL_OFFSET) /* * This field is 1 when all currently selected harts are running. */ #define DMI_DMSTATUS_ALLRUNNING_OFFSET 11 #define DMI_DMSTATUS_ALLRUNNING_LENGTH 1 #define DMI_DMSTATUS_ALLRUNNING (0x1 << DMI_DMSTATUS_ALLRUNNING_OFFSET) /* * This field is 1 when any currently selected hart is running. */ #define DMI_DMSTATUS_ANYRUNNING_OFFSET 10 #define DMI_DMSTATUS_ANYRUNNING_LENGTH 1 #define DMI_DMSTATUS_ANYRUNNING (0x1 << DMI_DMSTATUS_ANYRUNNING_OFFSET) /* * This field is 1 when all currently selected harts are halted. */ #define DMI_DMSTATUS_ALLHALTED_OFFSET 9 #define DMI_DMSTATUS_ALLHALTED_LENGTH 1 #define DMI_DMSTATUS_ALLHALTED (0x1 << DMI_DMSTATUS_ALLHALTED_OFFSET) /* * This field is 1 when any currently selected hart is halted. */ #define DMI_DMSTATUS_ANYHALTED_OFFSET 8 #define DMI_DMSTATUS_ANYHALTED_LENGTH 1 #define DMI_DMSTATUS_ANYHALTED (0x1 << DMI_DMSTATUS_ANYHALTED_OFFSET) /* * 0 when authentication is required before using the DM. 1 when the * authentication check has passed. On components that don't implement * authentication, this bit must be preset as 1. 
*/ #define DMI_DMSTATUS_AUTHENTICATED_OFFSET 7 #define DMI_DMSTATUS_AUTHENTICATED_LENGTH 1 #define DMI_DMSTATUS_AUTHENTICATED (0x1 << DMI_DMSTATUS_AUTHENTICATED_OFFSET) /* * 0: The authentication module is ready to process the next * read/write to \Rauthdata. * * 1: The authentication module is busy. Accessing \Rauthdata results * in unspecified behavior. * * \Fauthbusy only becomes set in immediate response to an access to * \Rauthdata. */ #define DMI_DMSTATUS_AUTHBUSY_OFFSET 6 #define DMI_DMSTATUS_AUTHBUSY_LENGTH 1 #define DMI_DMSTATUS_AUTHBUSY (0x1 << DMI_DMSTATUS_AUTHBUSY_OFFSET) #define DMI_DMSTATUS_CFGSTRVALID_OFFSET 4 #define DMI_DMSTATUS_CFGSTRVALID_LENGTH 1 #define DMI_DMSTATUS_CFGSTRVALID (0x1 << DMI_DMSTATUS_CFGSTRVALID_OFFSET) /* * 0: There is no Debug Module present. * * 1: There is a Debug Module and it conforms to version 0.11 of this * specification. * * 2: There is a Debug Module and it conforms to version 0.13 of this * specification. */ #define DMI_DMSTATUS_VERSION_OFFSET 0 #define DMI_DMSTATUS_VERSION_LENGTH 4 #define DMI_DMSTATUS_VERSION (0xf << DMI_DMSTATUS_VERSION_OFFSET) #define DMI_DMCONTROL 0x10 /* * Halt request signal for all currently selected harts. When set to * 1, each selected hart will halt if it is not currently halted. * * Writing 1 or 0 has no effect on a hart which is already halted, but * the bit should be cleared to 0 before the hart is resumed. * Setting both \Fhaltreq and \Fresumereq leads to undefined behavior. * * Writes apply to the new value of \Fhartsel and \Fhasel. */ #define DMI_DMCONTROL_HALTREQ_OFFSET 31 #define DMI_DMCONTROL_HALTREQ_LENGTH 1 #define DMI_DMCONTROL_HALTREQ (0x1 << DMI_DMCONTROL_HALTREQ_OFFSET) /* * Resume request signal for all currently selected harts. When set to 1, * each selected hart will resume if it is currently halted. * Setting both \Fhaltreq and \Fresumereq leads to undefined behavior. * * Writes apply to the new value of \Fhartsel and \Fhasel. 
*/ #define DMI_DMCONTROL_RESUMEREQ_OFFSET 30 #define DMI_DMCONTROL_RESUMEREQ_LENGTH 1 #define DMI_DMCONTROL_RESUMEREQ (0x1 << DMI_DMCONTROL_RESUMEREQ_OFFSET) /* * This optional bit controls reset to all the currently selected harts. * To perform a reset the debugger writes 1, and then writes 0 to * deassert the reset signal. * * If this feature is not implemented, the bit always stays 0, so * after writing 1 the debugger can read the register back to see if * the feature is supported. * * Writes apply to the new value of \Fhartsel and \Fhasel. */ #define DMI_DMCONTROL_HARTRESET_OFFSET 29 #define DMI_DMCONTROL_HARTRESET_LENGTH 1 #define DMI_DMCONTROL_HARTRESET (0x1 << DMI_DMCONTROL_HARTRESET_OFFSET) /* * Selects the definition of currently selected harts. * * 0: There is a single currently selected hart, that selected by \Fhartsel. * * 1: There may be multiple currently selected harts -- that selected by \Fhartsel, * plus those selected by the hart array mask register. * * An implementation which does not implement the hart array mask register * should tie this field to 0. A debugger which wishes to use the hart array * mask register feature should set this bit and read back to see if the functionality * is supported. */ #define DMI_DMCONTROL_HASEL_OFFSET 26 #define DMI_DMCONTROL_HASEL_LENGTH 1 #define DMI_DMCONTROL_HASEL (0x1 << DMI_DMCONTROL_HASEL_OFFSET) /* * The DM-specific index of the hart to select. This hart is always part of the * currently selected harts. */ #define DMI_DMCONTROL_HARTSEL_OFFSET 16 #define DMI_DMCONTROL_HARTSEL_LENGTH 10 #define DMI_DMCONTROL_HARTSEL (0x3ff << DMI_DMCONTROL_HARTSEL_OFFSET) /* * This bit controls the reset signal from the DM to the rest of the * system. To perform a system reset the debugger writes 1, * and then writes 0 * to deassert the reset. This bit must not reset the Debug Module * registers. What it does reset is platform-specific (it may * reset nothing). 
*/ #define DMI_DMCONTROL_NDMRESET_OFFSET 1 #define DMI_DMCONTROL_NDMRESET_LENGTH 1 #define DMI_DMCONTROL_NDMRESET (0x1 << DMI_DMCONTROL_NDMRESET_OFFSET) /* * This bit serves as a reset signal for the Debug Module itself. * * 0: The module's state, including authentication mechanism, * takes its reset values (the \Fdmactive bit is the only bit which can * be written to something other than its reset value). * * 1: The module functions normally. * * No other mechanism should exist that may result in resetting the * Debug Module after power up, including the platform's system reset * or Debug Transport reset signals. * * A debugger may pulse this bit low to get the debug module into a * known state. * * Implementations may use this bit to aid debugging, for example by * preventing the Debug Module from being power gated while debugging * is active. */ #define DMI_DMCONTROL_DMACTIVE_OFFSET 0 #define DMI_DMCONTROL_DMACTIVE_LENGTH 1 #define DMI_DMCONTROL_DMACTIVE (0x1 << DMI_DMCONTROL_DMACTIVE_OFFSET) #define DMI_HARTINFO 0x12 /* * Number of {\tt dscratch} registers available for the debugger * to use during program buffer execution, starting from \Rdscratchzero. * The debugger can make no assumptions about the contents of these * registers between commands. */ #define DMI_HARTINFO_NSCRATCH_OFFSET 20 #define DMI_HARTINFO_NSCRATCH_LENGTH 4 #define DMI_HARTINFO_NSCRATCH (0xf << DMI_HARTINFO_NSCRATCH_OFFSET) /* * 0: The {\tt data} registers are shadowed in the hart by CSR * registers. Each CSR register is XLEN bits in size, and corresponds * to a single argument, per Table~\ref{tab:datareg}. * * 1: The {\tt data} registers are shadowed in the hart's memory map. * Each register takes up 4 bytes in the memory map. */ #define DMI_HARTINFO_DATAACCESS_OFFSET 16 #define DMI_HARTINFO_DATAACCESS_LENGTH 1 #define DMI_HARTINFO_DATAACCESS (0x1 << DMI_HARTINFO_DATAACCESS_OFFSET) /* * If \Fdataaccess is 0: Number of CSR registers dedicated to * shadowing the {\tt data} registers. 
* * If \Fdataaccess is 1: Number of 32-bit words in the memory map * dedicated to shadowing the {\tt data} registers. */ #define DMI_HARTINFO_DATASIZE_OFFSET 12 #define DMI_HARTINFO_DATASIZE_LENGTH 4 #define DMI_HARTINFO_DATASIZE (0xf << DMI_HARTINFO_DATASIZE_OFFSET) /* * If \Fdataaccess is 0: The number of the first CSR dedicated to * shadowing the {\tt data} registers. * * If \Fdataaccess is 1: Signed address of RAM where the {\tt data} * registers are shadowed. */ #define DMI_HARTINFO_DATAADDR_OFFSET 0 #define DMI_HARTINFO_DATAADDR_LENGTH 12 #define DMI_HARTINFO_DATAADDR (0xfff << DMI_HARTINFO_DATAADDR_OFFSET) #define DMI_HALTSUM 0x13 #define DMI_HALTSUM_HALT1023_992_OFFSET 31 #define DMI_HALTSUM_HALT1023_992_LENGTH 1 #define DMI_HALTSUM_HALT1023_992 (0x1 << DMI_HALTSUM_HALT1023_992_OFFSET) #define DMI_HALTSUM_HALT991_960_OFFSET 30 #define DMI_HALTSUM_HALT991_960_LENGTH 1 #define DMI_HALTSUM_HALT991_960 (0x1 << DMI_HALTSUM_HALT991_960_OFFSET) #define DMI_HALTSUM_HALT959_928_OFFSET 29 #define DMI_HALTSUM_HALT959_928_LENGTH 1 #define DMI_HALTSUM_HALT959_928 (0x1 << DMI_HALTSUM_HALT959_928_OFFSET) #define DMI_HALTSUM_HALT927_896_OFFSET 28 #define DMI_HALTSUM_HALT927_896_LENGTH 1 #define DMI_HALTSUM_HALT927_896 (0x1 << DMI_HALTSUM_HALT927_896_OFFSET) #define DMI_HALTSUM_HALT895_864_OFFSET 27 #define DMI_HALTSUM_HALT895_864_LENGTH 1 #define DMI_HALTSUM_HALT895_864 (0x1 << DMI_HALTSUM_HALT895_864_OFFSET) #define DMI_HALTSUM_HALT863_832_OFFSET 26 #define DMI_HALTSUM_HALT863_832_LENGTH 1 #define DMI_HALTSUM_HALT863_832 (0x1 << DMI_HALTSUM_HALT863_832_OFFSET) #define DMI_HALTSUM_HALT831_800_OFFSET 25 #define DMI_HALTSUM_HALT831_800_LENGTH 1 #define DMI_HALTSUM_HALT831_800 (0x1 << DMI_HALTSUM_HALT831_800_OFFSET) #define DMI_HALTSUM_HALT799_768_OFFSET 24 #define DMI_HALTSUM_HALT799_768_LENGTH 1 #define DMI_HALTSUM_HALT799_768 (0x1 << DMI_HALTSUM_HALT799_768_OFFSET) #define DMI_HALTSUM_HALT767_736_OFFSET 23 #define DMI_HALTSUM_HALT767_736_LENGTH 1 #define 
DMI_HALTSUM_HALT767_736 (0x1 << DMI_HALTSUM_HALT767_736_OFFSET) #define DMI_HALTSUM_HALT735_704_OFFSET 22 #define DMI_HALTSUM_HALT735_704_LENGTH 1 #define DMI_HALTSUM_HALT735_704 (0x1 << DMI_HALTSUM_HALT735_704_OFFSET) #define DMI_HALTSUM_HALT703_672_OFFSET 21 #define DMI_HALTSUM_HALT703_672_LENGTH 1 #define DMI_HALTSUM_HALT703_672 (0x1 << DMI_HALTSUM_HALT703_672_OFFSET) #define DMI_HALTSUM_HALT671_640_OFFSET 20 #define DMI_HALTSUM_HALT671_640_LENGTH 1 #define DMI_HALTSUM_HALT671_640 (0x1 << DMI_HALTSUM_HALT671_640_OFFSET) #define DMI_HALTSUM_HALT639_608_OFFSET 19 #define DMI_HALTSUM_HALT639_608_LENGTH 1 #define DMI_HALTSUM_HALT639_608 (0x1 << DMI_HALTSUM_HALT639_608_OFFSET) #define DMI_HALTSUM_HALT607_576_OFFSET 18 #define DMI_HALTSUM_HALT607_576_LENGTH 1 #define DMI_HALTSUM_HALT607_576 (0x1 << DMI_HALTSUM_HALT607_576_OFFSET) #define DMI_HALTSUM_HALT575_544_OFFSET 17 #define DMI_HALTSUM_HALT575_544_LENGTH 1 #define DMI_HALTSUM_HALT575_544 (0x1 << DMI_HALTSUM_HALT575_544_OFFSET) #define DMI_HALTSUM_HALT543_512_OFFSET 16 #define DMI_HALTSUM_HALT543_512_LENGTH 1 #define DMI_HALTSUM_HALT543_512 (0x1 << DMI_HALTSUM_HALT543_512_OFFSET) #define DMI_HALTSUM_HALT511_480_OFFSET 15 #define DMI_HALTSUM_HALT511_480_LENGTH 1 #define DMI_HALTSUM_HALT511_480 (0x1 << DMI_HALTSUM_HALT511_480_OFFSET) #define DMI_HALTSUM_HALT479_448_OFFSET 14 #define DMI_HALTSUM_HALT479_448_LENGTH 1 #define DMI_HALTSUM_HALT479_448 (0x1 << DMI_HALTSUM_HALT479_448_OFFSET) #define DMI_HALTSUM_HALT447_416_OFFSET 13 #define DMI_HALTSUM_HALT447_416_LENGTH 1 #define DMI_HALTSUM_HALT447_416 (0x1 << DMI_HALTSUM_HALT447_416_OFFSET) #define DMI_HALTSUM_HALT415_384_OFFSET 12 #define DMI_HALTSUM_HALT415_384_LENGTH 1 #define DMI_HALTSUM_HALT415_384 (0x1 << DMI_HALTSUM_HALT415_384_OFFSET) #define DMI_HALTSUM_HALT383_352_OFFSET 11 #define DMI_HALTSUM_HALT383_352_LENGTH 1 #define DMI_HALTSUM_HALT383_352 (0x1 << DMI_HALTSUM_HALT383_352_OFFSET) #define DMI_HALTSUM_HALT351_320_OFFSET 10 #define 
DMI_HALTSUM_HALT351_320_LENGTH 1 #define DMI_HALTSUM_HALT351_320 (0x1 << DMI_HALTSUM_HALT351_320_OFFSET) #define DMI_HALTSUM_HALT319_288_OFFSET 9 #define DMI_HALTSUM_HALT319_288_LENGTH 1 #define DMI_HALTSUM_HALT319_288 (0x1 << DMI_HALTSUM_HALT319_288_OFFSET) #define DMI_HALTSUM_HALT287_256_OFFSET 8 #define DMI_HALTSUM_HALT287_256_LENGTH 1 #define DMI_HALTSUM_HALT287_256 (0x1 << DMI_HALTSUM_HALT287_256_OFFSET) #define DMI_HALTSUM_HALT255_224_OFFSET 7 #define DMI_HALTSUM_HALT255_224_LENGTH 1 #define DMI_HALTSUM_HALT255_224 (0x1 << DMI_HALTSUM_HALT255_224_OFFSET) #define DMI_HALTSUM_HALT223_192_OFFSET 6 #define DMI_HALTSUM_HALT223_192_LENGTH 1 #define DMI_HALTSUM_HALT223_192 (0x1 << DMI_HALTSUM_HALT223_192_OFFSET) #define DMI_HALTSUM_HALT191_160_OFFSET 5 #define DMI_HALTSUM_HALT191_160_LENGTH 1 #define DMI_HALTSUM_HALT191_160 (0x1 << DMI_HALTSUM_HALT191_160_OFFSET) #define DMI_HALTSUM_HALT159_128_OFFSET 4 #define DMI_HALTSUM_HALT159_128_LENGTH 1 #define DMI_HALTSUM_HALT159_128 (0x1 << DMI_HALTSUM_HALT159_128_OFFSET) #define DMI_HALTSUM_HALT127_96_OFFSET 3 #define DMI_HALTSUM_HALT127_96_LENGTH 1 #define DMI_HALTSUM_HALT127_96 (0x1 << DMI_HALTSUM_HALT127_96_OFFSET) #define DMI_HALTSUM_HALT95_64_OFFSET 2 #define DMI_HALTSUM_HALT95_64_LENGTH 1 #define DMI_HALTSUM_HALT95_64 (0x1 << DMI_HALTSUM_HALT95_64_OFFSET) #define DMI_HALTSUM_HALT63_32_OFFSET 1 #define DMI_HALTSUM_HALT63_32_LENGTH 1 #define DMI_HALTSUM_HALT63_32 (0x1 << DMI_HALTSUM_HALT63_32_OFFSET) #define DMI_HALTSUM_HALT31_0_OFFSET 0 #define DMI_HALTSUM_HALT31_0_LENGTH 1 #define DMI_HALTSUM_HALT31_0 (0x1 << DMI_HALTSUM_HALT31_0_OFFSET) #define DMI_HAWINDOWSEL 0x14 #define DMI_HAWINDOWSEL_HAWINDOWSEL_OFFSET 0 #define DMI_HAWINDOWSEL_HAWINDOWSEL_LENGTH 5 #define DMI_HAWINDOWSEL_HAWINDOWSEL (0x1f << DMI_HAWINDOWSEL_HAWINDOWSEL_OFFSET) #define DMI_HAWINDOW 0x15 #define DMI_HAWINDOW_MASKDATA_OFFSET 0 #define DMI_HAWINDOW_MASKDATA_LENGTH 32 #define DMI_HAWINDOW_MASKDATA (0xffffffff << DMI_HAWINDOW_MASKDATA_OFFSET) 
#define DMI_ABSTRACTCS 0x16 /* * Size of the Program Buffer, in 32-bit words. Valid sizes are 0 - 16. * * TODO: Explain what can be done with each size of the buffer, to suggest * why you would want more or less words. */ #define DMI_ABSTRACTCS_PROGSIZE_OFFSET 24 #define DMI_ABSTRACTCS_PROGSIZE_LENGTH 5 #define DMI_ABSTRACTCS_PROGSIZE (0x1f << DMI_ABSTRACTCS_PROGSIZE_OFFSET) /* * 1: An abstract command is currently being executed. * * This bit is set as soon as \Rcommand is written, and is * not cleared until that command has completed. */ #define DMI_ABSTRACTCS_BUSY_OFFSET 12 #define DMI_ABSTRACTCS_BUSY_LENGTH 1 #define DMI_ABSTRACTCS_BUSY (0x1 << DMI_ABSTRACTCS_BUSY_OFFSET) /* * Gets set if an abstract command fails. The bits in this field remain set until * they are cleared by writing 1 to them. No abstract command is * started until the value is reset to 0. * * 0 (none): No error. * * 1 (busy): An abstract command was executing while \Rcommand, * \Rabstractcs, \Rabstractauto was written, or when one * of the {\tt data} or {\tt progbuf} registers was read or written. * * 2 (not supported): The requested command is not supported. A * command that is not supported while the hart is running may be * supported when it is halted. * * 3 (exception): An exception occurred while executing the command * (eg. while executing the Program Buffer). * * 4 (halt/resume): An abstract command couldn't execute because the * hart wasn't in the expected state (running/halted). * * 7 (other): The command failed for another reason. */ #define DMI_ABSTRACTCS_CMDERR_OFFSET 8 #define DMI_ABSTRACTCS_CMDERR_LENGTH 3 #define DMI_ABSTRACTCS_CMDERR (0x7 << DMI_ABSTRACTCS_CMDERR_OFFSET) /* * Number of {\tt data} registers that are implemented as part of the * abstract command interface. Valid sizes are 0 - 12. 
*/ #define DMI_ABSTRACTCS_DATACOUNT_OFFSET 0 #define DMI_ABSTRACTCS_DATACOUNT_LENGTH 5 #define DMI_ABSTRACTCS_DATACOUNT (0x1f << DMI_ABSTRACTCS_DATACOUNT_OFFSET) #define DMI_COMMAND 0x17 /* * The type determines the overall functionality of this * abstract command. */ #define DMI_COMMAND_CMDTYPE_OFFSET 24 #define DMI_COMMAND_CMDTYPE_LENGTH 8 #define DMI_COMMAND_CMDTYPE (0xff << DMI_COMMAND_CMDTYPE_OFFSET) /* * This field is interpreted in a command-specific manner, * described for each abstract command. */ #define DMI_COMMAND_CONTROL_OFFSET 0 #define DMI_COMMAND_CONTROL_LENGTH 24 #define DMI_COMMAND_CONTROL (0xffffff << DMI_COMMAND_CONTROL_OFFSET) #define DMI_ABSTRACTAUTO 0x18 /* * When a bit in this field is 1, read or write accesses the corresponding {\tt progbuf} word * cause the command in \Rcommand to be executed again. */ #define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET 16 #define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_LENGTH 16 #define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF (0xffff << DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET) /* * When a bit in this field is 1, read or write accesses the corresponding {\tt data} word * cause the command in \Rcommand to be executed again. 
*/ #define DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET 0 #define DMI_ABSTRACTAUTO_AUTOEXECDATA_LENGTH 12 #define DMI_ABSTRACTAUTO_AUTOEXECDATA (0xfff << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) #define DMI_CFGSTRADDR0 0x19 #define DMI_CFGSTRADDR0_ADDR_OFFSET 0 #define DMI_CFGSTRADDR0_ADDR_LENGTH 32 #define DMI_CFGSTRADDR0_ADDR (0xffffffff << DMI_CFGSTRADDR0_ADDR_OFFSET) #define DMI_CFGSTRADDR1 0x1a #define DMI_CFGSTRADDR2 0x1b #define DMI_CFGSTRADDR3 0x1c #define DMI_DATA0 0x04 #define DMI_DATA0_DATA_OFFSET 0 #define DMI_DATA0_DATA_LENGTH 32 #define DMI_DATA0_DATA (0xffffffff << DMI_DATA0_DATA_OFFSET) #define DMI_DATA11 0x0f #define DMI_PROGBUF0 0x20 #define DMI_PROGBUF0_DATA_OFFSET 0 #define DMI_PROGBUF0_DATA_LENGTH 32 #define DMI_PROGBUF0_DATA (0xffffffff << DMI_PROGBUF0_DATA_OFFSET) #define DMI_PROGBUF15 0x2f #define DMI_AUTHDATA 0x30 #define DMI_AUTHDATA_DATA_OFFSET 0 #define DMI_AUTHDATA_DATA_LENGTH 32 #define DMI_AUTHDATA_DATA (0xffffffff << DMI_AUTHDATA_DATA_OFFSET) #define DMI_SERCS 0x34 /* * Number of supported serial ports. */ #define DMI_SERCS_SERIALCOUNT_OFFSET 28 #define DMI_SERCS_SERIALCOUNT_LENGTH 4 #define DMI_SERCS_SERIALCOUNT (0xf << DMI_SERCS_SERIALCOUNT_OFFSET) /* * Select which serial port is accessed by \Rserrx and \Rsertx. 
*/ #define DMI_SERCS_SERIAL_OFFSET 24 #define DMI_SERCS_SERIAL_LENGTH 3 #define DMI_SERCS_SERIAL (0x7 << DMI_SERCS_SERIAL_OFFSET) #define DMI_SERCS_ERROR7_OFFSET 23 #define DMI_SERCS_ERROR7_LENGTH 1 #define DMI_SERCS_ERROR7 (0x1 << DMI_SERCS_ERROR7_OFFSET) #define DMI_SERCS_VALID7_OFFSET 22 #define DMI_SERCS_VALID7_LENGTH 1 #define DMI_SERCS_VALID7 (0x1 << DMI_SERCS_VALID7_OFFSET) #define DMI_SERCS_FULL7_OFFSET 21 #define DMI_SERCS_FULL7_LENGTH 1 #define DMI_SERCS_FULL7 (0x1 << DMI_SERCS_FULL7_OFFSET) #define DMI_SERCS_ERROR6_OFFSET 20 #define DMI_SERCS_ERROR6_LENGTH 1 #define DMI_SERCS_ERROR6 (0x1 << DMI_SERCS_ERROR6_OFFSET) #define DMI_SERCS_VALID6_OFFSET 19 #define DMI_SERCS_VALID6_LENGTH 1 #define DMI_SERCS_VALID6 (0x1 << DMI_SERCS_VALID6_OFFSET) #define DMI_SERCS_FULL6_OFFSET 18 #define DMI_SERCS_FULL6_LENGTH 1 #define DMI_SERCS_FULL6 (0x1 << DMI_SERCS_FULL6_OFFSET) #define DMI_SERCS_ERROR5_OFFSET 17 #define DMI_SERCS_ERROR5_LENGTH 1 #define DMI_SERCS_ERROR5 (0x1 << DMI_SERCS_ERROR5_OFFSET) #define DMI_SERCS_VALID5_OFFSET 16 #define DMI_SERCS_VALID5_LENGTH 1 #define DMI_SERCS_VALID5 (0x1 << DMI_SERCS_VALID5_OFFSET) #define DMI_SERCS_FULL5_OFFSET 15 #define DMI_SERCS_FULL5_LENGTH 1 #define DMI_SERCS_FULL5 (0x1 << DMI_SERCS_FULL5_OFFSET) #define DMI_SERCS_ERROR4_OFFSET 14 #define DMI_SERCS_ERROR4_LENGTH 1 #define DMI_SERCS_ERROR4 (0x1 << DMI_SERCS_ERROR4_OFFSET) #define DMI_SERCS_VALID4_OFFSET 13 #define DMI_SERCS_VALID4_LENGTH 1 #define DMI_SERCS_VALID4 (0x1 << DMI_SERCS_VALID4_OFFSET) #define DMI_SERCS_FULL4_OFFSET 12 #define DMI_SERCS_FULL4_LENGTH 1 #define DMI_SERCS_FULL4 (0x1 << DMI_SERCS_FULL4_OFFSET) #define DMI_SERCS_ERROR3_OFFSET 11 #define DMI_SERCS_ERROR3_LENGTH 1 #define DMI_SERCS_ERROR3 (0x1 << DMI_SERCS_ERROR3_OFFSET) #define DMI_SERCS_VALID3_OFFSET 10 #define DMI_SERCS_VALID3_LENGTH 1 #define DMI_SERCS_VALID3 (0x1 << DMI_SERCS_VALID3_OFFSET) #define DMI_SERCS_FULL3_OFFSET 9 #define DMI_SERCS_FULL3_LENGTH 1 #define DMI_SERCS_FULL3 (0x1 << 
DMI_SERCS_FULL3_OFFSET) #define DMI_SERCS_ERROR2_OFFSET 8 #define DMI_SERCS_ERROR2_LENGTH 1 #define DMI_SERCS_ERROR2 (0x1 << DMI_SERCS_ERROR2_OFFSET) #define DMI_SERCS_VALID2_OFFSET 7 #define DMI_SERCS_VALID2_LENGTH 1 #define DMI_SERCS_VALID2 (0x1 << DMI_SERCS_VALID2_OFFSET) #define DMI_SERCS_FULL2_OFFSET 6 #define DMI_SERCS_FULL2_LENGTH 1 #define DMI_SERCS_FULL2 (0x1 << DMI_SERCS_FULL2_OFFSET) #define DMI_SERCS_ERROR1_OFFSET 5 #define DMI_SERCS_ERROR1_LENGTH 1 #define DMI_SERCS_ERROR1 (0x1 << DMI_SERCS_ERROR1_OFFSET) #define DMI_SERCS_VALID1_OFFSET 4 #define DMI_SERCS_VALID1_LENGTH 1 #define DMI_SERCS_VALID1 (0x1 << DMI_SERCS_VALID1_OFFSET) #define DMI_SERCS_FULL1_OFFSET 3 #define DMI_SERCS_FULL1_LENGTH 1 #define DMI_SERCS_FULL1 (0x1 << DMI_SERCS_FULL1_OFFSET) /* * 1 when the debugger-to-core queue for serial port 0 has * over or underflowed. This bit will remain set until it is reset by * writing 1 to this bit. */ #define DMI_SERCS_ERROR0_OFFSET 2 #define DMI_SERCS_ERROR0_LENGTH 1 #define DMI_SERCS_ERROR0 (0x1 << DMI_SERCS_ERROR0_OFFSET) /* * 1 when the core-to-debugger queue for serial port 0 is not empty. */ #define DMI_SERCS_VALID0_OFFSET 1 #define DMI_SERCS_VALID0_LENGTH 1 #define DMI_SERCS_VALID0 (0x1 << DMI_SERCS_VALID0_OFFSET) /* * 1 when the debugger-to-core queue for serial port 0 is full. */ #define DMI_SERCS_FULL0_OFFSET 0 #define DMI_SERCS_FULL0_LENGTH 1 #define DMI_SERCS_FULL0 (0x1 << DMI_SERCS_FULL0_OFFSET) #define DMI_SERTX 0x35 #define DMI_SERTX_DATA_OFFSET 0 #define DMI_SERTX_DATA_LENGTH 32 #define DMI_SERTX_DATA (0xffffffff << DMI_SERTX_DATA_OFFSET) #define DMI_SERRX 0x36 #define DMI_SERRX_DATA_OFFSET 0 #define DMI_SERRX_DATA_LENGTH 32 #define DMI_SERRX_DATA (0xffffffff << DMI_SERRX_DATA_OFFSET) #define DMI_SBCS 0x38 /* * When a 1 is written here, triggers a read at the address in {\tt * sbaddress} using the access size set by \Fsbaccess. 
*/ #define DMI_SBCS_SBSINGLEREAD_OFFSET 20 #define DMI_SBCS_SBSINGLEREAD_LENGTH 1 #define DMI_SBCS_SBSINGLEREAD (0x1 << DMI_SBCS_SBSINGLEREAD_OFFSET) /* * Select the access size to use for system bus accesses triggered by * writes to the {\tt sbaddress} registers or \Rsbdatazero. * * 0: 8-bit * * 1: 16-bit * * 2: 32-bit * * 3: 64-bit * * 4: 128-bit * * If an unsupported system bus access size is written here, * the DM may not perform the access, or may perform the access * with any access size. */ #define DMI_SBCS_SBACCESS_OFFSET 17 #define DMI_SBCS_SBACCESS_LENGTH 3 #define DMI_SBCS_SBACCESS (0x7 << DMI_SBCS_SBACCESS_OFFSET) /* * When 1, the internal address value (used by the system bus master) * is incremented by the access size (in bytes) selected in \Fsbaccess * after every system bus access. */ #define DMI_SBCS_SBAUTOINCREMENT_OFFSET 16 #define DMI_SBCS_SBAUTOINCREMENT_LENGTH 1 #define DMI_SBCS_SBAUTOINCREMENT (0x1 << DMI_SBCS_SBAUTOINCREMENT_OFFSET) /* * When 1, every read from \Rsbdatazero automatically triggers a system * bus read at the new address. */ #define DMI_SBCS_SBAUTOREAD_OFFSET 15 #define DMI_SBCS_SBAUTOREAD_LENGTH 1 #define DMI_SBCS_SBAUTOREAD (0x1 << DMI_SBCS_SBAUTOREAD_OFFSET) /* * When the debug module's system bus * master causes a bus error, this field gets set. The bits in this * field remain set until they are cleared by writing 1 to them. * While this field is non-zero, no more system bus accesses can be * initiated by the debug module. * * 0: There was no bus error. * * 1: There was a timeout. * * 2: A bad address was accessed. * * 3: There was some other error (eg. alignment). * * 4: The system bus master was busy when one of the * {\tt sbaddress} or {\tt sbdata} registers was written, * or the {\tt sbdata} register was read when it had * stale data. */ #define DMI_SBCS_SBERROR_OFFSET 12 #define DMI_SBCS_SBERROR_LENGTH 3 #define DMI_SBCS_SBERROR (0x7 << DMI_SBCS_SBERROR_OFFSET) /* * Width of system bus addresses in bits. 
(0 indicates there is no bus * access support.) */ #define DMI_SBCS_SBASIZE_OFFSET 5 #define DMI_SBCS_SBASIZE_LENGTH 7 #define DMI_SBCS_SBASIZE (0x7f << DMI_SBCS_SBASIZE_OFFSET) /* * 1 when 128-bit system bus accesses are supported. */ #define DMI_SBCS_SBACCESS128_OFFSET 4 #define DMI_SBCS_SBACCESS128_LENGTH 1 #define DMI_SBCS_SBACCESS128 (0x1 << DMI_SBCS_SBACCESS128_OFFSET) /* * 1 when 64-bit system bus accesses are supported. */ #define DMI_SBCS_SBACCESS64_OFFSET 3 #define DMI_SBCS_SBACCESS64_LENGTH 1 #define DMI_SBCS_SBACCESS64 (0x1 << DMI_SBCS_SBACCESS64_OFFSET) /* * 1 when 32-bit system bus accesses are supported. */ #define DMI_SBCS_SBACCESS32_OFFSET 2 #define DMI_SBCS_SBACCESS32_LENGTH 1 #define DMI_SBCS_SBACCESS32 (0x1 << DMI_SBCS_SBACCESS32_OFFSET) /* * 1 when 16-bit system bus accesses are supported. */ #define DMI_SBCS_SBACCESS16_OFFSET 1 #define DMI_SBCS_SBACCESS16_LENGTH 1 #define DMI_SBCS_SBACCESS16 (0x1 << DMI_SBCS_SBACCESS16_OFFSET) /* * 1 when 8-bit system bus accesses are supported. */ #define DMI_SBCS_SBACCESS8_OFFSET 0 #define DMI_SBCS_SBACCESS8_LENGTH 1 #define DMI_SBCS_SBACCESS8 (0x1 << DMI_SBCS_SBACCESS8_OFFSET) #define DMI_SBADDRESS0 0x39 /* * Accesses bits 31:0 of the internal address. */ #define DMI_SBADDRESS0_ADDRESS_OFFSET 0 #define DMI_SBADDRESS0_ADDRESS_LENGTH 32 #define DMI_SBADDRESS0_ADDRESS (0xffffffff << DMI_SBADDRESS0_ADDRESS_OFFSET) #define DMI_SBADDRESS1 0x3a /* * Accesses bits 63:32 of the internal address (if the system address * bus is that wide). */ #define DMI_SBADDRESS1_ADDRESS_OFFSET 0 #define DMI_SBADDRESS1_ADDRESS_LENGTH 32 #define DMI_SBADDRESS1_ADDRESS (0xffffffff << DMI_SBADDRESS1_ADDRESS_OFFSET) #define DMI_SBADDRESS2 0x3b /* * Accesses bits 95:64 of the internal address (if the system address * bus is that wide). 
*/ #define DMI_SBADDRESS2_ADDRESS_OFFSET 0 #define DMI_SBADDRESS2_ADDRESS_LENGTH 32 #define DMI_SBADDRESS2_ADDRESS (0xffffffff << DMI_SBADDRESS2_ADDRESS_OFFSET) #define DMI_SBDATA0 0x3c /* * Accesses bits 31:0 of the internal data. */ #define DMI_SBDATA0_DATA_OFFSET 0 #define DMI_SBDATA0_DATA_LENGTH 32 #define DMI_SBDATA0_DATA (0xffffffff << DMI_SBDATA0_DATA_OFFSET) #define DMI_SBDATA1 0x3d /* * Accesses bits 63:32 of the internal data (if the system bus is * that wide). */ #define DMI_SBDATA1_DATA_OFFSET 0 #define DMI_SBDATA1_DATA_LENGTH 32 #define DMI_SBDATA1_DATA (0xffffffff << DMI_SBDATA1_DATA_OFFSET) #define DMI_SBDATA2 0x3e /* * Accesses bits 95:64 of the internal data (if the system bus is * that wide). */ #define DMI_SBDATA2_DATA_OFFSET 0 #define DMI_SBDATA2_DATA_LENGTH 32 #define DMI_SBDATA2_DATA (0xffffffff << DMI_SBDATA2_DATA_OFFSET) #define DMI_SBDATA3 0x3f /* * Accesses bits 127:96 of the internal data (if the system bus is * that wide). */ #define DMI_SBDATA3_DATA_OFFSET 0 #define DMI_SBDATA3_DATA_LENGTH 32 #define DMI_SBDATA3_DATA (0xffffffff << DMI_SBDATA3_DATA_OFFSET) #define TRACE 0x728 /* * 1 if the trace buffer has wrapped since the last time \Fdiscard was * written. 0 otherwise. */ #define TRACE_WRAPPED_OFFSET 24 #define TRACE_WRAPPED_LENGTH 1 #define TRACE_WRAPPED (0x1 << TRACE_WRAPPED_OFFSET) /* * Emit Timestamp trace sequences. */ #define TRACE_EMITTIMESTAMP_OFFSET 23 #define TRACE_EMITTIMESTAMP_LENGTH 1 #define TRACE_EMITTIMESTAMP (0x1 << TRACE_EMITTIMESTAMP_OFFSET) /* * Emit Store Data trace sequences. */ #define TRACE_EMITSTOREDATA_OFFSET 22 #define TRACE_EMITSTOREDATA_LENGTH 1 #define TRACE_EMITSTOREDATA (0x1 << TRACE_EMITSTOREDATA_OFFSET) /* * Emit Load Data trace sequences. */ #define TRACE_EMITLOADDATA_OFFSET 21 #define TRACE_EMITLOADDATA_LENGTH 1 #define TRACE_EMITLOADDATA (0x1 << TRACE_EMITLOADDATA_OFFSET) /* * Emit Store Address trace sequences. 
*/ #define TRACE_EMITSTOREADDR_OFFSET 20 #define TRACE_EMITSTOREADDR_LENGTH 1 #define TRACE_EMITSTOREADDR (0x1 << TRACE_EMITSTOREADDR_OFFSET) /* * Emit Load Address trace sequences. */ #define TRACE_EMITLOADADDR_OFFSET 19 #define TRACE_EMITLOADADDR_LENGTH 1 #define TRACE_EMITLOADADDR (0x1 << TRACE_EMITLOADADDR_OFFSET) /* * Emit Privilege Level trace sequences. */ #define TRACE_EMITPRIV_OFFSET 18 #define TRACE_EMITPRIV_LENGTH 1 #define TRACE_EMITPRIV (0x1 << TRACE_EMITPRIV_OFFSET) /* * Emit Branch Taken and Branch Not Taken trace sequences. */ #define TRACE_EMITBRANCH_OFFSET 17 #define TRACE_EMITBRANCH_LENGTH 1 #define TRACE_EMITBRANCH (0x1 << TRACE_EMITBRANCH_OFFSET) /* * Emit PC trace sequences. */ #define TRACE_EMITPC_OFFSET 16 #define TRACE_EMITPC_LENGTH 1 #define TRACE_EMITPC (0x1 << TRACE_EMITPC_OFFSET) /* * Determine what happens when the trace buffer is full. 0 means wrap * and overwrite. 1 means turn off trace until \Fdiscard is written as 1. * 2 means cause a trace full exception. 3 is reserved for future use. */ #define TRACE_FULLACTION_OFFSET 8 #define TRACE_FULLACTION_LENGTH 2 #define TRACE_FULLACTION (0x3 << TRACE_FULLACTION_OFFSET) /* * 0: Trace to a dedicated on-core RAM (which is not further defined in * this spec). * * 1: Trace to RAM on the system bus. * * 2: Send trace data to a dedicated off-chip interface (which is not * defined in this spec). This does not affect execution speed. * * 3: Reserved for future use. * * Options 0 and 1 slow down execution (eg. because of system bus * contention). */ #define TRACE_DESTINATION_OFFSET 4 #define TRACE_DESTINATION_LENGTH 2 #define TRACE_DESTINATION (0x3 << TRACE_DESTINATION_OFFSET) /* * When 1, the trace logic may stall processor execution to ensure it * can emit all the trace sequences required. When 0 individual trace * sequences may be dropped. 
*/ #define TRACE_STALL_OFFSET 2 #define TRACE_STALL_LENGTH 1 #define TRACE_STALL (0x1 << TRACE_STALL_OFFSET) /* * Writing 1 to this bit tells the trace logic that any trace * collected is no longer required. When tracing to RAM, it resets the * trace write pointer to the start of the memory, as well as * \Fwrapped. */ #define TRACE_DISCARD_OFFSET 1 #define TRACE_DISCARD_LENGTH 1 #define TRACE_DISCARD (0x1 << TRACE_DISCARD_OFFSET) #define TRACE_SUPPORTED_OFFSET 0 #define TRACE_SUPPORTED_LENGTH 1 #define TRACE_SUPPORTED (0x1 << TRACE_SUPPORTED_OFFSET) #define TBUFSTART 0x729 #define TBUFEND 0x72a #define TBUFWRITE 0x72b #define SHORTNAME 0x123 /* * Description of what this field is used for. */ #define SHORTNAME_FIELD_OFFSET 0 #define SHORTNAME_FIELD_LENGTH 8 #define SHORTNAME_FIELD (0xff << SHORTNAME_FIELD_OFFSET) #define AC_ACCESS_REGISTER None /* * This is 0 to indicate Access Register Command. */ #define AC_ACCESS_REGISTER_CMDTYPE_OFFSET 24 #define AC_ACCESS_REGISTER_CMDTYPE_LENGTH 8 #define AC_ACCESS_REGISTER_CMDTYPE (0xff << AC_ACCESS_REGISTER_CMDTYPE_OFFSET) /* * 2: Access the lowest 32 bits of the register. * * 3: Access the lowest 64 bits of the register. * * 4: Access the lowest 128 bits of the register. * * If \Fsize specifies a size larger than the register's actual size, * then the access must fail. If a register is accessible, then reads of \Fsize * less than or equal to the register's actual size must be supported. */ #define AC_ACCESS_REGISTER_SIZE_OFFSET 20 #define AC_ACCESS_REGISTER_SIZE_LENGTH 3 #define AC_ACCESS_REGISTER_SIZE (0x7 << AC_ACCESS_REGISTER_SIZE_OFFSET) /* * When 1, execute the program in the Program Buffer exactly once * after performing the transfer, if any. */ #define AC_ACCESS_REGISTER_POSTEXEC_OFFSET 18 #define AC_ACCESS_REGISTER_POSTEXEC_LENGTH 1 #define AC_ACCESS_REGISTER_POSTEXEC (0x1 << AC_ACCESS_REGISTER_POSTEXEC_OFFSET) /* * 0: Don't do the operation specified by \Fwrite. * * 1: Do the operation specified by \Fwrite. 
*/ #define AC_ACCESS_REGISTER_TRANSFER_OFFSET 17 #define AC_ACCESS_REGISTER_TRANSFER_LENGTH 1 #define AC_ACCESS_REGISTER_TRANSFER (0x1 << AC_ACCESS_REGISTER_TRANSFER_OFFSET) /* * When \Ftransfer is set: * 0: Copy data from the specified register into {\tt arg0} portion * of {\tt data}. * * 1: Copy data from {\tt arg0} portion of {\tt data} into the * specified register. */ #define AC_ACCESS_REGISTER_WRITE_OFFSET 16 #define AC_ACCESS_REGISTER_WRITE_LENGTH 1 #define AC_ACCESS_REGISTER_WRITE (0x1 << AC_ACCESS_REGISTER_WRITE_OFFSET) /* * Number of the register to access, as described in * Table~\ref{tab:regno}. * \Rdpc may be used as an alias for PC if this command is * supported on a non-halted hart. */ #define AC_ACCESS_REGISTER_REGNO_OFFSET 0 #define AC_ACCESS_REGISTER_REGNO_LENGTH 16 #define AC_ACCESS_REGISTER_REGNO (0xffff << AC_ACCESS_REGISTER_REGNO_OFFSET) #define AC_QUICK_ACCESS None /* * This is 1 to indicate Quick Access command. */ #define AC_QUICK_ACCESS_CMDTYPE_OFFSET 24 #define AC_QUICK_ACCESS_CMDTYPE_LENGTH 8 #define AC_QUICK_ACCESS_CMDTYPE (0xff << AC_QUICK_ACCESS_CMDTYPE_OFFSET) #define VIRT_PRIV virtual /* * Contains the privilege level the hart was operating in when Debug * Mode was entered. The encoding is described in Table * \ref{tab:privlevel}. A user can write this value to change the * hart's privilege level when exiting Debug Mode. */ #define VIRT_PRIV_PRV_OFFSET 0 #define VIRT_PRIV_PRV_LENGTH 2 #define VIRT_PRIV_PRV (0x3 << VIRT_PRIV_PRV_OFFSET)
{ "pile_set_name": "Github" }
/* * Copyright (c) 2020 NetEase Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Project: curve * Created Date: 18-8-23 * Author: wudemiao */ #include "src/chunkserver/braft_cli_service2.h" #include <glog/logging.h> #include <brpc/controller.h> // brpc::Controller #include <braft/node_manager.h> // NodeManager #include <braft/closure_helper.h> // NewCallback #include <cerrno> #include <vector> #include <string> namespace curve { namespace chunkserver { static void add_peer_returned(brpc::Controller *cntl, const AddPeerRequest2 *request, AddPeerResponse2 *response, std::vector<braft::PeerId> old_peers, scoped_refptr<braft::NodeImpl> /*node*/, Closure *done, const butil::Status &st) { brpc::ClosureGuard done_guard(done); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } bool already_exists = false; for (size_t i = 0; i < old_peers.size(); ++i) { response->add_oldpeers()->set_address(old_peers[i].to_string()); response->add_newpeers()->set_address(old_peers[i].to_string()); if (old_peers[i] == request->addpeer().address()) { already_exists = true; } } if (!already_exists) { response->add_newpeers()->set_address(request->addpeer().address()); } } void BRaftCliServiceImpl2::AddPeer(RpcController *controller, const AddPeerRequest2 *request, AddPeerResponse2 *response, Closure *done) { brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr<braft::NodeImpl> node; LogicPoolID logicPoolId = 
request->logicpoolid(); CopysetID copysetId = request->copysetid(); butil::Status st = get_node(&node, logicPoolId, copysetId, request->leader().address()); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } std::vector<braft::PeerId> peers; st = node->list_peers(&peers); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } braft::PeerId adding_peer; if (adding_peer.parse(request->addpeer().address()) != 0) { cntl->SetFailed(EINVAL, "Fail to parse peer_id %s", request->addpeer().address().c_str()); return; } LOG(WARNING) << "Receive AddPeerRequest to " << node->node_id() << " from " << cntl->remote_side() << ", adding " << request->addpeer().address(); braft::Closure *add_peer_done = NewCallback( add_peer_returned, cntl, request, response, peers, node, done_guard.release()); return node->add_peer(adding_peer, add_peer_done); } static void remove_peer_returned(brpc::Controller *cntl, const RemovePeerRequest2 *request, RemovePeerResponse2 *response, std::vector<braft::PeerId> old_peers, scoped_refptr<braft::NodeImpl> /*node*/, Closure *done, const butil::Status &st) { brpc::ClosureGuard done_guard(done); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } for (size_t i = 0; i < old_peers.size(); ++i) { response->add_oldpeers()->set_address(old_peers[i].to_string()); if (old_peers[i] != request->removepeer().address()) { response->add_newpeers()->set_address(old_peers[i].to_string()); } } } void BRaftCliServiceImpl2::RemovePeer(RpcController *controller, const RemovePeerRequest2 *request, RemovePeerResponse2 *response, Closure *done) { brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr<braft::NodeImpl> node; LogicPoolID logicPoolId = request->logicpoolid(); CopysetID copysetId = request->copysetid(); butil::Status st = get_node(&node, logicPoolId, copysetId, request->leader().address()); if (!st.ok()) { 
cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } std::vector<braft::PeerId> peers; st = node->list_peers(&peers); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } braft::PeerId removing_peer; if (removing_peer.parse(request->removepeer().address()) != 0) { cntl->SetFailed(EINVAL, "Fail to parse peer_id %s", request->removepeer().address().c_str()); return; } LOG(WARNING) << "Receive RemovePeerRequest to " << node->node_id() << " from " << cntl->remote_side() << ", removing " << request->removepeer().address(); braft::Closure *remove_peer_done = NewCallback( remove_peer_returned, cntl, request, response, peers, node, done_guard.release()); return node->remove_peer(removing_peer, remove_peer_done); } static void change_peers_returned(brpc::Controller* cntl, const ChangePeersRequest2* request, ChangePeersResponse2* response, std::vector<braft::PeerId> old_peers, Configuration new_peers, scoped_refptr<braft::NodeImpl> /*node*/, ::google::protobuf::Closure* done, const butil::Status& st) { brpc::ClosureGuard done_guard(done); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } for (size_t i = 0; i < old_peers.size(); ++i) { response->add_oldpeers()->set_address(old_peers[i].to_string()); } for (Configuration::const_iterator iter = new_peers.begin(); iter != new_peers.end(); ++iter) { response->add_newpeers()->set_address(iter->to_string()); } } void BRaftCliServiceImpl2::ChangePeers(RpcController *controller, const ChangePeersRequest2 *request, ChangePeersResponse2 *response, Closure *done) { brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr<braft::NodeImpl> node; LogicPoolID logicPoolId = request->logicpoolid(); CopysetID copysetId = request->copysetid(); butil::Status st = get_node(&node, logicPoolId, copysetId, request->leader().address()); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } 
std::vector<braft::PeerId> peers; st = node->list_peers(&peers); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } Configuration conf; for (int i = 0; i < request->newpeers_size(); ++i) { PeerId peer; if (peer.parse(request->newpeers(i).address()) != 0) { cntl->SetFailed(EINVAL, "Fail to p2arse %s", request->newpeers(i).address().c_str()); return; } conf.add_peer(peer); } LOG(WARNING) << "Receive ChangePeersRequest to " << node->node_id() << " from " << cntl->remote_side() << ", new conf: " << conf; braft::Closure* change_peers_done = NewCallback( change_peers_returned, cntl, request, response, peers, conf, node, done_guard.release()); return node->change_peers(conf, change_peers_done); } void BRaftCliServiceImpl2::GetLeader(RpcController *controller, const GetLeaderRequest2 *request, GetLeaderResponse2 *response, Closure *done) { brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); std::vector<scoped_refptr<braft::NodeImpl> > nodes; braft::NodeManager *const nm = braft::NodeManager::GetInstance(); LogicPoolID logicPoolId = request->logicpoolid(); CopysetID copysetId = request->copysetid(); braft::GroupId group_id = ToGroupId(logicPoolId, copysetId); nm->get_nodes_by_group_id(group_id, &nodes); if (nodes.empty()) { cntl->SetFailed(ENOENT, "No nodes in group %s", group_id.c_str()); return; } for (size_t i = 0; i < nodes.size(); ++i) { braft::PeerId leader_id = nodes[i]->leader_id(); if (!leader_id.is_empty()) { Peer *peer = new Peer(); response->set_allocated_leader(peer); peer->set_address(leader_id.to_string()); return; } } cntl->SetFailed(EAGAIN, "Unknown leader"); } butil::Status BRaftCliServiceImpl2::get_node( scoped_refptr<braft::NodeImpl> *node, const LogicPoolID &logicPoolId, const CopysetID &copysetId, const std::string &peer_id) { braft::GroupId group_id = ToGroupId(logicPoolId, copysetId); braft::NodeManager *const nm = braft::NodeManager::GetInstance(); /* peer id is required have 
been guaranteed in proto */ *node = nm->get(group_id, peer_id); if (!(*node)) { return butil::Status(ENOENT, "Fail to find node %s in group %s", peer_id.c_str(), group_id.c_str()); } if ((*node)->disable_cli()) { return butil::Status(EACCES, "CliService is not allowed to access node " "%s", (*node)->node_id().to_string().c_str()); } return butil::Status::OK(); } void BRaftCliServiceImpl2::TransferLeader( RpcController *controller, const TransferLeaderRequest2 *request, TransferLeaderResponse2 *response, ::google::protobuf::Closure *done) { brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr<braft::NodeImpl> node; LogicPoolID logicPoolId = request->logicpoolid(); CopysetID copysetId = request->copysetid(); butil::Status st = get_node(&node, logicPoolId, copysetId, request->leader().address()); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } braft::PeerId peer = braft::ANY_PEER; if (peer.parse(request->transferee().address()) != 0) { cntl->SetFailed(EINVAL, "Fail to parse %s", request->transferee().address().c_str()); return; } const int rc = node->transfer_leadership_to(peer); if (rc != 0) { cntl->SetFailed(rc, "Fail to invoke transfer_leadership_to : %s", berror(rc)); return; } } void BRaftCliServiceImpl2::ResetPeer(RpcController* controller, const ResetPeerRequest2* request, ResetPeerResponse2* response, Closure* done) { brpc::Controller* cntl = (brpc::Controller*)controller; brpc::ClosureGuard done_guard(done); scoped_refptr<braft::NodeImpl> node; LogicPoolID logicPoolId = request->logicpoolid(); CopysetID copysetId = request->copysetid(); butil::Status st = get_node(&node, logicPoolId, copysetId, request->requestpeer().address()); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); return; } Configuration conf; for (int i = 0; i < request->newpeers_size(); ++i) { PeerId peer; if (peer.parse(request->newpeers(i).address()) != 0) { 
cntl->SetFailed(EINVAL, "Fail to p2arse %s", request->newpeers(i).address().c_str()); return; } conf.add_peer(peer); } LOG(WARNING) << "Receive ResetPeerRequest to " << node->node_id() << " from " << cntl->remote_side() << ", new conf: " << conf; st = node->reset_peers(conf); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); } } } // namespace chunkserver } // namespace curve
{ "pile_set_name": "Github" }
dev tun proto udp remote ru-nv-smart.serverlocation.co 80 resolv-retry infinite client auth-user-pass /config/openvpn-credentials.txt nobind persist-key persist-tun remote-cert-tls server compress lzo reneg-sec 0 # Uncomment following line if you use MS Windows # block-outside-dns verb 3 <ca> -----BEGIN CERTIFICATE----- MIIFOTCCBCGgAwIBAgIJALHEFe9IQlCzMA0GCSqGSIb3DQEBCwUAMIHDMQswCQYD VQQGEwJTQzENMAsGA1UECBMETWFoZTERMA8GA1UEBxMIVmljdG9yaWExHTAbBgNV BAoTFEdsb2JhbCBTdGVhbHRoLCBJbmMuMQwwCgYDVQQLEwNWUE4xIDAeBgNVBAMT F0dsb2JhbCBTdGVhbHRoLCBJbmMuIENBMRswGQYDVQQpExJzZXJ2ZXJsb2NhdGlv bi1rZXkxJjAkBgkqhkiG9w0BCQEWF2FkbWluQHNlcnZlcmxvY2F0aW9uLmNvMB4X DTE1MDIyNTIwMDIzMFoXDTI1MDIyMjIwMDIzMFowgcMxCzAJBgNVBAYTAlNDMQ0w CwYDVQQIEwRNYWhlMREwDwYDVQQHEwhWaWN0b3JpYTEdMBsGA1UEChMUR2xvYmFs IFN0ZWFsdGgsIEluYy4xDDAKBgNVBAsTA1ZQTjEgMB4GA1UEAxMXR2xvYmFsIFN0 ZWFsdGgsIEluYy4gQ0ExGzAZBgNVBCkTEnNlcnZlcmxvY2F0aW9uLWtleTEmMCQG CSqGSIb3DQEJARYXYWRtaW5Ac2VydmVybG9jYXRpb24uY28wggEiMA0GCSqGSIb3 DQEBAQUAA4IBDwAwggEKAoIBAQDA94FmLbk3VPchYZmBCTc0okUFO6AwTn8trAVX r6GVypCDmuWyCPAzCG47qT2rBlWPJMXYbmtJEq/Vrh9gcU7LYw4NQjSnXnBQ10wX c3B+mG4x807IBwH87N2Fl6ZbL5mChIdssUalS3QyARc5Xp6YAJrX3I/UninPXYjz jSxvMrSTnFHwS757F1vLv5z5+Udahz22+u+sqdkN31EnAsM917/fOpkWo0fd/x0r 59d0wYSeqRzqCf9UoQff08/8b+XN+kmR82S7othHEaLXBCgdXHk/lrp5zy4n1+AF lwEXx51UNS8u5YUHlX0orJC1lTJfWjCvTWo2u/XC5iXcrEGbAgMBAAGjggEsMIIB KDAdBgNVHQ4EFgQU69+VyGvTYVeqitctj3s/q7vcEbcwgfgGA1UdIwSB8DCB7YAU 69+VyGvTYVeqitctj3s/q7vcEbehgcmkgcYwgcMxCzAJBgNVBAYTAlNDMQ0wCwYD VQQIEwRNYWhlMREwDwYDVQQHEwhWaWN0b3JpYTEdMBsGA1UEChMUR2xvYmFsIFN0 ZWFsdGgsIEluYy4xDDAKBgNVBAsTA1ZQTjEgMB4GA1UEAxMXR2xvYmFsIFN0ZWFs dGgsIEluYy4gQ0ExGzAZBgNVBCkTEnNlcnZlcmxvY2F0aW9uLWtleTEmMCQGCSqG SIb3DQEJARYXYWRtaW5Ac2VydmVybG9jYXRpb24uY2+CCQCxxBXvSEJQszAMBgNV HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBYkrR6R3QmQ04zWc5r4C7fhR7N +rOqljrpbMXL6QfJTQJbAX2EJeHEyhjYh6xf4I3LWiM1rpSdJi8CbMagSRZulBqQ v9ceszpFOpaoM4kgfDKWW+Z7R4cOZxZKmym1heuvcLcqMwOEk0qN7b6fyipSci38 
/LnVdMHDLqnJUndTjhtN6sHmCKrBx9I3V9Yp1CAHUnEvX8mZAYKjbdhuhKhwaMiq wOVCxXj8f872XtjATq/y1Y21vI8yv94NsK1C0zK+FBzxWWnXXQTzYBsNfCoZpox5 7LaXKtnKPSsaucbDlB2ECLqAydp8Q0f2pj0hF3X7mi5NmHEKqKc8T5ROar4D -----END CERTIFICATE----- </ca>
{ "pile_set_name": "Github" }
/**************************************************************************** ** ** Copyright (C) 2017 The Qt Company Ltd. ** Contact: https://www.qt.io/licensing/ ** ** This file is part of the FOO module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:GPL-EXCEPT$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see https://www.qt.io/terms-conditions. For further ** information use the contact form at https://www.qt.io/contact-us. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 3 as published by the Free Software ** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT ** included in the packaging of this file. Please review the following ** information to ensure the GNU General Public License requirements will ** be met: https://www.gnu.org/licenses/gpl-3.0.html. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ function Component() { // constructor component.loaded.connect(this, Component.prototype.loaded); installer.addWizardPage(component, "Page", QInstaller.TargetDirectory) } Component.prototype.createOperations = function() { try { // call the base create operations function component.createOperations(); } catch (e) { console.log(e); } } Component.prototype.loaded = function () { var page = gui.pageByObjectName("DynamicPage"); if (page != null) { page.entered.connect(Component.prototype.dynamicPageEntered); } } Component.prototype.dynamicPageEntered = function () { var pageWidget = gui.pageWidgetByObjectName("DynamicPage"); if (pageWidget != null) { pageWidget.m_pageLabel.text = qsTr("This is a dynamically created page."); } }
{ "pile_set_name": "Github" }
/* * reserved comment block * DO NOT REMOVE OR ALTER! */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sun.org.apache.bcel.internal.generic; /** * Denote entity that refers to an index, e.g. local variable instructions, * RET, CPInstruction, etc. * * @version $Id$ */ public interface IndexedInstruction { int getIndex(); void setIndex( int index ); }
{ "pile_set_name": "Github" }
The PDAL team acknowledges and thanks the following people and companies for their contributions to this project. The open source community appreciates their efforts. Howard Butler Hobu, Inc. howard at hobu.co Michael P. Gerlek Flaxen Geo Consulting mpg at flaxen.com Andrew Bell Hobu, Inc. andrew at hobu.co Paul Ramsey Boundless pramsey at cleverelephant.ca Bradley Chambers Grover Consulting Services brad dot chambers at gmail.com Mateusz Loskot mateusz at loskot.net Chris Foster https://github.com/c42f Pete Gadomski USACE pete dot gadomski at gmail.com Kirk McKelvey LizardTech kmckelvey at lizardtech.com
{ "pile_set_name": "Github" }
let str = "yogendra"; let result = str .split("") .reverse() .join(""); console.log(result); //---------other way--------- let name = "yogendra"; let resTS2 = name.split("").reduceRight((item, acc) => item + acc); //let res = name.split("").reduce((item, acc)=>acc+item); console.log(resTS2);
{ "pile_set_name": "Github" }
/* Copyright 2010-2016 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Reflection; namespace MongoDB.Bson.Serialization.Attributes { /// <summary> /// Specifies the type of the serializer to use for a class. /// </summary> [AttributeUsage(AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Interface | AttributeTargets.Property | AttributeTargets.Field)] public class BsonSerializerAttribute : Attribute, IBsonMemberMapAttribute { // private fields private Type _serializerType; // constructors /// <summary> /// Initializes a new instance of the BsonSerializerAttribute class. /// </summary> public BsonSerializerAttribute() { } /// <summary> /// Initializes a new instance of the BsonSerializerAttribute class. /// </summary> /// <param name="serializerType">The type of the serializer to use for a class.</param> public BsonSerializerAttribute(Type serializerType) { _serializerType = serializerType; } // public properties /// <summary> /// Gets or sets the type of the serializer to use for a class. /// </summary> public Type SerializerType { get { return _serializerType; } set { _serializerType = value; } } // public methods /// <summary> /// Applies a modification to the member map. 
/// </summary> /// <param name="memberMap">The member map.</param> public void Apply(BsonMemberMap memberMap) { var serializer = CreateSerializer(memberMap.MemberType); memberMap.SetSerializer(serializer); } /// <summary> /// Creates a serializer for a type based on the serializer type specified by the attribute. /// </summary> /// <param name="type">The type that a serializer should be created for.</param> /// <returns>A serializer for the type.</returns> internal IBsonSerializer CreateSerializer(Type type) { var typeInfo = type.GetTypeInfo(); if (typeInfo.ContainsGenericParameters) { var message = "Cannot create a serializer because the type to serialize is an open generic type."; throw new InvalidOperationException(message); } var serializerTypeInfo = _serializerType.GetTypeInfo(); if (serializerTypeInfo.ContainsGenericParameters && !typeInfo.IsGenericType) { var message = "Cannot create a serializer because the serializer type is an open generic type and the type to serialize is not generic."; throw new InvalidOperationException(message); } if (serializerTypeInfo.ContainsGenericParameters) { var genericArguments = typeInfo.GetGenericArguments(); var closedSerializerType = _serializerType.MakeGenericType(genericArguments); return (IBsonSerializer)Activator.CreateInstance(closedSerializerType); } else { return (IBsonSerializer)Activator.CreateInstance(_serializerType); } } } }
{ "pile_set_name": "Github" }
.\" $NetBSD: elf_getarsym.3,v 1.2 2014/03/09 16:58:04 christos Exp $ .\" .\" Copyright (c) 2006,2008 Joseph Koshy. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" This software is provided by Joseph Koshy ``as is'' and .\" any express or implied warranties, including, but not limited to, the .\" implied warranties of merchantability and fitness for a particular purpose .\" are disclaimed. in no event shall Joseph Koshy be liable .\" for any direct, indirect, incidental, special, exemplary, or consequential .\" damages (including, but not limited to, procurement of substitute goods .\" or services; loss of use, data, or profits; or business interruption) .\" however caused and on any theory of liability, whether in contract, strict .\" liability, or tort (including negligence or otherwise) arising in any way .\" out of the use of this software, even if advised of the possibility of .\" such damage. .\" .\" Id: elf_getarsym.3 189 2008-07-20 10:38:08Z jkoshy .\" .Dd August 15, 2006 .Os .Dt ELF_GETARSYM 3 .Sh NAME .Nm elf_getarsym .Nd retrieve the symbol table of an archive .Sh LIBRARY .Lb libelf .Sh SYNOPSIS .In libelf.h .Ft "Elf_Arsym *" .Fn elf_getarsym "Elf *elf" "size_t *ptr" .Sh DESCRIPTION The function .Fn elf_getarsym retrieves the symbol table for an .Xr ar 1 archive, if one is available. .Pp Argument .Ar elf should be a descriptor for an .Xr ar 1 archive opened using .Fn elf_begin or .Fn elf_memory . 
.Pp If the archive .Ar elf contains a symbol table with n entries, this function returns a pointer to an array of n+1 .Vt Elf_Arsym structures. An .Vt Elf_Arsym structure has the following elements: .Bl -tag -width indent -compact .It Vt "char *" Va as_name This structure member is a pointer to a null-terminated symbol name. .It Vt "off_t" Va as_off This structure member contains the byte offset from the beginning of the archive to the header for the archive member. This value is suitable for use with .Xr elf_rand 3 . .It Vt "unsigned long" Va as_hash This structure member contains a portable hash value for the symbol name, as computed by .Xr elf_hash 3 . .El .Pp The last entry of the returned array will have a NULL value for member .Va as_name , a zero value for member .Va as_off and an illegal value of ~0UL for .Va as_hash . .Pp If argument .Ar ptr is non-null, the .Fn elf_getarsym function will store the number of table entries returned (including the sentinel entry at the end) into the location it points to. .Sh RETURN VALUES Function .Fn elf_getarsym returns a pointer to an array of .Vt Elf_Arsym structures if successful, or a NULL pointer if an error was encountered. .Pp If argument .Ar ptr is non-null and there was no error, the library will store the number of archive symbol entries returned into the location it points to. If argument .Ar ptr is non-null and an error was encountered, the library will set the location pointed to by it to zero. .Sh ERRORS Function .Fn elf_getarsym may fail with the following errors: .Bl -tag -width "[ELF_E_RESOURCE]" .It Bq Er ELF_E_ARGUMENT Argument .Ar elf was NULL. .It Bq Er ELF_E_ARGUMENT Argument .Ar elf was not a descriptor for an .Xr ar 1 archive. .El .Sh SEE ALSO .Xr elf 3 , .Xr elf_begin 3 , .Xr elf_getarhdr 3 , .Xr elf_hash 3 , .Xr elf_memory 3 , .Xr elf_next 3 , .Xr elf_rand 3
{ "pile_set_name": "Github" }
using System; using System.Collections.Generic; using Xamarin.Forms; using Xamarin.Forms.CustomAttributes; using Xamarin.Forms.Internals; namespace Xamarin.Forms.Controls.Issues { class FrameSubclass : Frame { private double _originalTranslationY; private readonly PanGestureRecognizer _panGesture; public FrameSubclass() { _panGesture = new PanGestureRecognizer(); _panGesture.PanUpdated += PanGesture_PanUpdated; GestureRecognizers.Add(_panGesture); } ~FrameSubclass() { _panGesture.PanUpdated -= PanGesture_PanUpdated; } private void PanGesture_PanUpdated(object sender, PanUpdatedEventArgs e) { switch (e.StatusType) { case GestureStatus.Started: _originalTranslationY = TranslationY; break; case GestureStatus.Running: TranslationY = _originalTranslationY + e.TotalY; break; case GestureStatus.Canceled: _originalTranslationY = 0; break; } } } [Preserve(AllMembers = true)] [Issue(IssueTracker.Github, 10422, "[iOS] Frame render issue using TranslationY", PlatformAffected.iOS)] public partial class Issue10422 : ContentPage { public Issue10422() { #if APP InitializeComponent (); #endif } } }
{ "pile_set_name": "Github" }
<?xml version="1.0"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <modelVersion>4.0.0</modelVersion> <artifactId>demo-zeroconfig-schemadiscovery-registry-client</artifactId> <name>Java Chassis::Demo::ZeroConfig-SchemaDiscovery Registry Client</name> <parent> <groupId>org.apache.servicecomb.demo</groupId> <artifactId>demo-zeroconfig-schemadiscovery-registry</artifactId> <version>2.1.2-SNAPSHOT</version> </parent> <properties> <demo.main>org.apache.servicecomb.demo.zeroconfig.client.ClientApplication</demo.main> </properties> <dependencies> <dependency> <groupId>javax.ws.rs</groupId> <artifactId>javax.ws.rs-api</artifactId> </dependency> <dependency> <groupId>org.apache.servicecomb</groupId> <artifactId>java-chassis-spring-boot-starter-servlet</artifactId> </dependency> <dependency> <groupId>org.apache.servicecomb</groupId> <artifactId>registry-schema-discovery</artifactId> </dependency> <dependency> <groupId>org.apache.servicecomb</groupId> <artifactId>registry-zero-config</artifactId> </dependency> <dependency> 
<groupId>org.apache.servicecomb.demo</groupId> <artifactId>demo-schema</artifactId> </dependency> </dependencies> <build> <plugins> <plugin> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-maven-plugin</artifactId> </plugin> <plugin> <groupId>com.github.odavid.maven.plugins</groupId> <artifactId>mixin-maven-plugin</artifactId> <configuration> <mixins> <mixin> <groupId>org.apache.servicecomb.demo</groupId> <artifactId>docker-build-config</artifactId> <version>${project.version}</version> </mixin> </mixins> </configuration> </plugin> </plugins> </build> <profiles> <profile> <id>docker</id> <build> <plugins> <plugin> <groupId>io.fabric8</groupId> <artifactId>docker-maven-plugin</artifactId> </plugin> <plugin> <groupId>org.commonjava.maven.plugins</groupId> <artifactId>directory-maven-plugin</artifactId> </plugin> </plugins> </build> </profile> </profiles> </project>
{ "pile_set_name": "Github" }
#!/bin/sh cat << EOF $DAVFS2_HOST1 $DAVFS2_USER1 $DAVFS2_PASS1 EOF
{ "pile_set_name": "Github" }
{ "delegatecallAndOOGatTxLevel" : { "_info" : { "comment" : "", "filling-rpc-server" : "Geth-1.9.6-unstable-63b18027-20190920", "filling-tool-version" : "retesteth-0.0.1+commit.0ae18aef.Linux.g++", "lllcversion" : "Version: 0.5.12-develop.2019.9.13+commit.2d601a4f.Linux.g++", "source" : "src/GeneralStateTestsFiller/stDelegatecallTestHomestead/delegatecallAndOOGatTxLevelFiller.json", "sourceHash" : "19894e4bc30dff2a78b120dff216766216160d9f3a64d1ba1568818c574bdbdb" }, "env" : { "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", "currentDifficulty" : "0x020000", "currentGasLimit" : "0x01c9c380", "currentNumber" : "0x01", "currentTimestamp" : "0x03e8", "previousHash" : "0x5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6" }, "post" : { "Istanbul" : [ { "indexes" : { "data" : 0, "gas" : 0, "value" : 0 }, "hash" : "0xa6c348fa5d4f200b3a7fde1be4c9f780467d1aac16e59f7e06a1f8a06200e27f", "logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347" } ] }, "pre" : { "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "0x0186a0", "code" : "0x600060006000600073945304eb96065b2a98b57a48a06ae28d285a71b5622dc6c1f460005500", "nonce" : "0x00", "storage" : { } }, "0x945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "0x17", "code" : "0x6001600155603760005360026000f3", "nonce" : "0x00", "storage" : { } }, "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "0x0de0b6b3a7640000", "code" : "0x", "nonce" : "0x00", "storage" : { } } }, "transaction" : { "data" : [ "0x" ], "gasLimit" : [ "0x2dc6c0" ], "gasPrice" : "0x01", "nonce" : "0x00", "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", "value" : [ "0x00" ] } } }
{ "pile_set_name": "Github" }
//
//  WTMyFollowingItem.m
//  v2ex
//
//  Created by 无头骑士 GJ on 16/8/9.
//  Copyright © 2016年 无头骑士 GJ. All rights reserved.
//

#import "WTMyFollowingItem.h"

// Presumably a model object backing an entry of the "my following" list
// (naming suggests the V2EX "my following" feed) — confirm against callers.
// The implementation is intentionally empty: any properties are declared in
// WTMyFollowingItem.h and auto-synthesized by the compiler.
@implementation WTMyFollowingItem

@end
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2010-2017 FBReader.ORG Limited <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

package org.geometerplus.android.fbreader.network.action;

import java.util.*;
import java.io.File;

import android.app.AlertDialog;
import android.app.Activity;
import android.net.Uri;
import android.content.Intent;
import android.content.DialogInterface;

import org.geometerplus.zlibrary.core.resources.ZLResource;
import org.geometerplus.zlibrary.ui.android.R;

import org.geometerplus.android.fbreader.FBReader;

import org.geometerplus.fbreader.book.IBookCollection;
import org.geometerplus.fbreader.network.*;
import org.geometerplus.fbreader.network.tree.NetworkBookTree;
import org.geometerplus.fbreader.network.tree.BasketCatalogTree;
import org.geometerplus.fbreader.network.urlInfo.*;

import org.geometerplus.android.fbreader.network.*;
import org.geometerplus.android.fbreader.network.auth.ActivityNetworkContext;

/**
 * Static helpers that build and execute the per-book actions of the network
 * library UI: download/read/delete for full books and demos, buy (directly or
 * in a browser), and basket add/remove/open.  {@link NBAction} wraps one such
 * action so it can appear in a context menu.
 */
public abstract class NetworkBookActions {
	// True when the item offers a full-book reference (unconditional or conditional).
	private static boolean useFullReferences(NetworkBookItem book) {
		return book.reference(UrlInfo.Type.Book) != null ||
			book.reference(UrlInfo.Type.BookConditional) != null;
	}

	// True when only a demo makes sense: a demo reference exists, no local copy
	// of the full book is present, and no full-book reference is available.
	private static boolean useDemoReferences(NetworkBookItem book, IBookCollection collection) {
		return book.reference(UrlInfo.Type.BookDemo) != null &&
			book.localCopyFileName(collection) == null &&
			book.reference(UrlInfo.Type.Book) == null;
	}

	/**
	 * A context-menu action bound to one action code.  Delegates execution to
	 * {@link #runActionStatic}; the optional {@code arg} is substituted for
	 * "%s" in the action's label (used to show the price on "buy").
	 */
	public static class NBAction extends BookAction {
		private final IBookCollection myCollection;
		private final int myId;
		private final String myArg;

		// Convenience constructor for label keys that take no "%s" argument.
		public NBAction(Activity activity, IBookCollection collection, int id, String key) {
			this(activity, collection, id, key, null);
		}

		public NBAction(Activity activity, IBookCollection collection, int id, String key, String arg) {
			super(activity, id, key);
			myCollection = collection;
			myId = id;
			myArg = arg;
		}

		@Override
		public boolean isEnabled(NetworkTree tree) {
			// Negative ids mark placeholder entries (e.g. "already downloading").
			return myId >= 0;
		}

		@Override
		public String getContextLabel(NetworkTree tree) {
			final String base = super.getContextLabel(tree);
			return myArg == null ? base : base.replace("%s", myArg);
		}

		@Override
		public void run(NetworkTree tree) {
			runActionStatic(myActivity, (NetworkBookTree)tree, myId, myCollection);
		}
	}

	/**
	 * Returns the status icon resource for {@code book}, or 0 for no icon:
	 * downloading-in-progress, locally available, downloadable, or purchasable.
	 */
	public static int getBookStatus(NetworkBookItem book, IBookCollection collection, BookDownloaderServiceConnection connection) {
		if (useFullReferences(book)) {
			final BookUrlInfo reference = book.reference(UrlInfo.Type.Book);
			if (reference != null
					&& connection != null && connection.isBeingDownloaded(reference.Url)) {
				return R.drawable.ic_list_downloading;
			} else if (book.localCopyFileName(collection) != null) {
				return R.drawable.ic_list_flag;
			} else if (reference != null) {
				return R.drawable.ic_list_download;
			}
		}
		if (book.getStatus(collection) == NetworkBookItem.Status.CanBePurchased) {
			return R.drawable.ic_list_buy;
		}
		return 0;
	}

	/**
	 * Builds the ordered list of context-menu actions applicable to the book
	 * in {@code tree}: full-book actions first, then demo actions, then
	 * purchase/basket actions for purchasable books.
	 *
	 * @throws IllegalArgumentException if {@code tree} is null
	 */
	public static List<NBAction> getContextMenuActions(Activity activity, NetworkBookTree tree, IBookCollection collection, BookDownloaderServiceConnection connection) {
		if (tree == null) {
			throw new IllegalArgumentException("tree == null");
		}
		final NetworkBookItem book = tree.Book;
		List<NBAction> actions = new LinkedList<NBAction>();
		if (useFullReferences(book)) {
			final BookUrlInfo reference = book.reference(UrlInfo.Type.Book);
			if (reference != null
					&& connection != null && connection.isBeingDownloaded(reference.Url)) {
				// TREE_NO_ACTION is negative, so this entry renders disabled.
				actions.add(new NBAction(activity, collection, ActionCode.TREE_NO_ACTION, "alreadyDownloading"));
			} else if (book.localCopyFileName(collection) != null) {
				actions.add(new NBAction(activity, collection, ActionCode.READ_BOOK, "read"));
				actions.add(new NBAction(activity, collection, ActionCode.DELETE_BOOK, "delete"));
			} else if (reference != null) {
				actions.add(new NBAction(activity, collection, ActionCode.DOWNLOAD_BOOK, "download"));
			}
		}
		if (useDemoReferences(book, collection)) {
			// Non-null: guaranteed by useDemoReferences() above.
			final BookUrlInfo reference = book.reference(UrlInfo.Type.BookDemo);
			if (connection != null && connection.isBeingDownloaded(reference.Url)) {
				actions.add(new NBAction(activity, collection, ActionCode.TREE_NO_ACTION, "alreadyDownloadingDemo"));
			} else if (reference.localCopyFileName(UrlInfo.Type.BookDemo) != null) {
				actions.add(new NBAction(activity, collection, ActionCode.READ_DEMO, "readDemo"));
				actions.add(new NBAction(activity, collection, ActionCode.DELETE_DEMO, "deleteDemo"));
			} else {
				actions.add(new NBAction(activity, collection, ActionCode.DOWNLOAD_DEMO, "downloadDemo"));
			}
		}
		if (book.getStatus(collection) == NetworkBookItem.Status.CanBePurchased) {
			// NOTE(review): buyInfo() is dereferenced without a null check here;
			// presumably CanBePurchased implies a buy reference exists — confirm.
			final BookBuyUrlInfo reference = book.buyInfo();
			final int id = reference.InfoType == UrlInfo.Type.BookBuy
				? ActionCode.BUY_DIRECTLY : ActionCode.BUY_IN_BROWSER;
			final String priceString = reference.Price != null
				? String.valueOf(reference.Price) : "";
			actions.add(new NBAction(activity, collection, id, "buy", priceString));
			final BasketItem basketItem = book.Link.getBasketItem();
			if (basketItem != null) {
				if (basketItem.contains(book)) {
					// Inside the basket view (or main library activity) offer removal;
					// elsewhere offer navigation to the basket instead.
					if (tree.Parent instanceof BasketCatalogTree ||
						activity instanceof NetworkLibraryActivity) {
						actions.add(new NBAction(activity, collection, ActionCode.REMOVE_BOOK_FROM_BASKET, "removeFromBasket"));
					} else {
						actions.add(new NBAction(activity, collection, ActionCode.OPEN_BASKET, "openBasket"));
					}
				} else {
					actions.add(new NBAction(activity, collection, ActionCode.ADD_BOOK_TO_BASKET, "addToBasket"));
				}
			}
		}
		return actions;
	}

	/**
	 * Dispatches {@code actionCode} for the book in {@code tree}.
	 *
	 * @return true if the code was recognized and the action was started
	 */
	private static boolean runActionStatic(Activity activity, NetworkBookTree tree, int actionCode, IBookCollection collection) {
		final NetworkBookItem book = tree.Book;
		switch (actionCode) {
			case ActionCode.DOWNLOAD_BOOK:
				Util.doDownloadBook(activity, book, false);
				return true;
			case ActionCode.DOWNLOAD_DEMO:
				Util.doDownloadBook(activity, book, true);
				return true;
			case ActionCode.READ_BOOK:
				doReadBook(activity, book, collection, false);
				return true;
			case ActionCode.READ_DEMO:
				doReadBook(activity, book, collection, true);
				return true;
			case ActionCode.DELETE_BOOK:
				tryToDeleteBook(activity, book, false);
				return true;
			case ActionCode.DELETE_DEMO:
				tryToDeleteBook(activity, book, true);
				return true;
			case ActionCode.BUY_DIRECTLY:
				doBuyDirectly(activity, tree);
				return true;
			case ActionCode.BUY_IN_BROWSER:
				doBuyInBrowser(activity, book);
				return true;
			case ActionCode.ADD_BOOK_TO_BASKET:
				book.Link.getBasketItem().add(book);
				return true;
			case ActionCode.REMOVE_BOOK_FROM_BASKET:
				book.Link.getBasketItem().remove(book);
				return true;
			case ActionCode.OPEN_BASKET:
				new OpenCatalogAction(activity, new ActivityNetworkContext(activity))
					.run(Util.networkLibrary(activity).getFakeBasketTree(book.Link.getBasketItem()));
				return true;
		}
		return false;
	}

	// Opens the local copy of the book (or of its demo) in FBReader, if one
	// exists; silently does nothing when no local file is found.
	private static void doReadBook(Activity activity, final NetworkBookItem book, IBookCollection collection, boolean demo) {
		String local = null;
		if (!demo) {
			local = book.localCopyFileName(collection);
		} else {
			final BookUrlInfo reference = book.reference(UrlInfo.Type.BookDemo);
			if (reference != null) {
				local = reference.localCopyFileName(UrlInfo.Type.BookDemo);
			}
		}
		if (local != null) {
			activity.startActivity(
				new Intent(Intent.ACTION_VIEW, Uri.fromFile(new File(local)),
					activity.getApplicationContext(), FBReader.class
				).addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP | Intent.FLAG_ACTIVITY_NEW_TASK)
			);
		}
	}

	// Shows a yes/no confirmation dialog; on "yes" deletes the local file(s)
	// of the book (or just the demo file) and notifies the library model.
	private static void tryToDeleteBook(final Activity activity, final NetworkBookItem book, final boolean demo) {
		final ZLResource dialogResource = ZLResource.resource("dialog");
		final ZLResource buttonResource = dialogResource.getResource("button");
		final ZLResource boxResource = dialogResource.getResource("deleteBookBox");
		new AlertDialog.Builder(activity)
			.setTitle(book.Title)
			.setMessage(boxResource.getResource("message").getValue())
			.setIcon(0)
			.setPositiveButton(buttonResource.getResource("yes").getValue(), new DialogInterface.OnClickListener() {
				public void onClick(DialogInterface dialog, int which) {
					// TODO: remove information about book from Library???
					if (!demo) {
						book.removeLocalFiles();
					} else {
						final BookUrlInfo reference = book.reference(UrlInfo.Type.BookDemo);
						if (reference != null) {
							final String fileName = reference.localCopyFileName(UrlInfo.Type.BookDemo);
							if (fileName != null) {
								new File(fileName).delete();
							}
						}
					}
					// Refresh library views after the deletion.
					Util.networkLibrary(activity).fireModelChangedEvent(NetworkLibrary.ChangeListener.Code.SomeCode);
				}
			})
			.setNegativeButton(buttonResource.getResource("no").getValue(), null)
			.create().show();
	}

	// Starts the in-app purchase flow.
	private static void doBuyDirectly(Activity activity, NetworkBookTree tree) {
		BuyBooksActivity.run(activity, tree);
	}

	// Opens the book's buy-in-browser URL, if the catalog provides one.
	private static void doBuyInBrowser(Activity activity, final NetworkBookItem book) {
		BookUrlInfo reference = book.reference(UrlInfo.Type.BookBuyInBrowser);
		if (reference != null) {
			Util.openInBrowser(activity, reference.Url);
		}
	}
}
{ "pile_set_name": "Github" }
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import with_statement # kindlekey.py # Copyright © 2010-2013 by some_updates and Apprentice Alf # # Currently requires alfcrypto.py which requires the alfcrypto library # Revision history: # 1.0 - Kindle info file decryption, extracted from k4mobidedrm, etc. # 1.1 - Added Tkinter to match adobekey.py # 1.2 - Fixed testing of successful retrieval on Mac # 1.3 - Added getkey interface for Windows DeDRM application # Simplified some of the Kindle for Mac code. # 1.4 - Remove dependency on alfcrypto # 1.5 - moved unicode_argv call inside main for Windows DeDRM compatibility # 1.6 - Fixed a problem getting the disk serial numbers # 1.7 - Work if TkInter is missing # 1.8 - Fixes for Kindle for Mac, and non-ascii in Windows user names # 1.9 - Fixes for Unicode in Windows user names """ Retrieve Kindle for PC/Mac user key. """ __license__ = 'GPL v3' __version__ = '1.9' import sys, os, re from struct import pack, unpack, unpack_from import json import getopt # Routines common to Mac and PC # Wrap a stream so that output gets flushed immediately # and also make sure that any unicode strings get # encoded using "replace" before writing them. class SafeUnbuffered: def __init__(self, stream): self.stream = stream self.encoding = stream.encoding if self.encoding == None: self.encoding = "utf-8" def write(self, data): if isinstance(data,unicode): data = data.encode(self.encoding,"replace") self.stream.write(data) self.stream.flush() def __getattr__(self, attr): return getattr(self.stream, attr) try: from calibre.constants import iswindows, isosx except: iswindows = sys.platform.startswith('win') isosx = sys.platform.startswith('darwin') def unicode_argv(): if iswindows: # Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode # strings. # Versions 2.x of Python don't support Unicode in sys.argv on # Windows, with the underlying Windows API instead replacing multi-byte # characters with '?'. 
So use shell32.GetCommandLineArgvW to get sys.argv # as a list of Unicode strings and encode them as utf-8 from ctypes import POINTER, byref, cdll, c_int, windll from ctypes.wintypes import LPCWSTR, LPWSTR GetCommandLineW = cdll.kernel32.GetCommandLineW GetCommandLineW.argtypes = [] GetCommandLineW.restype = LPCWSTR CommandLineToArgvW = windll.shell32.CommandLineToArgvW CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)] CommandLineToArgvW.restype = POINTER(LPWSTR) cmd = GetCommandLineW() argc = c_int(0) argv = CommandLineToArgvW(cmd, byref(argc)) if argc.value > 0: # Remove Python executable and commands if present start = argc.value - len(sys.argv) return [argv[i] for i in xrange(start, argc.value)] # if we don't have any arguments at all, just pass back script name # this should never happen return [u"kindlekey.py"] else: argvencoding = sys.stdin.encoding if argvencoding == None: argvencoding = "utf-8" return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv] class DrmException(Exception): pass # crypto digestroutines import hashlib def MD5(message): ctx = hashlib.md5() ctx.update(message) return ctx.digest() def SHA1(message): ctx = hashlib.sha1() ctx.update(message) return ctx.digest() def SHA256(message): ctx = hashlib.sha256() ctx.update(message) return ctx.digest() # For K4M/PC 1.6.X and later # generate table of prime number less than or equal to int n def primes(n): if n==2: return [2] elif n<2: return [] s=range(3,n+1,2) mroot = n ** 0.5 half=(n+1)/2-1 i=0 m=3 while m <= mroot: if s[i]: j=(m*m-3)/2 s[j]=0 while j<half: s[j]=0 j+=m i=i+1 m=2*i+3 return [2]+[x for x in s if x] # Encode the bytes in data with the characters in map def encode(data, map): result = '' for char in data: value = ord(char) Q = (value ^ 0x80) // len(map) R = value % len(map) result += map[Q] result += map[R] return result # Hash the bytes in data and then encode the digest with the characters in map def encodeHash(data,map): return 
encode(MD5(data),map) # Decode the string in data with the characters in map. Returns the decoded bytes def decode(data,map): result = '' for i in range (0,len(data)-1,2): high = map.find(data[i]) low = map.find(data[i+1]) if (high == -1) or (low == -1) : break value = (((high * len(map)) ^ 0x80) & 0xFF) + low result += pack('B',value) return result # Routines unique to Mac and PC if iswindows: from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \ create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \ string_at, Structure, c_void_p, cast import _winreg as winreg MAX_PATH = 255 kernel32 = windll.kernel32 advapi32 = windll.advapi32 crypt32 = windll.crypt32 try: # try to get fast routines from alfcrypto from alfcrypto import AES_CBC, KeyIVGen except: # alfcrypto not available, so use python implementations """ Routines for doing AES CBC in one file Modified by some_updates to extract and combine only those parts needed for AES CBC into one simple to add python file Original Version Copyright (c) 2002 by Paul A. Lambert Under: CryptoPy Artisitic License Version 1.0 See the wonderful pure python package cryptopy-1.2.5 and read its LICENSE.txt for complete license details. 
""" class CryptoError(Exception): """ Base class for crypto exceptions """ def __init__(self,errorMessage='Error!'): self.message = errorMessage def __str__(self): return self.message class InitCryptoError(CryptoError): """ Crypto errors during algorithm initialization """ class BadKeySizeError(InitCryptoError): """ Bad key size error """ class EncryptError(CryptoError): """ Error in encryption processing """ class DecryptError(CryptoError): """ Error in decryption processing """ class DecryptNotBlockAlignedError(DecryptError): """ Error in decryption processing """ def xorS(a,b): """ XOR two strings """ assert len(a)==len(b) x = [] for i in range(len(a)): x.append( chr(ord(a[i])^ord(b[i]))) return ''.join(x) def xor(a,b): """ XOR two strings """ x = [] for i in range(min(len(a),len(b))): x.append( chr(ord(a[i])^ord(b[i]))) return ''.join(x) """ Base 'BlockCipher' and Pad classes for cipher instances. BlockCipher supports automatic padding and type conversion. The BlockCipher class was written to make the actual algorithm code more readable and not for performance. 
""" class BlockCipher: """ Block ciphers """ def __init__(self): self.reset() def reset(self): self.resetEncrypt() self.resetDecrypt() def resetEncrypt(self): self.encryptBlockCount = 0 self.bytesToEncrypt = '' def resetDecrypt(self): self.decryptBlockCount = 0 self.bytesToDecrypt = '' def encrypt(self, plainText, more = None): """ Encrypt a string and return a binary string """ self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize) cipherText = '' for i in range(numBlocks): bStart = i*self.blockSize ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize]) self.encryptBlockCount += 1 cipherText += ctBlock if numExtraBytes > 0: # save any bytes that are not block aligned self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:] else: self.bytesToEncrypt = '' if more == None: # no more data expected from caller finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize) if len(finalBytes) > 0: ctBlock = self.encryptBlock(finalBytes) self.encryptBlockCount += 1 cipherText += ctBlock self.resetEncrypt() return cipherText def decrypt(self, cipherText, more = None): """ Decrypt a string and return a string """ self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize) if more == None: # no more calls to decrypt, should have all the data if numExtraBytes != 0: raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt' # hold back some bytes in case last decrypt has zero len if (more != None) and (numExtraBytes == 0) and (numBlocks >0) : numBlocks -= 1 numExtraBytes = self.blockSize plainText = '' for i in range(numBlocks): bStart = i*self.blockSize ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize]) self.decryptBlockCount += 1 plainText += ptBlock if numExtraBytes > 0: # save any bytes that are not block 
aligned self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:] else: self.bytesToEncrypt = '' if more == None: # last decrypt remove padding plainText = self.padding.removePad(plainText, self.blockSize) self.resetDecrypt() return plainText class Pad: def __init__(self): pass # eventually could put in calculation of min and max size extension class padWithPadLen(Pad): """ Pad a binary string with the length of the padding """ def addPad(self, extraBytes, blockSize): """ Add padding to a binary string to make it an even multiple of the block size """ blocks, numExtraBytes = divmod(len(extraBytes), blockSize) padLength = blockSize - numExtraBytes return extraBytes + padLength*chr(padLength) def removePad(self, paddedBinaryString, blockSize): """ Remove padding from a binary string """ if not(0<len(paddedBinaryString)): raise DecryptNotBlockAlignedError, 'Expected More Data' return paddedBinaryString[:-ord(paddedBinaryString[-1])] class noPadding(Pad): """ No padding. Use this to get ECB behavior from encrypt/decrypt """ def addPad(self, extraBytes, blockSize): """ Add no padding """ return extraBytes def removePad(self, paddedBinaryString, blockSize): """ Remove no padding """ return paddedBinaryString """ Rijndael encryption algorithm This byte oriented implementation is intended to closely match FIPS specification for readability. It is not implemented for performance. 
""" class Rijndael(BlockCipher): """ Rijndael encryption algorithm """ def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ): self.name = 'RIJNDAEL' self.keySize = keySize self.strength = keySize*8 self.blockSize = blockSize # blockSize is in bytes self.padding = padding # change default to noPadding() to get normal ECB behavior assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,29 or 32 bytes' assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,29 or 32 bytes' self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words self.Nk = keySize/4 # Nk is the key length in 32-bit words self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of # the block (Nb) and key (Nk) sizes. if key != None: self.setKey(key) def setKey(self, key): """ Set a key and generate the expanded key """ assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter' self.__expandedKey = keyExpansion(self, key) self.reset() # BlockCipher.reset() def encryptBlock(self, plainTextBlock): """ Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """ self.state = self._toBlock(plainTextBlock) AddRoundKey(self, self.__expandedKey[0:self.Nb]) for round in range(1,self.Nr): #for round = 1 step 1 to Nr SubBytes(self) ShiftRows(self) MixColumns(self) AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb]) SubBytes(self) ShiftRows(self) AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb]) return self._toBString(self.state) def decryptBlock(self, encryptedBlock): """ decrypt a block (array of bytes) """ self.state = self._toBlock(encryptedBlock) AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb]) for round in range(self.Nr-1,0,-1): InvShiftRows(self) InvSubBytes(self) AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb]) InvMixColumns(self) InvShiftRows(self) 
InvSubBytes(self) AddRoundKey(self, self.__expandedKey[0:self.Nb]) return self._toBString(self.state) def _toBlock(self, bs): """ Convert binary string to array of bytes, state[col][row]""" assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize' return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)] def _toBString(self, block): """ Convert block (array of bytes) to binary string """ l = [] for col in block: for rowElement in col: l.append(chr(rowElement)) return ''.join(l) #------------------------------------- """ Number of rounds Nr = NrTable[Nb][Nk] Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8 ------------------------------------- """ NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14}, 5: {4:11, 5:11, 6:12, 7:13, 8:14}, 6: {4:12, 5:12, 6:12, 7:13, 8:14}, 7: {4:13, 5:13, 6:13, 7:13, 8:14}, 8: {4:14, 5:14, 6:14, 7:14, 8:14}} #------------------------------------- def keyExpansion(algInstance, keyString): """ Expand a string of size keySize into a larger array """ Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability key = [ord(byte) for byte in keyString] # convert string to list w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)] for i in range(Nk,Nb*(Nr+1)): temp = w[i-1] # a four byte column if (i%Nk) == 0 : temp = temp[1:]+[temp[0]] # RotWord(temp) temp = [ Sbox[byte] for byte in temp ] temp[0] ^= Rcon[i/Nk] elif Nk > 6 and i%Nk == 4 : temp = [ Sbox[byte] for byte in temp ] # SubWord(temp) w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] ) return w Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!! 
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6, 0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91) #------------------------------------- def AddRoundKey(algInstance, keyBlock): """ XOR the algorithm state with a block of key material """ for column in range(algInstance.Nb): for row in range(4): algInstance.state[column][row] ^= keyBlock[column][row] #------------------------------------- def SubBytes(algInstance): for column in range(algInstance.Nb): for row in range(4): algInstance.state[column][row] = Sbox[algInstance.state[column][row]] def InvSubBytes(algInstance): for column in range(algInstance.Nb): for row in range(4): algInstance.state[column][row] = InvSbox[algInstance.state[column][row]] Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5, 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0, 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc, 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a, 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0, 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b, 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85, 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5, 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17, 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88, 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c, 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9, 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6, 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e, 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94, 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68, 
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16) InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38, 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb, 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87, 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb, 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d, 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e, 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2, 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25, 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16, 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92, 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda, 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84, 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a, 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06, 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02, 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b, 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea, 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73, 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85, 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e, 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89, 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b, 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20, 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4, 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31, 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f, 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d, 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef, 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0, 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61, 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26, 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d) #------------------------------------- """ For each block size (Nb), the ShiftRow operation shifts row i by the amount Ci. Note that row 0 is not shifted. 
Nb C1 C2 C3 ------------------- """ shiftOffset = { 4 : ( 0, 1, 2, 3), 5 : ( 0, 1, 2, 3), 6 : ( 0, 1, 2, 3), 7 : ( 0, 1, 2, 4), 8 : ( 0, 1, 3, 4) } def ShiftRows(algInstance): tmp = [0]*algInstance.Nb # list of size Nb for r in range(1,4): # row 0 reamains unchanged and can be skipped for c in range(algInstance.Nb): tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r] for c in range(algInstance.Nb): algInstance.state[c][r] = tmp[c] def InvShiftRows(algInstance): tmp = [0]*algInstance.Nb # list of size Nb for r in range(1,4): # row 0 reamains unchanged and can be skipped for c in range(algInstance.Nb): tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r] for c in range(algInstance.Nb): algInstance.state[c][r] = tmp[c] #------------------------------------- def MixColumns(a): Sprime = [0,0,0,0] for j in range(a.Nb): # for each column Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3]) Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3]) Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3]) Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3]) for i in range(4): a.state[j][i] = Sprime[i] def InvMixColumns(a): """ Mix the four bytes of every column in a linear way This is the opposite operation of Mixcolumn """ Sprime = [0,0,0,0] for j in range(a.Nb): # for each column Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3]) Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3]) Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3]) Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3]) for i in range(4): a.state[j][i] = 
Sprime[i] #------------------------------------- def mul(a, b): """ Multiply two elements of GF(2^m) needed for MixColumn and InvMixColumn """ if (a !=0 and b!=0): return Alogtable[(Logtable[a] + Logtable[b])%255] else: return 0 Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3, 100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193, 125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120, 101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142, 150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56, 102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16, 126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186, 43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87, 175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232, 44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160, 127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183, 204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157, 151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209, 83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171, 68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165, 103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7) Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170, 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49, 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205, 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, 131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, 181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163, 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 
160, 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, 195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, 159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, 155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84, 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202, 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1) """ AES Encryption Algorithm The AES algorithm is just Rijndael algorithm restricted to the default blockSize of 128 bits. """ class AES(Rijndael): """ The AES algorithm is the Rijndael block cipher restricted to block sizes of 128 bits and key sizes of 128, 192 or 256 bits """ def __init__(self, key = None, padding = padWithPadLen(), keySize=16): """ Initialize AES, keySize is in bytes """ if not (keySize == 16 or keySize == 24 or keySize == 32) : raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes' Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 ) self.name = 'AES' """ CBC mode of encryption for block ciphers. This algorithm mode wraps any BlockCipher to make a Cipher Block Chaining mode. """ from random import Random # should change to crypto.random!!! class CBC(BlockCipher): """ The CBC class wraps block ciphers to make cipher block chaining (CBC) mode algorithms. The initialization (IV) is automatic if set to None. 
            Padding is also automatic based on the Pad class used to initialize the algorithm
        """
        def __init__(self, blockCipherInstance, padding = padWithPadLen()):
            """ CBC algorithms are created by initializing with a BlockCipher instance """
            # Wrap the base cipher and mirror its geometry; the CBC layer owns
            # padding, so the wrapped cipher must be told NOT to pad.
            self.baseCipher = blockCipherInstance
            self.name = self.baseCipher.name + '_CBC'
            self.blockSize = self.baseCipher.blockSize
            self.keySize = self.baseCipher.keySize
            self.padding = padding
            self.baseCipher.padding = noPadding()   # baseCipher should NOT pad!!
            self.r = Random()                       # for IV generation, currently uses
                                                    # mediocre standard distro version <----------------
            import time
            newSeed = time.ctime()+str(self.r)      # seed with instance location
            self.r.seed(newSeed)                    # to make unique
            self.reset()

        def setKey(self, key):
            # Key belongs to the wrapped cipher; CBC itself is keyless.
            self.baseCipher.setKey(key)

        # Overload to reset both CBC state and the wrapped baseCipher
        def resetEncrypt(self):
            BlockCipher.resetEncrypt(self)  # reset CBC encrypt state (super class)
            self.baseCipher.resetEncrypt()  # reset base cipher encrypt state

        def resetDecrypt(self):
            BlockCipher.resetDecrypt(self)  # reset CBC state (super class)
            self.baseCipher.resetDecrypt()  # reset base cipher decrypt state

        def encrypt(self, plainText, iv=None, more=None):
            """ CBC encryption - overloads baseCipher to allow optional explicit IV
                when iv=None, iv is auto generated!
            """
            # The IV may only be supplied on the very first call of a stream;
            # later calls (more=...) must not re-specify it.
            if self.encryptBlockCount == 0:
                self.iv = iv
            else:
                assert(iv==None), 'IV used only on first call to encrypt'

            return BlockCipher.encrypt(self,plainText, more=more)

        def decrypt(self, cipherText, iv=None, more=None):
            """ CBC decryption - overloads baseCipher to allow optional explicit IV
                when iv=None, iv is auto generated!
""" if self.decryptBlockCount == 0: self.iv = iv else: assert(iv==None), 'IV used only on first call to decrypt' return BlockCipher.decrypt(self, cipherText, more=more) def encryptBlock(self, plainTextBlock): """ CBC block encryption, IV is set with 'encrypt' """ auto_IV = '' if self.encryptBlockCount == 0: if self.iv == None: # generate IV and use self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)]) self.prior_encr_CT_block = self.iv auto_IV = self.prior_encr_CT_block # prepend IV if it's automatic else: # application provided IV assert(len(self.iv) == self.blockSize ),'IV must be same length as block' self.prior_encr_CT_block = self.iv """ encrypt the prior CT XORed with the PT """ ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) ) self.prior_encr_CT_block = ct return auto_IV+ct def decryptBlock(self, encryptedBlock): """ Decrypt a single block """ if self.decryptBlockCount == 0: # first call, process IV if self.iv == None: # auto decrypt IV? 
                    # First ciphertext block IS the (auto-prepended) IV; consume
                    # it and emit no plaintext for this block.
                    self.prior_CT_block = encryptedBlock
                    return ''
                else:
                    assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
                    self.prior_CT_block = self.iv

            dct = self.baseCipher.decryptBlock(encryptedBlock)
            """ XOR the prior decrypted CT with the prior CT """
            # Classic CBC unchaining: P[i] = D(C[i]) XOR C[i-1].
            dct_XOR_priorCT = xor( self.prior_CT_block, dct )

            self.prior_CT_block = encryptedBlock

            return dct_XOR_priorCT

    """ AES_CBC Encryption Algorithm """

    class aescbc_AES_CBC(CBC):
        """ AES encryption in CBC feedback mode """
        def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
            # CBC wrapper around an unpadded AES instance; this layer pads.
            CBC.__init__( self, AES(key, noPadding(), keySize), padding)
            self.name = 'AES_CBC'

    class AES_CBC(object):
        # Thin convenience facade over aescbc_AES_CBC with an explicit IV
        # prepended to the ciphertext on decrypt.
        def __init__(self):
            self._key = None
            self._iv = None
            self.aes = None

        def set_decrypt_key(self, userkey, iv):
            # Key size is inferred from the key's own length (16/24/32 bytes).
            self._key = userkey
            self._iv = iv
            self.aes = aescbc_AES_CBC(userkey, noPadding(), len(userkey))

        def decrypt(self, data):
            # Feed iv+data so the CBC layer picks the IV up as the first block.
            iv = self._iv
            cleartext = self.aes.decrypt(iv + data)
            return cleartext

    import hmac

    class KeyIVGen(object):
        # this only exists in openssl so we will use pure python implementation instead
        # PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
        #                [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
        def pbkdf2(self, passwd, salt, iter, keylen):
            # Pure-Python PBKDF2-HMAC-SHA1 (RFC 2898): derive keylen bytes from
            # passwd/salt over `iter` iterations.

            def xorstr( a, b ):
                # Byte-wise XOR of two equal-length byte strings.
                if len(a) != len(b):
                    raise Exception("xorstr(): lengths differ")
                return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))

            def prf( h, data ):
                # One HMAC invocation on a copy, so `h` keeps its keyed state.
                hm = h.copy()
                hm.update( data )
                return hm.digest()

            def pbkdf2_F( h, salt, itercount, blocknum ):
                # The F() block function: XOR-fold `itercount` chained PRF outputs,
                # starting from salt || INT_32_BE(blocknum).
                U = prf( h, salt + pack('>i',blocknum ) )
                T = U
                for i in range(2, itercount+1):
                    U = prf( h, U )
                    T = xorstr( T, U )
                return T

            sha = hashlib.sha1
            digest_size = sha().digest_size
            # l - number of output blocks to produce
            l = keylen / digest_size
            if keylen % digest_size != 0:
                l += 1
            h = hmac.new( passwd, None, sha )
            T = ""
            for i in range(1, l+1):
                T += pbkdf2_F( h, salt, iter, i )
            return T[0: keylen]

    def UnprotectHeaderData(encryptedData):
        # Decrypt the .kinf2011 header blob: fixed password/salt feed PBKDF2 to
        # derive an AES key (first 32 bytes) and IV (next 16) for AES-CBC.
        passwdData = 'header_key_data'
        salt = 'HEADER.2011'
        iter = 0x80
        keylen = 0x100
        key_iv = KeyIVGen().pbkdf2(passwdData, salt, iter, keylen)
        key = key_iv[0:32]
        iv = key_iv[32:48]
        aes=AES_CBC()
        aes.set_decrypt_key(key, iv)
        cleartext = aes.decrypt(encryptedData)
        return cleartext

    # Various character maps used to decrypt kindle info values.
    # Probably supposed to act as obfuscation
    charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
    charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
    # New maps in K4PC 1.9.0
    testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
    testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
    testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"

    # interface with Windows OS Routines
    class DataBlob(Structure):
        # Mirrors the Win32 DATA_BLOB struct used by the DPAPI calls below.
        _fields_ = [('cbData', c_uint),
                    ('pbData', c_void_p)]
    DataBlob_p = POINTER(DataBlob)

    def GetSystemDirectory():
        # Factory: binds the Win32 entry point once, returns a plain wrapper.
        GetSystemDirectoryW = kernel32.GetSystemDirectoryW
        GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
        GetSystemDirectoryW.restype = c_uint
        def GetSystemDirectory():
            buffer = create_unicode_buffer(MAX_PATH + 1)
            GetSystemDirectoryW(buffer, len(buffer))
            return buffer.value
        return GetSystemDirectory
    GetSystemDirectory = GetSystemDirectory()

    def GetVolumeSerialNumber():
        # Factory: binds GetVolumeInformationW once; the wrapper defaults to the
        # drive that holds the system directory.
        GetVolumeInformationW = kernel32.GetVolumeInformationW
        GetVolumeInformationW.argtypes = [c_wchar_p, c_wchar_p, c_uint,
                                          POINTER(c_uint), POINTER(c_uint),
                                          POINTER(c_uint), c_wchar_p, c_uint]
        GetVolumeInformationW.restype = c_uint
        def GetVolumeSerialNumber(path = GetSystemDirectory().split('\\')[0] + '\\'):
            vsn = c_uint(0)
            GetVolumeInformationW(path, None, 0, byref(vsn), None, None, None, 0)
            return str(vsn.value)
        return GetVolumeSerialNumber
    GetVolumeSerialNumber = GetVolumeSerialNumber()

    def GetIDString():
        # On Windows the machine ID used for key derivation is the volume
        # serial number of the system drive.
        vsn = GetVolumeSerialNumber()
        #print('Using Volume Serial Number for ID: '+vsn)
        return vsn

    def getLastError():
        # Factory: binds kernel32.GetLastError once, returns a thin wrapper.
        GetLastError = kernel32.GetLastError
        GetLastError.argtypes = None
        GetLastError.restype = c_uint
        def getLastError():
            return GetLastError()
        return getLastError
getLastError = getLastError() def GetUserName(): GetUserNameW = advapi32.GetUserNameW GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)] GetUserNameW.restype = c_uint def GetUserName(): buffer = create_unicode_buffer(2) size = c_uint(len(buffer)) while not GetUserNameW(buffer, byref(size)): errcd = getLastError() if errcd == 234: # bad wine implementation up through wine 1.3.21 return "AlternateUserName" buffer = create_unicode_buffer(len(buffer) * 2) size.value = len(buffer) return buffer.value.encode('utf-16-le')[::2] return GetUserName GetUserName = GetUserName() def CryptUnprotectData(): _CryptUnprotectData = crypt32.CryptUnprotectData _CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p, c_void_p, c_void_p, c_uint, DataBlob_p] _CryptUnprotectData.restype = c_uint def CryptUnprotectData(indata, entropy, flags): indatab = create_string_buffer(indata) indata = DataBlob(len(indata), cast(indatab, c_void_p)) entropyb = create_string_buffer(entropy) entropy = DataBlob(len(entropy), cast(entropyb, c_void_p)) outdata = DataBlob() if not _CryptUnprotectData(byref(indata), None, byref(entropy), None, None, flags, byref(outdata)): # raise DrmException("Failed to Unprotect Data") return 'failed' return string_at(outdata.pbData, outdata.cbData) return CryptUnprotectData CryptUnprotectData = CryptUnprotectData() # Returns Environmental Variables that contain unicode def getEnvironmentVariable(name): import ctypes name = unicode(name) # make sure string argument is unicode n = ctypes.windll.kernel32.GetEnvironmentVariableW(name, None, 0) if n == 0: return None buf = ctypes.create_unicode_buffer(u'\0'*n) ctypes.windll.kernel32.GetEnvironmentVariableW(name, buf, n) return buf.value # Locate all of the kindle-info style files and return as list def getKindleInfoFiles(): kInfoFiles = [] # some 64 bit machines do not have the proper registry key for some reason # or the python interface to the 32 vs 64 bit registry is broken path = "" if 'LOCALAPPDATA' in 
os.environ.keys(): # Python 2.x does not return unicode env. Use Python 3.x path = winreg.ExpandEnvironmentStrings(u"%LOCALAPPDATA%") # this is just another alternative. # path = getEnvironmentVariable('LOCALAPPDATA') if not os.path.isdir(path): path = "" else: # User Shell Folders show take precedent over Shell Folders if present try: # this will still break regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\User Shell Folders\\") path = winreg.QueryValueEx(regkey, 'Local AppData')[0] if not os.path.isdir(path): path = "" try: regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\") path = winreg.QueryValueEx(regkey, 'Local AppData')[0] if not os.path.isdir(path): path = "" except RegError: pass except RegError: pass found = False if path == "": print ('Could not find the folder in which to look for kinfoFiles.') else: # Probably not the best. To Fix (shouldn't ignore in encoding) or use utf-8 print(u'searching for kinfoFiles in ' + path.encode('ascii', 'ignore')) # look for (K4PC 1.9.0 and later) .kinf2011 file kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011' if os.path.isfile(kinfopath): found = True print('Found K4PC 1.9+ kinf2011 file: ' + kinfopath.encode('ascii','ignore')) kInfoFiles.append(kinfopath) # look for (K4PC 1.6.0 and later) rainier.2.1.1.kinf file kinfopath = path +'\\Amazon\\Kindle\\storage\\rainier.2.1.1.kinf' if os.path.isfile(kinfopath): found = True print('Found K4PC 1.6-1.8 kinf file: ' + kinfopath) kInfoFiles.append(kinfopath) # look for (K4PC 1.5.0 and later) rainier.2.1.1.kinf file kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf' if os.path.isfile(kinfopath): found = True print('Found K4PC 1.5 kinf file: ' + kinfopath) kInfoFiles.append(kinfopath) # look for original (earlier than K4PC 1.5.0) kindle-info files kinfopath = path +'\\Amazon\\Kindle For 
PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info' if os.path.isfile(kinfopath): found = True print('Found K4PC kindle.info file: ' + kinfopath) kInfoFiles.append(kinfopath) if not found: print('No K4PC kindle.info/kinf/kinf2011 files have been found.') return kInfoFiles # determine type of kindle info provided and return a # database of keynames and values def getDBfromFile(kInfoFile): names = ['kindle.account.tokens','kindle.cookie.item','eulaVersionAccepted','login_date','kindle.token.item','login','kindle.key.item','kindle.name.info','kindle.device.info', 'MazamaRandomNumber', 'max_date', 'SIGVERIF'] DB = {} with open(kInfoFile, 'rb') as infoReader: hdr = infoReader.read(1) data = infoReader.read() if data.find('{') != -1 : # older style kindle-info file items = data.split('{') for item in items: if item != '': keyhash, rawdata = item.split(':') keyname = "unknown" for name in names: if encodeHash(name,charMap2) == keyhash: keyname = name break if keyname == "unknown": keyname = keyhash encryptedValue = decode(rawdata,charMap2) DB[keyname] = CryptUnprotectData(encryptedValue, "", 0) elif hdr == '/': # else rainier-2-1-1 .kinf file # the .kinf file uses "/" to separate it into records # so remove the trailing "/" to make it easy to use split data = data[:-1] items = data.split('/') # loop through the item records until all are processed while len(items) > 0: # get the first item record item = items.pop(0) # the first 32 chars of the first record of a group # is the MD5 hash of the key name encoded by charMap5 keyhash = item[0:32] # the raw keyhash string is used to create entropy for the actual # CryptProtectData Blob that represents that keys contents entropy = SHA1(keyhash) # the remainder of the first record when decoded with charMap5 # has the ':' split char followed by the string representation # of the number of records that follow # and make up the contents srcnt = decode(item[34:],charMap5) rcnt = int(srcnt) # read and store in rcnt records of data # 
that make up the contents value edlst = [] for i in xrange(rcnt): item = items.pop(0) edlst.append(item) keyname = "unknown" for name in names: if encodeHash(name,charMap5) == keyhash: keyname = name break if keyname == "unknown": keyname = keyhash # the charMap5 encoded contents data has had a length # of chars (always odd) cut off of the front and moved # to the end to prevent decoding using charMap5 from # working properly, and thereby preventing the ensuing # CryptUnprotectData call from succeeding. # The offset into the charMap5 encoded contents seems to be: # len(contents)-largest prime number <= int(len(content)/3) # (in other words split "about" 2/3rds of the way through) # move first offsets chars to end to align for decode by charMap5 encdata = "".join(edlst) contlen = len(encdata) noffset = contlen - primes(int(contlen/3))[-1] # now properly split and recombine # by moving noffset chars from the start of the # string to the end of the string pfx = encdata[0:noffset] encdata = encdata[noffset:] encdata = encdata + pfx # decode using Map5 to get the CryptProtect Data encryptedValue = decode(encdata,charMap5) DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1) else: # else newest .kinf2011 style .kinf file # the .kinf file uses "/" to separate it into records # so remove the trailing "/" to make it easy to use split # need to put back the first char read because it it part # of the added entropy blob data = hdr + data[:-1] items = data.split('/') # starts with and encoded and encrypted header blob headerblob = items.pop(0) encryptedValue = decode(headerblob, testMap1) cleartext = UnprotectHeaderData(encryptedValue) # now extract the pieces that form the added entropy pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE) for m in re.finditer(pattern, cleartext): added_entropy = m.group(2) + m.group(4) # loop through the item records until all are processed while len(items) > 0: # get 
the first item record item = items.pop(0) # the first 32 chars of the first record of a group # is the MD5 hash of the key name encoded by charMap5 keyhash = item[0:32] # the sha1 of raw keyhash string is used to create entropy along # with the added entropy provided above from the headerblob entropy = SHA1(keyhash) + added_entropy # the remainder of the first record when decoded with charMap5 # has the ':' split char followed by the string representation # of the number of records that follow # and make up the contents srcnt = decode(item[34:],charMap5) rcnt = int(srcnt) # read and store in rcnt records of data # that make up the contents value edlst = [] for i in xrange(rcnt): item = items.pop(0) edlst.append(item) # key names now use the new testMap8 encoding keyname = "unknown" for name in names: if encodeHash(name,testMap8) == keyhash: keyname = name break # the testMap8 encoded contents data has had a length # of chars (always odd) cut off of the front and moved # to the end to prevent decoding using testMap8 from # working properly, and thereby preventing the ensuing # CryptUnprotectData call from succeeding. 
# The offset into the testMap8 encoded contents seems to be: # len(contents)-largest prime number <= int(len(content)/3) # (in other words split "about" 2/3rds of the way through) # move first offsets chars to end to align for decode by testMap8 # by moving noffset chars from the start of the # string to the end of the string encdata = "".join(edlst) contlen = len(encdata) noffset = contlen - primes(int(contlen/3))[-1] pfx = encdata[0:noffset] encdata = encdata[noffset:] encdata = encdata + pfx # decode using new testMap8 to get the original CryptProtect Data encryptedValue = decode(encdata,testMap8) cleartext = CryptUnprotectData(encryptedValue, entropy, 1) DB[keyname] = cleartext if 'kindle.account.tokens' in DB: print u"Decrypted key file using IDString '{0:s}' and UserName '{1:s}'".format(GetIDString(), GetUserName().decode("latin-1")) # store values used in decryption DB['IDString'] = GetIDString() DB['UserName'] = GetUserName() else: DB = {} return DB elif isosx: import copy import subprocess # interface to needed routines in openssl's libcrypto def _load_crypto_libcrypto(): from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \ Structure, c_ulong, create_string_buffer, addressof, string_at, cast from ctypes.util import find_library libcrypto = find_library('crypto') if libcrypto is None: raise DrmException(u"libcrypto not found") libcrypto = CDLL(libcrypto) # From OpenSSL's crypto aes header # # AES_ENCRYPT 1 # AES_DECRYPT 0 # AES_MAXNR 14 (in bytes) # AES_BLOCK_SIZE 16 (in bytes) # # struct aes_key_st { # unsigned long rd_key[4 *(AES_MAXNR + 1)]; # int rounds; # }; # typedef struct aes_key_st AES_KEY; # # int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key); # # note: the ivec string, and output buffer are both mutable # void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, # const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc); AES_MAXNR = 14 c_char_pp = 
POINTER(c_char_p) c_int_p = POINTER(c_int) class AES_KEY(Structure): _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)] AES_KEY_p = POINTER(AES_KEY) def F(restype, name, argtypes): func = getattr(libcrypto, name) func.restype = restype func.argtypes = argtypes return func AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int]) AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p]) # From OpenSSL's Crypto evp/p5_crpt2.c # # int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen, # const unsigned char *salt, int saltlen, int iter, # int keylen, unsigned char *out); PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1', [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p]) class LibCrypto(object): def __init__(self): self._blocksize = 0 self._keyctx = None self._iv = 0 def set_decrypt_key(self, userkey, iv): self._blocksize = len(userkey) if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) : raise DrmException(u"AES improper key used") return keyctx = self._keyctx = AES_KEY() self._iv = iv self._userkey = userkey rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx) if rv < 0: raise DrmException(u"Failed to initialize AES key") def decrypt(self, data): out = create_string_buffer(len(data)) mutable_iv = create_string_buffer(self._iv, len(self._iv)) keyctx = self._keyctx rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0) if rv == 0: raise DrmException(u"AES decryption failed") return out.raw def keyivgen(self, passwd, salt, iter, keylen): saltlen = len(salt) passlen = len(passwd) out = create_string_buffer(keylen) rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out) return out.raw return LibCrypto def _load_crypto(): LibCrypto = None try: LibCrypto = _load_crypto_libcrypto() except (ImportError, DrmException): pass return LibCrypto LibCrypto = _load_crypto() # Various 
character maps used to decrypt books. Probably supposed to act as obfuscation charMap1 = 'n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M' charMap2 = 'ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM' # For kinf approach of K4Mac 1.6.X or later # On K4PC charMap5 = 'AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE' # For Mac they seem to re-use charMap2 here charMap5 = charMap2 # new in K4M 1.9.X testMap8 = 'YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD' # uses a sub process to get the Hard Drive Serial Number using ioreg # returns serial numbers of all internal hard drive drives def GetVolumesSerialNumbers(): sernums = [] sernum = os.getenv('MYSERIALNUMBER') if sernum != None: sernums.append(sernum.strip()) cmdline = '/usr/sbin/ioreg -w 0 -r -c AppleAHCIDiskDriver' cmdline = cmdline.encode(sys.getfilesystemencoding()) p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) out1, out2 = p.communicate() reslst = out1.split('\n') cnt = len(reslst) bsdname = None sernum = None foundIt = False for j in xrange(cnt): resline = reslst[j] pp = resline.find('\"Serial Number\" = \"') if pp >= 0: sernum = resline[pp+19:-1] sernums.append(sernum.strip()) return sernums def GetUserHomeAppSupKindleDirParitionName(): home = os.getenv('HOME') dpath = home + '/Library' cmdline = '/sbin/mount' cmdline = cmdline.encode(sys.getfilesystemencoding()) p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) out1, out2 = p.communicate() reslst = out1.split('\n') cnt = len(reslst) disk = '' foundIt = False for j in xrange(cnt): resline = reslst[j] if resline.startswith('/dev'): (devpart, mpath) = resline.split(' on ') dpart = devpart[5:] pp = mpath.find('(') if pp >= 0: mpath = mpath[:pp-1] if dpath.startswith(mpath): disk = dpart return disk # uses a sub process to get the UUID of the specified disk partition using ioreg def 
GetDiskPartitionUUIDs(diskpart): uuids = [] uuidnum = os.getenv('MYUUIDNUMBER') if uuidnum != None: uuids.append(strip(uuidnum)) cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver' cmdline = cmdline.encode(sys.getfilesystemencoding()) p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) out1, out2 = p.communicate() reslst = out1.split('\n') cnt = len(reslst) bsdname = None uuidnum = None foundIt = False nest = 0 uuidnest = -1 partnest = -2 for j in xrange(cnt): resline = reslst[j] if resline.find('{') >= 0: nest += 1 if resline.find('}') >= 0: nest -= 1 pp = resline.find('\"UUID\" = \"') if pp >= 0: uuidnum = resline[pp+10:-1] uuidnum = uuidnum.strip() uuidnest = nest if partnest == uuidnest and uuidnest > 0: foundIt = True break bb = resline.find('\"BSD Name\" = \"') if bb >= 0: bsdname = resline[bb+14:-1] bsdname = bsdname.strip() if (bsdname == diskpart): partnest = nest else : partnest = -2 if partnest == uuidnest and partnest > 0: foundIt = True break if nest == 0: partnest = -2 uuidnest = -1 uuidnum = None bsdname = None if foundIt: uuids.append(uuidnum) return uuids def GetMACAddressesMunged(): macnums = [] macnum = os.getenv('MYMACNUM') if macnum != None: macnums.append(macnum) cmdline = '/sbin/ifconfig en0' cmdline = cmdline.encode(sys.getfilesystemencoding()) p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) out1, out2 = p.communicate() reslst = out1.split('\n') cnt = len(reslst) macnum = None foundIt = False for j in xrange(cnt): resline = reslst[j] pp = resline.find('ether ') if pp >= 0: macnum = resline[pp+6:-1] macnum = macnum.strip() # print 'original mac', macnum # now munge it up the way Kindle app does # by xoring it with 0xa5 and swapping elements 3 and 4 maclst = macnum.split(':') n = len(maclst) if n != 6: fountIt = False break for i in range(6): maclst[i] = int('0x' + maclst[i], 0) mlst = [0x00, 
0x00, 0x00, 0x00, 0x00, 0x00] mlst[5] = maclst[5] ^ 0xa5 mlst[4] = maclst[3] ^ 0xa5 mlst[3] = maclst[4] ^ 0xa5 mlst[2] = maclst[2] ^ 0xa5 mlst[1] = maclst[1] ^ 0xa5 mlst[0] = maclst[0] ^ 0xa5 macnum = '%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x' % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5]) foundIt = True break if foundIt: macnums.append(macnum) return macnums # uses unix env to get username instead of using sysctlbyname def GetUserName(): username = os.getenv('USER') return username def GetIDStrings(): # Return all possible ID Strings strings = [] strings.extend(GetMACAddressesMunged()) strings.extend(GetVolumesSerialNumbers()) diskpart = GetUserHomeAppSupKindleDirParitionName() strings.extend(GetDiskPartitionUUIDs(diskpart)) strings.append('9999999999') #print strings return strings # implements an Pseudo Mac Version of Windows built-in Crypto routine # used by Kindle for Mac versions < 1.6.0 class CryptUnprotectData(object): def __init__(self, IDString): sp = IDString + '!@#' + GetUserName() passwdData = encode(SHA256(sp),charMap1) salt = '16743' self.crp = LibCrypto() iter = 0x3e8 keylen = 0x80 key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen) self.key = key_iv[0:32] self.iv = key_iv[32:48] self.crp.set_decrypt_key(self.key, self.iv) def decrypt(self, encryptedData): cleartext = self.crp.decrypt(encryptedData) cleartext = decode(cleartext,charMap1) return cleartext # implements an Pseudo Mac Version of Windows built-in Crypto routine # used for Kindle for Mac Versions >= 1.6.0 class CryptUnprotectDataV2(object): def __init__(self, IDString): sp = GetUserName() + ':&%:' + IDString passwdData = encode(SHA256(sp),charMap5) # salt generation as per the code salt = 0x0512981d * 2 * 1 * 1 salt = str(salt) + GetUserName() salt = encode(salt,charMap5) self.crp = LibCrypto() iter = 0x800 keylen = 0x400 key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen) self.key = key_iv[0:32] self.iv = key_iv[32:48] self.crp.set_decrypt_key(self.key, self.iv) def 
decrypt(self, encryptedData): cleartext = self.crp.decrypt(encryptedData) cleartext = decode(cleartext, charMap5) return cleartext # unprotect the new header blob in .kinf2011 # used in Kindle for Mac Version >= 1.9.0 def UnprotectHeaderData(encryptedData): passwdData = 'header_key_data' salt = 'HEADER.2011' iter = 0x80 keylen = 0x100 crp = LibCrypto() key_iv = crp.keyivgen(passwdData, salt, iter, keylen) key = key_iv[0:32] iv = key_iv[32:48] crp.set_decrypt_key(key,iv) cleartext = crp.decrypt(encryptedData) return cleartext # implements an Pseudo Mac Version of Windows built-in Crypto routine # used for Kindle for Mac Versions >= 1.9.0 class CryptUnprotectDataV3(object): def __init__(self, entropy, IDString): sp = GetUserName() + '+@#$%+' + IDString passwdData = encode(SHA256(sp),charMap2) salt = entropy self.crp = LibCrypto() iter = 0x800 keylen = 0x400 key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen) self.key = key_iv[0:32] self.iv = key_iv[32:48] self.crp.set_decrypt_key(self.key, self.iv) def decrypt(self, encryptedData): cleartext = self.crp.decrypt(encryptedData) cleartext = decode(cleartext, charMap2) return cleartext # Locate the .kindle-info files def getKindleInfoFiles(): # file searches can take a long time on some systems, so just look in known specific places. 
kInfoFiles=[] found = False home = os.getenv('HOME') # check for .kinf2011 file in new location (App Store Kindle for Mac) testpath = home + '/Library/Containers/com.amazon.Kindle/Data/Library/Application Support/Kindle/storage/.kinf2011' if os.path.isfile(testpath): kInfoFiles.append(testpath) print('Found k4Mac kinf2011 file: ' + testpath) found = True # check for .kinf2011 files from 1.10 testpath = home + '/Library/Application Support/Kindle/storage/.kinf2011' if os.path.isfile(testpath): kInfoFiles.append(testpath) print('Found k4Mac kinf2011 file: ' + testpath) found = True # check for .rainier-2.1.1-kinf files from 1.6 testpath = home + '/Library/Application Support/Kindle/storage/.rainier-2.1.1-kinf' if os.path.isfile(testpath): kInfoFiles.append(testpath) print('Found k4Mac rainier file: ' + testpath) found = True # check for .kindle-info files from 1.4 testpath = home + '/Library/Application Support/Kindle/storage/.kindle-info' if os.path.isfile(testpath): kInfoFiles.append(testpath) print('Found k4Mac kindle-info file: ' + testpath) found = True # check for .kindle-info file from 1.2.2 testpath = home + '/Library/Application Support/Amazon/Kindle/storage/.kindle-info' if os.path.isfile(testpath): kInfoFiles.append(testpath) print('Found k4Mac kindle-info file: ' + testpath) found = True # check for .kindle-info file from 1.0 beta 1 (27214) testpath = home + '/Library/Application Support/Amazon/Kindle for Mac/storage/.kindle-info' if os.path.isfile(testpath): kInfoFiles.append(testpath) print('Found k4Mac kindle-info file: ' + testpath) found = True if not found: print('No k4Mac kindle-info/rainier/kinf2011 files have been found.') return kInfoFiles # determine type of kindle info provided and return a # database of keynames and values def getDBfromFile(kInfoFile): names = ['kindle.account.tokens','kindle.cookie.item','eulaVersionAccepted','login_date','kindle.token.item','login','kindle.key.item','kindle.name.info','kindle.device.info', 
'MazamaRandomNumber', 'max_date', 'SIGVERIF'] with open(kInfoFile, 'rb') as infoReader: filehdr = infoReader.read(1) filedata = infoReader.read() IDStrings = GetIDStrings() for IDString in IDStrings: DB = {} #print "trying IDString:",IDString try: hdr = filehdr data = filedata if data.find('[') != -1 : # older style kindle-info file cud = CryptUnprotectData(IDString) items = data.split('[') for item in items: if item != '': keyhash, rawdata = item.split(':') keyname = 'unknown' for name in names: if encodeHash(name,charMap2) == keyhash: keyname = name break if keyname == 'unknown': keyname = keyhash encryptedValue = decode(rawdata,charMap2) cleartext = cud.decrypt(encryptedValue) if len(cleartext) > 0: DB[keyname] = cleartext if 'MazamaRandomNumber' in DB and 'kindle.account.tokens' in DB: break elif hdr == '/': # else newer style .kinf file used by K4Mac >= 1.6.0 # the .kinf file uses '/' to separate it into records # so remove the trailing '/' to make it easy to use split data = data[:-1] items = data.split('/') cud = CryptUnprotectDataV2(IDString) # loop through the item records until all are processed while len(items) > 0: # get the first item record item = items.pop(0) # the first 32 chars of the first record of a group # is the MD5 hash of the key name encoded by charMap5 keyhash = item[0:32] keyname = 'unknown' # the raw keyhash string is also used to create entropy for the actual # CryptProtectData Blob that represents that keys contents # 'entropy' not used for K4Mac only K4PC # entropy = SHA1(keyhash) # the remainder of the first record when decoded with charMap5 # has the ':' split char followed by the string representation # of the number of records that follow # and make up the contents srcnt = decode(item[34:],charMap5) rcnt = int(srcnt) # read and store in rcnt records of data # that make up the contents value edlst = [] for i in xrange(rcnt): item = items.pop(0) edlst.append(item) keyname = 'unknown' for name in names: if encodeHash(name,charMap5) 
== keyhash: keyname = name break if keyname == 'unknown': keyname = keyhash # the charMap5 encoded contents data has had a length # of chars (always odd) cut off of the front and moved # to the end to prevent decoding using charMap5 from # working properly, and thereby preventing the ensuing # CryptUnprotectData call from succeeding. # The offset into the charMap5 encoded contents seems to be: # len(contents) - largest prime number less than or equal to int(len(content)/3) # (in other words split 'about' 2/3rds of the way through) # move first offsets chars to end to align for decode by charMap5 encdata = ''.join(edlst) contlen = len(encdata) # now properly split and recombine # by moving noffset chars from the start of the # string to the end of the string noffset = contlen - primes(int(contlen/3))[-1] pfx = encdata[0:noffset] encdata = encdata[noffset:] encdata = encdata + pfx # decode using charMap5 to get the CryptProtect Data encryptedValue = decode(encdata,charMap5) cleartext = cud.decrypt(encryptedValue) if len(cleartext) > 0: DB[keyname] = cleartext if 'MazamaRandomNumber' in DB and 'kindle.account.tokens' in DB: break else: # the latest .kinf2011 version for K4M 1.9.1 # put back the hdr char, it is needed data = hdr + data data = data[:-1] items = data.split('/') # the headerblob is the encrypted information needed to build the entropy string headerblob = items.pop(0) encryptedValue = decode(headerblob, charMap1) cleartext = UnprotectHeaderData(encryptedValue) # now extract the pieces in the same way # this version is different from K4PC it scales the build number by multipying by 735 pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE) for m in re.finditer(pattern, cleartext): entropy = str(int(m.group(2)) * 0x2df) + m.group(4) cud = CryptUnprotectDataV3(entropy,IDString) # loop through the item records until all are processed while len(items) > 0: # get the first item record item = 
items.pop(0) # the first 32 chars of the first record of a group # is the MD5 hash of the key name encoded by charMap5 keyhash = item[0:32] keyname = 'unknown' # unlike K4PC the keyhash is not used in generating entropy # entropy = SHA1(keyhash) + added_entropy # entropy = added_entropy # the remainder of the first record when decoded with charMap5 # has the ':' split char followed by the string representation # of the number of records that follow # and make up the contents srcnt = decode(item[34:],charMap5) rcnt = int(srcnt) # read and store in rcnt records of data # that make up the contents value edlst = [] for i in xrange(rcnt): item = items.pop(0) edlst.append(item) keyname = 'unknown' for name in names: if encodeHash(name,testMap8) == keyhash: keyname = name break if keyname == 'unknown': keyname = keyhash # the testMap8 encoded contents data has had a length # of chars (always odd) cut off of the front and moved # to the end to prevent decoding using testMap8 from # working properly, and thereby preventing the ensuing # CryptUnprotectData call from succeeding. 
# The offset into the testMap8 encoded contents seems to be: # len(contents) - largest prime number less than or equal to int(len(content)/3) # (in other words split 'about' 2/3rds of the way through) # move first offsets chars to end to align for decode by testMap8 encdata = ''.join(edlst) contlen = len(encdata) # now properly split and recombine # by moving noffset chars from the start of the # string to the end of the string noffset = contlen - primes(int(contlen/3))[-1] pfx = encdata[0:noffset] encdata = encdata[noffset:] encdata = encdata + pfx # decode using testMap8 to get the CryptProtect Data encryptedValue = decode(encdata,testMap8) cleartext = cud.decrypt(encryptedValue) # print keyname # print cleartext if len(cleartext) > 0: DB[keyname] = cleartext if 'MazamaRandomNumber' in DB and 'kindle.account.tokens' in DB: break except: pass if 'kindle.account.tokens' in DB: # store values used in decryption print u"Decrypted key file using IDString '{0:s}' and UserName '{1:s}'".format(IDString, GetUserName()) DB['IDString'] = IDString DB['UserName'] = GetUserName() else: print u"Couldn't decrypt file." DB = {} return DB else: def getDBfromFile(kInfoFile): raise DrmException(u"This script only runs under Windows or Mac OS X.") return {} def kindlekeys(files = []): keys = [] if files == []: files = getKindleInfoFiles() for file in files: key = getDBfromFile(file) if key: # convert all values to hex, just in case. 
for keyname in key: key[keyname]=key[keyname].encode('hex') keys.append(key) return keys # interface for Python DeDRM # returns single key or multiple keys, depending on path or file passed in def getkey(outpath, files=[]): keys = kindlekeys(files) if len(keys) > 0: if not os.path.isdir(outpath): outfile = outpath with file(outfile, 'w') as keyfileout: keyfileout.write(json.dumps(keys[0])) print u"Saved a key to {0}".format(outfile) else: keycount = 0 for key in keys: while True: keycount += 1 outfile = os.path.join(outpath,u"kindlekey{0:d}.k4i".format(keycount)) if not os.path.exists(outfile): break with file(outfile, 'w') as keyfileout: keyfileout.write(json.dumps(key)) print u"Saved a key to {0}".format(outfile) return True return False def usage(progname): print u"Finds, decrypts and saves the default Kindle For Mac/PC encryption keys." print u"Keys are saved to the current directory, or a specified output directory." print u"If a file name is passed instead of a directory, only the first key is saved, in that file." 
print u"Usage:" print u" {0:s} [-h] [-k <kindle.info>] [<outpath>]".format(progname) def cli_main(): sys.stdout=SafeUnbuffered(sys.stdout) sys.stderr=SafeUnbuffered(sys.stderr) argv=unicode_argv() progname = os.path.basename(argv[0]) print u"{0} v{1}\nCopyright © 2010-2013 some_updates and Apprentice Alf".format(progname,__version__) try: opts, args = getopt.getopt(argv[1:], "hk:") except getopt.GetoptError, err: print u"Error in options or arguments: {0}".format(err.args[0]) usage(progname) sys.exit(2) files = [] for o, a in opts: if o == "-h": usage(progname) sys.exit(0) if o == "-k": files = [a] if len(args) > 1: usage(progname) sys.exit(2) if len(args) == 1: # save to the specified file or directory outpath = args[0] if not os.path.isabs(outpath): outpath = os.path.abspath(outpath) else: # save to the same directory as the script outpath = os.path.dirname(argv[0]) # make sure the outpath is the outpath = os.path.realpath(os.path.normpath(outpath)) if not getkey(outpath, files): print u"Could not retrieve Kindle for Mac/PC key." 
return 0 def gui_main(): try: import Tkinter import Tkconstants import tkMessageBox import traceback except: return cli_main() class ExceptionDialog(Tkinter.Frame): def __init__(self, root, text): Tkinter.Frame.__init__(self, root, border=5) label = Tkinter.Label(self, text=u"Unexpected error:", anchor=Tkconstants.W, justify=Tkconstants.LEFT) label.pack(fill=Tkconstants.X, expand=0) self.text = Tkinter.Text(self) self.text.pack(fill=Tkconstants.BOTH, expand=1) self.text.insert(Tkconstants.END, text) argv=unicode_argv() root = Tkinter.Tk() root.withdraw() progpath, progname = os.path.split(argv[0]) success = False try: keys = kindlekeys() keycount = 0 for key in keys: while True: keycount += 1 outfile = os.path.join(progpath,u"kindlekey{0:d}.k4i".format(keycount)) if not os.path.exists(outfile): break with file(outfile, 'w') as keyfileout: keyfileout.write(json.dumps(key)) success = True tkMessageBox.showinfo(progname, u"Key successfully retrieved to {0}".format(outfile)) except DrmException, e: tkMessageBox.showerror(progname, u"Error: {0}".format(str(e))) except Exception: root.wm_state('normal') root.title(progname) text = traceback.format_exc() ExceptionDialog(root, text).pack(fill=Tkconstants.BOTH, expand=1) root.mainloop() if not success: return 1 return 0 if __name__ == '__main__': if len(sys.argv) > 1: sys.exit(cli_main()) sys.exit(gui_main())
{ "pile_set_name": "Github" }
#include <errno.h>
#include <sys/socket.h>
#include <netdb.h>
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include <stdio.h>
#include <pthread.h>
#include <sched.h>

#include "jsb_dbg.h"
#include "jsb_config.h"
#include "jsdbgapi.h"

#if DEBUG
#define TRACE_DEBUGGER_SERVER(...) CCLOG(__VA_ARGS__)
#else
#define TRACE_DEBUGGER_SERVER(...)
#endif // #if DEBUG

using namespace std;

// Remote-debugger state shared between the socket thread and the JS (main) thread.
pthread_t debugThread;
string inData;                  // bytes received from the debugger client, pending processing
string outData;                 // bytes queued to be written back to the debugger client
vector<string> queue;           // commands queued while the VM is paused (guarded by g_qMutex)
pthread_mutex_t g_qMutex;       // protects `queue`
pthread_mutex_t g_rwMutex;      // protects `inData`/`outData`
// Spin-wait pause flag: set on the main thread (JSBDebug_LockExecution), cleared from
// JS (_unlockVM) while the main thread busy-waits on it. Declared volatile so the
// spin loop re-reads it; NOTE(review): volatile is not a full memory barrier — a
// proper atomic would be preferable, but this preserves the original design.
volatile bool vmLock = false;
jsval frame = JSVAL_NULL, script = JSVAL_NULL;  // current breakpoint frame/script while paused
int clientSocket;               // fd of the currently-connected debugger client

// Route one chunk of debugger input: while paused, queue it for the spin loop in
// JSBDebug_LockExecution; otherwise dispatch it synchronously to the main thread.
void debugProcessInput(string data)
{
	NSString* str = [NSString stringWithUTF8String:data.c_str()];
	if (vmLock) {
		pthread_mutex_lock(&g_qMutex);
		queue.push_back(string(data));
		pthread_mutex_unlock(&g_qMutex);
	} else {
		[[JSBCore sharedInstance] performSelector:@selector(debugProcessInput:)
										 onThread:[NSThread mainThread]
									   withObject:str
									waitUntilDone:YES];
	}
}

// Write `s` to the connected client (and optionally stdout) and clear it.
// Caller is responsible for any locking around `s`.
static void _clientSocketWriteAndClearString(string& s)
{
#if JSB_DEBUGGER_OUTPUT_STDOUT
	write(STDOUT_FILENO, s.c_str(), s.length());
#endif
	write(clientSocket, s.c_str(), s.length());
	s.clear();
}

// Flush pending input to the JS side and pending output to the client socket.
void clearBuffers()
{
	pthread_mutex_lock(&g_rwMutex);
	{
		// only process input if there's something and we're not locked
		if (inData.length() > 0) {
			debugProcessInput(inData);
			inData.clear();
		}
		if (outData.length() > 0) {
			_clientSocketWriteAndClearString(outData);
		}
	}
	pthread_mutex_unlock(&g_rwMutex);
}

// Background thread: listen on JSB_DEBUGGER_PORT, accept one client at a time,
// and feed received bytes into the debugger via clearBuffers().
void* serverEntryPoint(void*)
{
#if TARGET_OS_IPHONE || TARGET_OS_MAC
	// this just in case
	@autoreleasepool {
#endif
	// init the mutexes
	assert(pthread_mutex_init(&g_rwMutex, NULL) == 0);
	assert(pthread_mutex_init(&g_qMutex, NULL) == 0);

	// start a server, accept the connection and keep reading data from it
	struct addrinfo hints, *result, *rp;
	int s = -1;
	memset(&hints, 0, sizeof(struct addrinfo));
	hints.ai_family = AF_INET;
	hints.ai_socktype = SOCK_STREAM; // TCP

	int err;
	stringstream portstr;
	portstr << JSB_DEBUGGER_PORT;
	// FIX: keep the string alive — portstr.str().c_str() would dangle, since the
	// temporary std::string is destroyed at the end of the full expression.
	string portString = portstr.str();
	if ((err = getaddrinfo(NULL, portString.c_str(), &hints, &result)) != 0) {
		// FIX: bail out on failure; `result` is not valid here, so iterating it
		// (as the original code did) was undefined behavior.
		printf("error: %s\n", gai_strerror(err));
		return NULL;
	}
	for (rp = result; rp != NULL; rp = rp->ai_next) {
		if ((s = socket(rp->ai_family, rp->ai_socktype, 0)) < 0) {
			continue;
		}
		int optval = 1;
		if ((setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char*)&optval, sizeof(optval))) < 0) {
			close(s);
			freeaddrinfo(result); // FIX: don't leak the addrinfo list on error paths
			TRACE_DEBUGGER_SERVER(@"debug server : error setting socket option SO_REUSEADDR");
			return NULL;
		}
		if ((setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval))) < 0) {
			close(s);
			freeaddrinfo(result); // FIX: don't leak the addrinfo list on error paths
			TRACE_DEBUGGER_SERVER(@"debug server : error setting socket option SO_NOSIGPIPE");
			return NULL;
		}
		if ((::bind(s, rp->ai_addr, rp->ai_addrlen)) == 0) {
			break;
		}
		close(s);
		s = -1;
	}
	if (s < 0 || rp == NULL) {
		freeaddrinfo(result); // FIX: don't leak the addrinfo list on failure
		TRACE_DEBUGGER_SERVER(@"debug server : error creating/binding socket");
		return NULL;
	}
	freeaddrinfo(result);

	listen(s, 1);

	while (true) {
		clientSocket = accept(s, NULL, NULL);
		if (clientSocket < 0) {
			TRACE_DEBUGGER_SERVER(@"debug server : error on accept");
			return NULL;
		} else {
			// read/write data
			TRACE_DEBUGGER_SERVER(@"debug server : client connected");
			char buf[256];
			int readBytes;
			// FIX: read at most sizeof(buf)-1 bytes; the original read 256 bytes and
			// then wrote buf[256] for the terminator — one byte past the end of buf.
			while ((readBytes = read(clientSocket, buf, sizeof(buf) - 1)) > 0) {
				buf[readBytes] = '\0';
				TRACE_DEBUGGER_SERVER(@"debug server : received command >%s", buf);
				// no other thread is using this
				inData.append(buf);
				// process any input, send any output
				clearBuffers();
			} // while(read)
			close(clientSocket);
		}
	} // while(true)

	// NOTE(review): unreachable — the accept loop above never breaks. Kept for parity
	// with the original; a shutdown path would need to break out of the loop first.
	// we're done, destroy the mutexes
	pthread_mutex_destroy(&g_rwMutex);
	pthread_mutex_destroy(&g_qMutex);
#if TARGET_OS_IPHONE || TARGET_OS_MAC
	}
#endif
	return NULL;
}

@implementation JSBCore (Debugger)

/**
 * Forwards one command string to the JS-side debugger's processInput().
 * If we're on a breakpoint, this will pass the right frame & script
 * (the globals set by JSBDebug_LockExecution; JSVAL_NULL otherwise).
 */
- (void)debugProcessInput:(NSString *)str
{
	JSString* jsstr = JS_NewStringCopyZ(_cx, [str UTF8String]);
	jsval argv[3] = {
		STRING_TO_JSVAL(jsstr),
		frame,
		script
	};

	{
		JSAutoCompartment ac(_cx, _debugObject);
		JS_WrapValue(_cx, &argv[0]);
		JS_WrapValue(_cx, &argv[1]);
		JS_WrapValue(_cx, &argv[2]);
		jsval outval;
		JSBool ok = JS_CallFunctionName(_cx, _debugObject, "processInput", 3, argv, &outval);
		if (!ok) {
			JS_ReportPendingException(_cx);
		}
	}
}

/**
 * One-time debugger setup: creates the debug global, exposes the native
 * buffer/lock hooks to jsb_debugger.js, installs startDebugger() on the app
 * global, and spawns the socket server thread. No-op if already enabled.
 */
- (void)enableDebugger
{
	if (_debugObject == NULL) {
		_debugObject = JSB_NewGlobalObject(_cx, true);

		{
			JSAutoCompartment ac(_cx, _debugObject);
			// these are used in the debug program
			JS_DefineFunction(_cx, _debugObject, "_bufferWrite", JSBDebug_BufferWrite, 1, JSPROP_READONLY | JSPROP_PERMANENT);
			JS_DefineFunction(_cx, _debugObject, "_bufferRead", JSBDebug_BufferRead, 0, JSPROP_READONLY | JSPROP_PERMANENT);
			JS_DefineFunction(_cx, _debugObject, "_lockVM", JSBDebug_LockExecution, 2, JSPROP_READONLY | JSPROP_PERMANENT);
			JS_DefineFunction(_cx, _debugObject, "_unlockVM", JSBDebug_UnlockExecution, 0, JSPROP_READONLY | JSPROP_PERMANENT);
			JS_DefineFunction(_cx, _debugObject, "log", JSB_core_log, 0, JSPROP_READONLY | JSPROP_PERMANENT);

			[self runScript:@"jsb_debugger.js" withContainer:_debugObject];

			jsval outval;
			// prepare the debugger
			jsval oldGlobal = OBJECT_TO_JSVAL(_object);
			JS_WrapValue(_cx, &oldGlobal);
			JSBool ok = JS_CallFunctionName(_cx, _debugObject, "_prepareDebugger", 1, &oldGlobal, &outval);
			if (!ok) {
				JS_ReportPendingException(_cx);
			}
		}

		{
			// define the start debugger function
			JSAutoCompartment ae(_cx, _object);
			JS_DefineFunction(_cx, _object, "startDebugger", JSBDebug_StartDebugger, 3, JSPROP_READONLY | JSPROP_PERMANENT);
		}

		// start bg thread
		pthread_create(&debugThread, NULL, serverEntryPoint, NULL);
	}
}

@end

// Native hook for the JS-side startDebugger(): forwards its arguments to
// _startDebugger in the debug global. Requires at least 2 arguments.
JSBool JSBDebug_StartDebugger(JSContext* cx, unsigned argc, jsval* vp)
{
	JSObject *debugGlobal = [[JSBCore sharedInstance] debugObject];
	if (argc >= 2) {
		jsval* argv = JS_ARGV(cx, vp);
		jsval outval;
		// JS_WrapObject(cx, debugGlobal->address());
		JSAutoCompartment ac(cx, debugGlobal);
		JSBool ok = JS_CallFunctionName(cx, debugGlobal, "_startDebugger", argc, argv, &outval);
		if (!ok) {
			JS_ReportPendingException(cx);
		}
		return ok;
	} else {
		JS_ReportError(cx, "Invalid call to startDebugger()");
	}
	return JS_FALSE;
}

// Native hook `_bufferRead`: returns (and clears) the pending input as a JS
// string. Takes the rw lock only while paused; otherwise relies on being
// called from within clearBuffers(), which already holds it.
JSBool JSBDebug_BufferRead(JSContext* cx, unsigned argc, jsval* vp)
{
	if (argc == 0) {
		JSString* str;
		// this is safe because we're already inside a lock (from clearBuffers)
		if (vmLock) {
			pthread_mutex_lock(&g_rwMutex);
		}
		str = JS_NewStringCopyZ(cx, inData.c_str());
		inData.clear();
		if (vmLock) {
			pthread_mutex_unlock(&g_rwMutex);
		}
		JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(str));
	} else {
		JS_SET_RVAL(cx, vp, JSVAL_NULL);
	}
	return JS_TRUE;
}

// Native hook `_bufferWrite`: appends its single string argument to the output
// buffer and flushes it to the client socket immediately.
JSBool JSBDebug_BufferWrite(JSContext* cx, unsigned argc, jsval* vp)
{
	if (argc == 1) {
		jsval* argv = JS_ARGV(cx, vp);
		const char* str;

		JSString* jsstr = JS_ValueToString(cx, argv[0]);
		str = JS_EncodeString(cx, jsstr);

		// this is safe because we're already inside a lock (from clearBuffers)
		outData.append(str);
		_clientSocketWriteAndClearString(outData);

		JS_free(cx, (void*)str);
	}
	return JS_TRUE;
}

// Native hook `_lockVM(frame, script)`: pauses JS execution on the main thread,
// spinning until _unlockVM clears vmLock, while draining queued debugger
// commands. Must be called on the main thread.
JSBool JSBDebug_LockExecution(JSContext* cx, unsigned argc, jsval* vp)
{
	assert([NSThread currentThread] == [NSThread mainThread]);
	if (argc == 2) {
		jsval* argv = JS_ARGV(cx, vp);
		frame = argv[0];
		script = argv[1];
		vmLock = true;
		while (vmLock) {
			// try to read the input, if there's anything
			pthread_mutex_lock(&g_qMutex);
			while (queue.size() > 0) {
				vector<string>::iterator first = queue.begin();
				string str = *first;
				NSString *nsstr = [NSString stringWithUTF8String:str.c_str()];
				[[JSBCore sharedInstance] performSelector:@selector(debugProcessInput:) withObject:nsstr];
				queue.erase(first);
			}
			pthread_mutex_unlock(&g_qMutex);
			sched_yield();
		}
		frame = JSVAL_NULL;
		script = JSVAL_NULL;
		return JS_TRUE;
	}
	JS_ReportError(cx, "invalid call to _lockVM");
	return JS_FALSE;
}

// Native hook `_unlockVM`: releases the spin loop in JSBDebug_LockExecution.
JSBool JSBDebug_UnlockExecution(JSContext* cx, unsigned argc, jsval* vp)
{
	vmLock = false;
	return JS_TRUE;
}
{ "pile_set_name": "Github" }
// Octopus MFS is an integrated suite for managing a Micro Finance Institution:
// clients, contracts, accounting, reporting and risk
// Copyright © 2006,2007 OCTO Technology & OXUS Development Network
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Website: http://www.opencbs.com
// Contact: [email protected]

using OpenCBS.CoreDomain.FundingLines;
using OpenCBS.ExceptionsHandler.Exceptions.FundingLineExceptions;

namespace OpenCBS.Services.Rules
{
    /// <summary>
    /// Validation rule that checks the amount (and code) of a funding line event
    /// before it is applied to a funding line.
    /// </summary>
    class ValidateAmountofEventFundingLine : IStrategyFundingLineEvent
    {
        #region IStrategyFundingLineEvent Members

        /// <summary>
        /// Validates <paramref name="e"/>, throwing an
        /// <see cref="OpenCbsFundingLineEventException"/> when a rule is violated:
        /// the amount must be strictly positive, the code must be non-empty, and the
        /// amount must not exceed the funding line's remaining commitment
        /// (real remaining for disbursements, anticipated remaining for other debits).
        /// </summary>
        /// <param name="e">The funding line event to validate.</param>
        public void ApplyRules(FundingLineEvent e)
        {
            if (e.Amount <= decimal.Zero)
                throw new OpenCbsFundingLineEventException(OpenCbsFundingLineEventExceptionEnum.AmountIsLessZero);

            // Robustness fix: the original `e.Code == string.Empty` let a null code
            // through; IsNullOrEmpty rejects both null and empty.
            // NOTE(review): the enum member is AmountIsEmpty although the check is on
            // Code — presumably a misnamed enum value; confirm against the enum's use.
            if (string.IsNullOrEmpty(e.Code))
                throw new OpenCbsFundingLineEventException(OpenCbsFundingLineEventExceptionEnum.AmountIsEmpty);

            if (e.Amount > e.FundingLine.RealRemainingAmount
                && e.Type == Enums.OFundingLineEventTypes.Disbursment)
                throw new OpenCbsFundingLineEventException(OpenCbsFundingLineEventExceptionEnum.CommitmentFinancialIsNotEnough);

            if (e.Amount > e.FundingLine.AnticipatedRemainingAmount
                && e.Movement == Enums.OBookingDirections.Debit
                && e.Type != Enums.OFundingLineEventTypes.Disbursment)
                throw new OpenCbsFundingLineEventException(OpenCbsFundingLineEventExceptionEnum.CommitmentFinancialIsNotEnough);
        }

        #endregion
    }
}
{ "pile_set_name": "Github" }
{ "name": "Xilinx Iyuv2Rgb_Ro Test", "description": "HLS case", "flow": "hls", "platform_whitelist": [ "u200", "zcu102", "zcu104", "zc706" ], "platform_blacklist": [], "part_whitelist": [], "part_blacklist": [], "project": "cvtcolor", "solution": "sol1", "clock": "3.3", "topfunction": "cvtcolor_iyuv2rgb", "top": { "source": [ "${XF_PROJ_ROOT}/L1/examples/cvtcolor/xf_cvt_color_accel_gen_vitis.cpp" ], "cflags": "-I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x", "csimflags": "-I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x" }, "testbench": { "source": [ "${XF_PROJ_ROOT}/L1/examples/cvtcolor/xf_cvt_color_tb_gen_vitis.cpp" ], "cflags": "-I${OPENCV_INCLUDE} -I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x", "ldflags": "-L ${OPENCV_LIB} -lopencv_imgcodecs -lopencv_imgproc -lopencv_core -lopencv_highgui -lopencv_flann -lopencv_features2d", "argv": { "hls_csim": " ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/input/testcase1_RGBA2IYUV_Y.png ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/input/testcase1_RGBA2IYUV_U.png ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/input/testcase1_RGBA2IYUV_V.png ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/output/testcase1_ocv.png ", "hls_cosim": " ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/input/testcase1_RGBA2IYUV_Y.png ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/input/testcase1_RGBA2IYUV_U.png ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/input/testcase1_RGBA2IYUV_V.png ${XF_PROJ_ROOT}/data/cvtcolor/IYUV2RGB/output/testcase1_ocv.png " }, "stdmath": false, "csimflags": "-I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x" }, "testinfo": { "disable": false, "jobs": [ { "index": 0, "dependency": [], "env": "", "cmd": "", "max_memory_MB": 16384, "max_time_min": { "hls_vivado_syn": 300, "hls_csim": 60, "hls_cosim": 300, "hls_vivado_impl": 300, "hls_csynth": 60 } } ], "targets": [ "hls_csim", "hls_csynth", "hls_cosim", "hls_vivado_syn", "hls_vivado_impl" ], "category": "full" } }
{ "pile_set_name": "Github" }
/* * @BEGIN LICENSE * * Psi4: an open-source quantum chemistry software package * * Copyright (c) 2007-2019 The Psi4 Developers. * * The copyrights for code used from other parties are included in * the corresponding files. * * This file is part of Psi4. * * Psi4 is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, version 3. * * Psi4 is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License along * with Psi4; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * @END LICENSE */ /*! \file \ingroup CCTRIPLES \brief Enter brief description of file here */ #include <cstdio> #include <cstdlib> #include <cmath> #include "psi4/libciomr/libciomr.h" #include "psi4/libqt/qt.h" #include "psi4/libdpd/dpd.h" #include "MOInfo.h" #include "Params.h" #define EXTERN #include "globals.h" #include "psi4/libpsi4util/PsiOutStream.h" namespace psi { namespace cctriples { double ET_UHF_AAB() { int cnt; int h, nirreps; int Gi, Gj, Gk, Ga, Gb, Gc, Gd, Gl; int Gji, Gij, Gjk, Gkj, Gik, Gki, Gijk; int Gab, Gbc, Gac, Gcb, Gca; int Gid, Gjd, Gkd; int Gil, Gjl, Gkl; int I, J, K, A, B, C; int i, j, k, a, b, c; int ij, ji, ik, ki, jk, kj; int ab, ba, ac, ca, bc, cb; int dc, ad, bd; int lc, la, lb; int id, jd, kd; int il, jl, kl; int *aoccpi, *avirtpi, *aocc_off, *avir_off; int *boccpi, *bvirtpi, *bocc_off, *bvir_off; double value_c, value_d, dijk, denom, ET_AAB; double t_ia, t_ib, t_ja, t_jb, t_kc; double f_ia, f_ib, f_ja, f_jb, f_kc; double D_jkbc, D_jkac, D_ikbc, D_ikac, D_jiab; double t_jkbc, t_jkac, t_ikbc, t_ikac, t_jiab; int nrows, ncols, nlinks; dpdbuf4 T2AB, T2AA, 
T2BA; dpdbuf4 FAAints, FABints, FBAints; dpdbuf4 EAAints, EABints, EBAints; dpdbuf4 DAAints, DABints; dpdfile2 T1A, T1B, fIJ, fij, fAB, fab, fIA, fia; double ***WABc, ***WBcA, ***WAcB, ***WcAB, ***WcBA, ***VABc; int nijk, mijk; nirreps = moinfo.nirreps; aoccpi = moinfo.aoccpi; avirtpi = moinfo.avirtpi; aocc_off = moinfo.aocc_off; avir_off = moinfo.avir_off; boccpi = moinfo.boccpi; bvirtpi = moinfo.bvirtpi; bocc_off = moinfo.bocc_off; bvir_off = moinfo.bvir_off; global_dpd_->file2_init(&fIJ, PSIF_CC_OEI, 0, 0, 0, "fIJ"); global_dpd_->file2_init(&fij, PSIF_CC_OEI, 0, 2, 2, "fij"); global_dpd_->file2_init(&fAB, PSIF_CC_OEI, 0, 1, 1, "fAB"); global_dpd_->file2_init(&fab, PSIF_CC_OEI, 0, 3, 3, "fab"); global_dpd_->file2_init(&fIA, PSIF_CC_OEI, 0, 0, 1, "fIA"); global_dpd_->file2_init(&fia, PSIF_CC_OEI, 0, 2, 3, "fia"); global_dpd_->file2_mat_init(&fIJ); global_dpd_->file2_mat_init(&fij); global_dpd_->file2_mat_init(&fAB); global_dpd_->file2_mat_init(&fab); global_dpd_->file2_mat_init(&fIA); global_dpd_->file2_mat_init(&fia); global_dpd_->file2_mat_rd(&fIJ); global_dpd_->file2_mat_rd(&fij); global_dpd_->file2_mat_rd(&fAB); global_dpd_->file2_mat_rd(&fab); global_dpd_->file2_mat_rd(&fIA); global_dpd_->file2_mat_rd(&fia); global_dpd_->file2_init(&T1A, PSIF_CC_OEI, 0, 0, 1, "tIA"); global_dpd_->file2_mat_init(&T1A); global_dpd_->file2_mat_rd(&T1A); global_dpd_->file2_init(&T1B, PSIF_CC_OEI, 0, 2, 3, "tia"); global_dpd_->file2_mat_init(&T1B); global_dpd_->file2_mat_rd(&T1B); global_dpd_->buf4_init(&T2AA, PSIF_CC_TAMPS, 0, 0, 5, 2, 7, 0, "tIJAB"); global_dpd_->buf4_init(&T2AB, PSIF_CC_TAMPS, 0, 22, 28, 22, 28, 0, "tIjAb"); global_dpd_->buf4_init(&T2BA, PSIF_CC_TAMPS, 0, 23, 29, 23, 29, 0, "tiJaB"); global_dpd_->buf4_init(&FAAints, PSIF_CC_FINTS, 0, 20, 5, 20, 5, 1, "F <IA|BC>"); global_dpd_->buf4_init(&FABints, PSIF_CC_FINTS, 0, 24, 28, 24, 28, 0, "F <Ia|Bc>"); global_dpd_->buf4_init(&FBAints, PSIF_CC_FINTS, 0, 27, 29, 27, 29, 0, "F <iA|bC>"); global_dpd_->buf4_init(&EAAints, 
PSIF_CC_EINTS, 0, 0, 20, 2, 20, 0, "E <IJ||KA> (I>J,KA)"); global_dpd_->buf4_init(&EABints, PSIF_CC_EINTS, 0, 22, 24, 22, 24, 0, "E <Ij|Ka>"); global_dpd_->buf4_init(&EBAints, PSIF_CC_EINTS, 0, 23, 27, 23, 27, 0, "E <iJ|kA>"); global_dpd_->buf4_init(&DAAints, PSIF_CC_DINTS, 0, 0, 5, 0, 5, 0, "D <IJ||AB>"); global_dpd_->buf4_init(&DABints, PSIF_CC_DINTS, 0, 22, 28, 22, 28, 0, "D <Ij|Ab>"); for (h = 0; h < nirreps; h++) { global_dpd_->buf4_mat_irrep_init(&T2AA, h); global_dpd_->buf4_mat_irrep_rd(&T2AA, h); global_dpd_->buf4_mat_irrep_init(&T2AB, h); global_dpd_->buf4_mat_irrep_rd(&T2AB, h); global_dpd_->buf4_mat_irrep_init(&T2BA, h); global_dpd_->buf4_mat_irrep_rd(&T2BA, h); global_dpd_->buf4_mat_irrep_init(&EAAints, h); global_dpd_->buf4_mat_irrep_rd(&EAAints, h); global_dpd_->buf4_mat_irrep_init(&EABints, h); global_dpd_->buf4_mat_irrep_rd(&EABints, h); global_dpd_->buf4_mat_irrep_init(&EBAints, h); global_dpd_->buf4_mat_irrep_rd(&EBAints, h); global_dpd_->buf4_mat_irrep_init(&DAAints, h); global_dpd_->buf4_mat_irrep_rd(&DAAints, h); global_dpd_->buf4_mat_irrep_init(&DABints, h); global_dpd_->buf4_mat_irrep_rd(&DABints, h); } /* Compute the number of IJK combinations in this spin case */ nijk = 0; for (Gi = 0; Gi < nirreps; Gi++) for (Gj = 0; Gj < nirreps; Gj++) for (Gk = 0; Gk < nirreps; Gk++) for (i = 0; i < aoccpi[Gi]; i++) { I = aocc_off[Gi] + i; for (j = 0; j < aoccpi[Gj]; j++) { J = aocc_off[Gj] + j; for (k = 0; k < boccpi[Gk]; k++) { K = bocc_off[Gk] + k; if (I > J) nijk++; } } } auto mode = std::ostream::trunc; auto printer = std::make_shared<PsiOutStream>("ijk.dat", mode); // ffile(&ijkfile,"ijk.dat",0); printer->Printf("Spin Case: AAB\n"); printer->Printf("Number of IJK combintions: %d\n", nijk); printer->Printf("\nCurrent IJK Combination:\n"); mijk = 0; ET_AAB = 0.0; WABc = (double ***)malloc(nirreps * sizeof(double **)); WBcA = (double ***)malloc(nirreps * sizeof(double **)); WAcB = (double ***)malloc(nirreps * sizeof(double **)); WcAB = (double 
***)malloc(nirreps * sizeof(double **)); WcBA = (double ***)malloc(nirreps * sizeof(double **)); VABc = (double ***)malloc(nirreps * sizeof(double **)); for (Gi = 0; Gi < nirreps; Gi++) { for (Gj = 0; Gj < nirreps; Gj++) { for (Gk = 0; Gk < nirreps; Gk++) { Gij = Gji = Gi ^ Gj; Gjk = Gkj = Gj ^ Gk; Gik = Gki = Gi ^ Gk; Gijk = Gi ^ Gj ^ Gk; for (i = 0; i < aoccpi[Gi]; i++) { I = aocc_off[Gi] + i; for (j = 0; j < aoccpi[Gj]; j++) { J = aocc_off[Gj] + j; for (k = 0; k < boccpi[Gk]; k++) { K = bocc_off[Gk] + k; if (I > J) { mijk++; printer->Printf("%d\n", mijk); ij = EAAints.params->rowidx[I][J]; ji = EAAints.params->rowidx[J][I]; jk = EABints.params->rowidx[J][K]; kj = EBAints.params->rowidx[K][J]; ik = EABints.params->rowidx[I][K]; ki = EBAints.params->rowidx[K][I]; dijk = 0.0; if (fIJ.params->rowtot[Gi]) dijk += fIJ.matrix[Gi][i][i]; if (fIJ.params->rowtot[Gj]) dijk += fIJ.matrix[Gj][j][j]; if (fij.params->rowtot[Gk]) dijk += fij.matrix[Gk][k][k]; for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; WABc[Gab] = global_dpd_->dpd_block_matrix(FAAints.params->coltot[Gab], bvirtpi[Gc]); } for (Gd = 0; Gd < nirreps; Gd++) { /* +t_JkDc * F_IDAB */ Gab = Gid = Gi ^ Gd; Gc = Gjk ^ Gd; dc = T2AB.col_offset[Gjk][Gd]; id = FAAints.row_offset[Gid][I]; FAAints.matrix[Gid] = global_dpd_->dpd_block_matrix(avirtpi[Gd], FAAints.params->coltot[Gid]); global_dpd_->buf4_mat_irrep_rd_block(&FAAints, Gid, id, avirtpi[Gd]); nrows = FAAints.params->coltot[Gid]; ncols = bvirtpi[Gc]; nlinks = avirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, 1.0, &(FAAints.matrix[Gid][0][0]), nrows, &(T2AB.matrix[Gjk][jk][dc]), ncols, 1.0, &(WABc[Gab][0][0]), ncols); global_dpd_->free_dpd_block(FAAints.matrix[Gid], avirtpi[Gd], FAAints.params->coltot[Gid]); /* -t_IkDc * F_JDAB */ Gab = Gjd = Gj ^ Gd; Gc = Gik ^ Gd; dc = T2AB.col_offset[Gik][Gd]; jd = FAAints.row_offset[Gjd][J]; FAAints.matrix[Gjd] = global_dpd_->dpd_block_matrix(avirtpi[Gd], FAAints.params->coltot[Gjd]); 
global_dpd_->buf4_mat_irrep_rd_block(&FAAints, Gjd, jd, avirtpi[Gd]); nrows = FAAints.params->coltot[Gjd]; ncols = bvirtpi[Gc]; nlinks = avirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, -1.0, &(FAAints.matrix[Gjd][0][0]), nrows, &(T2AB.matrix[Gik][ik][dc]), ncols, 1.0, &(WABc[Gab][0][0]), ncols); global_dpd_->free_dpd_block(FAAints.matrix[Gjd], avirtpi[Gd], FAAints.params->coltot[Gjd]); } for (Gl = 0; Gl < nirreps; Gl++) { /* -t_ILAB * E_JkLc */ Gab = Gil = Gi ^ Gl; Gc = Gjk ^ Gl; lc = EABints.col_offset[Gjk][Gl]; il = T2AA.row_offset[Gil][I]; nrows = T2AA.params->coltot[Gil]; ncols = bvirtpi[Gc]; nlinks = aoccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, -1.0, &(T2AA.matrix[Gil][il][0]), nrows, &(EABints.matrix[Gjk][jk][lc]), ncols, 1.0, &(WABc[Gab][0][0]), ncols); /* +t_JLAB * E_IkLc */ Gab = Gjl = Gj ^ Gl; Gc = Gik ^ Gl; lc = EABints.col_offset[Gik][Gl]; jl = T2AA.row_offset[Gjl][J]; nrows = T2AA.params->coltot[Gjl]; ncols = bvirtpi[Gc]; nlinks = aoccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, 1.0, &(T2AA.matrix[Gjl][jl][0]), nrows, &(EABints.matrix[Gik][ik][lc]), ncols, 1.0, &(WABc[Gab][0][0]), ncols); } for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; WBcA[Gab] = global_dpd_->dpd_block_matrix(FABints.params->coltot[Gab], avirtpi[Gc]); } for (Gd = 0; Gd < nirreps; Gd++) { /* -t_JkAd * F_IdBc */ Gbc = Gid = Gi ^ Gd; Ga = Gjk ^ Gd; ad = T2AB.col_offset[Gjk][Ga]; id = FABints.row_offset[Gid][I]; FABints.matrix[Gid] = global_dpd_->dpd_block_matrix(bvirtpi[Gd], FABints.params->coltot[Gid]); global_dpd_->buf4_mat_irrep_rd_block(&FABints, Gid, id, bvirtpi[Gd]); nrows = FABints.params->coltot[Gid]; ncols = avirtpi[Ga]; nlinks = bvirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 't', nrows, ncols, nlinks, -1.0, &(FABints.matrix[Gid][0][0]), nrows, &(T2AB.matrix[Gjk][jk][ad]), nlinks, 1.0, &(WBcA[Gbc][0][0]), ncols); 
global_dpd_->free_dpd_block(FABints.matrix[Gid], bvirtpi[Gd], FABints.params->coltot[Gid]); /* +t_IkAd * F_JdBc */ Gbc = Gjd = Gj ^ Gd; Ga = Gik ^ Gd; ad = T2AB.col_offset[Gik][Ga]; jd = FABints.row_offset[Gjd][J]; FABints.matrix[Gjd] = global_dpd_->dpd_block_matrix(bvirtpi[Gd], FABints.params->coltot[Gjd]); global_dpd_->buf4_mat_irrep_rd_block(&FABints, Gjd, jd, bvirtpi[Gd]); nrows = FABints.params->coltot[Gjd]; ncols = avirtpi[Ga]; nlinks = bvirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 't', nrows, ncols, nlinks, 1.0, &(FABints.matrix[Gjd][0][0]), nrows, &(T2AB.matrix[Gik][ik][ad]), nlinks, 1.0, &(WBcA[Gbc][0][0]), ncols); global_dpd_->free_dpd_block(FABints.matrix[Gjd], bvirtpi[Gd], FABints.params->coltot[Gjd]); } for (Gl = 0; Gl < nirreps; Gl++) { /* +t_IlBc * E_kJlA */ Gbc = Gil = Gi ^ Gl; Ga = Gkj ^ Gl; la = EBAints.col_offset[Gkj][Gl]; il = T2AB.row_offset[Gil][I]; nrows = T2AB.params->coltot[Gil]; ncols = avirtpi[Ga]; nlinks = boccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, 1.0, &(T2AB.matrix[Gil][il][0]), nrows, &(EBAints.matrix[Gkj][kj][la]), ncols, 1.0, &(WBcA[Gbc][0][0]), ncols); /* -t_JlBc * E_kIlA */ Gbc = Gjl = Gj ^ Gl; Ga = Gki ^ Gl; la = EBAints.col_offset[Gki][Gl]; jl = T2AB.row_offset[Gjl][J]; nrows = T2AB.params->coltot[Gjl]; ncols = avirtpi[Ga]; nlinks = boccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, -1.0, &(T2AB.matrix[Gjl][jl][0]), nrows, &(EBAints.matrix[Gki][ki][la]), ncols, 1.0, &(WBcA[Gbc][0][0]), ncols); } global_dpd_->sort_3d(WBcA, WABc, nirreps, Gijk, FABints.params->coltot, FABints.params->colidx, FABints.params->colorb, FABints.params->rsym, FABints.params->ssym, avir_off, bvir_off, avirtpi, avir_off, FAAints.params->colidx, cab, 1); for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; global_dpd_->free_dpd_block(WBcA[Gab], FABints.params->coltot[Gab], avirtpi[Gc]); WAcB[Gab] = global_dpd_->dpd_block_matrix(FABints.params->coltot[Gab], avirtpi[Gc]); } for 
(Gd = 0; Gd < nirreps; Gd++) { /* +t_JkBd * F_IdAc */ Gac = Gid = Gi ^ Gd; Gb = Gjk ^ Gd; bd = T2AB.col_offset[Gjk][Gb]; id = FABints.row_offset[Gid][I]; FABints.matrix[Gid] = global_dpd_->dpd_block_matrix(bvirtpi[Gd], FABints.params->coltot[Gid]); global_dpd_->buf4_mat_irrep_rd_block(&FABints, Gid, id, bvirtpi[Gd]); nrows = FABints.params->coltot[Gid]; ncols = avirtpi[Gb]; nlinks = bvirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 't', nrows, ncols, nlinks, 1.0, &(FABints.matrix[Gid][0][0]), nrows, &(T2AB.matrix[Gjk][jk][bd]), nlinks, 1.0, &(WAcB[Gac][0][0]), ncols); global_dpd_->free_dpd_block(FABints.matrix[Gid], bvirtpi[Gd], FABints.params->coltot[Gid]); /* -t_IkBd * F_JdAc */ Gac = Gjd = Gj ^ Gd; Gb = Gik ^ Gd; bd = T2AB.col_offset[Gik][Gb]; jd = FABints.row_offset[Gjd][J]; FABints.matrix[Gjd] = global_dpd_->dpd_block_matrix(bvirtpi[Gd], FABints.params->coltot[Gjd]); global_dpd_->buf4_mat_irrep_rd_block(&FABints, Gjd, jd, bvirtpi[Gd]); nrows = FABints.params->coltot[Gjd]; ncols = avirtpi[Gb]; nlinks = bvirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 't', nrows, ncols, nlinks, -1.0, &(FABints.matrix[Gjd][0][0]), nrows, &(T2AB.matrix[Gik][ik][bd]), nlinks, 1.0, &(WAcB[Gac][0][0]), ncols); global_dpd_->free_dpd_block(FABints.matrix[Gjd], bvirtpi[Gd], FABints.params->coltot[Gjd]); } for (Gl = 0; Gl < nirreps; Gl++) { /* -t_IlAc * E_kJlB */ Gac = Gil = Gi ^ Gl; Gb = Gkj ^ Gl; lb = EBAints.col_offset[Gkj][Gl]; il = T2AB.row_offset[Gil][I]; nrows = T2AB.params->coltot[Gil]; ncols = avirtpi[Gb]; nlinks = boccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, -1.0, &(T2AB.matrix[Gil][il][0]), nrows, &(EBAints.matrix[Gkj][kj][lb]), ncols, 1.0, &(WAcB[Gac][0][0]), ncols); /* +t_JlAc * E_kIlB */ Gac = Gjl = Gj ^ Gl; Gb = Gki ^ Gl; lb = EBAints.col_offset[Gki][Gl]; jl = T2AB.row_offset[Gjl][J]; nrows = T2AB.params->coltot[Gjl]; ncols = avirtpi[Gb]; nlinks = boccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, 
nlinks, 1.0, &(T2AB.matrix[Gjl][jl][0]), nrows, &(EBAints.matrix[Gki][ki][lb]), ncols, 1.0, &(WAcB[Gac][0][0]), ncols); } global_dpd_->sort_3d(WAcB, WABc, nirreps, Gijk, FABints.params->coltot, FABints.params->colidx, FABints.params->colorb, FABints.params->rsym, FABints.params->ssym, avir_off, bvir_off, avirtpi, avir_off, FAAints.params->colidx, acb, 1); for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; global_dpd_->free_dpd_block(WAcB[Gab], FABints.params->coltot[Gab], avirtpi[Gc]); WcBA[Gab] = global_dpd_->dpd_block_matrix(FBAints.params->coltot[Gab], avirtpi[Gc]); } for (Gd = 0; Gd < nirreps; Gd++) { /* -t_JIAD * F_kDcB */ Gcb = Gkd = Gk ^ Gd; Ga = Gji ^ Gd; ad = T2AA.col_offset[Gji][Ga]; kd = FBAints.row_offset[Gkd][K]; FBAints.matrix[Gkd] = global_dpd_->dpd_block_matrix(avirtpi[Gd], FBAints.params->coltot[Gkd]); global_dpd_->buf4_mat_irrep_rd_block(&FBAints, Gkd, kd, avirtpi[Gd]); nrows = FBAints.params->coltot[Gkd]; ncols = avirtpi[Ga]; nlinks = avirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 't', nrows, ncols, nlinks, -1.0, &(FBAints.matrix[Gkd][0][0]), nrows, &(T2AA.matrix[Gji][ji][ad]), nlinks, 1.0, &(WcBA[Gcb][0][0]), ncols); global_dpd_->free_dpd_block(FBAints.matrix[Gkd], avirtpi[Gd], FBAints.params->coltot[Gkd]); } for (Gl = 0; Gl < nirreps; Gl++) { /* -t_kLcB * E_JILA */ Gcb = Gkl = Gk ^ Gl; Ga = Gji ^ Gl; la = EAAints.col_offset[Gji][Gl]; kl = T2BA.row_offset[Gkl][K]; nrows = T2BA.params->coltot[Gkl]; ncols = avirtpi[Ga]; nlinks = aoccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, -1.0, &(T2BA.matrix[Gkl][kl][0]), nrows, &(EAAints.matrix[Gji][ji][la]), ncols, 1.0, &(WcBA[Gcb][0][0]), ncols); } global_dpd_->sort_3d(WcBA, WABc, nirreps, Gijk, FBAints.params->coltot, FBAints.params->colidx, FBAints.params->colorb, FBAints.params->rsym, FBAints.params->ssym, bvir_off, avir_off, avirtpi, avir_off, FAAints.params->colidx, cba, 1); for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; 
global_dpd_->free_dpd_block(WcBA[Gab], FBAints.params->coltot[Gab], avirtpi[Gc]); WcAB[Gab] = global_dpd_->dpd_block_matrix(FBAints.params->coltot[Gab], avirtpi[Gc]); } for (Gd = 0; Gd < nirreps; Gd++) { /* +t_JIBD * F_kDcA */ Gca = Gkd = Gk ^ Gd; Gb = Gji ^ Gd; bd = T2AA.col_offset[Gji][Gb]; kd = FBAints.row_offset[Gkd][K]; FBAints.matrix[Gkd] = global_dpd_->dpd_block_matrix(avirtpi[Gd], FBAints.params->coltot[Gkd]); global_dpd_->buf4_mat_irrep_rd_block(&FBAints, Gkd, kd, avirtpi[Gd]); nrows = FBAints.params->coltot[Gkd]; ncols = avirtpi[Gb]; nlinks = avirtpi[Gd]; if (nrows && ncols && nlinks) C_DGEMM('t', 't', nrows, ncols, nlinks, 1.0, &(FBAints.matrix[Gkd][0][0]), nrows, &(T2AA.matrix[Gji][ji][bd]), nlinks, 1.0, &(WcAB[Gca][0][0]), ncols); global_dpd_->free_dpd_block(FBAints.matrix[Gkd], avirtpi[Gd], FBAints.params->coltot[Gkd]); } for (Gl = 0; Gl < nirreps; Gl++) { /* t_kLcA * E_JILB */ Gca = Gkl = Gk ^ Gl; Gb = Gji ^ Gl; lb = EAAints.col_offset[Gji][Gl]; kl = T2BA.row_offset[Gkl][K]; nrows = T2BA.params->coltot[Gkl]; ncols = avirtpi[Gb]; nlinks = aoccpi[Gl]; if (nrows && ncols && nlinks) C_DGEMM('t', 'n', nrows, ncols, nlinks, 1.0, &(T2BA.matrix[Gkl][kl][0]), nrows, &(EAAints.matrix[Gji][ji][lb]), ncols, 1.0, &(WcAB[Gca][0][0]), ncols); } global_dpd_->sort_3d(WcAB, WABc, nirreps, Gijk, FBAints.params->coltot, FBAints.params->colidx, FBAints.params->colorb, FBAints.params->rsym, FBAints.params->ssym, bvir_off, avir_off, avirtpi, avir_off, FAAints.params->colidx, bca, 1); for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; global_dpd_->free_dpd_block(WcAB[Gab], FBAints.params->coltot[Gab], avirtpi[Gc]); VABc[Gab] = global_dpd_->dpd_block_matrix(FAAints.params->coltot[Gab], bvirtpi[Gc]); } /* Add disconnected triples and finish W and V arrays */ for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; for (ab = 0; ab < FAAints.params->coltot[Gab]; ab++) { A = FAAints.params->colorb[Gab][ab][0]; Ga = FAAints.params->rsym[A]; a = A - avir_off[Ga]; B = 
FAAints.params->colorb[Gab][ab][1]; Gb = FAAints.params->ssym[B]; b = B - avir_off[Gb]; Gbc = Gb ^ Gc; Gac = Ga ^ Gc; for (c = 0; c < bvirtpi[Gc]; c++) { C = bvir_off[Gc] + c; bc = DABints.params->colidx[B][C]; ac = DABints.params->colidx[A][C]; /* +t_IA * D_JkBc + f_IA * t_JkBc */ if (Gi == Ga && Gjk == Gbc) { t_ia = D_jkbc = f_ia = t_jkbc = 0.0; if (T1A.params->rowtot[Gi] && T1A.params->coltot[Gi]) { t_ia = T1A.matrix[Gi][i][a]; f_ia = fIA.matrix[Gi][i][a]; } if (DABints.params->rowtot[Gjk] && DABints.params->coltot[Gjk]) { D_jkbc = DABints.matrix[Gjk][jk][bc]; t_jkbc = T2AB.matrix[Gjk][jk][bc]; } VABc[Gab][ab][c] += t_ia * D_jkbc + f_ia * t_jkbc; } /* -t_IB * D_JkAc - f_IB * t_JkAc */ if (Gi == Gb && Gjk == Gac) { t_ib = D_jkac = f_ib = t_jkac = 0.0; if (T1A.params->rowtot[Gi] && T1A.params->coltot[Gi]) { t_ib = T1A.matrix[Gi][i][b]; f_ib = fIA.matrix[Gi][i][b]; } if (DABints.params->rowtot[Gjk] && DABints.params->coltot[Gjk]) { D_jkac = DABints.matrix[Gjk][jk][ac]; t_jkac = T2AB.matrix[Gjk][jk][ac]; } VABc[Gab][ab][c] -= t_ib * D_jkac + f_ib * t_jkac; } /* -t_JA * D_IkBc - f_JA * t_IkBc */ if (Gj == Ga && Gik == Gbc) { t_ja = D_ikbc = f_ja = t_ikbc = 0.0; if (T1A.params->rowtot[Gj] && T1A.params->coltot[Gj]) { t_ja = T1A.matrix[Gj][j][a]; f_ja = fIA.matrix[Gj][j][a]; } if (DABints.params->rowtot[Gik] && DABints.params->coltot[Gik]) { D_ikbc = DABints.matrix[Gik][ik][bc]; t_ikbc = T2AB.matrix[Gik][ik][bc]; } VABc[Gab][ab][c] -= t_ja * D_ikbc + f_ja * t_ikbc; } /* +t_JB * D_IkAc + f_JB * t_IkAc */ if (Gj == Gb && Gik == Gac) { t_jb = D_ikac = f_jb = t_ikac = 0.0; if (T1A.params->rowtot[Gj] && T1A.params->coltot[Gj]) { t_jb = T1A.matrix[Gj][j][b]; f_jb = fIA.matrix[Gj][j][b]; } if (DABints.params->rowtot[Gik] && DABints.params->coltot[Gik]) { D_ikac = DABints.matrix[Gik][ik][ac]; t_ikac = T2AB.matrix[Gik][ik][ac]; } VABc[Gab][ab][c] += t_jb * D_ikac + f_jb * t_ikac; } /* -t_kc * D_JIAB - f_kc * t_JIAB */ if (Gk == Gc && Gji == Gab) { t_kc = D_jiab = f_kc = t_jiab 
= 0.0; if (T1B.params->rowtot[Gk] && T1B.params->coltot[Gk]) { t_kc = T1B.matrix[Gk][k][c]; f_kc = fia.matrix[Gk][k][c]; } if (DAAints.params->rowtot[Gji] && DAAints.params->coltot[Gji]) { D_jiab = DAAints.matrix[Gji][ji][ab]; t_jiab = T2AA.matrix[Gji][ji][ab]; } VABc[Gab][ab][c] -= t_kc * D_jiab + f_kc * t_jiab; } /* Sum V and W into V */ VABc[Gab][ab][c] += WABc[Gab][ab][c]; /* Build the rest of the denominator and divide it into W */ denom = dijk; if (fAB.params->rowtot[Ga]) denom -= fAB.matrix[Ga][a][a]; if (fAB.params->rowtot[Gb]) denom -= fAB.matrix[Gb][b][b]; if (fab.params->rowtot[Gc]) denom -= fab.matrix[Gc][c][c]; WABc[Gab][ab][c] /= denom; } /* c */ } /* ab */ } /* Gab */ /* 1/2 Dot product of final V and W is the energy for this ijk triple */ for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; ET_AAB += dot_block(WABc[Gab], VABc[Gab], FAAints.params->coltot[Gab], bvirtpi[Gc], 0.5); } for (Gab = 0; Gab < nirreps; Gab++) { Gc = Gab ^ Gijk; global_dpd_->free_dpd_block(WABc[Gab], FAAints.params->coltot[Gab], bvirtpi[Gc]); global_dpd_->free_dpd_block(VABc[Gab], FAAints.params->coltot[Gab], bvirtpi[Gc]); } } /* I >= J */ } /* k */ } /* j */ } /* i */ } /* Gk */ } /* Gj */ } /* Gi */ /* outfile->Printf( "cnt = %d\n", cnt); */ /* outfile->Printf( "ET_AAB = %20.14f\n", ET_AAB); */ free(WABc); free(WBcA); free(WAcB); free(WcAB); free(WcBA); free(VABc); for (h = 0; h < nirreps; h++) { global_dpd_->buf4_mat_irrep_close(&T2AA, h); global_dpd_->buf4_mat_irrep_close(&T2AB, h); global_dpd_->buf4_mat_irrep_close(&T2BA, h); global_dpd_->buf4_mat_irrep_close(&EAAints, h); global_dpd_->buf4_mat_irrep_close(&EABints, h); global_dpd_->buf4_mat_irrep_close(&EBAints, h); global_dpd_->buf4_mat_irrep_close(&DAAints, h); global_dpd_->buf4_mat_irrep_close(&DABints, h); } global_dpd_->buf4_close(&T2AA); global_dpd_->buf4_close(&T2AB); global_dpd_->buf4_close(&T2BA); global_dpd_->buf4_close(&FAAints); global_dpd_->buf4_close(&FABints); global_dpd_->buf4_close(&FBAints); 
global_dpd_->buf4_close(&EAAints); global_dpd_->buf4_close(&EABints); global_dpd_->buf4_close(&EBAints); global_dpd_->buf4_close(&DAAints); global_dpd_->buf4_close(&DABints); global_dpd_->file2_mat_close(&T1A); global_dpd_->file2_close(&T1A); global_dpd_->file2_mat_close(&T1B); global_dpd_->file2_close(&T1B); global_dpd_->file2_mat_close(&fIJ); global_dpd_->file2_mat_close(&fij); global_dpd_->file2_mat_close(&fAB); global_dpd_->file2_mat_close(&fab); global_dpd_->file2_mat_close(&fIA); global_dpd_->file2_mat_close(&fia); global_dpd_->file2_close(&fIJ); global_dpd_->file2_close(&fij); global_dpd_->file2_close(&fAB); global_dpd_->file2_close(&fab); global_dpd_->file2_close(&fIA); global_dpd_->file2_close(&fia); return ET_AAB; } } // namespace cctriples } // namespace psi
{ "pile_set_name": "Github" }
/**
 * lodash (Custom Build) <https://lodash.com/>
 * Build: `lodash modularize exports="npm" -o ./`
 * Copyright jQuery Foundation and other contributors <https://jquery.org/>
 * Released under MIT license <https://lodash.com/license>
 * Based on Underscore.js 1.8.3 <http://underscorejs.org/LICENSE>
 * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
 */

/** Largest integer exactly representable as a double (2^53 - 1). */
var MAX_SAFE_INTEGER = 9007199254740991;

/** `Object#toString` result references. */
var argsTag = '[object Arguments]',
    funcTag = '[object Function]',
    genTag = '[object GeneratorFunction]';

/** Matches unsigned-integer strings (candidate array indexes). */
var reIsUint = /^(?:0|[1-9]\d*)$/;

/** Used for built-in method references. */
var objectProto = Object.prototype;

/** Cached `Object.prototype` helpers, safe against shadowing. */
var hasOwnProperty = objectProto.hasOwnProperty;
var objectToString = objectProto.toString;
var propertyIsEnumerable = objectProto.propertyIsEnumerable;

/* Built-in method references with the same name as other `lodash` methods. */
var nativeMax = Math.max;

/**
 * Invokes `func` with `thisArg` and `args`, using `Function#call` for small
 * arities (faster than `Function#apply`).
 *
 * @param {Function} func The function to invoke.
 * @param {*} thisArg The `this` binding of `func`.
 * @param {Array} args The arguments to invoke `func` with.
 * @returns {*} Returns the result of `func`.
 */
function apply(func, thisArg, args) {
  var count = args.length;
  if (count == 0) {
    return func.call(thisArg);
  }
  if (count == 1) {
    return func.call(thisArg, args[0]);
  }
  if (count == 2) {
    return func.call(thisArg, args[0], args[1]);
  }
  if (count == 3) {
    return func.call(thisArg, args[0], args[1], args[2]);
  }
  return func.apply(thisArg, args);
}

/**
 * Invokes `iteratee` for indexes `0..n-1`, collecting the results.
 *
 * @param {number} n The number of times to invoke `iteratee`.
 * @param {Function} iteratee The function invoked per iteration.
 * @returns {Array} Returns the array of results.
 */
function baseTimes(n, iteratee) {
  var result = Array(n);
  for (var index = 0; index < n; index++) {
    result[index] = iteratee(index);
  }
  return result;
}

/**
 * Creates an array of the enumerable property names of the array-like `value`.
 * Index keys are pre-filled for arrays/arguments because Safari 8.1/9 make
 * `arguments.callee`/`arguments.length` enumerable in strict mode; those are
 * filtered from the `for...in` pass below.
 *
 * @param {*} value The value to query.
 * @param {boolean} inherited Specify returning inherited property names.
 * @returns {Array} Returns the array of property names.
 */
function arrayLikeKeys(value, inherited) {
  var indexed = isArray(value) || isArguments(value);
  var result = indexed ? baseTimes(value.length, String) : [];
  var length = result.length;
  var skipIndexes = !!length;
  for (var key in value) {
    var wanted = inherited || hasOwnProperty.call(value, key);
    var isIndexKey = skipIndexes && (key == 'length' || isIndex(key, length));
    if (wanted && !isIndexKey) {
      result.push(key);
    }
  }
  return result;
}

/**
 * Customizer for `_.defaults`: keeps `objValue` unless it is `undefined` or
 * is merely an inherited `Object.prototype` value.
 *
 * @param {*} objValue The destination value.
 * @param {*} srcValue The source value.
 * @param {string} key The key of the property to assign.
 * @param {Object} object The parent object of `objValue`.
 * @returns {*} Returns the value to assign.
 */
function assignInDefaults(objValue, srcValue, key, object) {
  var isInheritedDefault =
    eq(objValue, objectProto[key]) && !hasOwnProperty.call(object, key);
  return (objValue === undefined || isInheritedDefault) ? srcValue : objValue;
}

/**
 * Assigns `value` to `key` of `object` unless an equivalent own value
 * (by `SameValueZero`) is already present.
 *
 * @param {Object} object The object to modify.
 * @param {string} key The key of the property to assign.
 * @param {*} value The value to assign.
 */
function assignValue(object, key, value) {
  var current = object[key];
  var alreadySet = hasOwnProperty.call(object, key) && eq(current, value);
  if (!alreadySet || (value === undefined && !(key in object))) {
    object[key] = value;
  }
}

/**
 * The base implementation of `_.keysIn` which doesn't treat sparse arrays
 * as dense.
 *
 * @param {Object} object The object to query.
 * @returns {Array} Returns the array of property names.
 */
function baseKeysIn(object) {
  if (!isObject(object)) {
    return nativeKeysIn(object);
  }
  var proto = isPrototype(object);
  var result = [];
  for (var key in object) {
    // Skip `constructor` on prototype objects and when it is inherited.
    var skip = key == 'constructor' && (proto || !hasOwnProperty.call(object, key));
    if (!skip) {
      result.push(key);
    }
  }
  return result;
}

/**
 * The base implementation of `_.rest`: wraps `func` so trailing arguments
 * from position `start` onward are gathered into an array.
 *
 * @param {Function} func The function to apply a rest parameter to.
 * @param {number} [start=func.length-1] The start position of the rest parameter.
 * @returns {Function} Returns the new function.
 */
function baseRest(func, start) {
  start = nativeMax(start === undefined ? (func.length - 1) : start, 0);
  return function() {
    var args = arguments;
    var restLength = nativeMax(args.length - start, 0);
    var rest = Array(restLength);
    for (var i = 0; i < restLength; i++) {
      rest[i] = args[start + i];
    }
    var callArgs = Array(start + 1);
    for (var j = 0; j < start; j++) {
      callArgs[j] = args[j];
    }
    callArgs[start] = rest;
    return apply(func, this, callArgs);
  };
}

/**
 * Copies `props` of `source` to `object`, optionally routing each value
 * through `customizer`.
 *
 * @param {Object} source The object to copy properties from.
 * @param {Array} props The property identifiers to copy.
 * @param {Object} [object={}] The object to copy properties to.
 * @param {Function} [customizer] The function to customize copied values.
 * @returns {Object} Returns `object`.
 */
function copyObject(source, props, object, customizer) {
  if (!object) {
    object = {};
  }
  for (var index = 0; index < props.length; index++) {
    var key = props[index];
    var newValue = customizer
      ? customizer(object[key], source[key], key, object, source)
      : undefined;
    if (newValue === undefined) {
      newValue = source[key];
    }
    assignValue(object, key, newValue);
  }
  return object;
}

/**
 * Creates a function like `_.assign` from an `assigner` that merges one
 * source object at a time. Handles an optional trailing customizer and
 * guards against being invoked as an iteratee.
 *
 * @param {Function} assigner The function to assign values.
 * @returns {Function} Returns the new assigner function.
 */
function createAssigner(assigner) {
  return baseRest(function(object, sources) {
    var length = sources.length;
    var customizer = length > 1 ? sources[length - 1] : undefined;
    var guard = length > 2 ? sources[2] : undefined;
    if (assigner.length > 3 && typeof customizer == 'function') {
      // The last argument is a customizer, not a source.
      length--;
    } else {
      customizer = undefined;
    }
    if (guard && isIterateeCall(sources[0], sources[1], guard)) {
      customizer = length < 3 ? undefined : customizer;
      length = 1;
    }
    object = Object(object);
    for (var index = 0; index < length; index++) {
      var source = sources[index];
      if (source) {
        assigner(object, source, index, customizer);
      }
    }
    return object;
  });
}

/**
 * Checks if `value` is a valid array-like index.
 *
 * @param {*} value The value to check.
 * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.
 * @returns {boolean} Returns `true` if `value` is a valid index, else `false`.
 */
function isIndex(value, length) {
  if (length == null) {
    length = MAX_SAFE_INTEGER;
  }
  if (!length) {
    return false;
  }
  var looksNumeric = typeof value == 'number' || reIsUint.test(value);
  return looksNumeric && value > -1 && value % 1 == 0 && value < length;
}

/**
 * Checks if the given arguments are from an iteratee call (i.e. `object`
 * holds `value` at `index`).
 *
 * @param {*} value The potential iteratee value argument.
 * @param {*} index The potential iteratee index or key argument.
 * @param {*} object The potential iteratee object argument.
 * @returns {boolean} Returns `true` if the arguments are from an iteratee
 *  call, else `false`.
 */
function isIterateeCall(value, index, object) {
  if (!isObject(object)) {
    return false;
  }
  var type = typeof index;
  var plausibleKey = type == 'number'
    ? (isArrayLike(object) && isIndex(index, object.length))
    : (type == 'string' && index in object);
  return plausibleKey ? eq(object[index], value) : false;
}

/**
 * Checks if `value` is likely a prototype object.
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is a prototype, else `false`.
 */
function isPrototype(value) {
  var Ctor = value && value.constructor;
  var proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto;
  return value === proto;
}

/**
 * Like `Object.keys` except it includes inherited enumerable properties.
 *
 * @param {Object} object The object to query.
 * @returns {Array} Returns the array of property names.
 */
function nativeKeysIn(object) {
  var result = [];
  if (object != null) {
    for (var key in Object(object)) {
      result.push(key);
    }
  }
  return result;
}

/**
 * Performs a `SameValueZero` comparison between two values (like `===`
 * except `NaN` equals `NaN`).
 *
 * @param {*} value The value to compare.
 * @param {*} other The other value to compare.
 * @returns {boolean} Returns `true` if the values are equivalent, else `false`.
 */
function eq(value, other) {
  return value === other || (value !== value && other !== other);
}

/**
 * Checks if `value` is likely an `arguments` object.
 * (Safari 8.1 makes `arguments.callee` enumerable in strict mode, hence the
 * `propertyIsEnumerable` fallback to the `toString` tag.)
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an `arguments` object,
 *  else `false`.
 */
function isArguments(value) {
  return isArrayLikeObject(value) &&
    hasOwnProperty.call(value, 'callee') &&
    (!propertyIsEnumerable.call(value, 'callee') ||
      objectToString.call(value) == argsTag);
}

/**
 * Checks if `value` is classified as an `Array` object.
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an array, else `false`.
 */
var isArray = Array.isArray;

/**
 * Checks if `value` is array-like: not a function, with an integer `length`
 * in `[0, MAX_SAFE_INTEGER]`.
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is array-like, else `false`.
 */
function isArrayLike(value) {
  return value != null && isLength(value.length) && !isFunction(value);
}

/**
 * Like `isArrayLike` but also requires `value` to be object-like
 * (excludes strings).
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an array-like object,
 *  else `false`.
 */
function isArrayLikeObject(value) {
  return isObjectLike(value) && isArrayLike(value);
}

/**
 * Checks if `value` is classified as a `Function` object. Uses
 * `Object#toString` because `typeof` in Safari 8-9 returns 'object' for
 * typed array and other constructors.
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is a function, else `false`.
 */
function isFunction(value) {
  var tag = isObject(value) ? objectToString.call(value) : '';
  return tag == funcTag || tag == genTag;
}

/**
 * Checks if `value` is a valid array-like length (loosely based on the
 * spec's `ToLength`).
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is a valid length, else `false`.
 */
function isLength(value) {
  return typeof value == 'number' &&
    value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER;
}

/**
 * Checks if `value` is the language type of `Object`
 * (e.g. arrays, functions, objects, regexes, `new Number(0)`).
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an object, else `false`.
 */
function isObject(value) {
  var type = typeof value;
  return !!value && (type == 'object' || type == 'function');
}

/**
 * Checks if `value` is object-like: not `null` with `typeof` "object".
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is object-like, else `false`.
 */
function isObjectLike(value) {
  return !!value && typeof value == 'object';
}

/**
 * Like `_.assignIn` but accepts a `customizer` invoked with
 * (objValue, srcValue, key, object, source) to produce assigned values.
 * **Note:** This method mutates `object`.
 *
 * @param {Object} object The destination object.
 * @param {...Object} sources The source objects.
 * @param {Function} [customizer] The function to customize assigned values.
 * @returns {Object} Returns `object`.
 */
var assignInWith = createAssigner(function(object, source, srcIndex, customizer) {
  copyObject(source, keysIn(source), object, customizer);
});

/**
 * Assigns own and inherited enumerable string keyed properties of source
 * objects to the destination object for all destination properties that
 * resolve to `undefined`. Sources are applied left to right; once a property
 * is set, later values for it are ignored.
 * **Note:** This method mutates `object`.
 *
 * @param {Object} object The destination object.
 * @param {...Object} [sources] The source objects.
 * @returns {Object} Returns `object`.
 * @example
 *
 * _.defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 });
 * // => { 'a': 1, 'b': 2 }
 */
var defaults = baseRest(function(args) {
  args.push(undefined, assignInDefaults);
  return apply(assignInWith, undefined, args);
});

/**
 * Creates an array of the own and inherited enumerable property names of
 * `object`. Non-object values are coerced to objects.
 *
 * @param {Object} object The object to query.
 * @returns {Array} Returns the array of property names.
 */
function keysIn(object) {
  return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object);
}

module.exports = defaults;
/* * cocos2d for iPhone: http://www.cocos2d-iphone.org * * Copyright (c) 2013 Scott Lembcke * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #import "CCPhysicsBody.h" #import "CCPhysicsShape.h" #import "CCPhysicsNode.h" #import "CCPhysicsJoint.h" #import "ObjectiveChipmunk/ObjectiveChipmunk.h" // In the future, this header will be useful for writing your own Objective-Chipmunk // code to interact with CCPhysics. For now, it's not very well documented on how to do it. // Do ask questions on the Cocos2D forums if you are interested in learning how. // // Things to consider: // Projectile bodies? // Interpolation? // Post-step callbacks? // What to do about CCActions? // Check argument types for delegate callbacks? // Angular velocity in degrees? // Warnings for CCPhysicsCollisionPair methods in the wrong event cycle? // Should CCPhysicsCollisionPair.userData retain? 
#if CP_USE_CGTYPES #define CCP_TO_CPV(p) (p) #define CPV_TO_CCP(p) (p) #define CPTRANSFORM_TO_CGAFFINETRANSFORM(t) (t) #define CGAFFINETRANSFORM_TO_CPTRANSFORM(t) (t) #else // If Chipmunk is not configured to use CG types then they will need to be converted. static inline cpVect CCP_TO_CPV(CGPoint p){return cpv(p.x, p.y);} static inline CGPoint CPV_TO_CCP(cpVect p){return CGPointMake(p.x, p.y);} static inline CGAffineTransform CPTRANSFORM_TO_CGAFFINETRANSFORM(cpTransform t){return CGAffineTransformMake(t.a, t.b, t.c, t.d, t.tx, t.ty);} static inline cpTransform CGAFFINETRANSFORM_TO_CPTRANSFORM(CGAffineTransform t){return cpTransformNew(t.a, t.b, t.c, t.d, t.tx, t.ty);} #endif @interface CCPhysicsBody (ObjectiveChipmunk)<ChipmunkObject> /** The CCNode this physics body is attached to. */ @property(nonatomic, weak) CCNode *node; /** The CCPhysicsNode this body is added to. */ @property(nonatomic, readonly) CCPhysicsNode *physicsNode; /** Returns YES if the body is currently added to a physicsNode. */ @property(nonatomic, readonly) BOOL isRunning; /** The position of the body relative to the space. */ @property(nonatomic, assign) CGPoint absolutePosition; /** The rotation of the body relative to the space. */ @property(nonatomic, assign) CGFloat absoluteRadians; /** The position of the body relative to its parent node. */ @property(nonatomic, assign) CGPoint relativePosition; /** The rotation of the body relative to its parent node. */ @property(nonatomic, assign) CGFloat relativeRotation; /** The transform of the body relative to the space. */ @property(nonatomic, readonly) CGAffineTransform absoluteTransform; /** Chipmunk Body. */ @property(nonatomic, readonly) ChipmunkBody *body; /** Implements the ChipmunkObject protocol. */ @property(nonatomic, readonly) NSArray *chipmunkObjects; /** Is static bodies transform dirty to animations */ @property(nonatomic, readonly) BOOL isKinematicTransformDirty; /** * Add joint to body. * * @param joint Physics joint to use. 
*/ -(void)addJoint:(CCPhysicsJoint *)joint; /** * Remove joint from body. * * @param joint Physics joint to remove. */ -(void)removeJoint:(CCPhysicsJoint *)joint; /** * Used for deferring collision type setup until there is access to the physics node. * * @param physics Physics node. * @param transform Transform to use. */ -(void)willAddToPhysicsNode:(CCPhysicsNode *)physics nonRigidTransform:(cpTransform)transform; /** * Used for deferring collision type setup until there is access to the physics node. * * @param physics Physics node. */ -(void)didAddToPhysicsNode:(CCPhysicsNode *)physics; /** * Used for deferring collision type setup until there is access to the physics node. * * @param physics Physics node. */ -(void)didRemoveFromPhysicsNode:(CCPhysicsNode *)physics; /** * For static bodies that are now in motion, update their kinetic properties. */ -(void)updateKinetics:(CCTime)delta; @end @interface CCPhysicsShape(ObjectiveChipmunk) /** Access to the underlying Objective-Chipmunk shape object. */ @property(nonatomic, readonly) ChipmunkShape *shape; /** Next shape in the linked list. */ @property(nonatomic, strong) CCPhysicsShape *next; /** Body this shape is attached to. */ @property(nonatomic, weak) CCPhysicsBody *body; /** * Used for deferring collision type setup until there is access to the physics node. * * @param physics Physics node. * @param transform Transform to use. */ -(void)willAddToPhysicsNode:(CCPhysicsNode *)physics nonRigidTransform:(cpTransform)transform; /** * Used for deferring collision type setup until there is access to the physics node. * * @param physics Physics node. */ -(void)didRemoveFromPhysicsNode:(CCPhysicsNode *)physics; /** * TODO: * * @param transform Non riged transform. */ -(void)rescaleShape:(cpTransform)transform; @end @interface CCPhysicsJoint(ObjectiveChipmunk)<ChipmunkObject> /** Access to the underlying Objective-Chipmunk object. 
*/ @property(nonatomic, readonly) ChipmunkConstraint *constraint; /** Returns YES if the body is currently added to a physicsNode. */ @property(nonatomic, readonly) BOOL isRunning; /** Joints can be scaled, which updates their max/min lenghts, restLengths and stiffness. */ @property(nonatomic, assign) float scale; /** * Add the join to the physics node, but only if both connected bodies are running. * * @param physicsNode Physics node. */ -(void)tryAddToPhysicsNode:(CCPhysicsNode *)physicsNode; /** * Remove the joint from the physics node, but only if the joint is added. * * @param physicsNode Physics node. */ -(void)tryRemoveFromPhysicsNode:(CCPhysicsNode *)physicsNode; /** * Used for deferring collision type setup until there is access to the physics node. * * @param physics Physics node. */ -(void)willAddToPhysicsNode:(CCPhysicsNode *)physics; /** * Used to initialize the scale to a setting without adjusting the min/max/rest lengths. * * @param scale to be reset to. */ -(void)resetScale:(float)_scale; @end @interface CCPhysicsCollisionPair(ObjectiveChipmunk) /// Access to the underlying Objective-Chipmunk object. @property(nonatomic, assign) cpArbiter *arbiter; @end @interface CCPhysicsNode(ObjectiveChipmunk) /** Access to the underlying Objective-Chipmunk object. */ @property(nonatomic, readonly) ChipmunkSpace *space; /** List of nodes that are currently kinetic due to parent animations*/ @property(nonatomic, readonly) NSMutableSet * kineticNodes; /** * Intern and copy a string to ensure it can be checked by reference * Used for collision type identifiers by CCPhysics. * Nil and @"default" both return the value nil. * * @param string Intern string. * * @return String. */ -(NSString *)internString:(NSString *)string; /** * Retain and track a category identifier and return its index. * Up to 32 categories can be tracked for a space. * * @param category String category. * * @return Category index. 
*/ -(NSUInteger)indexForCategory:(NSString *)category; /** * Convert an array of NSStrings for collision category identifiers into a category bitmask. * The categories are retained and assigned indexes. * Up to 32 categories can be tracked for a space. * * @param categories Array of categories. * * @return Bitmask. */ -(cpBitmask)bitmaskForCategories:(NSArray *)categories; /** * Convert a cpBitmask value to an array of collision category strings. * Ignores any bits that don't have a collision category assigned in the physics node. * * @param categories Category bitmask. * * @return Array of collision categories. */ -(NSArray *)categoriesForBitmask:(cpBitmask)categories; @end
{ "pile_set_name": "Github" }
# Docker Release Process This document describes how the Docker project is released. The Docker project release process targets the Engine, Compose, Kitematic, Machine, Swarm, Distribution, Notary and their underlying dependencies (libnetwork, libkv, etc...). Step-by-step technical details of the process are described in [RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). ## Release cycle The Docker project follows a **time-based release cycle** and ships every nine weeks. A release cycle starts the same day the previous release cycle ends. The first six weeks of the cycle are dedicated to development and review. During this phase, new features and bugfixes submitted to any of the projects are **eligible** to be shipped as part of the next release. No changeset submitted during this period is however guaranteed to be merged for the current release cycle. ## The freeze period Six weeks after the beginning of the cycle, the codebase is officially frozen and the codebase reaches a state close to the final release. A Release Candidate (RC) gets created at the same time. The freeze period is used to find bugs and get feedback on the state of the RC before the release. During this freeze period, while the `master` branch will continue its normal development cycle, no new features are accepted into the RC. As bugs are fixed in `master` the release owner will selectively 'cherry-pick' critical ones to be included into the RC. As the RC changes, new ones are made available for the community to test and review. This period lasts for three weeks. ## How to maximize chances of being merged before the freeze date? First of all, there is never a guarantee that a specific changeset is going to be merged. 
However there are different actions to follow to maximize the chances for a changeset to be merged: - The team gives priority to review the PRs aligned with the Roadmap (usually defined by a ROADMAP.md file at the root of the repository). - The earlier a PR is opened, the more time the maintainers have to review. For example, if a PR is opened the day before the freeze date, it’s very unlikely that it will be merged for the release. - Constant communication with the maintainers (mailing-list, IRC, GitHub issues, etc.) allows to get early feedback on the design before getting into the implementation, which usually reduces the time needed to discuss a changeset. - If the code is commented, fully tested and by extension follows every single rules defined by the [CONTRIBUTING guide]( https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help the maintainers by speeding up the review. ## The release At the end of the freeze (nine weeks after the start of the cycle), all the projects are released together. ``` Codebase Release Start of is frozen (end of the the Cycle (7th week) 9th week) +---------------------------------------+---------------------+ | | | | Development phase | Freeze phase | | | | +---------------------------------------+---------------------+ 6 weeks 3 weeks <---------------------------------------><--------------------> ``` ## Exceptions If a critical issue is found at the end of the freeze period and more time is needed to address it, the release will be pushed back. When a release gets pushed back, the next release cycle gets delayed as well.
{ "pile_set_name": "Github" }
// @flow /*eslint indent:0*/ import {gamecubeCardinals} from "../defaults"; import type {GamepadInfo} from "../gamepadInfo"; export const betop : GamepadInfo = { a : { kind : "pressed", index : 1 } , b : { kind : "pressed", index : 2 } , x : { kind : "pressed", index : 0 } , y : { kind : "pressed", index : 3 } , z : { kind : "pressed", index : 5 } , r : { kind : "pressed", index : 7 } , l : { kind : "pressed", index : 6 } , s : { kind : "pressed", index : 9 } , lA : { kind : "axis" , index : 7, min : -0.867, max : 0.867 } , rA : { kind : "axis" , index : 6, min : -0.867, max : 0.867 } , dpad : { kind : "axis", index : 9} , ls : { kind : "axes", xIndex : 0, yIndex : 1, cardinals : gamecubeCardinals } , cs : { kind : "axes", xIndex : 2, yIndex : 5, cardinals : gamecubeCardinals } , isGC : true , ids : [ { name : "Betop controller", id : "Betop Controller", vendor : "20bc", product : "1264" } ] };
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: 4e7b48ed57e5255458f6fe5514aaacd4 NativeFormatImporter: externalObjects: {} mainObjectFileID: 100100000 userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^<target^>` where ^<target^> is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pdf to make a PDF file with rst2pdf echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. 
The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pdf" ( %SPHINXBUILD% -b pdf %ALLSPHINXOPTS% %BUILDDIR%/pdf if errorlevel 1 exit /b 1 echo. echo.Build finished. The PDF file is in %BUILDDIR%/pdf. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\OwncloudDocumentation.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\OwncloudDocumentation.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. 
goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) :end
{ "pile_set_name": "Github" }
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import re from ._typing import TYPE_CHECKING, cast from .version import InvalidVersion, Version if TYPE_CHECKING: # pragma: no cover from typing import NewType, Union NormalizedName = NewType("NormalizedName", str) _canonicalize_regex = re.compile(r"[-_.]+") def canonicalize_name(name): # type: (str) -> NormalizedName # This is taken from PEP 503. value = _canonicalize_regex.sub("-", name).lower() return cast("NormalizedName", value) def canonicalize_version(_version): # type: (str) -> Union[Version, str] """ This is very similar to Version.__str__, but has one subtle difference with the way it handles the release segment. """ try: version = Version(_version) except InvalidVersion: # Legacy versions cannot be normalized return _version parts = [] # Epoch if version.epoch != 0: parts.append("{0}!".format(version.epoch)) # Release segment # NB: This strips trailing '.0's to normalize parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release))) # Pre-release if version.pre is not None: parts.append("".join(str(x) for x in version.pre)) # Post-release if version.post is not None: parts.append(".post{0}".format(version.post)) # Development release if version.dev is not None: parts.append(".dev{0}".format(version.dev)) # Local version segment if version.local is not None: parts.append("+{0}".format(version.local)) return "".join(parts)
{ "pile_set_name": "Github" }
import { steps } from "./index"; describe("Steps function", () => { let spy: jest.SpyInstance; beforeEach(() => { spy = jest.spyOn(console, "log"); }); afterEach(() => { spy.mockRestore(); }); test("should prints steps when called with 1", () => { steps(1); expect(spy.mock.calls[0][0]).toEqual("#"); expect(spy.mock.calls.length).toEqual(1); }); test("should prints steps when called with 2", () => { steps(2); expect(spy.mock.calls[0][0]).toEqual("# "); expect(spy.mock.calls[1][0]).toEqual("##"); expect(spy.mock.calls.length).toEqual(2); }); test("should prints steps when called with 3", () => { steps(3); expect(spy.mock.calls[0][0]).toEqual("# "); expect(spy.mock.calls[1][0]).toEqual("## "); expect(spy.mock.calls[2][0]).toEqual("###"); expect(spy.mock.calls.length).toEqual(3); }); });
{ "pile_set_name": "Github" }
<!doctype html> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes"> <link rel="stylesheet" href="default.css"> <script src="highlight.pack.js"></script> <script>hljs.initHighlightingOnLoad();</script> <title>Go String与Byte切片之间的转换</title> </head> <body> <p>String转换到Byte数组时,每个byte(byte类型其实就是uint8)保存字符串对应字节的数值。</p> <p>注意Go的字符串是UTF-8编码的,每个字符长度是不确定的,一些字符可能是1、2、3或者4个字节结尾。</p> <p>示例1:</p> <pre><code>package main import "fmt" func main() { s1 := "abcd" b1 := []byte(s1) fmt.Println(b1) // [97 98 99 100] s2 := "中文" b2 := []byte(s2) fmt.Println(b2) // [228 184 173 230 150 135], unicode,每个中文字符会由三个byte组成 r1 := []rune(s1) fmt.Println(r1) // [97 98 99 100], 每个字一个数值 r2 := []rune(s2) fmt.Println(r2) // [20013 25991], 每个字一个数值 } </code></pre></body> </html>
{ "pile_set_name": "Github" }
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build plan9 package os import "syscall" func executable() (string, error) { fn := "/proc/" + itoa(Getpid()) + "/text" f, err := Open(fn) if err != nil { return "", err } defer f.Close() return syscall.Fd2path(int(f.Fd())) }
{ "pile_set_name": "Github" }
rand128-2-6 rand128-2 -1 -1.0 150 150 128 16 1 5 8 68 20 8 9 21 8 32 61 8 8 8 8 41 45 8 38 3 8 30 58 8 22 21 8 62 16 8 38 68 8 9 16 8 25 44 8 24 38 8 8 51 8 42 14 8
{ "pile_set_name": "Github" }
# -*- mode: snippet -*- # name: nx # key: nx # -- nx_uint${1:8}_t ${2:var}; $0
{ "pile_set_name": "Github" }
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ package com.facebook.react.uimanager; import static org.fest.assertions.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import com.facebook.react.bridge.ReactApplicationContext; import com.facebook.react.common.MapBuilder; import java.util.Arrays; import java.util.List; import java.util.Map; import org.fest.assertions.data.MapEntry; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.modules.junit4.rule.PowerMockRule; import org.robolectric.RobolectricTestRunner; import org.robolectric.RuntimeEnvironment; @RunWith(RobolectricTestRunner.class) @PowerMockIgnore({"org.mockito.*", "org.robolectric.*", "android.*"}) public class UIManagerModuleConstantsTest { @Rule public PowerMockRule rule = new PowerMockRule(); private static final String CUSTOM_BUBBLING_EVENT_TYPES = "customBubblingEventTypes"; private static final String CUSTOM_DIRECT_EVENT_TYPES = "customDirectEventTypes"; private static final Map TWIRL_BUBBLING_EVENT_MAP = MapBuilder.of( "phasedRegistrationNames", MapBuilder.of( "bubbled", "onTwirl", "captured", "onTwirlCaptured")); private static final Map TWIRL_DIRECT_EVENT_MAP = MapBuilder.of("registrationName", "onTwirl"); private ReactApplicationContext mReactContext; @Before public void setUp() { mReactContext = new ReactApplicationContext(RuntimeEnvironment.application); } @Test public void testNoCustomConstants() { List<ViewManager> viewManagers = Arrays.asList(mock(ViewManager.class)); UIManagerModule uiManagerModule = new UIManagerModule(mReactContext, viewManagers, 0); Map<String, Object> constants = uiManagerModule.getConstants(); assertThat(constants) 
.containsKey(CUSTOM_BUBBLING_EVENT_TYPES) .containsKey(CUSTOM_DIRECT_EVENT_TYPES) .containsKey("Dimensions"); } @Test public void testCustomBubblingEvents() { ViewManager mockViewManager = mock(ViewManager.class); List<ViewManager> viewManagers = Arrays.asList(mockViewManager); when(mockViewManager.getExportedCustomBubblingEventTypeConstants()) .thenReturn(MapBuilder.of("onTwirl", TWIRL_BUBBLING_EVENT_MAP)); UIManagerModule uiManagerModule = new UIManagerModule(mReactContext, viewManagers, 0); Map<String, Object> constants = uiManagerModule.getConstants(); assertThat((Map) constants.get(CUSTOM_BUBBLING_EVENT_TYPES)) .contains(MapEntry.entry("onTwirl", TWIRL_BUBBLING_EVENT_MAP)) .containsKey("topChange"); } @Test public void testCustomDirectEvents() { ViewManager mockViewManager = mock(ViewManager.class); List<ViewManager> viewManagers = Arrays.asList(mockViewManager); when(mockViewManager.getExportedCustomDirectEventTypeConstants()) .thenReturn(MapBuilder.of("onTwirl", TWIRL_DIRECT_EVENT_MAP)); UIManagerModule uiManagerModule = new UIManagerModule(mReactContext, viewManagers, 0); Map<String, Object> constants = uiManagerModule.getConstants(); assertThat((Map) constants.get(CUSTOM_DIRECT_EVENT_TYPES)) .contains(MapEntry.entry("onTwirl", TWIRL_DIRECT_EVENT_MAP)) .containsKey("topLoadingStart"); } @Test public void testCustomViewConstants() { ViewManager mockViewManager = mock(ViewManager.class); List<ViewManager> viewManagers = Arrays.asList(mockViewManager); when(mockViewManager.getName()).thenReturn("RedPandaPhotoOfTheDayView"); when(mockViewManager.getExportedViewConstants()) .thenReturn(MapBuilder.of("PhotoSizeType", MapBuilder.of("Small", 1, "Large", 2))); UIManagerModule uiManagerModule = new UIManagerModule(mReactContext, viewManagers, 0); Map<String, Object> constants = uiManagerModule.getConstants(); assertThat(constants).containsKey("RedPandaPhotoOfTheDayView"); assertThat((Map) constants.get("RedPandaPhotoOfTheDayView")).containsKey("Constants"); 
assertThat((Map) valueAtPath(constants, "RedPandaPhotoOfTheDayView", "Constants")) .containsKey("PhotoSizeType"); } @Test public void testNativeProps() { ViewManager mockViewManager = mock(ViewManager.class); List<ViewManager> viewManagers = Arrays.asList(mockViewManager); when(mockViewManager.getName()).thenReturn("SomeView"); when(mockViewManager.getNativeProps()) .thenReturn(MapBuilder.of("fooProp", "number")); UIManagerModule uiManagerModule = new UIManagerModule(mReactContext, viewManagers, 0); Map<String, Object> constants = uiManagerModule.getConstants(); assertThat((String) valueAtPath(constants, "SomeView", "NativeProps", "fooProp")) .isEqualTo("number"); } @Test public void testMergeConstants() { ViewManager managerX = mock(ViewManager.class); when(managerX.getExportedCustomDirectEventTypeConstants()).thenReturn(MapBuilder.of( "onTwirl", MapBuilder.of( "registrationName", "onTwirl", "keyToOverride", "valueX", "mapToMerge", MapBuilder.of("keyToOverride", "innerValueX", "anotherKey", "valueX")))); ViewManager managerY = mock(ViewManager.class); when(managerY.getExportedCustomDirectEventTypeConstants()).thenReturn(MapBuilder.of( "onTwirl", MapBuilder.of( "extraKey", "extraValue", "keyToOverride", "valueY", "mapToMerge", MapBuilder.of("keyToOverride", "innerValueY", "extraKey", "valueY")))); List<ViewManager> viewManagers = Arrays.asList(managerX, managerY); UIManagerModule uiManagerModule = new UIManagerModule(mReactContext, viewManagers, 0); Map<String, Object> constants = uiManagerModule.getConstants(); assertThat((Map) constants.get(CUSTOM_DIRECT_EVENT_TYPES)).containsKey("onTwirl"); Map twirlMap = (Map) valueAtPath(constants, CUSTOM_DIRECT_EVENT_TYPES, "onTwirl"); assertThat(twirlMap) .contains(MapEntry.entry("registrationName", "onTwirl")) .contains(MapEntry.entry("keyToOverride", "valueY")) .contains(MapEntry.entry("extraKey", "extraValue")) .containsKey("mapToMerge"); Map mapToMerge = (Map) valueAtPath(twirlMap, "mapToMerge"); assertThat(mapToMerge) 
.contains(MapEntry.entry("keyToOverride", "innerValueY")) .contains(MapEntry.entry("anotherKey", "valueX")) .contains(MapEntry.entry("extraKey", "valueY")); } private static Object valueAtPath(Map nestedMap, String... keyPath) { assertThat(keyPath).isNotEmpty(); Object value = nestedMap; for (String key : keyPath) { assertThat(value).isInstanceOf(Map.class); nestedMap = (Map) value; assertThat(nestedMap).containsKey(key); value = nestedMap.get(key); } return value; } }
{ "pile_set_name": "Github" }
{ "object": { "pins": [ { "package": "Realm", "repositoryURL": "https://github.com/realm/realm-cocoa", "state": { "branch": null, "revision": "f64ac045d8cb171d8e317d9b854df7215aed7466", "version": "5.4.2" } }, { "package": "RealmCore", "repositoryURL": "https://github.com/realm/realm-core", "state": { "branch": null, "revision": "e051fc73c56830bf3ab0b8a82f7a613968cec6c6", "version": "6.0.26" } }, { "package": "Sodium", "repositoryURL": "https://github.com/jedisct1/swift-sodium", "state": { "branch": "master", "revision": "86b616ddb6522e05e14c000dab6eac690ef48227", "version": null } } ] }, "version": 1 }
{ "pile_set_name": "Github" }
# Tests for CNI code. # Compile a single C++ file and produce a .o file. OPTIONS is a list # of options to pass to the compiler. Returns 0 on failure, 1 on # success. proc gcj_cni_compile_cxx_to_o {file {options {}}} { global srcdir subdir set name [file rootname [file tail $file]] set oname ${name}.o # Find the generated header. lappend options "additional_flags=-I. -I.. -I$srcdir/$subdir -fdollars-in-identifiers" # Find libgcj headers. lappend options "additional_flags=-I$srcdir/.." set x [libjava_prune_warnings \ [target_compile $file $oname object $options]] if {$x != ""} { verbose "target_compile failed: $x" 2 fail "[file tail $file] compilation" return 0 } pass "[file tail $file] compilation" return 1 } # Build header files given name of .java file. Return 0 on failure. proc gcj_cni_build_headers {file} { global libgcj_jar set gcjh [find_gcjh] # Currently we only build a header file for the main class from the # .java file, and then others on an ad hoc basis. set list {} set main [file rootname [file tail $file]] lappend list $main # ... for instance, an obvious hack. if {$main == "shortfield"} { lappend list shortfieldbase } foreach file $list { set cmd "$gcjh -cni -force -classpath .:$libgcj_jar $file" verbose $cmd set x [string trim [libjava_prune_warnings \ [lindex [local_exec $cmd "" "" 300] 1]]] if {$x != ""} { verbose "local_exec failed: $x" 2 fail "$main header generation" return 0 } } pass "$main header generation" return 1 } # Do all the work for a single CNI test. Return 0 on failure. proc gcj_cni_test_one {file} { global runtests # The base name. We use it for several purposes. set main [file rootname [file tail $file]] if {! [runtest_file_p $runtests $main]} { # Simply skip it. return 1 } # if {! [bytecompile_file $file [pwd]]} { # fail "bytecompile $file" # # FIXME - should use `untested' on all remaining tests. # # But that is hard. # return 0 # } # pass "bytecompile $file" # if {! 
[gcj_cni_build_headers $file]} { # # FIXME # return 0 # } set cfile [file join [file dirname $file] nat$main.cc] if {! [gcj_cni_compile_cxx_to_o $cfile]} { # FIXME return 0 } if {! [gcj_link $main $main [list $file nat$main.o]]} { # FIXME return 0 } if {! [gcj_invoke $main [file rootname $file].out {}]} { # FIXME return 0 } # When we succeed we remove all our clutter. eval gcj_cleanup [glob -nocomplain -- ${main}.*] [list $main nat$main.o] return 1 } # Run the CNI tests. proc gcj_cni_run {} { global srcdir subdir global build_triplet host_triplet # For now we only test CNI on native builds. if {$build_triplet == $host_triplet} { catch { lsort [glob -nocomplain ${srcdir}/${subdir}/*.jar] } srcfiles foreach x $srcfiles { gcj_cni_test_one $x } } else { verbose "CNI tests not run in cross-compilation environment" } } gcj_cni_run
{ "pile_set_name": "Github" }
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Global definitions for the ARCnet interface. * * Authors: David Woodhouse and Avery Pennarun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IF_ARCNET_H #define _LINUX_IF_ARCNET_H #include <linux/types.h> #include <linux/if_ether.h> /* * These are the defined ARCnet Protocol ID's. */ /* CAP mode */ /* No macro but uses 1-8 */ /* RFC1201 Protocol ID's */ #define ARC_P_IP 212 /* 0xD4 */ #define ARC_P_IPV6 196 /* 0xC4: RFC2497 */ #define ARC_P_ARP 213 /* 0xD5 */ #define ARC_P_RARP 214 /* 0xD6 */ #define ARC_P_IPX 250 /* 0xFA */ #define ARC_P_NOVELL_EC 236 /* 0xEC */ /* Old RFC1051 Protocol ID's */ #define ARC_P_IP_RFC1051 240 /* 0xF0 */ #define ARC_P_ARP_RFC1051 241 /* 0xF1 */ /* MS LanMan/WfWg "NDIS" encapsulation */ #define ARC_P_ETHER 232 /* 0xE8 */ /* Unsupported/indirectly supported protocols */ #define ARC_P_DATAPOINT_BOOT 0 /* very old Datapoint equipment */ #define ARC_P_DATAPOINT_MOUNT 1 #define ARC_P_POWERLAN_BEACON 8 /* Probably ATA-Netbios related */ #define ARC_P_POWERLAN_BEACON2 243 /* 0xF3 */ #define ARC_P_LANSOFT 251 /* 0xFB - what is this? */ #define ARC_P_ATALK 0xDD /* Hardware address length */ #define ARCNET_ALEN 1 /* * The RFC1201-specific components of an arcnet packet header. */ struct arc_rfc1201 { __u8 proto; /* protocol ID field - varies */ __u8 split_flag; /* for use with split packets */ __be16 sequence; /* sequence number */ __u8 payload[0]; /* space remaining in packet (504 bytes)*/ }; #define RFC1201_HDR_SIZE 4 /* * The RFC1051-specific components. 
*/ struct arc_rfc1051 { __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ __u8 payload[0]; /* 507 bytes */ }; #define RFC1051_HDR_SIZE 1 /* * The ethernet-encap-specific components. We have a real ethernet header * and some data. */ struct arc_eth_encap { __u8 proto; /* Always ARC_P_ETHER */ struct ethhdr eth; /* standard ethernet header (yuck!) */ __u8 payload[0]; /* 493 bytes */ }; #define ETH_ENCAP_HDR_SIZE 14 struct arc_cap { __u8 proto; __u8 cookie[sizeof(int)]; /* Actually NOT sent over the network */ union { __u8 ack; __u8 raw[0]; /* 507 bytes */ } mes; }; /* * The data needed by the actual arcnet hardware. * * Now, in the real arcnet hardware, the third and fourth bytes are the * 'offset' specification instead of the length, and the soft data is at * the _end_ of the 512-byte buffer. We hide this complexity inside the * driver. */ struct arc_hardware { __u8 source, /* source ARCnet - filled in automagically */ dest, /* destination ARCnet - 0 for broadcast */ offset[2]; /* offset bytes (some weird semantics) */ }; #define ARC_HDR_SIZE 4 /* * This is an ARCnet frame header, as seen by the kernel (and userspace, * when you do a raw packet capture). */ struct archdr { /* hardware requirements */ struct arc_hardware hard; /* arcnet encapsulation-specific bits */ union { struct arc_rfc1201 rfc1201; struct arc_rfc1051 rfc1051; struct arc_eth_encap eth_encap; struct arc_cap cap; __u8 raw[0]; /* 508 bytes */ } soft; }; #endif /* _LINUX_IF_ARCNET_H */
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: dc4b9a79e57aef2438e8141566ce9323 folderAsset: yes timeCreated: 1472694377 licenseType: Pro DefaultImporter: userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
import plotly.graph_objs as go from dash.dependencies import Input, Output, State from dash.exceptions import PreventUpdate from dtale.charts.utils import MAX_GROUPS def init_callbacks(dash_app): def lock_zoom(clicks, relayout_data, figure): if not clicks: raise PreventUpdate figure = go.Figure(figure) if relayout_data: figure.update_layout(scene_camera=relayout_data["scene.camera"]) return figure for i in range(1, MAX_GROUPS + 1): dash_app.callback( Output("chart-{}".format(i), "figure"), [Input("lock-zoom-btn", "n_clicks")], [ State("chart-{}".format(i), "relayoutData"), State("chart-{}".format(i), "figure"), ], )(lock_zoom)
{ "pile_set_name": "Github" }
--- !ruby/object:RI::MethodDescription aliases: [] block_params: comment: full_name: Gem::DocManager::configured_args= is_singleton: true name: configured_args= params: (args) visibility: public
{ "pile_set_name": "Github" }
### FILE="Main.annotation" ## Copyright: Public domain. ## Filename: KEYRUPT,_UPRUPT.agc ## Purpose: This program is designed to extensively test the Apollo Guidance Computer ## (specifically the LM instantiation of it). It is built on top of a heavily ## stripped-down Aurora 12, with all code ostensibly added by the DAP Group ## removed. Instead Borealis expands upon the tests provided by Aurora, ## including corrected tests from Retread 44 and tests from Ron Burkey's ## Validation. ## Assembler: yaYUL ## Contact: Mike Stewart <[email protected]>. ## Website: www.ibiblio.org/apollo/index.html ## Mod history: 2016-12-20 MAS Created from Aurora 12 (with much DAP stuff removed). BANK 7 KEYRUPT1 TS BANKRUPT XCH Q TS QRUPT TC LODSAMPT # TIME IS SNATCHED IN RUPT FOR NOUN 65. CAF LOW5 EXTEND RAND MNKEYIN KEYCOM TS RUPTREG4 CAF CHRPRIO TC NOVAC EBANK= DSPCOUNT 2CADR CHARIN CA RUPTREG4 INDEX LOCCTR TS MPAC # LEAVE 5 BIT KEY CDE IN MPAC FOR CHARIN TC RESUME # UPRUPT PROGRAM UPRUPT TS BANKRUPT XCH Q TS QRUPT TC LODSAMPT # TIME IS SNATCHED IN RUPT FOR NOUN 65. CAF ZERO XCH INLINK TS KEYTEMP1 CAF BIT3 # TURN ON UPACT LIGHT EXTEND # (BIT 3 OF CHANNEL 11) WOR DSALMOUT UPRUPT1 CAF LOW5 # TEST FOR TRIPLE CHAR REDUNDANCY MASK KEYTEMP1 # LOW5 OF WORD XCH KEYTEMP1 # LOW5 INTO KEYTEMP1 XCH SR # WHOLE WORD INTO SR TS KEYTEMP2 # ORIGINAL SR INTO KEYTEMP2 TC SRGHT5 MASK LOW5 # MID 5 AD HI10 TC UPTEST TC SRGHT5 MASK LOW5 # HIGH 5 COM TC UPTEST UPOK TC RESTORSR # CODE IS GOOD CS ELRCODE # IF CODE = ERROR LIGHT RESET, PUT +0 AD KEYTEMP1 # INTO BIT1 OF UPLOCK. CCS A # IF CODE NOT= ELR, PASS CODE ONLY IF TC TSTUPLOK # BIT1 OF UPLOCK = 0. ELRCODE OCT 22 TC TSTUPLOK CS BIT1 # PUT 0 INTO BIT1 OF UPLOCK MASK UPLOCK TS UPLOCK TC ACCEPTUP TSTUPLOK CAF BIT1 MASK UPLOCK CCS A TC RESUME # BIT1 OF UPLOCK = 1. ACCEPTUP XCH KEYTEMP1 # BIT1 OF UPLOCK = 0. 
TC KEYCOM TMFAIL2 TC RESTORSR # CODE IS BAD CS BIT1 # LOCK OUT FURTHER UPLINK ACTIVITY (BY MASK UPLOCK # PUTTING 1 INTO BIT1 OF UPLOCK) UNTIL ELR AD BIT1 # IS SENT UP UPLINK. TS UPLOCK TMFAIL1 TC TMALM TC RESUME RESTORSR XCH KEYTEMP2 DOUBLE TS SR TC Q TMALM = RESUME # FOR NOW SRGHT5 CS SR CS SR CS SR CS SR CS SR CS A TC Q # DELIVERS WORD UNCOMPLEMENTED UPTEST AD KEYTEMP1 CCS A TC TMFAIL2 HI10 OCT 77740 TC TMFAIL2 TC Q # UPACT IS TURNED OFF BY VBRELDSP, ALSO BY ERROR LIGHT RESET. # THE RECEPTION OF A BAD CODE BY UPLINK LOCKS OUT FURTHER UPLINK ACTIVITY # BY PLACING A 1 INTO BIT1 OF UPLOCK. BIT9 (ALONG WITH BIT11) OF TMKEYBUF # IS SET TO 1 TO SEND AN INDICATION OF THIS SITUATION DOWN THE DOWNLINK. # THE UPLINK INTERLOCK IS ALLOWED WHEN AN ERROR LIGHT RESET CODE IS SENT # UP THE UPLINK, OR WHEN A FRESH START IS PERFORMED. ENDKRURS EQUALS
{ "pile_set_name": "Github" }
const hotClient = require('webpack-hot-middleware/client?noInfo=true&reload=true') hotClient.subscribe(event => { /** * Reload browser when HTMLWebpackPlugin emits a new index.html */ if (event.action === 'reload') { window.location.reload() } /** * Notify `mainWindow` when `main` process is compiling, * giving notice for an expected reload of the `electron` process */ if (event.action === 'compiling') { document.body.innerHTML += ` <style> #dev-client { background: #4fc08d; border-radius: 4px; bottom: 20px; box-shadow: 0 4px 5px 0 rgba(0, 0, 0, 0.14), 0 1px 10px 0 rgba(0, 0, 0, 0.12), 0 2px 4px -1px rgba(0, 0, 0, 0.3); color: #fff; font-family: 'Source Sans Pro', sans-serif; left: 20px; padding: 8px 12px; position: absolute; } </style> <div id="dev-client"> Compiling Main Process... </div> ` } })
{ "pile_set_name": "Github" }
/* * Initialization. */ static void PyThread__init_thread(void) { } /* * Thread support. */ long PyThread_start_new_thread(void (*func)(void *), void *arg) { int success = 0; /* init not needed when SOLARIS_THREADS and */ /* C_THREADS implemented properly */ dprintf(("PyThread_start_new_thread called\n")); if (!initialized) PyThread_init_thread(); return success < 0 ? -1 : 0; } long PyThread_get_thread_ident(void) { if (!initialized) PyThread_init_thread(); } void PyThread_exit_thread(void) { dprintf(("PyThread_exit_thread called\n")); if (!initialized) exit(0); } /* * Lock support. */ PyThread_type_lock PyThread_allocate_lock(void) { dprintf(("PyThread_allocate_lock called\n")); if (!initialized) PyThread_init_thread(); dprintf(("PyThread_allocate_lock() -> %p\n", lock)); return (PyThread_type_lock) lock; } void PyThread_free_lock(PyThread_type_lock lock) { dprintf(("PyThread_free_lock(%p) called\n", lock)); } int PyThread_acquire_lock(PyThread_type_lock lock, int waitflag) { int success; dprintf(("PyThread_acquire_lock(%p, %d) called\n", lock, waitflag)); dprintf(("PyThread_acquire_lock(%p, %d) -> %d\n", lock, waitflag, success)); return success; } void PyThread_release_lock(PyThread_type_lock lock) { dprintf(("PyThread_release_lock(%p) called\n", lock)); }
{ "pile_set_name": "Github" }
/* * Copyright Andrey Semashev 2007 - 2015. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) */ /*! * \file keywords/auto_flush.hpp * \author Andrey Semashev * \date 14.03.2009 * * The header contains the \c auto_flush keyword declaration. */ #ifndef BOOST_LOG_KEYWORDS_AUTO_FLUSH_HPP_INCLUDED_ #define BOOST_LOG_KEYWORDS_AUTO_FLUSH_HPP_INCLUDED_ #include <boost/parameter/keyword.hpp> #include <boost/log/detail/config.hpp> #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once #endif namespace boost { BOOST_LOG_OPEN_NAMESPACE namespace keywords { //! The keyword for passing auto flush flag to a sink backend initialization BOOST_PARAMETER_KEYWORD(tag, auto_flush) } // namespace keywords BOOST_LOG_CLOSE_NAMESPACE // namespace log } // namespace boost #endif // BOOST_LOG_KEYWORDS_AUTO_FLUSH_HPP_INCLUDED_
{ "pile_set_name": "Github" }
import React from 'react' import NotificationItem from '../NotificationItem' import PagePath from 'components/PageList/PagePath' import { Notification } from 'client/types/crowi' interface Props { actionUsers: string notification: Notification onClick: () => void } export default class PageCommentNotification extends React.Component<Props> { render() { const notification = this.props.notification return ( <NotificationItem {...this.props} icon="comment"> <span> <b>{this.props.actionUsers}</b> commented on <PagePath page={notification.target} /> </span> </NotificationItem> ) } }
{ "pile_set_name": "Github" }
var EventEmitter = require('events').EventEmitter , _ = require('./utils') ; var root = window; var $; var SocialBase = module.exports = function () { this.collection = []; this.init.apply(this, arguments); $ = SocialBase.$ || root.jQuery || root.Zepto || root.$; if (!$) throw "jQuery or Zepto is required to use SocialFeed."; }; _.inherits(SocialBase, EventEmitter); /** Extend from Backbone (Copyright (c) 2010-2013 Jeremy Ashkenas, DocumentCloud) */ SocialBase.extend = function (protoProps) { var parent = this , child = function(){ return parent.apply(this, arguments); } ; _.extend(child, parent); var Surrogate = function () { this.constructor = child; }; Surrogate.prototype = parent.prototype; child.prototype = new Surrogate; if (protoProps) { _.extend(child.prototype, protoProps); } child.__super__ = parent.prototype; return child; }; /** // From Backbone */ SocialBase.fetch = function (options) { if (options.dataType.toLowerCase() === 'jsonp') { options.callback = options.callbackParameter || "callback"; } return $.ajax(options); }; _.extend(SocialBase.prototype, { ajaxSettings: { dataType: 'jsonp', type: 'GET' } , init: function (ident) { this.ident = ident; } , fetch: function (options) { options = options ? _.clone(options) : {}; var url = _.result(this, 'url') , module = this , success = options.success ; options.url = url; options.success = function(resp) { var parsed = module.parse(resp); module.collection = parsed; if (success) success(module, parsed, options); module.emit('fetched', module, parsed, options); }; var error = options.error; options.error = function(xOptions, textStatus) { if (error) error(module, textStatus, xOptions); module.emit('error', module, textStatus, xOptions); }; if (!url && this.data) { options.success(_.result(this, 'data')); return void 0; } return SocialBase.fetch(_.extend(this.ajaxSettings, options)); } , parse: function (resp) { return resp; } , orderBy: function (item) { } , render: function (item) { } });
{ "pile_set_name": "Github" }
{""} {"fo\"obar"} {''} {'fo\'obar'} ---------------------------------------------------- [ ["smarty", [ ["delimiter", "{"], ["string", "\"\""], ["delimiter", "}"] ]], ["smarty", [ ["delimiter", "{"], ["string", "\"fo\\\"obar\""], ["delimiter", "}"] ]], ["smarty", [ ["delimiter", "{"], ["string", "''"], ["delimiter", "}"] ]], ["smarty", [ ["delimiter", "{"], ["string", "'fo\\'obar'"], ["delimiter", "}"] ]] ] ---------------------------------------------------- Checks for strings.
{ "pile_set_name": "Github" }
import os import sys from pathlib import Path from lockdoors import infogathering from lockdoors import webhack from lockdoors import exploitation from lockdoors import reverse from lockdoors import encdyc from lockdoors import passattack from lockdoors import shells from lockdoors import privesc from lockdoors import soceng from lockdoors import psafrt from lockdoors import wtpp from lockdoors import about from lockdoors import update # VARS config = str(Path.home()) + "/.config/lockdoor/" # Functions def getinstalldir(): f = open(config + 'lockdoor.conf') contents = f.read().rstrip('\n') f.close() installdirc = contents.replace('Location:', '') return installdirc def printlogo(): print(""" \033[94m ..',,,'.. \033[0m \033[94m .',;;;;;;;;,'. \033[0m \033[94m ..,;;;;;;;;;;;;;;,.. \033[0m \033[94m .,;;;,'..'''''.',;;;,. \033[0m \033[94m .;;;;. .. .. .;;;;' \033[0m\033[91m ( \033[0m \033[94m .,;;;. ... .;;;;. \033[0m\033[91m )\ ) ) ( \033[0m \033[94m ..,;,. ... .,;,.. \033[0m\033[91m (()/( ( /( )\ ) ( \033[0m \033[94m .';;'. .',;'. \033[0m\033[91m /(_)) ( ( )\())(()/( ( ( )( \033[0m \033[94m ..',,;;;;;,,,,;;;;;,,'.. \033[0m\033[91m (_)) )\ )\ ((_)\ ((_)) )\ )\ (()\ \033[0m \033[94m .','.....................''. \033[0m\033[91m | | ((_) ((_)| |(_) _| | ((_) ((_) ((_)\033[0m \033[94m .',..',,,,,,,,,,,,,,,,,,,..,,. \033[0m\033[91m | |__ / _ \/ _| | / // _` |/ _ \/ _ \| '_|\033[0m \033[94m .;,..,;;;;;;'....';;;;;;;..,;. \033[0m\033[91m |____|\___/\__| |_\_\\__,_|\___/\___/|_| \033[0m \033[94m ';;..,;;;;;,..,,..';;;;;,..,;' \033[0m\033[92m © Sofiane Hamlaoui | 2020 \033[0m \033[94m.';;..,;;;;,. .... .,;;;;,..;;,.\033[0m\033[92m Lockdoor : A Penetration Testing framework\033[0m \033[94m ';;..,;;;;' .... .;;;;,..;;,. \033[0m\033[92m v2.2.4 \033[0m \033[94m .,;'.';;;;'. .. .';;;;,.';,. \033[0m \033[94m ....;;;;;,'''''',;;;;;'... 
\033[0m \033[94m ..................\033[0m""") def oktocont(): ans = input("\033[0;36mPress Enter to Continue...\033[0m") def clr(): os.system('clear') def spc(): print("") def prilogspc(): printlogo() spc() def clscprilo(): clr() printlogo() def popp(): spc() oktocont() printlogo() spc() def okenc(): spc() oktocont() encdyc.menu() def pop(): spc() oktocont() spc() def okex(): spc() oktocont() exploitation.menu() def okinf(): spc() oktocont() infogathering.menu() def okpa(): spc() oktocont() passattack.menu() def okpr(): spc() oktocont() privesc.menu() def okrev(): spc() oktocont() reverse.menu() def oksh(): spc() oktocont() shells.menu() def okso(): spc() oktocont() soceng.menu() def okwe(): spc() oktocont() webhack.menu()
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!-- Copyright (C) 2006 W3C (R) (MIT ERCIM Keio), All Rights Reserved. W3C liability, trademark and document use rules apply. http://www.w3.org/Consortium/Legal/ipr-notice http://www.w3.org/Consortium/Legal/copyright-documents Generated from: $Id: examples.xml,v 1.57 2008/02/20 16:41:48 pdowney Exp $ --> <env:Envelope xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:p="http://www.w3.org/2002/ws/databinding/patterns/6/09/" xmlns:ex="http://www.w3.org/2002/ws/databinding/examples/6/09/" xmlns:env="http://www.w3.org/2003/05/soap-envelope"> <env:Header/> <env:Body> <ex:echoDecimalElement> <ex:decimalElement xmlns:wsdl11="http://schemas.xmlsoap.org/wsdl/" xmlns:soap11enc="http://schemas.xmlsoap.org/soap/encoding/">+100000000000000000000000000000000000000000000.00</ex:decimalElement> </ex:echoDecimalElement> </env:Body> </env:Envelope>
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <link rel="../img/favicon.ico"> <title>Getting help - guizero</title> <link rel="stylesheet" href="//use.fontawesome.com/releases/v5.5.0/css/all.css" integrity="sha384-B4dIYHKNBt8Bc12p+WXckhzcICo0wtJAoU8YZTY5qE0Id1GSseTk6S+L3BlXeVIU" crossorigin="anonymous"> <link rel="stylesheet" href="//cdn.jsdelivr.net/npm/[email protected]/build/web/hack.min.css"> <link href='//fonts.googleapis.com/css?family=PT+Sans:400,400italic,700,700italic&subset=latin-ext,latin' rel='stylesheet' type='text/css'> <link href='//fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,700italic,400,300,600,700&subset=latin-ext,latin' rel='stylesheet' type='text/css'> <link href="../css/bootstrap-custom.min.css" rel="stylesheet"> <link href="../css/base.min.css" rel="stylesheet"> <link href="../css/cinder.min.css" rel="stylesheet"> <link href="../css/highlight.min.css" rel="stylesheet"> <!-- HTML5 shim and Respond.js IE8 support of HTML5 elements and media queries --> <!--[if lt IE 9]> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/html5shiv.min.js"></script> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dest/respond.min.js"></script> <![endif]--> <script src="//ajax.googleapis.com/ajax/libs/webfont/1.6.26/webfont.js"></script> <script> WebFont.load({ google: { families: ['Open Sans', 'PT Sans'] } }); </script> </head> <body> <div class="navbar navbar-default navbar-fixed-top" role="navigation"> <div class="container"> <!-- Collapsed navigation --> <div class="navbar-header"> <!-- Expander button --> <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <!-- Main title --> <a 
class="navbar-brand" href="..">guizero</a> </div> <!-- Expanded navigation --> <div class="navbar-collapse collapse"> <!-- Main navigation --> <ul class="nav navbar-nav"> <li > <a href="../about/">About</a> </li> <li > <a href="..">Installation</a> </li> <li class="dropdown active"> <a href="#" class="dropdown-toggle" data-toggle="dropdown">Using guizero <b class="caret"></b></a> <ul class="dropdown-menu"> <li > <a href="../start/">Getting started</a> </li> <li > <a href="../commands/">Commands</a> </li> <li > <a href="../multiple_windows/">Multiple windows</a> </li> <li > <a href="../layout/">Layouts</a> </li> <li > <a href="../alerts/">Pop-ups</a> </li> <li > <a href="../size/">Sizes</a> </li> <li > <a href="../colors/">Colors</a> </li> <li > <a href="../images/">Images</a> </li> <li > <a href="../blocking/">Loops and sleeping</a> </li> <li > <a href="../events/">Events</a> </li> <li > <a href="../usingtk/">Using tkinter</a> </li> <li class="active"> <a href="./">Getting help</a> </li> </ul> </li> <li > <a href="../recipes/">Recipes</a> </li> <li class="dropdown"> <a href="#" class="dropdown-toggle" data-toggle="dropdown">Widgets <b class="caret"></b></a> <ul class="dropdown-menu"> <li > <a href="../widgetoverview/">Overview</a> </li> <li > <a href="../app/">App</a> </li> <li > <a href="../box/">Box</a> </li> <li > <a href="../buttongroup/">ButtonGroup</a> </li> <li > <a href="../checkbox/">CheckBox</a> </li> <li > <a href="../combo/">Combo</a> </li> <li > <a href="../drawing/">Drawing</a> </li> <li > <a href="../listbox/">ListBox</a> </li> <li > <a href="../menubar/">MenuBar</a> </li> <li > <a href="../picture/">Picture</a> </li> <li > <a href="../pushbutton/">PushButton</a> </li> <li > <a href="../slider/">Slider</a> </li> <li > <a href="../text/">Text</a> </li> <li > <a href="../textbox/">TextBox</a> </li> <li > <a href="../waffle/">Waffle</a> </li> <li > <a href="../window/">Window</a> </li> </ul> </li> <li class="dropdown"> <a href="#" 
class="dropdown-toggle" data-toggle="dropdown">Contributing <b class="caret"></b></a> <ul class="dropdown-menu"> <li > <a href="../contributing/">Notes</a> </li> <li > <a href="../development/">Developing</a> </li> <li > <a href="../deployment/">Deploying</a> </li> </ul> </li> <li > <a href="../changelog/">Change log</a> </li> </ul> <ul class="nav navbar-nav navbar-right"> <li> <a href="#" data-toggle="modal" data-target="#mkdocs_search_modal"> <i class="fas fa-search"></i> Search </a> </li> <li > <a rel="prev" href="../usingtk/"> <i class="fas fa-arrow-left"></i> Previous </a> </li> <li > <a rel="next" href="../recipes/"> Next <i class="fas fa-arrow-right"></i> </a> </li> </ul> </div> </div> </div> <div class="container"> <div class="col-md-3"><div class="bs-sidebar hidden-print affix well" role="complementary"> <ul class="nav bs-sidenav"> <li class="first-level active"><a href="#getting-help">Getting help</a></li> <li class="second-level"><a href="#getting-help_1">Getting help</a></li> <li class="second-level"><a href="#bugs-and-feature-requests">Bugs and feature requests</a></li> </ul> </div></div> <div class="col-md-9" role="main"> <h1 id="getting-help">Getting help</h1> <p>You may encounter a problem when using guizero, so here are some ways you can get help to solve your problem.</p> <h3 id="getting-help_1">Getting help</h3> <p>If you have a question about your guizero program it is a good idea to join a community to ask for support:</p> <ul> <li><a href="https://www.raspberrypi.org/forums/viewforum.php?f=32&amp;sid=b95bfdce6565681fad633c58a7e0e686">Raspberry Pi Forums</a> - some people post questions about guizero on these forums and other members of the community step in to help. 
The creators of guizero periodically check the Python forum.</li> <li><a href="https://stackoverflow.com/">Stack Overflow</a> - a popular site for techy questions</li> <li><a href="https://www.computingatschool.org.uk/">Computing at School</a> - a useful site for teachers</li> </ul> <p>If you would like to read guides and resources for guizero there are many freely available:</p> <ul> <li> <p><a href="https://projects.raspberrypi.org/en/projects/getting-started-with-guis">Getting started with GUIs</a> - a beginners guide to guizero by the Raspberry Pi Foundation</p> </li> <li> <p><a href="https://youtu.be/3dab9xX6Uyg">Python guizero video</a> - a series of videos by <a href="https://www.youtube.com/channel/UCTHAn3Viqm9lZO2YSALXNWw">Devon Schafer</a> on using guizero.</p> </li> <li> <p><a href="https://github.com/lawsie/guizero/tree/master/examples">Example programs</a> - some example programs to get you going.</p> </li> <li> <p><a href="https://github.com/bennuttall/guizero-examples">Using guizero with hardware</a> - Ben Nuttall's projects using guizero combined with hardware</p> </li> <li> <p><a href="http://www.cotswoldjam.org/downloads/2017-03/cat-name-generator/">Cat name generator</a> - materials for a kids workshop, first run at the <a href="http://www.cotswoldjam.org">Cotswold Jam</a></p> </li> <li> <p><a href="https://helloworld.raspberrypi.org/issues/2">Name your pet</a> - article on page 42 of <a href="https://helloworld.raspberrypi.org/">Hello World</a> magazine issue 2.</p> </li> </ul> <h3 id="bugs-and-feature-requests">Bugs and feature requests</h3> <p>Like many Python libraries, guizero has a <a href="https://github.com/lawsie/guizero">GitHub repository</a> where you can add issues. These issues come under two headings:</p> <ul> <li>You found a bug in guizero</li> <li>You would like to request we consider including a new feature</li> </ul> <p>We look at and respond to all issues created on GitHub. 
However, we ask that you only create issues for bugs and feature requests, rather than support, as we do not have time to answer everything :)</p></div> </div> <footer class="col-md-12 text-center"> <hr> <p> <small>Documentation built with <a href="http://www.mkdocs.org/">MkDocs</a>.</p></small> </footer> <script src="//ajax.googleapis.com/ajax/libs/jquery/1.12.4/jquery.min.js"></script> <script src="../js/bootstrap-3.0.3.min.js"></script> <script src="../js/highlight.pack.js"></script> <script>hljs.initHighlightingOnLoad();</script> <script>var base_url = ".."</script> <script src="../js/base.js"></script> <script src="../search/main.js"></script> <div class="modal" id="mkdocs_search_modal" tabindex="-1" role="dialog" aria-labelledby="searchModalLabel" aria-hidden="true"> <div class="modal-dialog modal-lg"> <div class="modal-content"> <div class="modal-header"> <h4 class="modal-title" id="searchModalLabel">Search</h4> <button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">&times;</span><span class="sr-only">Close</span></button> </div> <div class="modal-body"> <p> From here you can search these documents. Enter your search terms below. </p> <form> <div class="form-group"> <input type="text" class="form-control" placeholder="Search..." 
id="mkdocs-search-query" title="Type search term here"> </div> </form> <div id="mkdocs-search-results"></div> </div> <div class="modal-footer"> </div> </div> </div> </div><div class="modal" id="mkdocs_keyboard_modal" tabindex="-1" role="dialog" aria-labelledby="keyboardModalLabel" aria-hidden="true"> <div class="modal-dialog"> <div class="modal-content"> <div class="modal-header"> <h4 class="modal-title" id="keyboardModalLabel">Keyboard Shortcuts</h4> <button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">&times;</span><span class="sr-only">Close</span></button> </div> <div class="modal-body"> <table class="table"> <thead> <tr> <th style="width: 20%;">Keys</th> <th>Action</th> </tr> </thead> <tbody> <tr> <td class="help shortcut"><kbd>?</kbd></td> <td>Open this help</td> </tr> <tr> <td class="next shortcut"><kbd>n</kbd></td> <td>Next page</td> </tr> <tr> <td class="prev shortcut"><kbd>p</kbd></td> <td>Previous page</td> </tr> <tr> <td class="search shortcut"><kbd>s</kbd></td> <td>Search</td> </tr> </tbody> </table> </div> <div class="modal-footer"> </div> </div> </div> </div> </body> </html>
{ "pile_set_name": "Github" }
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.spi.failover.always; import org.apache.ignite.mxbean.MXBeanDescription; import org.apache.ignite.spi.IgniteSpiManagementMBean; /** * Management bean for {@link AlwaysFailoverSpi}. */ @MXBeanDescription("MBean that provides access to always failover SPI configuration.") public interface AlwaysFailoverSpiMBean extends IgniteSpiManagementMBean { /** * Gets maximum number of attempts to execute a failed job on another node. * If not specified, {@link AlwaysFailoverSpi#DFLT_MAX_FAILOVER_ATTEMPTS} value will be used. * * @return Maximum number of attempts to execute a failed job on another node. */ @MXBeanDescription("Maximum number of attempts to execute a failed job on another node.") public int getMaximumFailoverAttempts(); /** * Get total number of jobs that were failed over. * * @return Total number of failed over jobs. */ @MXBeanDescription("Total number of jobs that were failed over.") public int getTotalFailoverJobsCount(); }
{ "pile_set_name": "Github" }
/* * Copyright (C) 2013 4th Line GmbH, Switzerland * * The contents of this file are subject to the terms of either the GNU * Lesser General Public License Version 2 or later ("LGPL") or the * Common Development and Distribution License Version 1 or later * ("CDDL") (collectively, the "License"). You may not use this file * except in compliance with the License. See LICENSE.txt for more * information. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */ package org.fourthline.cling.model.message.header; import org.fourthline.cling.model.types.ServiceType; import java.net.URI; /** * @author Christian Bauer */ public class ServiceTypeHeader extends UpnpHeader<ServiceType> { public ServiceTypeHeader() { } public ServiceTypeHeader(URI uri) { setString(uri.toString()); } public ServiceTypeHeader(ServiceType value) { setValue(value); } public void setString(String s) throws InvalidHeaderException { try { setValue(ServiceType.valueOf(s)); } catch (RuntimeException ex) { throw new InvalidHeaderException("Invalid service type header value, " + ex.getMessage()); } } public String getString() { return getValue().toString(); } }
{ "pile_set_name": "Github" }
/* * Copyright 2015 Linaro Limited * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/clk-provider.h> #include <linux/regmap.h> #include <linux/reset-controller.h> #include <dt-bindings/clock/qcom,gcc-msm8916.h> #include <dt-bindings/reset/qcom,gcc-msm8916.h> #include "common.h" #include "clk-regmap.h" #include "clk-pll.h" #include "clk-rcg.h" #include "clk-branch.h" #include "reset.h" #include "gdsc.h" enum { P_XO, P_GPLL0, P_GPLL0_AUX, P_BIMC, P_GPLL1, P_GPLL1_AUX, P_GPLL2, P_GPLL2_AUX, P_SLEEP_CLK, P_DSI0_PHYPLL_BYTE, P_DSI0_PHYPLL_DSI, P_EXT_PRI_I2S, P_EXT_SEC_I2S, P_EXT_MCLK, }; static const struct parent_map gcc_xo_gpll0_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, }; static const char * const gcc_xo_gpll0[] = { "xo", "gpll0_vote", }; static const struct parent_map gcc_xo_gpll0_bimc_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_BIMC, 2 }, }; static const char * const gcc_xo_gpll0_bimc[] = { "xo", "gpll0_vote", "bimc_pll_vote", }; static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = { { P_XO, 0 }, { P_GPLL0_AUX, 3 }, { P_GPLL1, 1 }, { P_GPLL2_AUX, 2 }, }; static const char * const gcc_xo_gpll0a_gpll1_gpll2a[] = { "xo", "gpll0_vote", "gpll1_vote", "gpll2_vote", }; static const struct parent_map gcc_xo_gpll0_gpll2_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_GPLL2, 2 }, }; static const char * const gcc_xo_gpll0_gpll2[] = { "xo", "gpll0_vote", "gpll2_vote", }; static 
const struct parent_map gcc_xo_gpll0a_map[] = { { P_XO, 0 }, { P_GPLL0_AUX, 2 }, }; static const char * const gcc_xo_gpll0a[] = { "xo", "gpll0_vote", }; static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_GPLL1_AUX, 2 }, { P_SLEEP_CLK, 6 }, }; static const char * const gcc_xo_gpll0_gpll1a_sleep[] = { "xo", "gpll0_vote", "gpll1_vote", "sleep_clk", }; static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_GPLL1_AUX, 2 }, }; static const char * const gcc_xo_gpll0_gpll1a[] = { "xo", "gpll0_vote", "gpll1_vote", }; static const struct parent_map gcc_xo_dsibyte_map[] = { { P_XO, 0, }, { P_DSI0_PHYPLL_BYTE, 2 }, }; static const char * const gcc_xo_dsibyte[] = { "xo", "dsi0pllbyte", }; static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = { { P_XO, 0 }, { P_GPLL0_AUX, 2 }, { P_DSI0_PHYPLL_BYTE, 1 }, }; static const char * const gcc_xo_gpll0a_dsibyte[] = { "xo", "gpll0_vote", "dsi0pllbyte", }; static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_DSI0_PHYPLL_DSI, 2 }, }; static const char * const gcc_xo_gpll0_dsiphy[] = { "xo", "gpll0_vote", "dsi0pll", }; static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = { { P_XO, 0 }, { P_GPLL0_AUX, 2 }, { P_DSI0_PHYPLL_DSI, 1 }, }; static const char * const gcc_xo_gpll0a_dsiphy[] = { "xo", "gpll0_vote", "dsi0pll", }; static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = { { P_XO, 0 }, { P_GPLL0_AUX, 1 }, { P_GPLL1, 3 }, { P_GPLL2, 2 }, }; static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = { "xo", "gpll0_vote", "gpll1_vote", "gpll2_vote", }; static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_GPLL1, 2 }, { P_SLEEP_CLK, 6 } }; static const char * const gcc_xo_gpll0_gpll1_sleep[] = { "xo", "gpll0_vote", "gpll1_vote", "sleep_clk", }; static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = { { P_XO, 0 }, { P_GPLL1, 1 }, { 
P_EXT_PRI_I2S, 2 }, { P_EXT_MCLK, 3 }, { P_SLEEP_CLK, 6 } }; static const char * const gcc_xo_gpll1_epi2s_emclk_sleep[] = { "xo", "gpll1_vote", "ext_pri_i2s", "ext_mclk", "sleep_clk", }; static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = { { P_XO, 0 }, { P_GPLL1, 1 }, { P_EXT_SEC_I2S, 2 }, { P_EXT_MCLK, 3 }, { P_SLEEP_CLK, 6 } }; static const char * const gcc_xo_gpll1_esi2s_emclk_sleep[] = { "xo", "gpll1_vote", "ext_sec_i2s", "ext_mclk", "sleep_clk", }; static const struct parent_map gcc_xo_sleep_map[] = { { P_XO, 0 }, { P_SLEEP_CLK, 6 } }; static const char * const gcc_xo_sleep[] = { "xo", "sleep_clk", }; static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = { { P_XO, 0 }, { P_GPLL1, 1 }, { P_EXT_MCLK, 2 }, { P_SLEEP_CLK, 6 } }; static const char * const gcc_xo_gpll1_emclk_sleep[] = { "xo", "gpll1_vote", "ext_mclk", "sleep_clk", }; #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } static struct clk_pll gpll0 = { .l_reg = 0x21004, .m_reg = 0x21008, .n_reg = 0x2100c, .config_reg = 0x21014, .mode_reg = 0x21000, .status_reg = 0x2101c, .status_bit = 17, .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, .ops = &clk_pll_ops, }, }; static struct clk_regmap gpll0_vote = { .enable_reg = 0x45000, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpll0_vote", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, .ops = &clk_pll_vote_ops, }, }; static struct clk_pll gpll1 = { .l_reg = 0x20004, .m_reg = 0x20008, .n_reg = 0x2000c, .config_reg = 0x20014, .mode_reg = 0x20000, .status_reg = 0x2001c, .status_bit = 17, .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, .ops = &clk_pll_ops, }, }; static struct clk_regmap gpll1_vote = { .enable_reg = 0x45000, .enable_mask = BIT(1), .hw.init = &(struct clk_init_data){ .name = "gpll1_vote", .parent_names = (const char *[]){ "gpll1" }, 
.num_parents = 1, .ops = &clk_pll_vote_ops, }, }; static struct clk_pll gpll2 = { .l_reg = 0x4a004, .m_reg = 0x4a008, .n_reg = 0x4a00c, .config_reg = 0x4a014, .mode_reg = 0x4a000, .status_reg = 0x4a01c, .status_bit = 17, .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, .ops = &clk_pll_ops, }, }; static struct clk_regmap gpll2_vote = { .enable_reg = 0x45000, .enable_mask = BIT(2), .hw.init = &(struct clk_init_data){ .name = "gpll2_vote", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, .ops = &clk_pll_vote_ops, }, }; static struct clk_pll bimc_pll = { .l_reg = 0x23004, .m_reg = 0x23008, .n_reg = 0x2300c, .config_reg = 0x23014, .mode_reg = 0x23000, .status_reg = 0x2301c, .status_bit = 17, .clkr.hw.init = &(struct clk_init_data){ .name = "bimc_pll", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, .ops = &clk_pll_ops, }, }; static struct clk_regmap bimc_pll_vote = { .enable_reg = 0x45000, .enable_mask = BIT(3), .hw.init = &(struct clk_init_data){ .name = "bimc_pll_vote", .parent_names = (const char *[]){ "bimc_pll" }, .num_parents = 1, .ops = &clk_pll_vote_ops, }, }; static struct clk_rcg2 pcnoc_bfdcd_clk_src = { .cmd_rcgr = 0x27000, .hid_width = 5, .parent_map = gcc_xo_gpll0_bimc_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcnoc_bfdcd_clk_src", .parent_names = gcc_xo_gpll0_bimc, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 system_noc_bfdcd_clk_src = { .cmd_rcgr = 0x26004, .hid_width = 5, .parent_map = gcc_xo_gpll0_bimc_map, .clkr.hw.init = &(struct clk_init_data){ .name = "system_noc_bfdcd_clk_src", .parent_names = gcc_xo_gpll0_bimc, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = { F(40000000, P_GPLL0, 10, 1, 2), F(80000000, P_GPLL0, 10, 0, 0), { } }; static struct clk_rcg2 camss_ahb_clk_src = { .cmd_rcgr = 0x5a000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, 
.freq_tbl = ftbl_gcc_camss_ahb_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "camss_ahb_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_apss_ahb_clk[] = { F(19200000, P_XO, 1, 0, 0), F(50000000, P_GPLL0, 16, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), F(133330000, P_GPLL0, 6, 0, 0), { } }; static struct clk_rcg2 apss_ahb_clk_src = { .cmd_rcgr = 0x46000, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_apss_ahb_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "apss_ahb_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_csi0_1_clk[] = { F(100000000, P_GPLL0, 8, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), { } }; static struct clk_rcg2 csi0_clk_src = { .cmd_rcgr = 0x4e020, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_camss_csi0_1_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "csi0_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 csi1_clk_src = { .cmd_rcgr = 0x4f020, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_camss_csi0_1_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "csi1_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = { F(19200000, P_XO, 1, 0, 0), F(50000000, P_GPLL0_AUX, 16, 0, 0), F(80000000, P_GPLL0_AUX, 10, 0, 0), F(100000000, P_GPLL0_AUX, 8, 0, 0), F(160000000, P_GPLL0_AUX, 5, 0, 0), F(177780000, P_GPLL0_AUX, 4.5, 0, 0), F(200000000, P_GPLL0_AUX, 4, 0, 0), F(266670000, P_GPLL0_AUX, 3, 0, 0), F(294912000, P_GPLL1, 3, 0, 0), F(310000000, P_GPLL2, 3, 0, 0), F(400000000, P_GPLL0_AUX, 2, 0, 0), { } }; static struct clk_rcg2 gfx3d_clk_src = { .cmd_rcgr = 0x59000, .hid_width = 5, .parent_map = gcc_xo_gpll0a_gpll1_gpll2a_map, .freq_tbl = ftbl_gcc_oxili_gfx3d_clk, .clkr.hw.init = 
&(struct clk_init_data){ .name = "gfx3d_clk_src", .parent_names = gcc_xo_gpll0a_gpll1_gpll2a, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_vfe0_clk[] = { F(50000000, P_GPLL0, 16, 0, 0), F(80000000, P_GPLL0, 10, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), F(160000000, P_GPLL0, 5, 0, 0), F(177780000, P_GPLL0, 4.5, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), F(266670000, P_GPLL0, 3, 0, 0), F(320000000, P_GPLL0, 2.5, 0, 0), F(400000000, P_GPLL0, 2, 0, 0), F(465000000, P_GPLL2, 2, 0, 0), { } }; static struct clk_rcg2 vfe0_clk_src = { .cmd_rcgr = 0x58000, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll2_map, .freq_tbl = ftbl_gcc_camss_vfe0_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "vfe0_clk_src", .parent_names = gcc_xo_gpll0_gpll2, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_i2c_apps_clk[] = { F(19200000, P_XO, 1, 0, 0), F(50000000, P_GPLL0, 16, 0, 0), { } }; static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = { .cmd_rcgr = 0x0200c, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup1_i2c_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_spi_apps_clk[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), F(16000000, P_GPLL0, 10, 1, 5), F(19200000, P_XO, 1, 0, 0), F(25000000, P_GPLL0, 16, 1, 2), F(50000000, P_GPLL0, 16, 0, 0), { } }; static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = { .cmd_rcgr = 0x02024, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup1_spi_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = { .cmd_rcgr = 0x03000, 
.hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup2_i2c_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = { .cmd_rcgr = 0x03014, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup2_spi_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = { .cmd_rcgr = 0x04000, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup3_i2c_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = { .cmd_rcgr = 0x04024, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup3_spi_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = { .cmd_rcgr = 0x05000, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup4_i2c_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = { .cmd_rcgr = 0x05024, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup4_spi_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = { 
.cmd_rcgr = 0x06000, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup5_i2c_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = { .cmd_rcgr = 0x06024, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup5_spi_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = { .cmd_rcgr = 0x07000, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup6_i2c_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = { .cmd_rcgr = 0x07024, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup6_spi_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_blsp1_uart1_6_apps_clk[] = { F(3686400, P_GPLL0, 1, 72, 15625), F(7372800, P_GPLL0, 1, 144, 15625), F(14745600, P_GPLL0, 1, 288, 15625), F(16000000, P_GPLL0, 10, 1, 5), F(19200000, P_XO, 1, 0, 0), F(24000000, P_GPLL0, 1, 3, 100), F(25000000, P_GPLL0, 16, 1, 2), F(32000000, P_GPLL0, 1, 1, 25), F(40000000, P_GPLL0, 1, 1, 20), F(46400000, P_GPLL0, 1, 29, 500), F(48000000, P_GPLL0, 1, 3, 50), F(51200000, P_GPLL0, 1, 8, 125), F(56000000, P_GPLL0, 1, 7, 100), F(58982400, P_GPLL0, 1, 1152, 15625), F(60000000, P_GPLL0, 1, 3, 40), { } }; static struct clk_rcg2 blsp1_uart1_apps_clk_src = { .cmd_rcgr = 0x02044, .mnd_width = 16, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, 
.freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_uart1_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 blsp1_uart2_apps_clk_src = { .cmd_rcgr = 0x03034, .mnd_width = 16, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_uart2_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; static struct clk_rcg2 cci_clk_src = { .cmd_rcgr = 0x51000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0a_map, .freq_tbl = ftbl_gcc_camss_cci_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "cci_clk_src", .parent_names = gcc_xo_gpll0a, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = { F(100000000, P_GPLL0, 8, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), { } }; static struct clk_rcg2 camss_gp0_clk_src = { .cmd_rcgr = 0x54000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, .freq_tbl = ftbl_gcc_camss_gp0_1_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "camss_gp0_clk_src", .parent_names = gcc_xo_gpll0_gpll1a_sleep, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 camss_gp1_clk_src = { .cmd_rcgr = 0x55000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, .freq_tbl = ftbl_gcc_camss_gp0_1_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "camss_gp1_clk_src", .parent_names = gcc_xo_gpll0_gpll1a_sleep, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_jpeg0_clk[] = { F(133330000, P_GPLL0, 6, 0, 0), F(266670000, P_GPLL0, 3, 0, 0), F(320000000, P_GPLL0, 2.5, 0, 0), { } }; static struct clk_rcg2 jpeg0_clk_src = { .cmd_rcgr = 0x57000, .hid_width = 5, .parent_map = 
gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_camss_jpeg0_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "jpeg0_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = { F(9600000, P_XO, 2, 0, 0), F(23880000, P_GPLL0, 1, 2, 67), F(66670000, P_GPLL0, 12, 0, 0), { } }; static struct clk_rcg2 mclk0_clk_src = { .cmd_rcgr = 0x52000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, .freq_tbl = ftbl_gcc_camss_mclk0_1_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "mclk0_clk_src", .parent_names = gcc_xo_gpll0_gpll1a_sleep, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 mclk1_clk_src = { .cmd_rcgr = 0x53000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, .freq_tbl = ftbl_gcc_camss_mclk0_1_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "mclk1_clk_src", .parent_names = gcc_xo_gpll0_gpll1a_sleep, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_csi0_1phytimer_clk[] = { F(100000000, P_GPLL0, 8, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), { } }; static struct clk_rcg2 csi0phytimer_clk_src = { .cmd_rcgr = 0x4e000, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_map, .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "csi0phytimer_clk_src", .parent_names = gcc_xo_gpll0_gpll1a, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 csi1phytimer_clk_src = { .cmd_rcgr = 0x4f000, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_map, .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "csi1phytimer_clk_src", .parent_names = gcc_xo_gpll0_gpll1a, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = { F(160000000, P_GPLL0, 5, 0, 0), F(320000000, P_GPLL0, 2.5, 0, 0), F(465000000, P_GPLL2, 2, 0, 0), { } }; static struct 
clk_rcg2 cpp_clk_src = { .cmd_rcgr = 0x58018, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll2_map, .freq_tbl = ftbl_gcc_camss_cpp_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "cpp_clk_src", .parent_names = gcc_xo_gpll0_gpll2, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_crypto_clk[] = { F(50000000, P_GPLL0, 16, 0, 0), F(80000000, P_GPLL0, 10, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), F(160000000, P_GPLL0, 5, 0, 0), { } }; static struct clk_rcg2 crypto_clk_src = { .cmd_rcgr = 0x16004, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_crypto_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "crypto_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; static struct clk_rcg2 gp1_clk_src = { .cmd_rcgr = 0x08004, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, .freq_tbl = ftbl_gcc_gp1_3_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "gp1_clk_src", .parent_names = gcc_xo_gpll0_gpll1a_sleep, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 gp2_clk_src = { .cmd_rcgr = 0x09004, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, .freq_tbl = ftbl_gcc_gp1_3_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "gp2_clk_src", .parent_names = gcc_xo_gpll0_gpll1a_sleep, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 gp3_clk_src = { .cmd_rcgr = 0x0a004, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, .freq_tbl = ftbl_gcc_gp1_3_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "gp3_clk_src", .parent_names = gcc_xo_gpll0_gpll1a_sleep, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 byte0_clk_src = { .cmd_rcgr = 0x4d044, .hid_width = 5, .parent_map = gcc_xo_gpll0a_dsibyte_map, .clkr.hw.init = &(struct clk_init_data){ .name = "byte0_clk_src", 
.parent_names = gcc_xo_gpll0a_dsibyte, .num_parents = 3, .ops = &clk_byte2_ops, .flags = CLK_SET_RATE_PARENT, }, }; static const struct freq_tbl ftbl_gcc_mdss_esc0_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; static struct clk_rcg2 esc0_clk_src = { .cmd_rcgr = 0x4d05c, .hid_width = 5, .parent_map = gcc_xo_dsibyte_map, .freq_tbl = ftbl_gcc_mdss_esc0_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "esc0_clk_src", .parent_names = gcc_xo_dsibyte, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = { F(50000000, P_GPLL0, 16, 0, 0), F(80000000, P_GPLL0, 10, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), F(160000000, P_GPLL0, 5, 0, 0), F(177780000, P_GPLL0, 4.5, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), F(266670000, P_GPLL0, 3, 0, 0), F(320000000, P_GPLL0, 2.5, 0, 0), { } }; static struct clk_rcg2 mdp_clk_src = { .cmd_rcgr = 0x4d014, .hid_width = 5, .parent_map = gcc_xo_gpll0_dsiphy_map, .freq_tbl = ftbl_gcc_mdss_mdp_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "mdp_clk_src", .parent_names = gcc_xo_gpll0_dsiphy, .num_parents = 3, .ops = &clk_rcg2_ops, }, }; static struct clk_rcg2 pclk0_clk_src = { .cmd_rcgr = 0x4d000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0a_dsiphy_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pclk0_clk_src", .parent_names = gcc_xo_gpll0a_dsiphy, .num_parents = 3, .ops = &clk_pixel_ops, .flags = CLK_SET_RATE_PARENT, }, }; static const struct freq_tbl ftbl_gcc_mdss_vsync_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; static struct clk_rcg2 vsync_clk_src = { .cmd_rcgr = 0x4d02c, .hid_width = 5, .parent_map = gcc_xo_gpll0a_map, .freq_tbl = ftbl_gcc_mdss_vsync_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "vsync_clk_src", .parent_names = gcc_xo_gpll0a, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_pdm2_clk[] = { F(64000000, P_GPLL0, 12.5, 0, 0), { } }; static struct clk_rcg2 pdm2_clk_src = { .cmd_rcgr = 0x44010, .hid_width = 5, 
.parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_pdm2_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "pdm2_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = { F(144000, P_XO, 16, 3, 25), F(400000, P_XO, 12, 1, 4), F(20000000, P_GPLL0, 10, 1, 4), F(25000000, P_GPLL0, 16, 1, 2), F(50000000, P_GPLL0, 16, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), F(177770000, P_GPLL0, 4.5, 0, 0), { } }; static struct clk_rcg2 sdcc1_apps_clk_src = { .cmd_rcgr = 0x42004, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_sdcc1_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_floor_ops, }, }; static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk[] = { F(144000, P_XO, 16, 3, 25), F(400000, P_XO, 12, 1, 4), F(20000000, P_GPLL0, 10, 1, 4), F(25000000, P_GPLL0, 16, 1, 2), F(50000000, P_GPLL0, 16, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), { } }; static struct clk_rcg2 sdcc2_apps_clk_src = { .cmd_rcgr = 0x43004, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_sdcc2_apps_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "sdcc2_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_floor_ops, }, }; static const struct freq_tbl ftbl_gcc_apss_tcu_clk[] = { F(155000000, P_GPLL2, 6, 0, 0), F(310000000, P_GPLL2, 3, 0, 0), F(400000000, P_GPLL0, 2, 0, 0), { } }; static struct clk_rcg2 apss_tcu_clk_src = { .cmd_rcgr = 0x1207c, .hid_width = 5, .parent_map = gcc_xo_gpll0a_gpll1_gpll2_map, .freq_tbl = ftbl_gcc_apss_tcu_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "apss_tcu_clk_src", .parent_names = gcc_xo_gpll0a_gpll1_gpll2, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_bimc_gpu_clk[] = { F(19200000, P_XO, 1, 0, 0), F(100000000, P_GPLL0, 8, 
0, 0), F(200000000, P_GPLL0, 4, 0, 0), F(266500000, P_BIMC, 4, 0, 0), F(400000000, P_GPLL0, 2, 0, 0), F(533000000, P_BIMC, 2, 0, 0), { } }; static struct clk_rcg2 bimc_gpu_clk_src = { .cmd_rcgr = 0x31028, .hid_width = 5, .parent_map = gcc_xo_gpll0_bimc_map, .freq_tbl = ftbl_gcc_bimc_gpu_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "bimc_gpu_clk_src", .parent_names = gcc_xo_gpll0_bimc, .num_parents = 3, .flags = CLK_GET_RATE_NOCACHE, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = { F(80000000, P_GPLL0, 10, 0, 0), { } }; static struct clk_rcg2 usb_hs_system_clk_src = { .cmd_rcgr = 0x41010, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_usb_hs_system_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "usb_hs_system_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static const struct freq_tbl ftbl_gcc_ultaudio_ahb_clk[] = { F(3200000, P_XO, 6, 0, 0), F(6400000, P_XO, 3, 0, 0), F(9600000, P_XO, 2, 0, 0), F(19200000, P_XO, 1, 0, 0), F(40000000, P_GPLL0, 10, 1, 2), F(66670000, P_GPLL0, 12, 0, 0), F(80000000, P_GPLL0, 10, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), { } }; static struct clk_rcg2 ultaudio_ahbfabric_clk_src = { .cmd_rcgr = 0x1c010, .hid_width = 5, .mnd_width = 8, .parent_map = gcc_xo_gpll0_gpll1_sleep_map, .freq_tbl = ftbl_gcc_ultaudio_ahb_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "ultaudio_ahbfabric_clk_src", .parent_names = gcc_xo_gpll0_gpll1_sleep, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = { .halt_reg = 0x1c028, .clkr = { .enable_reg = 0x1c028, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_ahbfabric_ixfabric_clk", .parent_names = (const char *[]){ "ultaudio_ahbfabric_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = { .halt_reg = 
0x1c024, .clkr = { .enable_reg = 0x1c024, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk", .parent_names = (const char *[]){ "ultaudio_ahbfabric_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = { F(128000, P_XO, 10, 1, 15), F(256000, P_XO, 5, 1, 15), F(384000, P_XO, 5, 1, 10), F(512000, P_XO, 5, 2, 15), F(576000, P_XO, 5, 3, 20), F(705600, P_GPLL1, 16, 1, 80), F(768000, P_XO, 5, 1, 5), F(800000, P_XO, 5, 5, 24), F(1024000, P_XO, 5, 4, 15), F(1152000, P_XO, 1, 3, 50), F(1411200, P_GPLL1, 16, 1, 40), F(1536000, P_XO, 1, 2, 25), F(1600000, P_XO, 12, 0, 0), F(1728000, P_XO, 5, 9, 20), F(2048000, P_XO, 5, 8, 15), F(2304000, P_XO, 5, 3, 5), F(2400000, P_XO, 8, 0, 0), F(2822400, P_GPLL1, 16, 1, 20), F(3072000, P_XO, 5, 4, 5), F(4096000, P_GPLL1, 9, 2, 49), F(4800000, P_XO, 4, 0, 0), F(5644800, P_GPLL1, 16, 1, 10), F(6144000, P_GPLL1, 7, 1, 21), F(8192000, P_GPLL1, 9, 4, 49), F(9600000, P_XO, 2, 0, 0), F(11289600, P_GPLL1, 16, 1, 5), F(12288000, P_GPLL1, 7, 2, 21), { } }; static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = { .cmd_rcgr = 0x1c054, .hid_width = 5, .mnd_width = 8, .parent_map = gcc_xo_gpll1_epi2s_emclk_sleep_map, .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "ultaudio_lpaif_pri_i2s_clk_src", .parent_names = gcc_xo_gpll1_epi2s_emclk_sleep, .num_parents = 5, .ops = &clk_rcg2_ops, }, }; static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = { .halt_reg = 0x1c068, .clkr = { .enable_reg = 0x1c068, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_lpaif_pri_i2s_clk", .parent_names = (const char *[]){ "ultaudio_lpaif_pri_i2s_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = { .cmd_rcgr = 0x1c06c, .hid_width = 5, 
.mnd_width = 8, .parent_map = gcc_xo_gpll1_esi2s_emclk_sleep_map, .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "ultaudio_lpaif_sec_i2s_clk_src", .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep, .num_parents = 5, .ops = &clk_rcg2_ops, }, }; static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = { .halt_reg = 0x1c080, .clkr = { .enable_reg = 0x1c080, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_lpaif_sec_i2s_clk", .parent_names = (const char *[]){ "ultaudio_lpaif_sec_i2s_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = { .cmd_rcgr = 0x1c084, .hid_width = 5, .mnd_width = 8, .parent_map = gcc_xo_gpll1_emclk_sleep_map, .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "ultaudio_lpaif_aux_i2s_clk_src", .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep, .num_parents = 5, .ops = &clk_rcg2_ops, }, }; static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = { .halt_reg = 0x1c098, .clkr = { .enable_reg = 0x1c098, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_lpaif_aux_i2s_clk", .parent_names = (const char *[]){ "ultaudio_lpaif_aux_i2s_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static const struct freq_tbl ftbl_gcc_ultaudio_xo_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; static struct clk_rcg2 ultaudio_xo_clk_src = { .cmd_rcgr = 0x1c034, .hid_width = 5, .parent_map = gcc_xo_sleep_map, .freq_tbl = ftbl_gcc_ultaudio_xo_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "ultaudio_xo_clk_src", .parent_names = gcc_xo_sleep, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_branch gcc_ultaudio_avsync_xo_clk = { .halt_reg = 0x1c04c, .clkr = { .enable_reg = 0x1c04c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = 
"gcc_ultaudio_avsync_xo_clk", .parent_names = (const char *[]){ "ultaudio_xo_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_ultaudio_stc_xo_clk = { .halt_reg = 0x1c050, .clkr = { .enable_reg = 0x1c050, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_stc_xo_clk", .parent_names = (const char *[]){ "ultaudio_xo_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static const struct freq_tbl ftbl_codec_clk[] = { F(9600000, P_XO, 2, 0, 0), F(12288000, P_XO, 1, 16, 25), F(19200000, P_XO, 1, 0, 0), F(11289600, P_EXT_MCLK, 1, 0, 0), { } }; static struct clk_rcg2 codec_digcodec_clk_src = { .cmd_rcgr = 0x1c09c, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll1_emclk_sleep_map, .freq_tbl = ftbl_codec_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "codec_digcodec_clk_src", .parent_names = gcc_xo_gpll1_emclk_sleep, .num_parents = 4, .ops = &clk_rcg2_ops, }, }; static struct clk_branch gcc_codec_digcodec_clk = { .halt_reg = 0x1c0b0, .clkr = { .enable_reg = 0x1c0b0, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_codec_digcodec_clk", .parent_names = (const char *[]){ "codec_digcodec_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = { .halt_reg = 0x1c000, .clkr = { .enable_reg = 0x1c000, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_pcnoc_mport_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = { .halt_reg = 0x1c004, .clkr = { .enable_reg = 0x1c004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ultaudio_pcnoc_sway_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .ops = 
&clk_branch2_ops, }, }, }; static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = { F(100000000, P_GPLL0, 8, 0, 0), F(160000000, P_GPLL0, 5, 0, 0), F(228570000, P_GPLL0, 3.5, 0, 0), { } }; static struct clk_rcg2 vcodec0_clk_src = { .cmd_rcgr = 0x4C000, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, .freq_tbl = ftbl_gcc_venus0_vcodec0_clk, .clkr.hw.init = &(struct clk_init_data){ .name = "vcodec0_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, }; static struct clk_branch gcc_blsp1_ahb_clk = { .halt_reg = 0x01008, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x45004, .enable_mask = BIT(10), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_blsp1_sleep_clk = { .halt_reg = 0x01004, .clkr = { .enable_reg = 0x01004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_sleep_clk", .parent_names = (const char *[]){ "sleep_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = { .halt_reg = 0x02008, .clkr = { .enable_reg = 0x02008, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup1_i2c_apps_clk", .parent_names = (const char *[]){ "blsp1_qup1_i2c_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = { .halt_reg = 0x02004, .clkr = { .enable_reg = 0x02004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup1_spi_apps_clk", .parent_names = (const char *[]){ "blsp1_qup1_spi_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, }; static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = { .halt_reg = 0x03010, .clkr = { .enable_reg = 
/* Tail of gcc_blsp1_qup2_i2c_apps_clk (opened on the previous line). */
0x03010, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup2_i2c_apps_clk", .parent_names = (const char *[]){ "blsp1_qup2_i2c_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* BLSP1 QUP2-6 SPI/I2C and UART1/2 branch gates: each gates its like-named
 * RCG at the given register bit 0 and forwards rate requests to it. */
static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { .halt_reg = 0x0300c, .clkr = { .enable_reg = 0x0300c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup2_spi_apps_clk", .parent_names = (const char *[]){ "blsp1_qup2_spi_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = { .halt_reg = 0x04020, .clkr = { .enable_reg = 0x04020, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup3_i2c_apps_clk", .parent_names = (const char *[]){ "blsp1_qup3_i2c_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = { .halt_reg = 0x0401c, .clkr = { .enable_reg = 0x0401c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup3_spi_apps_clk", .parent_names = (const char *[]){ "blsp1_qup3_spi_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = { .halt_reg = 0x05020, .clkr = { .enable_reg = 0x05020, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup4_i2c_apps_clk", .parent_names = (const char *[]){ "blsp1_qup4_i2c_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = { .halt_reg = 0x0501c, .clkr = { .enable_reg = 0x0501c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup4_spi_apps_clk", .parent_names = (const char *[]){ "blsp1_qup4_spi_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = { .halt_reg = 0x06020, .clkr = { .enable_reg = 0x06020, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup5_i2c_apps_clk", .parent_names = (const char *[]){ "blsp1_qup5_i2c_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = { .halt_reg = 0x0601c, .clkr = { .enable_reg = 0x0601c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup5_spi_apps_clk", .parent_names = (const char *[]){ "blsp1_qup5_spi_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = { .halt_reg = 0x07020, .clkr = { .enable_reg = 0x07020, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup6_i2c_apps_clk", .parent_names = (const char *[]){ "blsp1_qup6_i2c_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = { .halt_reg = 0x0701c, .clkr = { .enable_reg = 0x0701c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_qup6_spi_apps_clk", .parent_names = (const char *[]){ "blsp1_qup6_spi_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_uart1_apps_clk = { .halt_reg = 0x0203c, .clkr = { .enable_reg = 0x0203c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_uart1_apps_clk", .parent_names = (const char *[]){ "blsp1_uart1_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_blsp1_uart2_apps_clk = { .halt_reg = 0x0302c, .clkr = { .enable_reg = 0x0302c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_uart2_apps_clk", .parent_names = (const char *[]){ "blsp1_uart2_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Boot-ROM AHB clock: voted branch, enabled through the shared vote register
 * 0x45004 bit 7, halt status read from 0x1300c. */
static struct clk_branch gcc_boot_rom_ahb_clk = { .halt_reg = 0x1300c, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x45004, .enable_mask = BIT(7), .hw.init = &(struct clk_init_data){ .name = "gcc_boot_rom_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .ops = &clk_branch2_ops, }, }, };
/* CAMSS CCI bus (AHB) and core gates, parented to camss_ahb_clk_src and
 * cci_clk_src respectively. */
static struct clk_branch gcc_camss_cci_ahb_clk = { .halt_reg = 0x5101c, .clkr = { .enable_reg = 0x5101c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_cci_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_cci_clk = { .halt_reg = 0x51018, .clkr = { .enable_reg = 0x51018, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_cci_clk", .parent_names = (const char *[]){ "cci_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* CSI0 interface gates: AHB leg from camss_ahb_clk_src, the rest from
 * csi0_clk_src (pixel/RDI/phy legs follow on the next lines). */
static struct clk_branch gcc_camss_csi0_ahb_clk = { .halt_reg = 0x4e040, .clkr = { .enable_reg = 0x4e040, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi0_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_csi0_clk = { .halt_reg = 0x4e03c, .clkr = { .enable_reg = 0x4e03c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi0_clk", .parent_names = (const char *[]){ "csi0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* gcc_camss_csi0phy_clk — continues on the next line. */
static struct clk_branch gcc_camss_csi0phy_clk = { .halt_reg = 0x4e048, .clkr = { .enable_reg = 0x4e048, .enable_mask = BIT(0), 
/* Tail of gcc_camss_csi0phy_clk (opened on the previous line). */
.hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi0phy_clk", .parent_names = (const char *[]){ "csi0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Remaining CSI0 legs (pixel, RDI) off csi0_clk_src. */
static struct clk_branch gcc_camss_csi0pix_clk = { .halt_reg = 0x4e058, .clkr = { .enable_reg = 0x4e058, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi0pix_clk", .parent_names = (const char *[]){ "csi0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_csi0rdi_clk = { .halt_reg = 0x4e050, .clkr = { .enable_reg = 0x4e050, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi0rdi_clk", .parent_names = (const char *[]){ "csi0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* CSI1 interface gates, mirroring CSI0 but off csi1_clk_src / 0x4fxxx. */
static struct clk_branch gcc_camss_csi1_ahb_clk = { .halt_reg = 0x4f040, .clkr = { .enable_reg = 0x4f040, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi1_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_csi1_clk = { .halt_reg = 0x4f03c, .clkr = { .enable_reg = 0x4f03c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi1_clk", .parent_names = (const char *[]){ "csi1_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_csi1phy_clk = { .halt_reg = 0x4f048, .clkr = { .enable_reg = 0x4f048, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi1phy_clk", .parent_names = (const char *[]){ "csi1_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_csi1pix_clk = { .halt_reg = 0x4f058, .clkr = { .enable_reg = 0x4f058, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi1pix_clk", .parent_names = (const char *[]){ "csi1_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_csi1rdi_clk = { .halt_reg = 0x4f050, .clkr = { .enable_reg = 0x4f050, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi1rdi_clk", .parent_names = (const char *[]){ "csi1_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* CSI-to-VFE bridge clock, fed from the VFE0 RCG. */
static struct clk_branch gcc_camss_csi_vfe0_clk = { .halt_reg = 0x58050, .clkr = { .enable_reg = 0x58050, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi_vfe0_clk", .parent_names = (const char *[]){ "vfe0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Camera general-purpose clocks GP0/GP1. */
static struct clk_branch gcc_camss_gp0_clk = { .halt_reg = 0x54018, .clkr = { .enable_reg = 0x54018, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_gp0_clk", .parent_names = (const char *[]){ "camss_gp0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_gp1_clk = { .halt_reg = 0x55018, .clkr = { .enable_reg = 0x55018, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_gp1_clk", .parent_names = (const char *[]){ "camss_gp1_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* ISPIF AHB gate off the CAMSS AHB RCG. */
static struct clk_branch gcc_camss_ispif_ahb_clk = { .halt_reg = 0x50004, .clkr = { .enable_reg = 0x50004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_ispif_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* JPEG block: core off jpeg0_clk_src, AHB off camss_ahb_clk_src, AXI off the
 * system NoC RCG. */
static struct clk_branch gcc_camss_jpeg0_clk = { .halt_reg = 0x57020, .clkr = { .enable_reg = 0x57020, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_jpeg0_clk", .parent_names = (const char *[]){ "jpeg0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_jpeg_ahb_clk = { .halt_reg = 0x57024, .clkr = { .enable_reg = 0x57024, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_jpeg_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_jpeg_axi_clk = { .halt_reg = 0x57028, .clkr = { .enable_reg = 0x57028, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_jpeg_axi_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Sensor master clocks MCLK0/MCLK1. */
static struct clk_branch gcc_camss_mclk0_clk = { .halt_reg = 0x52018, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_mclk0_clk", .parent_names = (const char *[]){ "mclk0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_mclk1_clk = { .halt_reg = 0x53018, .clkr = { .enable_reg = 0x53018, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_mclk1_clk", .parent_names = (const char *[]){ "mclk1_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Camera micro-controller AHB gate. */
static struct clk_branch gcc_camss_micro_ahb_clk = { .halt_reg = 0x5600c, .clkr = { .enable_reg = 0x5600c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_micro_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* CSI PHY timer gates off their dedicated RCGs. */
static struct clk_branch gcc_camss_csi0phytimer_clk = { .halt_reg = 0x4e01c, .clkr = { .enable_reg = 0x4e01c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi0phytimer_clk", .parent_names = (const char *[]){ "csi0phytimer_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_csi1phytimer_clk = { .halt_reg = 0x4f01c, .clkr = { .enable_reg = 0x4f01c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_csi1phytimer_clk", .parent_names = (const char *[]){ "csi1phytimer_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* CAMSS-wide AHB gate, plus the top-level AHB gate which — unlike the other
 * CAMSS AHB gates — is parented directly to the PCNOC RCG. */
static struct clk_branch gcc_camss_ahb_clk = { .halt_reg = 0x5a014, .clkr = { .enable_reg = 0x5a014, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_top_ahb_clk = { .halt_reg = 0x56004, .clkr = { .enable_reg = 0x56004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_top_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Camera post-processor (CPP) AHB and core gates. */
static struct clk_branch gcc_camss_cpp_ahb_clk = { .halt_reg = 0x58040, .clkr = { .enable_reg = 0x58040, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_cpp_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_cpp_clk = { .halt_reg = 0x5803c, .clkr = { .enable_reg = 0x5803c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_cpp_clk", .parent_names = (const char *[]){ "cpp_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* VFE0 core, AHB and AXI gates. */
static struct clk_branch gcc_camss_vfe0_clk = { .halt_reg = 0x58038, .clkr = { .enable_reg = 0x58038, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_vfe0_clk", .parent_names = (const char *[]){ "vfe0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_vfe_ahb_clk = { .halt_reg = 0x58044, .clkr = { .enable_reg = 0x58044, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_vfe_ahb_clk", .parent_names = (const char *[]){ "camss_ahb_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_camss_vfe_axi_clk = { .halt_reg = 0x58048, .clkr = { .enable_reg = 0x58048, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_camss_vfe_axi_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Crypto engine clocks: all voted branches through 0x45004 (bits 0/1/2),
 * with halt status at their own registers. */
static struct clk_branch gcc_crypto_ahb_clk = { .halt_reg = 0x16024, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x45004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_crypto_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_crypto_axi_clk = { .halt_reg = 0x16020, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x45004, .enable_mask = BIT(1), .hw.init = &(struct clk_init_data){ .name = "gcc_crypto_axi_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* gcc_crypto_clk — continues on the next line. */
static struct clk_branch gcc_crypto_clk = { .halt_reg = 0x1601c, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x45004, .enable_mask = BIT(2), .hw.init = &(struct clk_init_data){ .name = "gcc_crypto_clk", .parent_names = (const char *[]){ "crypto_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = 
/* Tail of gcc_crypto_clk (opened on the previous line). */
&clk_branch2_ops, }, }, };
/* GPU GMEM clock off the 3D graphics RCG. */
static struct clk_branch gcc_oxili_gmem_clk = { .halt_reg = 0x59024, .clkr = { .enable_reg = 0x59024, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_oxili_gmem_clk", .parent_names = (const char *[]){ "gfx3d_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* General-purpose output clocks GP1-GP3, each gating its own RCG. */
static struct clk_branch gcc_gp1_clk = { .halt_reg = 0x08000, .clkr = { .enable_reg = 0x08000, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_gp1_clk", .parent_names = (const char *[]){ "gp1_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_gp2_clk = { .halt_reg = 0x09000, .clkr = { .enable_reg = 0x09000, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_gp2_clk", .parent_names = (const char *[]){ "gp2_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_gp3_clk = { .halt_reg = 0x0a000, .clkr = { .enable_reg = 0x0a000, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_gp3_clk", .parent_names = (const char *[]){ "gp3_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Display subsystem (MDSS) gates: AHB off PCNOC, AXI off the system NoC, and
 * the DSI byte/esc/pixel/vsync and MDP core gates off their own RCGs. */
static struct clk_branch gcc_mdss_ahb_clk = { .halt_reg = 0x4d07c, .clkr = { .enable_reg = 0x4d07c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mdss_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_mdss_axi_clk = { .halt_reg = 0x4d080, .clkr = { .enable_reg = 0x4d080, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mdss_axi_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_mdss_byte0_clk = { .halt_reg = 0x4d094, .clkr = { .enable_reg = 0x4d094, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mdss_byte0_clk", .parent_names = (const char *[]){ "byte0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_mdss_esc0_clk = { .halt_reg = 0x4d098, .clkr = { .enable_reg = 0x4d098, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mdss_esc0_clk", .parent_names = (const char *[]){ "esc0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_mdss_mdp_clk = { .halt_reg = 0x4D088, .clkr = { .enable_reg = 0x4D088, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mdss_mdp_clk", .parent_names = (const char *[]){ "mdp_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_mdss_pclk0_clk = { .halt_reg = 0x4d084, .clkr = { .enable_reg = 0x4d084, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mdss_pclk0_clk", .parent_names = (const char *[]){ "pclk0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_mdss_vsync_clk = { .halt_reg = 0x4d090, .clkr = { .enable_reg = 0x4d090, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mdss_vsync_clk", .parent_names = (const char *[]){ "vsync_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Modem subsystem configuration AHB gate off PCNOC. */
static struct clk_branch gcc_mss_cfg_ahb_clk = { .halt_reg = 0x49000, .clkr = { .enable_reg = 0x49000, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_cfg_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* gcc_mss_q6_bimc_axi_clk — continues on the next line. */
static struct clk_branch gcc_mss_q6_bimc_axi_clk = { .halt_reg = 0x49004, .clkr = { 
/* Tail of gcc_mss_q6_bimc_axi_clk (opened on the previous line): modem Q6 to
 * BIMC AXI path, parented to the DDR RCG. */
.enable_reg = 0x49004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_q6_bimc_axi_clk", .parent_names = (const char *[]){ "bimc_ddr_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* GPU (oxili) AHB and 3D core gates. */
static struct clk_branch gcc_oxili_ahb_clk = { .halt_reg = 0x59028, .clkr = { .enable_reg = 0x59028, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_oxili_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_oxili_gfx3d_clk = { .halt_reg = 0x59020, .clkr = { .enable_reg = 0x59020, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_oxili_gfx3d_clk", .parent_names = (const char *[]){ "gfx3d_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* PDM (pulse-density modulation) core and AHB gates. */
static struct clk_branch gcc_pdm2_clk = { .halt_reg = 0x4400c, .clkr = { .enable_reg = 0x4400c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pdm2_clk", .parent_names = (const char *[]){ "pdm2_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_pdm_ahb_clk = { .halt_reg = 0x44004, .clkr = { .enable_reg = 0x44004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pdm_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* PRNG AHB clock: voted branch via 0x45004 bit 8. */
static struct clk_branch gcc_prng_ahb_clk = { .halt_reg = 0x13004, .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x45004, .enable_mask = BIT(8), .hw.init = &(struct clk_init_data){ .name = "gcc_prng_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .ops = &clk_branch2_ops, }, }, };
/* SDCC1/SDCC2 AHB (off PCNOC) and apps-interface (off their RCGs) gates. */
static struct clk_branch gcc_sdcc1_ahb_clk = { .halt_reg = 0x4201c, .clkr = { .enable_reg = 0x4201c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc1_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_sdcc1_apps_clk = { .halt_reg = 0x42018, .clkr = { .enable_reg = 0x42018, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc1_apps_clk", .parent_names = (const char *[]){ "sdcc1_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_sdcc2_ahb_clk = { .halt_reg = 0x4301c, .clkr = { .enable_reg = 0x4301c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc2_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_sdcc2_apps_clk = { .halt_reg = 0x43018, .clkr = { .enable_reg = 0x43018, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc2_apps_clk", .parent_names = (const char *[]){ "sdcc2_apps_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* DDR RCG: no freq table here; CLK_GET_RATE_NOCACHE because the rate may be
 * changed outside this driver's control (presumably by secure firmware /
 * the bus scaling path — confirm against the SoC documentation). */
static struct clk_rcg2 bimc_ddr_clk_src = { .cmd_rcgr = 0x32004, .hid_width = 5, .parent_map = gcc_xo_gpll0_bimc_map, .clkr.hw.init = &(struct clk_init_data){ .name = "bimc_ddr_clk_src", .parent_names = gcc_xo_gpll0_bimc, .num_parents = 3, .ops = &clk_rcg2_ops, .flags = CLK_GET_RATE_NOCACHE, }, };
/* TCU/TBU (SMMU translation units) clocks: all enabled via the shared vote
 * register 0x4500c at the bit given by enable_mask. */
static struct clk_branch gcc_apss_tcu_clk = { .halt_reg = 0x12018, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(1), .hw.init = &(struct clk_init_data){ .name = "gcc_apss_tcu_clk", .parent_names = (const char *[]){ "bimc_ddr_clk_src", }, .num_parents = 1, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_gfx_tcu_clk = { .halt_reg = 0x12020, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(2), .hw.init = &(struct clk_init_data){ .name = "gcc_gfx_tcu_clk", .parent_names = (const char *[]){ "bimc_ddr_clk_src", }, .num_parents = 1, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_gtcu_ahb_clk = { .halt_reg = 0x12044, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(13), .hw.init = &(struct clk_init_data){ .name = "gcc_gtcu_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* BIMC-to-GPU fabric clocks off bimc_gpu_clk_src. */
static struct clk_branch gcc_bimc_gfx_clk = { .halt_reg = 0x31024, .clkr = { .enable_reg = 0x31024, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_bimc_gfx_clk", .parent_names = (const char *[]){ "bimc_gpu_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_bimc_gpu_clk = { .halt_reg = 0x31040, .clkr = { .enable_reg = 0x31040, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_bimc_gpu_clk", .parent_names = (const char *[]){ "bimc_gpu_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_jpeg_tbu_clk = { .halt_reg = 0x12034, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(10), .hw.init = &(struct clk_init_data){ .name = "gcc_jpeg_tbu_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_mdp_tbu_clk = { .halt_reg = 0x1201c, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(4), .hw.init = &(struct clk_init_data){ .name = "gcc_mdp_tbu_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_smmu_cfg_clk = { .halt_reg = 0x12038, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(12), .hw.init = &(struct clk_init_data){ .name = "gcc_smmu_cfg_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_venus_tbu_clk = { .halt_reg = 0x12014, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(5), .hw.init = &(struct clk_init_data){ .name = "gcc_venus_tbu_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_vfe_tbu_clk = { .halt_reg = 0x1203c, .clkr = { .enable_reg = 0x4500c, .enable_mask = BIT(9), .hw.init = &(struct clk_init_data){ .name = "gcc_vfe_tbu_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* USB HS controller clocks: PHY sleep (off sleep_clk_src), AHB (off PCNOC)
 * and the system clock off its RCG. */
static struct clk_branch gcc_usb2a_phy_sleep_clk = { .halt_reg = 0x4102c, .clkr = { .enable_reg = 0x4102c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_usb2a_phy_sleep_clk", .parent_names = (const char *[]){ "sleep_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_usb_hs_ahb_clk = { .halt_reg = 0x41008, .clkr = { .enable_reg = 0x41008, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_usb_hs_ahb_clk", .parent_names = (const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_usb_hs_system_clk = { .halt_reg = 0x41004, .clkr = { .enable_reg = 0x41004, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_usb_hs_system_clk", .parent_names = (const char *[]){ "usb_hs_system_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* gcc_venus0_ahb_clk — continues on the next line. */
static struct clk_branch gcc_venus0_ahb_clk = { .halt_reg = 0x4c020, .clkr = { .enable_reg = 0x4c020, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_venus0_ahb_clk", .parent_names = 
/* Tail of gcc_venus0_ahb_clk (opened on the previous line). */
(const char *[]){ "pcnoc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* Venus video subsystem AXI and core-codec gates. */
static struct clk_branch gcc_venus0_axi_clk = { .halt_reg = 0x4c024, .clkr = { .enable_reg = 0x4c024, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_venus0_axi_clk", .parent_names = (const char *[]){ "system_noc_bfdcd_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
static struct clk_branch gcc_venus0_vcodec0_clk = { .halt_reg = 0x4c01c, .clkr = { .enable_reg = 0x4c01c, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_venus0_vcodec0_clk", .parent_names = (const char *[]){ "vcodec0_clk_src", }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, };
/* GDSC power domains for the multimedia blocks; each is a simple off/on
 * domain controlled through the given GDSCR register. */
static struct gdsc venus_gdsc = { .gdscr = 0x4c018, .pd = { .name = "venus", }, .pwrsts = PWRSTS_OFF_ON, };
static struct gdsc mdss_gdsc = { .gdscr = 0x4d078, .pd = { .name = "mdss", }, .pwrsts = PWRSTS_OFF_ON, };
static struct gdsc jpeg_gdsc = { .gdscr = 0x5701c, .pd = { .name = "jpeg", }, .pwrsts = PWRSTS_OFF_ON, };
static struct gdsc vfe_gdsc = { .gdscr = 0x58034, .pd = { .name = "vfe", }, .pwrsts = PWRSTS_OFF_ON, };
static struct gdsc oxili_gdsc = { .gdscr = 0x5901c, .pd = { .name = "oxili", }, .pwrsts = PWRSTS_OFF_ON, };
/* Lookup table mapping the DT binding's clock indices (GPLL0, GCC_*_CLK, ...)
 * to the clk_regmap of each definition above, consumed by the qcom clock
 * registration code. */
static struct clk_regmap *gcc_msm8916_clocks[] = { [GPLL0] = &gpll0.clkr, [GPLL0_VOTE] = &gpll0_vote, [BIMC_PLL] = &bimc_pll.clkr, [BIMC_PLL_VOTE] = &bimc_pll_vote, [GPLL1] = &gpll1.clkr, [GPLL1_VOTE] = &gpll1_vote, [GPLL2] = &gpll2.clkr, [GPLL2_VOTE] = &gpll2_vote, [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr, [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr, [CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr, [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr, [CSI0_CLK_SRC] = &csi0_clk_src.clkr, [CSI1_CLK_SRC] = &csi1_clk_src.clkr, [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr, [VFE0_CLK_SRC] = &vfe0_clk_src.clkr, [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr, [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr, [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr, [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr, [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr, [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr, [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr, [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr, [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr, [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr, [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr, [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr, [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr, [CCI_CLK_SRC] = &cci_clk_src.clkr, [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr, [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr, [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr, [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr, [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr, [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr, [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr, [CPP_CLK_SRC] = &cpp_clk_src.clkr, [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr, [GP1_CLK_SRC] = &gp1_clk_src.clkr, [GP2_CLK_SRC] = &gp2_clk_src.clkr, [GP3_CLK_SRC] = &gp3_clk_src.clkr, [BYTE0_CLK_SRC] = &byte0_clk_src.clkr, [ESC0_CLK_SRC] = &esc0_clk_src.clkr, [MDP_CLK_SRC] = &mdp_clk_src.clkr, [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr, [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, [PDM2_CLK_SRC] = &pdm2_clk_src.clkr, [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr, [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr, [APSS_TCU_CLK_SRC] = &apss_tcu_clk_src.clkr, [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr, [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr, [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr, [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr, [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr, [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr, [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr, [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr, [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr, [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr, [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr, [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr, [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr, [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr, [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr, [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr, [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr, [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr, [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr, [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr, [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr, [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr, [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr, [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr, [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr, [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr, [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr, [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr, [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr, [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr, [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr, [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr, [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr, [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr, [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr, [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr, [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr, [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr, [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr, [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr, [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr, [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr, [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr, [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr, [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr, [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr, [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr, [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr, [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr, [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr, [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr, [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr, [GCC_OXILI_GMEM_CLK] = &gcc_oxili_gmem_clk.clkr, [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr, [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr, [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr, [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr, [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr, [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr, [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr, [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr, [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr, [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr, [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr, [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr, [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr, [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr, [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr, [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr, [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr, [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr, [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr, [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr, [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr, [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr, [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr, [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr, [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr, [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr, [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr, [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr, [ULTAUDIO_AHBFABRIC_CLK_SRC] = &ultaudio_ahbfabric_clk_src.clkr, [ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC] = &ultaudio_lpaif_pri_i2s_clk_src.clkr, [ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC] = &ultaudio_lpaif_sec_i2s_clk_src.clkr, [ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC] = &ultaudio_lpaif_aux_i2s_clk_src.clkr, [ULTAUDIO_XO_CLK_SRC] = &ultaudio_xo_clk_src.clkr, [CODEC_DIGCODEC_CLK_SRC] = &codec_digcodec_clk_src.clkr, [GCC_ULTAUDIO_PCNOC_MPORT_CLK] = &gcc_ultaudio_pcnoc_mport_clk.clkr, [GCC_ULTAUDIO_PCNOC_SWAY_CLK] = &gcc_ultaudio_pcnoc_sway_clk.clkr, [GCC_ULTAUDIO_AVSYNC_XO_CLK] = &gcc_ultaudio_avsync_xo_clk.clkr, [GCC_ULTAUDIO_STC_XO_CLK] = &gcc_ultaudio_stc_xo_clk.clkr, [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_clk.clkr, [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_lpm_clk.clkr, [GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK] = &gcc_ultaudio_lpaif_pri_i2s_clk.clkr, [GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr, [GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr, [GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr, [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr, };
/* Lookup table mapping the binding's GDSC indices to the power domains above. */
static struct gdsc *gcc_msm8916_gdscs[] = { [VENUS_GDSC] = &venus_gdsc, [MDSS_GDSC] = &mdss_gdsc, [JPEG_GDSC] = &jpeg_gdsc, [VFE_GDSC] = &vfe_gdsc, [OXILI_GDSC] = &oxili_gdsc, };
/* Reset map — definition continues past the end of this chunk. */
static const struct qcom_reset_map 
gcc_msm8916_resets[] = { [GCC_BLSP1_BCR] = { 0x01000 }, [GCC_BLSP1_QUP1_BCR] = { 0x02000 }, [GCC_BLSP1_UART1_BCR] = { 0x02038 }, [GCC_BLSP1_QUP2_BCR] = { 0x03008 }, [GCC_BLSP1_UART2_BCR] = { 0x03028 }, [GCC_BLSP1_QUP3_BCR] = { 0x04018 }, [GCC_BLSP1_QUP4_BCR] = { 0x05018 }, [GCC_BLSP1_QUP5_BCR] = { 0x06018 }, [GCC_BLSP1_QUP6_BCR] = { 0x07018 }, [GCC_IMEM_BCR] = { 0x0e000 }, [GCC_SMMU_BCR] = { 0x12000 }, [GCC_APSS_TCU_BCR] = { 0x12050 }, [GCC_SMMU_XPU_BCR] = { 0x12054 }, [GCC_PCNOC_TBU_BCR] = { 0x12058 }, [GCC_PRNG_BCR] = { 0x13000 }, [GCC_BOOT_ROM_BCR] = { 0x13008 }, [GCC_CRYPTO_BCR] = { 0x16000 }, [GCC_SEC_CTRL_BCR] = { 0x1a000 }, [GCC_AUDIO_CORE_BCR] = { 0x1c008 }, [GCC_ULT_AUDIO_BCR] = { 0x1c0b4 }, [GCC_DEHR_BCR] = { 0x1f000 }, [GCC_SYSTEM_NOC_BCR] = { 0x26000 }, [GCC_PCNOC_BCR] = { 0x27018 }, [GCC_TCSR_BCR] = { 0x28000 }, [GCC_QDSS_BCR] = { 0x29000 }, [GCC_DCD_BCR] = { 0x2a000 }, [GCC_MSG_RAM_BCR] = { 0x2b000 }, [GCC_MPM_BCR] = { 0x2c000 }, [GCC_SPMI_BCR] = { 0x2e000 }, [GCC_SPDM_BCR] = { 0x2f000 }, [GCC_MM_SPDM_BCR] = { 0x2f024 }, [GCC_BIMC_BCR] = { 0x31000 }, [GCC_RBCPR_BCR] = { 0x33000 }, [GCC_TLMM_BCR] = { 0x34000 }, [GCC_USB_HS_BCR] = { 0x41000 }, [GCC_USB2A_PHY_BCR] = { 0x41028 }, [GCC_SDCC1_BCR] = { 0x42000 }, [GCC_SDCC2_BCR] = { 0x43000 }, [GCC_PDM_BCR] = { 0x44000 }, [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000 }, [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000 }, [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008 }, [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010 }, [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018 }, [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020 }, [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028 }, [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030 }, [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038 }, [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040 }, [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048 }, [GCC_MMSS_BCR] = { 0x4b000 }, [GCC_VENUS0_BCR] = { 0x4c014 }, [GCC_MDSS_BCR] = { 0x4d074 }, [GCC_CAMSS_PHY0_BCR] = { 0x4e018 }, [GCC_CAMSS_CSI0_BCR] = { 0x4e038 }, [GCC_CAMSS_CSI0PHY_BCR] = { 0x4e044 }, 
[GCC_CAMSS_CSI0RDI_BCR] = { 0x4e04c }, [GCC_CAMSS_CSI0PIX_BCR] = { 0x4e054 }, [GCC_CAMSS_PHY1_BCR] = { 0x4f018 }, [GCC_CAMSS_CSI1_BCR] = { 0x4f038 }, [GCC_CAMSS_CSI1PHY_BCR] = { 0x4f044 }, [GCC_CAMSS_CSI1RDI_BCR] = { 0x4f04c }, [GCC_CAMSS_CSI1PIX_BCR] = { 0x4f054 }, [GCC_CAMSS_ISPIF_BCR] = { 0x50000 }, [GCC_CAMSS_CCI_BCR] = { 0x51014 }, [GCC_CAMSS_MCLK0_BCR] = { 0x52014 }, [GCC_CAMSS_MCLK1_BCR] = { 0x53014 }, [GCC_CAMSS_GP0_BCR] = { 0x54014 }, [GCC_CAMSS_GP1_BCR] = { 0x55014 }, [GCC_CAMSS_TOP_BCR] = { 0x56000 }, [GCC_CAMSS_MICRO_BCR] = { 0x56008 }, [GCC_CAMSS_JPEG_BCR] = { 0x57018 }, [GCC_CAMSS_VFE_BCR] = { 0x58030 }, [GCC_CAMSS_CSI_VFE0_BCR] = { 0x5804c }, [GCC_OXILI_BCR] = { 0x59018 }, [GCC_GMEM_BCR] = { 0x5902c }, [GCC_CAMSS_AHB_BCR] = { 0x5a018 }, [GCC_MDP_TBU_BCR] = { 0x62000 }, [GCC_GFX_TBU_BCR] = { 0x63000 }, [GCC_GFX_TCU_BCR] = { 0x64000 }, [GCC_MSS_TBU_AXI_BCR] = { 0x65000 }, [GCC_MSS_TBU_GSS_AXI_BCR] = { 0x66000 }, [GCC_MSS_TBU_Q6_AXI_BCR] = { 0x67000 }, [GCC_GTCU_AHB_BCR] = { 0x68000 }, [GCC_SMMU_CFG_BCR] = { 0x69000 }, [GCC_VFE_TBU_BCR] = { 0x6a000 }, [GCC_VENUS_TBU_BCR] = { 0x6b000 }, [GCC_JPEG_TBU_BCR] = { 0x6c000 }, [GCC_PRONTO_TBU_BCR] = { 0x6d000 }, [GCC_SMMU_CATS_BCR] = { 0x7c000 }, }; static const struct regmap_config gcc_msm8916_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = 0x80000, .fast_io = true, }; static const struct qcom_cc_desc gcc_msm8916_desc = { .config = &gcc_msm8916_regmap_config, .clks = gcc_msm8916_clocks, .num_clks = ARRAY_SIZE(gcc_msm8916_clocks), .resets = gcc_msm8916_resets, .num_resets = ARRAY_SIZE(gcc_msm8916_resets), .gdscs = gcc_msm8916_gdscs, .num_gdscs = ARRAY_SIZE(gcc_msm8916_gdscs), }; static const struct of_device_id gcc_msm8916_match_table[] = { { .compatible = "qcom,gcc-msm8916" }, { } }; MODULE_DEVICE_TABLE(of, gcc_msm8916_match_table); static int gcc_msm8916_probe(struct platform_device *pdev) { int ret; struct device *dev = &pdev->dev; ret = qcom_cc_register_board_clk(dev, 
"xo_board", "xo", 19200000); if (ret) return ret; ret = qcom_cc_register_sleep_clk(dev); if (ret) return ret; return qcom_cc_probe(pdev, &gcc_msm8916_desc); } static struct platform_driver gcc_msm8916_driver = { .probe = gcc_msm8916_probe, .driver = { .name = "gcc-msm8916", .of_match_table = gcc_msm8916_match_table, }, }; static int __init gcc_msm8916_init(void) { return platform_driver_register(&gcc_msm8916_driver); } core_initcall(gcc_msm8916_init); static void __exit gcc_msm8916_exit(void) { platform_driver_unregister(&gcc_msm8916_driver); } module_exit(gcc_msm8916_exit); MODULE_DESCRIPTION("Qualcomm GCC MSM8916 Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:gcc-msm8916");
{ "pile_set_name": "Github" }
// // NSAttributeDescription+MagicalDataImport.h // Magical Record // // Created by Saul Mora on 9/4/11. // Copyright 2011 Magical Panda Software LLC. All rights reserved. // #import <CoreData/CoreData.h> @interface NSAttributeDescription (MagicalRecord_DataImport) - (NSString *) MR_primaryKey; - (id) MR_valueForKeyPath:(NSString *)keyPath fromObjectData:(id)objectData; @end
{ "pile_set_name": "Github" }
#import "Expecta.h" EXPMatcherInterface(_beGreaterThan, (id expected)); EXPMatcherInterface(beGreaterThan, (id expected)); #define beGreaterThan(expected) _beGreaterThan(EXPObjectify((expected)))
{ "pile_set_name": "Github" }
# # iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1 # # Written by Hye-Shik Chang <[email protected]> # import _codecs_iso2022, codecs import _multibytecodec as mbc codec = _codecs_iso2022.getcodec('iso2022_jp_1') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='iso2022_jp_1', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
{ "pile_set_name": "Github" }
{ "images" : [ { "idiom" : "universal", "scale" : "1x" }, { "idiom" : "universal", "filename" : "style16.jpg", "scale" : "2x" }, { "idiom" : "universal", "scale" : "3x" } ], "info" : { "version" : 1, "author" : "xcode" } }
{ "pile_set_name": "Github" }
<?php /** * Copyright © Magento, Inc. All rights reserved. * See COPYING.txt for license details. */ namespace Magento\Setup\Console\Command; use Magento\Framework\ObjectManagerInterface; use Magento\Setup\Model\ObjectManagerProvider; use Symfony\Component\Console\Input\InputArgument; use Symfony\Component\Console\Input\InputInterface; use Symfony\Component\Console\Input\InputOption; use Symfony\Component\Console\Output\OutputInterface; /** * Abstract class for Enable and Disable commands to consolidate common logic */ abstract class AbstractModuleCommand extends AbstractSetupCommand { /** * Names of input arguments or options */ const INPUT_KEY_MODULES = 'module'; const INPUT_KEY_CLEAR_STATIC_CONTENT = 'clear-static-content'; /** * Object manager * * @var ObjectManagerInterface */ protected $objectManager; /** * Inject dependencies * * @param ObjectManagerProvider $objectManagerProvider */ public function __construct(ObjectManagerProvider $objectManagerProvider) { $this->objectManager = $objectManagerProvider->get(); parent::__construct(); } /** * {@inheritdoc} */ protected function configure() { $this->addArgument( self::INPUT_KEY_MODULES, InputArgument::IS_ARRAY | ($this->isModuleRequired() ? InputArgument::REQUIRED : InputArgument::OPTIONAL), 'Name of the module' ); $this->addOption( self::INPUT_KEY_CLEAR_STATIC_CONTENT, 'c', InputOption::VALUE_NONE, 'Clear generated static view files. 
Necessary, if the module(s) have static view files' ); parent::configure(); } /** * Returns if module argument is required * * @return bool */ abstract protected function isModuleRequired(); /** * Cleanup after updated modules status * * @param InputInterface $input * @param OutputInterface $output * @return void */ protected function cleanup(InputInterface $input, OutputInterface $output) { /** @var \Magento\Framework\App\Cache $cache */ $cache = $this->objectManager->get(\Magento\Framework\App\Cache::class); $cache->clean(); $output->writeln('<info>Cache cleared successfully.</info>'); /** @var \Magento\Framework\App\State\CleanupFiles $cleanupFiles */ $cleanupFiles = $this->objectManager->get(\Magento\Framework\App\State\CleanupFiles::class); $cleanupFiles->clearCodeGeneratedClasses(); $output->writeln( "<info>Generated classes cleared successfully. Please run the 'setup:di:compile' command to " . 'generate classes.</info>' ); if ($input->getOption(self::INPUT_KEY_CLEAR_STATIC_CONTENT)) { $cleanupFiles->clearMaterializedViewFiles(); $output->writeln('<info>Generated static view files cleared successfully.</info>'); } else { $output->writeln( "<info>Info: Some modules might require static view files to be cleared. To do this, run '" . $this->getName() . "' with the --" . self::INPUT_KEY_CLEAR_STATIC_CONTENT . ' option to clear them.</info>' ); } } }
{ "pile_set_name": "Github" }
# Autodetecting setup.py script for building the Python extensions # __version__ = "$Revision$" import sys, os, imp, re, optparse from glob import glob from platform import machine as platform_machine import sysconfig from distutils import log from distutils import text_file from distutils.errors import * from distutils.core import Extension, setup from distutils.command.build_ext import build_ext from distutils.command.install import install from distutils.command.install_lib import install_lib from distutils.spawn import find_executable cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ def get_platform(): # cross build if "_PYTHON_HOST_PLATFORM" in os.environ: return os.environ["_PYTHON_HOST_PLATFORM"] # Get value of sys.platform if sys.platform.startswith('osf1'): return 'osf1' return sys.platform host_platform = get_platform() # Were we compiled --with-pydebug or with #define Py_DEBUG? COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS")) # This global variable is used to hold the list of modules to be disabled. disabled_module_list = [] def add_dir_to_list(dirlist, dir): """Add the directory 'dir' to the list 'dirlist' (at the front) if 1) 'dir' is not already in 'dirlist' 2) 'dir' actually exists, and is a directory.""" if dir is not None and os.path.isdir(dir) and dir not in dirlist: dirlist.insert(0, dir) def macosx_sdk_root(): """ Return the directory of the current OSX SDK, or '/' if no SDK was specified. 
""" cflags = sysconfig.get_config_var('CFLAGS') m = re.search(r'-isysroot\s+(\S+)', cflags) if m is None: sysroot = '/' else: sysroot = m.group(1) return sysroot def is_macosx_sdk_path(path): """ Returns True if 'path' can be located in an OSX SDK """ return ( (path.startswith('/usr/') and not path.startswith('/usr/local')) or path.startswith('/System/') or path.startswith('/Library/') ) def find_file(filename, std_dirs, paths): """Searches for the directory where a given file is located, and returns a possibly-empty list of additional directories, or None if the file couldn't be found at all. 'filename' is the name of a file, such as readline.h or libcrypto.a. 'std_dirs' is the list of standard system directories; if the file is found in one of them, no additional directives are needed. 'paths' is a list of additional locations to check; if the file is found in one of them, the resulting list will contain the directory. """ if host_platform == 'darwin': # Honor the MacOSX SDK setting when one was specified. # An SDK is a directory with the same structure as a real # system, but with only header files and libraries. 
sysroot = macosx_sdk_root() # Check the standard locations for dir in std_dirs: f = os.path.join(dir, filename) if host_platform == 'darwin' and is_macosx_sdk_path(dir): f = os.path.join(sysroot, dir[1:], filename) if os.path.exists(f): return [] # Check the additional directories for dir in paths: f = os.path.join(dir, filename) if host_platform == 'darwin' and is_macosx_sdk_path(dir): f = os.path.join(sysroot, dir[1:], filename) if os.path.exists(f): return [dir] # Not found anywhere return None def find_library_file(compiler, libname, std_dirs, paths): result = compiler.find_library_file(std_dirs + paths, libname) if result is None: return None if host_platform == 'darwin': sysroot = macosx_sdk_root() # Check whether the found file is in one of the standard directories dirname = os.path.dirname(result) for p in std_dirs: # Ensure path doesn't end with path separator p = p.rstrip(os.sep) if host_platform == 'darwin' and is_macosx_sdk_path(p): # Note that, as of Xcode 7, Apple SDKs may contain textual stub # libraries with .tbd extensions rather than the normal .dylib # shared libraries installed in /. The Apple compiler tool # chain handles this transparently but it can cause problems # for programs that are being built with an SDK and searching # for specific libraries. Distutils find_library_file() now # knows to also search for and return .tbd files. But callers # of find_library_file need to keep in mind that the base filename # of the returned SDK library file might have a different extension # from that of the library file installed on the running system, # for example: # /Applications/Xcode.app/Contents/Developer/Platforms/ # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/ # usr/lib/libedit.tbd # vs # /usr/lib/libedit.dylib if os.path.join(sysroot, p[1:]) == dirname: return [ ] if p == dirname: return [ ] # Otherwise, it must have been in one of the additional directories, # so we have to figure out which one. 
for p in paths: # Ensure path doesn't end with path separator p = p.rstrip(os.sep) if host_platform == 'darwin' and is_macosx_sdk_path(p): if os.path.join(sysroot, p[1:]) == dirname: return [ p ] if p == dirname: return [p] else: assert False, "Internal error: Path not found in std_dirs or paths" def module_enabled(extlist, modname): """Returns whether the module 'modname' is present in the list of extensions 'extlist'.""" extlist = [ext for ext in extlist if ext.name == modname] return len(extlist) def find_module_file(module, dirlist): """Find a module in a set of possible folders. If it is not found return the unadorned filename""" list = find_file(module, [], dirlist) if not list: return module if len(list) > 1: log.info("WARNING: multiple copies of %s found"%module) return os.path.join(list[0], module) class PyBuildExt(build_ext): def __init__(self, dist): build_ext.__init__(self, dist) self.failed = [] def build_extensions(self): # Detect which modules should be compiled missing = self.detect_modules() # Remove modules that are present on the disabled list extensions = [ext for ext in self.extensions if ext.name not in disabled_module_list] # move ctypes to the end, it depends on other modules ext_map = dict((ext.name, i) for i, ext in enumerate(extensions)) if "_ctypes" in ext_map: ctypes = extensions.pop(ext_map["_ctypes"]) extensions.append(ctypes) self.extensions = extensions # Fix up the autodetected modules, prefixing all the source files # with Modules/ and adding Python's include directory to the path. (srcdir,) = sysconfig.get_config_vars('srcdir') if not srcdir: # Maybe running on Windows but not using CYGWIN? 
raise ValueError("No source directory; cannot proceed.") srcdir = os.path.abspath(srcdir) moddirlist = [os.path.join(srcdir, 'Modules')] # Platform-dependent module source and include directories incdirlist = [] if host_platform == 'darwin' and ("--disable-toolbox-glue" not in sysconfig.get_config_var("CONFIG_ARGS")): # Mac OS X also includes some mac-specific modules macmoddir = os.path.join(srcdir, 'Mac/Modules') moddirlist.append(macmoddir) incdirlist.append(os.path.join(srcdir, 'Mac/Include')) # Fix up the paths for scripts, too self.distribution.scripts = [os.path.join(srcdir, filename) for filename in self.distribution.scripts] # Python header files headers = [sysconfig.get_config_h_filename()] headers += glob(os.path.join(sysconfig.get_path('include'), "*.h")) for ext in self.extensions[:]: ext.sources = [ find_module_file(filename, moddirlist) for filename in ext.sources ] if ext.depends is not None: ext.depends = [find_module_file(filename, moddirlist) for filename in ext.depends] else: ext.depends = [] # re-compile extensions if a header file has been changed ext.depends.extend(headers) # platform specific include directories ext.include_dirs.extend(incdirlist) # If a module has already been built statically, # don't build it here if ext.name in sys.builtin_module_names: self.extensions.remove(ext) # Parse Modules/Setup and Modules/Setup.local to figure out which # modules are turned on in the file. remove_modules = [] for filename in ('Modules/Setup', 'Modules/Setup.local'): input = text_file.TextFile(filename, join_lines=1) while 1: line = input.readline() if not line: break line = line.split() remove_modules.append(line[0]) input.close() for ext in self.extensions[:]: if ext.name in remove_modules: self.extensions.remove(ext) # When you run "make CC=altcc" or something similar, you really want # those environment variables passed into the setup.py phase. Here's # a small set of useful ones. 
compiler = os.environ.get('CC') args = {} # unfortunately, distutils doesn't let us provide separate C and C++ # compilers if compiler is not None: (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS') args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags self.compiler.set_executables(**args) build_ext.build_extensions(self) longest = 0 if self.extensions: longest = max([len(e.name) for e in self.extensions]) if self.failed: longest = max(longest, max([len(name) for name in self.failed])) def print_three_column(lst): lst.sort(key=str.lower) # guarantee zip() doesn't drop anything while len(lst) % 3: lst.append("") for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]): print "%-*s %-*s %-*s" % (longest, e, longest, f, longest, g) if missing: print print ("Python build finished, but the necessary bits to build " "these modules were not found:") print_three_column(missing) print ("To find the necessary bits, look in setup.py in" " detect_modules() for the module's name.") print if self.failed: failed = self.failed[:] print print "Failed to build these modules:" print_three_column(failed) print def build_extension(self, ext): if ext.name == '_ctypes': if not self.configure_ctypes(ext): return try: build_ext.build_extension(self, ext) except (CCompilerError, DistutilsError), why: self.announce('WARNING: building of extension "%s" failed: %s' % (ext.name, sys.exc_info()[1])) self.failed.append(ext.name) return # Workaround for Mac OS X: The Carbon-based modules cannot be # reliably imported into a command-line Python if 'Carbon' in ext.extra_link_args: self.announce( 'WARNING: skipping import check for Carbon-based "%s"' % ext.name) return if host_platform == 'darwin' and ( sys.maxint > 2**32 and '-arch' in ext.extra_link_args): # Don't bother doing an import check when an extension was # build with an explicit '-arch' flag on OSX. 
That's currently # only used to build 32-bit only extensions in a 4-way # universal build and loading 32-bit code into a 64-bit # process will fail. self.announce( 'WARNING: skipping import check for "%s"' % ext.name) return # Workaround for Cygwin: Cygwin currently has fork issues when many # modules have been imported if host_platform == 'cygwin': self.announce('WARNING: skipping import check for Cygwin-based "%s"' % ext.name) return ext_filename = os.path.join( self.build_lib, self.get_ext_filename(self.get_ext_fullname(ext.name))) # Don't try to load extensions for cross builds if cross_compiling: return try: imp.load_dynamic(ext.name, ext_filename) except ImportError, why: self.failed.append(ext.name) self.announce('*** WARNING: renaming "%s" since importing it' ' failed: %s' % (ext.name, why), level=3) assert not self.inplace basename, tail = os.path.splitext(ext_filename) newname = basename + "_failed" + tail if os.path.exists(newname): os.remove(newname) os.rename(ext_filename, newname) # XXX -- This relies on a Vile HACK in # distutils.command.build_ext.build_extension(). The # _built_objects attribute is stored there strictly for # use here. # If there is a failure, _built_objects may not be there, # so catch the AttributeError and move on. try: for filename in self._built_objects: os.remove(filename) except AttributeError: self.announce('unable to remove files (ignored)') except: exc_type, why, tb = sys.exc_info() self.announce('*** WARNING: importing extension "%s" ' 'failed with %s: %s' % (ext.name, exc_type, why), level=3) self.failed.append(ext.name) def add_multiarch_paths(self): # Debian/Ubuntu multiarch support. 
# https://wiki.ubuntu.com/MultiarchSpec cc = sysconfig.get_config_var('CC') tmpfile = os.path.join(self.build_temp, 'multiarch') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) ret = os.system( '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile)) multiarch_path_component = '' try: if ret >> 8 == 0: with open(tmpfile) as fp: multiarch_path_component = fp.readline().strip() finally: os.unlink(tmpfile) if multiarch_path_component != '': add_dir_to_list(self.compiler.library_dirs, '/usr/lib/' + multiarch_path_component) add_dir_to_list(self.compiler.include_dirs, '/usr/include/' + multiarch_path_component) return if not find_executable('dpkg-architecture'): return opt = '' if cross_compiling: opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE') tmpfile = os.path.join(self.build_temp, 'multiarch') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) ret = os.system( 'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' % (opt, tmpfile)) try: if ret >> 8 == 0: with open(tmpfile) as fp: multiarch_path_component = fp.readline().strip() add_dir_to_list(self.compiler.library_dirs, '/usr/lib/' + multiarch_path_component) add_dir_to_list(self.compiler.include_dirs, '/usr/include/' + multiarch_path_component) finally: os.unlink(tmpfile) def add_gcc_paths(self): gcc = sysconfig.get_config_var('CC') tmpfile = os.path.join(self.build_temp, 'gccpaths') if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile)) is_gcc = False in_incdirs = False inc_dirs = [] lib_dirs = [] try: if ret >> 8 == 0: with open(tmpfile) as fp: for line in fp.readlines(): if line.startswith("gcc version"): is_gcc = True elif line.startswith("#include <...>"): in_incdirs = True elif line.startswith("End of search list"): in_incdirs = False elif is_gcc and line.startswith("LIBRARY_PATH"): for d in line.strip().split("=")[1].split(":"): d = os.path.normpath(d) if '/gcc/' not 
in d: add_dir_to_list(self.compiler.library_dirs, d) elif is_gcc and in_incdirs and '/gcc/' not in line: add_dir_to_list(self.compiler.include_dirs, line.strip()) finally: os.unlink(tmpfile) def detect_modules(self): # Ensure that /usr/local is always used if not cross_compiling: add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') if cross_compiling: self.add_gcc_paths() self.add_multiarch_paths() # Add paths specified in the environment variables LDFLAGS and # CPPFLAGS for header and library files. # We must get the values from the Makefile and not the environment # directly since an inconsistently reproducible issue comes up where # the environment variable is not set even though the value were passed # into configure and stored in the Makefile (issue found on OS X 10.3). for env_var, arg_name, dir_list in ( ('LDFLAGS', '-R', self.compiler.runtime_library_dirs), ('LDFLAGS', '-L', self.compiler.library_dirs), ('CPPFLAGS', '-I', self.compiler.include_dirs)): env_val = sysconfig.get_config_var(env_var) if env_val: # To prevent optparse from raising an exception about any # options in env_val that it doesn't know about we strip out # all double dashes and any dashes followed by a character # that is not for the option we are dealing with. # # Please note that order of the regex is important! We must # strip out double-dashes first so that we don't end up with # substituting "--Long" to "-Long" and thus lead to "ong" being # used for a library directory. 
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1], ' ', env_val) parser = optparse.OptionParser() # Make sure that allowing args interspersed with options is # allowed parser.allow_interspersed_args = True parser.error = lambda msg: None parser.add_option(arg_name, dest="dirs", action="append") options = parser.parse_args(env_val.split())[0] if options.dirs: for directory in reversed(options.dirs): add_dir_to_list(dir_list, directory) if os.path.normpath(sys.prefix) != '/usr' \ and not sysconfig.get_config_var('PYTHONFRAMEWORK'): # OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework # (PYTHONFRAMEWORK is set) to avoid # linking problems when # building a framework with different architectures than # the one that is currently installed (issue #7473) add_dir_to_list(self.compiler.library_dirs, sysconfig.get_config_var("LIBDIR")) add_dir_to_list(self.compiler.include_dirs, sysconfig.get_config_var("INCLUDEDIR")) try: have_unicode = unicode except NameError: have_unicode = 0 # lib_dirs and inc_dirs are used to search for files; # if a file is found in one of those directories, it can # be assumed that no additional -I,-L directives are needed. 
inc_dirs = self.compiler.include_dirs[:] lib_dirs = self.compiler.library_dirs[:] if not cross_compiling: for d in ( '/usr/include', ): add_dir_to_list(inc_dirs, d) for d in ( '/lib64', '/usr/lib64', '/lib', '/usr/lib', ): add_dir_to_list(lib_dirs, d) exts = [] missing = [] config_h = sysconfig.get_config_h_filename() config_h_vars = sysconfig.parse_config_h(open(config_h)) srcdir = sysconfig.get_config_var('srcdir') # Check for AtheOS which has libraries in non-standard locations if host_platform == 'atheos': lib_dirs += ['/system/libs', '/atheos/autolnk/lib'] lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep) inc_dirs += ['/system/include', '/atheos/autolnk/include'] inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep) # OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb) if host_platform in ['osf1', 'unixware7', 'openunix8']: lib_dirs += ['/usr/ccs/lib'] # HP-UX11iv3 keeps files in lib/hpux folders. if host_platform == 'hp-ux11': lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32'] if host_platform == 'darwin': # This should work on any unixy platform ;-) # If the user has bothered specifying additional -I and -L flags # in OPT and LDFLAGS we might as well use them here. # NOTE: using shlex.split would technically be more correct, but # also gives a bootstrap problem. Let's hope nobody uses directories # with whitespace in the name to store libraries. cflags, ldflags = sysconfig.get_config_vars( 'CFLAGS', 'LDFLAGS') for item in cflags.split(): if item.startswith('-I'): inc_dirs.append(item[2:]) for item in ldflags.split(): if item.startswith('-L'): lib_dirs.append(item[2:]) # Check for MacOS X, which doesn't need libm.a at all math_libs = ['m'] if host_platform in ['darwin', 'beos']: math_libs = [] # XXX Omitted modules: gl, pure, dl, SGI-specific modules # # The following modules are all pretty straightforward, and compile # on pretty much any POSIXish platform. 
# # Some modules that are normally always on: #exts.append( Extension('_weakref', ['_weakref.c']) ) # array objects exts.append( Extension('array', ['arraymodule.c']) ) shared_math = 'Modules/_math.o' # complex math library functions exts.append( Extension('cmath', ['cmathmodule.c'], extra_objects=[shared_math], depends=['_math.h', shared_math], libraries=math_libs) ) # math library functions, e.g. sin() exts.append( Extension('math', ['mathmodule.c'], extra_objects=[shared_math], depends=['_math.h', shared_math], libraries=math_libs) ) # fast string operations implemented in C exts.append( Extension('strop', ['stropmodule.c']) ) # time operations and variables exts.append( Extension('time', ['timemodule.c'], libraries=math_libs) ) exts.append( Extension('datetime', ['datetimemodule.c', 'timemodule.c'], libraries=math_libs) ) # fast iterator tools implemented in C exts.append( Extension("itertools", ["itertoolsmodule.c"]) ) # code that will be builtins in the future, but conflict with the # current builtins exts.append( Extension('future_builtins', ['future_builtins.c']) ) # random number generator implemented in C exts.append( Extension("_random", ["_randommodule.c"]) ) # high-performance collections exts.append( Extension("_collections", ["_collectionsmodule.c"]) ) # bisect exts.append( Extension("_bisect", ["_bisectmodule.c"]) ) # heapq exts.append( Extension("_heapq", ["_heapqmodule.c"]) ) # operator.add() and similar goodies exts.append( Extension('operator', ['operator.c']) ) # Python 3.1 _io library exts.append( Extension("_io", ["_io/bufferedio.c", "_io/bytesio.c", "_io/fileio.c", "_io/iobase.c", "_io/_iomodule.c", "_io/stringio.c", "_io/textio.c"], depends=["_io/_iomodule.h"], include_dirs=["Modules/_io"])) # _functools exts.append( Extension("_functools", ["_functoolsmodule.c"]) ) # _json speedups exts.append( Extension("_json", ["_json.c"]) ) # Python C API test module exts.append( Extension('_testcapi', ['_testcapimodule.c'], 
depends=['testcapi_long.h']) ) # profilers (_lsprof is for cProfile.py) exts.append( Extension('_hotshot', ['_hotshot.c']) ) exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) ) # static Unicode character database if have_unicode: exts.append( Extension('unicodedata', ['unicodedata.c']) ) else: missing.append('unicodedata') # access to ISO C locale support data = open('pyconfig.h').read() m = re.search(r"#s*define\s+WITH_LIBINTL\s+1\s*", data) if m is not None: locale_libs = ['intl'] else: locale_libs = [] if host_platform == 'darwin': locale_extra_link_args = ['-framework', 'CoreFoundation'] else: locale_extra_link_args = [] exts.append( Extension('_locale', ['_localemodule.c'], libraries=locale_libs, extra_link_args=locale_extra_link_args) ) # Modules with some UNIX dependencies -- on by default: # (If you have a really backward UNIX, select and socket may not be # supported...) # fcntl(2) and ioctl(2) libs = [] if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)): # May be necessary on AIX for flock function libs = ['bsd'] exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) ) # pwd(3) exts.append( Extension('pwd', ['pwdmodule.c']) ) # grp(3) exts.append( Extension('grp', ['grpmodule.c']) ) # spwd, shadow passwords if (config_h_vars.get('HAVE_GETSPNAM', False) or config_h_vars.get('HAVE_GETSPENT', False)): exts.append( Extension('spwd', ['spwdmodule.c']) ) else: missing.append('spwd') # select(2); not on ancient System V exts.append( Extension('select', ['selectmodule.c']) ) # Fred Drake's interface to the Python parser exts.append( Extension('parser', ['parsermodule.c']) ) # cStringIO and cPickle exts.append( Extension('cStringIO', ['cStringIO.c']) ) exts.append( Extension('cPickle', ['cPickle.c']) ) # Memory-mapped files (also works on Win32). 
if host_platform not in ['atheos']: exts.append( Extension('mmap', ['mmapmodule.c']) ) else: missing.append('mmap') # Lance Ellinghaus's syslog module # syslog daemon interface exts.append( Extension('syslog', ['syslogmodule.c']) ) # George Neville-Neil's timing module: # Deprecated in PEP 4 http://www.python.org/peps/pep-0004.html # http://mail.python.org/pipermail/python-dev/2006-January/060023.html #exts.append( Extension('timing', ['timingmodule.c']) ) # # Here ends the simple stuff. From here on, modules need certain # libraries, are platform-specific, or present other surprises. # # Multimedia modules # These don't work for 64-bit platforms!!! # These represent audio samples or images as strings: # Operations on audio samples # According to #993173, this one should actually work fine on # 64-bit platforms. exts.append( Extension('audioop', ['audioop.c']) ) # Disabled on 64-bit platforms if sys.maxsize != 9223372036854775807L: # Operations on images exts.append( Extension('imageop', ['imageop.c']) ) else: missing.extend(['imageop']) # readline do_readline = self.compiler.find_library_file(lib_dirs, 'readline') readline_termcap_library = "" curses_library = "" # Determine if readline is already linked against curses or tinfo. if do_readline and find_executable('ldd'): fp = os.popen("ldd %s" % do_readline) ldd_output = fp.readlines() ret = fp.close() if ret is None or ret >> 8 == 0: for ln in ldd_output: if 'curses' in ln: readline_termcap_library = re.sub( r'.*lib(n?cursesw?)\.so.*', r'\1', ln ).rstrip() break if 'tinfo' in ln: # termcap interface split out from ncurses readline_termcap_library = 'tinfo' break # Issue 7384: If readline is already linked against curses, # use the same library for the readline and curses modules. 
if 'curses' in readline_termcap_library: curses_library = readline_termcap_library elif self.compiler.find_library_file(lib_dirs, 'ncursesw'): curses_library = 'ncursesw' elif self.compiler.find_library_file(lib_dirs, 'ncurses'): curses_library = 'ncurses' elif self.compiler.find_library_file(lib_dirs, 'curses'): curses_library = 'curses' if host_platform == 'darwin': os_release = int(os.uname()[2].split('.')[0]) dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') if (dep_target and (tuple(int(n) for n in dep_target.split('.')[0:2]) < (10, 5) ) ): os_release = 8 if os_release < 9: # MacOSX 10.4 has a broken readline. Don't try to build # the readline module unless the user has installed a fixed # readline package if find_file('readline/rlconf.h', inc_dirs, []) is None: do_readline = False if do_readline: if host_platform == 'darwin' and os_release < 9: # In every directory on the search path search for a dynamic # library and then a static library, instead of first looking # for dynamic libraries on the entire path. # This way a statically linked custom readline gets picked up # before the (possibly broken) dynamic library in /usr/lib. readline_extra_link_args = ('-Wl,-search_paths_first',) else: readline_extra_link_args = () readline_libs = ['readline'] if readline_termcap_library: pass # Issue 7384: Already linked against curses or tinfo. elif curses_library: readline_libs.append(curses_library) elif self.compiler.find_library_file(lib_dirs + ['/usr/lib/termcap'], 'termcap'): readline_libs.append('termcap') exts.append( Extension('readline', ['readline.c'], library_dirs=['/usr/lib/termcap'], extra_link_args=readline_extra_link_args, libraries=readline_libs) ) else: missing.append('readline') # crypt module. 
if self.compiler.find_library_file(lib_dirs, 'crypt'): libs = ['crypt'] else: libs = [] exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) ) # CSV files exts.append( Extension('_csv', ['_csv.c']) ) # socket(2) exts.append( Extension('_socket', ['socketmodule.c', 'timemodule.c'], depends=['socketmodule.h'], libraries=math_libs) ) # Detect SSL support for the socket module (via _ssl) search_for_ssl_incs_in = [ '/usr/local/ssl/include', '/usr/contrib/ssl/include/' ] ssl_incs = find_file('openssl/ssl.h', inc_dirs, search_for_ssl_incs_in ) if ssl_incs is not None: krb5_h = find_file('krb5.h', inc_dirs, ['/usr/kerberos/include']) if krb5_h: ssl_incs += krb5_h ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs, ['/usr/local/ssl/lib', '/usr/contrib/ssl/lib/' ] ) if (ssl_incs is not None and ssl_libs is not None): exts.append( Extension('_ssl', ['_ssl.c'], include_dirs = ssl_incs, library_dirs = ssl_libs, libraries = ['ssl', 'crypto'], depends = ['socketmodule.h']), ) else: missing.append('_ssl') # find out which version of OpenSSL we have openssl_ver = 0 openssl_ver_re = re.compile( '^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' ) # look for the openssl version header on the compiler search path. 
opensslv_h = find_file('openssl/opensslv.h', [], inc_dirs + search_for_ssl_incs_in) if opensslv_h: name = os.path.join(opensslv_h[0], 'openssl/opensslv.h') if host_platform == 'darwin' and is_macosx_sdk_path(name): name = os.path.join(macosx_sdk_root(), name[1:]) try: incfile = open(name, 'r') for line in incfile: m = openssl_ver_re.match(line) if m: openssl_ver = eval(m.group(1)) except IOError, msg: print "IOError while reading opensshv.h:", msg pass min_openssl_ver = 0x00907000 have_any_openssl = ssl_incs is not None and ssl_libs is not None have_usable_openssl = (have_any_openssl and openssl_ver >= min_openssl_ver) if have_any_openssl: if have_usable_openssl: # The _hashlib module wraps optimized implementations # of hash functions from the OpenSSL library. exts.append( Extension('_hashlib', ['_hashopenssl.c'], include_dirs = ssl_incs, library_dirs = ssl_libs, libraries = ['ssl', 'crypto']) ) else: print ("warning: openssl 0x%08x is too old for _hashlib" % openssl_ver) missing.append('_hashlib') if COMPILED_WITH_PYDEBUG or not have_usable_openssl: # The _sha module implements the SHA1 hash algorithm. exts.append( Extension('_sha', ['shamodule.c']) ) # The _md5 module implements the RSA Data Security, Inc. MD5 # Message-Digest Algorithm, described in RFC 1321. The # necessary files md5.c and md5.h are included here. exts.append( Extension('_md5', sources = ['md5module.c', 'md5.c'], depends = ['md5.h']) ) min_sha2_openssl_ver = 0x00908000 if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver: # OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash exts.append( Extension('_sha256', ['sha256module.c']) ) exts.append( Extension('_sha512', ['sha512module.c']) ) # Modules that provide persistent dictionary-like semantics. You will # probably want to arrange for at least one of them to be available on # your machine, though none are defined by default because of library # dependencies. 
The Python module anydbm.py provides an # implementation independent wrapper for these; dumbdbm.py provides # similar functionality (but slower of course) implemented in Python. # Sleepycat^WOracle Berkeley DB interface. # http://www.oracle.com/database/berkeley-db/db/index.html # # This requires the Sleepycat^WOracle DB code. The supported versions # are set below. Visit the URL above to download # a release. Most open source OSes come with one or more # versions of BerkeleyDB already installed. max_db_ver = (5, 3) min_db_ver = (4, 3) db_setup_debug = False # verbose debug prints from this script? def allow_db_ver(db_ver): """Returns a boolean if the given BerkeleyDB version is acceptable. Args: db_ver: A tuple of the version to verify. """ if not (min_db_ver <= db_ver <= max_db_ver): return False # Use this function to filter out known bad configurations. if (4, 6) == db_ver[:2]: # BerkeleyDB 4.6.x is not stable on many architectures. arch = platform_machine() if arch not in ('i386', 'i486', 'i586', 'i686', 'x86_64', 'ia64'): return False return True def gen_db_minor_ver_nums(major): if major == 5: for x in range(max_db_ver[1]+1): if allow_db_ver((5, x)): yield x elif major == 4: for x in range(9): if allow_db_ver((4, x)): yield x elif major == 3: for x in (3,): if allow_db_ver((3, x)): yield x else: raise ValueError("unknown major BerkeleyDB version", major) # construct a list of paths to look for the header file in on # top of the normal inc_dirs. 
db_inc_paths = [ '/usr/include/db4', '/usr/local/include/db4', '/opt/sfw/include/db4', '/usr/include/db3', '/usr/local/include/db3', '/opt/sfw/include/db3', # Fink defaults (http://fink.sourceforge.net/) '/sw/include/db4', '/sw/include/db3', ] # 4.x minor number specific paths for x in gen_db_minor_ver_nums(4): db_inc_paths.append('/usr/include/db4%d' % x) db_inc_paths.append('/usr/include/db4.%d' % x) db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x) db_inc_paths.append('/usr/local/include/db4%d' % x) db_inc_paths.append('/pkg/db-4.%d/include' % x) db_inc_paths.append('/opt/db-4.%d/include' % x) # MacPorts default (http://www.macports.org/) db_inc_paths.append('/opt/local/include/db4%d' % x) # 3.x minor number specific paths for x in gen_db_minor_ver_nums(3): db_inc_paths.append('/usr/include/db3%d' % x) db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x) db_inc_paths.append('/usr/local/include/db3%d' % x) db_inc_paths.append('/pkg/db-3.%d/include' % x) db_inc_paths.append('/opt/db-3.%d/include' % x) if cross_compiling: db_inc_paths = [] # Add some common subdirectories for Sleepycat DB to the list, # based on the standard include directories. This way DB3/4 gets # picked up when it is installed in a non-standard prefix and # the user has added that prefix into inc_dirs. 
std_variants = [] for dn in inc_dirs: std_variants.append(os.path.join(dn, 'db3')) std_variants.append(os.path.join(dn, 'db4')) for x in gen_db_minor_ver_nums(4): std_variants.append(os.path.join(dn, "db4%d"%x)) std_variants.append(os.path.join(dn, "db4.%d"%x)) for x in gen_db_minor_ver_nums(3): std_variants.append(os.path.join(dn, "db3%d"%x)) std_variants.append(os.path.join(dn, "db3.%d"%x)) db_inc_paths = std_variants + db_inc_paths db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)] db_ver_inc_map = {} if host_platform == 'darwin': sysroot = macosx_sdk_root() class db_found(Exception): pass try: # See whether there is a Sleepycat header in the standard # search path. for d in inc_dirs + db_inc_paths: f = os.path.join(d, "db.h") if host_platform == 'darwin' and is_macosx_sdk_path(d): f = os.path.join(sysroot, d[1:], "db.h") if db_setup_debug: print "db: looking for db.h in", f if os.path.exists(f): f = open(f).read() m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f) if m: db_major = int(m.group(1)) m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f) db_minor = int(m.group(1)) db_ver = (db_major, db_minor) # Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug if db_ver == (4, 6): m = re.search(r"#define\WDB_VERSION_PATCH\W(\d+)", f) db_patch = int(m.group(1)) if db_patch < 21: print "db.h:", db_ver, "patch", db_patch, print "being ignored (4.6.x must be >= 4.6.21)" continue if ( (db_ver not in db_ver_inc_map) and allow_db_ver(db_ver) ): # save the include directory with the db.h version # (first occurrence only) db_ver_inc_map[db_ver] = d if db_setup_debug: print "db.h: found", db_ver, "in", d else: # we already found a header for this library version if db_setup_debug: print "db.h: ignoring", d else: # ignore this header, it didn't contain a version number if db_setup_debug: print "db.h: no version number version in", d db_found_vers = db_ver_inc_map.keys() db_found_vers.sort() while db_found_vers: db_ver = db_found_vers.pop() db_incdir = 
db_ver_inc_map[db_ver] # check lib directories parallel to the location of the header db_dirs_to_check = [ db_incdir.replace("include", 'lib64'), db_incdir.replace("include", 'lib'), ] if host_platform != 'darwin': db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check) else: # Same as other branch, but takes OSX SDK into account tmp = [] for dn in db_dirs_to_check: if is_macosx_sdk_path(dn): if os.path.isdir(os.path.join(sysroot, dn[1:])): tmp.append(dn) else: if os.path.isdir(dn): tmp.append(dn) db_dirs_to_check = tmp # Look for a version specific db-X.Y before an ambiguous dbX # XXX should we -ever- look for a dbX name? Do any # systems really not name their library by version and # symlink to more general names? for dblib in (('db-%d.%d' % db_ver), ('db%d%d' % db_ver), ('db%d' % db_ver[0])): dblib_file = self.compiler.find_library_file( db_dirs_to_check + lib_dirs, dblib ) if dblib_file: dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ] raise db_found else: if db_setup_debug: print "db lib: ", dblib, "not found" except db_found: if db_setup_debug: print "bsddb using BerkeleyDB lib:", db_ver, dblib print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir db_incs = [db_incdir] dblibs = [dblib] # We add the runtime_library_dirs argument because the # BerkeleyDB lib we're linking against often isn't in the # system dynamic library search path. This is usually # correct and most trouble free, but may cause problems in # some unusual system configurations (e.g. the directory # is on an NFS server that goes away). exts.append(Extension('_bsddb', ['_bsddb.c'], depends = ['bsddb.h'], library_dirs=dblib_dir, runtime_library_dirs=dblib_dir, include_dirs=db_incs, libraries=dblibs)) else: if db_setup_debug: print "db: no appropriate library found" db_incs = None dblibs = [] dblib_dir = None missing.append('_bsddb') # The sqlite interface sqlite_setup_debug = False # verbose debug prints from this script? 
# We hunt for #define SQLITE_VERSION "n.n.n" # We need to find >= sqlite version 3.0.8 sqlite_incdir = sqlite_libdir = None sqlite_inc_paths = [ '/usr/include', '/usr/include/sqlite', '/usr/include/sqlite3', '/usr/local/include', '/usr/local/include/sqlite', '/usr/local/include/sqlite3', ] if cross_compiling: sqlite_inc_paths = [] MIN_SQLITE_VERSION_NUMBER = (3, 0, 8) MIN_SQLITE_VERSION = ".".join([str(x) for x in MIN_SQLITE_VERSION_NUMBER]) # Scan the default include directories before the SQLite specific # ones. This allows one to override the copy of sqlite on OSX, # where /usr/include contains an old version of sqlite. if host_platform == 'darwin': sysroot = macosx_sdk_root() for d_ in inc_dirs + sqlite_inc_paths: d = d_ if host_platform == 'darwin' and is_macosx_sdk_path(d): d = os.path.join(sysroot, d[1:]) f = os.path.join(d, "sqlite3.h") if os.path.exists(f): if sqlite_setup_debug: print "sqlite: found %s"%f incf = open(f).read() m = re.search( r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf) if m: sqlite_version = m.group(1) sqlite_version_tuple = tuple([int(x) for x in sqlite_version.split(".")]) if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER: # we win! 
if sqlite_setup_debug: print "%s/sqlite3.h: version %s"%(d, sqlite_version) sqlite_incdir = d break else: if sqlite_setup_debug: print "%s: version %d is too old, need >= %s"%(d, sqlite_version, MIN_SQLITE_VERSION) elif sqlite_setup_debug: print "sqlite: %s had no SQLITE_VERSION"%(f,) if sqlite_incdir: sqlite_dirs_to_check = [ os.path.join(sqlite_incdir, '..', 'lib64'), os.path.join(sqlite_incdir, '..', 'lib'), os.path.join(sqlite_incdir, '..', '..', 'lib64'), os.path.join(sqlite_incdir, '..', '..', 'lib'), ] sqlite_libfile = self.compiler.find_library_file( sqlite_dirs_to_check + lib_dirs, 'sqlite3') if sqlite_libfile: sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))] if sqlite_incdir and sqlite_libdir: sqlite_srcs = ['_sqlite/cache.c', '_sqlite/connection.c', '_sqlite/cursor.c', '_sqlite/microprotocols.c', '_sqlite/module.c', '_sqlite/prepare_protocol.c', '_sqlite/row.c', '_sqlite/statement.c', '_sqlite/util.c', ] sqlite_defines = [] if host_platform != "win32": sqlite_defines.append(('MODULE_NAME', '"sqlite3"')) else: sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"')) # Comment this out if you want the sqlite3 module to be able to load extensions. sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1")) if host_platform == 'darwin': # In every directory on the search path search for a dynamic # library and then a static library, instead of first looking # for dynamic libraries on the entire path. # This way a statically linked custom sqlite gets picked up # before the dynamic library in /usr/lib. sqlite_extra_link_args = ('-Wl,-search_paths_first',) else: sqlite_extra_link_args = () exts.append(Extension('_sqlite3', sqlite_srcs, define_macros=sqlite_defines, include_dirs=["Modules/_sqlite", sqlite_incdir], library_dirs=sqlite_libdir, extra_link_args=sqlite_extra_link_args, libraries=["sqlite3",])) else: missing.append('_sqlite3') # Look for Berkeley db 1.85. 
Note that it is built as a different # module name so it can be included even when later versions are # available. A very restrictive search is performed to avoid # accidentally building this module with a later version of the # underlying db library. May BSD-ish Unixes incorporate db 1.85 # symbols into libc and place the include file in /usr/include. # # If the better bsddb library can be built (db_incs is defined) # we do not build this one. Otherwise this build will pick up # the more recent berkeleydb's db.h file first in the include path # when attempting to compile and it will fail. f = "/usr/include/db.h" if host_platform == 'darwin': if is_macosx_sdk_path(f): sysroot = macosx_sdk_root() f = os.path.join(sysroot, f[1:]) if os.path.exists(f) and not db_incs: data = open(f).read() m = re.search(r"#s*define\s+HASHVERSION\s+2\s*", data) if m is not None: # bingo - old version used hash file format version 2 ### XXX this should be fixed to not be platform-dependent ### but I don't have direct access to an osf1 platform and ### seemed to be muffing the search somehow libraries = host_platform == "osf1" and ['db'] or None if libraries is not None: exts.append(Extension('bsddb185', ['bsddbmodule.c'], libraries=libraries)) else: exts.append(Extension('bsddb185', ['bsddbmodule.c'])) else: missing.append('bsddb185') else: missing.append('bsddb185') dbm_order = ['gdbm'] # The standard Unix dbm module: if host_platform not in ['cygwin']: config_args = [arg.strip("'") for arg in sysconfig.get_config_var("CONFIG_ARGS").split()] dbm_args = [arg for arg in config_args if arg.startswith('--with-dbmliborder=')] if dbm_args: dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":") else: dbm_order = "ndbm:gdbm:bdb".split(":") dbmext = None for cand in dbm_order: if cand == "ndbm": if find_file("ndbm.h", inc_dirs, []) is not None: # Some systems have -lndbm, others have -lgdbm_compat, # others don't have either if self.compiler.find_library_file(lib_dirs, 'ndbm'): 
ndbm_libs = ['ndbm'] elif self.compiler.find_library_file(lib_dirs, 'gdbm_compat'): ndbm_libs = ['gdbm_compat'] else: ndbm_libs = [] print "building dbm using ndbm" dbmext = Extension('dbm', ['dbmmodule.c'], define_macros=[ ('HAVE_NDBM_H',None), ], libraries=ndbm_libs) break elif cand == "gdbm": if self.compiler.find_library_file(lib_dirs, 'gdbm'): gdbm_libs = ['gdbm'] if self.compiler.find_library_file(lib_dirs, 'gdbm_compat'): gdbm_libs.append('gdbm_compat') if find_file("gdbm/ndbm.h", inc_dirs, []) is not None: print "building dbm using gdbm" dbmext = Extension( 'dbm', ['dbmmodule.c'], define_macros=[ ('HAVE_GDBM_NDBM_H', None), ], libraries = gdbm_libs) break if find_file("gdbm-ndbm.h", inc_dirs, []) is not None: print "building dbm using gdbm" dbmext = Extension( 'dbm', ['dbmmodule.c'], define_macros=[ ('HAVE_GDBM_DASH_NDBM_H', None), ], libraries = gdbm_libs) break elif cand == "bdb": if db_incs is not None: print "building dbm using bdb" dbmext = Extension('dbm', ['dbmmodule.c'], library_dirs=dblib_dir, runtime_library_dirs=dblib_dir, include_dirs=db_incs, define_macros=[ ('HAVE_BERKDB_H', None), ('DB_DBM_HSEARCH', None), ], libraries=dblibs) break if dbmext is not None: exts.append(dbmext) else: missing.append('dbm') # Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm: if ('gdbm' in dbm_order and self.compiler.find_library_file(lib_dirs, 'gdbm')): exts.append( Extension('gdbm', ['gdbmmodule.c'], libraries = ['gdbm'] ) ) else: missing.append('gdbm') # Unix-only modules if host_platform not in ['win32']: # Steen Lumholt's termios module exts.append( Extension('termios', ['termios.c']) ) # Jeremy Hylton's rlimit interface if host_platform not in ['atheos']: exts.append( Extension('resource', ['resource.c']) ) else: missing.append('resource') # Sun yellow pages. Some systems have the functions in libc. 
if (host_platform not in ['cygwin', 'atheos', 'qnx6'] and find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None): if (self.compiler.find_library_file(lib_dirs, 'nsl')): libs = ['nsl'] else: libs = [] exts.append( Extension('nis', ['nismodule.c'], libraries = libs) ) else: missing.append('nis') else: missing.extend(['nis', 'resource', 'termios']) # Curses support, requiring the System V version of curses, often # provided by the ncurses library. panel_library = 'panel' curses_incs = None if curses_library.startswith('ncurses'): if curses_library == 'ncursesw': # Bug 1464056: If _curses.so links with ncursesw, # _curses_panel.so must link with panelw. panel_library = 'panelw' curses_libs = [curses_library] curses_incs = find_file('curses.h', inc_dirs, [os.path.join(d, 'ncursesw') for d in inc_dirs]) exts.append( Extension('_curses', ['_cursesmodule.c'], include_dirs = curses_incs, libraries = curses_libs) ) elif curses_library == 'curses' and host_platform != 'darwin': # OSX has an old Berkeley curses, not good enough for # the _curses module. if (self.compiler.find_library_file(lib_dirs, 'terminfo')): curses_libs = ['curses', 'terminfo'] elif (self.compiler.find_library_file(lib_dirs, 'termcap')): curses_libs = ['curses', 'termcap'] else: curses_libs = ['curses'] exts.append( Extension('_curses', ['_cursesmodule.c'], libraries = curses_libs) ) else: missing.append('_curses') # If the curses module is enabled, check for the panel module if (module_enabled(exts, '_curses') and self.compiler.find_library_file(lib_dirs, panel_library)): exts.append( Extension('_curses_panel', ['_curses_panel.c'], include_dirs = curses_incs, libraries = [panel_library] + curses_libs) ) else: missing.append('_curses_panel') # Andrew Kuchling's zlib module. Note that some versions of zlib # 1.1.3 have security problems. 
See CERT Advisory CA-2002-07: # http://www.cert.org/advisories/CA-2002-07.html # # zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to # patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For # now, we still accept 1.1.3, because we think it's difficult to # exploit this in Python, and we'd rather make it RedHat's problem # than our problem <wink>. # # You can upgrade zlib to version 1.1.4 yourself by going to # http://www.gzip.org/zlib/ zlib_inc = find_file('zlib.h', [], inc_dirs) have_zlib = False if zlib_inc is not None: zlib_h = zlib_inc[0] + '/zlib.h' version = '"0.0.0"' version_req = '"1.1.3"' if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h): zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:]) fp = open(zlib_h) while 1: line = fp.readline() if not line: break if line.startswith('#define ZLIB_VERSION'): version = line.split()[2] break if version >= version_req: if (self.compiler.find_library_file(lib_dirs, 'z')): if host_platform == "darwin": zlib_extra_link_args = ('-Wl,-search_paths_first',) else: zlib_extra_link_args = () exts.append( Extension('zlib', ['zlibmodule.c'], libraries = ['z'], extra_link_args = zlib_extra_link_args)) have_zlib = True else: missing.append('zlib') else: missing.append('zlib') else: missing.append('zlib') # Helper module for various ascii-encoders. Uses zlib for an optimized # crc32 if we have it. Otherwise binascii uses its own. if have_zlib: extra_compile_args = ['-DUSE_ZLIB_CRC32'] libraries = ['z'] extra_link_args = zlib_extra_link_args else: extra_compile_args = [] libraries = [] extra_link_args = [] exts.append( Extension('binascii', ['binascii.c'], extra_compile_args = extra_compile_args, libraries = libraries, extra_link_args = extra_link_args) ) # Gustavo Niemeyer's bz2 module. 
if (self.compiler.find_library_file(lib_dirs, 'bz2')): if host_platform == "darwin": bz2_extra_link_args = ('-Wl,-search_paths_first',) else: bz2_extra_link_args = () exts.append( Extension('bz2', ['bz2module.c'], libraries = ['bz2'], extra_link_args = bz2_extra_link_args) ) else: missing.append('bz2') # Interface to the Expat XML parser # # Expat was written by James Clark and is now maintained by a group of # developers on SourceForge; see www.libexpat.org for more information. # The pyexpat module was written by Paul Prescod after a prototype by # Jack Jansen. The Expat source is included in Modules/expat/. Usage # of a system shared libexpat.so is possible with --with-system-expat # configure option. # # More information on Expat can be found at www.libexpat.org. # if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"): expat_inc = [] define_macros = [] expat_lib = ['expat'] expat_sources = [] expat_depends = [] else: expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')] define_macros = [ ('HAVE_EXPAT_CONFIG_H', '1'), ] expat_lib = [] expat_sources = ['expat/xmlparse.c', 'expat/xmlrole.c', 'expat/xmltok.c'] expat_depends = ['expat/ascii.h', 'expat/asciitab.h', 'expat/expat.h', 'expat/expat_config.h', 'expat/expat_external.h', 'expat/internal.h', 'expat/latin1tab.h', 'expat/utf8tab.h', 'expat/xmlrole.h', 'expat/xmltok.h', 'expat/xmltok_impl.h' ] exts.append(Extension('pyexpat', define_macros = define_macros, include_dirs = expat_inc, libraries = expat_lib, sources = ['pyexpat.c'] + expat_sources, depends = expat_depends, )) # Fredrik Lundh's cElementTree module. Note that this also # uses expat (via the CAPI hook in pyexpat). 
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')): define_macros.append(('USE_PYEXPAT_CAPI', None)) exts.append(Extension('_elementtree', define_macros = define_macros, include_dirs = expat_inc, libraries = expat_lib, sources = ['_elementtree.c'], depends = ['pyexpat.c'] + expat_sources + expat_depends, )) else: missing.append('_elementtree') # Hye-Shik Chang's CJKCodecs modules. if have_unicode: exts.append(Extension('_multibytecodec', ['cjkcodecs/multibytecodec.c'])) for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'): exts.append(Extension('_codecs_%s' % loc, ['cjkcodecs/_codecs_%s.c' % loc])) else: missing.append('_multibytecodec') for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'): missing.append('_codecs_%s' % loc) # Dynamic loading module if sys.maxint == 0x7fffffff: # This requires sizeof(int) == sizeof(long) == sizeof(char*) dl_inc = find_file('dlfcn.h', [], inc_dirs) if (dl_inc is not None) and (host_platform not in ['atheos']): exts.append( Extension('dl', ['dlmodule.c']) ) else: missing.append('dl') else: missing.append('dl') # Thomas Heller's _ctypes module self.detect_ctypes(inc_dirs, lib_dirs) # Richard Oudkerk's multiprocessing module if host_platform == 'win32': # Windows macros = dict() libraries = ['ws2_32'] elif host_platform == 'darwin': # Mac OSX macros = dict() libraries = [] elif host_platform == 'cygwin': # Cygwin macros = dict() libraries = [] elif host_platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'): # FreeBSD's P1003.1b semaphore support is very experimental # and has many known problems. 
(as of June 2008) macros = dict() libraries = [] elif host_platform.startswith('openbsd'): macros = dict() libraries = [] elif host_platform.startswith('netbsd'): macros = dict() libraries = [] else: # Linux and other unices macros = dict() libraries = ['rt'] if host_platform == 'win32': multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c', '_multiprocessing/semaphore.c', '_multiprocessing/pipe_connection.c', '_multiprocessing/socket_connection.c', '_multiprocessing/win32_functions.c' ] else: multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c', '_multiprocessing/socket_connection.c' ] if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')): multiprocessing_srcs.append('_multiprocessing/semaphore.c') if sysconfig.get_config_var('WITH_THREAD'): exts.append ( Extension('_multiprocessing', multiprocessing_srcs, define_macros=macros.items(), include_dirs=["Modules/_multiprocessing"])) else: missing.append('_multiprocessing') # End multiprocessing # Platform-specific libraries if host_platform == 'linux2': # Linux-specific modules exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) ) else: missing.append('linuxaudiodev') if (host_platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8') or host_platform.startswith("gnukfreebsd")): exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) ) else: missing.append('ossaudiodev') if host_platform == 'sunos5': # SunOS specific modules exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) ) else: missing.append('sunaudiodev') if host_platform == 'darwin': # _scproxy exts.append(Extension("_scproxy", [os.path.join(srcdir, "Mac/Modules/_scproxy.c")], extra_link_args= [ '-framework', 'SystemConfiguration', '-framework', 'CoreFoundation' ])) if host_platform == 'darwin' and ("--disable-toolbox-glue" not in sysconfig.get_config_var("CONFIG_ARGS")): if int(os.uname()[2].split('.')[0]) >= 8: # We're on Mac OS X 10.4 or later, 
the compiler should # support '-Wno-deprecated-declarations'. This will # suppress deprecation warnings for the Carbon extensions, # these extensions wrap the Carbon APIs and even those # parts that are deprecated. carbon_extra_compile_args = ['-Wno-deprecated-declarations'] else: carbon_extra_compile_args = [] # Mac OS X specific modules. def macSrcExists(name1, name2=''): if not name1: return None names = (name1,) if name2: names = (name1, name2) path = os.path.join(srcdir, 'Mac', 'Modules', *names) return os.path.exists(path) def addMacExtension(name, kwds, extra_srcs=[]): dirname = '' if name[0] == '_': dirname = name[1:].lower() cname = name + '.c' cmodulename = name + 'module.c' # Check for NNN.c, NNNmodule.c, _nnn/NNN.c, _nnn/NNNmodule.c if macSrcExists(cname): srcs = [cname] elif macSrcExists(cmodulename): srcs = [cmodulename] elif macSrcExists(dirname, cname): # XXX(nnorwitz): If all the names ended with module, we # wouldn't need this condition. ibcarbon is the only one. srcs = [os.path.join(dirname, cname)] elif macSrcExists(dirname, cmodulename): srcs = [os.path.join(dirname, cmodulename)] else: raise RuntimeError("%s not found" % name) # Here's the whole point: add the extension with sources exts.append(Extension(name, srcs + extra_srcs, **kwds)) # Core Foundation core_kwds = {'extra_compile_args': carbon_extra_compile_args, 'extra_link_args': ['-framework', 'CoreFoundation'], } addMacExtension('_CF', core_kwds, ['cf/pycfbridge.c']) addMacExtension('autoGIL', core_kwds) # Carbon carbon_kwds = {'extra_compile_args': carbon_extra_compile_args, 'extra_link_args': ['-framework', 'Carbon'], } CARBON_EXTS = ['ColorPicker', 'gestalt', 'MacOS', 'Nav', 'OSATerminology', 'icglue', # All these are in subdirs '_AE', '_AH', '_App', '_CarbonEvt', '_Cm', '_Ctl', '_Dlg', '_Drag', '_Evt', '_File', '_Folder', '_Fm', '_Help', '_Icn', '_IBCarbon', '_List', '_Menu', '_Mlte', '_OSA', '_Res', '_Qd', '_Qdoffs', '_Scrap', '_Snd', '_TE', ] for name in CARBON_EXTS: 
addMacExtension(name, carbon_kwds) # Workaround for a bug in the version of gcc shipped with Xcode 3. # The _Win extension should build just like the other Carbon extensions, but # this actually results in a hard crash of the linker. # if '-arch ppc64' in cflags and '-arch ppc' in cflags: win_kwds = {'extra_compile_args': carbon_extra_compile_args + ['-arch', 'i386', '-arch', 'ppc'], 'extra_link_args': ['-framework', 'Carbon', '-arch', 'i386', '-arch', 'ppc'], } addMacExtension('_Win', win_kwds) else: addMacExtension('_Win', carbon_kwds) # Application Services & QuickTime app_kwds = {'extra_compile_args': carbon_extra_compile_args, 'extra_link_args': ['-framework','ApplicationServices'], } addMacExtension('_Launch', app_kwds) addMacExtension('_CG', app_kwds) exts.append( Extension('_Qt', ['qt/_Qtmodule.c'], extra_compile_args=carbon_extra_compile_args, extra_link_args=['-framework', 'QuickTime', '-framework', 'Carbon']) ) self.extensions.extend(exts) # Call the method for detecting whether _tkinter can be compiled self.detect_tkinter(inc_dirs, lib_dirs) if '_tkinter' not in [e.name for e in self.extensions]: missing.append('_tkinter') ## # Uncomment these lines if you want to play with xxmodule.c ## ext = Extension('xx', ['xxmodule.c']) ## self.extensions.append(ext) return missing def detect_tkinter_explicitly(self): # Build _tkinter using explicit locations for Tcl/Tk. # # This is enabled when both arguments are given to ./configure: # # --with-tcltk-includes="-I/path/to/tclincludes \ # -I/path/to/tkincludes" # --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \ # -L/path/to/tklibs -ltkm.n" # # These values can also be specified or overridden via make: # make TCLTK_INCLUDES="..." TCLTK_LIBS="..." # # This can be useful for building and testing tkinter with multiple # versions of Tcl/Tk. Note that a build of Tk depends on a particular # build of Tcl so you need to specify both arguments and use care when # overriding. 
# The _TCLTK variables are created in the Makefile sharedmods target. tcltk_includes = os.environ.get('_TCLTK_INCLUDES') tcltk_libs = os.environ.get('_TCLTK_LIBS') if not (tcltk_includes and tcltk_libs): # Resume default configuration search. return 0 extra_compile_args = tcltk_includes.split() extra_link_args = tcltk_libs.split() ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'], define_macros=[('WITH_APPINIT', 1)], extra_compile_args = extra_compile_args, extra_link_args = extra_link_args, ) self.extensions.append(ext) return 1 def detect_tkinter_darwin(self, inc_dirs, lib_dirs): # The _tkinter module, using frameworks. Since frameworks are quite # different the UNIX search logic is not sharable. from os.path import join, exists framework_dirs = [ '/Library/Frameworks', '/System/Library/Frameworks/', join(os.getenv('HOME'), '/Library/Frameworks') ] sysroot = macosx_sdk_root() # Find the directory that contains the Tcl.framework and Tk.framework # bundles. # XXX distutils should support -F! for F in framework_dirs: # both Tcl.framework and Tk.framework should be present for fw in 'Tcl', 'Tk': if is_macosx_sdk_path(F): if not exists(join(sysroot, F[1:], fw + '.framework')): break else: if not exists(join(F, fw + '.framework')): break else: # ok, F is now directory with both frameworks. Continure # building break else: # Tk and Tcl frameworks not found. Normal "unix" tkinter search # will now resume. return 0 # For 8.4a2, we must add -I options that point inside the Tcl and Tk # frameworks. In later release we should hopefully be able to pass # the -F option to gcc, which specifies a framework lookup path. # include_dirs = [ join(F, fw + '.framework', H) for fw in 'Tcl', 'Tk' for H in 'Headers', 'Versions/Current/PrivateHeaders' ] # For 8.4a2, the X11 headers are not included. Rather than include a # complicated search, this is a hard-coded path. It could bail out # if X11 libs are not found... 
include_dirs.append('/usr/X11R6/include') frameworks = ['-framework', 'Tcl', '-framework', 'Tk'] # All existing framework builds of Tcl/Tk don't support 64-bit # architectures. cflags = sysconfig.get_config_vars('CFLAGS')[0] archs = re.findall('-arch\s+(\w+)', cflags) if is_macosx_sdk_path(F): fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(os.path.join(sysroot, F[1:]),)) else: fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(F,)) detected_archs = [] for ln in fp: a = ln.split()[-1] if a in archs: detected_archs.append(ln.split()[-1]) fp.close() for a in detected_archs: frameworks.append('-arch') frameworks.append(a) ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'], define_macros=[('WITH_APPINIT', 1)], include_dirs = include_dirs, libraries = [], extra_compile_args = frameworks[2:], extra_link_args = frameworks, ) self.extensions.append(ext) return 1 def detect_tkinter(self, inc_dirs, lib_dirs): # The _tkinter module. # Check whether --with-tcltk-includes and --with-tcltk-libs were # configured or passed into the make target. If so, use these values # to build tkinter and bypass the searches for Tcl and TK in standard # locations. if self.detect_tkinter_explicitly(): return # Rather than complicate the code below, detecting and building # AquaTk is a separate method. Only one Tkinter will be built on # Darwin - either AquaTk, if it is found, or X11 based Tk. if (host_platform == 'darwin' and self.detect_tkinter_darwin(inc_dirs, lib_dirs)): return # Assume we haven't found any of the libraries or include files # The versions with dots are used on Unix, and the versions without # dots on Windows, for detection by cygwin. 
tcllib = tklib = tcl_includes = tk_includes = None for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83', '8.2', '82', '8.1', '81', '8.0', '80']: tklib = self.compiler.find_library_file(lib_dirs, 'tk' + version) tcllib = self.compiler.find_library_file(lib_dirs, 'tcl' + version) if tklib and tcllib: # Exit the loop when we've found the Tcl/Tk libraries break # Now check for the header files if tklib and tcllib: # Check for the include files on Debian and {Free,Open}BSD, where # they're put in /usr/include/{tcl,tk}X.Y dotversion = version if '.' not in dotversion and "bsd" in host_platform.lower(): # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a, # but the include subdirs are named like .../include/tcl8.3. dotversion = dotversion[:-1] + '.' + dotversion[-1] tcl_include_sub = [] tk_include_sub = [] for dir in inc_dirs: tcl_include_sub += [dir + os.sep + "tcl" + dotversion] tk_include_sub += [dir + os.sep + "tk" + dotversion] tk_include_sub += tcl_include_sub tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub) tk_includes = find_file('tk.h', inc_dirs, tk_include_sub) if (tcllib is None or tklib is None or tcl_includes is None or tk_includes is None): self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2) return # OK... everything seems to be present for Tcl/Tk. 
include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = [] for dir in tcl_includes + tk_includes: if dir not in include_dirs: include_dirs.append(dir) # Check for various platform-specific directories if host_platform == 'sunos5': include_dirs.append('/usr/openwin/include') added_lib_dirs.append('/usr/openwin/lib') elif os.path.exists('/usr/X11R6/include'): include_dirs.append('/usr/X11R6/include') added_lib_dirs.append('/usr/X11R6/lib64') added_lib_dirs.append('/usr/X11R6/lib') elif os.path.exists('/usr/X11R5/include'): include_dirs.append('/usr/X11R5/include') added_lib_dirs.append('/usr/X11R5/lib') else: # Assume default location for X11 include_dirs.append('/usr/X11/include') added_lib_dirs.append('/usr/X11/lib') # If Cygwin, then verify that X is installed before proceeding if host_platform == 'cygwin': x11_inc = find_file('X11/Xlib.h', [], include_dirs) if x11_inc is None: return # Check for BLT extension if self.compiler.find_library_file(lib_dirs + added_lib_dirs, 'BLT8.0'): defs.append( ('WITH_BLT', 1) ) libs.append('BLT8.0') elif self.compiler.find_library_file(lib_dirs + added_lib_dirs, 'BLT'): defs.append( ('WITH_BLT', 1) ) libs.append('BLT') # Add the Tcl/Tk libraries libs.append('tk'+ version) libs.append('tcl'+ version) if host_platform in ['aix3', 'aix4']: libs.append('ld') # Finally, link with the X11 libraries (not appropriate on cygwin) if host_platform != "cygwin": libs.append('X11') ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'], define_macros=[('WITH_APPINIT', 1)] + defs, include_dirs = include_dirs, libraries = libs, library_dirs = added_lib_dirs, ) self.extensions.append(ext) # XXX handle these, but how to detect? 
# *** Uncomment and edit for PIL (TkImaging) extension only: # -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \ # *** Uncomment and edit for TOGL extension only: # -DWITH_TOGL togl.c \ # *** Uncomment these for TOGL extension only: # -lGL -lGLU -lXext -lXmu \ def configure_ctypes_darwin(self, ext): # Darwin (OS X) uses preconfigured files, in # the Modules/_ctypes/libffi_osx directory. srcdir = sysconfig.get_config_var('srcdir') ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules', '_ctypes', 'libffi_osx')) sources = [os.path.join(ffi_srcdir, p) for p in ['ffi.c', 'x86/darwin64.S', 'x86/x86-darwin.S', 'x86/x86-ffi_darwin.c', 'x86/x86-ffi64.c', 'powerpc/ppc-darwin.S', 'powerpc/ppc-darwin_closure.S', 'powerpc/ppc-ffi_darwin.c', 'powerpc/ppc64-darwin_closure.S', ]] # Add .S (preprocessed assembly) to C compiler source extensions. self.compiler.src_extensions.append('.S') include_dirs = [os.path.join(ffi_srcdir, 'include'), os.path.join(ffi_srcdir, 'powerpc')] ext.include_dirs.extend(include_dirs) ext.sources.extend(sources) return True def configure_ctypes(self, ext): if not self.use_system_libffi: if host_platform == 'darwin': return self.configure_ctypes_darwin(ext) srcdir = sysconfig.get_config_var('srcdir') ffi_builddir = os.path.join(self.build_temp, 'libffi') ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules', '_ctypes', 'libffi')) ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py') from distutils.dep_util import newer_group config_sources = [os.path.join(ffi_srcdir, fname) for fname in os.listdir(ffi_srcdir) if os.path.isfile(os.path.join(ffi_srcdir, fname))] if self.force or newer_group(config_sources, ffi_configfile): from distutils.dir_util import mkpath mkpath(ffi_builddir) config_args = [arg for arg in sysconfig.get_config_var("CONFIG_ARGS").split() if (('--host=' in arg) or ('--build=' in arg))] if not self.verbose: config_args.append("-q") # Pass empty CFLAGS because we'll just append the resulting # CFLAGS to Python's; 
-g or -O2 is to be avoided. cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \ % (ffi_builddir, ffi_srcdir, " ".join(config_args)) res = os.system(cmd) if res or not os.path.exists(ffi_configfile): print "Failed to configure _ctypes module" return False fficonfig = {} with open(ffi_configfile) as f: exec f in fficonfig # Add .S (preprocessed assembly) to C compiler source extensions. self.compiler.src_extensions.append('.S') include_dirs = [os.path.join(ffi_builddir, 'include'), ffi_builddir, os.path.join(ffi_srcdir, 'src')] extra_compile_args = fficonfig['ffi_cflags'].split() ext.sources.extend(os.path.join(ffi_srcdir, f) for f in fficonfig['ffi_sources']) ext.include_dirs.extend(include_dirs) ext.extra_compile_args.extend(extra_compile_args) return True def detect_ctypes(self, inc_dirs, lib_dirs): self.use_system_libffi = False include_dirs = [] extra_compile_args = [] extra_link_args = [] sources = ['_ctypes/_ctypes.c', '_ctypes/callbacks.c', '_ctypes/callproc.c', '_ctypes/stgdict.c', '_ctypes/cfield.c'] depends = ['_ctypes/ctypes.h'] if host_platform == 'darwin': sources.append('_ctypes/malloc_closure.c') sources.append('_ctypes/darwin/dlfcn_simple.c') extra_compile_args.append('-DMACOSX') include_dirs.append('_ctypes/darwin') # XXX Is this still needed? ## extra_link_args.extend(['-read_only_relocs', 'warning']) elif host_platform == 'sunos5': # XXX This shouldn't be necessary; it appears that some # of the assembler code is non-PIC (i.e. it has relocations # when it shouldn't. The proper fix would be to rewrite # the assembler code to be PIC. # This only works with GCC; the Sun compiler likely refuses # this option. If you want to compile ctypes with the Sun # compiler, please research a proper solution, instead of # finding some -z option for the Sun compiler. 
extra_link_args.append('-mimpure-text') elif host_platform.startswith('hp-ux'): extra_link_args.append('-fPIC') ext = Extension('_ctypes', include_dirs=include_dirs, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, libraries=[], sources=sources, depends=depends) ext_test = Extension('_ctypes_test', sources=['_ctypes/_ctypes_test.c']) self.extensions.extend([ext, ext_test]) if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"): return if host_platform == 'darwin': # OS X 10.5 comes with libffi.dylib; the include files are # in /usr/include/ffi inc_dirs.append('/usr/include/ffi') ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")] if not ffi_inc or ffi_inc[0] == '': ffi_inc = find_file('ffi.h', [], inc_dirs) if ffi_inc is not None: ffi_h = ffi_inc[0] + '/ffi.h' with open(ffi_h) as f: for line in f: line = line.strip() if line.startswith(('#define LIBFFI_H', '#define ffi_wrapper_h')): break else: ffi_inc = None print('Header file {} does not define LIBFFI_H or ' 'ffi_wrapper_h'.format(ffi_h)) ffi_lib = None if ffi_inc is not None: for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'): if (self.compiler.find_library_file(lib_dirs, lib_name)): ffi_lib = lib_name break if ffi_inc and ffi_lib: ext.include_dirs.extend(ffi_inc) ext.libraries.append(ffi_lib) self.use_system_libffi = True class PyBuildInstall(install): # Suppress the warning about installation into the lib_dynload # directory, which is not in sys.path when running Python during # installation: def initialize_options (self): install.initialize_options(self) self.warn_dir=0 class PyBuildInstallLib(install_lib): # Do exactly what install_lib does but make sure correct access modes get # set on installed directories and files. All installed files with get # mode 644 unless they are a shared library in which case they will get # mode 755. All installed directories will get mode 755. 
so_ext = sysconfig.get_config_var("SO") def install(self): outfiles = install_lib.install(self) self.set_file_modes(outfiles, 0644, 0755) self.set_dir_modes(self.install_dir, 0755) return outfiles def set_file_modes(self, files, defaultMode, sharedLibMode): if not self.is_chmod_supported(): return if not files: return for filename in files: if os.path.islink(filename): continue mode = defaultMode if filename.endswith(self.so_ext): mode = sharedLibMode log.info("changing mode of %s to %o", filename, mode) if not self.dry_run: os.chmod(filename, mode) def set_dir_modes(self, dirname, mode): if not self.is_chmod_supported(): return os.path.walk(dirname, self.set_dir_modes_visitor, mode) def set_dir_modes_visitor(self, mode, dirname, names): if os.path.islink(dirname): return log.info("changing mode of %s to %o", dirname, mode) if not self.dry_run: os.chmod(dirname, mode) def is_chmod_supported(self): return hasattr(os, 'chmod') SUMMARY = """ Python is an interpreted, interactive, object-oriented programming language. It is often compared to Tcl, Perl, Scheme or Java. Python combines remarkable power with very clear syntax. It has modules, classes, exceptions, very high level dynamic data types, and dynamic typing. There are interfaces to many system calls and libraries, as well as to various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules are easily written in C or C++. Python is also usable as an extension language for applications that need a programmable interface. The Python implementation is portable: it runs on many brands of UNIX, on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't listed here, it may still be supported, if there's a C compiler for it. Ask around on comp.lang.python -- or just try compiling Python yourself. 
""" CLASSIFIERS = """ Development Status :: 6 - Mature License :: OSI Approved :: Python Software Foundation License Natural Language :: English Programming Language :: C Programming Language :: Python Topic :: Software Development """ def main(): # turn off warnings when deprecated modules are imported import warnings warnings.filterwarnings("ignore",category=DeprecationWarning) setup(# PyPI Metadata (PEP 301) name = "Python", version = sys.version.split()[0], url = "http://www.python.org/%s" % sys.version[:3], maintainer = "Guido van Rossum and the Python community", maintainer_email = "[email protected]", description = "A high-level object-oriented programming language", long_description = SUMMARY.strip(), license = "PSF license", classifiers = filter(None, CLASSIFIERS.split("\n")), platforms = ["Many"], # Build info cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall, 'install_lib':PyBuildInstallLib}, # The struct module is defined here, because build_ext won't be # called unless there's at least one extension module defined. ext_modules=[Extension('_struct', ['_struct.c'])], # Scripts to install scripts = ['Tools/scripts/pydoc', 'Tools/scripts/idle', 'Tools/scripts/2to3', 'Lib/smtpd.py'] ) # --install-platlib if __name__ == '__main__': main()
{ "pile_set_name": "Github" }
************************************************************************ file with basedata : mf64_.bas initial value random generator: 1855 ************************************************************************ projects : 1 jobs (incl. supersource/sink ): 32 horizon : 260 RESOURCES - renewable : 2 R - nonrenewable : 2 N - doubly constrained : 0 D ************************************************************************ PROJECT INFORMATION: pronr. #jobs rel.date duedate tardcost MPM-Time 1 30 0 25 19 25 ************************************************************************ PRECEDENCE RELATIONS: jobnr. #modes #successors successors 1 1 3 2 3 4 2 3 3 6 8 10 3 3 3 9 15 17 4 3 2 5 7 5 3 3 9 12 13 6 3 1 21 7 3 3 9 18 23 8 3 1 11 9 3 2 16 22 10 3 2 12 15 11 3 3 12 16 20 12 3 3 14 21 25 13 3 3 16 19 30 14 3 1 17 15 3 2 23 28 16 3 1 26 17 3 1 23 18 3 2 20 21 19 3 2 29 31 20 3 2 22 29 21 3 1 24 22 3 2 24 30 23 3 1 27 24 3 1 28 25 3 3 26 27 28 26 3 1 31 27 3 2 29 30 28 3 1 31 29 3 1 32 30 3 1 32 31 3 1 32 32 1 0 ************************************************************************ REQUESTS/DURATIONS: jobnr. 
mode duration R 1 R 2 N 1 N 2 ------------------------------------------------------------------------ 1 1 0 0 0 0 0 2 1 1 7 5 7 10 2 3 6 4 6 9 3 9 6 3 3 9 3 1 2 8 4 8 8 2 5 8 4 6 7 3 6 7 3 4 5 4 1 3 6 2 10 2 2 6 6 1 9 2 3 10 3 1 8 1 5 1 5 5 4 9 7 2 10 4 2 8 4 3 10 5 1 7 4 6 1 5 8 7 4 10 2 7 7 5 3 9 3 10 5 4 3 9 7 1 1 1 4 9 6 2 7 1 4 8 5 3 8 1 4 4 5 8 1 1 5 1 4 6 2 9 4 1 3 5 3 9 3 1 4 5 9 1 2 7 6 9 5 2 6 5 6 5 4 3 9 3 5 4 2 10 1 3 7 7 9 3 2 7 7 7 5 3 3 8 4 5 3 3 11 1 5 2 9 10 8 2 6 2 9 5 7 3 9 2 8 2 5 12 1 2 8 6 8 7 2 6 5 4 6 4 3 9 3 2 6 3 13 1 1 3 8 8 8 2 2 2 5 8 6 3 10 1 4 6 4 14 1 2 7 9 9 8 2 5 5 8 6 7 3 9 4 5 5 7 15 1 2 10 9 10 9 2 4 9 6 9 5 3 9 9 5 8 2 16 1 3 6 6 8 4 2 4 6 5 7 4 3 9 6 2 7 3 17 1 2 9 5 5 6 2 4 7 3 4 6 3 9 4 3 4 5 18 1 2 6 5 8 8 2 6 6 4 7 4 3 10 5 3 4 1 19 1 6 4 5 4 6 2 6 5 5 3 6 3 10 2 5 1 4 20 1 6 7 6 3 9 2 7 5 5 2 9 3 8 4 4 2 9 21 1 1 4 6 10 7 2 6 3 6 9 7 3 9 3 6 9 4 22 1 4 6 6 9 6 2 7 5 5 9 4 3 10 2 5 9 3 23 1 2 5 7 9 5 2 2 4 7 10 7 3 3 3 7 4 2 24 1 1 2 2 5 8 2 3 2 1 4 8 3 5 2 1 1 7 25 1 6 8 2 3 9 2 7 8 1 3 5 3 10 7 1 2 3 26 1 2 6 9 4 7 2 5 6 7 3 6 3 8 6 6 3 6 27 1 4 4 8 6 8 2 5 3 7 6 7 3 7 2 7 3 6 28 1 1 7 7 7 7 2 4 7 6 5 7 3 8 6 6 2 5 29 1 4 9 10 10 9 2 7 7 7 8 8 3 10 4 5 7 8 30 1 4 9 7 9 9 2 8 8 5 7 6 3 10 5 3 2 6 31 1 6 2 7 7 5 2 8 1 7 5 5 3 9 1 6 3 4 32 1 0 0 0 0 0 ************************************************************************ RESOURCEAVAILABILITIES: R 1 R 2 N 1 N 2 37 34 222 212 ************************************************************************
{ "pile_set_name": "Github" }
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Linux system calls. // This file is compiled as ordinary Go code, // but it is also input to mksyscall, // which parses the //sys lines and generates system call stubs. // Note that sometimes we use a lowercase //sys name and // wrap it in our own nicer implementation. package unix import ( "encoding/binary" "runtime" "syscall" "unsafe" ) /* * Wrapped */ func Access(path string, mode uint32) (err error) { return Faccessat(AT_FDCWD, path, mode, 0) } func Chmod(path string, mode uint32) (err error) { return Fchmodat(AT_FDCWD, path, mode, 0) } func Chown(path string, uid int, gid int) (err error) { return Fchownat(AT_FDCWD, path, uid, gid, 0) } func Creat(path string, mode uint32) (fd int, err error) { return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) } //sys FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) //sys fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) (err error) { if pathname == "" { return fanotifyMark(fd, flags, mask, dirFd, nil) } p, err := BytePtrFromString(pathname) if err != nil { return err } return fanotifyMark(fd, flags, mask, dirFd, p) } //sys fchmodat(dirfd int, path string, mode uint32) (err error) func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior // and check the flags. Otherwise the mode would be applied to the symlink // destination which is not what the user expects. 
if flags&^AT_SYMLINK_NOFOLLOW != 0 { return EINVAL } else if flags&AT_SYMLINK_NOFOLLOW != 0 { return EOPNOTSUPP } return fchmodat(dirfd, path, mode) } //sys ioctl(fd int, req uint, arg uintptr) (err error) // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. // IoctlRetInt performs an ioctl operation specified by req on a device // associated with opened file descriptor fd, and returns a non-negative // integer that is returned by the ioctl syscall. func IoctlRetInt(fd int, req uint) (int, error) { ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) if err != 0 { return 0, err } return int(ret), nil } // IoctlSetPointerInt performs an ioctl operation which sets an // integer value on fd, using the specified request number. The ioctl // argument is called with a pointer to the integer value, rather than // passing the integer value directly. func IoctlSetPointerInt(fd int, req uint, value int) error { v := int32(value) return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) } func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return value, err } func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) return &value, err } func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { var value RTCWkAlrm err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) return &value, err } //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { return 
Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0) } func Mkdir(path string, mode uint32) (err error) { return Mkdirat(AT_FDCWD, path, mode) } func Mknod(path string, mode uint32, dev int) (err error) { return Mknodat(AT_FDCWD, path, mode, dev) } func Open(path string, mode int, perm uint32) (fd int, err error) { return openat(AT_FDCWD, path, mode|O_LARGEFILE, perm) } //sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { return openat(dirfd, path, flags|O_LARGEFILE, mode) } //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { if len(fds) == 0 { return ppoll(nil, 0, timeout, sigmask) } return ppoll(&fds[0], len(fds), timeout, sigmask) } //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) func Readlink(path string, buf []byte) (n int, err error) { return Readlinkat(AT_FDCWD, path, buf) } func Rename(oldpath string, newpath string) (err error) { return Renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath) } func Rmdir(path string) error { return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR) } //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) func Symlink(oldpath string, newpath string) (err error) { return Symlinkat(oldpath, AT_FDCWD, newpath) } func Unlink(path string) error { return Unlinkat(AT_FDCWD, path, 0) } //sys Unlinkat(dirfd int, path string, flags int) (err error) func Utimes(path string, tv []Timeval) error { if tv == nil { err := utimensat(AT_FDCWD, path, nil, 0) if err != ENOSYS { return err } return utimes(path, nil) } if len(tv) != 2 { return EINVAL } var ts [2]Timespec ts[0] = NsecToTimespec(TimevalToNsec(tv[0])) ts[1] = NsecToTimespec(TimevalToNsec(tv[1])) err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) if err != ENOSYS { return err } return utimes(path, 
(*[2]Timeval)(unsafe.Pointer(&tv[0]))) } //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) func UtimesNano(path string, ts []Timespec) error { if ts == nil { err := utimensat(AT_FDCWD, path, nil, 0) if err != ENOSYS { return err } return utimes(path, nil) } if len(ts) != 2 { return EINVAL } err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) if err != ENOSYS { return err } // If the utimensat syscall isn't available (utimensat was added to Linux // in 2.6.22, Released, 8 July 2007) then fall back to utimes var tv [2]Timeval for i := 0; i < 2; i++ { tv[i] = NsecToTimeval(TimespecToNsec(ts[i])) } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { if ts == nil { return utimensat(dirfd, path, nil, flags) } if len(ts) != 2 { return EINVAL } return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) } func Futimesat(dirfd int, path string, tv []Timeval) error { if tv == nil { return futimesat(dirfd, path, nil) } if len(tv) != 2 { return EINVAL } return futimesat(dirfd, path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } func Futimes(fd int, tv []Timeval) (err error) { // Believe it or not, this is the best we can do on Linux // (and is what glibc does). return Utimes("/proc/self/fd/"+itoa(fd), tv) } const ImplementsGetwd = true //sys Getcwd(buf []byte) (n int, err error) func Getwd() (wd string, err error) { var buf [PathMax]byte n, err := Getcwd(buf[0:]) if err != nil { return "", err } // Getcwd returns the number of bytes written to buf, including the NUL. if n < 1 || n > len(buf) || buf[n-1] != 0 { return "", EINVAL } return string(buf[0 : n-1]), nil } func Getgroups() (gids []int, err error) { n, err := getgroups(0, nil) if err != nil { return nil, err } if n == 0 { return nil, nil } // Sanity check group count. Max is 1<<16 on Linux. 
if n < 0 || n > 1<<20 { return nil, EINVAL } a := make([]_Gid_t, n) n, err = getgroups(n, &a[0]) if err != nil { return nil, err } gids = make([]int, n) for i, v := range a[0:n] { gids[i] = int(v) } return } func Setgroups(gids []int) (err error) { if len(gids) == 0 { return setgroups(0, nil) } a := make([]_Gid_t, len(gids)) for i, v := range gids { a[i] = _Gid_t(v) } return setgroups(len(a), &a[0]) } type WaitStatus uint32 // Wait status is 7 bits at bottom, either 0 (exited), // 0x7F (stopped), or a signal number that caused an exit. // The 0x80 bit is whether there was a core dump. // An extra number (exit code, signal causing a stop) // is in the high bits. At least that's the idea. // There are various irregularities. For example, the // "continued" status is 0xFFFF, distinguishing itself // from stopped via the core dump bit. const ( mask = 0x7F core = 0x80 exited = 0x00 stopped = 0x7F shift = 8 ) func (w WaitStatus) Exited() bool { return w&mask == exited } func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited } func (w WaitStatus) Stopped() bool { return w&0xFF == stopped } func (w WaitStatus) Continued() bool { return w == 0xFFFF } func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } func (w WaitStatus) ExitStatus() int { if !w.Exited() { return -1 } return int(w>>shift) & 0xFF } func (w WaitStatus) Signal() syscall.Signal { if !w.Signaled() { return -1 } return syscall.Signal(w & mask) } func (w WaitStatus) StopSignal() syscall.Signal { if !w.Stopped() { return -1 } return syscall.Signal(w>>shift) & 0xFF } func (w WaitStatus) TrapCause() int { if w.StopSignal() != SIGTRAP { return -1 } return int(w>>shift) >> 8 } //sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int wpid, err = wait4(pid, &status, options, rusage) if wstatus != nil { *wstatus = 
WaitStatus(status) } return } func Mkfifo(path string, mode uint32) error { return Mknod(path, mode|S_IFIFO, 0) } func Mkfifoat(dirfd int, path string, mode uint32) error { return Mknodat(dirfd, path, mode|S_IFIFO, 0) } func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL } sa.raw.Family = AF_INET p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) for i := 0; i < len(sa.Addr); i++ { sa.raw.Addr[i] = sa.Addr[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL } sa.raw.Family = AF_INET6 p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId for i := 0; i < len(sa.Addr); i++ { sa.raw.Addr[i] = sa.Addr[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { name := sa.Name n := len(name) if n >= len(sa.raw.Path) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX for i := 0; i < n; i++ { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. sl := _Socklen(2) if n > 0 { sl += _Socklen(n) + 1 } if sa.raw.Path[0] == '@' { sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- } return unsafe.Pointer(&sa.raw), sl, nil } // SockaddrLinklayer implements the Sockaddr interface for AF_PACKET type sockets. 
type SockaddrLinklayer struct { Protocol uint16 Ifindex int Hatype uint16 Pkttype uint8 Halen uint8 Addr [8]byte raw RawSockaddrLinklayer } func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff { return nil, 0, EINVAL } sa.raw.Family = AF_PACKET sa.raw.Protocol = sa.Protocol sa.raw.Ifindex = int32(sa.Ifindex) sa.raw.Hatype = sa.Hatype sa.raw.Pkttype = sa.Pkttype sa.raw.Halen = sa.Halen for i := 0; i < len(sa.Addr); i++ { sa.raw.Addr[i] = sa.Addr[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil } // SockaddrNetlink implements the Sockaddr interface for AF_NETLINK type sockets. type SockaddrNetlink struct { Family uint16 Pad uint16 Pid uint32 Groups uint32 raw RawSockaddrNetlink } func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_NETLINK sa.raw.Pad = sa.Pad sa.raw.Pid = sa.Pid sa.raw.Groups = sa.Groups return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil } // SockaddrHCI implements the Sockaddr interface for AF_BLUETOOTH type sockets // using the HCI protocol. type SockaddrHCI struct { Dev uint16 Channel uint16 raw RawSockaddrHCI } func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_BLUETOOTH sa.raw.Dev = sa.Dev sa.raw.Channel = sa.Channel return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil } // SockaddrL2 implements the Sockaddr interface for AF_BLUETOOTH type sockets // using the L2CAP protocol. 
type SockaddrL2 struct {
	PSM      uint16
	CID      uint16
	Addr     [6]uint8
	AddrType uint8
	raw      RawSockaddrL2
}

// sockaddr builds the raw AF_BLUETOOTH/L2CAP sockaddr. PSM and CID are
// stored least-significant byte first (the byte stores below write the
// low byte at index 0), and the bluetooth address is written into the
// raw structure in reversed byte order relative to Addr.
func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_BLUETOOTH
	psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
	psm[0] = byte(sa.PSM)
	psm[1] = byte(sa.PSM >> 8)
	for i := 0; i < len(sa.Addr); i++ {
		// Reverse: raw Bdaddr[0] receives Addr[5], etc.
		sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
	}
	cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
	cid[0] = byte(sa.CID)
	cid[1] = byte(sa.CID >> 8)
	sa.raw.Bdaddr_type = sa.AddrType
	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil
}

// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the RFCOMM protocol.
//
// Server example:
//
//      fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
//      _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
//      	Channel: 1,
//      	Addr:    [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
//      })
//      _ = Listen(fd, 1)
//      nfd, sa, _ := Accept(fd)
//      fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
//      Read(nfd, buf)
//
// Client example:
//
//      fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
//      _ = Connect(fd, &SockaddrRFCOMM{
//      	Channel: 1,
//      	Addr:    [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
//      })
//      Write(fd, []byte(`hello`))
type SockaddrRFCOMM struct {
	// Addr represents a bluetooth address, byte ordering is little-endian.
	Addr [6]uint8

	// Channel is a designated bluetooth channel, only 1-30 are available for use.
	// Since Linux 2.6.7 and further zero value is the first available channel.
	Channel uint8

	raw RawSockaddrRFCOMM
}

// sockaddr copies the RFCOMM address and channel into the raw structure
// and returns a pointer to it together with its size. Unlike L2CAP, the
// address array is assigned directly with no byte reversal.
func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_BLUETOOTH
	sa.raw.Channel = sa.Channel
	sa.raw.Bdaddr = sa.Addr
	return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil
}

// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
// The RxID and TxID fields are used for transport protocol addressing in
// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning.
//
// The SockaddrCAN struct must be bound to the socket file descriptor
// using Bind before the CAN socket can be used.
//
//      // Read one raw CAN frame
//      fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
//      addr := &SockaddrCAN{Ifindex: index}
//      Bind(fd, addr)
//      frame := make([]byte, 16)
//      Read(fd, frame)
//
// The full SocketCAN documentation can be found in the linux kernel
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
type SockaddrCAN struct {
	Ifindex int
	RxID    uint32
	TxID    uint32
	raw     RawSockaddrCAN
}

// sockaddr builds the raw AF_CAN sockaddr. RxID and TxID are copied into
// the raw address buffer in the platform's native byte order (the values
// are reinterpreted in place via unsafe.Pointer, not byte-swapped):
// RxID occupies Addr[0:4] and TxID occupies Addr[4:8]. Fails with EINVAL
// when the interface index does not fit in an int32.
func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_CAN
	sa.raw.Ifindex = int32(sa.Ifindex)
	rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
	for i := 0; i < 4; i++ {
		sa.raw.Addr[i] = rx[i]
	}
	tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
	for i := 0; i < 4; i++ {
		sa.raw.Addr[i+4] = tx[i]
	}
	return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
}

// SockaddrALG implements the Sockaddr interface for AF_ALG type sockets.
// SockaddrALG enables userspace access to the Linux kernel's cryptography
// subsystem. The Type and Name fields specify which type of hash or cipher
// should be used with a given socket.
//
// To create a file descriptor that provides access to a hash or cipher, both
// Bind and Accept must be used. Once the setup process is complete, input
// data can be written to the socket, processed by the kernel, and then read
// back as hash output or ciphertext.
//
// Here is an example of using an AF_ALG socket with SHA1 hashing.
// The initial socket setup process is as follows:
//
//      // Open a socket to perform SHA1 hashing.
//      fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
//      addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
//      unix.Bind(fd, addr)
//      // Note: unix.Accept does not work at this time; must invoke accept()
//      // manually using unix.Syscall.
//      hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
//
// Once a file descriptor has been returned from Accept, it may be used to
// perform SHA1 hashing. The descriptor is not safe for concurrent use, but
// may be re-used repeatedly with subsequent Write and Read operations.
//
// When hashing a small byte slice or string, a single Write and Read may
// be used:
//
//      // Assume hashfd is already configured using the setup process.
//      hash := os.NewFile(hashfd, "sha1")
//      // Hash an input string and read the results. Each Write discards
//      // previous hash state. Read always reads the current state.
//      b := make([]byte, 20)
//      for i := 0; i < 2; i++ {
//      	io.WriteString(hash, "Hello, world.")
//      	hash.Read(b)
//      	fmt.Println(hex.EncodeToString(b))
//      }
//      // Output:
//      // 2ae01472317d1935a84797ec1983ae243fc6aa28
//      // 2ae01472317d1935a84797ec1983ae243fc6aa28
//
// For hashing larger byte slices, or byte streams such as those read from
// a file or socket, use Sendto with MSG_MORE to instruct the kernel to update
// the hash digest instead of creating a new one for a given chunk and finalizing it.
//
//      // Assume hashfd and addr are already configured using the setup process.
//      hash := os.NewFile(hashfd, "sha1")
//      // Hash the contents of a file.
//      f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
//      b := make([]byte, 4096)
//      for {
//      	n, err := f.Read(b)
//      	if err == io.EOF {
//      		break
//      	}
//      	unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
//      }
//      hash.Read(b)
//      fmt.Println(hex.EncodeToString(b))
//      // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
//
// For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html.
type SockaddrALG struct {
	Type    string
	Name    string
	Feature uint32
	Mask    uint32
	raw     RawSockaddrALG
}

// sockaddr builds the raw AF_ALG sockaddr, rejecting Type/Name strings
// that would not fit in the fixed raw fields alongside a trailing NUL.
func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) {
	// Leave room for NUL byte terminator.
	if len(sa.Type) > 13 {
		return nil, 0, EINVAL
	}
	if len(sa.Name) > 63 {
		return nil, 0, EINVAL
	}

	sa.raw.Family = AF_ALG
	sa.raw.Feat = sa.Feature
	sa.raw.Mask = sa.Mask

	typ, err := ByteSliceFromString(sa.Type)
	if err != nil {
		return nil, 0, err
	}
	name, err := ByteSliceFromString(sa.Name)
	if err != nil {
		return nil, 0, err
	}

	copy(sa.raw.Type[:], typ)
	copy(sa.raw.Name[:], name)

	return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil
}

// SockaddrVM implements the Sockaddr interface for AF_VSOCK type sockets.
// SockaddrVM provides access to Linux VM sockets: a mechanism that enables
// bidirectional communication between a hypervisor and its guest virtual
// machines.
type SockaddrVM struct {
	// CID and Port specify a context ID and port address for a VM socket.
	// Guests have a unique CID, and hosts may have a well-known CID of:
	//  - VMADDR_CID_HYPERVISOR: refers to the hypervisor process.
	//  - VMADDR_CID_HOST: refers to other processes on the host.
	CID  uint32
	Port uint32
	raw  RawSockaddrVM
}

// sockaddr copies the CID and port into the raw AF_VSOCK structure and
// returns a pointer to it together with its size.
func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_VSOCK
	sa.raw.Port = sa.Port
	sa.raw.Cid = sa.CID

	return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil
}

// SockaddrXDP implements the Sockaddr interface for AF_XDP type sockets.
type SockaddrXDP struct {
	Flags        uint16
	Ifindex      uint32
	QueueID      uint32
	SharedUmemFD uint32
	raw          RawSockaddrXDP
}

// sockaddr copies the XDP fields into the raw structure and returns a
// pointer to it together with its size.
func (sa *SockaddrXDP) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_XDP
	sa.raw.Flags = sa.Flags
	sa.raw.Ifindex = sa.Ifindex
	sa.raw.Queue_id = sa.QueueID
	sa.raw.Shared_umem_fd = sa.SharedUmemFD

	return unsafe.Pointer(&sa.raw), SizeofSockaddrXDP, nil
}

// This constant mirrors the #define of PX_PROTO_OE in
// linux/if_pppox.h.
// We're defining this by hand here instead of
// autogenerating through mkerrors.sh because including
// linux/if_pppox.h causes some declaration conflicts with other
// includes (linux/if_pppox.h includes linux/in.h, which conflicts
// with netinet/in.h). Given that we only need a single zero constant
// out of that file, it's cleaner to just define it by hand here.
const px_proto_oe = 0

// SockaddrPPPoE implements the Sockaddr interface for AF_PPPOX sockets
// speaking the PPP-over-Ethernet protocol.
type SockaddrPPPoE struct {
	SID    uint16 // session ID, written to the raw struct in network byte order
	Remote []byte // remote MAC address; must be exactly 6 bytes
	Dev    string // network device name; must fit in IFNAMSIZ-1 bytes
	raw    RawSockaddrPPPoX
}

// sockaddr hand-packs the raw AF_PPPOX sockaddr byte by byte:
// family at [0:2], protocol at [2:6], SID at [6:8] (big-endian),
// remote MAC at [8:14], and a NUL-padded device name from [14].
func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if len(sa.Remote) != 6 {
		return nil, 0, EINVAL
	}
	if len(sa.Dev) > IFNAMSIZ-1 {
		return nil, 0, EINVAL
	}

	*(*uint16)(unsafe.Pointer(&sa.raw[0])) = AF_PPPOX
	// This next field is in host-endian byte order. We can't use the
	// same unsafe pointer cast as above, because this value is not
	// 32-bit aligned and some architectures don't allow unaligned
	// access.
	//
	// However, the value of px_proto_oe is 0, so we can use
	// encoding/binary helpers to write the bytes without worrying
	// about the ordering.
	binary.BigEndian.PutUint32(sa.raw[2:6], px_proto_oe)
	// This field is deliberately big-endian, unlike the previous
	// one. The kernel expects SID to be in network byte order.
	binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
	copy(sa.raw[8:14], sa.Remote)
	// Zero-fill the device name field before copying so any previous
	// contents are cleared, then write the (shorter) name over it.
	for i := 14; i < 14+IFNAMSIZ; i++ {
		sa.raw[i] = 0
	}
	copy(sa.raw[14:], sa.Dev)
	return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}

// SockaddrTIPC implements the Sockaddr interface for AF_TIPC type sockets.
// For more information on TIPC, see: http://tipc.sourceforge.net/.
type SockaddrTIPC struct {
	// Scope is the publication scopes when binding service/service range.
	// Should be set to TIPC_CLUSTER_SCOPE or TIPC_NODE_SCOPE.
	Scope int

	// Addr is the type of address used to manipulate a socket. Addr must be
	// one of:
	//  - *TIPCSocketAddr: "id" variant in the C addr union
	//  - *TIPCServiceRange: "nameseq" variant in the C addr union
	//  - *TIPCServiceName: "name" variant in the C addr union
	//
	// If nil, EINVAL will be returned when the structure is used.
	Addr TIPCAddr

	raw RawSockaddrTIPC
}

// TIPCAddr is implemented by types that can be used as an address for
// SockaddrTIPC. It is only implemented by *TIPCSocketAddr, *TIPCServiceRange,
// and *TIPCServiceName.
type TIPCAddr interface {
	tipcAddrtype() uint8
	tipcAddr() [12]byte
}

// tipcAddr serializes the socket-address variant into the fixed 12-byte
// union field by copying the struct's in-memory representation.
func (sa *TIPCSocketAddr) tipcAddr() [12]byte {
	var out [12]byte
	copy(out[:], (*(*[unsafe.Sizeof(TIPCSocketAddr{})]byte)(unsafe.Pointer(sa)))[:])
	return out
}

func (sa *TIPCSocketAddr) tipcAddrtype() uint8 { return TIPC_SOCKET_ADDR }

// tipcAddr serializes the service-range variant into the fixed 12-byte
// union field by copying the struct's in-memory representation.
func (sa *TIPCServiceRange) tipcAddr() [12]byte {
	var out [12]byte
	copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceRange{})]byte)(unsafe.Pointer(sa)))[:])
	return out
}

func (sa *TIPCServiceRange) tipcAddrtype() uint8 { return TIPC_SERVICE_RANGE }

// tipcAddr serializes the service-name variant into the fixed 12-byte
// union field by copying the struct's in-memory representation.
func (sa *TIPCServiceName) tipcAddr() [12]byte {
	var out [12]byte
	copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceName{})]byte)(unsafe.Pointer(sa)))[:])
	return out
}

func (sa *TIPCServiceName) tipcAddrtype() uint8 { return TIPC_SERVICE_ADDR }

// sockaddr builds the raw AF_TIPC sockaddr, delegating the union payload
// and its discriminator to the TIPCAddr implementation. Returns EINVAL
// when Addr is nil.
func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if sa.Addr == nil {
		return nil, 0, EINVAL
	}

	sa.raw.Family = AF_TIPC
	sa.raw.Scope = int8(sa.Scope)
	sa.raw.Addrtype = sa.Addr.tipcAddrtype()
	sa.raw.Addr = sa.Addr.tipcAddr()

	return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil
}

// SockaddrL2TPIP implements the Sockaddr interface for IPPROTO_L2TP/AF_INET sockets.
type SockaddrL2TPIP struct {
	Addr   [4]byte
	ConnId uint32
	raw    RawSockaddrL2TPIP
}

// sockaddr copies the IPv4 address and connection ID into the raw
// structure and returns a pointer to it together with its size.
func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_INET
	sa.raw.Conn_id = sa.ConnId
	for i := 0; i < len(sa.Addr); i++ {
		sa.raw.Addr[i] = sa.Addr[i]
	}
	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil
}

// SockaddrL2TPIP6 implements the Sockaddr interface for IPPROTO_L2TP/AF_INET6 sockets.
type SockaddrL2TPIP6 struct {
	Addr   [16]byte
	ZoneId uint32
	ConnId uint32
	raw    RawSockaddrL2TPIP6
}

// sockaddr copies the IPv6 address, zone and connection ID into the raw
// structure and returns a pointer to it together with its size.
func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_INET6
	sa.raw.Conn_id = sa.ConnId
	sa.raw.Scope_id = sa.ZoneId
	for i := 0; i < len(sa.Addr); i++ {
		sa.raw.Addr[i] = sa.Addr[i]
	}
	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil
}

// anyToSockaddr converts a raw sockaddr filled in by the kernel into the
// corresponding Sockaddr implementation, switching on the address family.
// The fd is needed because some families (AF_INET, AF_INET6, AF_BLUETOOTH)
// are further disambiguated by querying the socket's protocol via
// getsockopt(SO_PROTOCOL). Returns EAFNOSUPPORT for unhandled families.
func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
	switch rsa.Addr.Family {
	case AF_NETLINK:
		pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
		sa := new(SockaddrNetlink)
		sa.Family = pp.Family
		sa.Pad = pp.Pad
		sa.Pid = pp.Pid
		sa.Groups = pp.Groups
		return sa, nil

	case AF_PACKET:
		pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
		sa := new(SockaddrLinklayer)
		sa.Protocol = pp.Protocol
		sa.Ifindex = int(pp.Ifindex)
		sa.Hatype = pp.Hatype
		sa.Pkttype = pp.Pkttype
		sa.Halen = pp.Halen
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_UNIX:
		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
		sa := new(SockaddrUnix)
		if pp.Path[0] == 0 {
			// "Abstract" Unix domain socket.
			// Rewrite leading NUL as @ for textual display.
			// (This is the standard convention.)
			// Not friendly to overwrite in place,
			// but the callers below don't care.
			pp.Path[0] = '@'
		}

		// Assume path ends at NUL.
		// This is not technically the Linux semantics for
		// abstract Unix domain sockets--they are supposed
		// to be uninterpreted fixed-size binary blobs--but
		// everyone uses this convention.
		n := 0
		for n < len(pp.Path) && pp.Path[n] != 0 {
			n++
		}
		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
		sa.Name = string(bytes)
		return sa, nil

	case AF_INET:
		proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL)
		if err != nil {
			return nil, err
		}

		switch proto {
		case IPPROTO_L2TP:
			pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa))
			sa := new(SockaddrL2TPIP)
			sa.ConnId = pp.Conn_id
			for i := 0; i < len(sa.Addr); i++ {
				sa.Addr[i] = pp.Addr[i]
			}
			return sa, nil
		default:
			pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
			sa := new(SockaddrInet4)
			// Port is stored big-endian in the raw struct; decode it
			// byte by byte to avoid host-endianness assumptions.
			p := (*[2]byte)(unsafe.Pointer(&pp.Port))
			sa.Port = int(p[0])<<8 + int(p[1])
			for i := 0; i < len(sa.Addr); i++ {
				sa.Addr[i] = pp.Addr[i]
			}
			return sa, nil
		}

	case AF_INET6:
		proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL)
		if err != nil {
			return nil, err
		}

		switch proto {
		case IPPROTO_L2TP:
			pp := (*RawSockaddrL2TPIP6)(unsafe.Pointer(rsa))
			sa := new(SockaddrL2TPIP6)
			sa.ConnId = pp.Conn_id
			sa.ZoneId = pp.Scope_id
			for i := 0; i < len(sa.Addr); i++ {
				sa.Addr[i] = pp.Addr[i]
			}
			return sa, nil
		default:
			pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
			sa := new(SockaddrInet6)
			// Port is stored big-endian in the raw struct; decode it
			// byte by byte to avoid host-endianness assumptions.
			p := (*[2]byte)(unsafe.Pointer(&pp.Port))
			sa.Port = int(p[0])<<8 + int(p[1])
			sa.ZoneId = pp.Scope_id
			for i := 0; i < len(sa.Addr); i++ {
				sa.Addr[i] = pp.Addr[i]
			}
			return sa, nil
		}

	case AF_VSOCK:
		pp := (*RawSockaddrVM)(unsafe.Pointer(rsa))
		sa := &SockaddrVM{
			CID:  pp.Cid,
			Port: pp.Port,
		}
		return sa, nil
	case AF_BLUETOOTH:
		proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL)
		if err != nil {
			return nil, err
		}
		// only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections
		switch proto {
		case BTPROTO_L2CAP:
			pp := (*RawSockaddrL2)(unsafe.Pointer(rsa))
			sa := &SockaddrL2{
				PSM:      pp.Psm,
				CID:      pp.Cid,
				Addr:     pp.Bdaddr,
				AddrType: pp.Bdaddr_type,
			}
			return sa, nil
		case BTPROTO_RFCOMM:
			pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa))
			sa := &SockaddrRFCOMM{
				Channel: pp.Channel,
				Addr:    pp.Bdaddr,
			}
			return sa, nil
		}
	case AF_XDP:
		pp := (*RawSockaddrXDP)(unsafe.Pointer(rsa))
		sa := &SockaddrXDP{
			Flags:        pp.Flags,
			Ifindex:      pp.Ifindex,
			QueueID:      pp.Queue_id,
			SharedUmemFD: pp.Shared_umem_fd,
		}
		return sa, nil
	case AF_PPPOX:
		pp := (*RawSockaddrPPPoX)(unsafe.Pointer(rsa))
		// Only PPP-over-Ethernet (px_proto_oe) is supported here; the
		// protocol field lives at bytes [2:6] of the raw buffer.
		if binary.BigEndian.Uint32(pp[2:6]) != px_proto_oe {
			return nil, EINVAL
		}
		sa := &SockaddrPPPoE{
			SID:    binary.BigEndian.Uint16(pp[6:8]),
			Remote: pp[8:14],
		}
		// The device name is NUL-terminated within its IFNAMSIZ field.
		for i := 14; i < 14+IFNAMSIZ; i++ {
			if pp[i] == 0 {
				sa.Dev = string(pp[14:i])
				break
			}
		}
		return sa, nil
	case AF_TIPC:
		pp := (*RawSockaddrTIPC)(unsafe.Pointer(rsa))

		sa := &SockaddrTIPC{
			Scope: int(pp.Scope),
		}

		// Determine which union variant is present in pp.Addr by checking
		// pp.Addrtype.
		switch pp.Addrtype {
		case TIPC_SERVICE_RANGE:
			sa.Addr = (*TIPCServiceRange)(unsafe.Pointer(&pp.Addr))
		case TIPC_SERVICE_ADDR:
			sa.Addr = (*TIPCServiceName)(unsafe.Pointer(&pp.Addr))
		case TIPC_SOCKET_ADDR:
			sa.Addr = (*TIPCSocketAddr)(unsafe.Pointer(&pp.Addr))
		default:
			return nil, EINVAL
		}

		return sa, nil
	}
	return nil, EAFNOSUPPORT
}

// Accept accepts a connection on fd, returning the new descriptor and
// the peer's address. The new fd is closed if the address cannot be
// decoded.
func Accept(fd int) (nfd int, sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var len _Socklen = SizeofSockaddrAny
	nfd, err = accept(fd, &rsa, &len)
	if err != nil {
		return
	}
	sa, err = anyToSockaddr(fd, &rsa)
	if err != nil {
		Close(nfd)
		nfd = 0
	}
	return
}

// Accept4 is like Accept but also passes flags (e.g. SOCK_CLOEXEC) to
// the accept4 system call.
func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var len _Socklen = SizeofSockaddrAny
	nfd, err = accept4(fd, &rsa, &len, flags)
	if err != nil {
		return
	}
	if len > SizeofSockaddrAny {
		// The kernel reporting a length larger than the buffer we
		// handed it indicates memory corruption; fail loudly.
		panic("RawSockaddrAny too small")
	}
	sa, err = anyToSockaddr(fd, &rsa)
	if err != nil {
		Close(nfd)
		nfd = 0
	}
	return
}

// Getsockname returns the local address of the socket fd.
func Getsockname(fd int) (sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var len _Socklen = SizeofSockaddrAny
	if err = getsockname(fd, &rsa, &len); err != nil {
		return
	}
	return anyToSockaddr(fd, &rsa)
}

func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
	var value IPMreqn
	vallen := _Socklen(SizeofIPMreqn)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}

func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
	var value Ucred
	vallen := _Socklen(SizeofUcred)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}

func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
	var value TCPInfo
	vallen := _Socklen(SizeofTCPInfo)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}

// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
	buf := make([]byte, 256)
	vallen := _Socklen(len(buf))
	err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
	if err != nil {
		if err == ERANGE {
			// The kernel wrote back the required size; retry once
			// with a buffer of exactly that length.
			buf = make([]byte, vallen)
			err = getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
		}
		if err != nil {
			return "", err
		}
	}
	// Strip the trailing NUL terminator.
	return string(buf[:vallen-1]), nil
}

func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) {
	var value TpacketStats
	vallen := _Socklen(SizeofTpacketStats)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}

func GetsockoptTpacketStatsV3(fd, level, opt int) (*TpacketStatsV3, error) {
	var value TpacketStatsV3
	vallen := _Socklen(SizeofTpacketStatsV3)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}

func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
	return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
}

func SetsockoptPacketMreq(fd, level, opt int, mreq *PacketMreq) error {
	return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
}

// SetsockoptSockFprog attaches a classic BPF or an extended BPF program to a
// socket to filter incoming packets.  See 'man 7 socket' for usage information.
func SetsockoptSockFprog(fd, level, opt int, fprog *SockFprog) error {
	return setsockopt(fd, level, opt, unsafe.Pointer(fprog), unsafe.Sizeof(*fprog))
}

// SetsockoptCanRawFilter installs a CAN raw filter list on the socket.
// An empty slice is passed to the kernel as a NULL pointer with zero
// length, which clears the filter.
func SetsockoptCanRawFilter(fd, level, opt int, filter []CanFilter) error {
	var p unsafe.Pointer
	if len(filter) > 0 {
		p = unsafe.Pointer(&filter[0])
	}
	return setsockopt(fd, level, opt, p, uintptr(len(filter)*SizeofCanFilter))
}

func SetsockoptTpacketReq(fd, level, opt int, tp *TpacketReq) error {
	return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp))
}

func SetsockoptTpacketReq3(fd, level, opt int, tp *TpacketReq3) error {
	return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp))
}

// Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html)

// KeyctlInt calls keyctl commands in which each argument is an int.
// These commands are KEYCTL_REVOKE, KEYCTL_CHOWN, KEYCTL_CLEAR, KEYCTL_LINK,
// KEYCTL_UNLINK, KEYCTL_NEGATE, KEYCTL_SET_REQKEY_KEYRING, KEYCTL_SET_TIMEOUT,
// KEYCTL_ASSUME_AUTHORITY, KEYCTL_SESSION_TO_PARENT, KEYCTL_REJECT,
// KEYCTL_INVALIDATE, and KEYCTL_GET_PERSISTENT.
//sys	KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) = SYS_KEYCTL

// KeyctlBuffer calls keyctl commands in which the third and fourth
// arguments are a buffer and its length, respectively.
// These commands are KEYCTL_UPDATE, KEYCTL_READ, and KEYCTL_INSTANTIATE.
//sys	KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) = SYS_KEYCTL

// KeyctlString calls keyctl commands which return a string.
// These commands are KEYCTL_DESCRIBE and KEYCTL_GET_SECURITY.
func KeyctlString(cmd int, id int) (string, error) {
	// We must loop as the string data may change in between the syscalls.
	// We could allocate a large buffer here to reduce the chance that the
	// syscall needs to be called twice; however, this is unnecessary as
	// the performance loss is negligible.
	var buffer []byte
	for {
		// Try to fill the buffer with data
		length, err := KeyctlBuffer(cmd, id, buffer, 0)
		if err != nil {
			return "", err
		}

		// Check if the data was written
		if length <= len(buffer) {
			// Exclude the null terminator
			return string(buffer[:length-1]), nil
		}

		// Make a bigger buffer if needed
		buffer = make([]byte, length)
	}
}

// Keyctl commands with special signatures.

// KeyctlGetKeyringID implements the KEYCTL_GET_KEYRING_ID command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_get_keyring_ID.3.html
func KeyctlGetKeyringID(id int, create bool) (ringid int, err error) {
	createInt := 0
	if create {
		createInt = 1
	}
	return KeyctlInt(KEYCTL_GET_KEYRING_ID, id, createInt, 0, 0)
}

// KeyctlSetperm implements the KEYCTL_SETPERM command. The perm value is the
// key handle permission mask as described in the "keyctl setperm" section of
// http://man7.org/linux/man-pages/man1/keyctl.1.html.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_setperm.3.html
func KeyctlSetperm(id int, perm uint32) error {
	_, err := KeyctlInt(KEYCTL_SETPERM, id, int(perm), 0, 0)
	return err
}

//sys	keyctlJoin(cmd int, arg2 string) (ret int, err error) = SYS_KEYCTL

// KeyctlJoinSessionKeyring implements the KEYCTL_JOIN_SESSION_KEYRING command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_join_session_keyring.3.html
func KeyctlJoinSessionKeyring(name string) (ringid int, err error) {
	return keyctlJoin(KEYCTL_JOIN_SESSION_KEYRING, name)
}

//sys	keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) = SYS_KEYCTL

// KeyctlSearch implements the KEYCTL_SEARCH command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_search.3.html
func KeyctlSearch(ringid int, keyType, description string, destRingid int) (id int, err error) {
	return keyctlSearch(KEYCTL_SEARCH, ringid, keyType, description, destRingid)
}

//sys	keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) = SYS_KEYCTL

// KeyctlInstantiateIOV implements the KEYCTL_INSTANTIATE_IOV command. This
// command is similar to KEYCTL_INSTANTIATE, except that the payload is a slice
// of Iovec (each of which represents a buffer) instead of a single buffer.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_instantiate_iov.3.html
func KeyctlInstantiateIOV(id int, payload []Iovec, ringid int) error {
	return keyctlIOV(KEYCTL_INSTANTIATE_IOV, id, payload, ringid)
}

//sys	keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) = SYS_KEYCTL

// KeyctlDHCompute implements the KEYCTL_DH_COMPUTE command. This command
// computes a Diffie-Hellman shared secret based on the provided params. The
// secret is written to the provided buffer and the returned size is the number
// of bytes written (returning an error if there is insufficient space in the
// buffer). If a nil buffer is passed in, this function returns the minimum
// buffer length needed to store the appropriate data. Note that this differs
// from KEYCTL_READ's behavior which always returns the requested payload size.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_dh_compute.3.html
func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error) {
	return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer)
}

// KeyctlRestrictKeyring implements the KEYCTL_RESTRICT_KEYRING command. This
// command limits the set of keys that can be linked to the keyring, regardless
// of keyring permissions. The command requires the "setattr" permission.
//
// When called with an empty keyType the command locks the keyring, preventing
// any further keys from being linked to the keyring.
//
// The "asymmetric" keyType defines restrictions requiring key payloads to be
// DER encoded X.509 certificates signed by keys in another keyring. Restrictions
// for "asymmetric" include "builtin_trusted", "builtin_and_secondary_trusted",
// "key_or_keyring:<key>", and "key_or_keyring:<key>:chain".
//
// As of Linux 4.12, only the "asymmetric" keyType defines type-specific
// restrictions.
//
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_restrict_keyring.3.html
// http://man7.org/linux/man-pages/man2/keyctl.2.html
func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error {
	if keyType == "" {
		return keyctlRestrictKeyring(KEYCTL_RESTRICT_KEYRING, ringid)
	}
	return keyctlRestrictKeyringByType(KEYCTL_RESTRICT_KEYRING, ringid, keyType, restriction)
}

//sys	keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL
//sys	keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL

// Recvmsg receives a message from fd into p, with out-of-band (control)
// data read into oob. It returns the number of payload bytes, the number
// of control bytes, the received flags, and the source address (nil for
// connected sockets, which report AF_UNSPEC).
func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
	var msg Msghdr
	var rsa RawSockaddrAny
	msg.Name = (*byte)(unsafe.Pointer(&rsa))
	msg.Namelen = uint32(SizeofSockaddrAny)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = &p[0]
		iov.SetLen(len(p))
	}
	var dummy byte
	if len(oob) > 0 {
		if len(p) == 0 {
			var sockType int
			sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
			if err != nil {
				return
			}
			// receive at least one normal byte
			if sockType != SOCK_DGRAM {
				iov.Base = &dummy
				iov.SetLen(1)
			}
		}
		msg.Control = &oob[0]
		msg.SetControllen(len(oob))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = recvmsg(fd, &msg, flags); err != nil {
		return
	}
	oobn = int(msg.Controllen)
	recvflags = int(msg.Flags)
	// source address is only specified if the socket is unconnected
	if rsa.Addr.Family != AF_UNSPEC {
		from, err = anyToSockaddr(fd, &rsa)
	}
	return
}

// Sendmsg sends a message on fd to the given address, discarding the
// byte count; see SendmsgN.
func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
	_, err = SendmsgN(fd, p, oob, to, flags)
	return
}

// SendmsgN sends a message on fd to the given address, returning the
// number of payload bytes written. When only control data is sent on a
// stream socket, a single dummy byte is transmitted to carry it, but the
// reported count is forced back to zero.
func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
	var ptr unsafe.Pointer
	var salen _Socklen
	if to != nil {
		var err error
		ptr, salen, err = to.sockaddr()
		if err != nil {
			return 0, err
		}
	}
	var msg Msghdr
	msg.Name = (*byte)(ptr)
	msg.Namelen = uint32(salen)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = &p[0]
		iov.SetLen(len(p))
	}
	var dummy byte
	if len(oob) > 0 {
		if len(p) == 0 {
			var sockType int
			sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
			if err != nil {
				return 0, err
			}
			// send at least one normal byte
			if sockType != SOCK_DGRAM {
				iov.Base = &dummy
				iov.SetLen(1)
			}
		}
		msg.Control = &oob[0]
		msg.SetControllen(len(oob))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = sendmsg(fd, &msg, flags); err != nil {
		return 0, err
	}
	if len(oob) > 0 && len(p) == 0 {
		// Don't let the dummy byte leak into the caller's count.
		n = 0
	}
	return n, nil
}

// BindToDevice binds the socket associated with fd to device.
func BindToDevice(fd int, device string) (err error) {
	return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
}

//sys	ptrace(request int, pid int, addr uintptr, data uintptr) (err error)

func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
	// The peek requests are machine-size oriented, so we wrap it
	// to retrieve arbitrary-length data.

	// The ptrace syscall differs from glibc's ptrace.
	// Peeks returns the word in *data, not as the return value.

	var buf [SizeofPtr]byte

	// Leading edge. PEEKTEXT/PEEKDATA don't require aligned
	// access (PEEKUSER warns that it might), but if we don't
	// align our reads, we might straddle an unmapped page
	// boundary and not get the bytes leading up to the page
	// boundary.
	n := 0
	if addr%SizeofPtr != 0 {
		err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return 0, err
		}
		n += copy(out, buf[addr%SizeofPtr:])
		out = out[n:]
	}

	// Remainder.
	for len(out) > 0 {
		// We use an internal buffer to guarantee alignment.
		// It's not documented if this is necessary, but we're paranoid.
		err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return n, err
		}
		copied := copy(out, buf[0:])
		n += copied
		out = out[copied:]
	}

	return n, nil
}

func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
}

func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
}

func PtracePeekUser(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKUSR, pid, addr, out)
}

// ptracePoke writes data into the tracee one machine word at a time,
// using peek requests to read-modify-write partial words at the
// unaligned leading and trailing edges.
func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
	// As for ptracePeek, we need to align our accesses to deal
	// with the possibility of straddling an invalid page.

	// Leading edge.
	n := 0
	if addr%SizeofPtr != 0 {
		var buf [SizeofPtr]byte
		err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return 0, err
		}
		n += copy(buf[addr%SizeofPtr:], data)
		word := *((*uintptr)(unsafe.Pointer(&buf[0])))
		err = ptrace(pokeReq, pid, addr-addr%SizeofPtr, word)
		if err != nil {
			return 0, err
		}
		data = data[n:]
	}

	// Interior.
	for len(data) > SizeofPtr {
		word := *((*uintptr)(unsafe.Pointer(&data[0])))
		err = ptrace(pokeReq, pid, addr+uintptr(n), word)
		if err != nil {
			return n, err
		}
		n += SizeofPtr
		data = data[SizeofPtr:]
	}

	// Trailing edge.
	if len(data) > 0 {
		var buf [SizeofPtr]byte
		err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return n, err
		}
		copy(buf[0:], data)
		word := *((*uintptr)(unsafe.Pointer(&buf[0])))
		err = ptrace(pokeReq, pid, addr+uintptr(n), word)
		if err != nil {
			return n, err
		}
		n += len(data)
	}

	return n, nil
}

func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
}

func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
}

func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data)
}

func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
}

func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
}

func PtraceSetOptions(pid int, options int) (err error) {
	return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
}

func PtraceGetEventMsg(pid int) (msg uint, err error) {
	var data _C_long
	err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
	msg = uint(data)
	return
}

func PtraceCont(pid int, signal int) (err error) {
	return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
}

func PtraceSyscall(pid int, signal int) (err error) {
	return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
}

func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }

func PtraceInterrupt(pid int) (err error) { return ptrace(PTRACE_INTERRUPT, pid, 0, 0) }

func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }

func PtraceSeize(pid int) (err error) { return ptrace(PTRACE_SEIZE, pid, 0, 0) }

func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }

//sys	reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)

// Reboot invokes the reboot system call with the required magic numbers
// supplied automatically and an empty argument string.
func Reboot(cmd int) (err error) {
	return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
}

// direntIno extracts the inode number from a raw dirent buffer.
func direntIno(buf []byte) (uint64, bool) {
	return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}

// direntReclen extracts the record length from a raw dirent buffer.
func direntReclen(buf []byte) (uint64, bool) {
	return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}

// direntNamlen derives the name length from the record length, since
// the Linux dirent64 layout has the name as the final field.
func direntNamlen(buf []byte) (uint64, bool) {
	reclen, ok := direntReclen(buf)
	if !ok {
		return 0, false
	}
	return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
}

//sys	mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)

// Mount wraps the mount syscall, passing NULL instead of a pointer to an
// empty string for the data argument.
func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
	// Certain file systems get rather angry and EINVAL if you give
	// them an empty string of data, rather than NULL.
	if data == "" {
		return mount(source, target, fstype, flags, nil)
	}
	datap, err := BytePtrFromString(data)
	if err != nil {
		return err
	}
	return mount(source, target, fstype, flags, datap)
}

// Sendfile wraps the sendfile syscall. Under the race detector it first
// records a release on ioSync so the data transfer is ordered correctly.
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
	if raceenabled {
		raceReleaseMerge(unsafe.Pointer(&ioSync))
	}
	return sendfile(outfd, infd, offset, count)
}

// Sendto
// Recvfrom
// Socketpair

/*
 * Direct access
 */
//sys	Acct(path string) (err error)
//sys	AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error)
//sys	Adjtimex(buf *Timex) (state int, err error)
//sysnb	Capget(hdr *CapUserHeader, data *CapUserData) (err error)
//sysnb	Capset(hdr *CapUserHeader, data *CapUserData) (err error)
//sys	Chdir(path string) (err error)
//sys	Chroot(path string) (err error)
//sys	ClockGetres(clockid int32, res *Timespec) (err error)
//sys	ClockGettime(clockid int32, time *Timespec) (err error)
//sys	ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
//sys	Close(fd int) (err error)
//sys	CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys	DeleteModule(name string, flags int) (err error)
//sys	Dup(oldfd int) (fd int, err error)

// Dup2 duplicates oldfd onto newfd, falling back to dup3 on platforms
// where dup2 is unavailable or blocked.
func Dup2(oldfd, newfd int) error {
	// Android O and newer blocks dup2; riscv and arm64 don't implement dup2.
	if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" {
		return Dup3(oldfd, newfd, 0)
	}
	return dup2(oldfd, newfd)
}

//sys	Dup3(oldfd int, newfd int, flags int) (err error)
//sysnb	EpollCreate1(flag int) (fd int, err error)
//sysnb	EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
//sys	Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD2
//sys	Exit(code int) = SYS_EXIT_GROUP
//sys	Fallocate(fd int, mode uint32, off int64, len int64) (err error)
//sys	Fchdir(fd int) (err error)
//sys	Fchmod(fd int, mode uint32) (err error)
//sys	Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
//sys	Fdatasync(fd int) (err error)
//sys	Fgetxattr(fd int, attr string, dest []byte) (sz int, err error)
//sys	FinitModule(fd int, params string, flags int) (err error)
//sys	Flistxattr(fd int, dest []byte) (sz int, err error)
//sys	Flock(fd int, how int) (err error)
//sys	Fremovexattr(fd int, attr string) (err error)
//sys	Fsetxattr(fd int, attr string, dest []byte, flags int) (err error)
//sys	Fsync(fd int) (err error)
//sys	Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
//sysnb	Getpgid(pid int) (pgid int, err error)

// Getpgrp returns the process group of the calling process; the error
// from Getpgid(0) is ignored because it cannot fail for pid 0.
func Getpgrp() (pid int) {
	pid, _ = Getpgid(0)
	return
}

//sysnb	Getpid() (pid int)
//sysnb	Getppid() (ppid int)
//sys	Getpriority(which int, who int) (prio int, err error)
//sys	Getrandom(buf []byte, flags int) (n int, err error)
//sysnb	Getrusage(who int, rusage *Rusage) (err error)
//sysnb	Getsid(pid int) (sid int, err error)
//sysnb	Gettid() (tid int)
//sys	Getxattr(path string, attr string, dest []byte) (sz int, err error)
//sys	InitModule(moduleImage []byte, params string) (err error)
//sys	InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
//sysnb	InotifyInit1(flags int) (fd int, err error)
//sysnb	InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
//sysnb	Kill(pid int, sig syscall.Signal) (err error)
//sys	Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
//sys	Lgetxattr(path string, attr string, dest []byte) (sz int, err error)
//sys	Listxattr(path string, dest []byte) (sz int, err error)
//sys	Llistxattr(path string, dest []byte) (sz int, err error)
//sys	Lremovexattr(path string, attr string) (err error)
//sys	Lsetxattr(path string, attr string, data []byte, flags int) (err error)
//sys	MemfdCreate(name string, flags int) (fd int, err error)
//sys	Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys	Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys	Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys	PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
//sys	PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
//sysnb	prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
//sys	Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
//sys	Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
//sys	read(fd int, p []byte) (n int, err error)
//sys	Removexattr(path string, attr string) (err error)
//sys	Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
//sys	RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error)
//sys	Setdomainname(p []byte) (err error)
//sys	Sethostname(p []byte) (err error)
//sysnb	Setpgid(pid int, pgid int) (err error)
//sysnb	Setsid() (pid int, err error)
//sysnb	Settimeofday(tv *Timeval) (err error)
//sys	Setns(fd int, nstype int)
(err error) // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (int, error) { ret, _, err := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if err != 0 { return 0, err } return int(ret), nil } // issue 1435. // On linux Setuid and Setgid only affects the current thread, not the process. // This does not match what most callers expect so we must return an error // here rather than letting the caller think that the call succeeded. func Setuid(uid int) (err error) { return EOPNOTSUPP } func Setgid(uid int) (err error) { return EOPNOTSUPP } // SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set. // setfsgid(2) will return a non-nil error only if its caller lacks CAP_SETUID capability. // If the call fails due to other reasons, current fsgid will be returned. func SetfsgidRetGid(gid int) (int, error) { return setfsgid(gid) } // SetfsuidRetUid sets fsuid for current thread and returns previous fsuid set. // setfsgid(2) will return a non-nil error only if its caller lacks CAP_SETUID capability // If the call fails due to other reasons, current fsuid will be returned. 
func SetfsuidRetUid(uid int) (int, error) { return setfsuid(uid) } func Setfsgid(gid int) error { _, err := setfsgid(gid) return err } func Setfsuid(uid int) error { _, err := setfsuid(uid) return err } func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { return signalfd(fd, sigmask, _C__NSIG/8, flags) } //sys Setpriority(which int, who int, prio int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error) //sys signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) = SYS_SIGNALFD4 //sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) //sys Sync() //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) //sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) //sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) //sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) //sysnb Uname(buf *Utsname) (err error) //sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2 //sys Unshare(flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT //sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ //sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE //sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV //sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV //sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV //sys pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PWRITEV //sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) 
(n int, err error) = SYS_PREADV2 //sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 func bytes2iovec(bs [][]byte) []Iovec { iovecs := make([]Iovec, len(bs)) for i, b := range bs { iovecs[i].SetLen(len(b)) if len(b) > 0 { iovecs[i].Base = &b[0] } else { iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) } } return iovecs } // offs2lohi splits offs into its lower and upper unsigned long. On 64-bit // systems, hi will always be 0. On 32-bit systems, offs will be split in half. // preadv/pwritev chose this calling convention so they don't need to add a // padding-register for alignment on ARM. func offs2lohi(offs int64) (lo, hi uintptr) { return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) } func Readv(fd int, iovs [][]byte) (n int, err error) { iovecs := bytes2iovec(iovs) n, err = readv(fd, iovecs) readvRacedetect(iovecs, n, err) return n, err } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { iovecs := bytes2iovec(iovs) lo, hi := offs2lohi(offset) n, err = preadv(fd, iovecs, lo, hi) readvRacedetect(iovecs, n, err) return n, err } func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { iovecs := bytes2iovec(iovs) lo, hi := offs2lohi(offset) n, err = preadv2(fd, iovecs, lo, hi, flags) readvRacedetect(iovecs, n, err) return n, err } func readvRacedetect(iovecs []Iovec, n int, err error) { if !raceenabled { return } for i := 0; n > 0 && i < len(iovecs); i++ { m := int(iovecs[i].Len) if m > n { m = n } n -= m if m > 0 { raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) } } if err == nil { raceAcquire(unsafe.Pointer(&ioSync)) } } func Writev(fd int, iovs [][]byte) (n int, err error) { iovecs := bytes2iovec(iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } n, err = writev(fd, iovecs) writevRacedetect(iovecs, n) return n, err } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { iovecs := bytes2iovec(iovs) if raceenabled { 
raceReleaseMerge(unsafe.Pointer(&ioSync)) } lo, hi := offs2lohi(offset) n, err = pwritev(fd, iovecs, lo, hi) writevRacedetect(iovecs, n) return n, err } func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { iovecs := bytes2iovec(iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } lo, hi := offs2lohi(offset) n, err = pwritev2(fd, iovecs, lo, hi, flags) writevRacedetect(iovecs, n) return n, err } func writevRacedetect(iovecs []Iovec, n int) { if !raceenabled { return } for i := 0; n > 0 && i < len(iovecs); i++ { m := int(iovecs[i].Len) if m > n { m = n } n -= m if m > 0 { raceReadRange(unsafe.Pointer(iovecs[i].Base), m) } } } // mmap varies by architecture; see syscall_linux_*.go. //sys munmap(addr uintptr, length uintptr) (err error) var mapper = &mmapper{ active: make(map[*byte][]byte), mmap: mmap, munmap: munmap, } func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { return mapper.Mmap(fd, offset, length, prot, flags) } func Munmap(b []byte) (err error) { return mapper.Munmap(b) } //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) //sys Msync(b []byte, flags int) (err error) //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, // using the specified flags. 
func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
	// A nil/empty slice is passed to the kernel as a NULL iov pointer
	// with an iov count of zero.
	var p unsafe.Pointer
	if len(iovs) > 0 {
		p = unsafe.Pointer(&iovs[0])
	}

	n, _, errno := Syscall6(SYS_VMSPLICE, uintptr(fd), uintptr(p), uintptr(len(iovs)), uintptr(flags), 0, 0)
	if errno != 0 {
		return 0, syscall.Errno(errno)
	}

	return int(n), nil
}

// isGroupMember reports whether gid is one of the caller's supplementary
// group IDs. Any error from Getgroups is treated as "not a member".
func isGroupMember(gid int) bool {
	groups, err := Getgroups()
	if err != nil {
		return false
	}

	for _, g := range groups {
		if g == gid {
			return true
		}
	}
	return false
}

//sys	faccessat(dirfd int, path string, mode uint32) (err error)

// Faccessat checks the caller's permissions for the file at path relative
// to dirfd. When flags is non-zero the AT_EACCESS/AT_SYMLINK_NOFOLLOW
// semantics are emulated in user space (see the comment below).
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
	// Reject any flag other than the two we know how to emulate.
	if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
		return EINVAL
	}

	// The Linux kernel faccessat system call does not take any flags.
	// The glibc faccessat implements the flags itself; see
	// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/faccessat.c;hb=HEAD
	// Because people naturally expect syscall.Faccessat to act
	// like C faccessat, we do the same.

	if flags == 0 {
		return faccessat(dirfd, path, mode)
	}

	var st Stat_t
	if err := Fstatat(dirfd, path, &st, flags&AT_SYMLINK_NOFOLLOW); err != nil {
		return err
	}

	// Keep only the rwx bits of the requested mode; F_OK (0) always succeeds
	// once the stat above has succeeded.
	mode &= 7
	if mode == 0 {
		return nil
	}

	// AT_EACCESS means "check using the effective IDs", otherwise the real
	// IDs are used, matching access(2) semantics.
	var uid int
	if flags&AT_EACCESS != 0 {
		uid = Geteuid()
	} else {
		uid = Getuid()
	}

	if uid == 0 {
		if mode&1 == 0 {
			// Root can read and write any file.
			return nil
		}
		if st.Mode&0111 != 0 {
			// Root can execute any file that anybody can execute.
			return nil
		}
		return EACCES
	}

	// Select the owner, group or other permission bits, in that order.
	var fmode uint32
	if uint32(uid) == st.Uid {
		fmode = (st.Mode >> 6) & 7
	} else {
		var gid int
		if flags&AT_EACCESS != 0 {
			gid = Getegid()
		} else {
			gid = Getgid()
		}

		if uint32(gid) == st.Gid || isGroupMember(gid) {
			fmode = (st.Mode >> 3) & 7
		} else {
			fmode = st.Mode & 7
		}
	}

	// Every requested permission bit must be granted.
	if fmode&mode == mode {
		return nil
	}

	return EACCES
}

//sys	nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT
//sys	openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT

// fileHandle is the argument to nameToHandleAt and openByHandleAt. We
// originally tried to generate it via unix/linux/types.go with "type
// fileHandle C.struct_file_handle" but that generated empty structs
// for mips64 and mips64le. Instead, hard code it for now (it's the
// same everywhere else) until the mips64 generator issue is fixed.
type fileHandle struct {
	Bytes uint32
	Type  int32
}

// FileHandle represents the C struct file_handle used by
// name_to_handle_at (see NameToHandleAt) and open_by_handle_at (see
// OpenByHandleAt).
type FileHandle struct {
	*fileHandle
}

// NewFileHandle constructs a FileHandle whose variable-length payload
// is stored immediately after the fixed-size header.
func NewFileHandle(handleType int32, handle []byte) FileHandle {
	const hdrSize = unsafe.Sizeof(fileHandle{})
	buf := make([]byte, hdrSize+uintptr(len(handle)))
	copy(buf[hdrSize:], handle)
	fh := (*fileHandle)(unsafe.Pointer(&buf[0]))
	fh.Type = handleType
	fh.Bytes = uint32(len(handle))
	return FileHandle{fh}
}

// Size returns the length in bytes of the handle payload.
func (fh *FileHandle) Size() int   { return int(fh.fileHandle.Bytes) }

// Type returns the handle type.
func (fh *FileHandle) Type() int32 { return fh.fileHandle.Type }

// Bytes returns the raw handle payload, which lies in memory directly
// after the Type field (hence the +4 offset).
func (fh *FileHandle) Bytes() []byte {
	n := fh.Size()
	if n == 0 {
		return nil
	}
	return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n]
}

// NameToHandleAt wraps the name_to_handle_at system call; it obtains
// a handle for a path name.
func NameToHandleAt(dirfd int, path string, flags int) (handle FileHandle, mountID int, err error) {
	var mid _C_int
	// Try first with a small buffer, assuming the handle will
	// only be 32 bytes.
	size := uint32(32 + unsafe.Sizeof(fileHandle{}))
	didResize := false
	for {
		buf := make([]byte, size)
		fh := (*fileHandle)(unsafe.Pointer(&buf[0]))
		// Tell the kernel how much payload space the buffer provides.
		fh.Bytes = size - uint32(unsafe.Sizeof(fileHandle{}))
		err = nameToHandleAt(dirfd, path, fh, &mid, flags)
		if err == EOVERFLOW {
			if didResize {
				// We shouldn't need to resize more than once
				return
			}
			didResize = true
			// On EOVERFLOW the kernel stores the required payload size in
			// fh.Bytes; retry once with a buffer of exactly that size.
			size = fh.Bytes + uint32(unsafe.Sizeof(fileHandle{}))
			continue
		}
		if err != nil {
			return
		}
		return FileHandle{fh}, int(mid), nil
	}
}

// OpenByHandleAt wraps the open_by_handle_at system call; it opens a
// file via a handle as previously returned by NameToHandleAt.
func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err error) {
	return openByHandleAt(mountFD, handle.fileHandle, flags)
}

// Klogset wraps the sys_syslog system call; it sets console_loglevel to
// the value specified by arg and passes a dummy pointer to bufp.
func Klogset(typ int, arg int) (err error) {
	// p stays nil on purpose: this syslog command ignores the buffer
	// pointer and only uses the integer argument.
	var p unsafe.Pointer
	_, _, errno := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(p), uintptr(arg))
	if errno != 0 {
		return errnoErr(errno)
	}
	return nil
}

/*
 * Unimplemented
 */
// AfsSyscall
// Alarm
// ArchPrctl
// Brk
// ClockNanosleep
// ClockSettime
// Clone
// EpollCtlOld
// EpollPwait
// EpollWaitOld
// Execve
// Fork
// Futex
// GetKernelSyms
// GetMempolicy
// GetRobustList
// GetThreadArea
// Getitimer
// Getpmsg
// IoCancel
// IoDestroy
// IoGetevents
// IoSetup
// IoSubmit
// IoprioGet
// IoprioSet
// KexecLoad
// LookupDcookie
// Mbind
// MigratePages
// Mincore
// ModifyLdt
// Mount
// MovePages
// MqGetsetattr
// MqNotify
// MqOpen
// MqTimedreceive
// MqTimedsend
// MqUnlink
// Mremap
// Msgctl
// Msgget
// Msgrcv
// Msgsnd
// Nfsservctl
// Personality
// Pselect6
// Ptrace
// Putpmsg
// Quotactl
// Readahead
// Readv
// RemapFilePages
// RestartSyscall
// RtSigaction
// RtSigpending
// RtSigprocmask
// RtSigqueueinfo
// RtSigreturn
// RtSigsuspend
// RtSigtimedwait
// SchedGetPriorityMax
// SchedGetPriorityMin
// SchedGetparam
// SchedGetscheduler
// SchedRrGetInterval
// SchedSetparam
// SchedYield
// Security
// Semctl
// Semget
// Semop
// Semtimedop
// SetMempolicy
// SetRobustList
// SetThreadArea
// SetTidAddress
// Shmat
// Shmctl
// Shmdt
// Shmget
// Sigaltstack
// Swapoff
// Swapon
// Sysfs
// TimerCreate
// TimerDelete
// TimerGetoverrun
// TimerGettime
// TimerSettime
// Tkill (obsolete)
// Tuxcall
// Umount2
// Uselib
// Utimensat
// Vfork
// Vhangup
// Vserver
// Waitid
// _Sysctl
{ "pile_set_name": "Github" }
package apoc.neighbors;

import apoc.result.ListResult;
import apoc.result.LongResult;
import apoc.result.NodeListResult;
import apoc.result.NodeResult;
import org.neo4j.graphdb.*;
import org.neo4j.internal.helpers.collection.Iterables;
import org.neo4j.internal.helpers.collection.Pair;
import org.neo4j.procedure.Context;
import org.neo4j.procedure.Description;
import org.neo4j.procedure.Name;
import org.neo4j.procedure.Procedure;
import org.roaringbitmap.longlong.Roaring64NavigableMap;

import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import static apoc.path.RelationshipTypeAndDirections.parse;

/**
 * Procedures returning the distinct neighbours of a node reachable over a
 * relationship type/direction pattern, either within ("tohop"), grouped by
 * ("byhop") or exactly at ("athop") a given number of hops.
 *
 * Node ids are tracked in {@link Roaring64NavigableMap} bitmaps so large
 * neighbourhoods can be expanded without materialising node objects; the
 * shared BFS logic lives in the private helpers below.
 */
public class Neighbors {

    @Context
    public Transaction tx;

    /** Relationships of node matching one parsed type/direction pair. */
    private Iterable<Relationship> getRelationshipsByTypeAndDirection(Node node, Pair<RelationshipType, Direction> typesAndDirection) {
        // as policy if both elements in the pair are null we return an empty result
        if (typesAndDirection.first() == null) {
            return typesAndDirection.other() == null ?
                    Iterables.empty() : node.getRelationships(typesAndDirection.other());
        }
        if (typesAndDirection.other() == null) {
            // first() is known to be non-null here
            return node.getRelationships(typesAndDirection.first());
        }
        return node.getRelationships(typesAndDirection.other(), typesAndDirection.first());
    }

    // Adds the ids of all nodes directly reachable from node over the given
    // type/direction pairs into the target bitmap.
    private void addNeighbors(Node node, List<Pair<RelationshipType, Direction>> typesAndDirections, Roaring64NavigableMap into) {
        long nodeId = node.getId();
        for (Pair<RelationshipType, Direction> pair : typesAndDirections) {
            for (Relationship r : getRelationshipsByTypeAndDirection(node, pair)) {
                into.addLong(r.getOtherNodeId(nodeId));
            }
        }
    }

    // Expands every node id in frontier by one hop, accumulating the reached
    // ids in the target bitmap.
    private void expandFrontier(Roaring64NavigableMap frontier, List<Pair<RelationshipType, Direction>> typesAndDirections, Roaring64NavigableMap into) {
        Iterator<Long> iterator = frontier.iterator();
        while (iterator.hasNext()) {
            addNeighbors(tx.getNodeById(iterator.next()), typesAndDirections, into);
        }
    }

    // BFS shared by the "tohop" procedures: all distinct node ids reachable in
    // 1..distance hops, excluding the start node itself.
    private Roaring64NavigableMap neighborsUpTo(Node node, List<Pair<RelationshipType, Direction>> typesAndDirections, long distance) {
        final long startNodeId = node.getId();
        Roaring64NavigableMap seen = new Roaring64NavigableMap();
        Roaring64NavigableMap nextA = new Roaring64NavigableMap();
        Roaring64NavigableMap nextB = new Roaring64NavigableMap();
        seen.addLong(startNodeId);

        // First Hop
        addNeighbors(node, typesAndDirections, nextB);

        // Ping-pong between the two frontier bitmaps, two hops per iteration.
        for (long i = 1; i < distance; i++) {
            // next even Hop
            nextB.andNot(seen);
            seen.or(nextB);
            nextA.clear();
            expandFrontier(nextB, typesAndDirections, nextA);

            i++;
            if (i < distance) {
                // next odd Hop
                nextA.andNot(seen);
                seen.or(nextA);
                nextB.clear();
                expandFrontier(nextA, typesAndDirections, nextB);
            }
        }
        // Which bitmap holds the final frontier depends on the parity of distance.
        if ((distance % 2) == 0) {
            seen.or(nextA);
        } else {
            seen.or(nextB);
        }
        // remove starting node
        seen.removeLong(startNodeId);
        return seen;
    }

    // BFS shared by the "byhop"/"athop" procedures: seen[i] holds the node ids
    // first reached at exactly i+1 hops (earlier hops are subtracted away).
    // NOTE: as in the original implementation the start node is only removed
    // from hops > 1, so a self-loop can leave it in seen[0].
    private Roaring64NavigableMap[] neighborsPerHop(Node node, List<Pair<RelationshipType, Direction>> typesAndDirections, int distance) {
        Roaring64NavigableMap[] seen = new Roaring64NavigableMap[distance];
        for (int i = 0; i < distance; i++) {
            seen[i] = new Roaring64NavigableMap();
        }
        long startNodeId = node.getId();

        // First Hop
        addNeighbors(node, typesAndDirections, seen[0]);

        for (int i = 1; i < distance; i++) {
            expandFrontier(seen[i - 1], typesAndDirections, seen[i]);
            for (int j = 0; j < i; j++) {
                seen[i].andNot(seen[j]);
                seen[i].removeLong(startNodeId);
            }
        }
        return seen;
    }

    // Converts a bitmap of node ids into a sorted stream of NodeResult rows.
    private Stream<NodeResult> toNodeResults(Roaring64NavigableMap ids) {
        return StreamSupport.stream(Spliterators.spliteratorUnknownSize(ids.iterator(), Spliterator.SORTED), false)
                .map(x -> new NodeResult(tx.getNodeById(x)));
    }

    @Procedure("apoc.neighbors.tohop")
    @Description("apoc.neighbors.tohop(node, rel-direction-pattern, distance) - returns distinct nodes of the given relationships in the pattern up to a certain distance, can use '>' or '<' for all outgoing or incoming relationships")
    public Stream<NodeResult> neighbors(@Name("node") Node node, @Name(value = "types", defaultValue = "") String types, @Name(value="distance", defaultValue = "1") Long distance) {
        if (distance < 1) return Stream.empty();
        if (types==null || types.isEmpty()) return Stream.empty();

        return toNodeResults(neighborsUpTo(node, parse(types), distance));
    }

    @Procedure("apoc.neighbors.tohop.count")
    @Description("apoc.neighbors.tohop.count(node, rel-direction-pattern, distance) - returns distinct count of nodes of the given relationships in the pattern up to a certain distance, can use '>' or '<' for all outgoing or incoming relationships")
    public Stream<LongResult> neighborsCount(@Name("node") Node node, @Name(value = "types", defaultValue = "") String types, @Name(value="distance", defaultValue = "1") Long distance) {
        if (distance < 1) return Stream.empty();
        if (types==null || types.isEmpty()) return Stream.empty();

        return Stream.of(new LongResult(neighborsUpTo(node, parse(types), distance).getLongCardinality()));
    }

    @Procedure("apoc.neighbors.byhop")
    @Description("apoc.neighbors.byhop(node, rel-direction-pattern, distance) - returns distinct nodes of the given relationships in the pattern at each distance, can use '>' or '<' for all outgoing or incoming relationships")
    public Stream<NodeListResult> neighborsByHop(@Name("node") Node node, @Name(value = "types", defaultValue = "") String types, @Name(value="distance", defaultValue = "1") Long distance) {
        if (distance < 1) return Stream.empty();
        if (types==null || types.isEmpty()) return Stream.empty();

        Roaring64NavigableMap[] seen = neighborsPerHop(node, parse(types), distance.intValue());
        return Arrays.stream(seen).map(x -> new NodeListResult(
                StreamSupport.stream(Spliterators.spliteratorUnknownSize(x.iterator(), Spliterator.SORTED), false)
                        .map(y -> tx.getNodeById((long) y))
                        .collect(Collectors.toList())));
    }

    @Procedure("apoc.neighbors.byhop.count")
    @Description("apoc.neighbors.byhop.count(node, rel-direction-pattern, distance) - returns distinct nodes of the given relationships in the pattern at each distance, can use '>' or '<' for all outgoing or incoming relationships")
    public Stream<ListResult> neighborsByHopCount(@Name("node") Node node, @Name(value = "types", defaultValue = "") String types, @Name(value="distance", defaultValue = "1") Long distance) {
        if (distance < 1) return Stream.empty();
        if (types==null || types.isEmpty()) return Stream.empty();

        Roaring64NavigableMap[] seen = neighborsPerHop(node, parse(types), distance.intValue());
        // Raw ArrayList kept on purpose: ListResult's constructor takes
        // List<Object>, so a typed List<Long> would not compile.
        ArrayList counts = new ArrayList<Long>();
        for (int i = 0; i < distance; i++) {
            counts.add(seen[i].getLongCardinality());
        }
        return Stream.of(new ListResult(counts));
    }

    @Procedure("apoc.neighbors.athop")
    @Description("apoc.neighbors.athop(node, rel-direction-pattern, distance) - returns distinct nodes of the given relationships in the pattern at a distance, can use '>' or '<' for all outgoing or incoming relationships")
    public Stream<NodeResult> neighborsAtHop(@Name("node") Node node, @Name(value = "types", defaultValue = "") String types, @Name(value="distance", defaultValue = "1") Long distance) {
        if (distance < 1) return Stream.empty();
        if (types==null || types.isEmpty()) return Stream.empty();

        Roaring64NavigableMap[] seen = neighborsPerHop(node, parse(types), distance.intValue());
        // Only the last hop's bitmap is reported.
        return toNodeResults(seen[distance.intValue() - 1]);
    }

    @Procedure("apoc.neighbors.athop.count")
    @Description("apoc.neighbors.athop.count(node, rel-direction-pattern, distance) - returns distinct nodes of the given relationships in the pattern at a distance, can use '>' or '<' for all outgoing or incoming relationships")
    public Stream<LongResult> neighborsAtHopCount(@Name("node") Node node, @Name(value = "types", defaultValue = "") String types, @Name(value="distance", defaultValue = "1") Long distance) {
        if (distance < 1) return Stream.empty();
        if (types == null || types.isEmpty()) return Stream.empty();

        Roaring64NavigableMap[] seen = neighborsPerHop(node, parse(types), distance.intValue());
        return Stream.of(new LongResult(seen[distance.intValue() - 1].getLongCardinality()));
    }
}
{ "pile_set_name": "Github" }
require File.expand_path('../boot', __FILE__)

require 'rails/all'

# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)

# Top-level Rails application definition; framework defaults are applied
# here and refined per-environment under config/environments/*.
module SampleApp
  class Application < Rails::Application
    # Settings in config/environments/* take precedence over those specified here.
    # Application configuration should go into files in config/initializers
    # -- all .rb files in that directory are automatically loaded.

    # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
    # Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
    # config.time_zone = 'Central Time (US & Canada)'

    # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
    # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
    # config.i18n.default_locale = :de

    # Do not swallow errors in after_commit/after_rollback callbacks.
    config.active_record.raise_in_transactional_callbacks = true
  end
end
{ "pile_set_name": "Github" }
-- Minimal package: a single threaded executable named "foo".
name:                also-has-exe-foo
version:             0.1.0.0
build-type:          Simple
cabal-version:       >=1.10

executable foo
  hs-source-dirs:      app
  main-is:             Main.hs
  -- Threaded RTS with runtime options enabled; -N uses all available cores.
  ghc-options:         -threaded -rtsopts -with-rtsopts=-N
  build-depends:       base
  default-language:    Haskell2010
{ "pile_set_name": "Github" }
# QA Test Plan: React Transcript Editor

Last updated: _26th August 2019_

version: _>=1.0.6_

This doc provides a lightweight set of steps and checklists for manual QA testing. This should be done before every major release.

_The assumption underlying this doc is that anyone, even without technical skills, should be able to conduct QA testing._

## Overview

React Transcript Editor is a React component that makes transcribing audio and video easier and faster.

Please see the project repository's GitHub [README](https://github.com/bbc/react-transcript-editor/blob/master/README.md) page for an overview of what the component does and how it works (https://github.com/bbc/react-transcript-editor).

<!-- Here is a video demo of the main use case: https://youtu.be/4z143-nJlzs. -->

## Where to test:

<!-- _URL of where testing should be performed (staging, sandbox)_ -->

The app can be tested using the demo application at https://bbc.github.io/react-transcript-editor/

For updating the demo app with the latest version of the component [see these instructions](https://github.com/bbc/react-transcript-editor#build---demo)

## Where to log bugs:

<!-- _Provide link to Fogbugz, Github, Trello, etc. Also include to whom the bugs should be assigned (if applicable)._ -->

If you find any bugs or issues, please [open an issue in Github](https://github.com/bbc/react-transcript-editor/issues/new?template=bug_report.md), label it as 'QA Testing', and assign it to [@jamesdools](https://github.com/jamesdools).

If it's connected to one or more of the QA steps listed below, make a note of the corresponding number.

For things like typos feel free to directly do a PR with the changes.

## Browsers/devices:

By default we aim for the component to work on the following versions of the following browsers and devices.
- [ ] Desktop - Mac - Chrome
- [ ] Desktop - Windows - Chrome
- [ ] Desktop - Windows - Internet Explorer _(Because of BBC users)_

Other browsers and devices are not part of our core effort but we welcome feedback. When you raise an issue, please indicate the operating system, device, and browser.

---

## Items to test

There are 6 main parts for QA testing:

0. [Component Interface](0-component-interface.md)
1. [Player Controls](1-player-controls.md)
2. [Timed Text Editor](2-timed-text-editor.md)
3. [Settings](3-settings.md)
4. [Keyboard Shortcuts](4-keyboard-shortcuts.md)
5. [Analytics](5-analytics.md)

See each section for more details.
{ "pile_set_name": "Github" }
<?php

namespace AlibabaCloud\Client\Resolver;

use RuntimeException;
use ArgumentCountError;

/**
 * Trait CallTrait
 *
 * Resolves magic accessor calls on request objects:
 * getFoo() reads the "Foo" parameter, withFoo($value) / setFoo($value) write it.
 *
 * @codeCoverageIgnore
 * @package AlibabaCloud\Client\Resolver
 */
trait CallTrait
{
    /**
     * Magic method for set or get request parameters.
     *
     * Dispatches on the method-name prefix:
     *  - "get":  returns the stored parameter value via __get().
     *  - "with": stores the first argument under the parameter name and
     *            records it in the parameter-position map; fluent.
     *  - "set":  alias that forwards to the matching with...() method.
     *
     * @param string $name
     * @param mixed $arguments
     *
     * @return $this|mixed fluent $this for with/set calls, the stored value for get calls
     */
    public function __call($name, $arguments)
    {
        if (strncmp($name, 'get', 3) === 0) {
            // Strip the "get" prefix. Method names are ASCII, so the
            // byte-oriented mb_strcut() behaves like substr() here.
            $parameter = \mb_strcut($name, 3);

            return $this->__get($parameter);
        }

        if (strncmp($name, 'with', 4) === 0) {
            $parameter = \mb_strcut($name, 4);
            $value = $this->getCallArguments($name, $arguments);
            $this->data[$parameter] = $value;
            // NOTE(review): assumes parameterPosition() returns an array by
            // reference; otherwise this write is discarded -- confirm in the
            // host class.
            $this->parameterPosition()[$parameter] = $value;

            return $this;
        }

        if (strncmp($name, 'set', 3) === 0) {
            $parameter = \mb_strcut($name, 3);
            $with_method = "with$parameter";

            return $this->$with_method($this->getCallArguments($name, $arguments));
        }

        throw new RuntimeException('Call to undefined method ' . __CLASS__ . '::' . $name . '()');
    }

    /**
     * Returns a positional argument of a magic call, failing loudly when it is missing.
     *
     * NOTE(review): isset() treats an explicitly passed null as missing, so
     * withFoo(null) throws ArgumentCountError -- confirm this is intended.
     *
     * @param string $name      Magic method name, used in the error message only.
     * @param array $arguments  Raw argument list from __call().
     * @param int $index        Position of the wanted argument.
     *
     * @return mixed
     */
    private function getCallArguments($name, array $arguments, $index = 0)
    {
        if (!isset($arguments[$index])) {
            throw new ArgumentCountError("Missing arguments to method $name");
        }

        return $arguments[$index];
    }
}
{ "pile_set_name": "Github" }
<!-- NOTE(review): the original markup used ids "container"/"content" while the
     stylesheet below targets "#parent"/"#child", so none of the rules ever
     applied. Renamed the elements to match the selectors. If the mismatch was
     a deliberate negative test case, revert this change. -->
<div id="parent">
  <div id="child"></div>
</div>

#parent {
  height: 110px;
  padding-top: 90px;
  width: 200px;
}

#child {
  height: 20px;
  margin-left: auto;
  width: 20px;
}
{ "pile_set_name": "Github" }
/* Inner-loop body for the to-Array cast: converts one run of native dtype
 * elements into Ruby numeric objects and pushes them onto a fresh per-run
 * Ruby Array, which is itself appended to the result container in args[1]. */
static void
<%=c_iter%>(na_loop_t *const lp)
{
    size_t i, s1;
    char *p1;
    size_t *idx1;
    dtype x;
    /* volatile: standard Ruby C-extension idiom, presumably to keep these
       VALUEs visible to the conservative GC while elements are pushed --
       do not remove without checking the GC implications. */
    volatile VALUE a, y;

    INIT_COUNTER(lp, i);                 /* i = number of elements in this run */
    INIT_PTR_IDX(lp, 0, p1, s1, idx1);   /* pointer, stride, optional index array */
    a = rb_ary_new2(i);                  /* pre-sized Array for this run */
    rb_ary_push(lp->args[1].value, a);
    if (idx1) {
        /* Index-array access path (non-contiguous view). */
        for (; i--;) {
            GET_DATA_INDEX(p1,idx1,dtype,x);
            y = m_data_to_num(x);
            rb_ary_push(a,y);
        }
    } else {
        /* Strided access path. */
        for (; i--;) {
            GET_DATA_STRIDE(p1,s1,dtype,x);
            y = m_data_to_num(x);
            rb_ary_push(a,y);
        }
    }
}

/*
  Convert self to Array.
  @overload <%=name%>
  @return [Array]
*/
static VALUE
<%=c_func(0)%>(VALUE self)
{
    ndfunc_arg_in_t ain[3] = {{Qnil,0},{sym_loop_opt},{sym_option}};
    /* dummy? -- the nested result Arrays are built directly by the iterator
       above, not through this output descriptor. */
    ndfunc_arg_out_t aout[1] = {{rb_cArray,0}}; // dummy?
    ndfunc_t ndf = { <%=c_iter%>, FULL_LOOP_NIP, 3, 1, ain, aout };

    return na_ndloop_cast_narray_to_rarray(&ndf, self, Qnil);
}
{ "pile_set_name": "Github" }
# Bazel build for the sparkle "hello" example: compiles the Haskell Spark
# driver and wraps it into a deployable sparkle package.

package(default_visibility = ["//visibility:public"])

load(
    "@io_tweag_rules_haskell//haskell:haskell.bzl",
    "haskell_binary",
)
load("@//:sparkle.bzl", "sparkle_package")

# Haskell executable containing the Spark driver program.
haskell_binary(
    name = "hello-hs",
    srcs = ["HelloSpark.hs"],
    main_file = "HelloSpark.hs",
    deps = [
        "//:sparkle-lib",
    ],
    prebuilt_dependencies = [
        "base",
        "distributed-closure",
        "text",
    ],
    # -dynamic/-pie presumably required because sparkle loads the binary from
    # the JVM; -threaded enables the multi-threaded GHC RTS -- see sparkle docs.
    compiler_flags = ["-threaded", "-dynamic", "-pie"],
)

# Packages the executable into a sparkle application archive.
sparkle_package(
    name = "sparkle-example-hello",
    src = ":hello-hs",
)
{ "pile_set_name": "Github" }
<?php
/**
 * Copyright since 2007 PrestaShop SA and Contributors
 * PrestaShop is an International Registered Trademark & Property of PrestaShop SA
 *
 * NOTICE OF LICENSE
 *
 * This source file is subject to the Open Software License (OSL 3.0)
 * that is bundled with this package in the file LICENSE.md.
 * It is also available through the world-wide-web at this URL:
 * https://opensource.org/licenses/OSL-3.0
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to [email protected] so we can send you a copy immediately.
 *
 * DISCLAIMER
 *
 * Do not edit or add to this file if you wish to upgrade PrestaShop to newer
 * versions in the future. If you wish to customize PrestaShop for your
 * needs please refer to https://devdocs.prestashop.com/ for more information.
 *
 * @author PrestaShop SA and Contributors <[email protected]>
 * @copyright Since 2007 PrestaShop SA and Contributors
 * @license https://opensource.org/licenses/OSL-3.0 Open Software License (OSL 3.0)
 */

namespace PrestaShop\PrestaShop\Core\Domain\Category\Exception;

/**
 * Thrown when attempting to delete the root category of the current shop context.
 */
class CannotDeleteRootCategoryForShopException extends CategoryException
{
}
{ "pile_set_name": "Github" }
defmodule GitGud.Web.FallbackController do
  @moduledoc """
  Translates controller action results into valid `Plug.Conn` responses.

  See `Phoenix.Controller.action_fallback/1` for more details.
  """
  use GitGud.Web, :controller

  require Logger

  alias GitGud.Web.ErrorView

  def call(conn, {:error, :bad_request}), do: respond_with_error(conn, :bad_request, :"400")

  def call(conn, {:error, :unauthorized}), do: respond_with_error(conn, :unauthorized, :"401")

  def call(conn, {:error, :not_found}), do: respond_with_error(conn, :not_found, :"404")

  def call(conn, val) do
    Logger.warn("Uncaught #{inspect val}")
    respond_with_error(conn, :internal_server_error, :"500")
  end

  # Renders the numbered error template from ErrorView with the given HTTP status.
  defp respond_with_error(conn, status, template) do
    conn
    |> put_status(status)
    |> put_layout(:app)
    |> put_view(ErrorView)
    |> render(template)
  end
end
{ "pile_set_name": "Github" }
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ package com.facebook.imagepipeline.cache; import android.graphics.Bitmap; import android.os.SystemClock; import androidx.annotation.VisibleForTesting; import com.facebook.cache.common.HasDebugData; import com.facebook.common.internal.Objects; import com.facebook.common.internal.Preconditions; import com.facebook.common.internal.Predicate; import com.facebook.common.internal.Supplier; import com.facebook.common.memory.MemoryTrimType; import com.facebook.common.references.CloseableReference; import com.facebook.common.references.ResourceReleaser; import java.util.ArrayList; import java.util.Map; import java.util.WeakHashMap; import javax.annotation.Nullable; import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; /** * Layer of memory cache stack responsible for managing eviction of the the cached items. * * <p>This layer is responsible for LRU eviction strategy and for maintaining the size boundaries of * the cached items. * * <p>Only the exclusively owned elements, i.e. the elements not referenced by any client, can be * evicted. * * @param <K> the key type * @param <V> the value type */ @ThreadSafe public class LruCountingMemoryCache<K, V> implements CountingMemoryCache<K, V>, MemoryCache<K, V>, HasDebugData { private final @Nullable EntryStateObserver<K> mEntryStateObserver; // Contains the items that are not being used by any client and are hence viable for eviction. @GuardedBy("this") @VisibleForTesting final CountingLruMap<K, Entry<K, V>> mExclusiveEntries; // Contains all the cached items including the exclusively owned ones. 
@GuardedBy("this") @VisibleForTesting final CountingLruMap<K, Entry<K, V>> mCachedEntries; @GuardedBy("this") @VisibleForTesting final Map<Bitmap, Object> mOtherEntries = new WeakHashMap<>(); private final ValueDescriptor<V> mValueDescriptor; private final CacheTrimStrategy mCacheTrimStrategy; // Cache size constraints. private final Supplier<MemoryCacheParams> mMemoryCacheParamsSupplier; @GuardedBy("this") protected MemoryCacheParams mMemoryCacheParams; @GuardedBy("this") private long mLastCacheParamsCheck; public LruCountingMemoryCache( ValueDescriptor<V> valueDescriptor, CacheTrimStrategy cacheTrimStrategy, Supplier<MemoryCacheParams> memoryCacheParamsSupplier, @Nullable EntryStateObserver<K> entryStateObserver) { mValueDescriptor = valueDescriptor; mExclusiveEntries = new CountingLruMap<>(wrapValueDescriptor(valueDescriptor)); mCachedEntries = new CountingLruMap<>(wrapValueDescriptor(valueDescriptor)); mCacheTrimStrategy = cacheTrimStrategy; mMemoryCacheParamsSupplier = memoryCacheParamsSupplier; mMemoryCacheParams = mMemoryCacheParamsSupplier.get(); mLastCacheParamsCheck = SystemClock.uptimeMillis(); mEntryStateObserver = entryStateObserver; } private ValueDescriptor<Entry<K, V>> wrapValueDescriptor( final ValueDescriptor<V> evictableValueDescriptor) { return new ValueDescriptor<Entry<K, V>>() { @Override public int getSizeInBytes(Entry<K, V> entry) { return evictableValueDescriptor.getSizeInBytes(entry.valueRef.get()); } }; } /** * Caches the given key-value pair. * * <p>Important: the client should use the returned reference instead of the original one. It is * the caller's responsibility to close the returned reference once not needed anymore. * * @return the new reference to be used, null if the value cannot be cached */ public CloseableReference<V> cache(final K key, final CloseableReference<V> valueRef) { return cache(key, valueRef, mEntryStateObserver); } /** * Caches the given key-value pair. 
* * <p>Important: the client should use the returned reference instead of the original one. It is * the caller's responsibility to close the returned reference once not needed anymore. * * @return the new reference to be used, null if the value cannot be cached */ @Override public @Nullable CloseableReference<V> cache( final K key, final CloseableReference<V> valueRef, final EntryStateObserver<K> observer) { Preconditions.checkNotNull(key); Preconditions.checkNotNull(valueRef); maybeUpdateCacheParams(); Entry<K, V> oldExclusive; CloseableReference<V> oldRefToClose = null; CloseableReference<V> clientRef = null; synchronized (this) { // remove the old item (if any) as it is stale now oldExclusive = mExclusiveEntries.remove(key); Entry<K, V> oldEntry = mCachedEntries.remove(key); if (oldEntry != null) { makeOrphan(oldEntry); oldRefToClose = referenceToClose(oldEntry); } if (canCacheNewValue(valueRef.get())) { Entry<K, V> newEntry = Entry.of(key, valueRef, observer); mCachedEntries.put(key, newEntry); clientRef = newClientReference(newEntry); } } CloseableReference.closeSafely(oldRefToClose); maybeNotifyExclusiveEntryRemoval(oldExclusive); maybeEvictEntries(); return clientRef; } /** Checks the cache constraints to determine whether the new value can be cached or not. */ private synchronized boolean canCacheNewValue(V value) { int newValueSize = mValueDescriptor.getSizeInBytes(value); return (newValueSize <= mMemoryCacheParams.maxCacheEntrySize) && (getInUseCount() <= mMemoryCacheParams.maxCacheEntries - 1) && (getInUseSizeInBytes() <= mMemoryCacheParams.maxCacheSize - newValueSize); } /** * Gets the item with the given key, or null if there is no such item. * * <p>It is the caller's responsibility to close the returned reference once not needed anymore. 
*/ @Nullable public CloseableReference<V> get(final K key) { Preconditions.checkNotNull(key); Entry<K, V> oldExclusive; CloseableReference<V> clientRef = null; synchronized (this) { oldExclusive = mExclusiveEntries.remove(key); Entry<K, V> entry = mCachedEntries.get(key); if (entry != null) { clientRef = newClientReference(entry); } } maybeNotifyExclusiveEntryRemoval(oldExclusive); maybeUpdateCacheParams(); maybeEvictEntries(); return clientRef; } /** * Probes whether the object corresponding to the key is in the cache. Note that the act of * probing touches the item (if present in cache), thus changing its LRU timestamp. */ @Override public void probe(final K key) { Preconditions.checkNotNull(key); Entry<K, V> oldExclusive; synchronized (this) { oldExclusive = mExclusiveEntries.remove(key); if (oldExclusive != null) { mExclusiveEntries.put(key, oldExclusive); } } } /** Creates a new reference for the client. */ private synchronized CloseableReference<V> newClientReference(final Entry<K, V> entry) { increaseClientCount(entry); return CloseableReference.of( entry.valueRef.get(), new ResourceReleaser<V>() { @Override public void release(V unused) { releaseClientReference(entry); } }); } /** Called when the client closes its reference. */ private void releaseClientReference(final Entry<K, V> entry) { Preconditions.checkNotNull(entry); boolean isExclusiveAdded; CloseableReference<V> oldRefToClose; synchronized (this) { decreaseClientCount(entry); isExclusiveAdded = maybeAddToExclusives(entry); oldRefToClose = referenceToClose(entry); } CloseableReference.closeSafely(oldRefToClose); maybeNotifyExclusiveEntryInsertion(isExclusiveAdded ? entry : null); maybeUpdateCacheParams(); maybeEvictEntries(); } /** Adds the entry to the exclusively owned queue if it is viable for eviction. 
*/ private synchronized boolean maybeAddToExclusives(Entry<K, V> entry) { if (!entry.isOrphan && entry.clientCount == 0) { mExclusiveEntries.put(entry.key, entry); return true; } return false; } /** * Gets the value with the given key to be reused, or null if there is no such value. * * <p>The item can be reused only if it is exclusively owned by the cache. */ @Override @Nullable public CloseableReference<V> reuse(K key) { Preconditions.checkNotNull(key); CloseableReference<V> clientRef = null; boolean removed = false; Entry<K, V> oldExclusive = null; synchronized (this) { oldExclusive = mExclusiveEntries.remove(key); if (oldExclusive != null) { Entry<K, V> entry = mCachedEntries.remove(key); Preconditions.checkNotNull(entry); Preconditions.checkState(entry.clientCount == 0); // optimization: instead of cloning and then closing the original reference, // we just do a move clientRef = entry.valueRef; removed = true; } } if (removed) { maybeNotifyExclusiveEntryRemoval(oldExclusive); } return clientRef; } /** * Removes all the items from the cache whose key matches the specified predicate. * * @param predicate returns true if an item with the given key should be removed * @return number of the items removed from the cache */ public int removeAll(Predicate<K> predicate) { ArrayList<Entry<K, V>> oldExclusives; ArrayList<Entry<K, V>> oldEntries; synchronized (this) { oldExclusives = mExclusiveEntries.removeAll(predicate); oldEntries = mCachedEntries.removeAll(predicate); makeOrphans(oldEntries); } maybeClose(oldEntries); maybeNotifyExclusiveEntryRemoval(oldExclusives); maybeUpdateCacheParams(); maybeEvictEntries(); return oldEntries.size(); } /** Removes all the items from the cache. 
*/ @Override public void clear() { ArrayList<Entry<K, V>> oldExclusives; ArrayList<Entry<K, V>> oldEntries; synchronized (this) { oldExclusives = mExclusiveEntries.clear(); oldEntries = mCachedEntries.clear(); makeOrphans(oldEntries); } maybeClose(oldEntries); maybeNotifyExclusiveEntryRemoval(oldExclusives); maybeUpdateCacheParams(); } /** * Check if any items from the cache whose key matches the specified predicate. * * @param predicate returns true if an item with the given key matches * @return true is any items matches from the cache */ @Override public synchronized boolean contains(Predicate<K> predicate) { return !mCachedEntries.getMatchingEntries(predicate).isEmpty(); } /** * Check if an item with the given cache key is currently in the cache. * * @param key returns true if an item with the given key matches * @return true is any items matches from the cache */ @Override public synchronized boolean contains(K key) { return mCachedEntries.contains(key); } /** Trims the cache according to the specified trimming strategy and the given trim type. */ @Override public void trim(MemoryTrimType trimType) { ArrayList<Entry<K, V>> oldEntries; final double trimRatio = mCacheTrimStrategy.getTrimRatio(trimType); synchronized (this) { int targetCacheSize = (int) (mCachedEntries.getSizeInBytes() * (1 - trimRatio)); int targetEvictionQueueSize = Math.max(0, targetCacheSize - getInUseSizeInBytes()); oldEntries = trimExclusivelyOwnedEntries(Integer.MAX_VALUE, targetEvictionQueueSize); makeOrphans(oldEntries); } maybeClose(oldEntries); maybeNotifyExclusiveEntryRemoval(oldEntries); maybeUpdateCacheParams(); maybeEvictEntries(); } /** Updates the cache params (constraints) if enough time has passed since the last update. 
*/ private synchronized void maybeUpdateCacheParams() { if (mLastCacheParamsCheck + mMemoryCacheParams.paramsCheckIntervalMs > SystemClock.uptimeMillis()) { return; } mLastCacheParamsCheck = SystemClock.uptimeMillis(); mMemoryCacheParams = mMemoryCacheParamsSupplier.get(); } public MemoryCacheParams getMemoryCacheParams() { return mMemoryCacheParams; } @Override public CountingLruMap<K, Entry<K, V>> getCachedEntries() { return mCachedEntries; } @Override public Map<Bitmap, Object> getOtherEntries() { return mOtherEntries; } /** * Removes the exclusively owned items until the cache constraints are met. * * <p>This method invokes the external {@link CloseableReference#close} method, so it must not be * called while holding the <code>this</code> lock. */ @Override public void maybeEvictEntries() { ArrayList<Entry<K, V>> oldEntries; synchronized (this) { int maxCount = Math.min( mMemoryCacheParams.maxEvictionQueueEntries, mMemoryCacheParams.maxCacheEntries - getInUseCount()); int maxSize = Math.min( mMemoryCacheParams.maxEvictionQueueSize, mMemoryCacheParams.maxCacheSize - getInUseSizeInBytes()); oldEntries = trimExclusivelyOwnedEntries(maxCount, maxSize); makeOrphans(oldEntries); } maybeClose(oldEntries); maybeNotifyExclusiveEntryRemoval(oldEntries); } /** * Removes the exclusively owned items until there is at most <code>count</code> of them and they * occupy no more than <code>size</code> bytes. * * <p>This method returns the removed items instead of actually closing them, so it is safe to be * called while holding the <code>this</code> lock. 
*/ @Nullable private synchronized ArrayList<Entry<K, V>> trimExclusivelyOwnedEntries(int count, int size) { count = Math.max(count, 0); size = Math.max(size, 0); // fast path without array allocation if no eviction is necessary if (mExclusiveEntries.getCount() <= count && mExclusiveEntries.getSizeInBytes() <= size) { return null; } ArrayList<Entry<K, V>> oldEntries = new ArrayList<>(); while (mExclusiveEntries.getCount() > count || mExclusiveEntries.getSizeInBytes() > size) { K key = mExclusiveEntries.getFirstKey(); mExclusiveEntries.remove(key); oldEntries.add(mCachedEntries.remove(key)); } return oldEntries; } /** * Notifies the client that the cache no longer tracks the given items. * * <p>This method invokes the external {@link CloseableReference#close} method, so it must not be * called while holding the <code>this</code> lock. */ private void maybeClose(@Nullable ArrayList<Entry<K, V>> oldEntries) { if (oldEntries != null) { for (Entry<K, V> oldEntry : oldEntries) { CloseableReference.closeSafely(referenceToClose(oldEntry)); } } } private void maybeNotifyExclusiveEntryRemoval(@Nullable ArrayList<Entry<K, V>> entries) { if (entries != null) { for (Entry<K, V> entry : entries) { maybeNotifyExclusiveEntryRemoval(entry); } } } private static <K, V> void maybeNotifyExclusiveEntryRemoval(@Nullable Entry<K, V> entry) { if (entry != null && entry.observer != null) { entry.observer.onExclusivityChanged(entry.key, false); } } private static <K, V> void maybeNotifyExclusiveEntryInsertion(@Nullable Entry<K, V> entry) { if (entry != null && entry.observer != null) { entry.observer.onExclusivityChanged(entry.key, true); } } /** Marks the given entries as orphans. */ private synchronized void makeOrphans(@Nullable ArrayList<Entry<K, V>> oldEntries) { if (oldEntries != null) { for (Entry<K, V> oldEntry : oldEntries) { makeOrphan(oldEntry); } } } /** Marks the entry as orphan. 
*/ private synchronized void makeOrphan(Entry<K, V> entry) { Preconditions.checkNotNull(entry); Preconditions.checkState(!entry.isOrphan); entry.isOrphan = true; } /** Increases the entry's client count. */ private synchronized void increaseClientCount(Entry<K, V> entry) { Preconditions.checkNotNull(entry); Preconditions.checkState(!entry.isOrphan); entry.clientCount++; } /** Decreases the entry's client count. */ private synchronized void decreaseClientCount(Entry<K, V> entry) { Preconditions.checkNotNull(entry); Preconditions.checkState(entry.clientCount > 0); entry.clientCount--; } /** Returns the value reference of the entry if it should be closed, null otherwise. */ @Nullable private synchronized CloseableReference<V> referenceToClose(Entry<K, V> entry) { Preconditions.checkNotNull(entry); return (entry.isOrphan && entry.clientCount == 0) ? entry.valueRef : null; } /** Gets the total number of all currently cached items. */ @Override public synchronized int getCount() { return mCachedEntries.getCount(); } /** Gets the total size in bytes of all currently cached items. */ @Override public synchronized int getSizeInBytes() { return mCachedEntries.getSizeInBytes(); } /** Gets the number of the cached items that are used by at least one client. */ public synchronized int getInUseCount() { return mCachedEntries.getCount() - mExclusiveEntries.getCount(); } /** Gets the total size in bytes of the cached items that are used by at least one client. */ @Override public synchronized int getInUseSizeInBytes() { return mCachedEntries.getSizeInBytes() - mExclusiveEntries.getSizeInBytes(); } /** Gets the number of the exclusively owned items. */ @Override public synchronized int getEvictionQueueCount() { return mExclusiveEntries.getCount(); } /** Gets the total size in bytes of the exclusively owned items. 
*/ @Override public synchronized int getEvictionQueueSizeInBytes() { return mExclusiveEntries.getSizeInBytes(); } @Override public @Nullable String getDebugData() { return Objects.toStringHelper("CountingMemoryCache") .add("cached_entries_count:", mCachedEntries.getCount()) .add("cached_entries_size_bytes", mCachedEntries.getSizeInBytes()) .add("exclusive_entries_count", mExclusiveEntries.getCount()) .add("exclusive_entries_size_bytes", mExclusiveEntries.getSizeInBytes()) .toString(); } }
{ "pile_set_name": "Github" }
7
{ "pile_set_name": "Github" }
module github.com/go-openapi/jsonpointer require ( github.com/go-openapi/swag v0.19.5 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect github.com/stretchr/testify v1.3.0 ) go 1.13
{ "pile_set_name": "Github" }
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_252) on Mon Sep 14 15:53:51 PDT 2020 --> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Uses of Class org.apache.orc.impl.RecordReaderImpl.ZeroPositionProvider (ORC Core 1.6.4 API)</title> <meta name="date" content="2020-09-14"> <link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../../script.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class org.apache.orc.impl.RecordReaderImpl.ZeroPositionProvider (ORC Core 1.6.4 API)"; } } catch(err) { } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../org/apache/orc/impl/RecordReaderImpl.ZeroPositionProvider.html" title="class in org.apache.orc.impl">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../../../../../overview-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a 
href="../../../../../index.html?org/apache/orc/impl/class-use/RecordReaderImpl.ZeroPositionProvider.html" target="_top">Frames</a></li> <li><a href="RecordReaderImpl.ZeroPositionProvider.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h2 title="Uses of Class org.apache.orc.impl.RecordReaderImpl.ZeroPositionProvider" class="title">Uses of Class<br>org.apache.orc.impl.RecordReaderImpl.ZeroPositionProvider</h2> </div> <div class="classUseContainer">No usage of org.apache.orc.impl.RecordReaderImpl.ZeroPositionProvider</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../org/apache/orc/impl/RecordReaderImpl.ZeroPositionProvider.html" title="class in org.apache.orc.impl">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../../../../../overview-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a 
href="../../../../../index.html?org/apache/orc/impl/class-use/RecordReaderImpl.ZeroPositionProvider.html" target="_top">Frames</a></li> <li><a href="RecordReaderImpl.ZeroPositionProvider.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small>Copyright &#169; 2013&#x2013;2020 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.</small></p> </body> </html>
{ "pile_set_name": "Github" }
import { NgModule } from '@angular/core';
import { RouterModule, Routes } from '@angular/router';

import { ComponentDetailsModule } from 'app/components/shared/component-details/component-details.module';
import { setComponentRoutes } from 'app/content/components/components';

import { LoadingMaskDemoSharedModule } from './demos/loading-mask-demo-basic/loading-mask-demo-basic.shared';
import { LoadingMaskDemoBasicComponent } from './demos/loading-mask-demo-basic/loading-mask-demo-basic.component';

// Routes for the "loading-mask" documentation pages; the shared helper wires
// the basic demo component in as the overview example.
const routes: Routes = setComponentRoutes({
  overviewDemoComponent: LoadingMaskDemoBasicComponent,
  id: 'loading-mask',
});

// Feature module for the loading-mask component demos, registered through
// child routes (RouterModule.forChild).
@NgModule({
  imports: [LoadingMaskDemoSharedModule, ComponentDetailsModule, RouterModule.forChild(routes)],
})
export class LoadingMaskDemoModule {}
{ "pile_set_name": "Github" }
/*
  +------------------------------------------------------------------------+
  | Phalcon Framework                                                      |
  +------------------------------------------------------------------------+
  | Copyright (c) 2011-2014 Phalcon Team (http://www.phalconphp.com)       |
  +------------------------------------------------------------------------+
  | This source file is subject to the New BSD License that is bundled     |
  | with this package in the file docs/LICENSE.txt.                        |
  |                                                                        |
  | If you did not receive a copy of the license and are unable to         |
  | obtain it through the world-wide-web, please send an email             |
  | to [email protected] so we can send you a copy immediately.       |
  +------------------------------------------------------------------------+
  | Authors: Andres Gutierrez <[email protected]>                     |
  |          Eduar Carvajal <[email protected]>                         |
  |          ZhuZongXin <[email protected]>                        |
  +------------------------------------------------------------------------+
*/

#ifndef PHALCON_SYNC_SEMAPHORE_H
#define PHALCON_SYNC_SEMAPHORE_H

#include "php_phalcon.h"

#if PHALCON_USE_SHM_OPEN

#include "sync/common.h"

/* Semaphore */
/* Internal object layout backing Phalcon\Sync\Semaphore instances. */
typedef struct _phalcon_sync_semaphore_object {
	int MxNamed;                                  /* presumably non-zero for named (cross-process) semaphores -- confirm in the .c file */
	char *MxMem;                                  /* backing memory holding the semaphore state */
	phalcon_semaphore_wrapper MxPthreadSemaphore; /* wrapped pthread-based semaphore primitive */
	int MxAutoUnlock;                             /* NOTE(review): presumably releases held units on teardown -- verify in the .c file */
	volatile unsigned int MxCount;
	zend_object std;                              /* must stay the last member: see phalcon_sync_semaphore_object_from_obj() */
} phalcon_sync_semaphore_object;

/* Recovers the wrapper struct from its embedded zend_object by subtracting
   the offset of `std` (the classic container_of pattern). */
static inline phalcon_sync_semaphore_object *phalcon_sync_semaphore_object_from_obj(zend_object *obj) {
	return (phalcon_sync_semaphore_object*)((char*)(obj) - XtOffsetOf(phalcon_sync_semaphore_object, std));
}

extern zend_class_entry *phalcon_sync_semaphore_ce;

PHALCON_INIT_CLASS(Phalcon_Sync_Semaphore);

#endif

#endif /* PHALCON_SYNC_SEMAPHORE_H */
{ "pile_set_name": "Github" }
import Foundation /** Adds methods to World to support top-level DSL functions (Swift) and macros (Objective-C). These functions map directly to the DSL that test writers use in their specs. */ extension World { internal func beforeSuite(_ closure: @escaping BeforeSuiteClosure) { suiteHooks.appendBefore(closure) } internal func afterSuite(_ closure: @escaping AfterSuiteClosure) { suiteHooks.appendAfter(closure) } internal func sharedExamples(_ name: String, closure: @escaping SharedExampleClosure) { registerSharedExample(name, closure: closure) } internal func describe(_ description: String, flags: FilterFlags, closure: () -> ()) { guard currentExampleMetadata == nil else { raiseError("'describe' cannot be used inside '\(currentPhase)', 'describe' may only be used inside 'context' or 'describe'. ") } guard currentExampleGroup != nil else { raiseError("Error: example group was not created by its parent QuickSpec spec. Check that describe() or context() was used in QuickSpec.spec() and not a more general context (i.e. an XCTestCase test)") } let group = ExampleGroup(description: description, flags: flags) currentExampleGroup.appendExampleGroup(group) performWithCurrentExampleGroup(group, closure: closure) } internal func context(_ description: String, flags: FilterFlags, closure: () -> ()) { guard currentExampleMetadata == nil else { raiseError("'context' cannot be used inside '\(currentPhase)', 'context' may only be used inside 'context' or 'describe'. 
") } self.describe(description, flags: flags, closure: closure) } internal func fdescribe(_ description: String, flags: FilterFlags, closure: () -> ()) { var focusedFlags = flags focusedFlags[Filter.focused] = true self.describe(description, flags: focusedFlags, closure: closure) } internal func xdescribe(_ description: String, flags: FilterFlags, closure: () -> ()) { var pendingFlags = flags pendingFlags[Filter.pending] = true self.describe(description, flags: pendingFlags, closure: closure) } internal func beforeEach(_ closure: @escaping BeforeExampleClosure) { guard currentExampleMetadata == nil else { raiseError("'beforeEach' cannot be used inside '\(currentPhase)', 'beforeEach' may only be used inside 'context' or 'describe'. ") } currentExampleGroup.hooks.appendBefore(closure) } #if _runtime(_ObjC) @objc(beforeEachWithMetadata:) internal func beforeEach(closure: @escaping BeforeExampleWithMetadataClosure) { currentExampleGroup.hooks.appendBefore(closure) } #else internal func beforeEach(closure: @escaping BeforeExampleWithMetadataClosure) { currentExampleGroup.hooks.appendBefore(closure) } #endif internal func afterEach(_ closure: @escaping AfterExampleClosure) { guard currentExampleMetadata == nil else { raiseError("'afterEach' cannot be used inside '\(currentPhase)', 'afterEach' may only be used inside 'context' or 'describe'. ") } currentExampleGroup.hooks.appendAfter(closure) } #if _runtime(_ObjC) @objc(afterEachWithMetadata:) internal func afterEach(closure: @escaping AfterExampleWithMetadataClosure) { currentExampleGroup.hooks.appendAfter(closure) } #else internal func afterEach(closure: @escaping AfterExampleWithMetadataClosure) { currentExampleGroup.hooks.appendAfter(closure) } #endif internal func it(_ description: String, flags: FilterFlags, file: String, line: UInt, closure: @escaping () -> ()) { if beforesCurrentlyExecuting { raiseError("'it' cannot be used inside 'beforeEach', 'it' may only be used inside 'context' or 'describe'. 
") } if aftersCurrentlyExecuting { raiseError("'it' cannot be used inside 'afterEach', 'it' may only be used inside 'context' or 'describe'. ") } guard currentExampleMetadata == nil else { raiseError("'it' cannot be used inside 'it', 'it' may only be used inside 'context' or 'describe'. ") } let callsite = Callsite(file: file, line: line) let example = Example(description: description, callsite: callsite, flags: flags, closure: closure) currentExampleGroup.appendExample(example) } internal func fit(_ description: String, flags: FilterFlags, file: String, line: UInt, closure: @escaping () -> ()) { var focusedFlags = flags focusedFlags[Filter.focused] = true self.it(description, flags: focusedFlags, file: file, line: line, closure: closure) } internal func xit(_ description: String, flags: FilterFlags, file: String, line: UInt, closure: @escaping () -> ()) { var pendingFlags = flags pendingFlags[Filter.pending] = true self.it(description, flags: pendingFlags, file: file, line: line, closure: closure) } internal func itBehavesLike(_ name: String, sharedExampleContext: @escaping SharedExampleContext, flags: FilterFlags, file: String, line: UInt) { guard currentExampleMetadata == nil else { raiseError("'itBehavesLike' cannot be used inside '\(currentPhase)', 'itBehavesLike' may only be used inside 'context' or 'describe'. 
") } let callsite = Callsite(file: file, line: line) let closure = World.sharedWorld.sharedExample(name) let group = ExampleGroup(description: name, flags: flags) currentExampleGroup.appendExampleGroup(group) performWithCurrentExampleGroup(group) { closure(sharedExampleContext) } group.walkDownExamples { (example: Example) in example.isSharedExample = true example.callsite = callsite } } #if _runtime(_ObjC) @objc(itWithDescription:flags:file:line:closure:) private func objc_it(_ description: String, flags: FilterFlags, file: String, line: UInt, closure: @escaping () -> ()) { it(description, flags: flags, file: file, line: line, closure: closure) } @objc(fitWithDescription:flags:file:line:closure:) private func objc_fit(_ description: String, flags: FilterFlags, file: String, line: UInt, closure: @escaping () -> ()) { fit(description, flags: flags, file: file, line: line, closure: closure) } @objc(xitWithDescription:flags:file:line:closure:) private func objc_xit(_ description: String, flags: FilterFlags, file: String, line: UInt, closure: @escaping () -> ()) { xit(description, flags: flags, file: file, line: line, closure: closure) } @objc(itBehavesLikeSharedExampleNamed:sharedExampleContext:flags:file:line:) private func objc_itBehavesLike(_ name: String, sharedExampleContext: @escaping SharedExampleContext, flags: FilterFlags, file: String, line: UInt) { itBehavesLike(name, sharedExampleContext: sharedExampleContext, flags: flags, file: file, line: line) } #endif internal func pending(_ description: String, closure: () -> ()) { print("Pending: \(description)") } private var currentPhase: String { if beforesCurrentlyExecuting { return "beforeEach" } else if aftersCurrentlyExecuting { return "afterEach" } return "it" } }
{ "pile_set_name": "Github" }
<?php
/*
 * Copyright 2014 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

/**
 * Auto-generated model class for the Dataflow API "StreamingConfigTask"
 * resource. Do not hand-edit the structure; regenerate instead.
 *
 * Extends Google_Collection, which provides iteration over the repeated
 * field named by $collection_key and handles (de)serialization of the
 * typed fields declared via the *Type/*DataType protected members.
 */
class Google_Service_Dataflow_StreamingConfigTask extends Google_Collection
{
  // Name of the repeated (array) field this collection iterates over.
  protected $collection_key = 'streamingComputationConfigs';
  // Element type metadata for the repeated field; the value itself is stored
  // dynamically by the setter below (the base model class resolves it through
  // this type information -- generated-client convention).
  protected $streamingComputationConfigsType = 'Google_Service_Dataflow_StreamingComputationConfig';
  protected $streamingComputationConfigsDataType = 'array';
  // Map from user step name to state family name (plain associative data).
  public $userStepToStateFamilyNameMap;
  // Windmill service endpoint and port used by the streaming worker.
  public $windmillServiceEndpoint;
  public $windmillServicePort;

  /**
   * @param Google_Service_Dataflow_StreamingComputationConfig
   */
  public function setStreamingComputationConfigs($streamingComputationConfigs)
  {
    $this->streamingComputationConfigs = $streamingComputationConfigs;
  }
  /**
   * @return Google_Service_Dataflow_StreamingComputationConfig
   */
  public function getStreamingComputationConfigs()
  {
    return $this->streamingComputationConfigs;
  }
  public function setUserStepToStateFamilyNameMap($userStepToStateFamilyNameMap)
  {
    $this->userStepToStateFamilyNameMap = $userStepToStateFamilyNameMap;
  }
  public function getUserStepToStateFamilyNameMap()
  {
    return $this->userStepToStateFamilyNameMap;
  }
  public function setWindmillServiceEndpoint($windmillServiceEndpoint)
  {
    $this->windmillServiceEndpoint = $windmillServiceEndpoint;
  }
  public function getWindmillServiceEndpoint()
  {
    return $this->windmillServiceEndpoint;
  }
  public function setWindmillServicePort($windmillServicePort)
  {
    $this->windmillServicePort = $windmillServicePort;
  }
  public function getWindmillServicePort()
  {
    return $this->windmillServicePort;
  }
}
{ "pile_set_name": "Github" }
// // Switch.swift // RxSwift // // Created by Krunoslav Zaher on 3/12/15. // Copyright © 2015 Krunoslav Zaher. All rights reserved. // extension ObservableType { /** Projects each element of an observable sequence into a new sequence of observable sequences and then transforms an observable sequence of observable sequences into an observable sequence producing values only from the most recent observable sequence. It is a combination of `map` + `switchLatest` operator - seealso: [flatMapLatest operator on reactivex.io](http://reactivex.io/documentation/operators/flatmap.html) - parameter selector: A transform function to apply to each element. - returns: An observable sequence whose elements are the result of invoking the transform function on each element of source producing an Observable of Observable sequences and that at any point in time produces the elements of the most recent inner observable sequence that has been received. */ public func flatMapLatest<O: ObservableConvertibleType>(_ selector: @escaping (E) throws -> O) -> Observable<O.E> { return FlatMapLatest(source: asObservable(), selector: selector) } } extension ObservableType where E : ObservableConvertibleType { /** Transforms an observable sequence of observable sequences into an observable sequence producing values only from the most recent observable sequence. Each time a new inner observable sequence is received, unsubscribe from the previous inner observable sequence. - seealso: [switch operator on reactivex.io](http://reactivex.io/documentation/operators/switch.html) - returns: The observable sequence that at any point in time produces the elements of the most recent inner observable sequence that has been received. 
*/ public func switchLatest() -> Observable<E.E> { return Switch(source: asObservable()) } } fileprivate class SwitchSink<SourceType, S: ObservableConvertibleType, O: ObserverType> : Sink<O> , ObserverType where S.E == O.E { typealias E = SourceType fileprivate let _subscriptions: SingleAssignmentDisposable = SingleAssignmentDisposable() fileprivate let _innerSubscription: SerialDisposable = SerialDisposable() let _lock = RecursiveLock() // state fileprivate var _stopped = false fileprivate var _latest = 0 fileprivate var _hasLatest = false override init(observer: O, cancel: Cancelable) { super.init(observer: observer, cancel: cancel) } func run(_ source: Observable<SourceType>) -> Disposable { let subscription = source.subscribe(self) _subscriptions.setDisposable(subscription) return Disposables.create(_subscriptions, _innerSubscription) } func performMap(_ element: SourceType) throws -> S { rxAbstractMethod() } @inline(__always) final private func nextElementArrived(element: E) -> (Int, Observable<S.E>)? 
{ _lock.lock(); defer { _lock.unlock() } // { do { let observable = try performMap(element).asObservable() _hasLatest = true _latest = _latest &+ 1 return (_latest, observable) } catch let error { forwardOn(.error(error)) dispose() } return nil // } } func on(_ event: Event<E>) { switch event { case .next(let element): if let (latest, observable) = nextElementArrived(element: element) { let d = SingleAssignmentDisposable() _innerSubscription.disposable = d let observer = SwitchSinkIter(parent: self, id: latest, _self: d) let disposable = observable.subscribe(observer) d.setDisposable(disposable) } case .error(let error): _lock.lock(); defer { _lock.unlock() } forwardOn(.error(error)) dispose() case .completed: _lock.lock(); defer { _lock.unlock() } _stopped = true _subscriptions.dispose() if !_hasLatest { forwardOn(.completed) dispose() } } } } final fileprivate class SwitchSinkIter<SourceType, S: ObservableConvertibleType, O: ObserverType> : ObserverType , LockOwnerType , SynchronizedOnType where S.E == O.E { typealias E = S.E typealias Parent = SwitchSink<SourceType, S, O> fileprivate let _parent: Parent fileprivate let _id: Int fileprivate let _self: Disposable var _lock: RecursiveLock { return _parent._lock } init(parent: Parent, id: Int, _self: Disposable) { _parent = parent _id = id self._self = _self } func on(_ event: Event<E>) { synchronizedOn(event) } func _synchronized_on(_ event: Event<E>) { switch event { case .next: break case .error, .completed: _self.dispose() } if _parent._latest != _id { return } switch event { case .next: _parent.forwardOn(event) case .error: _parent.forwardOn(event) _parent.dispose() case .completed: _parent._hasLatest = false if _parent._stopped { _parent.forwardOn(event) _parent.dispose() } } } } // MARK: Specializations final fileprivate class SwitchIdentitySink<S: ObservableConvertibleType, O: ObserverType> : SwitchSink<S, S, O> where O.E == S.E { override init(observer: O, cancel: Cancelable) { super.init(observer: 
observer, cancel: cancel) } override func performMap(_ element: S) throws -> S { return element } } final fileprivate class MapSwitchSink<SourceType, S: ObservableConvertibleType, O: ObserverType> : SwitchSink<SourceType, S, O> where O.E == S.E { typealias Selector = (SourceType) throws -> S fileprivate let _selector: Selector init(selector: @escaping Selector, observer: O, cancel: Cancelable) { _selector = selector super.init(observer: observer, cancel: cancel) } override func performMap(_ element: SourceType) throws -> S { return try _selector(element) } } // MARK: Producers final fileprivate class Switch<S: ObservableConvertibleType> : Producer<S.E> { fileprivate let _source: Observable<S> init(source: Observable<S>) { _source = source } override func run<O : ObserverType>(_ observer: O, cancel: Cancelable) -> (sink: Disposable, subscription: Disposable) where O.E == S.E { let sink = SwitchIdentitySink<S, O>(observer: observer, cancel: cancel) let subscription = sink.run(_source) return (sink: sink, subscription: subscription) } } final fileprivate class FlatMapLatest<SourceType, S: ObservableConvertibleType> : Producer<S.E> { typealias Selector = (SourceType) throws -> S fileprivate let _source: Observable<SourceType> fileprivate let _selector: Selector init(source: Observable<SourceType>, selector: @escaping Selector) { _source = source _selector = selector } override func run<O : ObserverType>(_ observer: O, cancel: Cancelable) -> (sink: Disposable, subscription: Disposable) where O.E == S.E { let sink = MapSwitchSink<SourceType, S, O>(selector: _selector, observer: observer, cancel: cancel) let subscription = sink.run(_source) return (sink: sink, subscription: subscription) } }
{ "pile_set_name": "Github" }
鮎川真理最新番号 【HMP-009】極 口全ワイセツ 【DSV-041】非常事態 鮎川真理 【P-081】興奮120% 爆裂10 超エッチギャルまとめて10人 【P-053】唇気楼 鮎川真理 【P-022】内濡拡大 鮎川真理 【DP-022】手が抜けない 鮎川真理 【DP-017】田村ガンの暴露ビデオ 【MKDV-026】淫女伝説 鮎川真理&田村香織 【MKDV-047】デジタルリマスタリングREMIX 黄金伝説 【MKDV-062】M KING of BEST VOL.2 【DEX-020】DECADE EX 20 鮎川真理 【PDV-117】復刻 フラッシュバック14 鮎川真理 【MKDV-161】Re:鮎川真理 【GYR-003】夜のAVヒットスタジオ ゴージャス300分 【AVD-321】鮎川真理 HISTORY 【KA-1254】ボッキー25 おくちでヌピヌパッ☆ 【KA-1242】ボッキー23 おくちでムグして 【KR-9207】痴漢電車 中谷友美 2004 【KSV-006B】口全ワイセツBEST 2 【TSV-007】口全ワイセツスペシャル 怒涛のおしゃぶり列伝 【ARD-033】Allure2000 Vol.6 【PSV-031】口全ワイセツスペシャル スロートのど自慢大会 【KA-1255】フラッシュバック 鮎川真理 【ARD-009】口唇伝説 【BNDV-054】エピソード[1988]伝説になった女優たち…</a>2002-07-10h.m.p$$$VB115分钟
{ "pile_set_name": "Github" }
# OpenXPKI::Server::Workflow::Condition::CertificateNotYetRevoked
# Written by Alexander Klink for the OpenXPKI project 2007
# Copyright (c) 2007 by The OpenXPKI Project
package OpenXPKI::Server::Workflow::Condition::CertificateNotYetRevoked;

use strict;
use warnings;
use base qw( Workflow::Condition );
use Workflow::Exception qw( condition_error configuration_error );
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Serialization::Simple;
use OpenXPKI::Debug;
use English;

use OpenXPKI::Exception;

# Workflow condition: true when the certificate referenced by the context
# parameter 'cert_identifier' may still be processed for revocation.
# Throws an exception (configuration/programming error) when the identifier
# is missing or the certificate cannot be found; raises condition_error for
# the "already revoked"/"on hold" business cases.
sub evaluate {
    ##! 1: 'start'
    my ( $self, $workflow ) = @_;
    my $context = $workflow->context();

    my $identifier  = $context->param('cert_identifier');
    my $reason_code = $context->param('reason_code') || '';
    my $pki_realm   = CTX('session')->data->pki_realm;

    OpenXPKI::Exception->throw(
        message => 'I18N_OPENXPKI_SERVER_WORKFLOW_CONDITION_CERTIFICATE_NOT_YET_REVOKED_IDENTIFIER_MISSING',
    ) unless $identifier;

    my $cert = CTX('dbi')->select_one(
        from    => 'certificate',
        columns => [ 'status' ],
        where   => {
            identifier => $identifier,
            pki_realm  => $pki_realm,
        }
    );

    # select_one() returns undef when no row matches; fail with an explicit
    # exception instead of dying on an undef dereference below.
    OpenXPKI::Exception->throw(
        message => 'I18N_OPENXPKI_SERVER_WORKFLOW_CONDITION_CERTIFICATE_NOT_YET_REVOKED_CERT_NOT_FOUND',
        params  => { IDENTIFIER => $identifier },
    ) unless $cert;

    CTX('log')->application()->debug("Cert status is ".$cert->{status});

    # NB: the DB column/key is lowercase 'status' (fixed: previously the
    # debug statement read the non-existent uppercase 'STATUS' key).
    ##! 16: 'status: ' . $cert->{status}

    # A revocation is already queued for the next CRL.
    condition_error
        'I18N_OPENXPKI_SERVER_WORKFLOW_CONDITION_CERTIFICATE_NOT_YET_REVOKED_CERT_IN_STATE_CRL_ISSUANCE_PENDING'
        if ('CRL_ISSUANCE_PENDING' eq $cert->{status});

    # Certificate is permanently revoked already.
    condition_error
        'I18N_OPENXPKI_SERVER_WORKFLOW_CONDITION_CERTIFICATE_NOT_YET_REVOKED_CERT_IN_STATE_REVOKED'
        if ('REVOKED' eq $cert->{status});

    # A certificate on hold may only be processed further with the special
    # reason code 'removeFromCRL' (RFC 5280 certificateHold release).
    condition_error
        'I18N_OPENXPKI_SERVER_WORKFLOW_CONDITION_CERTIFICATE_NOT_YET_REVOKED_CERT_ON_HOLD_AND_REASON_CODE_NOT_REMOVE_FROM_CRL'
        if ('HOLD' eq $cert->{status} and $reason_code ne 'removeFromCRL');

    # Conversely, 'removeFromCRL' only makes sense for a certificate on hold.
    condition_error
        'I18N_OPENXPKI_SERVER_WORKFLOW_CONDITION_CERTIFICATE_NOT_YET_REVOKED_CERT_NOT_ON_HOLD_AND_REASON_CODE_REMOVE_FROM_CRL'
        if ($cert->{status} ne 'HOLD' and $reason_code eq 'removeFromCRL');

    return 1;
}

1;

__END__

=head1 NAME

OpenXPKI::Server::Workflow::Condition::CertificateNotYetRevoked

=head1 SYNOPSIS

<action name="do_something">
  <condition name="certificate_not_yet_revoked"
             class="OpenXPKI::Server::Workflow::Condition::CertificateNotYetRevoked">
  </condition>
</action>

=head1 DESCRIPTION

The condition checks if the certificate from a CRR has not yet been
revoked. It furthermore throws an exception when the certificate is in
state 'HOLD' and the reason code is not 'removeFromCRL' (and vice versa),
and when the certificate identifier is missing or cannot be found in the
database.
{ "pile_set_name": "Github" }
/* SPDX-License-Identifier: GPL-2.0+ */ /* * (C) Copyright 2015 * Texas Instruments Incorporated - http://www.ti.com/ */ #ifndef _RPROC_H_ #define _RPROC_H_ /* * Note: The platform data support is not meant for use with newer * platforms. This is meant only for legacy devices. This mode of * initialization *will* be eventually removed once all necessary * platforms have moved to dm/fdt. */ #include <dm/platdata.h> /* For platform data support - non dt world */ /** * enum rproc_mem_type - What type of memory model does the rproc use * @RPROC_INTERNAL_MEMORY_MAPPED: Remote processor uses own memory and is memory * mapped to the host processor over an address range. * * Please note that this is an enumeration of memory model of different types * of remote processors. Few of the remote processors do have own internal * memories, while others use external memory for instruction and data. */ enum rproc_mem_type { RPROC_INTERNAL_MEMORY_MAPPED = 0, }; /** * struct dm_rproc_uclass_pdata - platform data for a CPU * @name: Platform-specific way of naming the Remote proc * @mem_type: one of 'enum rproc_mem_type' * @driver_plat_data: driver specific platform data that may be needed. * * This can be accessed with dev_get_uclass_platdata() for any UCLASS_REMOTEPROC * device. * */ struct dm_rproc_uclass_pdata { const char *name; enum rproc_mem_type mem_type; void *driver_plat_data; }; /** * struct dm_rproc_ops - Operations that are provided by remote proc driver * @init: Initialize the remoteproc device invoked after probe (optional) * Return 0 on success, -ve error on fail * @load: Load the remoteproc device using data provided(mandatory) * This takes the following additional arguments. 
* addr- Address of the binary image to be loaded * size- Size of the binary image to be loaded * Return 0 on success, -ve error on fail * @start: Start the remoteproc device (mandatory) * Return 0 on success, -ve error on fail * @stop: Stop the remoteproc device (optional) * Return 0 on success, -ve error on fail * @reset: Reset the remote proc device (optional) * Return 0 on success, -ve error on fail * @is_running: Check if the remote processor is running(optional) * Return 0 on success, 1 if not running, -ve on others errors * @ping: Ping the remote device for basic communication check(optional) * Return 0 on success, 1 if not responding, -ve on other errors */ struct dm_rproc_ops { int (*init)(struct udevice *dev); int (*load)(struct udevice *dev, ulong addr, ulong size); int (*start)(struct udevice *dev); int (*stop)(struct udevice *dev); int (*reset)(struct udevice *dev); int (*is_running)(struct udevice *dev); int (*ping)(struct udevice *dev); }; /* Accessor */ #define rproc_get_ops(dev) ((struct dm_rproc_ops *)(dev)->driver->ops) #ifdef CONFIG_REMOTEPROC /** * rproc_init() - Initialize all bound remote proc devices * * Return: 0 if all ok, else appropriate error value. */ int rproc_init(void); /** * rproc_is_initialized() - check to see if remoteproc devices are initialized * * Return: 0 if all devices are initialized, else appropriate error value. */ bool rproc_is_initialized(void); /** * rproc_load() - load binary to a remote processor * @id: id of the remote processor * @addr: address in memory where the binary image is located * @size: size of the binary image * * Return: 0 if all ok, else appropriate error value. */ int rproc_load(int id, ulong addr, ulong size); /** * rproc_start() - Start a remote processor * @id: id of the remote processor * * Return: 0 if all ok, else appropriate error value. 
*/ int rproc_start(int id); /** * rproc_stop() - Stop a remote processor * @id: id of the remote processor * * Return: 0 if all ok, else appropriate error value. */ int rproc_stop(int id); /** * rproc_reset() - reset a remote processor * @id: id of the remote processor * * Return: 0 if all ok, else appropriate error value. */ int rproc_reset(int id); /** * rproc_ping() - ping a remote processor to check if it can communicate * @id: id of the remote processor * * NOTE: this might need communication path available, which is not implemented * as part of remoteproc framework - hook on to appropriate bus architecture to * do the same * * Return: 0 if all ok, else appropriate error value. */ int rproc_ping(int id); /** * rproc_is_running() - check to see if remote processor is running * @id: id of the remote processor * * NOTE: this may not involve actual communication capability of the remote * processor, but just ensures that it is out of reset and executing code. * * Return: 0 if all ok, else appropriate error value. */ int rproc_is_running(int id); #else static inline int rproc_init(void) { return -ENOSYS; } static inline bool rproc_is_initialized(void) { return false; } static inline int rproc_load(int id, ulong addr, ulong size) { return -ENOSYS; } static inline int rproc_start(int id) { return -ENOSYS; } static inline int rproc_stop(int id) { return -ENOSYS; } static inline int rproc_reset(int id) { return -ENOSYS; } static inline int rproc_ping(int id) { return -ENOSYS; } static inline int rproc_is_running(int id) { return -ENOSYS; } #endif #endif /* _RPROC_H_ */
{ "pile_set_name": "Github" }
/** * @private */ Ext.define('Ext.fx.runner.Css', { extend: 'Ext.Evented', requires: [ 'Ext.fx.Animation' ], prefixedProperties: { 'transform' : true, 'transform-origin' : true, 'perspective' : true, 'transform-style' : true, 'transition' : true, 'transition-property' : true, 'transition-duration' : true, 'transition-timing-function': true, 'transition-delay' : true, 'animation' : true, 'animation-name' : true, 'animation-duration' : true, 'animation-iteration-count' : true, 'animation-direction' : true, 'animation-timing-function' : true, 'animation-delay' : true }, lengthProperties: { 'top' : true, 'right' : true, 'bottom' : true, 'left' : true, 'width' : true, 'height' : true, 'max-height' : true, 'max-width' : true, 'min-height' : true, 'min-width' : true, 'margin-bottom' : true, 'margin-left' : true, 'margin-right' : true, 'margin-top' : true, 'padding-bottom' : true, 'padding-left' : true, 'padding-right' : true, 'padding-top' : true, 'border-bottom-width': true, 'border-left-width' : true, 'border-right-width' : true, 'border-spacing' : true, 'border-top-width' : true, 'border-width' : true, 'outline-width' : true, 'letter-spacing' : true, 'line-height' : true, 'text-indent' : true, 'word-spacing' : true, 'font-size' : true, 'translate' : true, 'translateX' : true, 'translateY' : true, 'translateZ' : true, 'translate3d' : true }, durationProperties: { 'transition-duration' : true, 'transition-delay' : true, 'animation-duration' : true, 'animation-delay' : true }, angleProperties: { rotate : true, rotateX : true, rotateY : true, rotateZ : true, skew : true, skewX : true, skewY : true }, lengthUnitRegex: /([a-z%]*)$/, DEFAULT_UNIT_LENGTH: 'px', DEFAULT_UNIT_ANGLE: 'deg', DEFAULT_UNIT_DURATION: 'ms', formattedNameCache: {}, constructor: function() { var supports3dTransform = Ext.feature.has.Css3dTransforms; if (supports3dTransform) { this.transformMethods = ['translateX', 'translateY', 'translateZ', 'rotate', 'rotateX', 'rotateY', 'rotateZ', 'skewX', 'skewY', 
'scaleX', 'scaleY', 'scaleZ']; } else { this.transformMethods = ['translateX', 'translateY', 'rotate', 'skewX', 'skewY', 'scaleX', 'scaleY']; } this.vendorPrefix = Ext.browser.getStyleDashPrefix(); this.ruleStylesCache = {}; return this; }, getStyleSheet: function() { var styleSheet = this.styleSheet, styleElement, styleSheets; if (!styleSheet) { styleElement = document.createElement('style'); styleElement.type = 'text/css'; (document.head || document.getElementsByTagName('head')[0]).appendChild(styleElement); styleSheets = document.styleSheets; this.styleSheet = styleSheet = styleSheets[styleSheets.length - 1]; } return styleSheet; }, applyRules: function(selectors) { var styleSheet = this.getStyleSheet(), ruleStylesCache = this.ruleStylesCache, rules = styleSheet.cssRules, selector, properties, ruleStyle, ruleStyleCache, rulesLength, name, value; for (selector in selectors) { properties = selectors[selector]; ruleStyle = ruleStylesCache[selector]; if (ruleStyle === undefined) { rulesLength = rules.length; styleSheet.insertRule(selector + '{}', rulesLength); ruleStyle = ruleStylesCache[selector] = rules.item(rulesLength).style; } ruleStyleCache = ruleStyle.$cache; if (!ruleStyleCache) { ruleStyleCache = ruleStyle.$cache = {}; } for (name in properties) { value = this.formatValue(properties[name], name); name = this.formatName(name); if (ruleStyleCache[name] !== value) { ruleStyleCache[name] = value; if (value === null) { ruleStyle.removeProperty(name); } else { ruleStyle.setProperty(name, value, 'important'); } } } } return this; }, applyStyles: function(styles) { var id, element, elementStyle, properties, name, value; for (id in styles) { if (styles.hasOwnProperty(id)) { element = document.getElementById(id); if (!element) { return this; } elementStyle = element.style; properties = styles[id]; for (name in properties) { if (properties.hasOwnProperty(name)) { value = this.formatValue(properties[name], name); name = this.formatName(name); if (value === null) { 
elementStyle.removeProperty(name); } else { elementStyle.setProperty(name, value, 'important'); } } } } } return this; }, formatName: function(name) { var cache = this.formattedNameCache, formattedName = cache[name]; if (!formattedName) { if ((Ext.os.is.Tizen || !Ext.feature.has.CssTransformNoPrefix) && this.prefixedProperties[name]) { formattedName = this.vendorPrefix + name; } else { formattedName = name; } cache[name] = formattedName; } return formattedName; }, formatValue: function(value, name) { var type = typeof value, lengthUnit = this.DEFAULT_UNIT_LENGTH, transformMethods, method, i, ln, transformValues, values, unit; if (value === null) { return ''; } if (type == 'string') { if (this.lengthProperties[name]) { unit = value.match(this.lengthUnitRegex)[1]; if (unit.length > 0) { //<debug error> if (unit !== lengthUnit) { Ext.Logger.error("Length unit: '" + unit + "' in value: '" + value + "' of property: '" + name + "' is not " + "valid for animation. Only 'px' is allowed"); } //</debug> } else { return value + lengthUnit; } } return value; } else if (type == 'number') { if (value == 0) { return '0'; } if (this.lengthProperties[name]) { return value + lengthUnit; } if (this.angleProperties[name]) { return value + this.DEFAULT_UNIT_ANGLE; } if (this.durationProperties[name]) { return value + this.DEFAULT_UNIT_DURATION; } } else if (name === 'transform') { transformMethods = this.transformMethods; transformValues = []; for (i = 0,ln = transformMethods.length; i < ln; i++) { method = transformMethods[i]; transformValues.push(method + '(' + this.formatValue(value[method], method) + ')'); } return transformValues.join(' '); } else if (Ext.isArray(value)) { values = []; for (i = 0,ln = value.length; i < ln; i++) { values.push(this.formatValue(value[i], name)); } return (values.length > 0) ? values.join(', ') : 'none'; } return value; } });
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: f1b8bd5d209198a459a56b1b6032b5bc MonoImporter: externalObjects: {} serializedVersion: 2 defaultReferences: [] executionOrder: 0 icon: {instanceID: 0} userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
--- changelog: last_modified_at: title: Enabling or disabling a Step conditionally redirect_from: "/jp/tips-and-tricks/disable-a-step-by-condition/" menu: steps-workflows-main: weight: 10 --- {% include not_translated_yet.html %} ## Disabling a Step If you don't want to remove the Step from your Workflow and you don't want to duplicate the Workflow either (which is the preferred way if you want to experiment with new things; you can just create a "backup" clone of your original Workflow) then you can simply disable a Step by specifying `run_if: false`. An example: - script: run_if: false inputs: - content: |- #!/bin/bash echo "This will never run, because of run_if:false" ## Running a Step only in a CI environment This is quite similar to how you [completely disable a step](#disable-a-step), but instead of specifying `false` as the `run_if` expression, you specify `.IsCI`, which will only be true in CI mode. This method can be useful to debug builds locally, where you don't want to run specific steps on your own Mac/PC. Lots of Steps have this `run_if` flag set by default, for example the `Git Clone` step is configured with `run_if: .IsCI` in the step's default configuration (`step.yml`), because the most common use case when you run a build locally is that you already have the code on your Mac/PC and so you don't want to do a `Git Clone`. Of course you can change the `run_if` property of any step, so you can specify a `run_if: true` for the `Git Clone` step if you want to run it locally too. {% include message_box.html type="note" title="Enable CI mode" content=" CI mode can be enabled on your own Mac/PC by setting the `CI` environment to `true` (e.g. with `export CI=true` in your Bash Terminal), or by running `bitrise run` with the `--ci` flag: `bitrise --ci run ...`. 
"%} ## Running a Step only if the Build failed _To do this you have to switch to_ `_bitrise.yml_` _mode (open the Workflow Editor on bitrise.io -> left side: click on_ `_bitrise.yml_` _to switch to the interactive_ `_bitrise.yml_` _editor)._ You have to add two properties to the Step you **only** want to run when the Build failed (at that point, when the Step would run): * `is_always_run: true` (this enables the Step to be considered to run even if a previous Step failed) * `run_if: .IsBuildFailed` (you can find more examples of the `run_if` template at: [https://github.com/bitrise-io/bitrise/blob/master/_examples/experimentals/templates/bitrise.yml](https://github.com/bitrise-io/bitrise/blob/master/_examples/experimentals/templates/bitrise.yml)). An example `script` step, which will only run if the Build failed: - script: is_always_run: true run_if: .IsBuildFailed inputs: - content: |- #!/bin/bash echo "Build Failed!" {% include message_box.html type="note" title="A **run_if** can be any valid **Go** template" content=" A `run_if` can be any valid [Go template](https://golang.org/pkg/text/template/), as long as it evaluates to `true` or `false` (or any of the String representation, e.g. `\"True\"`, `\"t\"`, `\"yes\"` or `\"y\"` are all considered to be `true`). If the template evaluates to `true` the Step will run, otherwise it won't. "%} An example `run_if` to check a **custom environment variable** (you can expose environment variables from your scripts too, using [envman](https://github.com/bitrise-io/envman/)): {% raw %} run_if: |- {{enveq "CUSTOM_ENV_VAR_KEY" "test value to test against"}} {% endraw %} This `run_if` will skip the step in every case when the value of `CUSTOM_ENV_VAR_KEY` is not `test value to test against`.
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef VMHeap_h
#define VMHeap_h

#include "Chunk.h"
#include "FixedVector.h"
#include "HeapKind.h"
#include "LargeRange.h"
#include "Map.h"
#include "Vector.h"
#if BOS(DARWIN)
#include "Zone.h"
#endif

namespace bmalloc {

class BeginTag;
class EndTag;
class Heap;

// Controls whether a scavenge operation runs synchronously or is deferred.
// NOTE: unscoped enum in a header -- values `Sync`/`Async` leak into the
// enclosing (bmalloc) namespace; callers use them unqualified.
typedef enum { Sync, Async } ScavengeMode;

// Interface to virtual memory for the allocator. Construction requires the
// caller to already hold the heap lock (passed as a lock_guard witness).
class VMHeap {
public:
    VMHeap(std::lock_guard<Mutex>&);

    // Attempts to reserve a large chunk of address space with the given
    // alignment and size; the returned LargeRange is empty on failure
    // ("try" prefix -- failure is expected and non-fatal).
    LargeRange tryAllocateLargeChunk(size_t alignment, size_t);
};

} // namespace bmalloc

#endif // VMHeap_h
{ "pile_set_name": "Github" }